PostgreSQL 命令行操作完整帮助文档

Scroll Down

PostgreSQL 命令行操作完整帮助文档

📖 目录

  1. 快速导出CSV
  2. 数据导入PostgreSQL
  3. 执行大文件SQL
  4. 常用维护命令
  5. 性能优化建议
  6. 与MySQL的区别

1. 快速导出CSV

1.1 基础导出命令

# 基本格式(注意:-o 默认输出 psql 的对齐表格格式,并非真正的CSV;
# PostgreSQL 12+ 可加 --csv 参数输出标准CSV)
psql -U用户名 -d数据库名 --csv -c "查询语句" -o 输出文件.csv

# 示例:导出整个表
psql -Upostgres -dmydb --csv -c "SELECT * FROM users" -o users.csv

# 示例:导出特定字段
psql -Upostgres -dmydb -c "COPY (SELECT id, name, email FROM customers) TO STDOUT WITH CSV" > customers.csv

1.2 使用COPY命令导出(推荐)

# 方法1:通过psql执行COPY命令
psql -Upostgres -dmydb -c "\COPY (SELECT * FROM table) TO '/tmp/output.csv' WITH CSV HEADER"

# 方法2:直接使用SQL COPY命令(需要超级用户权限)
psql -Upostgres -dmydb -c "COPY table TO '/tmp/output.csv' WITH (FORMAT CSV, HEADER true)"

# 方法3:导出为制表符分隔文件
psql -Upostgres -dmydb -c "\COPY table TO '/tmp/output.tsv' WITH DELIMITER E'\t' CSV HEADER"

1.3 格式化CSV导出

# 包含表头
psql -Upostgres -dmydb -c "\COPY table TO '/tmp/output.csv' WITH CSV HEADER"

# 不包含表头
psql -Upostgres -dmydb -c "\COPY table TO '/tmp/output.csv' WITH CSV"

# 自定义分隔符和引号
psql -Upostgres -dmydb -c "\COPY table TO '/tmp/output.csv' WITH (FORMAT CSV, DELIMITER '|', QUOTE '\"', HEADER true)"

# 编码指定
psql -Upostgres -dmydb -c "\COPY table TO '/tmp/output.csv' WITH (FORMAT CSV, HEADER true, ENCODING 'UTF8')"

1.4 使用管道导出

# 方法1:直接重定向
psql -Upostgres -dmydb -t -A -F"," -c "SELECT * FROM table" > output.csv

# 方法2:使用awk格式化
psql -Upostgres -dmydb -t -A -F$'\t' -c "SELECT * FROM table" | 
  awk 'BEGIN {FS="\t"; OFS=","} {print $1, $2, $3}' > output.csv

# 方法3:添加表头
echo "id,name,email" > output.csv && \
psql -Upostgres -dmydb -t -A -F"," -c "SELECT id, name, email FROM users" >> output.csv

1.5 复杂查询导出

# 多表关联导出
# 注意:\COPY 是 psql 元命令,必须写在同一行,不能跨行;
# 多行查询请改用服务器端 COPY ... TO STDOUT 并重定向输出
psql -Upostgres -dmydb -c "
COPY (
  SELECT u.id, u.name, o.order_date, o.total_amount
  FROM users u
  JOIN orders o ON u.id = o.user_id
  WHERE o.status = 'completed'
) TO STDOUT WITH CSV HEADER
" > /tmp/user_orders.csv

# 带窗口函数的查询导出(同上:多行查询用 COPY ... TO STDOUT + 重定向)
psql -Upostgres -dmydb -c "
COPY (
  SELECT 
    id,
    name,
    amount,
    SUM(amount) OVER (PARTITION BY category ORDER BY created_at) as running_total
  FROM transactions
) TO STDOUT WITH CSV HEADER
" > /tmp/transactions.csv

1.6 压缩导出

# 直接压缩
psql -Upostgres -dmydb -c "\COPY table TO STDOUT WITH CSV" | gzip > data.csv.gz

# 使用pg_dump导出并压缩
pg_dump -Upostgres -dmydb -t table --data-only --inserts | gzip > data.sql.gz

# 并行压缩(使用pigz)
psql -Upostgres -dmydb -c "\COPY table TO STDOUT WITH CSV" | pigz -9 > data.csv.gz

1.7 分块导出大数据

#!/bin/bash
# 分页导出大表
LIMIT=100000
OFFSET=0
PAGE=1

while true; do
  psql -Upostgres -dmydb -t -A -F"," -c "
    SELECT * FROM huge_table 
    ORDER BY id
    LIMIT $LIMIT OFFSET $OFFSET
  " > "data_part_$PAGE.csv"
  
  # 检查是否还有数据
  if [ ! -s "data_part_$PAGE.csv" ]; then
    rm "data_part_$PAGE.csv"
    break
  fi
  
  OFFSET=$((OFFSET + LIMIT))
  PAGE=$((PAGE + 1))
done

1.8 JSON格式导出

# 导出为JSON格式
psql -Upostgres -dmydb -t -A -c "
SELECT json_agg(row_to_json(t)) 
FROM (
  SELECT * FROM table LIMIT 1000
) t
" > data.json

# 每行一个JSON对象
psql -Upostgres -dmydb -t -A -c "
SELECT row_to_json(row) 
FROM (
  SELECT * FROM table LIMIT 1000
) row
" | jq . > data_lines.json

2. 数据导入PostgreSQL

2.1 使用COPY命令导入(高性能)

# 基本格式
psql -Upostgres -dmydb -c "\COPY table FROM '/path/to/data.csv' WITH CSV HEADER"

# 示例:导入CSV文件
psql -Upostgres -dmydb -c "\COPY users FROM '/tmp/users.csv' WITH (FORMAT CSV, HEADER true, DELIMITER ',')"

# 导入制表符分隔文件
psql -Upostgres -dmydb -c "\COPY users FROM '/tmp/users.tsv' WITH (FORMAT CSV, DELIMITER E'\t', HEADER true)"

2.2 使用SQL COPY命令

# 需要超级用户权限
psql -Upostgres -dmydb -c "COPY users FROM '/tmp/users.csv' WITH (FORMAT CSV, HEADER true, ENCODING 'UTF8')"

# 导入时指定列
psql -Upostgres -dmydb -c "COPY users(id, name, email) FROM '/tmp/users.csv' WITH CSV HEADER"

# 忽略冲突行导入(注意:COPY 本身不支持 ON CONFLICT 子句;
# 需先导入临时表,再用 INSERT ... ON CONFLICT 写入目标表)
psql -Upostgres -dmydb -c "CREATE TEMP TABLE IF NOT EXISTS temp_users (LIKE users); \COPY temp_users FROM '/tmp/users.csv' WITH CSV HEADER; INSERT INTO users SELECT * FROM temp_users ON CONFLICT DO NOTHING"

2.3 命令行管道导入

# CSV文件导入
cat data.csv | psql -Upostgres -dmydb -c "\COPY table FROM STDIN WITH CSV HEADER"

# 从标准输入导入
echo "1,John Doe,john@example.com" | psql -Upostgres -dmydb -c "\COPY users FROM STDIN WITH CSV"

# 使用sed预处理后导入
cat data.csv | sed 's/\"\"/\\\"/g' | psql -Upostgres -dmydb -c "\COPY table FROM STDIN WITH CSV HEADER"

2.4 导入压缩文件

# 解压并导入
gunzip -c data.csv.gz | psql -Upostgres -dmydb -c "\COPY table FROM STDIN WITH CSV HEADER"

# 一步完成(gzip)
zcat data.csv.gz | psql -Upostgres -dmydb -c "\COPY table FROM STDIN WITH CSV HEADER"

# 使用其他压缩格式
bunzip2 -c data.csv.bz2 | psql -Upostgres -dmydb -c "\COPY table FROM STDIN WITH CSV HEADER"

2.5 导入时数据转换

# 创建临时表导入
psql -Upostgres -dmydb << EOF
-- 创建临时表
CREATE TEMP TABLE temp_import AS SELECT * FROM users LIMIT 0;

-- 导入数据到临时表
\COPY temp_import FROM '/tmp/data.csv' WITH CSV HEADER;

-- 数据转换并插入到目标表
INSERT INTO users (name, email, created_at)
SELECT 
  INITCAP(name),
  LOWER(email),
  NOW()
FROM temp_import
ON CONFLICT (email) DO UPDATE SET
  name = EXCLUDED.name;

-- 清理临时表
DROP TABLE temp_import;
EOF

2.6 使用pg_bulkload(高性能导入工具)

# 安装pg_bulkload后使用
pg_bulkload -i /tmp/data.csv -O users -l /tmp/bulkload.log \
  -o "TYPE=CSV" -o "DELIMITER=," -o "PARSE_BADFILE=/tmp/parse_errors.csv"

2.7 导入JSON数据

# JSON数组导入
psql -Upostgres -dmydb -c "
INSERT INTO users (name, email)
SELECT 
  data->>'name',
  data->>'email'
FROM json_array_elements('[{\"name\":\"John\",\"email\":\"john@example.com\"}]'::json) AS data
"

# 从JSON文件导入(注意:pg_read_file 读取的是服务器端文件,
# 需要超级用户或 pg_read_server_files 权限;psql -c 不读取管道输入,
# 因此无需 cat 前缀,直接执行即可)
psql -Upostgres -dmydb -c "
INSERT INTO users (name, email)
SELECT 
  value->>'name',
  value->>'email'
FROM json_array_elements(pg_read_file('/tmp/data.json')::json) AS value
"

2.8 导入前预处理脚本

#!/bin/bash
# 预处理并导入
INPUT_FILE="data.csv"
TEMP_FILE="$(mktemp)"

# 清理数据
# 注意:不能在续行符 \ 之后写注释,否则续行失效;
# 以 | 结尾的管道本身即可跨行,无需反斜杠
cat "$INPUT_FILE" |
  sed 's/\"\"/\\\"\"/g' |                      # 处理双引号
  iconv -f GBK -t UTF-8//IGNORE |              # 转换编码
  awk -F',' 'NR==1 || $1 !~ /^#/ {print}' |    # 移除注释行
  dos2unix > "$TEMP_FILE"                      # 转换换行符

# 导入清理后的数据
psql -Upostgres -dmydb -c "\COPY users FROM '$TEMP_FILE' WITH CSV HEADER"

# 清理临时文件
rm "$TEMP_FILE"

3. 执行大文件SQL

3.1 基础执行方式

# 方法1:直接执行
psql -Upostgres -dmydb -f large_file.sql

# 方法2:从标准输入读取
cat large_file.sql | psql -Upostgres -dmydb

# 方法3:交互式执行
psql -Upostgres -dmydb
mydb=# \i /path/to/large_file.sql

# 方法4:带进度显示
pv large_file.sql | psql -Upostgres -dmydb

3.2 执行控制参数

# 设置超时
psql -Upostgres -dmydb --single-transaction --set=ON_ERROR_STOP=1 -f large_file.sql

# 只显示错误
psql -Upostgres -dmydb -q -f large_file.sql 2> errors.log

# 记录执行时间
time psql -Upostgres -dmydb -f large_file.sql

3.3 分割大文件执行

# 按行数分割(split -l 按行数而非文件大小分割;
# 注意:可能把一条跨多行的SQL语句从中截断,分割前请确认语句边界)
split -l 10000 large_file.sql chunk_

# 按事务分割并执行
for file in chunk_*; do
  echo "Processing $file..."
  psql -Upostgres -dmydb -v ON_ERROR_STOP=1 -f "$file"
  if [ $? -ne 0 ]; then
    echo "Error processing $file"
    break
  fi
  rm "$file"
done

3.4 分批次执行大更新

#!/bin/bash
# 分批次执行大更新
BATCH_SIZE=10000
TABLE="huge_table"

# 获取总行数
TOTAL=$(psql -Upostgres -dmydb -t -A -c "SELECT COUNT(*) FROM $TABLE WHERE status = 'pending'")

# 计算批次数
BATCHES=$(( (TOTAL + BATCH_SIZE - 1) / BATCH_SIZE ))

for ((i=0; i<BATCHES; i++)); do
  OFFSET=$((i * BATCH_SIZE))
  echo "Processing batch $((i+1))/$BATCHES..."
  
  psql -Upostgres -dmydb <<EOF
    BEGIN;
    WITH updated AS (
      SELECT id
      FROM $TABLE
      WHERE status = 'pending'
      ORDER BY id
      LIMIT $BATCH_SIZE
      FOR UPDATE SKIP LOCKED
    )
    UPDATE $TABLE t
    SET status = 'processed',
        processed_at = NOW()
    FROM updated u
    WHERE t.id = u.id;
    COMMIT;
EOF
  
  sleep 0.5  # 避免锁竞争
done

3.5 执行时监控进度

#!/bin/bash
# 带进度监控的执行
SQL_FILE="large_update.sql"
TOTAL_LINES=$(wc -l < "$SQL_FILE")
CURRENT=0

while IFS= read -r line; do
  ((CURRENT++))
  # 每100行输出一次进度
  if (( CURRENT % 100 == 0 )); then
    PERCENT=$(( CURRENT * 100 / TOTAL_LINES ))
    echo -ne "Progress: $PERCENT% ($CURRENT/$TOTAL_LINES)\r"
  fi
  echo "$line"
done < "$SQL_FILE" | psql -Upostgres -dmydb --single-transaction

3.6 使用事务块执行

# 在单个事务中执行
psql -Upostgres -dmydb << EOF
BEGIN;
\i /path/to/large_file.sql
COMMIT;
EOF

# 带错误回滚
psql -Upostgres -dmydb -v ON_ERROR_STOP=1 << EOF
BEGIN;
\i /path/to/file1.sql
\i /path/to/file2.sql
COMMIT;
EOF

3.7 并行执行多个SQL文件

#!/bin/bash
# 并行执行脚本
SQL_DIR="/path/to/sql/files"
MAX_JOBS=4  # 最大并行数

# 使用GNU parallel并行执行
export PGPASSWORD="your_password"
find "$SQL_DIR" -name "*.sql" | parallel -j $MAX_JOBS "
  echo 'Processing {}'
  psql -Upostgres -dmydb -f {} 2>&1 | tee {}.log
"

3.8 大文件索引创建优化

# 批量创建索引
# 注意:CREATE INDEX CONCURRENTLY 不能在事务块内执行,
# 因此不能用 BEGIN/COMMIT 包裹
psql -Upostgres -dmydb << EOF
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;

CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_email ON users(email);
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_created ON users(created_at);
EOF

4. 常用维护命令

4.1 数据库备份与恢复

# 备份单个数据库
pg_dump -Upostgres -dmydb -Fc -f mydb_backup.dump

# 备份所有数据库
pg_dumpall -Upostgres -f all_dbs_backup.sql

# 压缩备份
pg_dump -Upostgres -dmydb | gzip > mydb_$(date +%Y%m%d).sql.gz

# 只备份模式(结构)
pg_dump -Upostgres -dmydb -s -f mydb_schema.sql

# 只备份数据
pg_dump -Upostgres -dmydb -a -f mydb_data.sql

# 恢复数据库
pg_restore -Upostgres -dmydb mydb_backup.dump

# 从压缩文件恢复
gunzip -c backup.sql.gz | psql -Upostgres -dmydb

4.2 数据库状态监控

# 查看活动连接
psql -Upostgres -c "SELECT * FROM pg_stat_activity;"

# 查看锁信息
psql -Upostgres -c "SELECT * FROM pg_locks;"

# 查看数据库大小
psql -Upostgres -c "
SELECT 
  datname AS database,
  pg_size_pretty(pg_database_size(datname)) AS size
FROM pg_database
ORDER BY pg_database_size(datname) DESC;
"

# 查看表大小
psql -Upostgres -dmydb -c "
SELECT 
  schemaname,
  tablename,
  pg_size_pretty(pg_total_relation_size(schemaname || '.' || tablename)) AS total_size
FROM pg_tables
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY pg_total_relation_size(schemaname || '.' || tablename) DESC;
"

4.3 性能分析

# 查看慢查询(PostgreSQL 13+ 列名为 total_exec_time / mean_exec_time;
# 12 及更早版本为 total_time / mean_time)
psql -Upostgres -c "
SELECT 
  query,
  calls,
  total_exec_time,
  mean_exec_time,
  rows
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 10;
"

# 查看索引使用情况
psql -Upostgres -dmydb -c "
SELECT 
  schemaname,
  tablename,
  indexname,
  idx_scan,
  idx_tup_read,
  idx_tup_fetch
FROM pg_stat_all_indexes
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
ORDER BY idx_scan DESC;
"

4.4 维护任务

# 重新索引
psql -Upostgres -dmydb -c "REINDEX DATABASE mydb;"

# 清理表(VACUUM)
psql -Upostgres -dmydb -c "VACUUM ANALYZE table_name;"

# 更新统计信息
psql -Upostgres -dmydb -c "ANALYZE table_name;"

# 检查表健康状态
psql -Upostgres -dmydb -c "SELECT schemaname, tablename, n_dead_tup FROM pg_stat_user_tables;"

4.5 用户和权限管理

# 创建用户
psql -Upostgres -c "CREATE USER myuser WITH PASSWORD 'mypassword';"

# 创建数据库
psql -Upostgres -c "CREATE DATABASE mydb OWNER myuser;"

# 授予权限
psql -Upostgres -c "GRANT ALL PRIVILEGES ON DATABASE mydb TO myuser;"

# 查看用户权限
psql -Upostgres -c "\du"

# 修改密码
psql -Upostgres -c "ALTER USER myuser WITH PASSWORD 'newpassword';"

5. 性能优化建议

5.1 导出优化

# 1. 使用COPY命令而不是SELECT ... TO STDOUT
# 2. 大表导出时使用游标分批处理
# 3. 导出前关闭自动提交
# 4. 使用并行导出工具如pg_dump的并行模式

# 并行导出示例
pg_dump -Upostgres -dmydb -j 4 -Fd -f /tmp/backup_dir

5.2 导入优化

# 1. 导入前禁用约束和索引
BEGIN;
ALTER TABLE table_name DISABLE TRIGGER ALL;
-- 导入数据
ALTER TABLE table_name ENABLE TRIGGER ALL;
COMMIT;

# 2. 使用COPY命令而不是INSERT
# 3. 增大maintenance_work_mem用于创建索引
SET maintenance_work_mem = '1GB';

# 4. 使用UNLOGGED表提高导入速度
CREATE UNLOGGED TABLE temp_import (...);
-- 导入数据到临时表
ALTER TABLE temp_import SET LOGGED;

5.3 大文件SQL执行优化

# 1. 使用事务包裹批量操作
BEGIN;
-- 批量操作
COMMIT;

# 2. 调整WAL相关参数
#    注意:synchronous_commit 可在会话级 SET;
#    fsync 和 full_page_writes 只能在 postgresql.conf 中修改并重启/重载生效,
#    且 fsync=off 有数据丢失风险,仅限可重建数据的一次性批量导入场景
SET synchronous_commit = off;
-- postgresql.conf: fsync = off            (危险,谨慎使用)
-- postgresql.conf: full_page_writes = off (危险,谨慎使用)

# 3. 使用游标处理大结果集
BEGIN;
DECLARE cur CURSOR FOR SELECT * FROM large_table;
-- 处理数据
COMMIT;

5.4 配置文件优化

# postgresql.conf 相关配置

# 内存相关
shared_buffers = 4GB              # 25% of total RAM
work_mem = 64MB                   # for complex sorts
maintenance_work_mem = 1GB        # for CREATE INDEX

# WAL相关
wal_level = replica
max_wal_size = 4GB
min_wal_size = 1GB
checkpoint_timeout = 30min

# 连接相关
max_connections = 200
shared_preload_libraries = 'pg_stat_statements'

# 并行查询
max_parallel_workers_per_gather = 4
max_parallel_workers = 8

5.5 批量操作优化

# 使用批量INSERT
INSERT INTO table (col1, col2) VALUES
(v1, v2),
(v3, v4),
...;

# 使用CTE批量更新
WITH batch AS (
  SELECT id, new_value
  FROM source_table
  LIMIT 10000
)
UPDATE target_table t
SET value = b.new_value
FROM batch b
WHERE t.id = b.id;

# 使用COPY进行批量插入
\COPY table FROM 'data.csv' WITH CSV

6. 与MySQL的主要区别

6.1 语法差异

# PostgreSQL                               # MySQL
\COPY table TO 'file.csv'                 # SELECT ... INTO OUTFILE
\COPY table FROM 'file.csv'               # LOAD DATA INFILE
pg_dump -Fc                               # mysqldump
pg_restore                                # mysql < backup.sql
\d table                                  # DESCRIBE table
\dt                                       # SHOW TABLES
\du                                       # SHOW GRANTS
\df                                       # SHOW FUNCTION STATUS

6.2 性能优化差异

# PostgreSQL                               # MySQL
VACUUM ANALYZE                           # OPTIMIZE TABLE
SET enable_seqscan = off;                # USE INDEX
EXPLAIN ANALYZE                          # EXPLAIN FORMAT=JSON
pg_stat_statements                       # PERFORMANCE_SCHEMA
pg_buffercache                           # SHOW ENGINE INNODB STATUS

6.3 数据类型差异

# PostgreSQL                               # MySQL
SERIAL                                   # AUTO_INCREMENT
TEXT (无长度限制)                       # VARCHAR(65535)
BYTEA                                    # BLOB
JSONB                                    # JSON
ARRAY[]                                  # 无原生数组
UUID                                     # CHAR(36)

6.4 功能特性差异

# PostgreSQL特有功能                    # MySQL对应
表继承(INHERITS)                      # 无
物化视图(MATERIALIZED VIEW)          # 无
窗口函数(WINDOW FUNCTIONS)           # 8.0+支持
公共表表达式(WITH queries)           # 8.0+支持
全文搜索(tsvector/tsquery)           # FULLTEXT索引

📋 快速参考速查表

# 导出速查
psql -Uuser -dmydb -c "SELECT ..." -o file.csv                  # 简单导出
psql -Uuser -dmydb -c "\COPY table TO 'file.csv' WITH CSV"      # COPY导出
pg_dump -Uuser -dmydb -t table -a --inserts > data.sql         # 数据导出

# 导入速查
psql -Uuser -dmydb -f file.sql                                 # 执行SQL文件
psql -Uuser -dmydb -c "\COPY table FROM 'file.csv' WITH CSV"   # 导入CSV
cat file.sql | psql -Uuser -dmydb                              # 管道导入

# 维护速查
pg_dump -Uuser -dmydb -Fc -f backup.dump                       # 二进制备份
pg_restore -Uuser -dmydb backup.dump                           # 恢复备份
psql -Uuser -c "\l"                                            # 列出数据库
psql -Uuser -dmydb -c "\dt"                                    # 列出表

# 实用管道组合
cat data.csv | grep -v 'NULL' | psql -Uuser -dmydb -c "\COPY table FROM STDIN"  # 过滤后导入
pg_dump -Uuser -dmydb | gzip > backup.sql.gz                                    # 压缩备份
gunzip -c backup.sql.gz | psql -Uuser -dmydb                                    # 压缩恢复

⚠️ 注意事项

  1. 权限要求:COPY TO/FROM 需要文件系统权限,服务器端COPY需要超级用户或 pg_read_server_files/pg_write_server_files 角色权限
  2. 路径问题:客户端\COPY使用客户端文件系统,服务器端COPY使用服务器文件系统
  3. 字符编码:确保数据库编码和文件编码一致(推荐UTF-8)
  4. 大文件处理:超过内存限制时使用游标或分批处理
  5. 并发控制:PostgreSQL使用MVCC,注意长事务的影响
  6. 锁机制:CREATE INDEX CONCURRENTLY 可以避免写锁

🔧 故障排查

# 查看错误日志
tail -f /var/log/postgresql/postgresql-*.log

# 查看当前连接状态
psql -Upostgres -c "SELECT * FROM pg_stat_activity WHERE state = 'active';"

# 检查表状态
psql -Upostgres -dmydb -c "VACUUM VERBOSE ANALYZE table_name;"

# 查看锁等待
psql -Upostgres -c "
SELECT 
  blocked_locks.pid AS blocked_pid,
  blocked_activity.query AS blocked_query,
  blocking_locks.pid AS blocking_pid,
  blocking_activity.query AS blocking_query
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks ON blocking_locks.locktype = blocked_locks.locktype
  AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
  AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
  AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
  AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
  AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
  AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
  AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
  AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
  AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
  AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.granted;
"

# 查看数据库大小和年龄
psql -Upostgres -c "
SELECT 
  datname,
  age(datfrozenxid) as xid_age,
  pg_size_pretty(pg_database_size(datname)) as size
FROM pg_database
ORDER BY age(datfrozenxid) DESC;
"

🚀 高级技巧

使用外部数据包装器(FDW)

# 创建外部表
psql -Upostgres -dmydb -c "
CREATE EXTENSION IF NOT EXISTS file_fdw;

CREATE SERVER import_server FOREIGN DATA WRAPPER file_fdw;

CREATE FOREIGN TABLE external_users (
  id INTEGER,
  name TEXT,
  email TEXT
) SERVER import_server
OPTIONS (filename '/tmp/users.csv', format 'csv', header 'true');
"

使用pg_stat_statements监控

# 启用pg_stat_statements
psql -Upostgres -c "
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;

-- 查看最耗时的查询
SELECT 
  query,
  calls,
  total_exec_time,
  mean_exec_time,
  rows
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 10;
"

使用扩展功能

# 常用扩展(注意:含连字符的扩展名如 uuid-ossp 必须加双引号)
psql -Upostgres -dmydb -c "
CREATE EXTENSION IF NOT EXISTS hstore;        -- 键值对存储
CREATE EXTENSION IF NOT EXISTS postgis;       -- 地理空间数据
CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";   -- UUID生成
CREATE EXTENSION IF NOT EXISTS pgcrypto;      -- 加密功能
CREATE EXTENSION IF NOT EXISTS tablefunc;     -- 交叉表功能
"