Postgresql

psql

run command from cli

psql -c "SELECT datname FROM pg_database;"

Connect to remote database:

export PGPASSWORD=password && psql --host=10.111.222.1 --port=5492 --username=user --no-password postgres

install psql

apt-get install postgresql-client

List databases

Alternative 1

SELECT datname FROM pg_database;

Alternative 2

Gives more information too (owner, encoding, collation, privileges).

\l
    Name     |    Owner    | Encoding |   Collate   |    Ctype    |   Access privileges   
-------------+-------------+----------+-------------+-------------+-----------------------
 indata_mdb  | uc_etl_prod | UTF8     | sv_SE.UTF-8 | sv_SE.UTF-8 |
...

Alternative 3

\l+

select database

\c testdb;

show tables

List all tables in all schemas

\dt *.*

list tables in current schema

\dt

Or from command line.

su - postgres -c "psql -c \"\\dt \" database"

list schemas

\dn

schema/search_path

List current schema

SHOW search_path;

Change search path.

SET search_path TO myschema, public;

describe table

\d table;
\d dc_store.document;

Change view. One line per attribute (\G in MySQL)

\x on
psql -x -c "select * from blabla limit 1;"

exit/quit

\q

update

UPDATE users SET admin = 1 WHERE user_id = 14002;

Create user

CREATE USER username WITH PASSWORD 'MYPASS';

Create database

CREATE DATABASE dbname OWNER username;

create table

CREATE TABLE films (
   code        char(5) CONSTRAINT firstkey PRIMARY KEY,
   title       varchar(40) NOT NULL,
   did         integer NOT NULL,
   date_prod   date,
   kind        varchar(10),
   len         interval hour to minute
);

Delete database

DROP DATABASE dbname;

delete database from cli

dropdb $dbname

Import a database

psql username  -h hostname -d dbname < dump.sql

change password

ALTER USER user_name WITH PASSWORD 'new_password';

Set super privileges

ALTER USER username WITH SUPERUSER;

Deleting user

DROP USER nomusr;

Getting help

\? or \h

size of database

SELECT pg_size_pretty( pg_database_size('database') );

size of table

SELECT pg_size_pretty( pg_total_relation_size('tablename') );

clean archives older than 2 days

su postgres -c "/usr/pgsql-9.5/bin/pg_archivecleanup /var/lib/pgsql/9.5/archivedir/ $(basename $(find /var/lib/pgsql/9.5/archivedir/ -ctime +2 | tail -n 1))"

backup

1. Backup database.
DATABASE=very_nice_database ; pg_dump -Fc -Z 2 --file=/tmp/${DATABASE}.$(date '+%Y%m%d').dump ${DATABASE}
2. Is the database in backup mode?
SELECT pg_is_in_backup();

restore

Look at permissions on database

psql -c "\l"
export DATABASE=very_nice_database

Drop database connections.

psql -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '${DATABASE}' AND pid <> pg_backend_pid();"

Drop database.

dropdb ${DATABASE}

Create database

createdb -T template0 ${DATABASE}

Restore database

pg_restore -j8 -d ${DATABASE} --no-privileges --no-owner --clean --if-exists --exit-on-error /tmp/${DATABASE}.$(date '+%Y%m%d' --date "-4 days").dump
Set correct permissions.
psql -c "ALTER DATABASE ${DATABASE} OWNER TO ${OWNER};"

bookmarks

https://wiki.postgresql.org/wiki/Disk_Usage

bloat

Every record that has been deleted but still takes up space is called a dead tuple.
Once no running transaction depends on those dead tuples any more, they are no longer needed.
PostgreSQL therefore runs VACUUM on such tables. VACUUM reclaims the storage occupied by these dead tuples.
The space occupied by these dead tuples may be referred to as bloat.
VACUUM scans the pages for dead tuples and records the freed space in the free space map (FSM).
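
Before running the big bloat query below, a quick look at where dead tuples are piling up can be had from the statistics views (standard view and columns; table names in the output will of course be your own):

-- tables with the most dead tuples
SELECT relname, n_live_tup, n_dead_tup
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC
LIMIT 10;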

Query for bloat (which itself looks a bit bloated... :-)

SELECT
 current_database(), schemaname, tablename, /*reltuples::bigint, relpages::bigint, otta,*/
 ROUND((CASE WHEN otta=0 THEN 0.0 ELSE sml.relpages::FLOAT/otta END)::NUMERIC,1) AS tbloat,
 CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::BIGINT END AS wastedbytes,
 iname, /*ituples::bigint, ipages::bigint, iotta,*/
 ROUND((CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages::FLOAT/iotta END)::NUMERIC,1) AS ibloat,
 CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes
FROM (
 SELECT
   schemaname, tablename, cc.reltuples, cc.relpages, bs,
   CEIL((cc.reltuples*((datahdr+ma-
     (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::FLOAT)) AS otta,
   COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
   COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::FLOAT)),0) AS iotta -- very rough approximation, assumes all cols
 FROM (
   SELECT
     ma,bs,schemaname,tablename,
     (datawidth+(hdr+ma-(CASE WHEN hdr%ma=0 THEN ma ELSE hdr%ma END)))::NUMERIC AS datahdr,
     (maxfracsum*(nullhdr+ma-(CASE WHEN nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
   FROM (
     SELECT
       schemaname, tablename, hdr, ma, bs,
       SUM((1-null_frac)*avg_width) AS datawidth,
       MAX(null_frac) AS maxfracsum,
       hdr+(
         SELECT 1+COUNT(*)/8
         FROM pg_stats s2
         WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename
       ) AS nullhdr
     FROM pg_stats s, (
       SELECT
         (SELECT current_setting('block_size')::NUMERIC) AS bs,
         CASE WHEN SUBSTRING(v,12,3) IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
         CASE WHEN v ~ 'mingw32' THEN 8 ELSE 4 END AS ma
       FROM (SELECT version() AS v) AS foo
     ) AS constants
     GROUP BY 1,2,3,4,5
   ) AS foo
 ) AS rs
 JOIN pg_class cc ON cc.relname = rs.tablename
 JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema'
 LEFT JOIN pg_index i ON indrelid = cc.oid
 LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
) AS sml
ORDER BY wastedbytes DESC

clear space vacuum

\c database
vacuum;

if that doesn't work do:

vacuum freeze; or vacuum full;

vacuum table

Select database

\c ucc

Set search path (select schema)

SET search_path TO myschema, kdl;

Vacuum table

vacuum full taxation_initial_data;

autovacuum

Is autovacuum enabled?

SHOW autovacuum;
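
Per-table autovacuum history is kept in the statistics views:

SELECT relname, last_vacuum, last_autovacuum, autovacuum_count
FROM pg_stat_user_tables
ORDER BY last_autovacuum DESC NULLS LAST;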

count table

SELECT COUNT(*) FROM TABLE_NAME;

If you don't need an exact count, the current statistic from the catalog table pg_class might be good enough and is much faster to retrieve for big tables.

SELECT reltuples AS approximate_row_count FROM pg_class WHERE relname = 'table_name';

number of connections

su - postgres -c "psql -t -c 'SELECT * FROM pg_stat_activity;'" | wc -l

drop connections

SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'dbname' AND pid <> pg_backend_pid();

postgres configuration

postgresql.conf
Server settings live in this file.

pg_hba.conf
Connections (client authentication) are configured in this file.
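
A couple of illustrative pg_hba.conf lines (the address range and auth methods are just examples):

# TYPE  DATABASE  USER  ADDRESS       METHOD
local   all       all                 peer
host    all       all   10.0.0.0/8    md5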

reread configuration

su - postgres
/usr/bin/pg_ctl reload

From psql

SELECT pg_reload_conf();

list prepared transactions

select * from pg_prepared_xacts where database='cti_batch';

remove prepared transaction

ROLLBACK PREPARED '131077_AAAAAAAAAAAAAP//rB6PFn41NfBevSfGABG7nTE=_AAAAAAAAAAAAAP//rB6PFn41NfBevSfGABG7pAAAAAIAAAAA';

pager off

\pset pager off

pager less

\x auto
\pset pager on
\setenv PAGER less

pager

export PAGER=less

config file

.pgpass
hostname:port:database:username:password
(.my.cnf is the MySQL counterpart, not used by postgres.)

Permissions in postgres

pg_hba.conf   local file        Defines which sources to use for login credentials (ldap, local users... it feels a bit like nsswitch.conf).
owner         per database      Owner of the database/object. The right to modify or destroy an object is always the privilege of the owner only.
roles         cluster-wide      Like users and groups combined. One role can inherit another. Different attributes give various capabilities: login, superuser...
privileges    per database      Granted on objects (table, function...): SELECT, INSERT, UPDATE, DELETE.
You can log in with one role and then switch to another.
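
A small sketch of how roles, inheritance and privileges fit together (role names and password are made up):

-- a group-like role that cannot log in
CREATE ROLE readonly NOLOGIN;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
-- a login role that is a member of readonly
CREATE ROLE alice LOGIN PASSWORD 'secret' IN ROLE readonly;
-- logged in as alice, switch execution context to the group role
SET ROLE readonly;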

roles/users

Which roles and users exist.

\du

set new role

set role "super-man";

current role

user name of current execution context

SELECT current_user;

session user

SELECT session_user;

display privileges

\dp
\z

tablespaces

List tablespaces

SELECT spcname FROM pg_tablespace;

how many rows in table

SELECT reltuples AS estimate FROM pg_class where relname = 'member_offer';

delete rows matching value and timestamp

DELETE FROM receipt_transaction WHERE status_point = 'FAIL_POINT_CALC' and business_date  < ( now() - interval '+14 day' ) ;

user password

~/.pgpass
hostname:port:database:username:password
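
Example entry (host and credentials made up); psql ignores the file unless it is private:

echo '10.111.222.1:5432:mydb:myuser:mypassword' >> ~/.pgpass
chmod 600 ~/.pgpass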

kill

Kill, less aggressive

select pg_cancel_backend(16967);

kill

select pg_terminate_backend(16967) from pg_stat_activity;

who are you and where do you come from, whoami

SELECT CURRENT_USER usr, :'HOST' host, inet_server_port() port;

or

\conninfo

running queries on standby

max_standby_archive_delay = 600s
max_standby_streaming_delay = 600s

analyze query

EXPLAIN ANALYZE select ...
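
For example, reusing the table from the delete example above; BUFFERS adds block I/O counters to the plan:

EXPLAIN (ANALYZE, BUFFERS)
SELECT * FROM receipt_transaction WHERE status_point = 'FAIL_POINT_CALC';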

version

SELECT version();

master/slave setup

Streaming replication in PostgreSQL is based on log shipping. Every transaction in postgres is written to a transaction log called the WAL (write-ahead log) to achieve durability. A slave uses these WAL segments to continuously replicate changes from its master. Three processes play the major roles in streaming replication: the wal sender, the wal receiver and the startup process. The Log Sequence Number, or LSN, is a pointer to a location in the WAL.
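
Assuming a streaming setup is already in place, replication can be checked from both sides (column and function names below are the PostgreSQL 10+ ones):

-- on the master: one row per connected standby
SELECT client_addr, state, sent_lsn, replay_lsn FROM pg_stat_replication;
-- on the slave: is it still in recovery, and how far has it replayed?
SELECT pg_is_in_recovery();
SELECT pg_last_wal_replay_lsn();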

wal(write ahead log)

The pg_xlog directory holds the WAL (Write Ahead Log) files. WAL files contain a record of all changes made to the database. (Since PostgreSQL 10 the directory is named pg_wal.)
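
To see the current write position in the WAL and the file it maps to (function names as of PostgreSQL 10; older releases used pg_current_xlog_location()):

SELECT pg_current_wal_lsn(), pg_walfile_name(pg_current_wal_lsn());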

List tablespaces

\db

list clusters

pg_lsclusters

pgbackrest

# https://pgbackrest.org/user-guide.html#introduction
# Install pgbackrest
apt-get install -y pgbackrest
# pg-primary - Create pgBackRest configuration file and directories
mkdir -p -m 770 /var/log/pgbackrest
chown postgres:postgres /var/log/pgbackrest
mkdir -p /etc/pgbackrest
mkdir -p /etc/pgbackrest/conf.d
touch /etc/pgbackrest/pgbackrest.conf
chmod 640 /etc/pgbackrest/pgbackrest.conf
chown postgres:postgres /etc/pgbackrest/pgbackrest.conf
# Does it work?
sudo -u postgres pgbackrest
# Configure the PostgreSQL cluster data directory
# pg-primary:/etc/pgbackrest/pgbackrest.conf
[billo_dev]
pg1-path=/var/lib/postgresql/12/billo_dev
# On machine taking backups.
# Configure the pgBackRest repository path
# /etc/pgbackrest/pgbackrest.conf
[demo]
pg1-path=/var/lib/postgresql/12/demo
[global]
repo1-path=/var/lib/pgbackrest
# /etc/postgresql/12/main/postgresql.conf 
archive_command = 'pgbackrest --stanza=billo_dev archive-push %p'
archive_mode = on
max_wal_senders = 3
wal_level = replica
# Create the stanza.
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info stanza-create
# check configuration
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info check
# Run backup
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info backup
# Differential backup
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info --type=diff backup
# Full backup
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info --type=full backup
# Every 10 minutes except for 2:00-2:59
*/10 0-1,3-23 * * * pgbackrest --stanza=billo_dev --type=diff backup
# 02:10 02:20 02:30 02:40 02:50
10-50/10 2 * * *    pgbackrest --stanza=billo_dev --type=diff backup
# 02:00 Full
0 2 * * *           pgbackrest --stanza=billo_dev --type=full backup
# List backup information
sudo -u postgres pgbackrest info 
...
# Restore the last backup.
sudo -u postgres find /var/lib/postgresql/12/billo_dev -mindepth 1 -delete
# Restore the cluster and start PostgreSQL
sudo -u postgres pgbackrest --stanza=billo_dev restore
sudo pg_ctlcluster 12 restore start
...
# Restore a backup from a certain point in time.
sudo -u postgres find /var/lib/postgresql/12/billo_dev -mindepth 1 -delete
# Restore the cluster and start PostgreSQL
sudo -u postgres pgbackrest --stanza=billo_dev --set 20220321-113900F_20220321-122001D restore
sudo pg_ctlcluster 12 restore start

install on remote machine

repository

sudo adduser --disabled-password --gecos "" pgbackrest

setup directories

mkdir -p -m 770 /var/log/pgbackrest
chown pgbackrest:pgbackrest /var/log/pgbackrest
mkdir -p /etc/pgbackrest
mkdir -p /etc/pgbackrest/conf.d
touch /etc/pgbackrest/pgbackrest.conf
chmod 640 /etc/pgbackrest/pgbackrest.conf
chown pgbackrest:pgbackrest /etc/pgbackrest/pgbackrest.conf
mkdir -p /var/lib/pgbackrest
chmod 750 /var/lib/pgbackrest
chown pgbackrest:pgbackrest /var/lib/pgbackrest

passwordless login

sudo -u pgbackrest mkdir -m 750 /home/pgbackrest/.ssh
sudo -u pgbackrest ssh-keygen -f /home/pgbackrest/.ssh/id_rsa -t rsa -b 4096 -N ""

Install on Postgres server

sudo -u postgres mkdir -m 750 -p /var/lib/postgresql/.ssh
sudo -u postgres ssh-keygen -f /var/lib/postgresql/.ssh/id_rsa -t rsa -b 4096 -N ""

ssh keys

server     -> postgres   -> ssh pgbackrest@repository /usr/bin/pgbackrest
no-agent-forwarding,no-X11-forwarding,no-port-forwarding,command="/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }" ssh-rsa $public_ssh_key postgres@server
repository -> pgbackrest -> ssh postgres@server /usr/bin/pgbackrest.
no-agent-forwarding,no-X11-forwarding,no-port-forwarding,command="/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }" ssh-rsa $public_ssh_key pgbackrest@repository

prompt

~/.psqlrc file:
\set PROMPT1 '(%n@%M:%>) %[%033[00;33m%]%`date +%H:%M:%S`%[%033[00m%] [%[%033[01;31m%]%/%[%033[00m%]] > '
\set PROMPT2 '%M %n@%/%R %# '

index

Size of index.

SELECT i.indexrelname, x.indexrelid, pg_size_pretty(size)
FROM (SELECT indexrelid, pg_relation_size(indexrelid) AS size
      FROM pg_index) x
JOIN pg_stat_user_indexes i ON i.indexrelid = x.indexrelid
ORDER BY size DESC;