Postgresql
psql
run command from cli
psql -c "SELECT datname FROM pg_database;"
Connect to remote database:
export PGPASSWORD=<password> && psql --host=<host> --port=5432 --username=<username> --no-password postgres
install psql
apt-get install postgresql-client
List databases
Alternative 1
SELECT datname FROM pg_database;
Alternative 2
gives more information too.
\l
     Name    |    Owner    | Encoding |   Collate   |    Ctype    | Access privileges
-------------+-------------+----------+-------------+-------------+-------------------
 indata_mdb  | uc_etl_prod | UTF8     | sv_SE.UTF-8 | sv_SE.UTF-8 |
 ...
Alternative 3
\l+
select database
\c testdb;
show tables
List all tables in all schemas
\dt *.*
list tables in current schema
\dt
Or from command line.
su - postgres -c "psql -c \"\\dt \" database"
list schemas
\dn
list schemas with permissions
\dn+
schema/search_path
List current schema
SHOW search_path;
Change search path.
SET search_path TO myschema, public;
describe table
\d table;
\d dc_store.document;
Change view. One line per attribute (\G in mysql)
\x on
psql -x -c "select * from blabla limit 1;"
exit/quit
\q
update
UPDATE users SET admin = 1 WHERE user_id = 14002;
Create user
CREATE USER username WITH PASSWORD 'MYPASS';
Create database
CREATE DATABASE database OWNER username;
create table
CREATE TABLE mytable (
    code char(5) CONSTRAINT firstkey PRIMARY KEY,
    title varchar(40) NOT NULL,
    did integer NOT NULL,
    date_prod date,
    kind varchar(10),
    len interval hour to minute
);
create readaccess role and grant user access
CREATE ROLE readaccess;
GRANT CONNECT ON DATABASE <database> TO readaccess;
GRANT USAGE ON SCHEMA public TO readaccess;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO readaccess;
CREATE USER <user> WITH PASSWORD '<password>';
GRANT readaccess TO <user>;
Delete database
DROP DATABASE dbname;
delete database from cli
dropdb $dbname
Import a database
psql username -h hostname -d dbname < dump.sql
change password
ALTER USER user_name WITH PASSWORD 'new_password';
Set super privileges
ALTER USER username WITH SUPERUSER;
Deleting user
DROP USER nomusr;
Getting help
\? or \h
size of database
SELECT pg_size_pretty( pg_database_size('database') );
size of table
SELECT pg_size_pretty( pg_total_relation_size('tablename') );
clean archives older than 2 days
su postgres -c "/usr/pgsql-9.5/bin/pg_archivecleanup /var/lib/pgsql/9.5/archivedir/ $(basename $(find /var/lib/pgsql/9.5/archivedir/ -ctime +2 | tail -n 1))"
backup
- Backup database.
DATABASE=very_nice_database ; pg_dump -Fc -Z 2 --file=/tmp/${DATABASE}.$(date '+%Y%m%d').dump ${DATABASE}
- Is the database in backup mode? (pg_is_in_backup() was removed in PostgreSQL 15 together with exclusive backup mode)
SELECT pg_is_in_backup();
restore
Look at permissions on database
psql -c "\l" export DATABASE=very_nice_database
Drop database connections.
psql -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '${DATABASE}' AND pid <> pg_backend_pid();"
Drop database.
dropdb ${DATABASE}
Create database
createdb -T template0 ${DATABASE}
Restore database
pg_restore -j8 -d ${DATABASE} --no-privileges --no-owner --clean --if-exists --exit-on-error /tmp/${DATABASE}.$(date '+%Y%m%d' --date "-4 days").dump
- Set correct permissions.
psql -c "ALTER DATABASE ${DATABASE} OWNER TO ${OWNER};"
bookmarks
https://wiki.postgresql.org/wiki/Disk_Usage
bloat
Every record that has been deleted but still takes up space is called a dead tuple. Once no running transaction depends on those dead tuples any more, they are no longer needed. PostgreSQL therefore runs VACUUM on such tables: VACUUM reclaims the storage occupied by dead tuples. The space occupied by dead tuples is referred to as bloat. VACUUM scans pages for dead tuples and records the reclaimed space in the free space map (FSM).
Query for bloat (which itself looks a bit bloated... :-)
SELECT
  current_database(), schemaname, tablename, /*reltuples::bigint, relpages::bigint, otta,*/
  ROUND((CASE WHEN otta=0 THEN 0.0 ELSE sml.relpages::FLOAT/otta END)::NUMERIC,1) AS tbloat,
  CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::BIGINT END AS wastedbytes,
  iname, /*ituples::bigint, ipages::bigint, iotta,*/
  ROUND((CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages::FLOAT/iotta END)::NUMERIC,1) AS ibloat,
  CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes
FROM (
  SELECT
    schemaname, tablename, cc.reltuples, cc.relpages, bs,
    CEIL((cc.reltuples*((datahdr+ma-
      (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::FLOAT)) AS otta,
    COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
    COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::FLOAT)),0) AS iotta -- very rough approximation, assumes all cols
  FROM (
    SELECT
      ma,bs,schemaname,tablename,
      (datawidth+(hdr+ma-(CASE WHEN hdr%ma=0 THEN ma ELSE hdr%ma END)))::NUMERIC AS datahdr,
      (maxfracsum*(nullhdr+ma-(CASE WHEN nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
    FROM (
      SELECT
        schemaname, tablename, hdr, ma, bs,
        SUM((1-null_frac)*avg_width) AS datawidth,
        MAX(null_frac) AS maxfracsum,
        hdr+(
          SELECT 1+COUNT(*)/8
          FROM pg_stats s2
          WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename
        ) AS nullhdr
      FROM pg_stats s, (
        SELECT
          (SELECT current_setting('block_size')::NUMERIC) AS bs,
          CASE WHEN SUBSTRING(v,12,3) IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
          CASE WHEN v ~ 'mingw32' THEN 8 ELSE 4 END AS ma
        FROM (SELECT version() AS v) AS foo
      ) AS constants
      GROUP BY 1,2,3,4,5
    ) AS foo
  ) AS rs
  JOIN pg_class cc ON cc.relname = rs.tablename
  JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema'
  LEFT JOIN pg_index i ON indrelid = cc.oid
  LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
) AS sml
ORDER BY wastedbytes DESC
clear space vacuum
\c database
vacuum;
if that doesn't work do:
vacuum freeze; or vacuum full; (note that vacuum full rewrites the whole table and holds an exclusive lock while it runs)
vacuum table
Select database
\c ucc
Set the search path
SET search_path TO myschema, kdl;
Vacuum table
vacuum full taxation_initial_data;
autovacuum
Is autovacuum enabled.
SHOW autovacuum;
count table
SELECT COUNT(*) FROM TABLE_NAME;
If you don't need an exact count, the current statistic from the catalog table pg_class might be good enough and is much faster to retrieve for big tables.
SELECT reltuples AS approximate_row_count FROM pg_class WHERE relname = 'table_name';
number of connections
su - postgres -c "psql -t -c 'SELECT * FROM pg_stat_activity;'" | wc -l
drop connections
SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'dbname' AND pid <> pg_backend_pid();
postgres configuration
postgresql.conf
pg_hba.conf
Client authentication is configured in this file: which hosts, users and databases may connect, and which authentication method they must use.
reread configuration
su - postgres
/usr/bin/pg_ctl reload
From psql
SELECT pg_reload_conf();
list prepared transactions
select * from pg_prepared_xacts where database='cti_batch';
remove prepared transaction
ROLLBACK PREPARED '131077_AAAAAAAAAAAAAP//rB6PFn41NfBevSfGABG7nTE=_AAAAAAAAAAAAAP//rB6PFn41NfBevSfGABG7pAAAAAIAAAAA';
pager off
\pset pager off
pager less
\x auto
\pset pager on
\setenv PAGER less
pager
export PAGER=less
config file
.pgpass (the PostgreSQL counterpart of MySQL's .my.cnf)
hostname:port:database:username:password
hostname:port:*:username:password
Permissions in postgres
pg_hba.conf: a local file that defines which sources of login credentials to use (ldap, local users... to me it feels like nsswitch.conf).
owner: the owner of a database object. The right to modify or destroy an object is always the privilege of the owner only.
roles: are like users and groups combined. One role can inherit another. Different attributes give various capabilities: LOGIN, SUPERUSER...
privileges: granted on objects (table, function...): SELECT, INSERT, UPDATE, DELETE...
You can log in with one role and then switch to another.
privileges granted to users on all tables
SELECT
    grantee,
    table_schema,
    table_name,
    privilege_type
FROM
    information_schema.role_table_grants
ORDER BY
    grantee, table_schema, table_name;
Listing Users and Their Membership in Roles
SELECT
    r.rolname AS role_name,
    m.rolname AS member_name
FROM
    pg_auth_members am
JOIN
    pg_roles r ON r.oid = am.roleid
JOIN
    pg_roles m ON m.oid = am.member
ORDER BY
    role_name, member_name;
Listing All Users with Their Database-Level Privileges
SELECT
    pg_database.datname,
    pg_roles.rolname,
    has_database_privilege(pg_roles.rolname, pg_database.datname, 'CONNECT') AS can_connect,
    has_database_privilege(pg_roles.rolname, pg_database.datname, 'CREATE') AS can_create,
    has_database_privilege(pg_roles.rolname, pg_database.datname, 'TEMP') AS can_create_temp_tables
FROM
    pg_database
CROSS JOIN
    pg_roles
ORDER BY
    pg_database.datname, pg_roles.rolname;
Is user superuser
SELECT
    rolname AS role_name,
    CASE WHEN rolsuper THEN 'Yes' ELSE 'No' END AS is_superuser
FROM
    pg_roles;
roles/users
Which roles and users exist.
\du
set new role
set role super-man;
current role
user name of current execution context
SELECT current_user;
session user
SELECT session_user;
display privileges
\dp
\z
tablespaces
List tablespaces
SELECT spcname FROM pg_tablespace;
how many rows in table
SELECT reltuples AS estimate FROM pg_class where relname = 'member_offer';
delete rows matching value and timestamp
DELETE FROM receipt_transaction WHERE status_point = 'FAIL_POINT_CALC' and business_date < ( now() - interval '+14 day' ) ;
user password
~/.pgpass
hostname:port:database:username:password
kill
Kill less aggressive (cancels only the current query)
select pg_cancel_backend(16967);
kill
select pg_terminate_backend(16967) from pg_stat_activity;
who are you and where do you come from, whoami
SELECT CURRENT_USER usr, :'HOST' host, inet_server_port() port;
or
\conninfo
running queries on standby
In postgresql.conf on the standby:
max_standby_archive_delay = 600s
max_standby_streaming_delay = 600s
analyze query
EXPLAIN ANALYZE select ...
version
SELECT version();
master/slave setup
Streaming replication in PostgreSQL is built on log shipping. Every change in Postgres is written to a transaction log called the WAL (write-ahead log) to achieve durability. A standby (slave) uses these WAL records to continuously replicate changes from its primary (master). Three mandatory processes play the major roles in streaming replication: the WAL sender, the WAL receiver and the startup process. The Log Sequence Number, or LSN, is a pointer to a location in the WAL.
wal(write ahead log)
The pg_xlog directory (renamed to pg_wal in PostgreSQL 10) holds the WAL (write-ahead log) files. WAL files contain a record of all changes made to the database.
Is replication moving
This LSN(Log Sequence Number) indicates the position in the WAL(Write-Ahead Log) up to which changes have been safely flushed and confirmed by subscribers or replication processes.
SELECT slot_name, plugin, confirmed_flush_lsn, slot_type, restart_lsn FROM pg_replication_slots;
how much space is used by wal
SELECT slot_name, pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(),restart_lsn)) AS replicationSlotLag, active FROM pg_replication_slots ;
List tablespaces
\db
list clusters
pg_lsclusters
pgbackrest
# https://pgbackrest.org/user-guide.html#introduction
# Install pgbackrest
apt-get install -y pgbackrest
# pg-primary - Create pgBackRest configuration file and directories
mkdir -p -m 770 /var/log/pgbackrest
chown postgres:postgres /var/log/pgbackrest
mkdir -p /etc/pgbackrest
mkdir -p /etc/pgbackrest/conf.d
touch /etc/pgbackrest/pgbackrest.conf
chmod 640 /etc/pgbackrest/pgbackrest.conf
chown postgres:postgres /etc/pgbackrest/pgbackrest.conf
# Does it work?
sudo -u postgres pgbackrest
# Configure the PostgreSQL cluster data directory
# pg-primary:/etc/pgbackrest/pgbackrest.conf
[billo_dev]
pg1-path=/var/lib/postgresql/12/billo_dev
# On the machine taking backups.
# Configure the pgBackRest repository path
# /etc/pgbackrest/pgbackrest.conf
[demo]
pg1-path=/var/lib/postgresql/12/demo
[global]
repo1-path=/var/lib/pgbackrest
# /etc/postgresql/12/main/postgresql.conf
archive_command = 'pgbackrest --stanza=billo_dev archive-push %p'
archive_mode = on
max_wal_senders = 3
wal_level = replica
# Set up the backup stanza.
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info stanza-create
# Check configuration
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info check
# Run backup
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info backup
# Differential backup
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info --type=diff backup
# Full backup
sudo -u postgres pgbackrest --stanza=billo_dev --log-level-console=info --type=full backup
# Every 10 minutes except for 2:00-2:59
*/10 0-1,3-23 * * * pgbackrest --stanza=billo_dev --type=diff backup
# 02:10 02:20 02:30 02:40 02:50
10-50/10 2 * * * pgbackrest --stanza=billo_dev --type=diff backup
# 02:00 Full
0 2 * * * pgbackrest --stanza=billo_dev --type=full backup
# List backup information
sudo -u postgres pgbackrest info
...
# Restore the last backup.
sudo -u postgres find /var/lib/postgresql/12/billo_dev -mindepth 1 -delete
# Restore the cluster and start PostgreSQL
sudo -u postgres pgbackrest --stanza=billo_dev restore
sudo pg_ctlcluster 12 restore start
...
# Restore a backup from a certain point in time.
sudo -u postgres find /var/lib/postgresql/12/billo_dev -mindepth 1 -delete
# Restore the cluster and start PostgreSQL
sudo -u postgres pgbackrest --stanza=billo_dev --set 20220321-113900F_20220321-122001D restore
sudo pg_ctlcluster 12 restore start
install on remote machine
repository
sudo adduser --disabled-password --gecos "" pgbackrest
setup directories
mkdir -p -m 770 /var/log/pgbackrest
chown pgbackrest:pgbackrest /var/log/pgbackrest
mkdir -p /etc/pgbackrest
mkdir -p /etc/pgbackrest/conf.d
touch /etc/pgbackrest/pgbackrest.conf
chmod 640 /etc/pgbackrest/pgbackrest.conf
chown pgbackrest:pgbackrest /etc/pgbackrest/pgbackrest.conf
mkdir -p /var/lib/pgbackrest
chmod 750 /var/lib/pgbackrest
chown pgbackrest:pgbackrest /var/lib/pgbackrest
passwordless login
sudo -u pgbackrest mkdir -m 750 /home/pgbackrest/.ssh
sudo -u pgbackrest ssh-keygen -f /home/pgbackrest/.ssh/id_rsa -t rsa -b 4096 -N ""
Install on Postgres server
sudo -u postgres mkdir -m 750 -p /var/lib/postgresql/.ssh
sudo -u postgres ssh-keygen -f /var/lib/postgresql/.ssh/id_rsa -t rsa -b 4096 -N ""
ssh keys
server -> postgres -> ssh pgbackrest@repository /usr/bin/pgbackrest
no-agent-forwarding,no-X11-forwarding,no-port-forwarding,command="/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }" ssh-rsa $public_ssh_key postgres@server
repository -> pgbackrest -> ssh postgres@server /usr/bin/pgbackrest
no-agent-forwarding,no-X11-forwarding,no-port-forwarding,command="/usr/bin/pgbackrest ${SSH_ORIGINAL_COMMAND#* }" ssh-rsa $public_ssh_key pgbackrest@repository
prompt
~/.psqlrc file:
\set PROMPT1 '(%n@%M:%>) %[%033[00;33m%]%`date +%H:%M:%S`%[%033[00m%] [%[%033[01;31m%]%/%[%[%033[00m%]] > '
\set PROMPT2 '%M %n@%/%R %# '
index
Size of index.
SELECT i.indexrelname, x.indexrelid, pg_size_pretty(size)
FROM (SELECT indexrelid, pg_indexes_size(indrelid) AS size
      FROM pg_index) x
JOIN pg_stat_user_indexes i ON i.indexrelid = x.indexrelid
ORDER BY size DESC;
Privilege Abbreviation
Privilege       Abbreviation    Applicable Object Types
SELECT          r (“read”)      LARGE OBJECT, SEQUENCE, TABLE (and table-like objects), table column
INSERT          a (“append”)    TABLE, table column
UPDATE          w (“write”)     LARGE OBJECT, SEQUENCE, TABLE, table column
DELETE          d               TABLE
TRUNCATE        D               TABLE
REFERENCES      x               TABLE, table column
TRIGGER         t               TABLE
CREATE          C               DATABASE, SCHEMA, TABLESPACE
CONNECT         c               DATABASE
TEMPORARY       T               DATABASE
EXECUTE         X               FUNCTION, PROCEDURE
USAGE           U               DOMAIN, FOREIGN DATA WRAPPER, FOREIGN SERVER, LANGUAGE, SCHEMA, SEQUENCE, TYPE
SET             s               PARAMETER
ALTER SYSTEM    A               PARAMETER
which codepage is being used on database
SELECT datname, pg_encoding_to_char(encoding) AS encoding FROM pg_database;
install postgres fedora(40)
sudo dnf install postgresql-server postgresql-contrib
sudo systemctl enable postgresql
sudo postgresql-setup --initdb --unit postgresql
sudo systemctl start postgresql
sudo vim /var/lib/pgsql/data/pg_hba.conf
local all all md5
sudo service postgresql restart
CREATE USER username WITH PASSWORD 'password';
CREATE DATABASE database OWNER username;
CREATE TABLE testtable (
    code char(5) CONSTRAINT firstkey PRIMARY KEY,
    title varchar(40) NOT NULL,
    did integer NOT NULL,
    date_prod date,
    kind varchar(10),
    len interval hour to minute
);
GRANT SELECT ON TABLE testtable TO username;
uptime
select current_timestamp - pg_postmaster_start_time() as uptime;
frequently running query
with
  a as (select dbid, queryid, query, calls s from pg_stat_statements),
  b as (select dbid, queryid, query, calls s from pg_stat_statements, pg_sleep(1))
select
  pd.datname as db_name,
  substr(a.query, 1, 400) as the_query,
  sum(b.s-a.s) as runs_per_second
from a, b, pg_database pd
where
  a.dbid = b.dbid
and
  a.queryid = b.queryid
and
  pd.oid = a.dbid
group by 1, 2
order by 3 desc;
sessions older than 1 second
select
  now()-query_start as runtime,
  pid as process_id,
  datname as db_name,
  client_addr,
  client_hostname,
  query
from pg_stat_activity
where state != 'idle'
and now() - query_start > '1 seconds'::interval
order by 1 desc;
cpu usage per query
SELECT
  pss.userid,
  pss.dbid,
  pd.datname as db_name,
  round((pss.total_exec_time + pss.total_plan_time)::numeric, 2) as total_time,
  pss.calls,
  round((pss.mean_exec_time + pss.mean_plan_time)::numeric, 2) as mean,
  round((100 * (pss.total_exec_time + pss.total_plan_time) / sum((pss.total_exec_time + pss.total_plan_time)::numeric) OVER ())::numeric, 2) as cpu_portion_pctg,
  pss.query
FROM pg_stat_statements pss, pg_database pd
WHERE pd.oid = pss.dbid
ORDER BY (pss.total_exec_time + pss.total_plan_time)
DESC LIMIT 30;
performance troubleshooting
https://jfrog.com/community/data-science/troubleshooting-high-cpu-utilization-in-postgresql-databases-a-how-to-guide/