-- PostgreSQL utils
-- Source gist: FrankApiyo/632a9f7fcd84bc1bd153765794ab8859
-- v5
----------------------------------------------------------- basic instance info
-- show db version
SELECT version();
-- uptime (server start time; uptime = now() - this value)
SELECT pg_postmaster_start_time();
-- show connections
SELECT * FROM pg_stat_activity;
-- show users and their attributes (equivalent of psql \du for pre-role columns)
SELECT u.usename AS "Role name",
       CASE WHEN u.usesuper AND u.usecreatedb THEN CAST('superuser, create database' AS pg_catalog.text)
            WHEN u.usesuper THEN CAST('superuser' AS pg_catalog.text)
            WHEN u.usecreatedb THEN CAST('create database' AS pg_catalog.text)
            ELSE CAST('' AS pg_catalog.text)
       END AS "Attributes"
FROM pg_catalog.pg_user u
ORDER BY u.usename;
----------------------------------------------------------- extensions
-- show active (installed) extensions
SELECT * FROM pg_catalog.pg_extension;
-- show extensions available for installation
SELECT * FROM pg_catalog.pg_available_extensions;
-- enable an extension (replace xy with the extension name)
CREATE EXTENSION xy;
----------------------------------------------------------- db & table(s) info
-- table storage options (fillfactor, autovacuum overrides, ...)
SELECT reloptions FROM pg_class WHERE relname = 'table';
-- database sizes, largest first
SELECT datname, pg_size_pretty(pg_database_size(datname))
FROM pg_database
ORDER BY pg_database_size(datname) DESC;
-- table sizes (including indexes and TOAST), largest first
SELECT nspname || '.' || relname AS "relation",
       pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
  AND C.relkind <> 'i'
  AND nspname !~ '^pg_toast'
ORDER BY pg_total_relation_size(C.oid) DESC;
-- Show Table Bloats
-----------------------------
-- You should be looking at:
-- table_bloat: table bloat, ratio between what it current is, and what it can be optimized to.
-- wasted_bytes: number of bytes wasted
-- index_bloat & wasted_index_bytes: same as above, but for indexes.
-- When you see a table with high bloats, then consider running VACUUM ANALYZE on it.
-- NOTE(review): the estimate is stats-based (pg_stats) and approximate; hdr/ma constants
-- are heuristics for row header size and alignment. Treat output as a hint, not an exact figure.
WITH
foo as (select schemaname, tablename, hdr, ma, bs, SUM((1-null_frac)*avg_width) AS datawidth, MAX(null_frac) AS maxfracsum, hdr + (SELECT 1+COUNT(*)/8 FROM pg_stats s2 WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename ) AS nullhdr FROM pg_stats s, (select (SELECT current_setting('block_size')::NUMERIC) AS bs, CASE WHEN SUBSTRING(v,12,3) IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr, CASE WHEN v ~ 'mingw32' THEN 8 ELSE 4 END AS ma FROM (SELECT version() AS v) AS foo) AS constants GROUP BY 1,2,3,4,5),
rs as (select ma,bs,schemaname,tablename, (datawidth+(hdr+ma-(CASE WHEN hdr%ma=0 THEN ma ELSE hdr%ma END)))::NUMERIC AS datahdr, (maxfracsum*(nullhdr+ma-(CASE WHEN nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 FROM foo),
sml as (select schemaname, tablename, cc.reltuples, cc.relpages, bs, CEIL((cc.reltuples*((datahdr+ma-(CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::FLOAT)) AS otta, COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::FLOAT)),0) AS iotta FROM rs JOIN pg_class cc ON cc.relname = rs.tablename JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema' LEFT JOIN pg_index i ON indrelid = cc.oid LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid)
SELECT current_database() as db, schemaname as schema, tablename as table, ROUND((CASE WHEN otta=0 THEN 0.0 ELSE sml.relpages::FLOAT/otta END)::NUMERIC,1) AS table_bloat, pg_size_pretty((CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::BIGINT end)::bigint) AS wasted_mb, pg_size_pretty((CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END)::bigint) AS wasted_index_mb, iname as index_name, ROUND((CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages::FLOAT/iotta END)::NUMERIC,1) AS index_bloat, CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::BIGINT END AS wasted_bytes, CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wasted_index_bytes
FROM sml ORDER BY wasted_bytes desc;
-- Cache Hit Ratio (percentage of block reads served from shared_buffers)
-- nullif guards against division by zero on a freshly-reset stats collector;
-- 100.0 forces float division so the ratio is not truncated to an integer.
select round(sum(blks_hit) * 100.0 / nullif(sum(blks_hit + blks_read), 0), 2) as hit_ratio
from pg_stat_database;
-- hot rate (HOT updates as % of updates), should be about 100
-- fixed regex: '(d+)' matched a literal run of 'd's; '\d+' captures the fillfactor digits
select s.relname,
       pg_size_pretty(pg_relation_size(relid)),
       coalesce(n_tup_ins,0) + 2 * coalesce(n_tup_upd,0) - coalesce(n_tup_hot_upd,0) + coalesce(n_tup_del,0) AS total_writes,
       (coalesce(n_tup_hot_upd,0)::float * 100 / (case when n_tup_upd > 0 then n_tup_upd else 1 end)::float)::numeric(10,2) AS hot_rate,
       (select v[1] FROM regexp_matches(reloptions::text, 'fillfactor=(\d+)') as r(v) limit 1) AS fillfactor
from pg_stat_all_tables s
join pg_class c ON c.oid = relid
order by total_writes desc
limit 50;
----------------------------------------------------------- running queries
-- queries blocked by locks, with the pids holding them
SELECT pid, usename, pg_blocking_pids(pid) as blocked_by, query as blocked_query
FROM pg_stat_activity
WHERE cardinality(pg_blocking_pids(pid)) > 0;
-- show running queries (excluding this one and idle sessions)
SELECT pid, age(clock_timestamp(), query_start), usename, query, state
FROM pg_stat_activity
WHERE state != 'idle' AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start desc;
-- live rows & dead rows per table, biggest tables first
SELECT relname as table_name, n_live_tup rows_in_table, n_dead_tup dead_rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
-- live rows & dead rows, most dead rows first
SELECT relname as table_name, n_live_tup rows_in_table, n_dead_tup dead_rows
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC;
-- cancel a running query (graceful; replace PID)
SELECT pg_cancel_backend(PID);
-- terminate the whole backend (use for idle/stuck sessions; replace PID)
SELECT pg_terminate_backend(PID);
----------------------------------------------------------- indexes info, dead rows
-- Does table need an Index? (many more seq scans than index scans on a non-trivial table)
SELECT relname,
       seq_scan - idx_scan AS too_much_seq,
       CASE WHEN seq_scan - idx_scan > 0 THEN 'Missing Index?' ELSE 'OK' END,
       pg_relation_size(relname::regclass) AS rel_size,
       seq_scan,
       idx_scan
FROM pg_stat_all_tables
WHERE schemaname = 'public' AND pg_relation_size(relname::regclass) > 80000
ORDER BY too_much_seq DESC;
-- check for missing indexes for tables using foreign key (https://www.cybertec-postgresql.com/en/index-your-foreign-key/)
SELECT c.conrelid::regclass AS "table", string_agg(a.attname, ',' ORDER BY x.n) AS columns, pg_catalog.pg_size_pretty(pg_catalog.pg_relation_size(c.conrelid)) AS size, c.conname AS constraint, c.confrelid::regclass AS referenced_table FROM pg_catalog.pg_constraint c CROSS JOIN LATERAL unnest(c.conkey) WITH ORDINALITY AS x(attnum, n) JOIN pg_catalog.pg_attribute a ON a.attnum = x.attnum AND a.attrelid = c.conrelid WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_index i WHERE i.indrelid = c.conrelid AND (i.indkey::smallint[])[0:cardinality(c.conkey)-1] @> c.conkey) AND c.contype = 'f' GROUP BY c.conrelid, c.conname, c.confrelid ORDER BY pg_catalog.pg_relation_size(c.conrelid) DESC;
-- How many index blocks are served from cache
-- nullif guards against division by zero when no index blocks were hit yet
SELECT sum(idx_blks_read) as idx_read,
       sum(idx_blks_hit) as idx_hit,
       (sum(idx_blks_hit) - sum(idx_blks_read)) / nullif(sum(idx_blks_hit), 0) as ratio
FROM pg_statio_user_indexes;
-- all indexes by size
-- - number_of_scans(idx_scan) = Number of index scans initiated on this index
-- - tuples_read(idx_tup_read) = Number of index entries returned by scans on this index
-- - tuples_fetched(idx_tup_fetch) = Number of live table rows fetched by simple index scans using this index
SELECT relname AS table_name, indexrelname AS index_name, idx_scan, idx_tup_read, idx_tup_fetch, pg_size_pretty(pg_relation_size(indexrelname::regclass))
FROM pg_stat_all_indexes
WHERE schemaname = 'public'
ORDER BY pg_relation_size(indexrelname::regclass) DESC;
-- all indexes by size, with index/table size ratio
select t.tablename as table, indexname as index, c.reltuples AS num_rows, pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size, pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size, (pg_relation_size(quote_ident(indexrelname)::text) / (case when (pg_relation_size(quote_ident(t.tablename)::text) / 100) != 0 then (pg_relation_size(quote_ident(t.tablename)::text) / 100) else 1 end)) as size_ratio_percent, CASE WHEN indisunique THEN 'Y' ELSE 'N' END AS unique, idx_scan AS number_of_scans, idx_tup_read AS tuples_read, idx_tup_fetch AS tuples_fetched, pg_relation_size(quote_ident(t.tablename)::text) as table_size_bytes, pg_relation_size(quote_ident(indexrelname)::text) as index_size_bytes
FROM pg_tables t LEFT OUTER JOIN pg_class c ON t.tablename=c.relname LEFT OUTER JOIN (SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns, idx_scan, idx_tup_read, idx_tup_fetch, indexrelname, indisunique FROM pg_index x JOIN pg_class c ON c.oid = x.indrelid JOIN pg_class ipg ON ipg.oid = x.indexrelid JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid ) AS foo ON t.tablename = foo.ctablename
WHERE t.schemaname='public' ORDER BY 12 desc nulls LAST, 1, 2;
-- probably bloated index? (>=40% of table size) for indexes bigger than 20M; see size_ratio_percent col
select t.tablename as table, indexname as index, c.reltuples AS num_rows, pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size, pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size, (pg_relation_size(quote_ident(indexrelname)::text) / (case when (pg_relation_size(quote_ident(t.tablename)::text) / 100) != 0 then (pg_relation_size(quote_ident(t.tablename)::text) / 100) else 1 end)) as size_ratio_percent, CASE WHEN indisunique THEN 'Y' ELSE 'N' END AS unique, idx_scan AS number_of_scans, idx_tup_read AS tuples_read, idx_tup_fetch AS tuples_fetched, pg_relation_size(quote_ident(t.tablename)::text) as table_size_bytes, pg_relation_size(quote_ident(indexrelname)::text) as index_size_bytes
FROM pg_tables t LEFT OUTER JOIN pg_class c ON t.tablename=c.relname LEFT OUTER JOIN (SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns, idx_scan, idx_tup_read, idx_tup_fetch, indexrelname, indisunique FROM pg_index x JOIN pg_class c ON c.oid = x.indrelid JOIN pg_class ipg ON ipg.oid = x.indexrelid JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid ) AS foo ON t.tablename = foo.ctablename
WHERE t.schemaname='public' and pg_relation_size(quote_ident(indexrelname)::text) > 20971520 and ((pg_relation_size(quote_ident(t.tablename)::text) * 0.4) <= pg_relation_size(quote_ident(indexrelname)::text)) ORDER BY 6 desc nulls LAST, 1, 2;
-- table index bloat (http://pgsql.tapoueh.org/site/html/news/20080131.bloat.html / https://web.archive.org/web/20080603000756/http://pgsql.tapoueh.org/site/html/news/20080131.bloat.html)
-- NOTE(review): same stats-based estimation technique as the "Show Table Bloats" query above; approximate.
SELECT schemaname, tablename, reltuples::bigint, relpages::bigint, otta, ROUND(CASE WHEN otta=0 THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat, relpages::bigint - otta AS wastedpages, bs*(sml.relpages-otta)::bigint AS wastedbytes, pg_size_pretty((bs*(relpages-otta))::numeric) AS wastedsize, iname, ituples::bigint, ipages::bigint, iotta, ROUND(CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat, CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages, CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes, CASE WHEN ipages < iotta THEN pg_size_pretty(0::numeric) ELSE pg_size_pretty((bs*(ipages-iotta))::bigint) END AS wastedisize
FROM (SELECT schemaname, tablename, cc.reltuples, cc.relpages, bs, CEIL((cc.reltuples*((datahdr+ma-(CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)) AS otta, COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages, COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta FROM (SELECT ma,bs,schemaname,tablename, (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr, (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2 FROM (SELECT schemaname, tablename, hdr, ma, bs, SUM((1-null_frac)*avg_width) AS datawidth, MAX(null_frac) AS maxfracsum, hdr+(SELECT 1+count(*)/8 FROM pg_stats s2 WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename) AS nullhdr FROM pg_stats s, (SELECT (SELECT current_setting('block_size')::numeric) AS bs, CASE WHEN substring(v,12,3) IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr, CASE WHEN v ~ 'mingw32' THEN 8 ELSE 4 END AS ma FROM (SELECT version() AS v) AS foo) AS constants GROUP BY 1,2,3,4,5) AS foo) AS rs JOIN pg_class cc ON cc.relname = rs.tablename JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = rs.schemaname LEFT JOIN pg_index i ON indrelid = cc.oid LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid) AS sml
WHERE sml.relpages - otta > 0 OR ipages - iotta > 10 ORDER BY wastedbytes DESC, wastedibytes DESC;
-- unused indexes (never scanned since last stats reset)
SELECT relname AS table_name, indexrelname AS index_name, idx_scan, idx_tup_read, idx_tup_fetch, pg_size_pretty(pg_relation_size(indexrelname::regclass))
FROM pg_stat_all_indexes
WHERE schemaname = 'public' AND idx_scan = 0 AND idx_tup_read = 0 AND idx_tup_fetch = 0
ORDER BY pg_relation_size(indexrelname::regclass) DESC;
-- index usage + dead rows
-- nullif guards against division by zero for tables that were never scanned at all
SELECT relname as table_name,
       100 * idx_scan / nullif(seq_scan + idx_scan, 0) percent_of_times_index_used,
       n_live_tup rows_in_table,
       n_dead_tup dead_rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
----------------------------------------------------------- index modifications - reindex, ...
-- "reindex" primary key without lock (note UNIQUE — required to back a PK constraint)
CREATE UNIQUE INDEX CONCURRENTLY table_temp_pkey ON "table" USING btree (col);
ALTER TABLE "table" DROP CONSTRAINT table_pkey, ADD CONSTRAINT table_pkey PRIMARY KEY USING INDEX table_temp_pkey;
-- reindex WITHOUT LOCK for non-primary key
-- ("table" quoted: it is a reserved word, consistent with the PK recipe above)
CREATE INDEX CONCURRENTLY table_col_temp_idx ON "table" USING btree (col);
DROP INDEX table_col_idx;
ALTER INDEX table_col_temp_idx RENAME TO table_col_idx;
-- reindex WITH LOCK (blocks writes for the duration)
REINDEX INDEX table_col_idx;
-- rename index
ALTER INDEX table_col_temp_idx RENAME TO table_col_idx;
----------------------------------------------------------- query stats (long queries, top called, ...)
-- enable with those steps:
---- 1. shmmax https://stackoverflow.com/a/10629164/1283020
---- 2. edit postgresql.conf
-- shared_preload_libraries = 'pg_stat_statements'
-- track_activity_query_size = 2048 -- Increase the max size of the query strings Postgres records
-- pg_stat_statements.track = all -- Track statements generated by stored procedures as well
---- 3. restart postgres
---- 4. run create ext
-- CREATE EXTENSION pg_stat_statements;
-- NOTE: on PostgreSQL 13+ the column total_time was renamed to total_exec_time;
-- adjust the queries below accordingly.
-- reset pg_stat_statements stats
SELECT pg_stat_statements_reset();
-- top LONG RUNNING queries from all the databases
SELECT calls, (total_time/1000)::numeric(18,0) as total_time_seconds, rows, 100.0 * shared_blks_hit / nullif(shared_blks_hit + shared_blks_read, 0) AS hit_percent, query FROM pg_stat_statements
where lower(query) not similar to '%( pg_|information_schema)%' -- and dbid = (select oid from pg_database where datname = 'YOUR_DB_NAME')
ORDER BY total_time desc LIMIT 50;
-- top CALLED queries from all the databases
SELECT calls, (total_time/1000)::numeric(18,0) as total_time_seconds, rows, 100.0 * shared_blks_hit / nullif(shared_blks_hit + shared_blks_read, 0) AS hit_percent, query FROM pg_stat_statements
where query not similar to '%( pg_|information_schema)%' -- and dbid = (select oid from pg_database where datname = 'YOUR_DB_NAME')
ORDER BY calls desc LIMIT 50;
-- queries by TOTAL TIME
select * from pg_stat_statements order by total_time desc limit 10;
-- Most CPU intensive queries (PGSQL v9.4)
SELECT substring(query, 1, 50) AS short_query, round(total_time::numeric, 2) AS total_time, calls, rows, round(total_time::numeric / calls, 2) AS avg_time, round((100 * total_time / sum(total_time::numeric) OVER ())::numeric, 2) AS percentage_cpu FROM pg_stat_statements ORDER BY total_time DESC LIMIT 20;
-- Most time consuming queries (PGSQL v9.4)
SELECT substring(query, 1, 100) AS short_query, round(total_time::numeric, 2) AS total_time, calls, rows, round(total_time::numeric / calls, 2) AS avg_time, round((100 * total_time / sum(total_time::numeric) OVER ())::numeric, 2) AS percentage_cpu FROM pg_stat_statements ORDER BY avg_time DESC LIMIT 20;
-- Most writing (to shared_buffers) queries
select query, shared_blks_dirtied from pg_stat_statements where shared_blks_dirtied > 0 order by 2 desc;
----------------------------------------------------------- vacuum
-- autovacuum settings
SELECT * from pg_settings where category like 'Autovacuum';
-- last manual / automatic vacuum per table
SELECT relname, last_vacuum, last_autovacuum FROM pg_stat_user_tables;
-- dead rows that a VACUUM would reclaim, most first
select schemaname as schema, relname as table, n_dead_tup as dead_rows
from pg_stat_all_tables
order by n_dead_tup desc;
-- run vacuum for table (VERBOSE prints per-step progress; ANALYZE refreshes planner stats)
VACUUM VERBOSE ANALYZE "table";
----------------------------------------------------------- DATA helpers
-- remove duplicates (leave only the ones with highest id)
-- fixed: original used a.id > b.id, which deletes the HIGHER ids and keeps the lowest;
-- a.id < b.id deletes every row for which a duplicate with a larger id exists.
DELETE FROM tablename a
USING tablename b
WHERE a.id < b.id AND a.common_col = b.common_col;
-- vizualize explain http://tatiyants.com/pev/#/plans/new
EXPLAIN (ANALYZE, COSTS, VERBOSE, BUFFERS, FORMAT JSON) query...;
----------------------------------------------------------- Postgres Foreign Data Wrapper (postgres_fdw)
-- enable
CREATE EXTENSION postgres_fdw;
-- add foreign postgres instance
CREATE SERVER xy FOREIGN DATA WRAPPER postgres_fdw OPTIONS (dbname 'xy', host 'ip', port '5432');
-- map local user to remote credentials
CREATE USER MAPPING FOR postgres SERVER xy OPTIONS (user 'u', password 'p');
-- import whole schema
IMPORT FOREIGN SCHEMA public FROM SERVER xy INTO public;
-- import only one table
IMPORT FOREIGN SCHEMA "public" LIMIT TO (foreign_table) FROM SERVER xy INTO public;
-- low cursor size by default, change with
ALTER SERVER xy OPTIONS (fetch_size '50000');
-- end of file