Workshop 9.6
Dalibo & Contributors
max_worker_processes
max_parallel_workers_per_gather
min_parallel_relation_size
SET (parallel_workers = x)
parallel_setup_cost
parallel_tuple_cost
test_parallel_little
CREATE TABLE test_parallel_little (id int); INSERT INTO test_parallel_little SELECT generate_series(0,500000); SELECT n.nspname as "Schema", c.relname as "Name", pg_size_pretty(pg_table_size(c.oid)) as "Size" FROM pg_class c LEFT JOIN pg_namespace n ON n.oid=c.relnamespace WHERE c.relname = 'test_parallel_little'; Schema | Name | Size --------+----------------------+-------- public | test_parallel_little | 17 MB
EXPLAIN (ANALYZE, VERBOSE, BUFFERS) SELECT * FROM test_parallel_little WHERE id = 124862; Gather (cost=1000.00..6889.58 rows=1 width=4) (actual time=30.300..30.385 rows=1 loops=1) Output: id Workers Planned: 1 Workers Launched: 1 Buffers: shared hit=2264 -> Parallel Seq Scan on public.test_parallel_little (cost=0.00..5889.48 rows=1 width=4) (actual time=18.568..27.957 rows=0 loops=2) Output: id Filter: (test_parallel_little.id = 124862) Rows Removed by Filter: 250000 Buffers: shared hit=2213 Worker 0: actual time=7.054..25.832 rows=1 loops=1 Buffers: shared hit=978 Planning time: 0.141 ms Execution time: 31.862 ms (14 lignes)
CREATE TABLE test_parallel (id int); INSERT INTO test_parallel SELECT generate_series(0,5000000); SELECT n.nspname as "Schema", c.relname as "Name", pg_size_pretty(pg_table_size(c.oid)) as "Size" FROM pg_class c LEFT JOIN pg_namespace n ON n.oid=c.relnamespace WHERE c.relname = 'test_parallel'; Schema | Name | Size --------+---------------+-------- public | test_parallel | 173 MB
EXPLAIN (ANALYZE, VERBOSE, BUFFERS) SELECT * FROM test_parallel WHERE id = 124862; Gather [...] Output: id Workers Planned: 3 Workers Launched: 3 Buffers: shared hit=5465 read=16812 -> Parallel Seq Scan on public.test_parallel [...] Output: id Filter: (test_parallel.id = 124862) Rows Removed by Filter: 1250000 Buffers: shared hit=5312 read=16812 Worker 0: actual time=3.766..139.729 rows=1... Buffers: shared hit=1748 read=5466 Worker 1: actual time=138.532..138.532 rows=0... Buffers: shared hit=812 read=2669 Worker 2: actual time=138.508..138.508 rows=0... Buffers: shared hit=816 read=2683 Planning time: 0.120 ms Execution time: 151.700 ms (18 lignes)
EXPLAIN ANALYZE SELECT * FROM big b1 JOIN big b2 USING (id) WHERE b1.id < 400000; Gather [...] Workers Planned: 2 Workers Launched: 2 -> Hash Join [...] Hash Cond: (b1.id = b2.id) Worker 0: [...] Worker 1: [...] -> Parallel Seq Scan on b1 [...] Filter: (b1.id < 400000) Rows Removed by Filter: 16903334 Worker 0: [...] Worker 1: [...] -> Hash [...] Buckets: 4194304 Batches: 1 Memory Usage: 141753kB Worker 0: [...] Worker 1: [...] -> Seq Scan on public.big b2 [...] Worker 0: [...] Worker 1: [...]
EXPLAIN (ANALYZE) SELECT count(*), min(C1), max(C1) FROM t1; QUERY PLAN --------------------------------------------------------------------- Finalize Aggregate (actual time=1766.820..1766.820 rows=1 loops=1) -> Gather (actual time=1766.767..1766.799 rows=3 loops=1) Workers Planned: 2 Workers Launched: 2 -> Partial Aggregate (actual time=1765.236..1765.236 rows=1 loops=3) -> Parallel Seq Scan on t1 (actual time=0.021..862.430 rows=6666667 loops=3) Planning time: 0.072 ms Execution time: 1769.164 ms (8 rows)
PARALLEL SAFE
PARALLEL RESTRICTED
PARALLEL UNSAFE
CURSOR
Infrastructure pour de nouvelles méthodes d’accès sous forme d’extensions.
checkpoint_flush_after
dirty_background_*
replacement_sort_tuples
pg_basebackup
pg_visibility
pg_config
pg_control*
-S slotname
--slot=slotname
-X stream
pg_start_backup()
pg_stop_backup()
backup_label
remote_apply
CREATE TABLE t1 (id integer); INSERT INTO t1 SELECT generate_series(1, 10000000); CREATE INDEX ON t1(id); CREATE EXTENSION postgres_fdw; CREATE SERVER ailleurs FOREIGN DATA WRAPPER postgres_fdw; CREATE USER MAPPING FOR postgres SERVER ailleurs; CREATE FOREIGN TABLE ft1(id integer) SERVER ailleurs OPTIONS (table_name 't1');
EXPLAIN (ANALYZE, VERBOSE, COSTS off) SELECT * FROM ft1 ORDER BY id ; QUERY PLAN -------------------------------------------------------------- Sort (actual time=9452.057..10129.182 rows=10000000 loops=1) Output: id Sort Key: ft1.id Sort Method: external sort Disk: 136856kB -> Foreign Scan on public.ft1 (actual time=1.064..5981.536 rows=10000000 loops=1) Output: id Remote SQL: SELECT id FROM public.t1 Planning time: 0.131 ms Execution time: 10425.730 ms (9 rows)
EXPLAIN (ANALYZE, VERBOSE, COSTS off) SELECT * FROM ft1 ORDER BY id; QUERY PLAN ----------------------------------------------------- Foreign Scan on public.ft1 (actual time=2.092..7438.416 rows=10000000 loops=1) Output: id Remote SQL: SELECT id FROM public.t1 ORDER BY id ASC NULLS LAST Planning time: 0.168 ms Execution time: 7748.122 ms (5 rows)
EXPLAIN (VERBOSE, ANALYZE) SELECT * FROM table1_distante JOIN table2_distante ON table1_distante.id = table2_distante.id WHERE table2_distante.id = 1; QUERY PLAN ----------------------------------------------------------- Nested Loop [...] Output: table1_distante.id, table1_distante.bla, table2_distante.id, table2_distante.bli -> Foreign Scan on public.table1_distante [...] Output: table1_distante.id, table1_distante.bla Remote SQL: SELECT id, bla FROM public.table1 WHERE ((id = 1)) -> Foreign Scan on public.table2_distante [...] Output: table2_distante.id, table2_distante.bli Remote SQL: SELECT id, bli FROM public.table2 WHERE ((id = 1)) Planning time: 0.217 ms Execution time: 65.616 ms
EXPLAIN (VERBOSE, ANALYZE) SELECT * FROM table1_distante JOIN table2_distante ON table1_distante.id=table2_distante.id WHERE table2_distante.id = 1; QUERY PLAN -------------------------------------------------------- Foreign Scan [...] Output: table1_distante.id, table1_distante.bla, table2_distante.id, table2_distante.bli Relations: (public.table1_distante) INNER JOIN (public.table2_distante) Remote SQL: SELECT r1.id, r1.bla, r2.id, r2.bli FROM (public.table1 r1 INNER JOIN public.table2 r2 ON (((r2.id = 1)) AND ((r1.id = 1)))) Planning time: 0.170 ms Execution time: 2.299 ms
jsonb_insert()
postgres=# CREATE EXTENSION ltree_plpythonu; ERREUR: l'extension « ltree » requise n'est pas installée
postgres=# CREATE EXTENSION ltree_plpythonu CASCADE; NOTICE: installing required extension "ltree" NOTICE: installing required extension "plpythonu" CREATE EXTENSION
\crosstabview [ colV [ colH [ colD [ sortcolH ] ] ] ]
pg_stat_activity
pg_blocking_pid()
pg_stat_progress_vacuum
pg_blocking_pids()
pg_*
pg_restore
archive
hot_standby
wal_level
replica