Advertisement
Guest User

Untitled

a guest
Dec 22nd, 2016
322
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 58.57 KB | None | 0 0
  1. #!/usr/bin/perl
  2.  
  3. use strict;
  4. use warnings;
  5.  
  6. use Pod::Usage;
  7.  
  8. use Time::HiRes qw/time sleep/;
  9. use POSIX;
  10. use List::Util qw/sum/;
  11. use Getopt::Long qw/:config no_ignore_case/;
  12. use DBI;
  13.  
  14. use Data::Dumper;
  15.  
  16. our $VERSION = '1.0.2';
  17.  
  18. local $| = 1;
  19. select(STDOUT);
  20.  
  21. our $_log = select();
  22. our $_log_level;
  23. our $_dbh;
  24. our $_after_round_statement;
  25.  
  26. use constant MINIMAL_COMPACT_PAGES => 10;
  27. use constant MINIMAL_COMPACT_PERCENT => 20;
  28. use constant PAGES_PER_ROUND_DIVISOR => 1000;
  29. use constant MAX_PAGES_PER_ROUND => 5;
  30. use constant PAGES_BEFORE_VACUUM_LOWER_DIVISOR => 16;
  31. use constant PAGES_BEFORE_VACUUM_LOWER_THRESHOLD => 1000;
  32. use constant PAGES_BEFORE_VACUUM_UPPER_DIVISOR => 50;
  33. use constant PROGRESS_REPORT_PERIOD => 60;
  34. use constant LOCKED_ALTER_TIMEOUT => 1000;
  35. use constant LOCKED_ALTER_COUNT => 100;
  36. use constant LOG_ALWAYS => 1000;
  37. use constant LOG_ERROR => 2;
  38. use constant LOG_WARNING => 1;
  39. use constant LOG_NOTICE => 0;
  40.  
# Settings & defaults

# --version / --help / --man flags.
my $show_version;
my $show_help;
my $show_man;

# Connection parameters; user defaults to the current OS account name.
my $db_host = 'localhost';
my $db_port = 5432;
my $db_user = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
my $db_passwd = '';

# Target selection; database name also defaults to the OS account name.
my $db_name = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
my $schema_name = 'public';
my $table_name;

# Output verbosity switches (combined into $_verbosity after option parsing).
my $verbose;
my $quiet;

# Processing behaviour knobs; see the GetOptions() spec for matching flags.
my $force;
my $delay_ratio = 0;
my $after_round_query;
my $routine_vacuum;
my $no_reindex;
my $print_reindex_queries = 0;
my $max_retry_count = 10;

# Process every database on the server instead of a single one.
my $all_db;

# Schema/table include/exclude filters. The hashes are presumably filled
# from the scalar option values elsewhere — TODO confirm (not visible in
# this part of the file).
my $only_schema;
my $exclude_schema;
my $exclude_table;
my %only_schemas;
my %excluded_schemas;
my %excluded_tables;

# Reindex each table before compacting it.
my $initial_reindex;

# Per-table bookkeeping; populated elsewhere in the file.
my %table_info;
  79.  
# Parse the command line. On bad options, print the SYNOPSIS and stop.
# NOTE(review): show_usage() exits via pod2usage with -exitval => 1, so the
# exit(0) below is unreachable — and would report success on bad options if
# it ever ran. Confirm intent before relying on the exit status.
unless (GetOptions(
#help & man
'V|version' => \$show_version,
'help|?' => \$show_help,
'm|man' => \$show_man,
#database connection parameters
'h|host=s' => \$db_host,
'p|port=i' => \$db_port,
'U|user=s' => \$db_user,
'W|password=s' => \$db_passwd,
'd|dbname=s' => \$db_name,
'n|schema=s' => \$only_schema,
't|table=s' => \$table_name,
'v|verbose' => \$verbose,
'q|quiet' => \$quiet,
'f|force' => \$force,
'r|no-reindex' => \$no_reindex,
's|print-reindex-queries' => \$print_reindex_queries,
'o|max-retry-count=i' => \$max_retry_count,
'E|delay-ratio=i' => \$delay_ratio,
'Q|after-round-query=s' => \$after_round_query,
'R|routine-vacuum' => \$routine_vacuum,
'a|all' => \$all_db,
'N|exclude-schema=s' => \$exclude_schema,
'T|exclude-table=s' => \$exclude_table,
'i|initial-reindex' => \$initial_reindex
)) {
show_usage();
exit(0);
}

# Effective log threshold: --verbose shows notices, --quiet only errors,
# default shows warnings and up (logger() suppresses levels below this).
my $_verbosity = ($verbose ? LOG_NOTICE : ($quiet ? LOG_ERROR : LOG_WARNING));

# Context embedded by logger() into every message prefix; maintained by the
# set_current_*/unset_current_* helpers below.
our $_current_db_name;
our $_current_schema_name;
our $_current_table_name;
  116.  
  117. sub not_enough_arguments {
  118. logger(LOG_ALWAYS, "%s: At least one of the options must be specified: all, dbname", $0);
  119. logger('quit', "Use --help for short help, --man for full manual.");
  120. }
  121.  
  122. sub no_together_arguments {
  123. logger(LOG_ALWAYS, "%s: These options cannot be specified together at once: %s", $0, join(', ', @_));
  124. logger('quit', "Use --help for short help, --man for full manual.");
  125. }
  126.  
  127. sub show_version {
  128. logger(LOG_ALWAYS, "%s v%s", $0, $VERSION);
  129. }
  130. sub show_help {
  131. pod2usage(
  132. -message => "",
  133. -verbose => 99,
  134. -exitval => 1,
  135. -sections => 'NAME|SYNOPSIS|EXAMPLES',
  136. );
  137. }
  138.  
  139. sub show_usage {
  140. pod2usage(
  141. -message => "",
  142. -verbose => 99,
  143. -exitval => 1,
  144. -sections => 'SYNOPSIS',
  145. );
  146. }
  147.  
  148. sub show_man {
  149. pod2usage(
  150. -message => "",
  151. -verbose => 99,
  152. -exitval => 1,
  153. -sections => '|NAME|SYNOPSIS|DESCRIPTION|OPTIONS|LICENSE AND COPYRIGHT|VERSION|AUTHOR'
  154. );
  155. }
  156.  
  157.  
  158. sub set_current_db_name {
  159. $_current_db_name = shift;
  160. }
  161.  
  162. sub set_current_schema_name_table_name {
  163. $_current_schema_name = shift;
  164. $_current_table_name = shift;
  165. }
  166.  
  167. sub unset_current_db_name {
  168. undef $_current_db_name;
  169. }
  170.  
  171. sub unset_current_schema_name_table_name {
  172. undef $_current_schema_name;
  173. undef $_current_table_name;
  174. }
  175.  
  176. sub unset_after_round_statement {
  177. undef $_after_round_statement;
  178. }
  179.  
# Print a formatted message to the handle whose *name* is stored in $_log.
# Messages below the global $_verbosity threshold are suppressed. The prefix
# carries a timestamp (skipped for LOG_ALWAYS) and, when set, the current
# "(db:schema.table)" context. A non-numeric $level (e.g. 'quit') is coerced
# to LOG_WARNING — it does NOT terminate the program.
sub logger {
my $level = shift;
my $message = shift;
my @message_args = @_;
# Coerce symbolic/garbage levels to LOG_WARNING.
$level = LOG_WARNING unless ($level =~ /^\d+$/);
return if ($level < $_verbosity);
# $_log holds a filehandle name string (from select()), so printing to it is
# a symbolic reference — strict refs must be disabled for this one statement.
no strict;
print $_log sprintf("%s%s$message\n", ($level == LOG_ALWAYS ? '' : sprintf("[%s] ", scalar(localtime))), ($_current_db_name ? sprintf("(%s%s) ", $_current_db_name, ($_current_schema_name && $_current_table_name) ? ":$_current_schema_name.$_current_table_name" : "") : ""), @message_args);
use strict;
}
  190.  
  191. sub nice_size
  192. {
  193. my $size = shift;
  194. $size = 0 unless ($size);
  195. my @sizes = qw/B KB MB GB TB PB/;
  196.  
  197. my $i = 0;
  198.  
  199. while (abs($size) > 1024) {
  200. $size = $size / 1024;
  201. $i++;
  202. }
  203. return sprintf("%.3f$sizes[$i]", $size);
  204. }
  205.  
  206. #DB procedures
  207.  
  208. sub _dbh {
  209. unless ($_dbh && ref $_dbh && $_dbh->ping) {
  210. $_dbh = db_connect($_current_db_name, $db_host, $db_port, $db_user, $db_passwd);
  211. exit(0) unless ($_dbh && ref $_dbh && $_dbh->ping);
  212. }
  213. return $_dbh;
  214. }
  215.  
  216. sub _after_round_statement {
  217. unless ($_after_round_statement) {
  218. if ($after_round_query) {
  219. $_after_round_statement = _dbh->prepare($after_round_query);
  220.  
  221. if ($DBI::err) {
  222. logger(LOG_WARNING, "SQL Error in after round query %s: %s", $after_round_query, $DBI::errstr);
  223. unset $_after_round_statement;
  224. }
  225. }
  226. }
  227. return $_after_round_statement;
  228. }
  229.  
  230. sub db_connect {
  231. my $db_name = shift;
  232. my $db_host = shift;
  233. my $db_port = shift;
  234. my $db_user = shift;
  235. my $db_password = shift;
  236.  
  237. unset_after_round_statement;
  238.  
  239. logger(LOG_WARNING, "Connecting to database");
  240.  
  241. $_dbh = DBI->connect("DBI:Pg:dbname=$db_name;host=$db_host;port=$db_port", $db_user, $db_password,{RaiseError => 0, PrintError => 0, AutoCommit => 1});
  242.  
  243. if($DBI::err) {
  244. logger(LOG_ERROR, "Cannot connect DBI:Pg:dbname=%s;host=%s;port=%s user=%s,passwd=...: %s", $db_name, $db_host, $db_port, $db_user, $DBI::errstr);
  245. return undef;
  246. }
  247.  
  248. $_dbh->do("set client_min_messages to warning;");
  249.  
  250. return $_dbh;
  251. }
  252.  
  253. sub db_disconnect {
  254. my $db_name = shift;
  255. logger(LOG_WARNING, "Disconnecting from database");
  256. _dbh->disconnect;
  257. }
  258.  
  259. sub get_databases {
  260. my $sth = _dbh->prepare("
  261. SELECT datname FROM pg_catalog.pg_database
  262. WHERE
  263. datname NOT IN ('template0')
  264. ORDER BY pg_catalog.pg_database_size(datname), datname
  265. ");
  266. $sth->execute;
  267.  
  268. if ($DBI::err) {
  269. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  270. return undef;
  271. }
  272.  
  273. my @result;
  274. while(my ($db_name) = $sth->fetchrow_array) {
  275. push @result, $db_name;
  276. }
  277.  
  278. return \@result || [];
  279. }
  280.  
  281. sub get_database_tables {
  282. my $sth = _dbh->prepare("
  283. SELECT schemaname, tablename FROM pg_catalog.pg_tables
  284. WHERE
  285. NOT (schemaname = 'pg_catalog' AND tablename = 'pg_index') AND
  286. schemaname !~ 'pg_(temp|toast|catalog).*' AND
  287. NOT schemaname = 'information_schema'
  288. ORDER BY
  289. pg_catalog.pg_relation_size(
  290. quote_ident(schemaname) || '.' || quote_ident(tablename)),
  291. schemaname, tablename
  292. ");
  293.  
  294. if ($DBI::err) {
  295. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  296. return undef;
  297. }
  298.  
  299. $sth->execute;
  300. my @result;
  301. while(my $ident = $sth->fetchrow_hashref) {
  302. push @result, $ident;
  303. }
  304. return \@result || [];
  305. }
  306.  
  307. sub get_pgstattuple_schema_name {
  308. my $sth = _dbh->prepare("
  309. SELECT nspname FROM pg_catalog.pg_proc
  310. JOIN pg_catalog.pg_namespace AS n ON pronamespace = n.oid
  311. WHERE proname = 'pgstattuple' LIMIT 1
  312. ");
  313. $sth->execute;
  314.  
  315. if ($DBI::err) {
  316. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  317. return undef;
  318. }
  319.  
  320. my ($pgstattuple_schema_name) = $sth->fetchrow_array;
  321. return $pgstattuple_schema_name;
  322. }
  323.  
  324. sub get_size_stats {
  325. my $schema_name = shift;
  326. my $table_name = shift;
  327.  
  328. my $sth = _dbh->prepare("
  329. SELECT
  330. size,
  331. total_size,
  332. ceil(size / bs) AS page_count,
  333. ceil(total_size / bs) AS total_page_count
  334. FROM (
  335. SELECT
  336. current_setting('block_size')::integer AS bs,
  337. pg_catalog.pg_relation_size(quote_ident(?)||'.'||quote_ident(?)) AS size,
  338. pg_catalog.pg_total_relation_size(quote_ident(?)||'.'||quote_ident(?)) AS total_size
  339. ) AS sq
  340. ");
  341.  
  342. $sth->execute($schema_name, $table_name, $schema_name, $table_name);
  343.  
  344. if ($DBI::err) {
  345. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  346. return undef;
  347. }
  348.  
  349. my $result = $sth->fetchrow_hashref;
  350.  
  351. if (! $result || ref $result ne 'HASH') {
  352. logger(LOG_ERROR,"Cannot get size statistics");
  353. }
  354.  
  355. return $result;
  356. }
  357.  
  358. sub get_bloat_stats {
  359. my $schema_name = shift;
  360. my $table_name = shift;
  361.  
  362. my $ident_name = $schema_name.".".$table_name;
  363.  
  364. my $pgstattuple_schema_name = get_pgstattuple_schema_name;
  365.  
  366. return undef unless($pgstattuple_schema_name);
  367.  
  368. my $sth = _dbh->prepare("SELECT
  369. ceil((size - free_space - dead_tuple_len) * 100 / fillfactor / bs) AS effective_page_count,
  370. greatest(round(
  371. (100 * (1 - (100 - free_percent - dead_tuple_percent) / fillfactor))::numeric, 2
  372. ),0) AS free_percent,
  373. greatest(ceil(size - (size - free_space - dead_tuple_len) * 100 / fillfactor), 0) AS free_space
  374. FROM (
  375. SELECT
  376. current_setting('block_size')::integer AS bs,
  377. pg_catalog.pg_relation_size(pg_catalog.pg_class.oid) AS size,
  378. coalesce(
  379. (
  380. SELECT (
  381. regexp_matches(
  382. reloptions::text, E'.*fillfactor=(\\\\d+).*'))[1]),
  383. '100')::real AS fillfactor,
  384. pgst.*
  385. FROM pg_catalog.pg_class
  386. CROSS JOIN
  387. " . _dbh->quote_identifier($pgstattuple_schema_name) . ".pgstattuple(
  388. (quote_ident(?) || '.' || quote_ident(?))) AS pgst
  389. WHERE pg_catalog.pg_class.oid = (quote_ident(?) || '.' || quote_ident(?))::regclass
  390. ) AS sq");
  391. $sth->execute($schema_name, $table_name, $schema_name, $table_name);
  392.  
  393. if ($DBI::err) {
  394. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  395. return undef;
  396. }
  397.  
  398. my $result = $sth->fetchrow_hashref;
  399.  
  400. return $result;
  401. }
  402.  
# Pick the column best suited for the no-op "SET col = col" updates that move
# tuples off the tail pages. Preference order (see ORDER BY in the query):
# fixed-length columns first (less likely to be TOASTed), then non-indexed
# ones, then the narrowest and earliest attribute. Returns the quoted column
# name, or undef on SQL error / no eligible column.
sub get_update_column {
my $schema_name = shift;
my $table_name = shift;

my $sth = _dbh->prepare("SELECT quote_ident(attname)
FROM pg_catalog.pg_attribute
WHERE
attnum > 0 AND -- neither system
NOT attisdropped AND -- nor dropped
attrelid = (quote_ident(?) || '.' || quote_ident(?))::regclass
ORDER BY
-- Variable legth attributes have lower priority because of the chance
-- of being toasted
(attlen = -1),
-- Preferably not indexed attributes
(
attnum::text IN (
SELECT regexp_split_to_table(indkey::text, ' ')
FROM pg_catalog.pg_index
WHERE indrelid = (quote_ident(?) || '.' || quote_ident(?))::regclass)),
-- Preferably smaller attributes
attlen,
attnum
LIMIT 1;");

$sth->execute($schema_name, $table_name, $schema_name, $table_name);

if ($DBI::err) {
logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
return undef;
}

my ($result) = $sth->fetchrow_array;
return $result;
}
  438.  
  439. sub get_pages_per_round {
  440. my $page_count = shift;
  441. my $to_page = shift;
  442.  
  443. my $real_pages_per_round = $page_count / PAGES_PER_ROUND_DIVISOR > 1 ? $page_count / PAGES_PER_ROUND_DIVISOR : 1;
  444. my $pages_per_round = $real_pages_per_round < MAX_PAGES_PER_ROUND ? $real_pages_per_round : MAX_PAGES_PER_ROUND;
  445. my $result = ceil($pages_per_round) < $to_page ? ceil($pages_per_round) : $to_page;
  446.  
  447. return $result;
  448. }
  449.  
  450. sub get_pages_before_vacuum {
  451. my $page_count = shift;
  452. my $expected_page_count = shift;
  453.  
  454. my $pages = $page_count / PAGES_BEFORE_VACUUM_LOWER_DIVISOR < PAGES_BEFORE_VACUUM_LOWER_THRESHOLD ? $page_count / PAGES_BEFORE_VACUUM_LOWER_DIVISOR : $page_count / PAGES_BEFORE_VACUUM_LOWER_THRESHOLD;
  455. my $result = $pages > $expected_page_count / PAGES_BEFORE_VACUUM_UPPER_DIVISOR ? $pages : $expected_page_count / PAGES_BEFORE_VACUUM_UPPER_DIVISOR;
  456.  
  457. return ceil($result);
  458. }
  459.  
# Estimate the maximum number of tuples that can fit on one page of the
# table: block size divided by the summed width of the system columns
# (attnum < 0). NOTE(review): this is an upper-bound heuristic, not an exact
# figure — confirm against the caller's expectations.
sub get_max_tupples_per_page {
my $schema_name = shift;
my $table_name = shift;

my $ident_name = _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name);

my $sth = _dbh->prepare("
SELECT ceil(current_setting('block_size')::real / sum(attlen))
FROM pg_catalog.pg_attribute
WHERE
attrelid = '$ident_name'::regclass AND
attnum < 0;
");
$sth->execute;

if ($DBI::err) {
logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
return undef;
}

my ($result) = $sth->fetchrow_array;

# A missing value is reported as an error, but undef is still returned.
logger(LOG_ERROR, 'Can not get max tupples per page.') unless(defined $result);

return $result;
}

# Count the table's enabled triggers that would fire on this session's
# updates: tgenabled 'A'lways or 'R'eplica, with tgtype bit 16 set
# (presumably the UPDATE bit — TODO confirm against pg_trigger docs).
# Returns the count, or undef on SQL error.
sub has_triggers {
my $schema_name = shift;
my $table_name = shift;

my $ident_name = _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name);

my $sth = _dbh->prepare("SELECT count(1) FROM pg_catalog.pg_trigger
WHERE
tgrelid = '$ident_name'::regclass AND
tgenabled IN ('A', 'R') AND
(tgtype & 16)::boolean");
$sth->execute;

if ($DBI::err) {
logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
return undef;
}

my ($result) = $sth->fetchrow_array;

return $result;
}
  509.  
  510. sub try_advisory_lock {
  511. my $schema_name = shift;
  512. my $table_name = shift;
  513.  
  514. my $sth = _dbh->prepare("
  515. SELECT pg_try_advisory_lock(
  516. 'pg_catalog.pg_class'::regclass::integer,
  517. (quote_ident(?)||'.'||quote_ident(?))::regclass::integer)::integer;
  518. ");
  519. $sth->execute($schema_name, $table_name);
  520.  
  521. if ($DBI::err) {
  522. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  523. return undef;
  524. }
  525.  
  526. my ($lock) = $sth->fetchrow_array;
  527.  
  528. logger(LOG_NOTICE, "Skipping processing: another instance is working with table %s.%s", $schema_name, $table_name) unless ($lock);
  529.  
  530. return $lock;
  531. }
  532.  
  533. sub vacuum {
  534. my $schema_name = shift;
  535. my $table_name = shift;
  536. my $analyze = shift;
  537.  
  538.  
  539. my $sth = _dbh->do('VACUUM '.($analyze ? 'ANALYZE ' : ''). _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name));
  540.  
  541. if ($DBI::err) {
  542. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  543. return undef;
  544. }
  545.  
  546. return;
  547. }
  548.  
  549. sub analyze {
  550. my $schema_name = shift;
  551. my $table_name = shift;
  552.  
  553. my $sth = $_dbh->do("ANALYZE "._dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name));
  554.  
  555. if ($DBI::err) {
  556. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  557. return undef;
  558. }
  559.  
  560. return;
  561. }
  562.  
  563. sub set_session_replication_role {
  564. my $sth = $_dbh->do('set session_replication_role to replica;');
  565.  
  566. if ($DBI::err) {
  567. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  568. return undef;
  569. }
  570.  
  571. return;
  572. }
  573.  
# Install the per-process server-side cleaner public.pgcompact_clean_pages_<pid>
# (the Perl $$ interpolates this script's PID into the name, so concurrent
# instances do not collide; the plpgsql \$\$ and \$1 below are escaped so they
# reach the server literally). The function issues no-op "UPDATE ... SET col =
# col" statements against every possible ctid in the page range
# [i_to_page - i_page_offset + 1, i_to_page], chasing relocated tuples until
# the range is empty. It returns the new last-used page (_from_page - 1), or
# -1 when a tuple landed PAST i_to_page. It raises on bad page arguments and
# unless session_replication_role is 'replica' (see
# set_session_replication_role). Returns 1 on success, undef on SQL error.
sub create_clean_pages_function {

_dbh->do("
CREATE OR REPLACE FUNCTION public.pgcompact_clean_pages_$$(
i_table_ident text,
i_column_ident text,
i_to_page integer,
i_page_offset integer,
i_max_tupples_per_page integer)
RETURNS integer
LANGUAGE plpgsql AS \$\$
DECLARE
_from_page integer := i_to_page - i_page_offset + 1;
_min_ctid tid;
_max_ctid tid;
_ctid_list tid[];
_next_ctid_list tid[];
_ctid tid;
_loop integer;
_result_page integer;
_update_query text :=
'UPDATE ONLY ' || i_table_ident ||
' SET ' || i_column_ident || ' = ' || i_column_ident ||
' WHERE ctid = ANY(\$1) RETURNING ctid';
BEGIN
-- Check page argument values
IF NOT (
i_page_offset IS NOT NULL AND i_page_offset >= 1 AND
i_to_page IS NOT NULL AND i_to_page >= 1 AND
i_to_page >= i_page_offset)
THEN
RAISE EXCEPTION 'Wrong page arguments specified.';
END IF;

-- Check that session_replication_role is set to replica to
-- prevent triggers firing
IF NOT (
SELECT setting = 'replica'
FROM pg_catalog.pg_settings
WHERE name = 'session_replication_role')
THEN
RAISE EXCEPTION 'The session_replication_role must be set to replica.';
END IF;

-- Define minimal and maximal ctid values of the range
_min_ctid := (_from_page, 1)::text::tid;
_max_ctid := (i_to_page, i_max_tupples_per_page)::text::tid;

-- Build a list of possible ctid values of the range
SELECT array_agg((pi, ti)::text::tid)
INTO _ctid_list
FROM generate_series(_from_page, i_to_page) AS pi
CROSS JOIN generate_series(1, i_max_tupples_per_page) AS ti;

<<_outer_loop>>
FOR _loop IN 1..i_max_tupples_per_page LOOP
_next_ctid_list := array[]::tid[];

-- Update all the tuples in the range
FOR _ctid IN EXECUTE _update_query USING _ctid_list
LOOP
IF _ctid > _max_ctid THEN
_result_page := -1;
EXIT _outer_loop;
ELSIF _ctid >= _min_ctid THEN
-- The tuple is still in the range, more updates are needed
_next_ctid_list := _next_ctid_list || _ctid;
END IF;
END LOOP;

_ctid_list := _next_ctid_list;

-- Finish processing if there are no tupples in the range left
IF coalesce(array_length(_ctid_list, 1), 0) = 0 THEN
_result_page := _from_page - 1;
EXIT _outer_loop;
END IF;
END LOOP;

-- No result
IF _loop = i_max_tupples_per_page AND _result_page IS NULL THEN
RAISE EXCEPTION
'Maximal loops count has been reached with no result.';
END IF;

RETURN _result_page;
END \$\$;
");

if ($DBI::err) {
logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
return undef;
}

return 1;
}
  670.  
  671. sub drop_clean_pages_function {
  672. _dbh->do("
  673. DROP FUNCTION public.pgcompact_clean_pages_$$(text, text,integer, integer, integer);
  674. ");
  675. if ($DBI::err) {
  676. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  677. return undef;
  678. }
  679.  
  680. return ;
  681. }
  682.  
  683. sub clean_pages {
  684. my $schema_name = shift;
  685. my $table_name = shift;
  686. my $column_name = shift;
  687. my $to_page = shift;
  688. my $pages_per_round = shift;
  689. my $max_tupples_per_page = shift;
  690.  
  691. my $ident_name = _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name);
  692. my $sth = _dbh->prepare("
  693. SELECT public.pgcompact_clean_pages_$$(?,?,?,?,?)
  694. ");
  695. $sth->execute($ident_name, $column_name, $to_page, $pages_per_round, $max_tupples_per_page);
  696.  
  697. if ($DBI::err) {
  698. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  699. return undef;
  700. }
  701.  
  702. my ($result) = $sth->fetchrow_array;
  703.  
  704. return $result;
  705. }
  706.  
# List all indexes of schema.table, smallest first (ORDER BY 8 is the
# relation size column). Each row carries: indexname, tablespace, indexdef,
# indmethod (the "USING <method>" word extracted from indexdef),
# conname/contypedef for PRIMARY KEY / UNIQUE constraint-backed indexes, the
# index size, and "allowed" — whether the index's pg_depend entries permit
# the concurrent rebuild-and-swap approach (constraint-backed swaps need
# PostgreSQL >= 9.1 per the version check in the query).
# Returns an arrayref of row hashrefs, or undef on SQL error.
sub get_index_data_list {
my $schema_name = shift;
my $table_name = shift;

my $sth = _dbh->prepare("
SELECT
indexname, tablespace, indexdef,
regexp_replace(indexdef, E'.* USING (\\\\w+) .*', E'\\\\1') AS indmethod,
conname,
CASE
WHEN contype = 'p' THEN 'PRIMARY KEY'
WHEN contype = 'u' THEN 'UNIQUE'
ELSE NULL END AS contypedef,
(
SELECT
bool_and(
deptype IN ('n', 'a', 'i') AND
NOT (refobjid = indexoid AND deptype = 'n') AND
NOT (
objid = indexoid AND deptype = 'i' AND
(version < array[9,1] OR contype NOT IN ('p', 'u'))))
FROM pg_catalog.pg_depend
LEFT JOIN pg_catalog.pg_constraint ON
pg_catalog.pg_constraint.oid = refobjid
WHERE
(objid = indexoid AND classid = pgclassid) OR
(refobjid = indexoid AND refclassid = pgclassid)
)::integer AS allowed,
pg_catalog.pg_relation_size(indexoid)
FROM (
SELECT
indexname, tablespace, indexdef,
(
quote_ident(schemaname) || '.' ||
quote_ident(indexname))::regclass AS indexoid,
'pg_catalog.pg_class'::regclass AS pgclassid,
string_to_array(
regexp_replace(
version(), E'.*PostgreSQL (\\\\d+\\\\.\\\\d+).*', E'\\\\1'),
'.')::integer[] AS version
FROM pg_catalog.pg_indexes
WHERE
schemaname = quote_ident(?) AND
tablename = quote_ident(?)
) AS sq
LEFT JOIN pg_catalog.pg_constraint ON
conindid = indexoid AND contype IN ('p', 'u')
ORDER BY 8
");

$sth->execute($schema_name, $table_name);

if ($DBI::err) {
logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
return undef;
}

my @result;
while(my $result = $sth->fetchrow_hashref) {
push @result, $result;
}

return \@result;
}
  771.  
  772. sub get_index_size_statistics {
  773. my $schema_name = shift;
  774. my $index_name = shift;
  775.  
  776. my $sth = _dbh->prepare("
  777. SELECT size, ceil(size / bs) AS page_count
  778. FROM (
  779. SELECT
  780. pg_catalog.pg_relation_size((quote_ident(?) || '.' || quote_ident(?))::regclass) AS size,
  781. current_setting('block_size')::real AS bs
  782. ) AS sq
  783. ");
  784.  
  785. $sth->execute($schema_name, $index_name);
  786.  
  787. if ($DBI::err) {
  788. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  789. return undef;
  790. }
  791.  
  792. my $result = $sth->fetchrow_hashref;
  793.  
  794. return ($result && ref $result eq 'HASH' && defined $result->{size} && defined $result->{page_count} ? $result : undef);
  795. }
  796.  
  797. sub get_reindex_query {
  798. my $index_data = shift;
  799.  
  800. my $sql = $index_data->{indexdef};
  801. $sql =~ s/INDEX (\S+)/INDEX CONCURRENTLY pgcompact_index_$$/;
  802. $sql =~ s/( WHERE .*|$)/ TABLESPACE $index_data->{tablespace}$1/ if (defined $index_data->{tablespace});
  803.  
  804. return $sql;
  805.  
  806. }
  807.  
  808. sub get_alter_index_query {
  809. my $schema_name = shift;
  810. my $table_name = shift;
  811. my $index_data = shift;
  812.  
  813. my $constraint_ident = _dbh->quote_identifier($index_data->{conname}) if ($index_data && ref $index_data eq 'HASH' && $index_data->{conname});
  814.  
  815. if($constraint_ident) {
  816. return
  817. "BEGIN; SET LOCAL statement_timeout TO " . LOCKED_ALTER_TIMEOUT . ";
  818. ALTER TABLE " . _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name) . " DROP CONSTRAINT $constraint_ident;
  819. ALTER TABLE " . _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($table_name) . " ADD CONSTRAINT $constraint_ident $index_data->{contypedef} USING INDEX pgcompact_index_$$;
  820. END;";
  821. } else {
  822. my $tmp_index_name = "tmp_".int(rand(1000000000));
  823. return
  824. "BEGIN;" . "
  825. ALTER INDEX " . _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($index_data->{indexname}) . " RENAME TO " . _dbh->quote_identifier($tmp_index_name) . ";
  826. ALTER INDEX " . _dbh->quote_identifier($schema_name) . ".pgcompact_index_$$ RENAME TO " . _dbh->quote_identifier($index_data->{indexname}) . ";
  827. END;
  828. DROP INDEX CONCURRENTLY " . _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($tmp_index_name) . ";";
  829. }
  830. }
  831.  
  832. sub get_straight_reindex_query {
  833. my $schema_name = shift;
  834. my $table_name = shift;
  835. my $index_data = shift;
  836.  
  837. return "REINDEX INDEX ('" . _dbh->quote_identifier($schema_name) . "." . _dbh->quote_identifier($index_data->{indexname})."')";
  838. }
  839.  
  840. sub get_index_bloat_stats {
  841. my $schema_name = shift;
  842. my $index_name = shift;
  843.  
  844. my $pgstattuple_schema_name = get_pgstattuple_schema_name;
  845.  
  846. return undef unless($pgstattuple_schema_name);
  847.  
  848. my $sth = _dbh->prepare("
  849. SELECT
  850. CASE
  851. WHEN avg_leaf_density = 'NaN' THEN 0
  852. ELSE
  853. round(
  854. (100 * (1 - avg_leaf_density / fillfactor))::numeric, 2
  855. )
  856. END AS free_percent,
  857. CASE
  858. WHEN avg_leaf_density = 'NaN' THEN 0
  859. ELSE
  860. ceil(
  861. index_size * (1 - avg_leaf_density / fillfactor)
  862. )
  863. END AS free_space
  864. FROM (
  865. SELECT
  866. coalesce(
  867. (
  868. SELECT (
  869. regexp_matches(
  870. reloptions::text, E'.*fillfactor=(\\\\d+).*'))[1]),
  871. '90')::real AS fillfactor,
  872. pgsi.*
  873. FROM pg_catalog.pg_class
  874. CROSS JOIN $pgstattuple_schema_name.pgstatindex(
  875. quote_ident(?) || '.' || quote_ident(?)) AS pgsi
  876. WHERE pg_catalog.pg_class.oid = (quote_ident(?) || '.' || quote_ident(?))::regclass
  877. ) AS oq
  878. ");
  879. $sth->execute($schema_name, $index_name, $schema_name, $index_name);
  880.  
  881. if ($DBI::err) {
  882. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  883. return undef;
  884. }
  885.  
  886. my $result = $sth->fetchrow_hashref;
  887.  
  888. return ($result && ref $result eq 'HASH' && $result->{'free_percent'} && $result->{'free_space'}) ? $result : undef;
  889. }
  890.  
  891. sub reindex {
  892. my $index_data = shift;
  893.  
  894. _dbh->do(get_reindex_query($index_data));
  895.  
  896. if ($DBI::err) {
  897. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  898. return undef;
  899. }
  900.  
  901. return;
  902. }
  903.  
  904. sub alter_index {
  905. my $schema_name = shift;
  906. my $table_name = shift;
  907. my $index_data = shift;
  908.  
  909. foreach my $sql (split(/;/, get_alter_index_query($schema_name, $table_name, $index_data))) {
  910. next if ($sql =~ /^\s*$/);
  911. _dbh->do("$sql;");
  912.  
  913. if ($DBI::err) {
  914. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  915. _dbh->do("END;");
  916. return undef;
  917. }
  918.  
  919. }
  920. }
  921.  
  922. sub drop_temp_index {
  923. my $schema_name = shift;
  924.  
  925. _dbh->do("SET LOCAL statement_timeout TO " . LOCKED_ALTER_TIMEOUT . ";");
  926. _dbh->do("DROP INDEX CONCURRENTLY ?;", undef, "$schema_name.pgcompact_index_$$");
  927.  
  928. if ($DBI::err) {
  929. logger(LOG_ERROR, "SQL Error: %s", $DBI::errstr);
  930. return undef;
  931. }
  932.  
  933. return;
  934. }
  935.  
  936. sub get_pg_backend_pid {
  937. my ($backend_pid) = _dbh->selectrow_array("select pg_backend_pid();");
  938. return $backend_pid;
  939. }
  940.  
  941. sub reindex_table {
  942. my $table_name = shift;
  943. my $schema_name = shift;
  944. my $db_name = shift;
  945. my $print_reindex_queries = shift;
  946.  
  947. my $is_reindexed;
  948.  
  949. my $index_data_list = get_index_data_list($schema_name, $table_name) || [];
  950.  
  951. if ($DBI::err) {
  952. logger(LOG_ERROR, "Table handling interrupt.");
  953. return -1;
  954. }
  955.  
  956. for my $index_data (@$index_data_list) {
  957. my $initial_index_size_stats = get_index_size_statistics($schema_name, $index_data->{indexname});
  958.  
  959. if (!$initial_index_size_stats || ref $initial_index_size_stats ne 'HASH') {
  960. logger(LOG_ERROR, "Cannot get index size statistics.");
  961. return;
  962. }
  963.  
  964. if ($initial_index_size_stats->{page_count} <= 1) {
  965. logger(LOG_NOTICE, "Skipping reindex: %s.%s, empty or 1 page index.", $schema_name, $index_data->{indexname});
  966. next;
  967. }
  968.  
  969. my $index_bloat_stats;
  970.  
  971. if (! $force) {
  972. if ($index_data->{indmethod} ne 'btree') {
  973. logger(LOG_NOTICE, "Skipping reindex: %s.%s is a %s index not a btree, reindexing is up to you.", $schema_name, $index_data->{indexname}, $index_data->{indmethod});
  974. logger(LOG_WARNING, "Reindex queries: %s.%s, initial size %d pages (%s)", $schema_name, $index_data->{indexname}, $initial_index_size_stats->{page_count}, nice_size($initial_index_size_stats->{size}));
  975. if ($index_data->{data}{allowed}) {
  976. logger(LOG_WARNING, "%s; --%s", get_reindex_query($index_data), $db_name);
  977. logger(LOG_WARNING, "%s; --%s", get_alter_index_query($schema_name, $table_name, $index_data), $db_name);
  978. } else {
  979. logger(LOG_WARNING, "%s; --%s", get_straight_reindex_query($schema_name, $table_name, $index_data), $db_name);
  980. }
  981. next;
  982. }
  983.  
  984. if ($initial_index_size_stats->{page_count} < MINIMAL_COMPACT_PAGES) {
  985. logger(LOG_NOTICE, "Skipping reindex: %s.%s, %d pages from %d pages minimum required.",$schema_name, $index_data->{indexname}, $initial_index_size_stats->{page_count}, MINIMAL_COMPACT_PAGES);
  986. next;
  987. }
  988.  
  989. $index_bloat_stats = get_index_bloat_stats($schema_name, $index_data->{indexname});
  990.  
  991. if ($index_bloat_stats && ref $index_bloat_stats eq 'HASH' && $index_bloat_stats->{'free_percent'} < MINIMAL_COMPACT_PERCENT) {
  992. logger(LOG_NOTICE, "Skipping reindex: %s.%s, %d%% space to compact from %d%% minimum required.", $schema_name, $index_data->{indexname}, $index_bloat_stats->{free_percent}, MINIMAL_COMPACT_PERCENT);
  993. next;
  994. }
  995. }
  996.  
  997. if (not $index_data->{'allowed'}) {
  998. logger(LOG_NOTICE, "Skipping reindex: %s.%s, can not reindex without heavy locks because of its dependencies, reindexing is up to you.", $schema_name, $index_data->{indexname});
  999. logger(LOG_WARNING, "Reindex queries%s: %s.%s, initial size %d pages (%s), will be reduced by %d%% (%s)",
  1000. ($force ? ' forced' : ''),
  1001. $schema_name,
  1002. $index_data->{indexname},
  1003. $initial_index_size_stats->{page_count},
  1004. nice_size($initial_index_size_stats->{size}),
  1005. $index_bloat_stats->{free_percent},
  1006. nice_size($index_bloat_stats->{free_space})
  1007. );
  1008. logger(LOG_WARNING, "%s; --%s", get_reindex_query($index_data), $db_name);
  1009. logger(LOG_WARNING, "%s; --%s", get_alter_index_query($schema_name, $table_name, $index_data), $db_name);
  1010.  
  1011. next;
  1012. }
  1013.  
  1014. if (!$no_reindex) {
  1015. my $start_reindex_time = time;
  1016. reindex($index_data);
  1017.  
  1018. if($DBI::err) {
  1019. logger(LOG_NOTICE, "Skipping index %s: %s", $index_data->{indexname});
  1020. next;
  1021. }
  1022.  
  1023. my $reindex_time = time - $start_reindex_time;
  1024.  
  1025. my $locked_alter_attempt = 0;
  1026. while ($locked_alter_attempt < LOCKED_ALTER_COUNT) {
  1027. unless(defined(alter_index($schema_name, $table_name, $index_data))) {
  1028. if ($DBI::errstr && $DBI::errstr =~ 'canceling statement due to statement timeout') {
  1029. $locked_alter_attempt++;
  1030. next;
  1031. } else {
  1032. logger(LOG_ERROR, $@);
  1033. return;
  1034. }
  1035. };
  1036. $reindex_time = time - $start_reindex_time;
  1037. last;
  1038. }
  1039. if ($locked_alter_attempt < LOCKED_ALTER_COUNT) {
  1040. my $new_stats = get_index_size_statistics($schema_name, $index_data->{indexname});
  1041. my $free_percent = 100 * (1 - $new_stats->{size} / $initial_index_size_stats->{size});
  1042. my $free_space = ($initial_index_size_stats->{size} - $new_stats->{size});
  1043. logger(LOG_WARNING, "Reindex%s: %s.%s, initial size %d pages(%s), has been reduced by %d%% (%s), duration %d seconds, attempts %d.",
  1044. ($force ? " forced" : ""),
  1045. $schema_name,
  1046. $index_data->{indexname},
  1047. $initial_index_size_stats->{page_count},
  1048. nice_size($initial_index_size_stats->{size}),
  1049. int($free_percent),
  1050. nice_size($free_space),
  1051. $reindex_time,
  1052. $locked_alter_attempt
  1053. );
  1054. $is_reindexed = (defined $is_reindexed) ? ($is_reindexed and 1) : 1;
  1055. } else {
  1056. my $drop_index_time = time;
  1057. drop_temp_index($schema_name);
  1058. $reindex_time += time - $drop_index_time;
  1059. logger(LOG_WARNING, "Reindex%s: %s.%s, lock has not been acquired, initial size %d pages(%d)",#, can be reduced by %d%% (%s), duration %d seconds.",
  1060. ($force ? " forced" : ""),
  1061. $schema_name,
  1062. $index_data->{indexname},
  1063. $initial_index_size_stats->{page_count},
  1064. nice_size($initial_index_size_stats->{size}),
  1065. #$bloat_stats->{free_percent},
  1066. #nice_size($bloat_stats->{free_space}),
  1067. #$reindex_time
  1068. );
  1069. $is_reindexed = 0;
  1070. }
  1071. }
  1072.  
  1073. if ($print_reindex_queries) {
  1074. logger(LOG_WARNING, "Reindex queries%s: %s.%s, initial size %d pages (%s), will be reduced by %d%% (%s)",
  1075. ($force ? ' forced' : ''),
  1076. $schema_name,
  1077. $index_data->{indexname},
  1078. $initial_index_size_stats->{page_count},
  1079. nice_size($initial_index_size_stats->{size}),
  1080. ($index_bloat_stats->{free_percent}||0),
  1081. nice_size($index_bloat_stats->{free_space})
  1082. );
  1083.  
  1084. logger(LOG_WARNING, "%s; --%s", get_reindex_query($index_data), $db_name);
  1085. logger(LOG_WARNING, "%s; --%s", get_alter_index_query($schema_name, $table_name, $index_data), $db_name);
  1086. }
  1087. }
  1088.  
  1089. return $is_reindexed;
  1090. }
  1091.  
  1092. #Process function
  1093.  
# process($schema_name, $table_name, $attempt, $table_info)
#
# Handle one table for one retry attempt: take an advisory lock, run an
# initial vacuum, measure bloat via pgstattuple, compact the table page by
# page from the end using clean_pages(), then vacuum/analyze and (optionally)
# reindex.  $table_info is a per-table state hashref whose {base_stats} are
# preserved across attempts so total size reduction can be reported.
#
# Returns -1 on a DBI error ("table handling interrupt"), otherwise a truthy
# value when the table needs no further attempts (locked, skipped, or fully
# compacted and reindexed) and falsy when the caller should retry it.
sub process {
    my $schema_name = shift;
    my $table_name = shift;
    my $attempt = shift;        # 0-based retry attempt counter
    my $table_info = shift;     # per-table accumulated statistics

    my $is_skipped;
    # try_advisory_lock() truthy => we hold the lock; otherwise another
    # worker is processing this table and we leave it alone.
    my $is_locked = try_advisory_lock($schema_name, $table_name) ? 0 : 1;

    if ($DBI::err) {
        logger(LOG_ERROR, "Table handling interrupt.");
        return -1;
    }

    $table_info->{stats} = get_size_stats($schema_name, $table_name);

    if ($DBI::err) {
        logger(LOG_ERROR, "Table handling interrupt.");
        return -1;
    }

    # Remember the very first size snapshot (shallow copy) for the final
    # "size reduced by" report.
    $table_info->{base_stats} = {%{$table_info->{stats}}} unless ($table_info->{base_stats});

    if (!$is_locked) {
        # Initial vacuum so the free-space map and page count are fresh.
        my $vacuum_time = time;
        vacuum($schema_name, $table_name);

        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        $vacuum_time = time - $vacuum_time;
        $table_info->{stats} = get_size_stats($schema_name, $table_name);
        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        logger(LOG_NOTICE, "Vacuum initial: %d pages left, duration %.3f seconds.", ($table_info->{stats}{page_count}||0), $vacuum_time);

        if($table_info->{stats}{page_count} <= 1) {
            logger(LOG_NOTICE, "Skipping processing: empty or 1 page table.");
            $is_skipped = 1;
        }
    }

    my $bloat_stats;
    my $is_reindexed;
    if (!$is_locked && !$is_skipped) {

        # Optional reindex before compaction, only on the first attempt.
        if ($initial_reindex && !$no_reindex && defined($attempt) && $attempt == 0) {
            $is_reindexed = reindex_table($table_name, $schema_name, $db_name);
        }

        my $get_stat_time = time;
        $bloat_stats = get_bloat_stats($schema_name, $table_name);

        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        $get_stat_time = time - $get_stat_time;
        if ($bloat_stats->{effective_page_count}) {
            logger(LOG_NOTICE,"Bloat statistics with pgstattuple: duration %.3f seconds.", $get_stat_time);
        } else {
            # No usable statistics yet: ANALYZE once and retry the
            # pgstattuple measurement before giving up on this table.
            my $analyze_time = time;
            analyze($schema_name, $table_name);

            if ($DBI::err) {
                logger(LOG_ERROR, "Table handling interrupt.");
                return -1;
            }

            $analyze_time = time - $analyze_time;
            logger(LOG_WARNING, "Analyze required initial: duration %.3f second.", $analyze_time);
            $get_stat_time = time;
            $bloat_stats = get_bloat_stats($schema_name, $table_name);

            if ($DBI::err) {
                logger(LOG_ERROR, "Table handling interrupt.");
                return -1;
            }

            $get_stat_time = time - $get_stat_time;
            if ($bloat_stats->{effective_page_count}) {
                logger(LOG_NOTICE, "Bloat statistics with pgstattuple: duration %.3f seconds.", $get_stat_time)
            } else {
                # NOTE(review): 'qiuet' looks like a typo for 'quiet' --
                # confirm against logger()'s handling of string levels.
                logger('qiuet', "Can not get bloat statistics, processing stopped.");
                $is_skipped = 1;
            }
        }
    }

    if (!$is_locked && !$is_skipped) {
        # Report how much could be compacted, then apply the skip rules
        # (triggers, minimum pages, minimum bloat percent) unless --force.
        my $can_be_compacted = ($bloat_stats->{'free_percent'} > 0 && ($table_info->{stats}{page_count} > $bloat_stats->{effective_page_count}));
        if ($can_be_compacted) {
            logger(LOG_WARNING, "Statistics: %d pages (%d pages including toasts and indexes) , approximately %0.3f%% (%d pages) can be compacted reducing the size by %s.",
                $table_info->{stats}{page_count},
                $table_info->{stats}{total_page_count},
                $bloat_stats->{'free_percent'},
                ($table_info->{stats}{page_count} - $bloat_stats->{effective_page_count}),
                nice_size($bloat_stats->{free_space})
            );
        } else {
            logger(LOG_WARNING, "Statistics: %d pages (%d pages including toasts and indexes)",
                $table_info->{stats}{page_count},
                $table_info->{stats}{total_page_count}
            );
        }

        if(has_triggers($schema_name, $table_name)) {

            if ($DBI::err) {
                logger(LOG_ERROR, "Table handling interrupt.");
                return -1;
            }

            # "always"/"replica" triggers would fire during the compacting
            # updates, so such tables are refused.
            logger(LOG_ERROR,'Can not process: "always" or "replica" triggers are on.');
            $is_skipped = 1;
        }

        if(!$force) {
            if($table_info->{stats}{page_count} < MINIMAL_COMPACT_PAGES) {
                logger(LOG_NOTICE,"Skipping processing: %d pages from %d pages minimum required.'", $table_info->{stats}{page_count}, MINIMAL_COMPACT_PAGES);
                $is_skipped = 1;
            }
            if(($bloat_stats->{free_percent}||0) <= MINIMAL_COMPACT_PERCENT) {
                logger(LOG_NOTICE,"Skipping processing: %0.2f%% space to compact from 20%% minimum required.",($bloat_stats->{free_percent}||0));
                $is_skipped = 1;
            }
        }
    }

    if (!$is_locked && !$is_skipped) {
        # Main compaction loop: repeatedly "clean" rounds of pages at the
        # tail of the table (moving tuples toward the front with updates),
        # so a later vacuum can truncate the file.
        logger(LOG_WARNING, "Processing forced.") if ($force);
        my $vacuum_page_count = 0;
        my $initial_size_stats = {%{$table_info->{stats}}};
        my $to_page = $table_info->{stats}{page_count} - 1;   # last page to clean, moves toward 0
        my $progress_report_time = time;
        my $clean_pages_total_duration = 0;
        my $last_loop = $table_info->{stats}{page_count} + 1;
        # NOTE(review): $expected_error_occurred is set below but never read
        # afterwards in this sub -- presumably informational only; confirm.
        my $expected_error_occurred = 0;

        my $expected_page_count = $table_info->{stats}{page_count};
        my $column_name = get_update_column($schema_name, $table_name);

        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        my $pages_per_round = get_pages_per_round($table_info->{stats}{page_count}, $to_page);
        my $pages_before_vacuum = get_pages_before_vacuum($table_info->{stats}{page_count}, $expected_page_count);

        my $max_tupples_per_page = get_max_tupples_per_page($schema_name, $table_name);

        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        logger(LOG_NOTICE, "Update by column: %s.", $column_name||'');
        logger(LOG_NOTICE, "Set pages/round: %d.", $pages_per_round);
        logger(LOG_NOTICE, "Set pages/vacuum: %d.", $pages_before_vacuum);

        my $start_time;
        my $last_to_page;
        my $loop;
        # Bounded by the initial page count so a non-shrinking table cannot
        # loop forever.
        for ($loop = $table_info->{stats}{page_count}; $loop > 0; $loop--) {
            $start_time = time;
            # Each round runs inside its own transaction.
            _dbh->begin_work;
            $last_to_page = $to_page;

            $to_page = clean_pages($schema_name, $table_name, $column_name, $last_to_page, $pages_per_round, $max_tupples_per_page);
            $clean_pages_total_duration += (time - $start_time);

            unless(defined($to_page)) {
                _dbh->rollback();

                if ($DBI::err && $DBI::errstr =~ 'deadlock detected') {
                    # Deadlocks are expected under load: retry the round.
                    logger(LOG_ERROR,"Detected deadlock during cleaning.");
                    next;
                } elsif ($DBI::err && $DBI::errstr =~ 'cannot extract system attribute') {
                    logger(LOG_ERROR, "System attribute extraction error has occurred, processing stopped.");
                    $expected_error_occurred = 1;
                    last;
                } elsif($DBI::err) {
                    logger(LOG_ERROR,"Cannot handle table: %s", $DBI::errstr);
                    return -1;
                } else {
                    logger(LOG_ERROR, "Incorrect result of cleaning: column_name %s, to_page %d, pages_per_round %d, max_tupples_per_page %d.",$column_name, $last_to_page, $pages_per_round, $max_tupples_per_page);
                }
            } else {
                if ($to_page == -1) {
                    # clean_pages() signals "nothing more to do" with -1.
                    _dbh->rollback();
                    $to_page = $last_to_page;
                    last;
                }
                _dbh->commit();
            }

            # Throttle: sleep proportionally to the round's duration.
            sleep($delay_ratio * (time - $start_time));

            if (_after_round_statement) {
                _after_round_statement->execute();

                if ($DBI::err) {
                    logger(LOG_ERROR, "SQL Error in after round statement: %s", $DBI::errstr);
                }
            }

            # Periodic progress report (percentage only when pgstattuple
            # gave an effective page count).
            if (time - $progress_report_time >= PROGRESS_REPORT_PERIOD && $last_to_page != $to_page) {
                logger(LOG_WARNING, "Progress: %s %d pages completed.", (defined $bloat_stats->{effective_page_count} ? int(100 * ($to_page ? ($initial_size_stats->{page_count} - $to_page - 1) / ($table_info->{base_stats}{page_count} - $bloat_stats->{effective_page_count}) : 1) ).'%, ' : ' '), ($table_info->{stats}{page_count} - $to_page - 1));
                $progress_report_time = time;
            }

            $expected_page_count -= $pages_per_round;
            $vacuum_page_count += ($last_to_page - $to_page);

            # Routine vacuum after enough pages were cleaned, to let the
            # server actually truncate the relation.
            if ($routine_vacuum && $vacuum_page_count >= $pages_before_vacuum) {
                my $duration = $clean_pages_total_duration / ($last_loop - $loop);
                my $average_duration = $duration == 0 ? 0.0001 : $duration;
                logger(LOG_WARNING, "Cleaning in average: %.1f pages/second (%.3f seconds per %d pages).", ($pages_per_round / $average_duration), $duration, $pages_per_round);
                $clean_pages_total_duration = 0;
                $last_loop = $loop;

                my $vacuum_time = time;
                vacuum($schema_name, $table_name);

                if ($DBI::err) {
                    logger(LOG_ERROR, "Table handling interrupt.");
                    return -1;
                }

                $vacuum_time = time - $vacuum_time;

                $table_info->{stats} = get_size_stats($schema_name, $table_name);

                if ($DBI::err) {
                    logger(LOG_ERROR, "Table handling interrupt.");
                    return -1;
                }

                if ($table_info->{stats}{page_count} > $to_page + 1) {
                    logger(LOG_NOTICE, "Vacuum routine: can not clean %d pages, %d pages left, duration %0.3f seconds.", ($table_info->{stats}{page_count} - $to_page - 1), $table_info->{stats}{page_count}, $vacuum_time);
                } else {
                    logger(LOG_NOTICE, "Vacuum routine: %d pages left, duration %.3f seconds.", ($table_info->{stats}{page_count}||0), $vacuum_time);
                }

                $vacuum_page_count = 0;

                my $last_pages_before_vacuum = $pages_before_vacuum;
                $pages_before_vacuum = get_pages_before_vacuum($table_info->{stats}{page_count}, $expected_page_count);
                if ($last_pages_before_vacuum != $pages_before_vacuum) {
                    logger(LOG_WARNING, "Set pages/vacuum: %d.", $pages_before_vacuum);
                }
            }

            # Clamp the cursor if vacuum truncated past it.
            if ($to_page >= $table_info->{stats}{page_count}) {
                $to_page = $table_info->{stats}{page_count} - 1;
            }

            if ($to_page <= 1) {
                $to_page = 0;
                last;
            }

            my $last_pages_per_round = $pages_per_round;
            $pages_per_round = get_pages_per_round($table_info->{stats}{page_count}, $to_page);

            if ($last_pages_per_round != $pages_per_round) {
                logger(LOG_WARNING, "Set pages/round: %d.", $pages_per_round);
            }
        }

        if ($loop == 0) {
            logger(LOG_WARNING, "Maximum loops reached.");
        }

        # Final vacuum to truncate whatever the rounds freed.
        if ($to_page > 0) {
            my $vacuum_time = time;
            sleep(1);
            vacuum($schema_name, $table_name);

            if ($DBI::err) {
                logger(LOG_ERROR, "Table handling interrupt.");
                return -1;
            }

            $vacuum_time = time - $vacuum_time;

            $table_info->{stats} = get_size_stats($schema_name, $table_name);

            if ($DBI::err) {
                logger(LOG_ERROR, "Table handling interrupt.");
                return -1;
            }

            if ($table_info->{stats}{page_count} > $to_page + $pages_per_round) {
                logger(LOG_NOTICE, "Vacuum final: cannot clean %d pages, %d pages left, duration %0.3f seconds.", ($table_info->{stats}{page_count} - $to_page - $pages_per_round), $table_info->{stats}{page_count}, $vacuum_time);
            } else {
                logger(LOG_NOTICE, "Vacuum final: %d pages left, duration %.3f seconds.", ($table_info->{stats}{page_count}||0), $vacuum_time);
            }
        }

        #if (not $self->{'_no_final_analyze'}) {
        my $analyze_time = time;
        analyze($schema_name, $table_name);

        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        $analyze_time = time - $analyze_time;
        logger(LOG_NOTICE, "Analyze final: duration %.3f second.", $analyze_time);
        #}

        my $get_stat_time = time;
        $bloat_stats = get_bloat_stats($schema_name, $table_name);

        if ($DBI::err) {
            logger(LOG_ERROR, "Table handling interrupt.");
            return -1;
        }

        $get_stat_time = time - $get_stat_time;
        logger(LOG_NOTICE,"Bloat statistics with pgstattuple: duration %.3f seconds.", $get_stat_time);

        $pages_before_vacuum = get_pages_before_vacuum($table_info->{stats}{page_count}, $expected_page_count);
    }

    # The table "will be skipped" on the next attempt if it no longer meets
    # the minimum size/bloat thresholds.
    my $will_be_skipped = (!$is_locked && ($is_skipped || $table_info->{stats}{page_count} < MINIMAL_COMPACT_PAGES || $bloat_stats->{free_percent} < MINIMAL_COMPACT_PERCENT));

    # NOTE(review): with the caller's loop running $attempt from 0 to
    # $max_retry_count - 1, the ($attempt == $max_retry_count) clause below
    # can never be true -- possibly meant $max_retry_count - 1; confirm.
    if (!$is_locked && ($print_reindex_queries || (!$no_reindex && (!$is_skipped || ($attempt == $max_retry_count) || (!$is_reindexed && $is_skipped && $attempt == 0))))) {

        $is_reindexed = (reindex_table($table_name, $schema_name, $db_name, $print_reindex_queries) || $is_reindexed);

        if (!$no_reindex) {
            $table_info->{stats} = get_size_stats($schema_name, $table_name);

            if ($DBI::err) {
                logger(LOG_ERROR, "Table handling interrupt.");
                return -1;
            }
        }
    }

    if (!$is_locked && !($is_skipped && !defined($is_reindexed))) {
        my $complete = (($will_be_skipped || $is_skipped) && (defined $is_reindexed ? $is_reindexed : 1));
        if ($complete) {
            logger(LOG_NOTICE, "Processing complete.");
        } else {
            logger(LOG_NOTICE, "Processing incomplete.");
        }

        if (defined $bloat_stats->{free_percent} && defined $bloat_stats->{effective_page_count} && $bloat_stats->{free_percent} > 0 && $table_info->{stats}{page_count} > $bloat_stats->{effective_page_count} && !$complete) {
            logger(LOG_WARNING, "Processing results: %d pages left (%d pages including toasts and indexes), size reduced by %s (%s including toasts and indexes) in total , approximately %d%% (%d pages) that is %s more were expected to be compacted after this attempt.",
                $table_info->{stats}{page_count},
                $table_info->{stats}{total_page_count},
                nice_size($table_info->{base_stats}{size} - $table_info->{stats}{size}),
                nice_size($table_info->{base_stats}{total_size} - $table_info->{stats}{total_size}),
                $bloat_stats->{free_percent},
                $table_info->{stats}{page_count} - $bloat_stats->{effective_page_count},
                nice_size($bloat_stats->{free_space})
            );
        } else {
            logger(LOG_WARNING, "Processing results: %d pages left (%d pages including toasts and indexes), size reduced by %s (%s including toasts and indexes) in total.",
                $table_info->{stats}{page_count},
                $table_info->{stats}{total_page_count},
                nice_size($table_info->{base_stats}{size} - $table_info->{stats}{size}),
                nice_size($table_info->{base_stats}{total_size} - $table_info->{stats}{total_size}),
            );
        }
    }

    # Truthy => done with this table; falsy => caller should retry it.
    return (($is_locked || $is_skipped || $will_be_skipped) && (defined $is_reindexed ? $is_reindexed : 1));
}
  1474.  
  1475. #Main code
  1476.  
  1477. if ($show_version) {
  1478. show_version;
  1479. exit(1);
  1480. }
  1481.  
  1482. if ($show_help) {
  1483. show_help;
  1484. exit(1);
  1485. }
  1486.  
  1487. if ($show_man) {
  1488. show_man;
  1489. exit(1);
  1490. }
  1491.  
  1492. unless ($db_name || $table_name || $all_db) {
  1493. not_enough_arguments();
  1494. exit(1);
  1495. }
  1496.  
  1497. if ($no_reindex && $initial_reindex) {
  1498. no_together_arguments('no_reindex', 'initial_reindex');
  1499. exit(1);
  1500. }
  1501.  
  1502. my @dbs = ($db_name);
  1503.  
  1504. if($all_db) {
  1505. $db_name = 'template1';
  1506. unless(db_connect($db_name, $db_host, $db_port, $db_user, $db_passwd)) {
  1507. logger(LOG_ERROR, "Cannot get database list. Quit.");
  1508. exit(0);
  1509. }
  1510. @dbs =
  1511. my $dbs = get_databases;
  1512. unless ($dbs) {
  1513. logger(LOG_ERROR, "Interrupt processing.");
  1514. exit(0);
  1515. }
  1516. @dbs = @$dbs;
  1517.  
  1518. db_disconnect;
  1519. }
  1520.  
  1521. if($only_schema) {
  1522. %only_schemas = map {$_ => 1} split(/,/,$only_schema);
  1523. $schema_name = $only_schema if (scalar(keys(%only_schemas)) == 1);
  1524. }
  1525.  
  1526. if($exclude_schema) {
  1527. %excluded_schemas = map {$_ => 1} split(/,/,$exclude_schema);
  1528. }
  1529.  
  1530. if($exclude_table) {
  1531. %excluded_tables = map {$_ => 1} split(/,/,$exclude_table);
  1532. }
  1533.  
my $databases_left;          # databases that could not be fully processed
my $is_something_processed;  # set once any table was handled at all

# Main loop: connect to each target database, prepare the helper objects,
# and retry-process every eligible table up to $max_retry_count times.
foreach my $current_db_name (@dbs) {
    $table_info{$current_db_name} = {tables => {}, attempts => 0, size => 0, base_size => 0};
    $db_name = $current_db_name;
    my $tables_left;

    set_current_db_name($current_db_name);
    unset_current_schema_name_table_name;
    unless(db_connect($current_db_name, $db_host, $db_port, $db_user, $db_passwd)) {
        $databases_left++;
        next;
    }

    my $ionice_made = undef;

    # Put the server backend into idle I/O class when we have the OS
    # privileges to do so (only attempted as postgres/root).
    my $backend_pid = get_pg_backend_pid();
    if ($backend_pid) {
        my $user_login = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<);
        if ($user_login eq 'postgres' || $user_login eq 'root') {
            my $errstr = `ionice -c 3 -p $backend_pid 2>/dev/stdout`;
            if ($errstr) {
                chomp $errstr;
                logger(LOG_WARNING, "Cannot set ionice 3 for the process. It is recomennded to set ionice -c 3 for pgcompacttable. Error: %s", $errstr);
            } else {
                $ionice_made = 1;
            }
        }
        logger(LOG_WARNING, "Postgress backend pid: $backend_pid") if ($backend_pid && $backend_pid =~/^\d+$/ && $backend_pid > 0);
    } else {
        logger(LOG_ERROR, "Cannot get backend pid from Postgres. Exitting...");
        exit(-1);
    }

    unless ($ionice_made) {
        logger(LOG_WARNING, "It is recomennded to set ionice -c 3 for pgcompacttable: ionice -c 3 -p %d", $backend_pid);
    }

    # Suppress ordinary triggers during the compacting updates.
    set_session_replication_role;

    if ($DBI::err) {
        logger(LOG_ERROR, "Database handling interrupt.");
        db_disconnect($current_db_name);
        $databases_left++;
        next;
    }

    # pgstattuple is mandatory for bloat measurement.
    # NOTE(review): 'qiuet' below looks like a typo for 'quiet' -- confirm
    # against logger()'s handling of string levels.
    unless(get_pgstattuple_schema_name) {
        logger('qiuet', "Skip handling database %s: pgstattuple extention is not found", $current_db_name);
        db_disconnect($current_db_name);
        next;
    }

    unless (create_clean_pages_function) {
        logger('qiuet', "Skip handling database %s: pgstattuple cannot create clean_pages function", $current_db_name);
        db_disconnect($current_db_name);
        $databases_left++;
        next;
    }

    # Either the single table given on the command line, or every table in
    # the database.
    my $database_tables = ($schema_name && $table_name) ? [{schemaname => $schema_name, tablename => $table_name}] : get_database_tables($current_db_name);

    unless($database_tables && ref $database_tables eq 'ARRAY' && scalar(@$database_tables) > 0) {
        logger('qiuet', "Skip handling database %s: cannot find tables", $current_db_name);
    }

    for (my $attempt = 0; $attempt < $max_retry_count; $attempt++) {

        logger('qiuet', "Handling tables. Attempt %s", ($attempt + 1));

        $table_info{$current_db_name}{attempts}++;

        my @retry_idents = ();

        foreach my $current_ident (@$database_tables) {
            # Apply schema/table include/exclude filters.
            next if (!$current_ident || ref $current_ident ne 'HASH' || $excluded_schemas{$current_ident->{schemaname}} || ($only_schema && !$only_schemas{$current_ident->{schemaname}}) || $excluded_tables{"$current_ident->{schemaname}.$current_ident->{tablename}"});
            my $table_key = $current_ident->{schemaname}.$current_ident->{tablename};
            $table_info{$current_db_name}{tables}{$table_key}{current} = {} unless ($table_info{$current_db_name}{tables}{$table_key}{current} && ref $table_info{$current_db_name}{tables}{$table_key}{current} eq 'HASH');
            $is_something_processed = 1 unless ($is_something_processed);
            set_current_schema_name_table_name($current_ident->{schemaname}, $current_ident->{tablename});
            logger(LOG_NOTICE, "Start handling table %s.%s", $current_ident->{schemaname}, $current_ident->{tablename});
            # A falsy result from process() means "retry this table".
            push @retry_idents, $current_ident unless process($current_ident->{schemaname}, $current_ident->{tablename}, $attempt, $table_info{$current_db_name}{tables}{$table_key}{current});
            logger(LOG_NOTICE, "Finish handling table %s.%s", $current_ident->{schemaname}, $current_ident->{tablename});
            # Snapshot sizes for the summary report: base sizes once on the
            # first attempt, current sizes on every attempt.
            if ($attempt == 0) {
                $table_info{$current_db_name}{tables}{$table_key}{final}{base_stats}{size} = $table_info{$current_db_name}{tables}{$table_key}{current}{base_stats}{size};
                $table_info{$current_db_name}{tables}{$table_key}{final}{base_stats}{total_size} = $table_info{$current_db_name}{tables}{$table_key}{current}{base_stats}{total_size};
            }
            $table_info{$current_db_name}{tables}{$table_key}{final}{stats}{size} = $table_info{$current_db_name}{tables}{$table_key}{current}{stats}{size};
            $table_info{$current_db_name}{tables}{$table_key}{final}{stats}{total_size} = $table_info{$current_db_name}{tables}{$table_key}{current}{stats}{total_size};
        }

        if (scalar @retry_idents > 0) {
            @$database_tables = @retry_idents;
        } else {
            undef $tables_left;
            last;
        }

        $tables_left = scalar(@retry_idents);
    }

    drop_clean_pages_function;

    unset_current_schema_name_table_name;

    $databases_left++ if ($tables_left);

    logger(LOG_WARNING, "Processing %scomplete%s.", ($tables_left ? 'in' : ''), ($tables_left ? ": $tables_left tables left" : ''));

    # Per-database totals: sum of (base size - final size) over all tables.
    $table_info{$current_db_name}{size} = sum(map {$table_info{$current_db_name}{tables}{$_}{final}{base_stats}{size} - $table_info{$current_db_name}{tables}{$_}{final}{stats}{size}} keys(%{$table_info{$current_db_name}{tables}}));
    $table_info{$current_db_name}{total_size} = sum(map {$table_info{$current_db_name}{tables}{$_}{final}{base_stats}{total_size} - $table_info{$current_db_name}{tables}{$_}{final}{stats}{total_size}} keys(%{$table_info{$current_db_name}{tables}}));
    logger(LOG_ERROR, "Processing results: size reduced by %s (%s including toasts and indexes) in total.", nice_size($table_info{$current_db_name}{size}), nice_size($table_info{$current_db_name}{total_size}));

    db_disconnect($current_db_name);
}

unset_current_db_name;

if($databases_left) {
    logger(LOG_WARNING, "Processing incomplete: %d databases left.", $databases_left);
} else {
    logger(LOG_WARNING, "Processing complete: %d retries to process has been done", sum(map {$table_info{$_}{attempts}} keys(%table_info)));
}

# Grand-total summary over every processed database.
if ($is_something_processed) {
    my $dbases_message = "";
    foreach (keys(%table_info)) {
        $dbases_message .= ", " . nice_size($table_info{$_}{size}) . " (" . nice_size($table_info{$_}{total_size}) . ") $_" if ($table_info{$_} && ref $table_info{$_} eq 'HASH' && $table_info{$_}{size} && $table_info{$_}{total_size});
    }

    logger(LOG_ERROR, "Processing results: size reduced by %s (%s including toasts and indexes) in total%s.",
        nice_size(sum(map {$table_info{$_}{size}||0} keys(%table_info)) || 0),
        nice_size(sum(map {$table_info{$_}{total_size}||0} keys(%table_info))|| 0),
        $dbases_message
    );
}

1;
  1673.  
  1674. =head1 NAME
  1675.  
  1676. B<pgcompacttable> - PostgreSQL bloat reducing tool.
  1677.  
  1678. =head1 SYNOPSIS
  1679.  
  1680. pgcompacttable [OPTION...]
  1681.  
  1682. =over 4
  1683.  
  1684. =item General options:
  1685.  
  1686. [-?mV] [(-q | -v LEVEL)]
  1687.  
  1688. =item Connection options:
  1689.  
  1690. [-h HOST] [-p PORT] [-U USER] [-W PASSWD] [-P PATH]
  1691.  
  1692. =item Targeting options:
  1693.  
  1694. (-a | -d DBNAME...) [-n SCHEMA...] [-t TABLE...] [-D DBNAME...]
  1695. [-N SCHEMA...] [-T TABLE...]
  1696.  
  1697. =back
  1698.  
  1699. =head1 DESCRIPTION
  1700.  
  1701. B<pgcompacttable> is a tool to reduce bloat for tables and indexes without
  1702. heavy locks.
  1703.  
  1704. =over 4
  1705.  
  1706. =back
  1707.  
  1708. pgstattuple must be installed. B<pgcompacttable> uses pgstattuple to get statistics.
  1709.  
  1710. =head1 EXAMPLES
  1711.  
  1712. Shows usage manual.
  1713.  
  1714. pgcompacttable --man
Compacts all the bloated tables in all the databases in the cluster plus their bloated indexes. Prints additional progress information.
  1716.  
  1717. pgcompacttable --all --verbose info
  1718.  
  1719. Compacts all the bloated tables in the billing database and their bloated indexes excepts ones that are in the pgq schema.
  1720.  
  1721. pgcompacttable --dbname billing --exclude-schema pgq
  1722.  
  1723. =head1 OPTIONS
  1724.  
  1725. =head2 General options
  1726.  
  1727. =over 4
  1728.  
  1729. =item B<-?>
  1730.  
  1731. =item B<--help>
  1732.  
  1733. Display short help.
  1734.  
  1735. =item B<-m>
  1736.  
  1737. =item B<--man>
  1738.  
  1739. Display full manual.
  1740.  
  1741. =item B<-V>
  1742.  
  1743. =item B<--version>
  1744.  
  1745. Print version.
  1746.  
  1747. =item B<-q>
  1748.  
  1749. =item B<--quiet>
  1750.  
Quiet mode. Do not display any messages except error messages and the total result.
  1752.  
  1753. =item B<-v>
  1754.  
  1755. =item B<--verbose>
  1756.  
  1757. Verbose mode. Display all the progress messages.
  1758.  
=item B<-v> LEVEL

=item B<--verbose> LEVEL

A verbosity level. One of C<warning>, C<notice>, C<info>. By default C<notice>.
  1764.  
  1765. =back
  1766.  
  1767. =head2 Connection options
  1768.  
  1769. The B<pgcompacttable> tries to connect to the database with the DBI Perl module.
  1770.  
  1771. If some of the connection options is not specified the tool tries to
  1772. get it from C<PGHOST>, C<PGPORT>, C<PGUSER>, C<PGPASSWORD> environment
  1773. variables. If password is still unknown after that than it tries to
  1774. get it from the password file that C<PGPASSFILE> refers to and if this
  1775. file does not exist it tries to get it from C<HOME/.pgpass> file.
  1776.  
  1777. =over 4
  1778.  
  1779. =item B<-h> HOST
  1780.  
  1781. =item B<--host> HOST
  1782.  
  1783. A database host. By default C<localhost>.
  1784.  
  1785. =item B<-p> PORT
  1786.  
  1787. =item B<--port> PORT
  1788.  
  1789. A database port. By default C<5432>.
  1790.  
  1791. =item B<-U> USER
  1792.  
  1793. =item B<--user> USER
  1794.  
  1795. A database user. By default current system user is used (as returned by whoami).
  1796.  
  1797. =item B<-W> PASSWD
  1798.  
  1799. =item B<--password> PASSWD
  1800.  
  1801. A password for the user.
  1802.  
  1803. =back
  1804.  
  1805. =head2 Targeting options
  1806.  
  1807. Note that if you specified a database, schema or table that is not in the cluster it will be ignored. Redundant exclusions will be ignored too. All these options except C<--all> can be specified several times.
  1808.  
  1809. =over 4
  1810.  
  1811. =item B<-a>
  1812.  
  1813. =item B<--all>
  1814.  
  1815. Process all the databases in the cluster.
  1816.  
  1817. =item B<-d> DBNAME
  1818.  
  1819. =item B<--dbname> DBNAME
  1820.  
  1821. A database to process. By default all the user databases of the instance are processed.
  1822.  
  1823. =item B<-D> DBNAME
  1824.  
  1825. =item B<--exclude-dbname> DBNAME
  1826.  
  1827. A database to exclude from processing.
  1828.  
  1829. =item B<-n> SCHEMA
  1830.  
  1831. =item B<--schema> SCHEMA
  1832.  
  1833. A schema to process. By default the 'public' schema is processed.
  1834.  
  1835. =item B<-N> SCHEMA
  1836.  
  1837. =item B<--exclude-schema> SCHEMA
  1838.  
  1839. A schema to exclude from processing.
  1840.  
  1841. =item B<-t> TABLE
  1842.  
  1843. =item B<--table> TABLE
  1844.  
  1845. A table to process. By default all the tables of the specified schema are processed.
  1846.  
  1847. =item B<-T> TABLE
  1848.  
  1849. =item B<--exclude-table> TABLE
  1850.  
  1851. A table to exclude from processing.
  1852.  
  1853. =back
  1854.  
  1855. =head2 Options controlling the behaviour
  1856.  
  1857. =over 4
  1858.  
  1859. =item B<-R>
  1860.  
  1861. =item B<--routine-vacuum>
  1862.  
  1863. Turn on the routine vacuum. By default all the vacuums are off.
  1864.  
  1865. =item B<-r>
  1866.  
  1867. =item B<--no-reindex>
  1868.  
  1869. Turn off reindexing of tables after processing.
  1870.  
  1871. =item B<-i>
  1872.  
  1873. =item B<--initial-reindex>
  1874.  
  1875. Perform an initial reindex of tables before processing.
  1876.  
  1877. =item B<-s>
  1878.  
  1879. =item B<--print-reindex-queries>
  1880.  
  1881. Print reindex queries. Useful if you want to perform manual
  1882. reindex later.
  1883.  
  1884. =item B<-f>
  1885.  
  1886. =item B<--force>
  1887.  
  1888. Try to compact even those tables and indexes that do not meet minimal bloat requirements.
  1889.  
  1890. =item B<-E> RATIO
  1891.  
  1892. =item B<--delay-ratio> RATIO
  1893.  
  1894. A dynamic part of the delay between rounds is calculated as previous-round-time * delay-ratio. By default 2.
  1895.  
  1896. =item B<-Q> Query
  1897.  
  1898. =item B<--after-round-query> Query
  1899.  
  1900. An SQL statement to be called after each round against the current database.
  1901.  
  1902. =item B<-o> COUNT
  1903.  
  1904. =item B<--max-retry-count> COUNT
  1905.  
  1906. A maximum number of retries in case of unsuccessful processing. By default 10.
  1907.  
  1908. =item B<-x> COUNT
  1909.  
  1910. =back
  1911.  
  1912. =cut
  1913.  
  1914. =head1 LICENSE AND COPYRIGHT
  1915.  
  1916. Copyright (c) 2015 Maxim Boguk
  1917.  
  1918. =head1 AUTHOR
  1919.  
  1920. =over 4
  1921.  
  1922. =item L<Maxim Boguk|mailto:maxim.boguk@gmail.com>
  1923.  
  1924. =back
  1925.  
  1926. =cut
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement