7675.686.18
by Stuart Bishop
Let report-database-stats.py report on arbitrary time periods |
1 |
#!/usr/bin/python -S
|
7675.686.9
by Stuart Bishop
Basic database utilization report |
2 |
# Copyright 2010 Canonical Ltd. This software is licensed under the
|
3 |
# GNU Affero General Public License version 3 (see the file LICENSE).
|
|
4 |
||
5 |
"""Generate the database statistics report."""
|
|
6 |
||
7 |
__metaclass__ = type |
|
8 |
||
9 |
import _pythonpath |
|
10 |
||
7675.686.18
by Stuart Bishop
Let report-database-stats.py report on arbitrary time periods |
11 |
from datetime import datetime |
7675.686.9
by Stuart Bishop
Basic database utilization report |
12 |
from operator import attrgetter |
7675.686.27
by Stuart Bishop
Disk utilization statistics are expensive to make and only calculated daily. Handle this gracefully on historical reports or reports on < 24 hour time ranges |
13 |
from textwrap import ( |
14 |
dedent, |
|
15 |
fill, |
|
16 |
)
|
|
7675.686.9
by Stuart Bishop
Basic database utilization report |
17 |
|
18 |
from canonical.database.sqlbase import connect, sqlvalues |
|
19 |
from canonical.launchpad.scripts import db_options |
|
20 |
from lp.scripts.helpers import LPOptionParser |
|
7675.686.22
by Stuart Bishop
Start of bloat report |
21 |
from lp.services.database.namedrow import named_fetchall |
7675.686.9
by Stuart Bishop
Basic database utilization report |
22 |
|
23 |
||
class Table:
    """Mutable record for one row of table statistics.

    get_table_stats() sets one attribute per result column on each
    instance, plus the derived total_tup_read and total_tup_written
    attributes.
    """
    pass
def get_where_clause(options, fuzz='0 seconds'):
    "Generate a WHERE clause referencing the date_created column."
    # The command line supplies two of: the from timestamp, the until
    # timestamp and an interval. The interval is a PostgreSQL interval
    # string, which Python cannot conveniently do arithmetic on, so the
    # endpoint arithmetic is pushed down into SQL. Build SQL
    # expressions for both endpoints of the reporting range.
    if options.from_ts:
        range_start = ("CAST(%s AS timestamp without time zone)"
            % sqlvalues(options.from_ts))
    elif options.interval and options.until_ts:
        range_start = (
            "CAST(%s AS timestamp without time zone) - CAST(%s AS interval)"
            % sqlvalues(options.until_ts, options.interval))
    elif options.interval:
        range_start = (
            "(CURRENT_TIMESTAMP AT TIME ZONE 'UTC') - CAST(%s AS interval)"
            % sqlvalues(options.interval))
    else:
        # No lower bound given at all - use the epoch.
        range_start = "CAST('1970-01-01' AS timestamp without time zone)"

    if options.until_ts:
        range_end = (
            "CAST(%s AS timestamp without time zone)"
            % sqlvalues(options.until_ts))
    elif options.interval and options.from_ts:
        range_end = (
            "CAST(%s AS timestamp without time zone) + CAST(%s AS interval)"
            % sqlvalues(options.from_ts, options.interval))
    else:
        range_end = "CURRENT_TIMESTAMP AT TIME ZONE 'UTC'"

    # Widen both ends of the range by the fuzz interval, so samples
    # that are not taken exactly on the boundary still match.
    slop = "CAST(%s AS interval)" % sqlvalues(fuzz)
    return "date_created BETWEEN (%s - %s) AND (%s + %s)" % (
        range_start, slop, range_end, slop)
def get_table_stats(cur, options):
    """Return a set of Table objects describing activity in the period.

    Each Table carries one attribute per column of the query below
    (activity counters are deltas between the earliest and latest
    samples in the range), plus derived total_tup_read and
    total_tup_written attributes.

    :param cur: An open DB-API cursor.
    :param options: Parsed command line options (from_ts, until_ts,
        interval) used to build the date_created WHERE clause.
    """
    params = {'where': get_where_clause(options)}
    tablestats_query = dedent("""\
        SELECT
            Earliest.date_created AS date_start,
            Latest.date_created AS date_end,
            Latest.schemaname,
            Latest.relname,
            Latest.seq_scan - Earliest.seq_scan AS seq_scan,
            Latest.seq_tup_read - Earliest.seq_tup_read AS seq_tup_read,
            Latest.idx_scan - Earliest.idx_scan AS idx_scan,
            Latest.idx_tup_fetch - Earliest.idx_tup_fetch AS idx_tup_fetch,
            Latest.n_tup_ins - Earliest.n_tup_ins AS n_tup_ins,
            Latest.n_tup_upd - Earliest.n_tup_upd AS n_tup_upd,
            Latest.n_tup_del - Earliest.n_tup_del AS n_tup_del,
            Latest.n_tup_hot_upd - Earliest.n_tup_hot_upd AS n_tup_hot_upd,
            Latest.n_live_tup,
            Latest.n_dead_tup,
            Latest.last_vacuum,
            Latest.last_autovacuum,
            Latest.last_analyze,
            Latest.last_autoanalyze
        FROM
            DatabaseTableStats AS Earliest,
            DatabaseTableStats AS Latest
        WHERE
            Earliest.date_created = (
                SELECT min(date_created) FROM DatabaseTableStats
                WHERE %(where)s)
            AND Latest.date_created = (
                SELECT max(date_created) FROM DatabaseTableStats
                WHERE %(where)s)
            AND Earliest.schemaname = Latest.schemaname
            AND Earliest.relname = Latest.relname
        """ % params)
    cur.execute(tablestats_query)
    # description[0] is the column name, per PEP-0249.
    fields = [description[0] for description in cur.description]
    tables = set()
    for row in cur.fetchall():
        table = Table()
        # Pair each column name with its value directly instead of
        # indexing both sequences by position.
        for field, value in zip(fields, row):
            setattr(table, field, value)
        table.total_tup_read = table.seq_tup_read + table.idx_tup_fetch
        table.total_tup_written = (
            table.n_tup_ins + table.n_tup_upd + table.n_tup_del)
        tables.add(table)
    return tables
def get_cpu_stats(cur, options):
    """Return a set of (avg_cpu, username) tuples for the time range."""
    # This query calculates the average cpu utilization from the
    # samples, assuming samples are taken at regular intervals over
    # the period.
    # SUM()/COUNT() is used instead of AVG() because database users
    # not connected when a sample was taken have no row recorded at
    # all - we want the average utilization over the whole time
    # period, not over the subset of it the user was connected for.
    params = {'where': get_where_clause(options)}
    query = dedent("""\
        SELECT (
            CAST(SUM(cpu) AS float) / (
                SELECT COUNT(DISTINCT date_created) FROM DatabaseCpuStats
                WHERE %(where)s
            )) AS avg_cpu, username
        FROM DatabaseCpuStats
        WHERE %(where)s
        GROUP BY username
        """ % params)
    cur.execute(query)
    raw_stats = set(cur.fetchall())

    # Fold edge into lpnet, as they are now running the same code.
    # This is a temporary hack until we drop edge entirely. See
    # Bug #667883 for details.
    folded = {'lpnet': 0.0, 'edge': 0.0}
    cpu_stats = set()
    for avg_cpu, username in raw_stats:
        if username in folded:
            folded[username] = avg_cpu
        else:
            cpu_stats.add((avg_cpu, username))
    cpu_stats.add((folded['lpnet'] + folded['edge'], 'lpnet'))

    return cpu_stats
def get_bloat_stats(cur, options, kind):
    """Return bloat information as of the end of the requested period.

    :param cur: An open DB-API cursor.
    :param options: Parsed command line options (bloat and min_bloat
        thresholds, plus the time range options).
    :param kind: 'r' for tables (relations) or 'i' for indexes,
        matched against DatabaseDiskUtilization.kind.
    :return: A list of named-tuple-like rows, biggest bloat first.
    """
    params = {
        # We only collect these statistics daily, so add some fuzz
        # to ensure bloat information ends up on the daily reports;
        # we cannot guarantee the disk utilization statistics occur
        # exactly 24 hours apart. Our most recent snapshot could be 1
        # day ago, give or take a few hours.
        'where': get_where_clause(options, fuzz='1 day 6 hours'),
        'bloat': options.bloat,
        'min_bloat': options.min_bloat,
        'kind': kind,
        }
    # NOTE: the query is interpolated twice. %(where)s, %(min_bloat)s
    # and %(bloat)s are filled in by the Python % operator below;
    # %%(kind)s survives that as %(kind)s and is bound by the database
    # driver when cur.execute() is passed params.
    query = dedent("""
        SELECT * FROM (
            SELECT DISTINCT
                namespace,
                name,
                sub_namespace,
                sub_name,
                count(*) OVER t AS num_samples,
                last_value(table_len) OVER t AS table_len,
                pg_size_pretty(last_value(table_len) OVER t) AS table_size,
                last_value(dead_tuple_len + free_space) OVER t AS bloat_len,
                pg_size_pretty(last_value(dead_tuple_len + free_space) OVER t)
                    AS bloat_size,
                first_value(dead_tuple_percent + free_percent) OVER t
                    AS start_bloat_percent,
                last_value(dead_tuple_percent + free_percent) OVER t
                    AS end_bloat_percent,
                (last_value(dead_tuple_percent + free_percent) OVER t
                    - first_value(dead_tuple_percent + free_percent) OVER t
                    ) AS delta_bloat_percent,
                (last_value(table_len) OVER t
                    - first_value(table_len) OVER t) AS delta_bloat_len,
                pg_size_pretty(
                    last_value(table_len) OVER t
                    - first_value(table_len) OVER t) AS delta_bloat_size
            FROM DatabaseDiskUtilization
            WHERE
                %(where)s
                AND kind = %%(kind)s
            WINDOW t AS (
                PARTITION BY sort ORDER BY date_created
                ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)
            ) AS whatever
        WHERE
            table_len >= %(min_bloat)s
            AND end_bloat_percent >= %(bloat)s
        ORDER BY bloat_len DESC
        """ % params)
    cur.execute(query, params)
    bloat_stats = named_fetchall(cur)
    return list(bloat_stats)
def main():
    """Parse command line options and print the database stats report.

    Sections, in order: most active users (CPU), most written tables,
    most read tables, then - if bloat data exists for the range -
    bloated/bloating tables and indexes.
    """
    parser = LPOptionParser()
    db_options(parser)
    parser.add_option(
        "-f", "--from", dest="from_ts", type=datetime,
        default=None, metavar="TIMESTAMP",
        help="Use statistics collected since TIMESTAMP.")
    parser.add_option(
        "-u", "--until", dest="until_ts", type=datetime,
        default=None, metavar="TIMESTAMP",
        help="Use statistics collected up until TIMESTAMP.")
    parser.add_option(
        "-i", "--interval", dest="interval", type=str,
        default=None, metavar="INTERVAL",
        help=
            "Use statistics collected over the last INTERVAL period. "
            "INTERVAL is a string parsable by PostgreSQL "
            "such as '5 minutes'.")
    parser.add_option(
        "-n", "--limit", dest="limit", type=int,
        default=15, metavar="NUM",
        help="Display the top NUM items in each category.")
    parser.add_option(
        "-b", "--bloat", dest="bloat", type=float,
        default=40, metavar="BLOAT",
        help="Display tables and indexes bloated by more than BLOAT%.")
    parser.add_option(
        "--min-bloat", dest="min_bloat", type=int,
        default=10000000, metavar="BLOAT",
        help="Don't report tables bloated less than BLOAT bytes.")
    parser.set_defaults(dbuser="database_stats_report")
    options, args = parser.parse_args()

    # Any two of the three range options determine the third; all
    # three would be over-specified.
    if options.from_ts and options.until_ts and options.interval:
        parser.error(
            "Only two of --from, --until and --interval may be specified.")

    con = connect(options.dbuser)
    cur = con.cursor()

    tables = list(get_table_stats(cur, options))
    if len(tables) == 0:
        parser.error("No statistics available in that time range.")
    # All rows share the same date_start/date_end, so any row gives
    # the actual sampled period.
    arbitrary_table = tables[0]
    interval = arbitrary_table.date_end - arbitrary_table.date_start
    # Length of the sampled period in seconds, used to turn raw
    # counter deltas into per-second rates.
    per_second = float(interval.days * 24 * 60 * 60 + interval.seconds)
    if per_second == 0:
        parser.error("Only one sample in that time range.")

    user_cpu = get_cpu_stats(cur, options)
    print "== Most Active Users =="
    print
    # Tuples are (avg_cpu, username), so sorting puts heaviest users
    # first. avg_cpu is in tenths of a percent, hence the / 10.
    for cpu, username in sorted(user_cpu, reverse=True)[:options.limit]:
        print "%40s || %10.2f%% CPU" % (username, float(cpu) / 10)

    print
    print "== Most Written Tables =="
    print
    tables_sort = [
        'total_tup_written', 'n_tup_upd', 'n_tup_ins', 'n_tup_del', 'relname']
    most_written_tables = sorted(
        tables, key=attrgetter(*tables_sort), reverse=True)
    for table in most_written_tables[:options.limit]:
        print "%40s || %10.2f tuples/sec" % (
            table.relname, table.total_tup_written / per_second)

    print
    print "== Most Read Tables =="
    print
    # These match the pg_user_table_stats view. schemaname is the
    # namespace (normally 'public'), relname is the table (relation)
    # name. total_tup_read is the total number of rows read.
    # idx_tup_fetch is the number of rows looked up using an index.
    tables_sort = ['total_tup_read', 'idx_tup_fetch', 'schemaname', 'relname']
    most_read_tables = sorted(
        tables, key=attrgetter(*tables_sort), reverse=True)
    for table in most_read_tables[:options.limit]:
        print "%40s || %10.2f tuples/sec" % (
            table.relname, table.total_tup_read / per_second)

    # 'r' selects relations (tables) from DatabaseDiskUtilization.
    table_bloat_stats = get_bloat_stats(cur, options, 'r')

    if not table_bloat_stats:
        print
        print "(There is no bloat information available in this time range.)"

    else:
        print
        print "== Most Bloated Tables =="
        print
        for bloated_table in table_bloat_stats[:options.limit]:
            print "%40s || %2d%% || %s of %s" % (
                bloated_table.name,
                bloated_table.end_bloat_percent,
                bloated_table.bloat_size,
                bloated_table.table_size)

        # 'i' selects indexes.
        index_bloat_stats = get_bloat_stats(cur, options, 'i')

        print
        print "== Most Bloated Indexes =="
        print
        for bloated_index in index_bloat_stats[:options.limit]:
            print "%65s || %2d%% || %s of %s" % (
                bloated_index.sub_name,
                bloated_index.end_bloat_percent,
                bloated_index.bloat_size,
                bloated_index.table_size)

        # Order bloat delta report by size of bloat increase.
        # We might want to change this to percentage bloat increase.
        bloating_sort_key = lambda x: x.delta_bloat_len

        table_bloating_stats = sorted(
            table_bloat_stats, key=bloating_sort_key, reverse=True)

        # Deltas are meaningless with a single disk-utilization sample.
        if table_bloating_stats[0].num_samples <= 1:
            print
            print fill(dedent("""\
                (There are not enough samples in this time range to display
                bloat change statistics)
                """))
        else:
            print
            print "== Most Bloating Tables =="
            print

            for bloated_table in table_bloating_stats[:options.limit]:
                # Bloat decreases are uninteresting, and would need to be in
                # a separate table sorted in reverse anyway.
                if bloated_table.delta_bloat_percent > 0:
                    print "%40s || +%4.2f%% || +%s" % (
                        bloated_table.name,
                        bloated_table.delta_bloat_percent,
                        bloated_table.delta_bloat_size)

            index_bloating_stats = sorted(
                index_bloat_stats, key=bloating_sort_key, reverse=True)

            print
            print "== Most Bloating Indexes =="
            print
            for bloated_index in index_bloating_stats[:options.limit]:
                # Bloat decreases are uninteresting, and would need to be in
                # a separate table sorted in reverse anyway.
                if bloated_index.delta_bloat_percent > 0:
                    print "%65s || +%4.2f%% || +%s" % (
                        bloated_index.sub_name,
                        bloated_index.delta_bloat_percent,
                        bloated_index.delta_bloat_size)
# Script entry point.
if __name__ == '__main__':
    main()