1
/******************************************************
INFORMATION SCHEMA innodb_trx, innodb_locks and
innodb_lock_waits tables fetch code.

The code below fetches information needed to fill those
3 dynamic tables and uploads it into a "transactions
table cache" for later retrieval.

Created July 17, 2007 Vasil Dimov
*******************************************************/
#include <mysql/plugin.h>

#include "mysql_addons.h"

#include "univ.i"
#include "buf0buf.h"
#include "dict0dict.h"
#include "ha0storage.h"
#include "ha_prototypes.h"
#include "hash0hash.h"
#include "lock0iter.h"
#include "lock0lock.h"
#include "mem0mem.h"
#include "mtr0mtr.h"
#include "page0page.h"
#include "rem0rec.h"
#include "row0row.h"
#include "srv0srv.h"
#include "sync0rw.h"
#include "sync0sync.h"
#include "sync0types.h"
#include "trx0i_s.h"
#include "trx0sys.h"
#include "trx0trx.h"
#include "ut0mem.h"
#include "ut0ut.h"
/* Initial number of rows in the table cache: this is also the size
of the first chunk that table_cache_create_empty_row() allocates. */
#define TABLE_CACHE_INITIAL_ROWSNUM	1024

/* Table cache's rows are stored in a set of chunks. When a new row is
added a new chunk is allocated if necessary. MEM_CHUNKS_IN_TABLE_CACHE
specifies the maximum number of chunks.
Assuming that the first one is 1024 rows (TABLE_CACHE_INITIAL_ROWSNUM)
and each subsequent is N/2 where N is the number of rows we have
allocated till now, then 39th chunk would have 1677416425 number of rows
and all chunks would have 3354832851 number of rows. */
#define MEM_CHUNKS_IN_TABLE_CACHE	39
/* The following are some testing auxiliary macros. Do not enable them
in a production environment. */

/* If this is enabled then lock folds will always be different
resulting in equal rows being put in a different cells of the hash
table. Checking for duplicates will be flawed because different
fold will be calculated when a row is searched in the hash table. */
/* #define TEST_LOCK_FOLD_ALWAYS_DIFFERENT */

/* This effectively kills the search-for-duplicate-before-adding-a-row
function, but searching in the hash is still performed. It will always
be assumed that lock is not present and insertion will be performed in
the hash table. */
/* #define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T */

/* This aggressively repeats adding each row many times. Depending on
the above settings this may be noop or may result in lots of rows being
added. */
/* #define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES */

/* Very similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T but hash
table search is not performed at all. */
/* #define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS */

/* Do not insert each row into the hash table, duplicates may appear
if this is enabled, also if this is enabled searching into the hash is
noop because it will be empty. */
/* #define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE */
/* Memory limit bookkeeping: how many bytes may still be stored in the
cache's string storage, given that TRX_I_S_MEM_LIMIT caps the total
memory consumed by the whole cache. */
#define MAX_ALLOWED_FOR_STORAGE(cache)		\
	(TRX_I_S_MEM_LIMIT			\
	 - (cache)->mem_allocd)

/* How many bytes may still be allocated with mem_alloc*() for table
cache chunks, also accounting for what the string storage already
occupies. */
#define MAX_ALLOWED_FOR_ALLOC(cache)		\
	(TRX_I_S_MEM_LIMIT			\
	 - (cache)->mem_allocd			\
	 - ha_storage_get_size((cache)->storage))
/* Memory for each table in the intermediate buffer is allocated in
100
separate chunks. These chunks are considered to be concatenated to
101
represent one flat array of rows. */
102
typedef struct i_s_mem_chunk_struct {
103
ulint offset; /* offset, in number of rows */
104
ulint rows_allocd; /* the size of this chunk, in number
106
void* base; /* start of the chunk */
109
/* This represents one table's cache. */
110
typedef struct i_s_table_cache_struct {
111
ulint rows_used; /* number of used rows */
112
ulint rows_allocd; /* number of allocated rows */
113
ulint row_size; /* size of a single row */
114
i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /* array of
115
memory chunks that stores the
119
/* This structure describes the intermediate buffer */
120
struct trx_i_s_cache_struct {
121
rw_lock_t rw_lock; /* read-write lock protecting
122
the rest of this structure */
123
ullint last_read; /* last time the cache was read;
124
measured in microseconds since
126
mutex_t last_read_mutex;/* mutex protecting the
127
last_read member - it is updated
128
inside a shared lock of the
130
i_s_table_cache_t innodb_trx; /* innodb_trx table */
131
i_s_table_cache_t innodb_locks; /* innodb_locks table */
132
i_s_table_cache_t innodb_lock_waits;/* innodb_lock_waits table */
133
/* the hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes */
134
#define LOCKS_HASH_CELLS_NUM 10000
135
hash_table_t* locks_hash; /* hash table used to eliminate
136
duplicate entries in the
137
innodb_locks table */
138
#define CACHE_STORAGE_INITIAL_SIZE 1024
139
#define CACHE_STORAGE_HASH_CELLS 2048
140
ha_storage_t* storage; /* storage for external volatile
141
data that can possibly not be
142
available later, when we release
144
ulint mem_allocd; /* the amount of memory
145
allocated with mem_alloc*() */
146
ibool is_truncated; /* this is TRUE if the memory
147
limit was hit and thus the data
148
in the cache is truncated */
151
/* This is the intermediate buffer where data needed to fill the
152
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
153
code in handler/i_s.cc. */
154
static trx_i_s_cache_t trx_i_s_cache_static;
155
UNIV_INTERN trx_i_s_cache_t* trx_i_s_cache = &trx_i_s_cache_static;
157
/***********************************************************************
158
For a record lock that is in waiting state retrieves the only bit that
159
is set, for a table lock returns ULINT_UNDEFINED. */
162
wait_lock_get_heap_no(
163
/*==================*/
164
/* out: record number within the heap */
165
const lock_t* lock) /* in: lock */
169
switch (lock_get_type(lock)) {
171
ret = lock_rec_find_set_bit(lock);
172
ut_a(ret != ULINT_UNDEFINED);
175
ret = ULINT_UNDEFINED;
184
/***********************************************************************
185
Initializes the members of a table cache. */
190
i_s_table_cache_t* table_cache, /* out: table cache */
191
size_t row_size) /* in: the size of a
196
table_cache->rows_used = 0;
197
table_cache->rows_allocd = 0;
198
table_cache->row_size = row_size;
200
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
202
/* the memory is actually allocated in
203
table_cache_create_empty_row() */
204
table_cache->chunks[i].base = NULL;
208
/***********************************************************************
209
Returns an empty row from a table cache. The row is allocated if no more
210
empty rows are available. The number of used rows is incremented.
211
If the memory limit is hit then NULL is returned and nothing is
215
table_cache_create_empty_row(
216
/*=========================*/
217
/* out: empty row, or
218
NULL if out of memory */
219
i_s_table_cache_t* table_cache, /* in/out: table cache */
220
trx_i_s_cache_t* cache) /* in/out: cache to record
227
ut_a(table_cache->rows_used <= table_cache->rows_allocd);
229
if (table_cache->rows_used == table_cache->rows_allocd) {
231
/* rows_used == rows_allocd means that new chunk needs
232
to be allocated: either no more empty rows in the
233
last allocated chunk or nothing has been allocated yet
234
(rows_num == rows_allocd == 0); */
236
i_s_mem_chunk_t* chunk;
242
/* find the first not allocated chunk */
243
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
245
if (table_cache->chunks[i].base == NULL) {
251
/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
252
have been allocated :-X */
253
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
255
/* allocate the chunk we just found */
259
/* first chunk, nothing is allocated yet */
260
req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
263
/* Memory is increased by the formula
264
new = old + old / 2; We are trying not to be
265
aggressive here (= using the common new = old * 2)
266
because the allocated memory will not be freed
267
until InnoDB exit (it is reused). So it is better
268
to once allocate the memory in more steps, but
269
have less unused/wasted memory than to use less
270
steps in allocation (which is done once in a
271
lifetime) but end up with lots of unused/wasted
273
req_rows = table_cache->rows_allocd / 2;
275
req_bytes = req_rows * table_cache->row_size;
277
if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {
282
chunk = &table_cache->chunks[i];
284
chunk->base = mem_alloc2(req_bytes, &got_bytes);
286
got_rows = got_bytes / table_cache->row_size;
288
cache->mem_allocd += got_bytes;
291
printf("allocating chunk %d req bytes=%lu, got bytes=%lu, "
293
"req rows=%lu, got rows=%lu\n",
294
i, req_bytes, got_bytes,
295
table_cache->row_size,
299
chunk->rows_allocd = got_rows;
301
table_cache->rows_allocd += got_rows;
303
/* adjust the offset of the next chunk */
304
if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {
306
table_cache->chunks[i + 1].offset
307
= chunk->offset + chunk->rows_allocd;
310
/* return the first empty row in the newly allocated
318
/* there is an empty row, no need to allocate new
321
/* find the first chunk that contains allocated but
323
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
325
if (table_cache->chunks[i].offset
326
+ table_cache->chunks[i].rows_allocd
327
> table_cache->rows_used) {
333
/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
335
table_cache->rows_used != table_cache->rows_allocd means
336
exactly the opposite - there are allocated but
337
empty/unused rows :-X */
338
ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);
340
chunk_start = (char*) table_cache->chunks[i].base;
341
offset = table_cache->rows_used
342
- table_cache->chunks[i].offset;
344
row = chunk_start + offset * table_cache->row_size;
347
table_cache->rows_used++;
352
/***********************************************************************
353
Fills i_s_trx_row_t object.
354
If memory can not be allocated then FALSE is returned. */
361
i_s_trx_row_t* row, /* out: result object
363
const trx_t* trx, /* in: transaction to
365
const i_s_locks_row_t* requested_lock_row,/* in: pointer to the
367
innodb_locks if trx is
368
waiting or NULL if trx
370
trx_i_s_cache_t* cache) /* in/out: cache into
371
which to copy volatile
374
row->trx_id = trx_get_id(trx);
375
row->trx_started = (ib_time_t) trx->start_time;
376
row->trx_state = trx_get_que_state_str(trx);
378
if (trx->wait_lock != NULL) {
380
ut_a(requested_lock_row != NULL);
382
row->requested_lock_row = requested_lock_row;
383
row->trx_wait_started = (ib_time_t) trx->wait_started;
386
ut_a(requested_lock_row == NULL);
388
row->requested_lock_row = NULL;
389
row->trx_wait_started = 0;
392
row->trx_weight = (ullint) ut_conv_dulint_to_longlong(TRX_WEIGHT(trx));
394
row->trx_mysql_thread_id = ib_thd_get_thread_id(trx->mysql_thd);
396
if (trx->mysql_query_str != NULL && *trx->mysql_query_str != NULL) {
398
if (strlen(*trx->mysql_query_str)
399
> TRX_I_S_TRX_QUERY_MAX_LEN) {
401
char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
403
memcpy(query, *trx->mysql_query_str,
404
TRX_I_S_TRX_QUERY_MAX_LEN);
405
query[TRX_I_S_TRX_QUERY_MAX_LEN] = '\0';
407
row->trx_query = ha_storage_put_memlim(
408
cache->storage, query,
409
TRX_I_S_TRX_QUERY_MAX_LEN + 1,
410
MAX_ALLOWED_FOR_STORAGE(cache));
413
row->trx_query = ha_storage_put_str_memlim(
414
cache->storage, *trx->mysql_query_str,
415
MAX_ALLOWED_FOR_STORAGE(cache));
418
if (row->trx_query == NULL) {
424
row->trx_query = NULL;
430
/***********************************************************************
431
Format the nth field of "rec" and put it in "buf". The result is always
432
'\0'-terminated. Returns the number of bytes that were written to "buf"
433
(including the terminating '\0'). */
438
/* out: end of the result */
439
char* buf, /* out: buffer */
440
ulint buf_size,/* in: buffer size in bytes */
441
ulint n, /* in: number of field */
442
const dict_index_t* index, /* in: index */
443
const rec_t* rec, /* in: record */
444
const ulint* offsets)/* in: record offsets, returned
445
by rec_get_offsets() */
449
dict_field_t* dict_field;
452
ut_ad(rec_offs_validate(rec, NULL, offsets));
462
/* we must append ", " before the actual data */
470
memcpy(buf, ", ", 3);
477
/* now buf_size >= 1 */
479
data = rec_get_nth_field(rec, offsets, n, &data_len);
481
dict_field = dict_index_get_nth_field(index, n);
483
ret += row_raw_format((const char*) data, data_len,
484
dict_field, buf, buf_size);
489
/***********************************************************************
490
Fills the "lock_data" member of i_s_locks_row_t object.
491
If memory can not be allocated then FALSE is returned. */
496
/* out: FALSE if allocation fails */
497
const char** lock_data,/* out: "lock_data" to fill */
498
const lock_t* lock, /* in: lock used to find the data */
499
ulint heap_no,/* in: rec num used to find the data */
500
trx_i_s_cache_t* cache) /* in/out: cache where to store
505
const buf_block_t* block;
509
ut_a(lock_get_type(lock) == LOCK_REC);
513
block = buf_page_try_get(lock_rec_get_space_id(lock),
514
lock_rec_get_page_no(lock),
526
page = (const page_t*) buf_block_get_frame(block);
528
rec = page_find_rec_with_heap_no(page, heap_no);
530
if (page_rec_is_infimum(rec)) {
532
*lock_data = ha_storage_put_str_memlim(
533
cache->storage, "infimum pseudo-record",
534
MAX_ALLOWED_FOR_STORAGE(cache));
535
} else if (page_rec_is_supremum(rec)) {
537
*lock_data = ha_storage_put_str_memlim(
538
cache->storage, "supremum pseudo-record",
539
MAX_ALLOWED_FOR_STORAGE(cache));
542
const dict_index_t* index;
545
ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
547
char buf[TRX_I_S_LOCK_DATA_MAX_LEN];
551
rec_offs_init(offsets_onstack);
552
offsets = offsets_onstack;
554
index = lock_rec_get_index(lock);
556
n_fields = dict_index_get_n_unique(index);
561
offsets = rec_get_offsets(rec, index, offsets, n_fields,
564
/* format and store the data */
567
for (i = 0; i < n_fields; i++) {
569
buf_used += put_nth_field(
570
buf + buf_used, sizeof(buf) - buf_used,
571
i, index, rec, offsets) - 1;
574
*lock_data = (const char*) ha_storage_put_memlim(
575
cache->storage, buf, buf_used + 1,
576
MAX_ALLOWED_FOR_STORAGE(cache));
578
if (UNIV_UNLIKELY(heap != NULL)) {
580
/* this means that rec_get_offsets() has created a new
581
heap and has stored offsets in it; check that this is
582
really the case and free the heap */
583
ut_a(offsets != offsets_onstack);
590
if (*lock_data == NULL) {
598
/***********************************************************************
599
Fills i_s_locks_row_t object. Returns its first argument.
600
If memory can not be allocated then FALSE is returned. */
605
/* out: FALSE if allocation fails */
606
i_s_locks_row_t* row, /* out: result object that's filled */
607
const lock_t* lock, /* in: lock to get data from */
608
ulint heap_no,/* in: lock's record number
609
or ULINT_UNDEFINED if the lock
611
trx_i_s_cache_t* cache) /* in/out: cache into which to copy
614
row->lock_trx_id = lock_get_trx_id(lock);
615
row->lock_mode = lock_get_mode_str(lock);
616
row->lock_type = lock_get_type_str(lock);
618
row->lock_table = ha_storage_put_str_memlim(
619
cache->storage, lock_get_table_name(lock),
620
MAX_ALLOWED_FOR_STORAGE(cache));
622
/* memory could not be allocated */
623
if (row->lock_table == NULL) {
628
switch (lock_get_type(lock)) {
630
row->lock_index = ha_storage_put_str_memlim(
631
cache->storage, lock_rec_get_index_name(lock),
632
MAX_ALLOWED_FOR_STORAGE(cache));
634
/* memory could not be allocated */
635
if (row->lock_index == NULL) {
640
row->lock_space = lock_rec_get_space_id(lock);
641
row->lock_page = lock_rec_get_page_no(lock);
642
row->lock_rec = heap_no;
644
if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {
646
/* memory could not be allocated */
652
row->lock_index = NULL;
654
row->lock_space = ULINT_UNDEFINED;
655
row->lock_page = ULINT_UNDEFINED;
656
row->lock_rec = ULINT_UNDEFINED;
658
row->lock_data = NULL;
665
row->lock_table_id = lock_get_table_id(lock);
667
row->hash_chain.value = row;
672
/***********************************************************************
673
Fills i_s_lock_waits_row_t object. Returns its first argument. */
675
i_s_lock_waits_row_t*
678
/* out: result object
680
i_s_lock_waits_row_t* row, /* out: result object
682
const i_s_locks_row_t* requested_lock_row,/* in: pointer to the
683
relevant requested lock
684
row in innodb_locks */
685
const i_s_locks_row_t* blocking_lock_row)/* in: pointer to the
686
relevant blocking lock
687
row in innodb_locks */
689
row->requested_lock_row = requested_lock_row;
690
row->blocking_lock_row = blocking_lock_row;
695
/***********************************************************************
696
Calculates a hash fold for a lock. For a record lock the fold is
697
calculated from 4 elements, which uniquely identify a lock at a given
698
point in time: transaction id, space id, page number, record number.
699
For a table lock the fold is table's id. */
705
const lock_t* lock, /* in: lock object to fold */
706
ulint heap_no)/* in: lock's record number
707
or ULINT_UNDEFINED if the lock
710
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
711
static ulint fold = 0;
717
switch (lock_get_type(lock)) {
719
ut_a(heap_no != ULINT_UNDEFINED);
721
ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
722
lock_rec_get_space_id(lock));
724
ret = ut_fold_ulint_pair(ret,
725
lock_rec_get_page_no(lock));
727
ret = ut_fold_ulint_pair(ret, heap_no);
731
/* this check is actually not necessary for continuing
732
correct operation, but something must have gone wrong if
734
ut_a(heap_no == ULINT_UNDEFINED);
736
ret = (ulint) lock_get_table_id(lock);
747
/***********************************************************************
748
Checks whether i_s_locks_row_t object represents a lock_t object. */
753
/* out: TRUE if they match */
754
const i_s_locks_row_t* row, /* in: innodb_locks row */
755
const lock_t* lock, /* in: lock object */
756
ulint heap_no)/* in: lock's record number
757
or ULINT_UNDEFINED if the lock
760
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
763
switch (lock_get_type(lock)) {
765
ut_a(heap_no != ULINT_UNDEFINED);
767
return(row->lock_trx_id == lock_get_trx_id(lock)
768
&& row->lock_space == lock_rec_get_space_id(lock)
769
&& row->lock_page == lock_rec_get_page_no(lock)
770
&& row->lock_rec == heap_no);
773
/* this check is actually not necessary for continuing
774
correct operation, but something must have gone wrong if
776
ut_a(heap_no == ULINT_UNDEFINED);
778
return(row->lock_trx_id == lock_get_trx_id(lock)
779
&& row->lock_table_id == lock_get_table_id(lock));
788
/***********************************************************************
789
Searches for a row in the innodb_locks cache that has a specified id.
790
This happens in O(1) time since a hash table is used. Returns pointer to
791
the row or NULL if none is found. */
796
/* out: row or NULL */
797
trx_i_s_cache_t* cache, /* in: cache */
798
const lock_t* lock, /* in: lock to search for */
799
ulint heap_no)/* in: lock's record number
800
or ULINT_UNDEFINED if the lock
803
i_s_hash_chain_t* hash_chain;
806
/* hash_chain->"next" */
811
fold_lock(lock, heap_no),
812
/* the type of the next variable */
814
/* auxiliary variable */
816
/* this determines if we have found the lock */
817
locks_row_eq_lock(hash_chain->value, lock, heap_no));
819
if (hash_chain == NULL) {
825
return(hash_chain->value);
828
/***********************************************************************
829
Adds new element to the locks cache, enlarging it if necessary.
830
Returns a pointer to the added row. If the row is already present then
831
no row is added and a pointer to the existing row is returned.
832
If row can not be allocated then NULL is returned. */
838
trx_i_s_cache_t* cache, /* in/out: cache */
839
const lock_t* lock, /* in: the element to add */
840
ulint heap_no)/* in: lock's record number
841
or ULINT_UNDEFINED if the lock
844
i_s_locks_row_t* dst_row;
846
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
848
for (i = 0; i < 10000; i++) {
850
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
851
/* quit if this lock is already present */
852
dst_row = search_innodb_locks(cache, lock, heap_no);
853
if (dst_row != NULL) {
859
dst_row = (i_s_locks_row_t*)
860
table_cache_create_empty_row(&cache->innodb_locks, cache);
862
/* memory could not be allocated */
863
if (dst_row == NULL) {
868
if (!fill_locks_row(dst_row, lock, heap_no, cache)) {
870
/* memory could not be allocated */
871
cache->innodb_locks.rows_used--;
875
#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
877
/* the type used in the hash chain */
879
/* hash_chain->"next" */
884
fold_lock(lock, heap_no),
885
/* add this data to the hash */
886
&dst_row->hash_chain);
888
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
895
/***********************************************************************
896
Adds new pair of locks to the lock waits cache.
897
If memory can not be allocated then FALSE is returned. */
900
add_lock_wait_to_cache(
901
/*===================*/
904
trx_i_s_cache_t* cache, /* in/out: cache */
905
const i_s_locks_row_t* requested_lock_row,/* in: pointer to the
906
relevant requested lock
907
row in innodb_locks */
908
const i_s_locks_row_t* blocking_lock_row)/* in: pointer to the
909
relevant blocking lock
910
row in innodb_locks */
912
i_s_lock_waits_row_t* dst_row;
914
dst_row = (i_s_lock_waits_row_t*)
915
table_cache_create_empty_row(&cache->innodb_lock_waits,
918
/* memory could not be allocated */
919
if (dst_row == NULL) {
924
fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);
929
/***********************************************************************
930
Adds transaction's relevant (important) locks to cache.
931
If the transaction is waiting, then the wait lock is added to
932
innodb_locks and a pointer to the added row is returned in
933
requested_lock_row, otherwise requested_lock_row is set to NULL.
934
If rows can not be allocated then FALSE is returned and the value of
935
requested_lock_row is undefined. */
938
add_trx_relevant_locks_to_cache(
939
/*============================*/
940
/* out: FALSE if allocation fails */
941
trx_i_s_cache_t* cache, /* in/out: cache */
942
const trx_t* trx, /* in: transaction */
943
i_s_locks_row_t** requested_lock_row)/* out: pointer to the
944
requested lock row, or NULL or
947
/* If transaction is waiting we add the wait lock and all locks
948
from another transactions that are blocking the wait lock. */
949
if (trx->que_state == TRX_QUE_LOCK_WAIT) {
951
const lock_t* curr_lock;
952
ulint wait_lock_heap_no;
953
i_s_locks_row_t* blocking_lock_row;
954
lock_queue_iterator_t iter;
956
ut_a(trx->wait_lock != NULL);
959
= wait_lock_get_heap_no(trx->wait_lock);
961
/* add the requested lock */
963
= add_lock_to_cache(cache, trx->wait_lock,
966
/* memory could not be allocated */
967
if (*requested_lock_row == NULL) {
972
/* then iterate over the locks before the wait lock and
973
add the ones that are blocking it */
975
lock_queue_iterator_reset(&iter, trx->wait_lock,
978
curr_lock = lock_queue_iterator_get_prev(&iter);
979
while (curr_lock != NULL) {
981
if (lock_has_to_wait(trx->wait_lock,
984
/* add the lock that is
985
blocking trx->wait_lock */
989
/* heap_no is the same
990
for the wait and waited
994
/* memory could not be allocated */
995
if (blocking_lock_row == NULL) {
1000
/* add the relation between both locks
1001
to innodb_lock_waits */
1002
if (!add_lock_wait_to_cache(
1003
cache, *requested_lock_row,
1004
blocking_lock_row)) {
1006
/* memory could not be allocated */
1011
curr_lock = lock_queue_iterator_get_prev(&iter);
1015
*requested_lock_row = NULL;
1021
/***********************************************************************
1022
Checks if the cache can safely be updated. */
1025
can_cache_be_updated(
1026
/*=================*/
1027
trx_i_s_cache_t* cache) /* in: cache */
1031
/* The minimum time that a cache must not be updated after it has been
1032
read for the last time; measured in microseconds. We use this technique
1033
to ensure that SELECTs which join several INFORMATION SCHEMA tables read
1034
the same version of the cache. */
1035
#define CACHE_MIN_IDLE_TIME_US 100000 /* 0.1 sec */
1037
/* Here we read cache->last_read without acquiring its mutex
1038
because last_read is only updated when a shared rw lock on the
1039
whole cache is being held (see trx_i_s_cache_end_read()) and
1040
we are currently holding an exclusive rw lock on the cache.
1041
So it is not possible for last_read to be updated while we are
1044
#ifdef UNIV_SYNC_DEBUG
1045
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1048
now = ut_time_us(NULL);
1049
if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
1057
/***********************************************************************
1058
Declare a cache empty, preparing it to be filled up. Not all resources
1059
are freed because they can be reused. */
1062
trx_i_s_cache_clear(
1063
/*================*/
1064
trx_i_s_cache_t* cache) /* out: cache to clear */
1066
cache->innodb_trx.rows_used = 0;
1067
cache->innodb_locks.rows_used = 0;
1068
cache->innodb_lock_waits.rows_used = 0;
1070
hash_table_clear(cache->locks_hash);
1072
ha_storage_empty(&cache->storage);
1075
/***********************************************************************
1076
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
1077
table cache buffer. Cache must be locked for write. */
1080
fetch_data_into_cache(
1081
/*==================*/
1082
trx_i_s_cache_t* cache) /* in/out: cache */
1085
i_s_trx_row_t* trx_row;
1086
i_s_locks_row_t* requested_lock_row;
1088
trx_i_s_cache_clear(cache);
1090
/* We iterate over the list of all transactions and add each one
1091
to innodb_trx's cache. We also add all locks that are relevant
1092
to each transaction into innodb_locks' and innodb_lock_waits'
1095
for (trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
1097
trx = UT_LIST_GET_NEXT(trx_list, trx)) {
1099
if (!add_trx_relevant_locks_to_cache(cache, trx,
1100
&requested_lock_row)) {
1102
cache->is_truncated = TRUE;
1106
trx_row = (i_s_trx_row_t*)
1107
table_cache_create_empty_row(&cache->innodb_trx,
1110
/* memory could not be allocated */
1111
if (trx_row == NULL) {
1113
cache->is_truncated = TRUE;
1117
if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {
1119
/* memory could not be allocated */
1120
cache->innodb_trx.rows_used--;
1121
cache->is_truncated = TRUE;
1126
cache->is_truncated = FALSE;
1129
/***********************************************************************
1130
Update the transactions cache if it has not been read for some time.
1131
Called from handler/i_s.cc. */
1134
trx_i_s_possibly_fetch_data_into_cache(
1135
/*===================================*/
1136
/* out: 0 - fetched, 1 - not */
1137
trx_i_s_cache_t* cache) /* in/out: cache */
1139
if (!can_cache_be_updated(cache)) {
1144
/* We are going to access trx->query in all transactions */
1145
innobase_mysql_prepare_print_arbitrary_thd();
1147
/* We need to read trx_sys and record/table lock queues */
1148
mutex_enter(&kernel_mutex);
1150
fetch_data_into_cache(cache);
1152
mutex_exit(&kernel_mutex);
1154
innobase_mysql_end_print_arbitrary_thd();
1159
/***********************************************************************
1160
Returns TRUE if the data in the cache is truncated due to the memory
1161
limit posed by TRX_I_S_MEM_LIMIT. */
1164
trx_i_s_cache_is_truncated(
1165
/*=======================*/
1166
/* out: TRUE if truncated */
1167
trx_i_s_cache_t* cache) /* in: cache */
1169
return(cache->is_truncated);
1172
/***********************************************************************
1173
Initialize INFORMATION SCHEMA trx related cache. */
1178
trx_i_s_cache_t* cache) /* out: cache to init */
1180
/* The latching is done in the following order:
1181
acquire trx_i_s_cache_t::rw_lock, X
1182
acquire kernel_mutex
1183
release kernel_mutex
1184
release trx_i_s_cache_t::rw_lock
1185
acquire trx_i_s_cache_t::rw_lock, S
1186
acquire trx_i_s_cache_t::last_read_mutex
1187
release trx_i_s_cache_t::last_read_mutex
1188
release trx_i_s_cache_t::rw_lock */
1190
rw_lock_create(&cache->rw_lock, SYNC_TRX_I_S_RWLOCK);
1192
cache->last_read = 0;
1194
mutex_create(&cache->last_read_mutex, SYNC_TRX_I_S_LAST_READ);
1196
table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
1197
table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
1198
table_cache_init(&cache->innodb_lock_waits,
1199
sizeof(i_s_lock_waits_row_t));
1201
cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);
1203
cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
1204
CACHE_STORAGE_HASH_CELLS);
1206
cache->mem_allocd = 0;
1208
cache->is_truncated = FALSE;
1211
/***********************************************************************
1212
Issue a shared/read lock on the tables cache. */
1215
trx_i_s_cache_start_read(
1216
/*=====================*/
1217
trx_i_s_cache_t* cache) /* in: cache */
1219
rw_lock_s_lock(&cache->rw_lock);
1222
/***********************************************************************
1223
Release a shared/read lock on the tables cache. */
1226
trx_i_s_cache_end_read(
1227
/*===================*/
1228
trx_i_s_cache_t* cache) /* in: cache */
1232
#ifdef UNIV_SYNC_DEBUG
1233
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
1236
/* update cache last read time */
1237
now = ut_time_us(NULL);
1238
mutex_enter(&cache->last_read_mutex);
1239
cache->last_read = now;
1240
mutex_exit(&cache->last_read_mutex);
1242
rw_lock_s_unlock(&cache->rw_lock);
1245
/***********************************************************************
1246
Issue an exclusive/write lock on the tables cache. */
1249
trx_i_s_cache_start_write(
1250
/*======================*/
1251
trx_i_s_cache_t* cache) /* in: cache */
1253
rw_lock_x_lock(&cache->rw_lock);
1256
/***********************************************************************
1257
Release an exclusive/write lock on the tables cache. */
1260
trx_i_s_cache_end_write(
1261
/*====================*/
1262
trx_i_s_cache_t* cache) /* in: cache */
1264
#ifdef UNIV_SYNC_DEBUG
1265
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1268
rw_lock_x_unlock(&cache->rw_lock);
1271
/***********************************************************************
1272
Selects a INFORMATION SCHEMA table cache from the whole cache. */
1277
/* out: table cache */
1278
trx_i_s_cache_t* cache, /* in: whole cache */
1279
enum i_s_table table) /* in: which table */
1281
i_s_table_cache_t* table_cache;
1283
#ifdef UNIV_SYNC_DEBUG
1284
ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
1285
|| rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
1289
case I_S_INNODB_TRX:
1290
table_cache = &cache->innodb_trx;
1292
case I_S_INNODB_LOCKS:
1293
table_cache = &cache->innodb_locks;
1295
case I_S_INNODB_LOCK_WAITS:
1296
table_cache = &cache->innodb_lock_waits;
1302
return(table_cache);
1305
/***********************************************************************
1306
Retrieves the number of used rows in the cache for a given
1307
INFORMATION SCHEMA table. */
1310
trx_i_s_cache_get_rows_used(
1311
/*========================*/
1312
/* out: number of rows */
1313
trx_i_s_cache_t* cache, /* in: cache */
1314
enum i_s_table table) /* in: which table */
1316
i_s_table_cache_t* table_cache;
1318
table_cache = cache_select_table(cache, table);
1320
return(table_cache->rows_used);
1323
/***********************************************************************
1324
Retrieves the nth row (zero-based) in the cache for a given
1325
INFORMATION SCHEMA table. */
1328
trx_i_s_cache_get_nth_row(
1329
/*======================*/
1331
trx_i_s_cache_t* cache, /* in: cache */
1332
enum i_s_table table, /* in: which table */
1333
ulint n) /* in: row number */
1335
i_s_table_cache_t* table_cache;
1339
table_cache = cache_select_table(cache, table);
1341
ut_a(n < table_cache->rows_used);
1345
for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {
1347
if (table_cache->chunks[i].offset
1348
+ table_cache->chunks[i].rows_allocd > n) {
1350
row = (char*) table_cache->chunks[i].base
1351
+ (n - table_cache->chunks[i].offset)
1352
* table_cache->row_size;
1362
/***********************************************************************
1363
Crafts a lock id string from a i_s_locks_row_t object. Returns its
1364
second argument. This function aborts if there is not enough space in
1365
lock_id. Be sure to provide at least TRX_I_S_LOCK_ID_MAX_LEN + 1 if you
1366
want to be 100% sure that it will not abort. */
1369
trx_i_s_create_lock_id(
1370
/*===================*/
1371
/* out: resulting lock id */
1372
const i_s_locks_row_t* row, /* in: innodb_locks row */
1373
char* lock_id,/* out: resulting lock_id */
1374
ulint lock_id_size)/* in: size of the lock id
1379
/* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */
1381
if (row->lock_space != ULINT_UNDEFINED) {
1383
res_len = ut_snprintf(lock_id, lock_id_size,
1384
TRX_ID_FMT ":%lu:%lu:%lu",
1385
row->lock_trx_id, row->lock_space,
1386
row->lock_page, row->lock_rec);
1389
res_len = ut_snprintf(lock_id, lock_id_size,
1392
row->lock_table_id);
1395
/* the typecast is safe because snprintf(3) never returns
1398
ut_a((ulint) res_len < lock_id_size);