@@ row_vers_impl_x_locked_off_kernel() @@
 Finds out if an active transaction has inserted or modified a secondary
 index record. NOTE: the kernel mutex is temporarily released in this
 function! */

 trx_t*
 row_vers_impl_x_locked_off_kernel(
 /*==============================*/
 				/* out: NULL if committed, else the active
 				transaction; NOTE that the kernel mutex is
 				temporarily released! */
-	rec_t*		rec,	/* in: record in a secondary index */
+	const rec_t*	rec,	/* in: record in a secondary index */
 	dict_index_t*	index,	/* in: the secondary index */
 	const ulint*	offsets)/* in: rec_get_offsets(rec, index) */
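This hunk of the row0vers.c patch is the const-correctness part of the change; the behavior is unchanged. For orientation, a minimal sketch of how the lock system typically consults this function while holding the kernel mutex (the surrounding variable names are illustrative, not part of this patch):

	trx_t*	impl_trx;

	ut_ad(mutex_own(&kernel_mutex));

	/* May temporarily release and re-acquire the kernel mutex. */
	impl_trx = row_vers_impl_x_locked_off_kernel(rec, index, offsets);

	if (impl_trx) {
		/* An active transaction inserted or modified rec last;
		it is treated as holding an implicit x-lock on rec. */
	}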
@@ row_vers_impl_x_locked_off_kernel() @@
 		mem_heap_free(heap2); /* free version and clust_offsets */
...
 		if (prev_version) {
 			clust_offsets = rec_get_offsets(
 				prev_version, clust_index, NULL,
 				ULINT_UNDEFINED, &heap);
+			/* The stack of versions is locked by mtr.
+			Thus, it is safe to fetch the prefixes for
+			externally stored columns. */
 			row = row_build(ROW_COPY_POINTERS, clust_index,
-					prev_version, clust_offsets, heap);
-			entry = row_build_index_entry(row, index, heap);
+					prev_version, clust_offsets,
+					NULL, &ext, heap);
+			entry = row_build_index_entry(row, ext, index, heap);
+			/* entry may be NULL if a record was inserted
+			in place of a deleted record, and the BLOB
+			pointers of the new record were not
+			initialized yet. But in that case,
+			prev_version should be NULL. */
...
 		mutex_enter(&kernel_mutex);
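The mem_heap_free(heap2) line at the top of this hunk belongs to a two-heap rotation that the function uses while walking the version chain: each iteration builds the previous version into a fresh heap, then frees the heap holding the version it just consumed. A condensed sketch of the loop, assuming the usual trx_undo_prev_version_build() call shape (error handling abridged; heap initially holds clust_offsets for version == rec):

	version = rec;

	for (;;) {
		mem_heap_t*	heap2 = heap;

		heap = mem_heap_create(1024);

		/* Rebuild the next older version into the new heap. */
		err = trx_undo_prev_version_build(rec, mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2);	/* free version and clust_offsets */

		if (err != DB_SUCCESS || !prev_version) {
			break;	/* no older version exists or is needed */
		}

		/* ... build row and entry as in the hunk above ... */

		version = prev_version;
	}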
@@ row_vers_must_preserve_del_marked() @@
 /*********************************************************************
 Finds out if we must preserve a delete marked earlier version of a clustered
 index record, because it is >= the purge view. */

 ibool
 row_vers_must_preserve_del_marked(
 /*==============================*/
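The body of row_vers_must_preserve_del_marked() is not part of this hunk. As a sketch of the decision it implements, assuming it defers to the purge system as the "purge view" wording suggests (call names are this writer's assumption, not taken from the patch):

	/* Sketch: keep a delete-marked version as long as the purge
	view might still require the update undo log of its trx. */
	mtr_s_lock(&(purge_sys->latch), mtr);

	if (trx_purge_update_undo_must_exist(trx_id)) {
		/* Purge is not yet allowed to remove this
		delete-marked version; preserve it. */
		return(TRUE);
	}

	return(FALSE);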
@@ row_vers_old_has_index_entry() @@
 if there is any not delete marked version of the record where the trx
 id >= purge view, and the secondary index entry and ientry are identified in
 the alphabetical ordering; exactly in this case we return TRUE. */

 ibool
 row_vers_old_has_index_entry(
 /*=========================*/
 				/* out: TRUE if earlier version should have */
 	ibool		also_curr,/* in: TRUE if also rec is included in the
 				versions to search; otherwise only versions
 				prior to it are searched */
-	rec_t*		rec,	/* in: record in the clustered index; the
+	const rec_t*	rec,	/* in: record in the clustered index; the
 				caller must have a latch on the page */
 	mtr_t*		mtr,	/* in: mtr holding the latch on rec; it will
 				also hold the latch on purge_view */
 	dict_index_t*	index,	/* in: the secondary index */
-	dtuple_t*	ientry)	/* in: the secondary index entry */
+	const dtuple_t*	ientry)	/* in: the secondary index entry */
 {
+	const rec_t*	version;
 	rec_t*		prev_version;
 	dict_index_t*	clust_index;
 	ulint*		clust_offsets;
 	mem_heap_t*	heap;
 	mem_heap_t*	heap2;
...
+	const dtuple_t*	entry;
...
-	ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
-	      || mtr_memo_contains(mtr, buf_block_align(rec),
-				   MTR_MEMO_PAGE_S_FIX));
+	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
+	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
 #endif /* UNIV_SYNC_DEBUG */
...
 					ULINT_UNDEFINED, &heap);

 	if (also_curr && !rec_get_deleted_flag(rec, comp)) {
...
+		/* The stack of versions is locked by mtr.
+		Thus, it is safe to fetch the prefixes for
+		externally stored columns. */
 		row = row_build(ROW_COPY_POINTERS, clust_index,
-				rec, clust_offsets, heap);
-		entry = row_build_index_entry(row, index, heap);
+				rec, clust_offsets, NULL, &ext, heap);
+		entry = row_build_index_entry(row, ext, index, heap);
+
+		/* If entry == NULL, the record contains unset BLOB
+		pointers. This must be a freshly inserted record. If
+		this is called from
+		row_purge_remove_sec_if_poss_low(), the thread will
+		hold latches on the clustered index and the secondary
+		index. Because the insert works in three steps:
+
+			(1) insert the record to clustered index
+			(2) store the BLOBs and update BLOB pointers
+			(3) insert records to secondary indexes
+
+		the purge thread can safely ignore freshly inserted
+		records and delete the secondary index record. The
+		thread that inserted the new record will be inserting
+		the secondary index records. */

 		/* NOTE that we cannot do the comparison as binary
 		fields because the row is maybe being modified so that
-		the clustered index record has already been updated
-		to a different binary value in a char field, but the
+		the clustered index record has already been updated to
+		a different binary value in a char field, but the
 		collation identifies the old and new value anyway! */
-		if (dtuple_datas_are_ordering_equal(ientry, entry)) {
+		if (entry && !dtuple_coll_cmp(ientry, entry)) {

 			mem_heap_free(heap);
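Taken together, the new NULL check and the switch to dtuple_coll_cmp() implement the decision described in the comments above. Restated as an annotated sketch (not a verbatim excerpt):

	if (entry == NULL) {
		/* Unset BLOB pointers: the inserting transaction has
		completed step (1) but not step (2); since step (3)
		has not happened either, no version of this row can
		match ientry, and purge may remove the old secondary
		index record. The inserter will add the new ones. */
	} else if (!dtuple_coll_cmp(ientry, entry)) {
		/* This version's entry collates equal to ientry, so
		the secondary index record must be preserved. */
	}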
@@ row_vers_old_has_index_entry() @@
 						NULL, ULINT_UNDEFINED, &heap);

 		if (!rec_get_deleted_flag(prev_version, comp)) {
...
+			/* The stack of versions is locked by mtr.
+			Thus, it is safe to fetch the prefixes for
+			externally stored columns. */
 			row = row_build(ROW_COPY_POINTERS, clust_index,
-					prev_version, clust_offsets, heap);
-			entry = row_build_index_entry(row, index, heap);
+					prev_version, clust_offsets,
+					NULL, &ext, heap);
+			entry = row_build_index_entry(row, ext, index, heap);
+
+			/* If entry == NULL, the record contains unset
+			BLOB pointers. This must be a freshly
+			inserted record that we can safely ignore.
+			For the justification, see the comments after
+			the previous row_build_index_entry() call. */

 			/* NOTE that we cannot do the comparison as binary
 			fields because maybe the secondary index record has
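Both "cannot do the comparison as binary fields" notes make the same point: a char field may already hold a different binary value that the column's collation still identifies with the old one, which is why dtuple_coll_cmp() is used rather than a byte compare. A standalone toy illustration, using strcasecmp as a stand-in for a case-insensitive collation:

	#include <stdio.h>
	#include <string.h>
	#include <strings.h>

	int
	main(void)
	{
		const char*	ientry_val = "abc";	/* old value */
		const char*	entry_val = "ABC";	/* updated char field */

		/* A binary comparison sees two different values... */
		printf("binary differ: %d\n",
		       strcmp(ientry_val, entry_val) != 0);

		/* ...but the collation identifies them, so the
		secondary index entry must be treated as equal. */
		printf("collation equal: %d\n",
		       strcasecmp(ientry_val, entry_val) == 0);

		return(0);
	}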
@@ row_vers_build_for_consistent_read() @@
 Constructs the version of a clustered index record which a consistent
 read should see. We assume that the trx id stored in rec is such that
 the consistent read should not see rec in its present version. */

 ulint
 row_vers_build_for_consistent_read(
 /*===============================*/
 				/* out: DB_SUCCESS or DB_MISSING_HISTORY */
-	rec_t*		rec,	/* in: record in a clustered index; the
+	const rec_t*	rec,	/* in: record in a clustered index; the
 				caller must have a latch on the page; this
 				latch locks the top of the stack of versions
 				of this record */
...
 	mem_heap_t**	offset_heap,/* in/out: memory heap from which
 				the offsets are allocated */
 	mem_heap_t*	in_heap,/* in: memory heap from which the memory for
-				old_vers is allocated; memory for possible
+				*old_vers is allocated; memory for possible
 				intermediate versions is allocated and freed
 				locally within the function */
 	rec_t**		old_vers)/* out, own: old version, or NULL if the
 				record does not exist in the view, that is,
 				it was freshly inserted afterwards */
 {
+	const rec_t*	version;
 	rec_t*		prev_version;
...
 	mem_heap_t*	heap	= NULL;
...
-	ut_ad(index->type & DICT_CLUSTERED);
-	ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
-	      || mtr_memo_contains(mtr, buf_block_align(rec),
-				   MTR_MEMO_PAGE_S_FIX));
+	ut_ad(dict_index_is_clust(index));
+	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
+	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
 #endif /* UNIV_SYNC_DEBUG */
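For context, a sketch of how a consistent read path is expected to invoke this function after deciding that the current version is too new for the read view (the caller-side names follow the usual InnoDB pattern and are illustrative):

	if (!lock_clust_rec_cons_read_sees(rec, index, offsets, view)) {
		rec_t*	old_vers;

		err = row_vers_build_for_consistent_read(
			rec, mtr, index, &offsets, view,
			&offset_heap, heap, &old_vers);

		if (err == DB_SUCCESS && old_vers == NULL) {
			/* The record was inserted after the read view
			was created: invisible to this reader. */
		}
	}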
@@ row_vers_build_for_semi_consistent_read() @@
 /*********************************************************************
 Constructs the last committed version of a clustered index record,
 which should be seen by a semi-consistent read. */

 ulint
 row_vers_build_for_semi_consistent_read(
 /*====================================*/
 				/* out: DB_SUCCESS or DB_MISSING_HISTORY */
-	rec_t*		rec,	/* in: record in a clustered index; the
+	const rec_t*	rec,	/* in: record in a clustered index; the
 				caller must have a latch on the page; this
 				latch locks the top of the stack of versions
 				of this record */
...
 	mem_heap_t**	offset_heap,/* in/out: memory heap from which
 				the offsets are allocated */
 	mem_heap_t*	in_heap,/* in: memory heap from which the memory for
-				old_vers is allocated; memory for possible
+				*old_vers is allocated; memory for possible
 				intermediate versions is allocated and freed
 				locally within the function */
-	rec_t**		old_vers)/* out, own: rec, old version, or NULL if the
+	const rec_t**	old_vers)/* out: rec, old version, or NULL if the
 				record does not exist in the view, that is,
 				it was freshly inserted afterwards */
 {
+	const rec_t*	version;
 	mem_heap_t*	heap	= NULL;
...
-	dulint		rec_trx_id	= ut_dulint_create(0, 0);
+	dulint		rec_trx_id	= ut_dulint_zero;
...
-	ut_ad(index->type & DICT_CLUSTERED);
-	ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
-	      || mtr_memo_contains(mtr, buf_block_align(rec),
-				   MTR_MEMO_PAGE_S_FIX));
+	ut_ad(dict_index_is_clust(index));
+	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
+	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
 #endif /* UNIV_SYNC_DEBUG */
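Finally, a sketch of the intended use of the semi-consistent variant: under READ COMMITTED, an UPDATE that hits a row locked by another transaction may read the last committed version instead of waiting. The new const rec_t** out parameter reflects that the result may simply alias rec. An illustrative call, following the signature above:

	const rec_t*	old_vers;

	err = row_vers_build_for_semi_consistent_read(
		rec, mtr, clust_index, &offsets, &offset_heap,
		in_heap, &old_vers);

	if (err == DB_SUCCESS && old_vers == NULL) {
		/* No committed version exists: the row was inserted
		by a still-active transaction; skip it without
		waiting for the lock. */
	}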