/***************************************************************
Inserts a record next to the page cursor on an uncompressed page.
Returns a pointer to the inserted record on success, i.e., when
enough space is available; NULL otherwise. The cursor stays at the
same position. */

rec_t*
page_cur_insert_rec_low(
/*====================*/
				/* out: pointer to inserted record on
				success, NULL otherwise */
	rec_t*		current_rec,/* in: pointer to current record after
				which the new record is inserted */
	dict_index_t*	index,	/* in: record descriptor */
	const rec_t*	rec,	/* in: pointer to a physical record */
	ulint*		offsets,/* in/out: rec_get_offsets(rec, index) */
	mtr_t*		mtr)	/* in: mini-transaction handle, or NULL */
{
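	/* If mtr is NULL, no redo log record is written for the insert
	(see step 9 below); the compressed-page insert path relies on this
	when it first applies the change to the uncompressed copy and
	handles logging as part of the later recompression or
	reorganization. */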
	byte*		insert_buf	= NULL;
	ulint		rec_size;
	page_t*		page;		/* the relevant page */
	rec_t*		last_insert;	/* cursor position at previous
					insert */
	rec_t*		free_rec;	/* a free record that was reused,
					or NULL */
	rec_t*		insert_rec;	/* inserted record */
	ulint		heap_no;	/* heap number of the inserted
					record */

	ut_ad(rec_offs_validate(rec, index, offsets));

	page = page_align(current_rec);
	ut_ad(dict_table_is_comp(index->table)
	      == (ibool) !!page_is_comp(page));

	ut_ad(!page_rec_is_supremum(current_rec));

	/* 1. Get the size of the physical record in the page */
	rec_size = rec_offs_size(offsets);
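
	/* rec_offs_size() is the total footprint of the record on the page:
	the extra bytes (record header) plus the data payload, as described
	by the offsets array that the caller computed with
	rec_get_offsets(). */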

#ifdef UNIV_DEBUG_VALGRIND
	{
		const void*	rec_start
			= rec - rec_offs_extra_size(offsets);
		ulint		extra_size
			= rec_offs_extra_size(offsets)
			- (rec_offs_comp(offsets)
			   ? REC_N_NEW_EXTRA_BYTES
			   : REC_N_OLD_EXTRA_BYTES);

		/* All data bytes of the record must be valid. */
		UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
		/* The variable-length header must be valid. */
		UNIV_MEM_ASSERT_RW(rec_start, extra_size);
	}
#endif /* UNIV_DEBUG_VALGRIND */

	/* 2. Try to find suitable space from page memory management */

	free_rec = page_header_get_ptr(page, PAGE_FREE);
	if (UNIV_LIKELY_NULL(free_rec)) {
		/* Try to allocate from the head of the free list. */
		ulint		foffsets_[REC_OFFS_NORMAL_SIZE];
		ulint*		foffsets	= foffsets_;
		mem_heap_t*	heap		= NULL;

		rec_offs_init(foffsets_);

		foffsets = rec_get_offsets(free_rec, index, foffsets,
					   ULINT_UNDEFINED, &heap);
		if (rec_offs_size(foffsets) < rec_size) {
			if (UNIV_LIKELY_NULL(heap)) {
				mem_heap_free(heap);
			}

			goto use_heap;
		}

		insert_buf = free_rec - rec_offs_extra_size(foffsets);

		if (page_is_comp(page)) {
			heap_no = rec_get_heap_no_new(free_rec);
			page_mem_alloc_free(page, NULL,
					    rec_get_next_ptr(free_rec, TRUE),
					    rec_size);
		} else {
			heap_no = rec_get_heap_no_old(free_rec);
			page_mem_alloc_free(page, NULL,
					    rec_get_next_ptr(free_rec, FALSE),
					    rec_size);
		}

		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
	} else {
use_heap:
		free_rec = NULL;
		insert_buf = page_mem_alloc_heap(page, NULL,
						 rec_size, &heap_no);

		if (UNIV_UNLIKELY(insert_buf == NULL)) {
			return(NULL);
		}
	}
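
	/* Allocation strategy above: reuse the record at the head of the
	PAGE_FREE list when it is large enough, otherwise fall back to
	allocating rec_size bytes from the page heap.  Only the first free
	record is examined, so a fragmented free list can still force a
	heap allocation. */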

	/* 3. Create the record */
	insert_rec = rec_copy(insert_buf, rec, offsets);
	rec_offs_make_valid(insert_rec, index, offsets);

	/* 4. Insert the record in the linked list of records */
	ut_ad(current_rec != insert_rec);

	{
		/* next record after current before the insertion */
		rec_t*	next_rec = page_rec_get_next(current_rec);

		if (page_is_comp(page)) {
			ut_ad(rec_get_status(current_rec)
			      <= REC_STATUS_INFIMUM);
			ut_ad(rec_get_status(insert_rec) < REC_STATUS_INFIMUM);
			ut_ad(rec_get_status(next_rec) != REC_STATUS_INFIMUM);
		}

		page_rec_set_next(insert_rec, next_rec);
		page_rec_set_next(current_rec, insert_rec);
	}

	page_header_set_field(page, NULL, PAGE_N_RECS,
			      1 + page_get_n_recs(page));
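
	/* Only the next-record pointers are adjusted above; the physical
	position of insert_rec on the page is wherever insert_buf was
	allocated, independent of the logical record order. */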

	/* 5. Set the n_owned field in the inserted record to zero,
	and set the heap_no field */
	if (page_is_comp(page)) {
		rec_set_n_owned_new(insert_rec, NULL, 0);
		rec_set_heap_no_new(insert_rec, heap_no);
	} else {
		rec_set_n_owned_old(insert_rec, 0);
		rec_set_heap_no_old(insert_rec, heap_no);
	}

	UNIV_MEM_ASSERT_RW(rec_get_start(insert_rec, offsets),
			   rec_offs_size(offsets));
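
	/* The PAGE_LAST_INSERT, PAGE_DIRECTION and PAGE_N_DIRECTION fields
	maintained below record whether inserts arrive in ascending or
	descending key order; the B-tree split code consults them to pick
	a split point that favors sequential inserts. */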

	/* 6. Update the last insertion info in page header */

	last_insert = page_header_get_ptr(page, PAGE_LAST_INSERT);
	ut_ad(!last_insert || !page_is_comp(page)
	      || rec_get_node_ptr_flag(last_insert)
	      == rec_get_node_ptr_flag(insert_rec));

	if (UNIV_UNLIKELY(last_insert == NULL)) {
		page_header_set_field(page, NULL, PAGE_DIRECTION,
				      PAGE_NO_DIRECTION);
		page_header_set_field(page, NULL, PAGE_N_DIRECTION, 0);

	} else if ((last_insert == current_rec)
		   && (page_header_get_field(page, PAGE_DIRECTION)
		       != PAGE_LEFT)) {

		page_header_set_field(page, NULL, PAGE_DIRECTION,
				      PAGE_RIGHT);
		page_header_set_field(page, NULL, PAGE_N_DIRECTION,
				      page_header_get_field(
					      page, PAGE_N_DIRECTION) + 1);

	} else if ((page_rec_get_next(insert_rec) == last_insert)
		   && (page_header_get_field(page, PAGE_DIRECTION)
		       != PAGE_RIGHT)) {

		page_header_set_field(page, NULL, PAGE_DIRECTION,
				      PAGE_LEFT);
		page_header_set_field(page, NULL, PAGE_N_DIRECTION,
				      page_header_get_field(
					      page, PAGE_N_DIRECTION) + 1);
	} else {
		page_header_set_field(page, NULL, PAGE_DIRECTION,
				      PAGE_NO_DIRECTION);
		page_header_set_field(page, NULL, PAGE_N_DIRECTION, 0);
	}

	page_header_set_ptr(page, NULL, PAGE_LAST_INSERT, insert_rec);

	/* 7. It remains to update the owner record. */
	{
		rec_t*	owner_rec	= page_rec_find_owner_rec(insert_rec);
		ulint	n_owned;

		if (page_is_comp(page)) {
			n_owned = rec_get_n_owned_new(owner_rec);
			rec_set_n_owned_new(owner_rec, NULL, n_owned + 1);
		} else {
			n_owned = rec_get_n_owned_old(owner_rec);
			rec_set_n_owned_old(owner_rec, n_owned + 1);
		}

		/* 8. Now we have incremented the n_owned field of the owner
		record. If the number exceeds PAGE_DIR_SLOT_MAX_N_OWNED,
		we have to split the corresponding directory slot in two. */

		if (UNIV_UNLIKELY(n_owned == PAGE_DIR_SLOT_MAX_N_OWNED)) {
			page_dir_split_slot(
				page, NULL,
				page_dir_find_owner_slot(owner_rec));
		}
	}

	/* 9. Write log record of the insert */
	if (UNIV_LIKELY(mtr != NULL)) {
		page_cur_insert_rec_write_log(insert_rec, rec_size,
					      current_rec, index, mtr);
	}

	return(insert_rec);
}
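
/* Illustrative sketch (hypothetical caller, not part of this file): a caller
that already has a physical record and its offsets could insert it after a
page cursor roughly as follows; real callers additionally handle data tuples
and page reorganization.

	rec_t*	inserted = page_cur_insert_rec_low(
		page_cur_get_rec(cursor), index, rec, offsets, mtr);

	if (inserted == NULL) {
		(not enough space: the caller must reorganize the page or
		split the B-tree node and retry)
	}
*/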

/***************************************************************
Compresses or reorganizes a page after an optimistic insert. */
static
rec_t*
page_cur_insert_rec_zip_reorg(
/*==========================*/
				/* out: rec on success, NULL otherwise */
	rec_t**		current_rec,/* in/out: pointer to current record after
				which the new record is inserted */
	buf_block_t*	block,	/* in: buffer block */
	dict_index_t*	index,	/* in: record descriptor */
	rec_t*		rec,	/* in: inserted record */
	page_t*		page,	/* in: uncompressed page */
	page_zip_des_t*	page_zip,/* in: compressed page */
	mtr_t*		mtr)	/* in: mini-transaction, or NULL */
{
	ulint		pos;
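
	/* Strategy: first try to recompress the modified page as is.  If
	that fails, reorganize the page (which compacts and recompresses it)
	and re-locate the inserted record and *current_rec by position.  If
	even that does not fit, decompress the old compressed image to undo
	the insert on the uncompressed copy. */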

	/* Recompress or reorganize and recompress the page. */
	if (UNIV_LIKELY(page_zip_compress(page_zip, page, index, mtr))) {
		return(rec);
	}

	/* Before trying to reorganize the page,
	store the number of preceding records on the page. */
	pos = page_rec_get_n_recs_before(rec);

	if (page_zip_reorganize(block, index, mtr)) {
		/* The page was reorganized: Find rec by seeking to pos,
		and update *current_rec. */
		rec = page + PAGE_NEW_INFIMUM;

		while (--pos) {
			rec = page + rec_get_next_offs(rec, TRUE);
		}

		*current_rec = rec;
		rec = page + rec_get_next_offs(rec, TRUE);

		return(rec);
	}

	/* Out of space: restore the page */
	if (!page_zip_decompress(page_zip, page)) {
		ut_error; /* Memory corrupted? */
	}

	ut_ad(page_validate(page, index));
	return(NULL);
}

/***************************************************************
Inserts a record next to the page cursor on a compressed and an
uncompressed page. Returns a pointer to the inserted record on
success, i.e., when enough space is available; NULL otherwise.
The cursor stays at the same position. */

rec_t*
page_cur_insert_rec_zip(
/*====================*/
				/* out: pointer to inserted record on
				success, NULL otherwise */
	rec_t**		current_rec,/* in/out: pointer to current record after
				which the new record is inserted */
	buf_block_t*	block,	/* in: buffer block of *current_rec */
	dict_index_t*	index,	/* in: record descriptor */
	const rec_t*	rec,	/* in: pointer to a physical record */
	ulint*		offsets,/* in/out: rec_get_offsets(rec, index) */
	mtr_t*		mtr)	/* in: mini-transaction handle, or NULL */
{
	byte*		insert_buf	= NULL;
	ulint		rec_size;
	page_t*		page;		/* the relevant page */
	rec_t*		last_insert;	/* cursor position at previous
					insert */
	rec_t*		free_rec;	/* a free record that was reused,
					or NULL */
	rec_t*		insert_rec;	/* inserted record */
	ulint		heap_no;	/* heap number of the inserted
					record */
	page_zip_des_t*	page_zip;

	page_zip = buf_block_get_page_zip(block);
	ut_ad(page_zip);

	ut_ad(rec_offs_validate(rec, index, offsets));

	page = page_align(*current_rec);
	ut_ad(dict_table_is_comp(index->table));
	ut_ad(page_is_comp(page));

	ut_ad(!page_rec_is_supremum(*current_rec));
#ifdef UNIV_ZIP_DEBUG
	ut_a(page_zip_validate(page_zip, page));
#endif /* UNIV_ZIP_DEBUG */

	/* 1. Get the size of the physical record in the page */
	rec_size = rec_offs_size(offsets);

#ifdef UNIV_DEBUG_VALGRIND
	{
		const void*	rec_start
			= rec - rec_offs_extra_size(offsets);
		ulint		extra_size
			= rec_offs_extra_size(offsets)
			- (rec_offs_comp(offsets)
			   ? REC_N_NEW_EXTRA_BYTES
			   : REC_N_OLD_EXTRA_BYTES);

		/* All data bytes of the record must be valid. */
		UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
		/* The variable-length header must be valid. */
		UNIV_MEM_ASSERT_RW(rec_start, extra_size);
	}
#endif /* UNIV_DEBUG_VALGRIND */

	/* 2. Try to find suitable space from page memory management */
	if (!page_zip_available(page_zip, dict_index_is_clust(index),
				rec_size, 1)) {

		/* Try compressing the whole page afterwards. */
		insert_rec = page_cur_insert_rec_low(*current_rec,
						     index, rec, offsets,
						     NULL);

		if (UNIV_LIKELY(insert_rec != NULL)) {
			insert_rec = page_cur_insert_rec_zip_reorg(
				current_rec, block, index, insert_rec,
				page, page_zip, mtr);
		}

		return(insert_rec);
	}
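
	/* When page_zip_available() reports insufficient space on the
	compressed page, the record has (above) been inserted into the
	uncompressed copy only, and page_cur_insert_rec_zip_reorg() then
	decides whether the page can be recompressed, must be reorganized,
	or has to be restored from the compressed image. */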

	free_rec = page_header_get_ptr(page, PAGE_FREE);
	if (UNIV_LIKELY_NULL(free_rec)) {
		/* Try to allocate from the head of the free list. */
		lint		extra_size_diff;
		ulint		foffsets_[REC_OFFS_NORMAL_SIZE];
		ulint*		foffsets	= foffsets_;
		mem_heap_t*	heap		= NULL;

		rec_offs_init(foffsets_);

		foffsets = rec_get_offsets(free_rec, index, foffsets,
					   ULINT_UNDEFINED, &heap);
		if (rec_offs_size(foffsets) < rec_size) {
too_small:
			if (UNIV_LIKELY_NULL(heap)) {
				mem_heap_free(heap);
			}

			goto use_heap;
		}

		insert_buf = free_rec - rec_offs_extra_size(foffsets);

		/* On compressed pages, do not relocate records from
		the free list.  If extra_size would grow, use the heap. */
		extra_size_diff
			= rec_offs_extra_size(offsets)
			- rec_offs_extra_size(foffsets);

		if (UNIV_UNLIKELY(extra_size_diff < 0)) {
			/* Add an offset to the extra_size. */
			if (rec_offs_size(foffsets)
			    < rec_size - extra_size_diff) {

				goto too_small;
			}

			insert_buf -= extra_size_diff;
		} else if (UNIV_UNLIKELY(extra_size_diff)) {
			/* Do not allow extra_size to grow */

			goto too_small;
		}

		heap_no = rec_get_heap_no_new(free_rec);
		page_mem_alloc_free(page, page_zip,
				    rec_get_next_ptr(free_rec, TRUE),
				    rec_size);

		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
	} else {
use_heap:
		free_rec = NULL;
		insert_buf = page_mem_alloc_heap(page, page_zip,
						 rec_size, &heap_no);

		if (UNIV_UNLIKELY(insert_buf == NULL)) {
			return(NULL);
		}

		page_zip_dir_add_slot(page_zip, dict_index_is_clust(index));
	}
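
	/* On a compressed page a free-list record is only reused in place:
	the reused record keeps its heap number, and its header (extra
	bytes) may shrink but never grow.  When allocating from the heap
	instead, a new dense directory slot must be added to the compressed
	page (page_zip_dir_add_slot() above). */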

	/* 3. Create the record */
	insert_rec = rec_copy(insert_buf, rec, offsets);
	rec_offs_make_valid(insert_rec, index, offsets);

	/* 4. Insert the record in the linked list of records */
	ut_ad(*current_rec != insert_rec);

	{
		/* next record after current before the insertion */
		rec_t*	next_rec = page_rec_get_next(*current_rec);
		ut_ad(rec_get_status(*current_rec)
		      <= REC_STATUS_INFIMUM);
		ut_ad(rec_get_status(insert_rec) < REC_STATUS_INFIMUM);
		ut_ad(rec_get_status(next_rec) != REC_STATUS_INFIMUM);

		page_rec_set_next(insert_rec, next_rec);
		page_rec_set_next(*current_rec, insert_rec);
	}

	page_header_set_field(page, page_zip, PAGE_N_RECS,
			      1 + page_get_n_recs(page));

	/* 5. Set the n_owned field in the inserted record to zero,
	and set the heap_no field */
	rec_set_n_owned_new(insert_rec, NULL, 0);
	rec_set_heap_no_new(insert_rec, heap_no);

	UNIV_MEM_ASSERT_RW(rec_get_start(insert_rec, offsets),
			   rec_offs_size(offsets));
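
	/* The compressed page keeps its own dense directory of records;
	page_zip_dir_insert() below makes room for insert_rec there
	(free_rec tells it whether an existing dense slot can be reused),
	and page_zip_write_rec() later copies the record data into the
	compressed image. */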

	page_zip_dir_insert(page_zip, *current_rec, free_rec, insert_rec);

	/* 6. Update the last insertion info in page header */

	last_insert = page_header_get_ptr(page, PAGE_LAST_INSERT);
	ut_ad(!last_insert
	      || rec_get_node_ptr_flag(last_insert)
	      == rec_get_node_ptr_flag(insert_rec));

	if (UNIV_UNLIKELY(last_insert == NULL)) {
		page_header_set_field(page, page_zip, PAGE_DIRECTION,
				      PAGE_NO_DIRECTION);
		page_header_set_field(page, page_zip, PAGE_N_DIRECTION, 0);

	} else if ((last_insert == *current_rec)
		   && (page_header_get_field(page, PAGE_DIRECTION)
		       != PAGE_LEFT)) {

		page_header_set_field(page, page_zip, PAGE_DIRECTION,
				      PAGE_RIGHT);
		page_header_set_field(page, page_zip, PAGE_N_DIRECTION,
				      page_header_get_field(
					      page, PAGE_N_DIRECTION) + 1);

	} else if ((page_rec_get_next(insert_rec) == last_insert)
		   && (page_header_get_field(page, PAGE_DIRECTION)
		       != PAGE_RIGHT)) {

		page_header_set_field(page, page_zip, PAGE_DIRECTION,
				      PAGE_LEFT);
		page_header_set_field(page, page_zip, PAGE_N_DIRECTION,
				      page_header_get_field(
					      page, PAGE_N_DIRECTION) + 1);
	} else {
		page_header_set_field(page, page_zip, PAGE_DIRECTION,
				      PAGE_NO_DIRECTION);
		page_header_set_field(page, page_zip, PAGE_N_DIRECTION, 0);
	}

	page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, insert_rec);

	/* 7. It remains to update the owner record. */
	{
		rec_t*	owner_rec	= page_rec_find_owner_rec(insert_rec);
		ulint	n_owned;

		n_owned = rec_get_n_owned_new(owner_rec);
		rec_set_n_owned_new(owner_rec, page_zip, n_owned + 1);

		/* 8. Now we have incremented the n_owned field of the owner
		record. If the number exceeds PAGE_DIR_SLOT_MAX_N_OWNED,
		we have to split the corresponding directory slot in two. */

		if (UNIV_UNLIKELY(n_owned == PAGE_DIR_SLOT_MAX_N_OWNED)) {
			page_dir_split_slot(
				page, page_zip,
				page_dir_find_owner_slot(owner_rec));
		}
	}

	page_zip_write_rec(page_zip, insert_rec, index, offsets, 1);

	/* 9. Write log record of the insert */
	if (UNIV_LIKELY(mtr != NULL)) {
		page_cur_insert_rec_write_log(insert_rec, rec_size,
					      *current_rec, index, mtr);
	}

	return(insert_rec);
}
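
/* Illustrative sketch (hypothetical, not part of this file): callers
typically pick the insert routine based on whether the block carries a
compressed page, along these lines:

	page_zip_des_t*	page_zip = buf_block_get_page_zip(block);
	rec_t*		inserted;

	if (page_zip) {
		inserted = page_cur_insert_rec_zip(&current_rec, block,
						   index, rec, offsets, mtr);
	} else {
		inserted = page_cur_insert_rec_low(current_rec,
						   index, rec, offsets, mtr);
	}
*/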