  session->cuted_fields= 0L;
  session->set_proc_info("Updating");

  transactional_table= table->file->has_transactions();
  session->abort_on_warning= test(!ignore);
  will_batch= !table->file->start_bulk_update();

  /*
    Assure that we can use position()
    if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();

  /*
    We can use compare_record() to optimize away updates if
    the table handler is returning all columns OR if
    all updated columns are read.
  */
  can_compare_record= (!(table->file->ha_table_flags() &
                         HA_PARTIAL_COLUMN_READ) ||
                       bitmap_is_subset(table->write_set, table->read_set));

  while (!(error= info.read_record(&info)) && !session->killed)
  {
    if (!(select && select->skip_record()))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      store_record(table,record[1]);
      if (fill_record(session, fields, values, 0))
        break; /* purecov: inspected */
      if (!can_compare_record || table->compare_record())
      {
        table->auto_increment_field_not_null= false;
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) It is specifically told to do so
            2) It is no longer a good idea to batch
            3) It is necessary to send the batch for other reasons
               (one such reason is when READs must be performed)

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the updates including the one
            defined in the bulk_update_row or not including the row
            in the call. This is up to the handler implementation and can
            vary from call to call.

            dup_key_found reports the number of duplicate keys found
            in those updates actually executed. It only reports those if
            the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
            If this hasn't been issued it returns an error code and can
            ignore this number. Thus any handler that implements batching
            for UPDATE IGNORE must also handle this extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are ignored).
            (A standalone sketch of this accounting follows the update loop
            below.)
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
            flags|= ME_FATALERROR; /* Other handler errors are fatal */

          prepare_record_for_error_message(error, table);
          table->file->print_error(error, MYF(flags));
          error= 1;
          break;
        }
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in most common situations where no
          batching has occurred and if batching was supposed to occur but
          no updates were made and finally when the batch execution was
          performed without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check, and if there was no error but duplicate keys were found
          we need to continue since those are not counted in limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report errors for duplicate keys if
              they are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found; // limit is 0 here, so add the duplicates back
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    else
      table->file->unlock_row();
    session->row_count++;
  }
  dup_key_found= 0;
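/*
  [Illustrative aside — not part of the original file] A minimal standalone
  sketch of the batching contract described in the comment above: the engine
  buffers row updates, reports how many buffered updates hit (ignored)
  duplicate keys via dup_key_found, and the caller gives that many rows back
  to LIMIT and removes them from the updated count. All names below are
  hypothetical toy stand-ins, not handler API.
*/
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <vector>

struct ToyBatchingEngine
{
  std::vector<int> pending;          // buffered row updates (toy payload)
  std::size_t dup_keys= 0;           // duplicates seen while flushing

  // Buffer one update; a real engine may flush internally at any call.
  int bulk_update_row(int row, std::size_t *dup_key_found)
  {
    pending.push_back(row);
    *dup_key_found= 0;               // nothing flushed yet in this toy
    return 0;
  }

  // Flush the batch; report ignored duplicate keys, return 0 on success.
  int exec_bulk_update(std::size_t *dup_key_found)
  {
    for (int row : pending)
      if (row < 0)                   // toy stand-in for a duplicate key
        dup_keys++;
    pending.clear();
    *dup_key_found= dup_keys;
    return 0;
  }
};

int main()
{
  ToyBatchingEngine engine;
  std::size_t limit= 2, updated= 0, dup_key_found= 0;

  for (int row : {1, -2})            // -2 will count as "a duplicate key"
  {
    engine.bulk_update_row(row, &dup_key_found);
    updated++;                       // provisionally counted as updated
    limit--;
  }
  assert(limit == 0);
  engine.exec_bulk_update(&dup_key_found);
  limit+= dup_key_found;             // duplicates don't consume LIMIT ...
  updated-= dup_key_found;           // ... and were not really updated
  assert(limit == 1 && updated == 1);
  return 0;
}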
  /* Check that we are not using a table that we are updating in a sub select */
  {
    TableList *duplicate;
    if ((duplicate= unique_table(session, table_list, table_list->next_global, 0)))
    {
      update_non_unique_table_error(table_list, "UPDATE", duplicate);
      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
  }
/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/

static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->used_tables();
  return map;
}
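/*
  [Illustrative aside — not part of the original file] get_table_map() folds
  Item::used_tables() into one bitmask with a bit per open table, so "is this
  table updated?" is a single AND test, as used in
  mysql_multi_update_prepare() below. A minimal sketch with a plain 64-bit
  mask; the names here are hypothetical.
*/
#include <cassert>
#include <cstdint>

typedef uint64_t table_map_t;        // one bit per table in the query

int main()
{
  const table_map_t t1= 1ULL << 0;
  const table_map_t t2= 1ULL << 1;

  // UPDATE t1,t2 SET t1.a= t2.a: only t1 appears on the left-hand side,
  // so OR-ing the SET fields' table bits yields just t1's bit.
  table_map_t tables_for_update= t1;

  assert((tables_for_update & t1) != 0);   // t1 gets write handling
  assert((tables_for_update & t2) == 0);   // t2 can keep a read lock
  return 0;
}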
/*
  Make update-specific preparation and checks after opening tables.

  SYNOPSIS
    mysql_multi_update_prepare()
    session         thread handler
*/

int mysql_multi_update_prepare(Session *session)
{
  LEX *lex= session->lex;
  TableList *table_list= lex->query_tables;
  TableList *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;
  /*
    If this multi-update was converted from a usual update, the table
    counter is already here; otherwise junk is assigned and then replaced
    with the real count in open_tables().
  */
  uint32_t table_count= lex->table_count;
  const bool using_lock_tables= session->locked_tables != 0;
  bool original_multiupdate= (session->lex->sql_command == SQLCOM_UPDATE_MULTI);
  bool need_reopen= false;

  /* the following is needed for prepared statements, to run the multi-update next time */
  session->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /* open tables and create derived ones, but do not lock and fill them */
  if (((original_multiupdate || need_reopen) &&
       open_tables(session, &table_list, &table_count, 0)) ||
      mysql_handle_derived(lex, &mysql_derived_prepare))
    return true;

  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is
    a check for a second call in setup_tables()).
  */
  if (setup_tables_and_check_access(session, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    &lex->select_lex.leaf_tables, false))
    return true;

  if (setup_fields_with_no_wrap(session, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
    return true;

  if (update_view && check_fields(session, *fields))
    return true;

  tables_for_update= get_table_map(fields);
  /*
    Setup timestamp handling and locking mode
  */
  leaves= lex->select_lex.leaf_tables;
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    Table *table= tl->table;
    /* Only set timestamp column if this is not modified */
    if (table->timestamp_field &&
        bitmap_is_set(table->write_set,
                      table->timestamp_field->field_index))
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;

    /* if table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      table->mark_columns_needed_for_update();
      /*
        If the table will be updated we should not downgrade its lock;
        leave it as is.
      */
    }
    else
    {
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        improve performance.
      */
      tl->lock_type= TL_READ;
      /* Update Table::lock_type accordingly. */
      if (!tl->placeholder() && !using_lock_tables)
        tl->table->reginfo.lock_type= tl->lock_type;
    }
  }
  /* now lock and fill tables */
  if (lock_tables(session, table_list, table_count, &need_reopen))
  {
    if (!need_reopen)
      return true;

    /*
      We have to reopen tables since some of them were altered or dropped
      during lock_tables() or something was done with their triggers.
      Let us do some cleanups to be able to do setup_tables() and
      setup_fields() once again.
    */
    List_iterator_fast<Item> it(*fields);
    Item *item;
    while ((item= it++))
      item->cleanup();

    /* We have to cleanup translation tables of views. */
    for (TableList *tbl= table_list; tbl; tbl= tbl->next_global)
      tbl->cleanup_items();

    close_tables_for_reopen(session, &table_list);
  }

  /*
    Check that we are not using a table that we are updating, but we should
    skip all tables of the UPDATE's SELECT itself
  */
  lex->select_lex.exclude_from_table_unique_test= true;
  /* We only need SELECT privilege for columns in the values list */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TableList *duplicate;
      if ((duplicate= unique_table(session, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        return true;
      }
    }
  }
  /*
    Set exclude_from_table_unique_test value back to false. It is needed for
    a further check in multi_update::prepare whether to use record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= false;

  if (session->fill_derived_tables() &&
      mysql_handle_derived(lex, &mysql_derived_filling))
    return true;

  return false;
}
/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(Session *session,
                        TableList *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        uint64_t options,
                        enum enum_duplicates handle_duplicates, bool ignore,
                        Select_Lex_Unit *unit, Select_Lex *select_lex)
{
  bool res;
  multi_update *result;

  if (!(result= new multi_update(table_list,
                                 session->lex->select_lex.leaf_tables,
                                 fields, values,
                                 handle_duplicates, ignore)))
    return true;

  session->abort_on_warning= true;

  List<Item> total_list;
  res= mysql_select(session, &select_lex->ref_pointer_array,
                    table_list, select_lex->with_wild,
                    total_list,
                    conds, 0, (order_st *) NULL, (order_st *) NULL, (Item *) NULL,
                    (order_st *) NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    result, unit, select_lex);
  res|= session->is_error();
  if (res)
  {
    /* If another error was reported earlier then this will be ignored */
    result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
    result->abort();
  }
  delete result;
  session->abort_on_warning= 0;
  return res;
}
multi_update::multi_update(TableList *table_list,
                           TableList *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  :all_tables(table_list), leaves(leaves_list), update_tables(0),
   tmp_tables(0), updated(0), found(0), fields(field_list),
   values(value_list), table_count(0), copy_field(0),
   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
   transactional_tables(0), ignore(ignore_arg), error_handled(0)
{}
/*
  Connect fields with tables and create a list of the tables that are updated
*/

int multi_update::prepare(List<Item> &,
                          Select_Lex_Unit *)
{
  TableList *table_ref;
  SQL_LIST update;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint32_t i, max_fields;
  uint32_t leaf_table_count= 0;

  session->count_cuted_fields= CHECK_FIELD_WARN;
  session->cuted_fields= 0L;
  session->set_proc_info("updating main table");

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    return 1;
  }

  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables
  */
  if (setup_fields(session, 0, *values, MARK_COLUMNS_READ, 0, 0))
    return 1;

  /*
    Save the tables being updated in update_tables
    (update_table->shared is the position of the table).
    Don't use key read on tables that are updated.
  */
  update.empty();
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support of view of join support */
    Table *table= table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      TableList *tl= (TableList*) session->memdup((char*) table_ref,
                                                  sizeof(*tl));
      if (!tl)
        return 1;
      update.link_in_list((unsigned char*) tl, (unsigned char**) &tl->next_local);
      tl->shared= table_count++;
      table->no_keyread= 1;
      table->covering_keys.clear_all();
      table->pos_in_table_list= tl;
    }
  }

  table_count= update.elements;
  update_tables= (TableList*) update.first;

  tmp_tables= (Table **) session->calloc(sizeof(Table *) * table_count);
  tmp_table_param= (Tmp_Table_Param*) session->calloc(sizeof(Tmp_Table_Param) *
                                                      table_count);
  fields_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  values_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  if (session->is_fatal_error)
    return 1;
  for (i= 0; i < table_count; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (session->is_fatal_error)
    return 1;

  /* Split fields into fields_for_table[] and values_for_table[] */
  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint32_t offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
  if (session->is_fatal_error)
    return 1;

  /* Allocate copy fields */
  max_fields= 0;
  for (i= 0; i < table_count; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];
  return (session->is_fatal_error != 0);
}
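/*
  [Illustrative aside — not part of the original file] prepare() above walks
  the parallel SET lists once and buckets each (field, value) pair by the
  updated table's position (tl->shared). A sketch of the same bucketing with
  standard containers; the types are hypothetical stand-ins.
*/
#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

int main()
{
  // (table index, column) on the left, value expression on the right,
  // e.g. UPDATE t0,t1 SET t0.a=1, t1.b=2, t0.c=3
  std::vector<std::pair<std::size_t, std::string> > fields=
      {{0, "a"}, {1, "b"}, {0, "c"}};
  std::vector<std::string> values= {"1", "2", "3"};

  std::size_t table_count= 2;
  std::vector<std::vector<std::string> > fields_for_table(table_count);
  std::vector<std::vector<std::string> > values_for_table(table_count);

  for (std::size_t i= 0; i < fields.size(); i++)
  {
    std::size_t offset= fields[i].first;     // tl->shared in the original
    fields_for_table[offset].push_back(fields[i].second);
    values_for_table[offset].push_back(values[i]);
  }

  assert(fields_for_table[0].size() == 2);   // t0.a and t0.c
  assert(values_for_table[1].front() == "2");
  return 0;
}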
/*
  Check if table is safe to update on the fly

  SYNOPSIS
    safe_update_on_fly()
    session             Thread handler
    join_tab            How table is used in join
    all_tables          List of tables

  NOTES
    We can update the first table in join on the fly if we know that
    a row in this table will never be read twice. This is true under
    the following conditions:

    - We are doing a table scan and the data is in a separate file (MyISAM) or
      if we don't update a clustered key.

    - We are doing a range scan and we don't update the scan key or
      the primary key for a clustered table handler.

    - Table is not joined to itself.

    This function gets information about fields to be updated from
    the Table::write_set bitmap.

  WARNING
    This code is a bit dependent on how make_join_readinfo() works.

  RETURN
    0   Not safe to update
    1   Safe to update
*/

static bool safe_update_on_fly(Session *session, JOIN_TAB *join_tab,
                               TableList *table_ref, TableList *all_tables)
{
  Table *table= join_tab->table;
  if (unique_table(session, table_ref, all_tables, 0))
    return 0;
  switch (join_tab->type) {
  case JT_SYSTEM:
  case JT_CONST:
  case JT_EQ_REF:
    return true;                                // At most one matching row
  case JT_REF:
  case JT_REF_OR_NULL:
    return !is_key_used(table, join_tab->ref.key, table->write_set);
  case JT_ALL:
    /* If range search on index */
    if (join_tab->quick)
      return !join_tab->quick->is_keys_used(table->write_set);
    /* If scanning in clustered key */
    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
        table->s->primary_key < MAX_KEY)
      return !is_key_used(table, table->s->primary_key, table->write_set);
    return true;
  default:
    break;                                      // Avoid compiler warning
  }
  return false;
}
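/*
  [Illustrative aside — not part of the original file] The core test in
  safe_update_on_fly() is "does the write set touch any column of the key we
  are scanning?", since updating a scanned key could make the row visible
  again. A sketch of that intersection test with std::bitset; the column
  numbers are made up.
*/
#include <bitset>
#include <cassert>
#include <cstddef>

int main()
{
  const std::size_t MAX_COLS= 16;
  std::bitset<MAX_COLS> scan_key_cols;       // columns of the index we scan
  std::bitset<MAX_COLS> write_set;           // columns assigned by SET

  scan_key_cols.set(0);                      // index on column 0
  write_set.set(3);                          // SET col3= ...

  // is_key_used()-style test: any overlap means in-place update is unsafe.
  bool safe_on_fly= (scan_key_cols & write_set).none();
  assert(safe_on_fly);

  write_set.set(0);                          // now SET also touches the key
  safe_on_fly= (scan_key_cols & write_set).none();
  assert(!safe_on_fly);
  return 0;
}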
/*
  Initialize tables for the multi-table update

  IMPLEMENTATION
    - Update the first table in the join on the fly, if possible
    - Create temporary tables to store changed values for all other tables
      that are updated (and for main_table if the above doesn't hold).
*/

int multi_update::initialize_tables(JOIN *join)
{
  TableList *table_ref;

  if ((session->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;
  main_table= join->join_tab->table;
  table_to_update= 0;

  /* Any update has at least one pair (field, value) */
  assert(fields->elements);

  /* Create a temporary table for keys to all tables, except main table */
  for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
  {
    Table *table= table_ref->table;
    uint32_t cnt= table_ref->shared;
    List<Item> temp_fields;
    order_st group;
    Tmp_Table_Param *tmp_param;

    table->mark_columns_needed_for_update();
    if (ignore)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)                    // First table in join
    {
      if (safe_update_on_fly(session, join->join_tab, table_ref, all_tables))
      {
        table_to_update= main_table;            // Update table on the fly
        continue;
      }
    }
    table->prepare_for_position();

    tmp_param= tmp_table_param+cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For an updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */

    List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
    Table *tbl= table;
    do
    {
      Field_varstring *field= new Field_varstring(tbl->file->ref_length, 0,
                                                  tbl->alias, tbl->s,
                                                  &my_charset_bin);
      if (!field)
        return 1;
      field->init(tbl);
      /*
        The field will be converted to varstring when creating the tmp table
        if the table to be updated was created by mysql 4.1. Deny this.
      */
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        return 1;
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        return 1;
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);

    /* Make a unique key over the first field to avoid duplicated updates */
    memset(&group, 0, sizeof(group));
    group.asc= 1;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group= 1;
    tmp_param->field_count= temp_fields.elements;
    tmp_param->group_parts= 1;
    tmp_param->group_length= table->file->ref_length;
    if (!(tmp_tables[cnt]= create_tmp_table(session,
                                            tmp_param,
                                            temp_fields,
                                            (order_st*) &group, 0, 0,
                                            TMP_TABLE_ALL_COLUMNS,
                                            HA_POS_ERROR,
                                            (char *) "")))
      return 1;
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  return 0;
}
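/*
  [Illustrative aside — not part of the original file] The temporary table
  built above stores [rowid | new values] with a unique key over the rowid,
  so a row combination that the join delivers several times yields only one
  buffered update. A sketch of the same dedup using a map keyed by rowid;
  all names are hypothetical.
*/
#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

int main()
{
  typedef uint64_t rowid_t;                      // file->ref in the original
  std::map<rowid_t, std::vector<int> > buffered; // rowid -> changed values

  // The join delivers the same t1 row twice (it matched two t2 rows).
  rowid_t ref= 42;
  buffered.insert({ref, {7}});                   // first combination wins
  bool inserted= buffered.insert({ref, {8}}).second;

  assert(!inserted);                             // duplicate "key" ignored
  assert(buffered.size() == 1 && buffered[42].front() == 7);
  return 0;
}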
multi_update::~multi_update()
{
  TableList *table;
  for (table= update_tables; table; table= table->next_local)
  {
    table->table->no_keyread= table->table->no_cache= 0;
    if (ignore)
      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
  }

  if (tmp_tables)
  {
    for (uint32_t cnt= 0; cnt < table_count; cnt++)
    {
      if (tmp_tables[cnt])
      {
        tmp_tables[cnt]->free_tmp_table(session);
        tmp_table_param[cnt].cleanup();
      }
    }
  }
  if (copy_field)
    delete [] copy_field;
  session->count_cuted_fields= CHECK_FIELD_IGNORE;  // Restore this setting
  assert(trans_safe || !updated ||
         session->transaction.all.modified_non_trans_table);
}
bool multi_update::send_data(List<Item> &)
{
  TableList *cur_table;

  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    Table *table= cur_table->table;
    uint32_t offset= cur_table->shared;
    /*
      Check if we are using outer join and we didn't find the row
      or if we have already updated this row in the previous call to this
      function.

      The same row may be presented here several times in a join of type
      UPDATE t1 FROM t1,t2 SET t1.a=t2.a

      In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find a row in
      t1 several times.
    */
    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
      continue;

    /*
      We can use compare_record() to optimize away updates if
      the table handler is returning all columns OR if
      all updated columns are read.
    */
    if (table == table_to_update)
    {
      bool can_compare_record;
      can_compare_record= (!(table->file->ha_table_flags() &
                             HA_PARTIAL_COLUMN_READ) ||
                           bitmap_is_subset(table->write_set,
                                            table->read_set));
      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);
      if (fill_record(session, *fields_for_table[offset],
                      *values_for_table[offset], 0))
        return 1;

      found++;
      if (!can_compare_record || table->compare_record())
      {
        int error;

        /*
          Inform the main table that we are going to update the table even
          while we may be scanning it. This will flush the read cache
          if it's used.
        */
        main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
        if ((error= table->file->ha_update_row(table->record[1],
                                               table->record[0])) &&
            error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          {
            /*
              If (ignore && error is ignorable) we don't have to
              do anything; otherwise...
            */
            myf flags= 0;

            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
              flags|= ME_FATALERROR; /* Other handler errors are fatal */

            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(flags));
            return 1;
          }
        }
        else
        {
          if (error == HA_ERR_RECORD_IS_THE_SAME)
            error= 0;
          /* non-transactional or transactional table got modified   */
          /* either multi_update class' flag is raised in its branch */
          if (table->file->has_transactions())
            transactional_tables= 1;
          else
          {
            trans_safe= 0;
            session->transaction.stmt.modified_non_trans_table= true;
          }
        }
      }
    }
    else
    {
      int error;
      Table *tmp_table= tmp_tables[offset];
      /*
        For an updatable VIEW store the rowid of the updated table and
        rowids of tables used in the CHECK OPTION condition.
      */
      uint32_t field_num= 0;
      List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
      Table *tbl= table;
      do
      {
        tbl->file->position(tbl->record[0]);
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        ref_field->store((char *)tbl->file->ref, tbl->file->ref_length,
                         &my_charset_bin);
        field_num++;
      } while ((tbl= tbl_it++));

      /* Store regular updated fields in the row. */
      fill_record(session,
                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
                  *values_for_table[offset], 1);

      /* Write row, ignoring duplicated updates to a row */
      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
      {
        if (error &&
            create_myisam_from_heap(session, tmp_table,
                                    tmp_table_param[offset].start_recinfo,
                                    &tmp_table_param[offset].recinfo,
                                    error, 1))
        {
          do_update= 0;
          return(1);			// Not a table_is_full error
        }
        found++;
      }
    }
  }
  return 0;
}
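/*
  [Illustrative aside — not part of the original file] send_data() above
  skips a table when the join produced a NULL-complemented row or when the
  row was already updated in an earlier call; a sketch of that flag test.
  The flag values are made up; only the bitmask pattern matches the original.
*/
#include <cassert>
#include <cstdint>

int main()
{
  const uint8_t STATUS_NULL_ROW_FLAG= 1 << 0;  // outer join found no match
  const uint8_t STATUS_UPDATED_FLAG=  1 << 1;  // row already updated here

  uint8_t status= 0;
  // First visit: neither flag set, so the row gets updated.
  assert(!(status & (STATUS_NULL_ROW_FLAG | STATUS_UPDATED_FLAG)));

  status|= STATUS_UPDATED_FLAG;                // mark it, as the original does
  // Later visits of the same row combination are skipped.
  assert(status & (STATUS_NULL_ROW_FLAG | STATUS_UPDATED_FLAG));
  return 0;
}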
void multi_update::send_error(uint32_t errcode, const char *err)
{
  /* First send the error, whatever it is ... */
  my_error(errcode, MYF(0), err);
}


void multi_update::abort()
{
  /* the error was handled or nothing deleted and no side effects return */
  if (error_handled ||
      (!session->transaction.stmt.modified_non_trans_table && !updated))
    return;

  /*
    If all tables that have been updated are trans safe then just do rollback.
    If not, attempt to do the remaining updates.
  */

  if (! trans_safe)
  {
    assert(session->transaction.stmt.modified_non_trans_table);
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
        todo/fixme: do_update() is never called with the arg 1.
        should it change the signature to become argless?
      */
      do_updates();
    }
  }
  if (session->transaction.stmt.modified_non_trans_table)
  {
    session->transaction.all.modified_non_trans_table= true;
  }
  assert(trans_safe || !updated || session->transaction.stmt.modified_non_trans_table);
}
int multi_update::do_updates()
{
  TableList *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  Table *table, *tmp_table;
  List_iterator_fast<Table> check_opt_it(unupdated_check_opt_tables);

  do_update= 0;                                 // Don't retry this function
  if (!found)
    return 0;
  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    bool can_compare_record;
    uint32_t offset= cur_table->shared;

    table= cur_table->table;
    if (table == table_to_update)
      continue;                                 // Already updated
    org_updated= updated;
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);     // Change to read cache
    (void) table->file->ha_rnd_init(0);
    table->file->extra(HA_EXTRA_NO_CACHE);

    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }

    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements; // Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field; field++)
    {
      Item_field *item= (Item_field *) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end= copy_field_ptr;

    if ((local_error= tmp_table->file->ha_rnd_init(1)))
      goto err;

    can_compare_record= (!(table->file->ha_table_flags() &
                           HA_PARTIAL_COLUMN_READ) ||
                         bitmap_is_subset(table->write_set,
                                          table->read_set));

    for (;;)
    {
      if (session->killed && trans_safe)
        goto err;
      if ((local_error= tmp_table->file->rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;                             // May happen on dup key
        goto err;
      }

      /* call rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      Table *tbl= table;
      uint32_t field_num= 0;
      do
      {
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        if ((local_error=
             tbl->file->rnd_pos(tbl->record[0],
                                (unsigned char *) ref_field->ptr
                                + ref_field->length_bytes)))
          goto err;
        field_num++;
      } while ((tbl= check_opt_it++));

      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);

      /* Copy data from temporary table to current table */
      for (copy_field_ptr= copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);

      if (!can_compare_record || table->compare_record())
      {
        if ((local_error= table->file->ha_update_row(table->record[1],
                                                     table->record[0])) &&
            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          local_error= 0;
      }
    }

    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= 1;
      else
      {
        trans_safe= 0;                          // Can't do safe rollback
        session->transaction.stmt.modified_non_trans_table= true;
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
      tbl->file->ha_rnd_end();
  }
  return 0;

err:
  {
    prepare_record_for_error_message(local_error, table);
    table->file->print_error(local_error, MYF(ME_FATALERROR));
  }

  (void) table->file->ha_rnd_end();
  (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (Table *tbl= check_opt_it++)
    tbl->file->ha_rnd_end();

  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= 1;
    else
    {
      trans_safe= 0;
      session->transaction.stmt.modified_non_trans_table= true;
    }
  }
  return 1;
}
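/*
  [Illustrative aside — not part of the original file] do_updates() above is
  the replay phase: scan the temporary table, rnd_pos() to each saved rowid,
  copy the buffered values over the live row, and only write when something
  actually changed. A sketch of that loop over toy containers; every name is
  a hypothetical stand-in.
*/
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

int main()
{
  typedef uint64_t rowid_t;
  std::map<rowid_t, int> base_table= {{1, 10}, {2, 20}};  // rowid -> value
  std::map<rowid_t, int> buffered=   {{1, 11}, {2, 20}};  // planned updates

  std::size_t updated= 0;
  for (const auto &upd : buffered)       // "rnd_next" over the tmp table
  {
    int &live= base_table[upd.first];    // "rnd_pos" by the stored rowid
    if (live != upd.second)              // compare_record(): skip no-ops
    {
      live= upd.second;                  // "ha_update_row"
      updated++;
    }
  }
  assert(updated == 1 && base_table[1] == 11);
  return 0;
}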
/* out: 1 if error, 0 if success */

bool multi_update::send_eof()
{
  char buff[STRING_BUFFER_USUAL_SIZE];
  uint64_t id;
  Session::killed_state killed_status= Session::NOT_KILLED;

  session->set_proc_info("updating reference tables");

  /*
    Does updates for the last n - 1 tables, returns 0 if ok;
    error takes into account killed status gained in do_updates()
  */
  int local_error= (table_count) ? do_updates() : 0;
  /*
    If local_error is not set until after do_updates(), then a kill that
    arrives later should not affect binlogging.
  */
  killed_status= (local_error == 0) ? Session::NOT_KILLED : session->killed;
  session->set_proc_info("end");

  /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded or if we updated some non
    transactional tables.

    The query has to binlog because there's a modified non-transactional table
    either from the query's list or via a stored routine: bug#13270,23333
  */

  assert(trans_safe || !updated ||
         session->transaction.stmt.modified_non_trans_table);
  if (local_error == 0 || session->transaction.stmt.modified_non_trans_table)
  {
    if (session->transaction.stmt.modified_non_trans_table)
      session->transaction.all.modified_non_trans_table= true;
  }
  if (local_error != 0)
    error_handled= true; // to force early leave from ::send_error()

  if (local_error > 0) // if the above log write did not fail ...
  {
    /* Safety: If we haven't got an error before (can happen in do_updates) */
    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
               MYF(0));
    return true;
  }

  id= session->arg_of_last_insert_id_function ?
    session->first_successful_insert_id_in_prev_stmt : 0;
  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
          (ulong) session->cuted_fields);
  session->row_count_func=
    (session->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
  session->my_ok((ulong) session->row_count_func, id, buff);

  return false;
}