  session->cuted_fields= 0L;
  session->set_proc_info("Updating");

  transactional_table= table->file->has_transactions();
  session->abort_on_warning= test(!ignore);
  will_batch= !table->file->start_bulk_update();
  /*
    Ensure that we can use position() if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();
  /*
    We can use compare_record() to optimize away updates if the table
    handler returns all columns OR if all updated columns are read.
  */
  can_compare_record= (!(table->file->ha_table_flags() &
                         HA_PARTIAL_COLUMN_READ) ||
                       bitmap_is_subset(table->write_set, table->read_set));
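
  /*
    Illustrative sketch, not from the original source: the subset test above
    is the whole optimization. If every column that may be written is also
    read into the record buffer, the before/after row images are comparable
    and unchanged rows can be skipped. A self-contained model of the test
    (std::bitset stands in for MY_BITMAP; the column numbers are made up):

      #include <bitset>
      #include <iostream>

      int main()
      {
        std::bitset<8> read_set("00001111");   // columns 0-3 are read
        std::bitset<8> write_set("00000110");  // columns 1-2 may be written

        // write_set is a subset of read_set iff no bit is set in write_set only
        bool can_compare_record= (write_set & ~read_set).none();
        std::cout << can_compare_record << '\n';  // prints 1
        return 0;
      }
  */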

  while (!(error=info.read_record(&info)) && !session->killed)
  {
    if (!(select && select->skip_record()))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      table->storeRecord();
      if (fill_record(session, fields, values, 0))
        break; /* purecov: inspected */

      if (!can_compare_record || table->compare_record())
      {
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) it is specifically told to do so,
            2) it is no longer a good idea to batch, or
            3) the batch must be sent for some other reason
               (one such reason is when READs must be performed).

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the pending updates either including
            or excluding the row defined in this call. This is up to the
            handler implementation and can vary from call to call.

            dup_key_found reports the number of duplicate keys found in the
            updates actually executed, but only if the extra call with
            HA_EXTRA_IGNORE_DUP_KEY has been issued. If it hasn't, the method
            returns an error code and this number can be ignored. Thus any
            handler that implements batching for UPDATE IGNORE must also
            handle this extra call properly.

            If a duplicate key is found on the record included in this call,
            it should be included in the count of dup_key_found, and error
            should be set to 0 (only if these errors are ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
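
          /*
            Illustrative sketch, not from the original source: the two
            counter adjustments above implement "ignored duplicates don't
            count". A toy model of the bookkeeping (the values are made up):

              #include <cassert>

              int main()
              {
                unsigned limit= 10, updated= 7, dup_key_found= 2;

                // Two of the seven attempted updates hit (ignored) duplicate
                // keys: give their LIMIT budget back and do not count them
                // as updated rows.
                limit+= dup_key_found;
                updated-= dup_key_found;

                assert(limit == 12 && updated == 5);
                return 0;
              }
          */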
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
            flags|= ME_FATALERROR; /* Other handler errors are fatal */

          prepare_record_for_error_message(error, table);
          table->file->print_error(error, MYF(flags));
          error= 1;
          break;
        }
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in the most common situations: where no
          batching has occurred, where batching was supposed to occur but no
          updates were made, and where the batch execution was performed
          without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to check
          them; if there was no error but duplicate keys were found, we need
          to continue, since those rows are not counted against the limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report errors for duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors, or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found;  // limit is 0 when we get here, so restore it
          updated-= dup_key_found;
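
          /*
            Illustrative sketch, not from the original source: a compact
            model of the decision to keep scanning after a batch flush (the
            function name is hypothetical):

              #include <iostream>

              // Keep scanning only when the flush succeeded but ignored
              // duplicates mean the LIMIT budget was not really exhausted.
              static bool continue_scan(int error, unsigned dup_key_found)
              {
                return error == 0 && dup_key_found > 0;
              }

              int main()
              {
                std::cout << continue_scan(0, 2) << continue_scan(0, 0)
                          << '\n';  // prints "10"
                return 0;
              }
          */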
        }
        else
        {
          error= -1;			// Simulate end of file
          break;
        }
      }
    }
    else
      table->file->unlock_row();
    session->row_count++;
  }
  dup_key_found= 0;

  /* Check that we are not using a table that we are updating in a subselect */
  {
    TableList *duplicate;
    if ((duplicate= unique_table(session, table_list, table_list->next_global, 0)))
    {
      update_non_unique_table_error(table_list, "UPDATE", duplicate);
      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
  }


/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/

static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->used_tables();
  return map;
}
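
/*
  Illustrative sketch, not from the original source: get_table_map() folds
  the used_tables() bit of every updated field into one bitmask, so "does
  this table receive updates?" becomes a single AND (as with table->map &
  tables_for_update below). A self-contained model of the idiom:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
      typedef uint64_t table_map;             // one bit per table in the join
      std::vector<table_map> field_tables;    // used_tables() of each SET column
      field_tables.push_back(1ULL << 0);
      field_tables.push_back(1ULL << 2);

      table_map tables_for_update= 0;
      for (size_t i= 0; i < field_tables.size(); i++)
        tables_for_update|= field_tables[i];  // same fold as get_table_map()

      table_map t1_map= 1ULL << 2;            // hypothetical table bit
      std::cout << ((t1_map & tables_for_update) != 0) << '\n';  // prints 1
      return 0;
    }
*/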

/*
  Make update-specific preparation and checks after opening tables.

  SYNOPSIS
    mysql_multi_update_prepare()
    session		thread handler
*/

int mysql_multi_update_prepare(Session *session)
{
  LEX *lex= session->lex;
  TableList *table_list= lex->query_tables;
  TableList *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;
  /*
    If this multi-update was converted from a usual update, this holds the
    table counter; otherwise junk is assigned here, but it is then replaced
    with the real count in open_tables().
  */
  uint32_t table_count= lex->table_count;
  const bool using_lock_tables= session->locked_tables != 0;
  bool original_multiupdate= (session->lex->sql_command == SQLCOM_UPDATE_MULTI);
  bool need_reopen= false;

  /* The following is needed for prepared statements, to run the next multi-update */
  session->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /* Open tables and create derived ones, but do not lock or fill them */
  if (((original_multiupdate || need_reopen) &&
       open_tables(session, &table_list, &table_count, 0)) ||
      mysql_handle_derived(lex, &mysql_derived_prepare))
    return true;

  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is a
    check for a second call in setup_tables()).
  */
  if (setup_tables_and_check_access(session, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    &lex->select_lex.leaf_tables, false))
    return true;

  if (setup_fields_with_no_wrap(session, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
    return true;

  if (update_view && check_fields(session, *fields))
    return true;

  tables_for_update= get_table_map(fields);

  /*
    Setup timestamp handling and locking mode
  */
  leaves= lex->select_lex.leaf_tables;
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    Table *table= tl->table;
    /* Only auto-set the timestamp column if it is not explicitly modified */
    if (table->timestamp_field && table->timestamp_field->isWriteSet())
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;

    /* If the table will be updated, check that it is unique */
    if (table->map & tables_for_update)
    {
      table->mark_columns_needed_for_update();
      /*
        If the table will be updated, we should not downgrade its lock.
      */
    }
    else
    {
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        the correct order of statements. Otherwise, we use a TL_READ lock
        to improve performance.
      */
      tl->lock_type= TL_READ;

      /* Update Table::lock_type accordingly. */
      if (!tl->placeholder() && !using_lock_tables)
        tl->table->reginfo.lock_type= tl->lock_type;
    }
  }

  /* Now lock and fill tables */
  if (lock_tables(session, table_list, table_count, &need_reopen))
  {
    if (!need_reopen)
      return true;

    /*
      We have to reopen tables since some of them were altered or dropped
      during lock_tables() or something was done with their triggers.
      Let us do some cleanups to be able to do setup_tables() and
      setup_fields() correctly.
    */
    List_iterator_fast<Item> it(*fields);
    Item *item;
    while ((item= it++))
      item->cleanup();

    close_tables_for_reopen(session, &table_list);
  }

  /*
    Check that we are not using a table that we are updating, but we should
    skip all tables of the UPDATE SELECT itself
  */
  lex->select_lex.exclude_from_table_unique_test= true;
  /* We only need SELECT privilege for columns in the values list */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TableList *duplicate;
      if ((duplicate= unique_table(session, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        return true;
      }
    }
  }
  /*
    Set exclude_from_table_unique_test value back to false. It is needed for
    a further check in multi_update::prepare whether to use record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= false;

  if (session->fill_derived_tables() &&
      mysql_handle_derived(lex, &mysql_derived_filling))
    return true;

  return false;
}


/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(Session *session,
                        TableList *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        uint64_t options,
                        enum enum_duplicates handle_duplicates, bool ignore,
                        Select_Lex_Unit *unit, Select_Lex *select_lex)
{
  bool res;
  multi_update *result;

  if (!(result= new multi_update(table_list,
                                 session->lex->select_lex.leaf_tables,
                                 fields, values,
                                 handle_duplicates, ignore)))
    return true;

  session->abort_on_warning= true;

  List<Item> total_list;
  res= mysql_select(session, &select_lex->ref_pointer_array,
                    table_list, select_lex->with_wild,
                    total_list,
                    conds, 0, (order_st *) NULL, (order_st *) NULL, (Item *) NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    result, unit, select_lex);
  res|= session->is_error();
  if (res)
  {
    /* If we had another error reported earlier then this will be ignored */
    result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
    result->abort();
  }
  delete result;
  session->abort_on_warning= 0;
  return false;
}


multi_update::multi_update(TableList *table_list,
                           TableList *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  :all_tables(table_list), leaves(leaves_list),
   tmp_tables(0), updated(0), found(0), fields(field_list),
   values(value_list), table_count(0), copy_field(0),
   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
   transactional_tables(0), ignore(ignore_arg), error_handled(0)
{}


/*
  Connect fields with tables and create list of tables that are updated
*/

int multi_update::prepare(List<Item> &,
                          Select_Lex_Unit *)
{
  TableList *table_ref;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint32_t i, max_fields;
  uint32_t leaf_table_count= 0;

  session->count_cuted_fields= CHECK_FIELD_WARN;
  session->cuted_fields= 0L;
  session->set_proc_info("updating main table");

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    return 1;
  }

  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables.
  */
  if (setup_fields(session, 0, *values, MARK_COLUMNS_READ, 0, 0))
    return 1;

  /*
    Save the tables being updated in update_tables;
    update_table->shared is the position of the table.
    Don't use key read on tables that are updated.
  */
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support of view of join support */
    Table *table= table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      TableList *tl= (TableList*) session->memdup((char*) table_ref,
                                                  sizeof(*tl));
      if (!tl)
        return 1;
      update_tables.push_back(tl);
      tl->shared= table_count++;
      table->no_keyread= 1;
      table->covering_keys.reset();
      table->pos_in_table_list= tl;
    }
  }

  table_count= update_tables.size();

  tmp_tables= (Table**) session->calloc(sizeof(Table *) * table_count);
  tmp_table_param= (Tmp_Table_Param*) session->calloc(sizeof(Tmp_Table_Param) *
                                                      table_count);
  fields_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  values_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  if (session->is_fatal_error)
    return 1;
  for (i= 0; i < table_count; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (session->is_fatal_error)
    return 1;

  /* Split fields into fields_for_table[] and values_for_table[] */
  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint32_t offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
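
  /*
    Illustrative sketch, not from the original source: the loop above routes
    each (field, value) pair to the bucket of the table that owns the field,
    using the small integer tl->shared assigned when update_tables was
    built. A self-contained model of that bucketing (the data is made up):

      #include <cstddef>
      #include <vector>

      int main()
      {
        // shared index of the owning table for each SET column, e.g.
        // UPDATE t0, t1 SET t0.a=1, t1.b=2, t0.c=3
        std::vector<size_t> owner;
        owner.push_back(0); owner.push_back(1); owner.push_back(0);

        std::vector<std::vector<size_t> > fields_for_table(2);
        for (size_t pos= 0; pos < owner.size(); pos++)
          fields_for_table[owner[pos]].push_back(pos);

        // fields_for_table[0] == {0, 2}, fields_for_table[1] == {1}
        return fields_for_table[0].size() == 2 ? 0 : 1;
      }
  */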
  if (session->is_fatal_error)
    return 1;

  /* Allocate copy fields */
  max_fields= 0;
  for (i= 0; i < table_count; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];
  return (session->is_fatal_error != 0);
}


/*
  Check if table is safe to update on the fly

  SYNOPSIS
    safe_update_on_fly()
    session		Thread handler
    join_tab		How table is used in join
    all_tables		List of tables

  NOTES
    We can update the first table in join on the fly if we know that
    a row in this table will never be read twice. This is true under
    the following conditions:

    - We are doing a table scan and the data is in a separate file (MyISAM) or
      if we don't update a clustered key.

    - We are doing a range scan and we don't update the scan key or
      the primary key for a clustered table handler.

    - Table is not joined to itself.

    This function gets information about fields to be updated from
    the Table::write_set bitmap.

  WARNING
    This code is a bit dependent on how make_join_readinfo() works.

  RETURN
    0		Not safe to update
    1		Safe to update
*/

static bool safe_update_on_fly(Session *session, JOIN_TAB *join_tab,
                               TableList *table_ref, TableList *all_tables)
{
  Table *table= join_tab->table;
  if (unique_table(session, table_ref, all_tables, 0))
    return 0;
  switch (join_tab->type) {
  case JT_SYSTEM:
  case JT_CONST:
  case JT_EQ_REF:
    return true;				// At most one matching row
  case JT_REF:
  case JT_REF_OR_NULL:
    return !is_key_used(table, join_tab->ref.key, table->write_set);
  case JT_ALL:
    /* If range search on index */
    if (join_tab->quick)
      return !join_tab->quick->is_keys_used(table->write_set);
    /* If scanning in clustered key */
    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
        table->s->primary_key < MAX_KEY)
      return !is_key_used(table, table->s->primary_key, table->write_set);
    return true;
  default:
    break;					// Avoid compiler warning
  }
  return false;
}
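
/*
  Illustrative sketch, not from the original source: the rule above boils
  down to "never update a key that is driving the current scan". A toy
  model of the decision (all names are hypothetical):

    #include <iostream>

    enum scan_kind { SCAN_CONST, SCAN_INDEX_RANGE, SCAN_TABLE };

    // scan_key_updated: does the write set overlap the key used by the scan?
    static bool safe_on_fly(scan_kind kind, bool scan_key_updated,
                            bool clustered_pk_updated)
    {
      switch (kind)
      {
      case SCAN_CONST:       return true;   // at most one matching row
      case SCAN_INDEX_RANGE: return !scan_key_updated;
      case SCAN_TABLE:       return !clustered_pk_updated;
      }
      return false;
    }

    int main()
    {
      std::cout << safe_on_fly(SCAN_INDEX_RANGE, true, false) << '\n';  // 0
      return 0;
    }
*/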


/*
  Initialize table for multi table update

  IMPLEMENTATION
    - Update first table in join on the fly, if possible
    - Create temporary tables to store changed values for all other tables
      that are updated (and main_table if the above doesn't hold).
*/

int multi_update::initialize_tables(JOIN *join)
{
  if ((session->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;
  main_table= join->join_tab->table;
  table_to_update= 0;

  /* Any update has at least one pair (field, value) */
  assert(fields->elements);

  /* Create a temporary table for keys to all tables, except main table */
  for (list<TableList*>::iterator it= update_tables.begin();
       it != update_tables.end();
       ++it)
  {
    Table *table= (*it)->table;
    uint32_t cnt= (*it)->shared;
    List<Item> temp_fields;
    order_st group;
    Tmp_Table_Param *tmp_param;

    table->mark_columns_needed_for_update();
    if (ignore)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)			// First table in join
    {
      if (safe_update_on_fly(session, join->join_tab, (*it), all_tables))
      {
        table_to_update= main_table;		// Update table on the fly
        continue;
      }
    }
    table->prepare_for_position();

    tmp_param= tmp_table_param+cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For an updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */
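
    /*
      Illustrative sketch, not from the original source: conceptually each
      row of this temporary table is [rowid of original row][changed
      values...], and a unique key over the rowid deduplicates repeated
      updates of the same row. A self-contained model (std::map keyed by an
      opaque rowid string stands in for the tmp table):

        #include <map>
        #include <string>
        #include <vector>

        int main()
        {
          // rowid -> changed column values, deduplicated by rowid
          std::map<std::string, std::vector<std::string> > tmp_table;

          std::string rowid("\x01\x00\x00\x04", 4);   // hypothetical rowid
          std::vector<std::string> changed(1, "new_value");

          tmp_table.insert(std::make_pair(rowid, changed));
          // A second update of the same row is ignored, which is exactly
          // what the unique key over the first field achieves below.
          tmp_table.insert(std::make_pair(rowid, changed));
          return tmp_table.size() == 1 ? 0 : 1;
        }
    */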

    List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
    Table *tbl= table;
    do
    {
      Field_varstring *field= new Field_varstring(tbl->file->ref_length, 0,
                                                  tbl->alias, tbl->s, &my_charset_bin);
      if (!field)
        return 1;
      /*
        The field will be converted to varstring when creating the tmp table
        if the table to be updated was created by mysql 4.1. Deny this.
      */
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        return 1;
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        return 1;
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);

    /* Make a unique key over the first field to avoid duplicated updates */
    memset(&group, 0, sizeof(group));
    group.asc= 1;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group= 1;
    tmp_param->field_count= temp_fields.elements;
    tmp_param->group_parts= 1;
    tmp_param->group_length= table->file->ref_length;
    if (!(tmp_tables[cnt]= create_tmp_table(session,
                                            tmp_param,
                                            temp_fields,
                                            (order_st*) &group, 0, 0,
                                            TMP_TABLE_ALL_COLUMNS,
                                            HA_POS_ERROR,
                                            "")))
      return 1;
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  return 0;
}


multi_update::~multi_update()
{
  TableList *table;
  for (list<TableList*>::iterator it= update_tables.begin();
       it != update_tables.end();
       ++it)
  {
    table= *it;
    table->table->no_keyread= table->table->no_cache= 0;
    if (ignore)
      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
  }

  if (tmp_tables)
  {
    for (uint32_t cnt= 0; cnt < table_count; cnt++)
    {
      if (tmp_tables[cnt])
      {
        tmp_tables[cnt]->free_tmp_table(session);
        tmp_table_param[cnt].cleanup();
      }
    }
  }
  if (copy_field)
    delete [] copy_field;
  session->count_cuted_fields= CHECK_FIELD_IGNORE;	// Restore this setting
  assert(trans_safe || !updated ||
         session->transaction.all.modified_non_trans_table);
}


bool multi_update::send_data(List<Item> &)
{
  for (list<TableList*>::iterator it= update_tables.begin();
       it != update_tables.end();
       ++it)
  {
    Table *table= (*it)->table;
    uint32_t offset= (*it)->shared;
    /*
      Check if we are using outer join and we didn't find the row,
      or if we have already updated this row in the previous call to this
      function.

      The same row may be presented here several times in a join of type
      UPDATE t1 FROM t1,t2 SET t1.a=t2.a

      In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find a row in t1 twice.
    */
    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
      continue;

    /*
      We can use compare_record() to optimize away updates if the table
      handler returns all columns OR if all updated columns are read.
    */
    if (table == table_to_update)
    {
      bool can_compare_record;
      can_compare_record= (!(table->file->ha_table_flags() &
                             HA_PARTIAL_COLUMN_READ) ||
                           bitmap_is_subset(table->write_set,
                                            table->read_set));
      table->status|= STATUS_UPDATED;
      table->storeRecord();
      if (fill_record(session, *fields_for_table[offset],
                      *values_for_table[offset], 0))
        return 1;

      found++;
      if (!can_compare_record || table->compare_record())
      {
        int error;

        if (!updated++)
        {
          /*
            Inform the main table that we are going to update the table even
            while we may be scanning it. This will flush the read cache
            if it's used.
          */
          main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
        }
        if ((error= table->file->ha_update_row(table->record[1],
                                               table->record[0])) &&
            error != HA_ERR_RECORD_IS_THE_SAME)
        {
          updated--;
          if (!ignore ||
              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          {
            /*
              If (ignore && error is ignorable) we don't have to
              do anything; otherwise...
            */
            myf flags= 0;

            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
              flags|= ME_FATALERROR; /* Other handler errors are fatal */

            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(flags));
            return 1;
          }
        }
        else
        {
          if (error == HA_ERR_RECORD_IS_THE_SAME)
          {
            error= 0;
            updated--;
          }
          /* non-transactional or transactional table got modified   */
          /* either multi_update class' flag is raised in its branch */
          if (table->file->has_transactions())
            transactional_tables= 1;
          else
          {
            trans_safe= 0;
            session->transaction.stmt.modified_non_trans_table= true;
          }
        }
      }
    }
    else
    {
      int error;
      Table *tmp_table= tmp_tables[offset];
      /*
        For an updatable VIEW, store the rowid of the updated table and
        the rowids of the tables used in the CHECK OPTION condition.
      */
      uint32_t field_num= 0;
      List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
      Table *tbl= table;
      do
      {
        tbl->file->position(tbl->record[0]);
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        ref_field->store((char *)tbl->file->ref, tbl->file->ref_length,
                         &my_charset_bin);
        field_num++;
      } while ((tbl= tbl_it++));
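
      /*
        Illustrative sketch, not from the original source: position() asks
        the handler to materialize the current row's address into file->ref
        (ref_length opaque bytes); that buffer is copied into the first
        tmp-table field here and fed back to rnd_pos() in do_updates(). A
        self-contained model of treating a rowid as opaque bytes:

          #include <cstring>
          #include <string>

          int main()
          {
            unsigned char ref[8];          // handler's reusable rowid buffer
            const unsigned long pos= 42;   // hypothetical row address
            std::memcpy(ref, &pos, sizeof(pos));

            // Store a copy now; the buffer is overwritten by the next row.
            std::string saved((const char *) ref, sizeof(pos));
            return saved.size() == sizeof(pos) ? 0 : 1;
          }
      */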

      /* Store regular updated fields in the row. */
      fill_record(session,
                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
                  *values_for_table[offset], 1);

      /* Write row, ignoring duplicated updates to a row */
      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
      {
        if (error &&
            create_myisam_from_heap(session, tmp_table,
                                    tmp_table_param[offset].start_recinfo,
                                    &tmp_table_param[offset].recinfo,
                                    error, 1))
        {
          do_update= 0;
          return 1;			// Not a table_is_full error
        }
        found++;
      }
    }
  }
  return 0;
}


void multi_update::send_error(uint32_t errcode, const char *err)
{
  /* First send error what ever it is ... */
  my_error(errcode, MYF(0), err);
}


void multi_update::abort()
{
  /* the error was handled, or nothing was deleted and there are no side
     effects to undo */
  if (error_handled ||
      (!session->transaction.stmt.modified_non_trans_table && !updated))
    return;
  /*
    If all tables that have been updated are trans-safe then just do a
    rollback. If not, attempt to do the remaining updates.
  */

  if (!trans_safe)
  {
    assert(session->transaction.stmt.modified_non_trans_table);
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
        todo/fixme: do_update() is never called with the arg 1.
        should it change the signature to become argless?
      */
      do_updates();
    }
  }
  if (session->transaction.stmt.modified_non_trans_table)
  {
    session->transaction.all.modified_non_trans_table= true;
  }
  assert(trans_safe || !updated || session->transaction.stmt.modified_non_trans_table);
}


int multi_update::do_updates()
{
  TableList *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  Table *table, *tmp_table;
  List_iterator_fast<Table> check_opt_it(unupdated_check_opt_tables);

  do_update= 0;					// Don't retry this function
  if (!found)
    return 0;
  for (list<TableList*>::iterator it= update_tables.begin();
       it != update_tables.end();
       ++it)
  {
    cur_table= *it;
    bool can_compare_record;
    uint32_t offset= cur_table->shared;

    table= cur_table->table;
    if (table == table_to_update)
      continue;					// Already updated
    org_updated= updated;
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);	// Change to read cache
    (void) table->file->ha_rnd_init(0);
    table->file->extra(HA_EXTRA_NO_CACHE);

    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }

    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements;	// Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field; field++)
    {
      Item_field *item= (Item_field *) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end= copy_field_ptr;

    if ((local_error= tmp_table->file->ha_rnd_init(1)))
      goto err;

    can_compare_record= (!(table->file->ha_table_flags() &
                           HA_PARTIAL_COLUMN_READ) ||
                         bitmap_is_subset(table->write_set,
                                          table->read_set));

    for (;;)
    {
      if (session->killed && trans_safe)
        goto err;
      if ((local_error= tmp_table->file->rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;				// May happen on dup key
        goto err;
      }

      /* call rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      Table *tbl= table;
      uint32_t field_num= 0;
      do
      {
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        if ((local_error=
               tbl->file->rnd_pos(tbl->record[0],
                                  (unsigned char *) ref_field->ptr
                                  + ref_field->length_bytes)))
          goto err;
        field_num++;
      } while ((tbl= check_opt_it++));

      table->status|= STATUS_UPDATED;
      table->storeRecord();

      /* Copy data from temporary table to current table */
      for (copy_field_ptr= copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);

      if (!can_compare_record || table->compare_record())
      {
        if ((local_error= table->file->ha_update_row(table->record[1],
                                                     table->record[0])) &&
            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          local_error= 0;
      }
    }

    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= 1;
      else
      {
        trans_safe= 0;				// Can't do safe rollback
        session->transaction.stmt.modified_non_trans_table= true;
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
      tbl->file->ha_rnd_end();
  }
  return 0;

err:
  {
    prepare_record_for_error_message(local_error, table);
    table->file->print_error(local_error, MYF(ME_FATALERROR));
  }

  (void) table->file->ha_rnd_end();
  (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (Table *tbl= check_opt_it++)
    tbl->file->ha_rnd_end();

  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= 1;
    else
    {
      trans_safe= 0;
      session->transaction.stmt.modified_non_trans_table= true;
    }
  }
  return 1;
}


/* out: 1 if error, 0 if success */

bool multi_update::send_eof()
{
  char buff[STRING_BUFFER_USUAL_SIZE];
  uint64_t id;
  Session::killed_state killed_status= Session::NOT_KILLED;

  session->set_proc_info("updating reference tables");

  /*
    Does updates for the last n - 1 tables, returns 0 if ok;
    error takes into account killed status gained in do_updates()
  */
  int local_error= (table_count) ? do_updates() : 0;
  /*
    local_error is not set ON until after do_updates(), so any killing
    carried out later should not affect binlogging.
  */
  killed_status= (local_error == 0) ? Session::NOT_KILLED : session->killed;
  session->set_proc_info("end");

  /*
    Write the SQL statement to the binlog if we updated rows and we
    succeeded, or if we updated some non-transactional tables.

    The query has to binlog because there's a modified non-transactional table
    either from the query's list or via a stored routine: bug#13270,23333
  */

  assert(trans_safe || !updated ||
         session->transaction.stmt.modified_non_trans_table);
  if (local_error == 0 || session->transaction.stmt.modified_non_trans_table)
  {
    if (session->transaction.stmt.modified_non_trans_table)
      session->transaction.all.modified_non_trans_table= true;
  }
  if (local_error != 0)
    error_handled= true; // to force early leave from ::send_error()

  if (local_error > 0) // if the above log write did not fail ...
  {
    /* Safety: If we haven't got an error before (can happen in do_updates) */
    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
               MYF(0));
    return true;
  }

  id= session->arg_of_last_insert_id_function ?
      session->first_successful_insert_id_in_prev_stmt : 0;
  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
          (ulong) session->cuted_fields);
  session->row_count_func=
    (session->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
  session->my_ok((ulong) session->row_count_func, id, buff);
  return false;
}