  session->cuted_fields= 0L;
  session->set_proc_info("Updating");

  transactional_table= table->file->has_transactions();
  session->abort_on_warning= test(!ignore);
  will_batch= !table->file->start_bulk_update();
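  /*
    Note: start_bulk_update() returns non-zero when the handler does not
    support batched updates, so will_batch is true only when the engine
    has opted in to the bulk-update protocol used below.
  */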
  /*
    Ensure that we can use position()
    if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();
  /*
    We can use compare_record() to optimize away updates if
    the table handler is returning all columns OR if
    all updated columns are read
  */
  can_compare_record= (!(table->file->ha_table_flags() &
                         HA_PARTIAL_COLUMN_READ) ||
                       bitmap_is_subset(table->write_set, table->read_set));

  while (!(error= info.read_record(&info)) && !session->killed)
  {
    if (!(select && select->skip_record()))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      store_record(table,record[1]);
      if (fill_record(session, fields, values, 0))
        break; /* purecov: inspected */
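      /*
        record[1] now holds the row as it was read, and fill_record() has
        applied the SET clause to record[0]; the two images are compared
        below to skip no-op updates.
      */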
      found++;

      if (!can_compare_record || table->compare_record())
      {
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) When specifically told to do so
            2) When it is not a good idea to batch anymore
            3) When it is necessary to send batch for other reasons
               (One such reason is when READs must be performed)

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the updates including the one
            defined in the bulk_update_row or not including the row
            in the call. This is up to the handler implementation and can
            vary from call to call.

            The dup_key_found reports the number of duplicate keys found
            in those updates actually executed. It only reports those if
            the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
            If this hasn't been issued the handler returns an error code
            instead, and dup_key_found can be ignored. Thus any handler
            that implements batching for UPDATE IGNORE must also handle
            this extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
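          /*
            Rows that hit an ignored duplicate key were not actually
            changed, so they are handed back to the LIMIT budget and
            taken out of the updated count.
          */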
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        table->auto_increment_field_not_null= false;
        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
            flags|= ME_FATALERROR; /* Other handler errors are fatal */

          prepare_record_for_error_message(error, table);
          table->file->print_error(error, MYF(flags));
          error= 1;
          break;
        }
      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in most common situations where no
          batching has occurred and if batching was supposed to occur but
          no updates were made and finally when the batch execution was
          performed without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check, and if there was no error but duplicate keys were found
          we need to continue since those are not counted in the limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report error of duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found;  // limit is 0 when we get here, so add back
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    else
      table->file->unlock_row();
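      /*
        Rows that did not match the WHERE clause are released here; a
        handler that cannot unlock individual rows treats unlock_row()
        as a no-op.
      */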
    session->row_count++;
  }
  dup_key_found= 0;
  /* Check that we are not using a table that we are updating in a subselect */
  {
    TableList *duplicate;
    if ((duplicate= unique_table(session, table_list, table_list->next_global, 0)))
    {
      update_non_unique_table_error(table_list, "UPDATE", duplicate);
      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
  }
/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/

static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->used_tables();
  return map;
}
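/*
  Note: a table_map is a bitmask with one bit per table in the join, so
  the result can later be intersected with Table::map to test whether a
  given table receives assignments.
*/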
/*
  Make update-specific preparation and checks after opening tables.

  SYNOPSIS
    mysql_multi_update_prepare()
    session         thread handler

  RETURN
    false OK
    true  Error
*/

int mysql_multi_update_prepare(Session *session)
{
  LEX *lex= session->lex;
  TableList *table_list= lex->query_tables;
  TableList *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;
  /*
    If this multi-update was converted from a usual update, here is the
    table counter; otherwise junk will be assigned here, but then replaced
    with the real count in open_tables().
  */
  uint32_t table_count= lex->table_count;
  const bool using_lock_tables= session->locked_tables != 0;
  bool original_multiupdate= (session->lex->sql_command == SQLCOM_UPDATE_MULTI);
  bool need_reopen= false;
  /* The following is needed for prepared statements, so the next execution
     runs as a multi-update */
  session->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /* open tables and create derived ones, but do not lock and fill them */
  if (((original_multiupdate || need_reopen) &&
       open_tables(session, &table_list, &table_count, 0)) ||
      mysql_handle_derived(lex, &mysql_derived_prepare))
    return true;
  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is
    a check for a second call in setup_tables()).
  */
  if (setup_tables_and_check_access(session, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    &lex->select_lex.leaf_tables, false))
    return true;

  if (setup_fields_with_no_wrap(session, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
    return true;

  if (update_view && check_fields(session, *fields))
    return true;
  tables_for_update= get_table_map(fields);

  /*
    Setup timestamp handling and locking mode
  */
  leaves= lex->select_lex.leaf_tables;
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    Table *table= tl->table;
    /* Only auto-set the timestamp column if it is not modified explicitly */
    if (table->timestamp_field &&
        bitmap_is_set(table->write_set,
                      table->timestamp_field->field_index))
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;

    /* if table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      table->mark_columns_needed_for_update();
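      /*
        mark_columns_needed_for_update() extends read_set/write_set with
        the columns the handler itself needs to locate and rewrite the
        row (for example the key parts used for positioning).
      */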
      /*
        If table will be updated we should not downgrade lock for it and
        leave it as is.
      */
    }
    else
    {
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        improve performance.
      */
      tl->lock_type= TL_READ;
      /* Update Table::lock_type accordingly. */
      if (!tl->placeholder() && !using_lock_tables)
        tl->table->reginfo.lock_type= tl->lock_type;
    }
  }
  /* now lock and fill tables */
  if (lock_tables(session, table_list, table_count, &need_reopen))
  {
    if (!need_reopen)
      return true;
    /*
      We have to reopen tables since some of them were altered or dropped
      during lock_tables() or something was done with their triggers.
      Let us do some cleanups to be able to do setup_table() and
      setup_fields() again.
    */
    List_iterator_fast<Item> it(*fields);
    Item *item;
    while ((item= it++))
      item->cleanup();

    /* We have to cleanup translation tables of views. */
    for (TableList *tbl= table_list; tbl; tbl= tbl->next_global)
      tbl->cleanup_items();

    close_tables_for_reopen(session, &table_list);
  }
  /*
    Check that we are not using a table that we are updating, but we
    should skip all tables of the UPDATE's SELECT part itself
  */
  lex->select_lex.exclude_from_table_unique_test= true;
  /* We only need SELECT privilege for columns in the values list */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TableList *duplicate;
      if ((duplicate= unique_table(session, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        return true;
      }
    }
  }
  /*
    Set exclude_from_table_unique_test value back to false. It is needed for
    further check in multi_update::prepare whether to use record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= false;

  if (session->fill_derived_tables() &&
      mysql_handle_derived(lex, &mysql_derived_filling))
    return true;

  return false;
}
/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(Session *session,
                        TableList *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        uint64_t options,
                        enum enum_duplicates handle_duplicates, bool ignore,
                        Select_Lex_Unit *unit, Select_Lex *select_lex)
{
  bool res;
  multi_update *result;

  if (!(result= new multi_update(table_list,
                                 session->lex->select_lex.leaf_tables,
                                 fields, values,
                                 handle_duplicates, ignore)))
    return true;

  session->abort_on_warning= true;

  List<Item> total_list;
  res= mysql_select(session, &select_lex->ref_pointer_array,
                    table_list, select_lex->with_wild,
                    total_list,
                    conds, 0, (order_st *) NULL, (order_st *) NULL, (Item *) NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    result, unit, select_lex);
  res|= session->is_error();
  if (res)
  {
    /* If we had another error reported earlier then this will be ignored */
    result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
    result->abort();
  }
  delete result;
  session->abort_on_warning= false;
  return res;
}
multi_update::multi_update(TableList *table_list,
                           TableList *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  :all_tables(table_list), leaves(leaves_list), update_tables(0),
   tmp_tables(0), updated(0), found(0), fields(field_list),
   values(value_list), table_count(0), copy_field(0),
   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
   transactional_tables(0), ignore(ignore_arg), error_handled(0)
{}
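/*
  Note: trans_safe starts out true and is cleared as soon as a
  non-transactional table is modified; do_update is cleared once
  do_updates() has run, so abort() and the destructor do not retry
  the deferred update pass.
*/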
/*
  Connect fields with tables and create list of tables that are updated
*/

int multi_update::prepare(List<Item> &,
                          Select_Lex_Unit *)
{
  TableList *table_ref;
  SQL_LIST update;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint32_t i, max_fields;
  uint32_t leaf_table_count= 0;
  session->count_cuted_fields= CHECK_FIELD_WARN;
  session->cuted_fields= 0L;
  session->set_proc_info("updating main table");

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    return 1;
  }
  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables
  */
  if (setup_fields(session, 0, *values, MARK_COLUMNS_READ, 0, 0))
    return 1;
  /*
    Save tables being updated in update_tables;
    update_table->shared is the position for the table.
    Don't use key read on tables that are updated.
  */
  update.empty();
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support for updatable views over joins */
    Table *table= table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      TableList *tl= (TableList*) session->memdup((char*) table_ref,
                                                  sizeof(*tl));
      if (!tl)
        return 1;
      update.link_in_list((unsigned char*) tl, (unsigned char**) &tl->next_local);
      tl->shared= table_count++;
      table->no_keyread= 1;
      table->covering_keys.clear_all();
      table->pos_in_table_list= tl;
    }
  }
  table_count= update.elements;
  update_tables= (TableList*) update.first;

  tmp_tables= (Table**) session->calloc(sizeof(Table *) * table_count);
  tmp_table_param= (Tmp_Table_Param*) session->calloc(sizeof(Tmp_Table_Param) *
                                                      table_count);
  fields_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  values_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  if (session->is_fatal_error)
    return 1;
  for (i= 0; i < table_count; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (session->is_fatal_error)
    return 1;
  /* Split fields into fields_for_table[] and values_for_table[] */
  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint32_t offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
  if (session->is_fatal_error)
    return 1;
  /* Allocate copy fields */
  max_fields= 0;
  for (i= 0; i < table_count; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];
  return (session->is_fatal_error != 0);
}
/*
  Check if table is safe to update on the fly

  SYNOPSIS
    safe_update_on_fly()
    session             Thread handler
    join_tab            How table is used in join
    all_tables          List of tables

  NOTES
    We can update the first table in join on the fly if we know that
    a row in this table will never be read twice. This is true under
    the following conditions:

    - We are doing a table scan and the data is in a separate file (MyISAM) or
      if we don't update a clustered key.

    - We are doing a range scan and we don't update the scan key or
      the primary key for a clustered table handler.

    - Table is not joined to itself.

    This function gets information about fields to be updated from
    the Table::write_set bitmap.

  WARNING
    This code is a bit dependent on how make_join_readinfo() works.

  RETURN
    0           Not safe to update
    1           Safe to update
*/
static bool safe_update_on_fly(Session *session, JOIN_TAB *join_tab,
                               TableList *table_ref, TableList *all_tables)
{
  Table *table= join_tab->table;
  if (unique_table(session, table_ref, all_tables, 0))
    return false;
  switch (join_tab->type) {
  case JT_SYSTEM:
  case JT_CONST:
  case JT_EQ_REF:
    return true;                                // At most one matching row
  case JT_REF:
  case JT_REF_OR_NULL:
    return !is_key_used(table, join_tab->ref.key, table->write_set);
  case JT_ALL:
    /* If range search on index */
    if (join_tab->quick)
      return !join_tab->quick->is_keys_used(table->write_set);
    /* If scanning in clustered key */
    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
        table->s->primary_key < MAX_KEY)
      return !is_key_used(table, table->s->primary_key, table->write_set);
    return true;
  default:
    break;                                      // Avoid compiler warning
  }
  return false;
}
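/*
  Note: is_key_used() returns true when any column of the given key is in
  write_set, i.e. when the update could disturb the ordering the scan
  relies on; hence the negations above.
*/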
/*
  Initialize table for multi table

  IMPLEMENTATION
    - Update first table in join on the fly, if possible
    - Create temporary tables to store changed values for all other tables
      that are updated (and main_table if the above doesn't hold).
*/

int multi_update::initialize_tables(JOIN *join)
{
  TableList *table_ref;

  if ((session->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;
  main_table= join->join_tab->table;
  table_to_update= 0;

  /* Any update has at least one pair (field, value) */
  assert(fields->elements);
  /* Create a temporary table for keys to all tables, except main table */
  for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
  {
    Table *table= table_ref->table;
    uint32_t cnt= table_ref->shared;
    List<Item> temp_fields;
    order_st group;
    Tmp_Table_Param *tmp_param;

    table->mark_columns_needed_for_update();
    if (ignore)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)                    // First table in join
    {
      if (safe_update_on_fly(session, join->join_tab, table_ref, all_tables))
      {
        table_to_update= main_table;            // Update table on the fly
        continue;
      }
    }
    table->prepare_for_position();
    tmp_param= tmp_table_param + cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For the updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */
    List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
    Table *tbl= table;
    do
    {
      Field_varstring *field= new Field_varstring(tbl->file->ref_length, 0,
                                                  tbl->alias, tbl->s,
                                                  &my_charset_bin);
      if (!field)
        return 1;
      /*
        The field will be converted to varstring when creating the tmp table
        if the table to be updated was created by mysql 4.1. Deny this.
      */
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        return 1;
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        return 1;
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);
    /* Make a unique key over the first field to avoid duplicated updates */
    memset(&group, 0, sizeof(group));
    group.asc= 1;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group= 1;
    tmp_param->field_count= temp_fields.elements;
    tmp_param->group_parts= 1;
    tmp_param->group_length= table->file->ref_length;
    if (!(tmp_tables[cnt]= create_tmp_table(session, tmp_param, temp_fields,
                                            (order_st*) &group, 0, 0,
                                            TMP_TABLE_ALL_COLUMNS,
                                            HA_POS_ERROR, (char *) "")))
      return 1;
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  return 0;
}
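/*
  Note: HA_EXTRA_WRITE_CACHE above buffers the row-id rows written while
  the join runs; they are read back sequentially in do_updates() once the
  join has finished.
*/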
multi_update::~multi_update()
{
  TableList *table;
  for (table= update_tables; table; table= table->next_local)
  {
    table->table->no_keyread= table->table->no_cache= 0;
    if (ignore)
      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
  }

  if (tmp_tables)
  {
    for (uint32_t cnt= 0; cnt < table_count; cnt++)
    {
      if (tmp_tables[cnt])
      {
        tmp_tables[cnt]->free_tmp_table(session);
        tmp_table_param[cnt].cleanup();
      }
    }
  }
  if (copy_field)
    delete [] copy_field;
  session->count_cuted_fields= CHECK_FIELD_IGNORE;  // Restore this setting
  assert(trans_safe || !updated ||
         session->transaction.all.modified_non_trans_table);
}
bool multi_update::send_data(List<Item> &)
{
  TableList *cur_table;

  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    Table *table= cur_table->table;
    uint32_t offset= cur_table->shared;
    /*
      Check if we are using outer join and we didn't find the row
      or if we have already updated this row in the previous call to this
      function.

      The same row may be presented here several times in a join of type
      UPDATE t1 FROM t1,t2 SET t1.a=t2.a

      In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find a row in t1
      several times.
    */
    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
      continue;
    /*
      We can use compare_record() to optimize away updates if
      the table handler is returning all columns OR if
      all updated columns are read
    */
    if (table == table_to_update)
    {
      bool can_compare_record;
      can_compare_record= (!(table->file->ha_table_flags() &
                             HA_PARTIAL_COLUMN_READ) ||
                           bitmap_is_subset(table->write_set,
                                            table->read_set));
      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);
      if (fill_record(session, *fields_for_table[offset],
                      *values_for_table[offset], 0))
        return 1;
      found++;
      if (!can_compare_record || table->compare_record())
      {
        int error;
        if (!updated++)
        {
          /*
            Inform the main table that we are going to update the table even
            while we may be scanning it. This will flush the read cache
            if it is used.
          */
          main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
        }
        if ((error= table->file->ha_update_row(table->record[1],
                                               table->record[0])) &&
            error != HA_ERR_RECORD_IS_THE_SAME)
        {
          updated--;
          if (!ignore ||
              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          {
            /*
              If (ignore && error is ignorable) we don't have to
              do anything; otherwise...
            */
            myf flags= 0;

            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
              flags|= ME_FATALERROR; /* Other handler errors are fatal */

            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(flags));
            return 1;
          }
        }
        else
        {
          if (error == HA_ERR_RECORD_IS_THE_SAME)
          {
            error= 0;
            updated--;
          }
          /*
            A non-transactional or a transactional table got modified;
            either way, the corresponding multi_update flag is raised in
            its branch.
          */
          if (table->file->has_transactions())
            transactional_tables= 1;
          else
          {
            trans_safe= 0;
            session->transaction.stmt.modified_non_trans_table= true;
          }
        }
      }
    }
    else
    {
      int error;
      Table *tmp_table= tmp_tables[offset];
      /*
        For updatable VIEW store rowid of the updated table and
        rowids of tables used in the CHECK OPTION condition.
      */
      uint32_t field_num= 0;
      List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
      Table *tbl= table;
      do
      {
        tbl->file->position(tbl->record[0]);
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        ref_field->store((char *) tbl->file->ref, tbl->file->ref_length,
                         &my_charset_bin);
        field_num++;
      } while ((tbl= tbl_it++));
      /* Store regular updated fields in the row. */
      fill_record(session,
                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
                  *values_for_table[offset], 1);

      /* Write row, ignoring duplicated updates to a row */
      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
      {
        if (error &&
            create_myisam_from_heap(session, tmp_table,
                                    tmp_table_param[offset].start_recinfo,
                                    &tmp_table_param[offset].recinfo,
                                    error, 1))
        {
          do_update= 0;
          return 1;                             // Not a table_is_full error
        }
        found++;
      }
    }
  }
  return 0;
}
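/*
  Note: create_myisam_from_heap() above converts the in-memory temporary
  table to an on-disk one when it fills up; any other write error is fatal
  and disables the deferred update pass via do_update= 0.
*/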
void multi_update::send_error(uint32_t errcode, const char *err)
{
  /* First send the error, whatever it is ... */
  my_error(errcode, MYF(0), err);
}
void multi_update::abort()
{
  /* the error was handled or nothing deleted and no side effects return */
  if (error_handled ||
      (!session->transaction.stmt.modified_non_trans_table && !updated))
    return;
  /*
    If all tables that have been updated are trans safe then just do rollback.
    If not, attempt to do the remaining updates.
  */
  if (!trans_safe)
  {
    assert(session->transaction.stmt.modified_non_trans_table);
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
        todo/fixme: do_update() is never called with the arg 1.
        should it change the signature to become argless?
      */
      do_updates();
    }
  }
  if (session->transaction.stmt.modified_non_trans_table)
  {
    session->transaction.all.modified_non_trans_table= true;
  }
  assert(trans_safe || !updated || session->transaction.stmt.modified_non_trans_table);
}
int multi_update::do_updates()
{
  TableList *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  Table *table, *tmp_table;
  List_iterator_fast<Table> check_opt_it(unupdated_check_opt_tables);

  do_update= 0;                                 // Don't retry this function
  if (!found)
    return 0;
  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    bool can_compare_record;
    uint32_t offset= cur_table->shared;

    table= cur_table->table;
    if (table == table_to_update)
      continue;                                 // Already updated
    org_updated= updated;
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);     // Change to read cache
    (void) table->file->ha_rnd_init(0);
    table->file->extra(HA_EXTRA_NO_CACHE);

    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }
    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements;  // Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for (; *field; field++)
    {
      Item_field *item= (Item_field *) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end= copy_field_ptr;
    if ((local_error= tmp_table->file->ha_rnd_init(1)))
      goto err;

    can_compare_record= (!(table->file->ha_table_flags() &
                           HA_PARTIAL_COLUMN_READ) ||
                         bitmap_is_subset(table->write_set,
                                          table->read_set));
    for (;;)
    {
      if (session->killed && trans_safe)
        goto err;
      if ((local_error= tmp_table->file->rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;                             // May happen on dup key
        goto err;
      }
      /* call rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      Table *tbl= table;
      uint32_t field_num= 0;
      do
      {
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        if ((local_error=
               tbl->file->rnd_pos(tbl->record[0],
                                  (unsigned char *) ref_field->ptr +
                                  ref_field->length_bytes)))
          goto err;
        field_num++;
      } while ((tbl= check_opt_it++));
      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);

      /* Copy data from temporary table to current table */
      for (copy_field_ptr= copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);
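      /*
        Each Copy_field was bound in the setup loop above, so do_copy()
        moves one changed column from the tmp-table row into the target
        table's record[0].
      */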
      if (!can_compare_record || table->compare_record())
      {
        if ((local_error= table->file->ha_update_row(table->record[1],
                                                     table->record[0])) &&
            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          local_error= 0;
      }
    }
    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= 1;
      else
      {
        trans_safe= 0;                          // Can't do safe rollback
        session->transaction.stmt.modified_non_trans_table= true;
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
      tbl->file->ha_rnd_end();
  }
  return 0;

err:
  prepare_record_for_error_message(local_error, table);
  table->file->print_error(local_error, MYF(ME_FATALERROR));

  (void) table->file->ha_rnd_end();
  (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (Table *tbl= check_opt_it++)
    tbl->file->ha_rnd_end();
  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= 1;
    else
    {
      trans_safe= 0;
      session->transaction.stmt.modified_non_trans_table= true;
    }
  }
  return 1;
}
/* out: 1 if error, 0 if success */

bool multi_update::send_eof()
{
  char buff[STRING_BUFFER_USUAL_SIZE];
  uint64_t id;
  Session::killed_state killed_status= Session::NOT_KILLED;

  session->set_proc_info("updating reference tables");

  /*
    Does updates for the last n - 1 tables, returns 0 if ok;
    error takes into account killed status gained in do_updates()
  */
  int local_error= (table_count) ? do_updates() : 0;
  /*
    local_error is not set until after do_updates(), so a kill carried
    out later must not affect binlogging.
  */
  killed_status= (local_error == 0) ? Session::NOT_KILLED : session->killed;
  session->set_proc_info("end");
  /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded or if we updated some non
    transactional tables.

    The query has to be written to the binlog because there is a modified
    non-transactional table, either from the query's list or via a stored
    routine: bug#13270,23333
  */

  assert(trans_safe || !updated ||
         session->transaction.stmt.modified_non_trans_table);
  if (local_error == 0 || session->transaction.stmt.modified_non_trans_table)
  {
    if (session->transaction.stmt.modified_non_trans_table)
      session->transaction.all.modified_non_trans_table= true;
  }
  if (local_error != 0)
    error_handled= true;  // to force early leave from ::send_error()

  if (local_error > 0)    // if the above log write did not fail ...
  {
    /* Safety: If we haven't got an error before (can happen in do_updates) */
    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
               MYF(0));
    return true;
  }
  id= session->arg_of_last_insert_id_function ?
      session->first_successful_insert_id_in_prev_stmt : 0;
  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
          (ulong) session->cuted_fields);
  session->row_count_func=
    (session->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
  session->my_ok((ulong) session->row_count_func, id, buff);
  return false;
}
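/*
  Note: with CLIENT_FOUND_ROWS the reported row count is the number of
  rows that matched the WHERE clause (found); otherwise it is the number
  of rows actually changed (updated).
*/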