  session->cuted_fields= 0L;
  session->set_proc_info("Updating");

  transactional_table= table->file->has_transactions();
  session->abort_on_warning= test(!ignore);
  will_batch= !table->file->start_bulk_update();

  /*
    Assure that we can use position()
    if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();
  /*
    We can use compare_record() to optimize away updates if
    the table handler is returning all columns OR if
    all updated columns are read.
  */
  can_compare_record= (!(table->file->ha_table_flags() &
                         HA_PARTIAL_COLUMN_READ) ||
                       bitmap_is_subset(table->write_set, table->read_set));
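  /*
    Added illustration (not in the original source): with a table (a, b, c)
    and "UPDATE t SET b= 1 WHERE a > 0", write_set is {b} and read_set
    covers {a, b}, so write_set is a subset of read_set and the old and new
    row images can be compared to skip no-op updates.
  */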

  while (!(error= info.read_record(&info)) && !session->killed)
  {
    if (!(select && select->skip_record()))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */
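      /*
        Added note (not in the original source): under a semi-consistent
        read the engine may have returned the latest committed version of
        the row without locking it; when that version no longer matches,
        the same row is read again (and locked) on the next iteration.
      */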

      store_record(table,record[1]);
      if (fill_record(session, fields, values, 0))
        break; /* purecov: inspected */

      found++;

      if (!can_compare_record || table->compare_record())
      {
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) it is specifically told to do so,
            2) it is no longer a good idea to batch, or
            3) it is necessary to send the batch for other reasons
               (one such reason is when READs must be performed).

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the pending updates either
            including or excluding the row passed in the current call.
            This is up to the handler implementation and can vary from
            call to call.

            dup_key_found reports the number of duplicate keys found in
            the updates actually executed, but only if the extra call
            with HA_EXTRA_IGNORE_DUP_KEY has been issued. If it hasn't
            been, the handler returns an error code and this number can
            be ignored. Thus any handler that implements batching for
            UPDATE IGNORE must also handle this extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are
            ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
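          /*
            Added note (not in the original source): rows the batch
            reported as duplicates were not actually updated, so they are
            added back to the remaining LIMIT budget and subtracted from
            the updated-row counter.
          */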
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
      }

      if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
      {
        if (error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          error= 0;
      }
      else if (!ignore ||
               table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
      {
        /*
          If (ignore && error is ignorable) we don't have to
          do anything; otherwise...
        */
        myf flags= 0;

        if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          flags|= ME_FATALERROR; /* Other handler errors are fatal */

        prepare_record_for_error_message(error, table);
        table->file->print_error(error, MYF(flags));
        error= 1;
        break;
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in the most common situations: where
          no batching has occurred, where batching was supposed to occur
          but no updates were made, and where the batch execution was
          performed without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check, and if there was no error but duplicate keys were found
          we need to continue, since those are not counted in the limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report duplicate-key errors if they
              are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors, or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found;   // limit is 0 when we get here, so add back
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    else
      table->file->unlock_row();
    session->row_count++;
  }
  dup_key_found= 0;

  if (setup_tables_and_check_access(session, &select_lex->context,
                                    &select_lex->top_join_list,
                                    table_list,
                                    &select_lex->leaf_tables, false) ||
      setup_conds(session, table_list, select_lex->leaf_tables, conds) ||
      select_lex->setup_ref_array(session, order_num) ||
      setup_order(session, select_lex->ref_pointer_array,
                  table_list, all_fields, all_fields, order))
    return true;

  /* Check that we are not using a table that we are updating in a sub select */
  {
    TableList *duplicate;
    if ((duplicate= unique_table(session, table_list, table_list->next_global, 0)))
    {
      update_non_unique_table_error(table_list, "UPDATE", duplicate);
      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
  }

/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/

static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->used_tables();

  return map;
}
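
/*
  Added example (not in the original source): for
  "UPDATE t1, t2 SET t1.a= 1, t2.b= 2" the field list holds Item_field
  entries for t1.a and t2.b, so the returned map is t1's table map bit
  OR'ed with t2's.
*/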

/*
  Make UPDATE-specific preparation and checks after opening tables

  SYNOPSIS
    mysql_multi_update_prepare()
    session             thread handler

  RETURN
    false OK
    true  Error
*/

int mysql_multi_update_prepare(Session *session)
{
  LEX *lex= session->lex;
  TableList *table_list= lex->query_tables;
  TableList *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;

  /*
    If this multi-update was converted from a usual update, here is the
    table counter; else junk will be assigned here, but then it is
    replaced with the real count in open_tables().
  */
  uint32_t table_count= lex->table_count;
  const bool using_lock_tables= session->locked_tables != 0;
  bool original_multiupdate= (session->lex->sql_command == SQLCOM_UPDATE_MULTI);
  bool need_reopen= false;

  /* Following is needed for prepared statements, to run the next multi-update */
  session->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /* Open tables and create derived ones, but do not lock and fill them */
  if (((original_multiupdate || need_reopen) &&
       open_tables(session, &table_list, &table_count, 0)) ||
      mysql_handle_derived(lex, &mysql_derived_prepare))
    return true;
  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is
    a check for the second call in setup_tables()).
  */
  if (setup_tables_and_check_access(session, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    &lex->select_lex.leaf_tables, false))
    return true;

  if (setup_fields_with_no_wrap(session, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
    return true;

  if (update_view && check_fields(session, *fields))
    return true;

  tables_for_update= get_table_map(fields);

  /*
    Setup timestamp handling and locking mode
  */
  leaves= lex->select_lex.leaf_tables;
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    Table *table= tl->table;
    /* Only set timestamp column if this is not modified */
    if (table->timestamp_field &&
        bitmap_is_set(table->write_set,
                      table->timestamp_field->field_index))
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
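      /*
        Added note (not in the original source): TIMESTAMP_NO_AUTO_SET
        suppresses the automatic on-update timestamp value because the
        statement assigns the timestamp column explicitly.
      */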

    /* If table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      table->mark_columns_needed_for_update();
      /*
        If table will be updated we should not downgrade lock for it and
        leave it as is.
      */
    }
    else
    {
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        allow better concurrency.
      */
      tl->lock_type= TL_READ;
    }

    /* Update Table::lock_type accordingly. */
    if (!tl->placeholder() && !using_lock_tables)
      tl->table->reginfo.lock_type= tl->lock_type;
  }

  /* Now lock and fill tables */
  if (lock_tables(session, table_list, table_count, &need_reopen))
  {
    if (!need_reopen)
      return true;

    /*
      We have to reopen tables since some of them were altered or dropped
      during lock_tables() or something was done with their triggers.
      Let us do some cleanups to be able to do setup_table() and
      setup_fields() correctly.
    */
    List_iterator_fast<Item> it(*fields);
    Item *item;
    while ((item= it++))
      item->cleanup();

    /* We have to cleanup translation tables of views. */
    for (TableList *tbl= table_list; tbl; tbl= tbl->next_global)
      tbl->cleanup_items();

    close_tables_for_reopen(session, &table_list);
  }

  /*
    Check that we are not using a table that we are updating, but we
    should skip all tables of the UPDATE statement's SELECT part itself.
  */
  lex->select_lex.exclude_from_table_unique_test= true;
  /* We only need SELECT privilege for columns in the values list */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TableList *duplicate;
      if ((duplicate= unique_table(session, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        return true;
      }
    }
  }
  /*
    Set exclude_from_table_unique_test value back to false. It is needed
    for the further check in multi_update::prepare whether to use record
    cache.
  */
  lex->select_lex.exclude_from_table_unique_test= false;

  if (session->fill_derived_tables() &&
      mysql_handle_derived(lex, &mysql_derived_filling))
    return true;

  return false;
}

/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(Session *session,
                        TableList *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        uint64_t options,
                        enum enum_duplicates handle_duplicates, bool ignore,
                        SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
  multi_update *result;
  bool res;

  if (!(result= new multi_update(table_list,
                                 session->lex->select_lex.leaf_tables,
                                 fields, values,
                                 handle_duplicates, ignore)))
    return true;

  session->abort_on_warning= true;

  List<Item> total_list;
  res= mysql_select(session, &select_lex->ref_pointer_array,
                    table_list, select_lex->with_wild,
                    total_list,
                    conds, 0, (order_st *) NULL, (order_st *)NULL, (Item *) NULL,
                    (order_st *)NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    result, unit, select_lex);
  res|= session->is_error();
  if (res)
  {
    /* If we had another error reported earlier then this will be ignored */
    result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
    result->abort();
  }
  delete result;
  session->abort_on_warning= 0;
  return res;
}

multi_update::multi_update(TableList *table_list,
                           TableList *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  : all_tables(table_list), leaves(leaves_list), update_tables(0),
    tmp_tables(0), updated(0), found(0), fields(field_list),
    values(value_list), table_count(0), copy_field(0),
    handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
    transactional_tables(0), ignore(ignore_arg), error_handled(0)
{}

/*
  Connect fields with tables and create list of tables that are updated
*/

int multi_update::prepare(List<Item> &, SELECT_LEX_UNIT *)
{
  TableList *table_ref;
  SQL_LIST update;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint32_t i, max_fields;
  uint32_t leaf_table_count= 0;

  session->count_cuted_fields= CHECK_FIELD_WARN;
  session->cuted_fields= 0L;
  session->set_proc_info("updating main table");

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    return 1;
  }

  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables.
  */
  if (setup_fields(session, 0, *values, MARK_COLUMNS_READ, 0, 0))
    return 1;

  /*
    Save tables being updated in update_tables:
    update_table->shared is position for table.
    Don't use key read on tables that are updated.
  */
  update.empty();
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support of view of join support */
    Table *table= table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      TableList *tl= (TableList*) session->memdup((char*) table_ref,
                                                  sizeof(*tl));
      if (!tl)
        return 1;
      update.link_in_list((unsigned char*) tl, (unsigned char**) &tl->next_local);
      tl->shared= table_count++;
      table->no_keyread= 1;
      table->covering_keys.clear_all();
      table->pos_in_table_list= tl;
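      /*
        Added note (not in the original source): key read (index-only
        access) is disabled on updated tables because the scanned index
        may change under the update; clearing covering_keys keeps the
        optimizer from choosing index-only access here.
      */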
    }
  }

  table_count= update.elements;
  update_tables= (TableList*) update.first;

  tmp_tables= (Table**) session->calloc(sizeof(Table *) * table_count);
  tmp_table_param= (TMP_TABLE_PARAM*) session->calloc(sizeof(TMP_TABLE_PARAM) *
                                                      table_count);
  fields_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  values_for_table= (List_item **) session->alloc(sizeof(List_item *) *
                                                  table_count);
  if (session->is_fatal_error)
    return 1;
  for (i= 0; i < table_count; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (session->is_fatal_error)
    return 1;

  /* Split fields into fields_for_table[] and values_for_table[] */
  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint32_t offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
  if (session->is_fatal_error)
    return 1;

  /* Allocate copy fields */
  max_fields= 0;
  for (i= 0; i < table_count; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];
  return (session->is_fatal_error != 0);
}

/*
  Check if table is safe to update on the fly

  SYNOPSIS
    safe_update_on_fly()
    session             Thread handler
    join_tab            How table is used in join
    all_tables          List of tables

  NOTES
    We can update the first table in join on the fly if we know that
    a row in this table will never be read twice. This is true under
    the following conditions:

    - We are doing a table scan and the data is in a separate file (MyISAM) or
      if we don't update a clustered key.

    - We are doing a range scan and we don't update the scan key or
      the primary key for a clustered table handler.

    - Table is not joined to itself.

    This function gets information about fields to be updated from
    the Table::write_set bitmap.

  WARNING
    This code is a bit dependent on how make_join_readinfo() works.

  RETURN
    0           Not safe to update
    1           Safe to update
*/
static bool safe_update_on_fly(Session *session, JOIN_TAB *join_tab,
1136
TableList *table_ref, TableList *all_tables)
1138
Table *table= join_tab->table;
1139
if (unique_table(session, table_ref, all_tables, 0))
1141
switch (join_tab->type) {
1145
return true; // At most one matching row
1147
case JT_REF_OR_NULL:
1148
return !is_key_used(table, join_tab->ref.key, table->write_set);
1150
/* If range search on index */
1151
if (join_tab->quick)
1152
return !join_tab->quick->is_keys_used(table->write_set);
1153
/* If scanning in clustered key */
1154
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
1155
table->s->primary_key < MAX_KEY)
1156
return !is_key_used(table, table->s->primary_key, table->write_set);
1159
break; // Avoid compler warning
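
/*
  Added note (not in the original source): with a clustered primary key
  (rows stored in key order), updating the primary key during a scan of
  it could move the row ahead of the scan position and make it visible
  twice, hence the is_key_used() checks above.
*/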

/*
  Initialize table for multi table update

  IMPLEMENTATION
    - Update first table in join on the fly, if possible
    - Create temporary tables to store changed values for all other tables
      that are updated (and main_table if the above doesn't hold).
*/

int multi_update::initialize_tables(JOIN *join)
{
  TableList *table_ref;

  if ((session->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;
  main_table= join->join_tab->table;
  table_to_update= 0;

  /* Any update has at least one pair (field, value) */
  assert(fields->elements);

  /* Create a temporary table for keys to all tables, except main table */
  for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
  {
    Table *table= table_ref->table;
    uint32_t cnt= table_ref->shared;
    List<Item> temp_fields;
    order_st group;
    TMP_TABLE_PARAM *tmp_param;

    table->mark_columns_needed_for_update();
    if (ignore)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)                    // First table in join
    {
      if (safe_update_on_fly(session, join->join_tab, table_ref, all_tables))
      {
        table_to_update= main_table;            // Update table on the fly
        continue;
      }
    }
    table->prepare_for_position();

    tmp_param= tmp_table_param+cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For the updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */
    List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
    Table *tbl= table;
    do
    {
      Field_varstring *field= new Field_varstring(tbl->file->ref_length, 0,
                                                  tbl->alias, tbl->s,
                                                  &my_charset_bin);
      if (!field)
        return 1;
      /*
        The field will be converted to varstring when creating tmp table if
        table to be updated was created by mysql 4.1. Deny this.
      */
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        return 1;
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        return 1;
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);

    /* Make an unique key over the first field to avoid duplicated updates */
    memset(&group, 0, sizeof(group));
    group.asc= 1;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group= 1;
    tmp_param->field_count= temp_fields.elements;
    tmp_param->group_parts= 1;
    tmp_param->group_length= table->file->ref_length;
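    /*
      Added note (not in the original source): grouping on the first
      field, the rowid of the row to be updated, makes the temporary
      table keep a single entry per target row, so a row reached several
      times by the join is still updated only once.
    */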
    if (!(tmp_tables[cnt]= create_tmp_table(session, tmp_param,
                                            temp_fields,
                                            (order_st*) &group, 0, 0,
                                            TMP_TABLE_ALL_COLUMNS,
                                            HA_POS_ERROR,
                                            (char *) "")))
      return 1;
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  return 0;
}

multi_update::~multi_update()
{
  TableList *table;
  for (table= update_tables; table; table= table->next_local)
  {
    table->table->no_keyread= table->table->no_cache= 0;
    if (ignore)
      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
  }

  if (tmp_tables)
  {
    for (uint32_t cnt= 0; cnt < table_count; cnt++)
    {
      if (tmp_tables[cnt])
      {
        tmp_tables[cnt]->free_tmp_table(session);
        tmp_table_param[cnt].cleanup();
      }
    }
  }
  if (copy_field)
    delete [] copy_field;
  session->count_cuted_fields= CHECK_FIELD_IGNORE;      // Restore this setting
  assert(trans_safe || !updated ||
         session->transaction.all.modified_non_trans_table);
}

bool multi_update::send_data(List<Item> &)
{
  TableList *cur_table;

  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    Table *table= cur_table->table;
    uint32_t offset= cur_table->shared;
    /*
      Check if we are using outer join and we didn't find the row,
      or if we have already updated this row in the previous call to this
      function.

      The same row may be presented here several times in a join of type
      UPDATE t1 FROM t1,t2 SET t1.a=t2.a

      In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find a row in t1
      several times.
    */
    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
      continue;

    /*
      We can use compare_record() to optimize away updates if
      the table handler is returning all columns OR if
      all updated columns are read.
    */
    if (table == table_to_update)
    {
      bool can_compare_record;
      can_compare_record= (!(table->file->ha_table_flags() &
                             HA_PARTIAL_COLUMN_READ) ||
                           bitmap_is_subset(table->write_set,
                                            table->read_set));
      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);
      if (fill_record(session, *fields_for_table[offset],
                      *values_for_table[offset], 0))
        return 1;

      found++;
      if (!can_compare_record || table->compare_record())
      {
        int error;
        if (!updated++)
        {
          /*
            Inform the main table that we are going to update the table even
            while we may be scanning it. This will flush the read cache
            if it's used.
          */
          main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
        }
        if ((error= table->file->ha_update_row(table->record[1],
                                               table->record[0])) &&
            error != HA_ERR_RECORD_IS_THE_SAME)
        {
          updated--;
          if (!ignore ||
              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          {
            /*
              If (ignore && error is ignorable) we don't have to
              do anything; otherwise...
            */
            myf flags= 0;

            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
              flags|= ME_FATALERROR; /* Other handler errors are fatal */

            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(flags));
            return 1;
          }
        }
        else
        {
          if (error == HA_ERR_RECORD_IS_THE_SAME)
          {
            error= 0;
            updated--;
          }
          /* non-transactional or transactional table got modified   */
          /* either multi_update class' flag is raised in its branch */
          if (table->file->has_transactions())
            transactional_tables= 1;
          else
          {
            trans_safe= 0;
            session->transaction.stmt.modified_non_trans_table= true;
          }
        }
      }
    }
    else
    {
      int error;
      Table *tmp_table= tmp_tables[offset];
      /*
        For updatable VIEW store rowid of the updated table and
        rowids of tables used in the CHECK OPTION condition.
      */
      uint32_t field_num= 0;
      List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
      Table *tbl= table;
      do
      {
        tbl->file->position(tbl->record[0]);
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        ref_field->store((char *)tbl->file->ref, tbl->file->ref_length,
                         &my_charset_bin);
        field_num++;
      } while ((tbl= tbl_it++));

      /* Store regular updated fields in the row. */
      fill_record(session,
                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
                  *values_for_table[offset], 1);

      /* Write row, ignoring duplicated updates to a row */
      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
      {
        if (error &&
            create_myisam_from_heap(session, tmp_table,
                                    tmp_table_param[offset].start_recinfo,
                                    &tmp_table_param[offset].recinfo,
                                    error, 1))
        {
          do_update= 0;
          return 1;                             // Not a table_is_full error
        }
        found++;
      }
    }
  }
  return 0;
}
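
/*
  Added note (not in the original source): the temporary table used above
  starts as an in-memory table; when a write fails because it is full,
  create_myisam_from_heap() converts it to an on-disk MyISAM table and
  retries the write, so only genuine errors abort the update.
*/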

void multi_update::send_error(uint32_t errcode, const char *err)
{
  /* First send the error, whatever it is ... */
  my_error(errcode, MYF(0), err);
}

void multi_update::abort()
{
  /* The error was handled, or nothing was deleted and there are no side effects */
  if (error_handled ||
      (!session->transaction.stmt.modified_non_trans_table && !updated))
    return;
  /*
    If all tables that have been updated are trans safe then just do rollback.
    If not, attempt to do the remaining updates.
  */

  if (!trans_safe)
  {
    assert(session->transaction.stmt.modified_non_trans_table);
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
        todo/fixme: do_update() is never called with the arg 1.
        should it change the signature to become argless?
      */
      (void) do_updates();
    }
  }
  if (session->transaction.stmt.modified_non_trans_table)
  {
    session->transaction.all.modified_non_trans_table= true;
  }
  assert(trans_safe || !updated || session->transaction.stmt.modified_non_trans_table);
}

int multi_update::do_updates()
{
  TableList *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  Table *table, *tmp_table;
  List_iterator_fast<Table> check_opt_it(unupdated_check_opt_tables);

  do_update= 0;                                 // Don't retry this function
  if (!found)
    return 0;
  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    bool can_compare_record;
    uint32_t offset= cur_table->shared;

    table= cur_table->table;
    if (table == table_to_update)
      continue;                                 // Already updated
    org_updated= updated;
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);     // Change to read cache
    (void) table->file->ha_rnd_init(0);
    table->file->extra(HA_EXTRA_NO_CACHE);

    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }

    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements; // Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field ; field++)
    {
      Item_field *item= (Item_field* ) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end= copy_field_ptr;

    if ((local_error= tmp_table->file->ha_rnd_init(1)))
      goto err;

    can_compare_record= (!(table->file->ha_table_flags() &
                           HA_PARTIAL_COLUMN_READ) ||
                         bitmap_is_subset(table->write_set,
                                          table->read_set));
    for (;;)
    {
      if (session->killed && trans_safe)
        goto err;
      if ((local_error= tmp_table->file->rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;                             // May happen on dup key
        goto err;
      }

      /* call rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      Table *tbl= table;
      uint32_t field_num= 0;
      do
      {
        Field_varstring *ref_field=
          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
        if ((local_error=
               tbl->file->rnd_pos(tbl->record[0],
                                  (unsigned char *) ref_field->ptr
                                  + ref_field->length_bytes)))
          goto err;
        field_num++;
      } while ((tbl= check_opt_it++));

      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);

      /* Copy data from temporary table to current table */
      for (copy_field_ptr= copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);

      if (!can_compare_record || table->compare_record())
      {
        if ((local_error= table->file->ha_update_row(table->record[1],
                                                     table->record[0])) &&
            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          local_error= 0;
      }
    }

    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= 1;
      else
      {
        trans_safe= 0;                          // Can't do safe rollback
        session->transaction.stmt.modified_non_trans_table= true;
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (Table *tbl= check_opt_it++)
      tbl->file->ha_rnd_end();
  }
  return 0;

err:
  {
    prepare_record_for_error_message(local_error, table);
    table->file->print_error(local_error, MYF(ME_FATALERROR));
  }

  (void) table->file->ha_rnd_end();
  (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (Table *tbl= check_opt_it++)
    tbl->file->ha_rnd_end();

  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= 1;
    else
    {
      trans_safe= 0;
      session->transaction.stmt.modified_non_trans_table= true;
    }
  }
  return 1;
}

/* out: 1 if error, 0 if success */

bool multi_update::send_eof()
{
  char buff[STRING_BUFFER_USUAL_SIZE];
  uint64_t id;
  Session::killed_state killed_status= Session::NOT_KILLED;

  session->set_proc_info("updating reference tables");

  /*
    Does updates for the last n - 1 tables, returns 0 if ok;
    error takes into account killed status gained in do_updates()
  */
  int local_error= (table_count) ? do_updates() : 0;
  /*
    If local_error is not set until after do_updates(), then a kill
    carried out later should not affect binlogging.
  */
  killed_status= (local_error == 0) ? Session::NOT_KILLED : session->killed;
  session->set_proc_info("end");

  /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded, or if we updated some non-
    transactional tables.

    The query has to go to the binlog because a non-transactional table
    was modified, either one from the query's table list or one modified
    via a stored routine: bug#13270, bug#23333.
  */

  assert(trans_safe || !updated ||
         session->transaction.stmt.modified_non_trans_table);
  if (local_error == 0 || session->transaction.stmt.modified_non_trans_table)
  {
    if (session->transaction.stmt.modified_non_trans_table)
      session->transaction.all.modified_non_trans_table= true;
  }
  if (local_error != 0)
    error_handled= true; // to force early leave from ::send_error()

  if (local_error > 0) // if the above log write did not fail ...
  {
    /* Safety: If we haven't got an error before (can happen in do_updates) */
    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
               MYF(0));
    return true;
  }

  id= session->arg_of_last_insert_id_function ?
      session->first_successful_insert_id_in_prev_stmt : 0;
  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
          (ulong) session->cuted_fields);
  session->row_count_func=
    (session->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
  ::my_ok(session, (ulong) session->row_count_func, id, buff);

  return false;
}
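
/*
  Added note (not in the original source): with CLIENT_FOUND_ROWS the
  client asked for the number of rows matched rather than the number
  actually changed, which is why the reported row count switches between
  'found' and 'updated' above.
*/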