   session->cuted_fields= 0L;
   session->set_proc_info("Updating");

-  transactional_table= table->file->has_transactions();
-  session->abort_on_warning= test(!ignore);
-  will_batch= !table->file->start_bulk_update();
+  transactional_table= table->cursor->has_transactions();
+  session->setAbortOnWarning(test(!ignore));
   /*
     Ensure that we can use position()
     if we need to create an error message.
   */
-  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
+  if (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ))
     table->prepare_for_position();
-  /*
-    We can use compare_record() to optimize away updates if
-    the table handler is returning all columns or if
-    all updated columns are read
-  */
-  can_compare_record= (!(table->file->ha_table_flags() &
-                         HA_PARTIAL_COLUMN_READ) ||
-                       bitmap_is_subset(table->write_set, table->read_set));
-
-  while (!(error=info.read_record(&info)) && !session->killed)
+  while (not (error=info.read_record(&info)) && not session->getKilled())
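A note on the can_compare_record test being removed here: with an engine that may return only some columns (HA_PARTIAL_COLUMN_READ), the before and after row images can only be compared when every written column was also read, i.e. when write_set is a subset of read_set. A minimal illustration of just that subset test over plain 64-bit masks (the server uses MY_BITMAP; this only mirrors the logic):

    #include <cassert>
    #include <cstdint>

    // Toy column sets: bit n set means column n is in the set.
    static bool is_subset(uint64_t write_set, uint64_t read_set)
    {
      // Every written column must also have been read, otherwise the
      // old and new row images cannot be compared reliably.
      return (write_set & ~read_set) == 0;
    }

    int main()
    {
      const uint64_t read_set= 0x7;        // columns 0..2 were read
      assert(is_subset(0x2, read_set));    // column 1 written: comparable
      assert(!is_subset(0x8, read_set));   // column 3 never read: not comparable
      return 0;
    }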
-    if (!(select && select->skip_record()))
+    if (not (select && select->skip_record()))
-      if (table->file->was_semi_consistent_read())
+      if (table->cursor->was_semi_consistent_read())
         continue;  /* repeat the read of the same row if it still exists */
       table->storeRecord();
-      if (fill_record(session, fields, values, 0))
+      if (fill_record(session, fields, values))
         break; /* purecov: inspected */
-      if (!can_compare_record || table->compare_record())
+      if (! table->records_are_comparable() || table->compare_records())
+        /* Non-batched update */
+        error= table->cursor->updateRecord(table->getUpdateRecord(),
+                                           table->getInsertRecord());
+        table->auto_increment_field_not_null= false;
-        /*
-          Typically a batched handler can execute the batched jobs when:
-          1) It is specifically told to do so
-          2) It is not a good idea to batch anymore
-          3) It is necessary to send the batch for other reasons
-             (one such reason is when READs must be performed)
-
-          1) is covered by exec_bulk_update calls.
-          2) and 3) are handled by the bulk_update_row method.
-
-          bulk_update_row can execute the updates either including or
-          not including the row passed in the call. This is up to the
-          handler implementation and can vary from call to call.
-
-          The dup_key_found argument reports the number of duplicate keys
-          found in those updates that were actually executed. It only
-          reports them if the extra call with HA_EXTRA_IGNORE_DUP_KEY has
-          been issued. If that call hasn't been issued, the handler returns
-          an error code and this number can be ignored. Thus any handler
-          that implements batching for UPDATE IGNORE must also handle this
-          extra call properly.
-
-          If a duplicate key is found on the record included in this
-          call then it should be included in the count of dup_key_found
-          and error should be set to 0 (only if these errors are ignored).
-        */
-        error= table->file->ha_bulk_update_row(table->record[1],
-                                               table->record[0],
-                                               &dup_key_found);
-        limit+= dup_key_found;
-        updated-= dup_key_found;
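The removed comment describes a multi-call contract between the executor and a batching handler. A hedged sketch with hypothetical names (not the real handler API) that mirrors the accounting it describes (queue rows, flush on request, count rather than report ignored duplicate keys):

    #include <cstdint>
    #include <vector>

    struct ToyRow { uint64_t key; int value; };

    class ToyBatchingHandler
    {
      std::vector<ToyRow> pending;      // rows queued by bulk_update_row()

      // Stub: apply one row, returning true if it hit a duplicate key.
      bool apply(const ToyRow &) { return false; }

    public:
      // Queue one update. A real handler may flush early here when it
      // decides batching is no longer worthwhile (cases 2 and 3 above).
      int bulk_update_row(const ToyRow &row, uint32_t *dup_key_found)
      {
        pending.push_back(row);
        *dup_key_found= 0;              // nothing flushed in this sketch
        return 0;
      }

      // Case 1: the caller explicitly asks for the batch to be executed.
      // Ignored duplicate keys are only counted, never returned as errors.
      int exec_bulk_update(uint32_t *dup_key_found)
      {
        uint32_t dups= 0;
        for (const ToyRow &row : pending)
          if (apply(row))
            dups++;
        pending.clear();
        *dup_key_found= dups;
        return 0;                       // 0 even when duplicates were ignored
      }
    };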
-        /* Non-batched update */
-        error= table->file->ha_update_row(table->record[1],
-                                          table->record[0]);
       if (!error || error == HA_ERR_RECORD_IS_THE_SAME)

         if (error != HA_ERR_RECORD_IS_THE_SAME)

-                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
+                 table->cursor->is_fatal_error(error, HA_CHECK_DUP_KEY))
           /*
             If (ignore && error is ignorable) we don't have to
             do anything; otherwise...
           */
-          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
+          if (table->cursor->is_fatal_error(error, HA_CHECK_DUP_KEY))
             flags|= ME_FATALERROR; /* Other handler errors are fatal */
           prepare_record_for_error_message(error, table);
-          table->file->print_error(error,MYF(flags));
+          table->print_error(error,MYF(flags));
         if (!--limit && using_limit)
+          error= -1;                            // Simulate end of cursor
-          /*
-            We have reached end-of-file in the most common situations: where no
-            batching has occurred, where batching was supposed to occur but
-            no updates were made, and where the batch execution was
-            performed without error and without finding any duplicate keys.
-            If the batched updates were performed with errors we need to
-            check, and if there was no error but duplicate keys were found
-            we need to continue, since those are not counted in the limit.
-          */
-          ((error= table->file->exec_bulk_update(&dup_key_found)) ||
-            /* purecov: begin inspected */
-            /*
-              The handler should not report duplicate-key errors if they
-              are ignored. This is a requirement on batching handlers.
-            */
-            prepare_record_for_error_message(error, table);
-            table->file->print_error(error,MYF(0));
-            /*
-              Either an error was found and we are ignoring errors, or there
-              were duplicate keys found. In both cases we need to correct
-              the counters and continue the loop.
-            */
-            limit= dup_key_found; // limit is 0 when we get here, so add the ignored duplicates back
-            updated-= dup_key_found;
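The counter arithmetic in the removed block: the loop decrements limit once per row it believes it updated, so limit is 0 when this point is reached. Under UPDATE IGNORE ... LIMIT 10, if the batch flush reports dup_key_found == 2, two of the counted rows were never actually changed; setting limit= dup_key_found restores exactly that budget, updated-= dup_key_found corrects the row count, and the scan continues.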
-            error= -1;                          // Simulate end of file
-    table->file->unlock_row();
+    table->cursor->unlock_row();
     session->row_count++;

     dup_key_found= 0;
   /*
     It's assumed that if an error was set in combination with an effective
     killed status then the error is due to killing.
   */
-  killed_status= session->killed;         // get the status of the volatile
+  killed_status= session->getKilled();    // get the status of the volatile
   // simulated killing after the loop must be ineffective for binlogging
   error= (killed_status == Session::NOT_KILLED)? error : 1;
-      (loc_error= table->file->exec_bulk_update(&dup_key_found)))
-    /*
-      An error has occurred when a batched update was performed and returned
-      an error indication. It cannot be an allowed duplicate key error since
-      we require the batching handler to treat this as normal behavior.
-
-      Otherwise we simply remove the number of duplicate key records found
-      in the batched update.
-    */
-    /* purecov: begin inspected */
-    prepare_record_for_error_message(loc_error, table);
-    table->file->print_error(loc_error,MYF(ME_FATALERROR));
-
-  updated-= dup_key_found;
-  table->file->end_bulk_update();
-  table->file->try_semi_consistent_read(0);
+  updated-= dup_key_found;
+  table->cursor->try_semi_consistent_read(0);
   if (!transactional_table && updated > 0)
-    session->transaction.stmt.modified_non_trans_table= true;
+    session->transaction.stmt.markModifiedNonTransData();

-  end_read_record(&info);
+  info.end_read_record();
   session->set_proc_info("end");
-  table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+  table->cursor->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
   /*
     error < 0 means really no error at all: we processed all rows until the
     last one without error. error > 0 means an error (e.g. unique key
     violation and no IGNORE or REPLACE). error == 0 is also an error (if
     preparing the record or invoking before triggers fails). See
-    ha_autocommit_or_rollback(error>=0) and return(error>=0) below.
+    autocommitOrRollback(error>=0) and return(error>=0) below.
     Sometimes we want to binlog even if we updated no rows, in case the user
     used it to be sure master and slave are in the same state.
   */
-  if ((error < 0) || session->transaction.stmt.modified_non_trans_table)
+  if ((error < 0) || session->transaction.stmt.hasModifiedNonTransData())
-    if (session->transaction.stmt.modified_non_trans_table)
-      session->transaction.all.modified_non_trans_table= true;
+    if (session->transaction.stmt.hasModifiedNonTransData())
+      session->transaction.all.markModifiedNonTransData();
-  assert(transactional_table || !updated || session->transaction.stmt.modified_non_trans_table);
+  assert(transactional_table || !updated || session->transaction.stmt.hasModifiedNonTransData());
   free_underlaid_joins(session, select_lex);

   /* If LAST_INSERT_ID(X) was used, report X */
   id= session->arg_of_last_insert_id_function ?
       session->first_successful_insert_id_in_prev_stmt : 0;

-  DRIZZLE_UPDATE_END();
   char buff[STRING_BUFFER_USUAL_SIZE];
-  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
+  snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
           (ulong) session->cuted_fields);
-  session->row_count_func=
-    (session->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
-  session->my_ok((ulong) session->row_count_func, id, buff);
+  session->row_count_func= updated;
+  /*
+   * Resetting the Diagnostic area to prevent ...
+   */
+  session->main_da.reset_diagnostics_area();
+  session->my_ok((ulong) session->rowCount(), found, id, buff);
+  session->status_var.updated_row_count+= session->rowCount();

-  session->count_cuted_fields= CHECK_FIELD_IGNORE;         /* calc cuted fields */
-  session->abort_on_warning= 0;
-  return((error >= 0 || session->is_error()) ? 1 : 0);
+  session->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; /* calc cuted fields */
+  session->setAbortOnWarning(false);
+  DRIZZLE_UPDATE_DONE((error >= 0 || session->is_error()), found, updated);
+  return ((error >= 0 || session->is_error()) ? 1 : 0);
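One behavioural difference between the two branches: the removed code reports `found` instead of `updated` when the client set CLIENT_FOUND_ROWS (such clients ask for the number of rows that matched rather than the number actually changed), while the replacement always reports the updated count (rowCount() presumably returns the row_count_func set just above).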
+  table->print_error(error,MYF(0));

   free_underlaid_joins(session, select_lex);
   if (table->key_read)
     table->key_read=0;
-    table->file->extra(HA_EXTRA_NO_KEYREAD);
+    table->cursor->extra(HA_EXTRA_NO_KEYREAD);

-  session->abort_on_warning= 0;
+  session->setAbortOnWarning(false);

-  DRIZZLE_UPDATE_END();
+  DRIZZLE_UPDATE_DONE(1, 0, 0);
 /*
   Prepare items in UPDATE statement

-    mysql_prepare_update()
   session         - thread handler
   table_list      - global/local table list
   conds           - conditions
-  order_num       - number of order_st BY list entries
-  order           - order_st BY clause list
+  order_num       - number of ORDER BY list entries
+  order           - ORDER BY clause list
 */
-bool mysql_prepare_update(Session *session, TableList *table_list,
-                          Item **conds, uint32_t order_num, order_st *order)
+bool prepare_update(Session *session, TableList *table_list,
+                    Item **conds, uint32_t order_num, Order *order)
 {
   List<Item> all_fields;
   Select_Lex *select_lex= &session->lex->select_lex;

   /* Check that we are not using a table that we are updating in a subselect */
   TableList *duplicate;
-  if ((duplicate= unique_table(session, table_list, table_list->next_global, 0)))
-    my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
+  if ((duplicate= unique_table(table_list, table_list->next_global)))
+    my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->getTableName());

+} /* namespace drizzled */
-/***************************************************************************
-  Update multiple tables from join
-***************************************************************************/
-/*
-  Get table map for list of Item_field
-*/
-static table_map get_table_map(List<Item> *items)
-{
-  List_iterator_fast<Item> item_it(*items);
-  while ((item= (Item_field *) item_it++))
-    map|= item->used_tables();
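get_table_map() just ORs together the used_tables() masks of the Item_fields in the SET list: each table in the query owns one bit, which is what the later `table->map & tables_for_update` test relies on. A self-contained toy version of the same idea (ToyField is a hypothetical stand-in for Item_field):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using table_map= uint64_t;

    struct ToyField { table_map table_bit; };  // bit owned by the field's table

    // OR together the table bits of all fields named in the SET clause.
    static table_map get_table_map(const std::vector<ToyField> &fields)
    {
      table_map map= 0;
      for (const ToyField &f : fields)
        map|= f.table_bit;
      return map;
    }

    int main()
    {
      // With t1 = bit 0 and t2 = bit 1, "SET t1.a=..., t2.b=..." marks both.
      const std::vector<ToyField> set_list= { {1u << 0}, {1u << 1} };
      assert(get_table_map(set_list) == 0x3);
      return 0;
    }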
-/*
-  Make update-specific preparation and checks after opening tables
-
-    mysql_multi_update_prepare()
-    session         thread handler
-*/
-int mysql_multi_update_prepare(Session *session)
-{
-  LEX *lex= session->lex;
-  TableList *table_list= lex->query_tables;
-  TableList *tl, *leaves;
-  List<Item> *fields= &lex->select_lex.item_list;
-  table_map tables_for_update;
-  /*
-    If this multi-update was converted from a usual update, here is the table
-    counter; otherwise junk will be assigned here, but it is then replaced
-    with the real count in open_tables()
-  */
-  uint32_t table_count= lex->table_count;
-  bool original_multiupdate= (session->lex->sql_command == SQLCOM_UPDATE_MULTI);
-  bool need_reopen= false;
-
-  /* the following is needed for prepared statements, to run the multi-update next time */
-  session->lex->sql_command= SQLCOM_UPDATE_MULTI;
-  /* open tables and create derived ones, but do not lock and fill them */
-  if (((original_multiupdate || need_reopen) &&
-       session->open_tables_from_list(&table_list, &table_count, false)) ||
-      mysql_handle_derived(lex, &mysql_derived_prepare))
-
-  /*
-    setup_tables() is needed for VIEWs. JOIN::prepare() will call setup_tables()
-    a second time, but that call will do nothing (there is a check for a
-    second call in setup_tables()).
-  */
-  if (setup_tables_and_check_access(session, &lex->select_lex.context,
-                                    &lex->select_lex.top_join_list,
-                                    &lex->select_lex.leaf_tables, false))
-
-  if (setup_fields_with_no_wrap(session, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
-
-  tables_for_update= get_table_map(fields);
-  /*
-    Setup timestamp handling and locking mode
-  */
-  leaves= lex->select_lex.leaf_tables;
-  for (tl= leaves; tl; tl= tl->next_leaf)
-    Table *table= tl->table;
-    /* Only auto-set the timestamp column if it is not modified explicitly */
-    if (table->timestamp_field && table->timestamp_field->isWriteSet())
-      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-
-    /* if the table will be updated then check that it is unique */
-    if (table->map & tables_for_update)
-      table->mark_columns_needed_for_update();
-      /*
-        If the table will be updated we should not downgrade the lock for it and ...
-      */
-      /*
-        If we are using the binary log, we need TL_READ_NO_INSERT to get
-        the correct order of statements. Otherwise, we use a TL_READ lock to ...
-      */
-      tl->lock_type= TL_READ;
-
-      /* Update Table::lock_type accordingly. */
-      if (!tl->placeholder())
-        tl->table->reginfo.lock_type= tl->lock_type;
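The lock downgrade in this loop is driven by replication: TL_READ_NO_INSERT blocks concurrent inserts so that statements reach the binary log in a consistent order, while plain TL_READ allows concurrent inserts on engines that support them, which is cheaper for tables this statement only reads.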
-  /* now lock and fill tables */
-  if (lock_tables(session, table_list, table_count, &need_reopen))
-
-  /*
-    We have to reopen tables since some of them were altered or dropped
-    during lock_tables() or something was done with their triggers.
-    Let us do some cleanups to be able to do setup_table() and setup_fields()
-  */
-  List_iterator_fast<Item> it(*fields);
-  session->close_tables_for_reopen(&table_list);
-  /*
-    Check that we are not using a table that we are updating, but we should
-    skip all tables of the UPDATE SELECT itself
-  */
-  lex->select_lex.exclude_from_table_unique_test= true;
-  /* We only need SELECT privilege for columns in the values list */
-  for (tl= leaves; tl; tl= tl->next_leaf)
-    if (tl->lock_type != TL_READ &&
-        tl->lock_type != TL_READ_NO_INSERT)
-      TableList *duplicate;
-      if ((duplicate= unique_table(session, tl, table_list, 0)))
-        my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->alias);
-
-  /*
-    Set exclude_from_table_unique_test value back to false. It is needed for
-    a further check in multi_update::prepare whether to use record cache.
-  */
-  lex->select_lex.exclude_from_table_unique_test= false;
-
-  if (session->fill_derived_tables() &&
-      mysql_handle_derived(lex, &mysql_derived_filling))
-/*
-  Setup multi-update handling and call SELECT to do the join
-*/
-bool mysql_multi_update(Session *session,
-                        TableList *table_list,
-                        enum enum_duplicates handle_duplicates, bool ignore,
-                        Select_Lex_Unit *unit, Select_Lex *select_lex)
-{
-  multi_update *result;
-  if (!(result= new multi_update(table_list,
-                                 session->lex->select_lex.leaf_tables,
-                                 handle_duplicates, ignore)))
-
-  session->abort_on_warning= true;
-
-  List<Item> total_list;
-  res= mysql_select(session, &select_lex->ref_pointer_array,
-                    table_list, select_lex->with_wild,
-                    conds, 0, (order_st *) NULL, (order_st *) NULL, (Item *) NULL,
-                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
-                    OPTION_SETUP_TABLES_DONE,
-                    result, unit, select_lex);
-  res|= session->is_error();
-
-  /* If we had another error reported earlier then this will be ignored */
-  result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
-
-  session->abort_on_warning= 0;
-multi_update::multi_update(TableList *table_list,
-                           TableList *leaves_list,
-                           List<Item> *field_list, List<Item> *value_list,
-                           enum enum_duplicates handle_duplicates_arg,
-  :all_tables(table_list), leaves(leaves_list),
-   tmp_tables(0), updated(0), found(0), fields(field_list),
-   values(value_list), table_count(0), copy_field(0),
-   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
-   transactional_tables(0), ignore(ignore_arg), error_handled(0)
-/*
-  Connect fields with tables and create a list of tables that are updated
-*/
-int multi_update::prepare(List<Item> &,
-  TableList *table_ref;
-  table_map tables_to_update;
-  List_iterator_fast<Item> field_it(*fields);
-  List_iterator_fast<Item> value_it(*values);
-  uint32_t i, max_fields;
-  uint32_t leaf_table_count= 0;
-
-  session->count_cuted_fields= CHECK_FIELD_WARN;
-  session->cuted_fields=0L;
-  session->set_proc_info("updating main table");
-
-  tables_to_update= get_table_map(fields);
-
-  if (!tables_to_update)
-    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
-
-  /*
-    We have to check values after setup_tables to get covering_keys right in ...
-  */
-  if (setup_fields(session, 0, *values, MARK_COLUMNS_READ, 0, 0))
-
-  /*
-    Save tables being updated in update_tables
-    update_table->shared is the position of the table
-    Don't use key read on tables that are updated
-  */
-  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
-    /* TODO: add support for views of joins */
-    Table *table=table_ref->table;
-    if (tables_to_update & table->map)
-      TableList *tl= (TableList*) session->memdup((char*) table_ref,
-      update_tables.push_back(tl);
-      tl->shared= table_count++;
-      table->no_keyread=1;
-      table->covering_keys.reset();
-      table->pos_in_table_list= tl;
-
-  table_count= update_tables.size();
-
-  tmp_tables = (Table**) session->calloc(sizeof(Table *) * table_count);
-  tmp_table_param = (Tmp_Table_Param*) session->calloc(sizeof(Tmp_Table_Param) *
-                                                       table_count);
-  fields_for_table= (List_item **) session->alloc(sizeof(List_item *) *
-                                                  table_count);
-  values_for_table= (List_item **) session->alloc(sizeof(List_item *) *
-                                                  table_count);
-  if (session->is_fatal_error)
-  for (i=0 ; i < table_count ; i++)
-    fields_for_table[i]= new List_item;
-    values_for_table[i]= new List_item;
-  if (session->is_fatal_error)
-
-  /* Split fields into fields_for_table[] and values_for_table[] */
-  while ((item= (Item_field *) field_it++))
-    Item *value= value_it++;
-    uint32_t offset= item->field->table->pos_in_table_list->shared;
-    fields_for_table[offset]->push_back(item);
-    values_for_table[offset]->push_back(value);
-  if (session->is_fatal_error)
-
-  /* Allocate copy fields */
-  for (i=0 ; i < table_count ; i++)
-    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
-  copy_field= new CopyField[max_fields];
-  return(session->is_fatal_error != 0);
-/*
-  Check if a table is safe to update on the fly
-
-    safe_update_on_fly()
-    session             Thread handler
-    join_tab            How the table is used in the join
-    all_tables          List of tables
-
-    We can update the first table in the join on the fly if we know that
-    a row in this table will never be read twice. This is true under
-    the following conditions:
-
-    - We are doing a table scan and the data is in a separate file (MyISAM) or
-      if we don't update a clustered key.
-
-    - We are doing a range scan and we don't update the scan key or
-      the primary key for a clustered table handler.
-
-    - Table is not joined to itself.
-
-    This function gets information about fields to be updated from
-    the Table::write_set bitmap.
-
-    This code is somewhat dependent on how make_join_readinfo() works.
-
-    0           Not safe to update
-*/
-static bool safe_update_on_fly(Session *session, JOIN_TAB *join_tab,
-                               TableList *table_ref, TableList *all_tables)
-{
-  Table *table= join_tab->table;
-  if (unique_table(session, table_ref, all_tables, 0))
-  switch (join_tab->type) {
-    return true;                        // At most one matching row
-  case JT_REF_OR_NULL:
-    return !is_key_used(table, join_tab->ref.key, table->write_set);
-    /* If range search on index */
-    if (join_tab->quick)
-      return !join_tab->quick->is_keys_used(table->write_set);
-    /* If scanning in clustered key */
-    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
-        table->s->primary_key < MAX_KEY)
-      return !is_key_used(table, table->s->primary_key, table->write_set);
-    break;                              // Avoid compiler warning
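The access-type switch implements the rules from the comment block: const-like plans (at most one matching row) can never revisit a row, so they are always safe; ref-style lookups are safe only when the lookup key itself is not among the updated columns (the is_key_used test against write_set); and a full scan is safe unless a range scan updates its scan keys, or the scan runs over a clustered primary key whose columns are updated, since changing the key can move the row ahead of the scan position and cause it to be read twice.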
-/*
-  Initialize tables for multi-table update
-
-    - Update the first table in the join on the fly, if possible
-    - Create temporary tables to store changed values for all other tables
-      that are updated (and main_table if the above doesn't hold).
-*/
-int multi_update::initialize_tables(JOIN *join)
-{
-  if ((session->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
-  main_table= join->join_tab->table;
-
-  /* Any update has at least one pair (field, value) */
-  assert(fields->elements);
-  /* Create a temporary table for keys to all tables, except main table */
-  for (list<TableList*>::iterator it= update_tables.begin();
-       it != update_tables.end();
-    Table *table= (*it)->table;
-    uint32_t cnt= (*it)->shared;
-    List<Item> temp_fields;
-    Tmp_Table_Param *tmp_param;
-
-    table->mark_columns_needed_for_update();
-      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
-    if (table == main_table)            // First table in join
-      if (safe_update_on_fly(session, join->join_tab, (*it), all_tables))
-        table_to_update= main_table;    // Update table on the fly
-    table->prepare_for_position();
-
-    tmp_param= tmp_table_param+cnt;
-
-    /*
-      Create a temporary table to store all fields that are changed for this
-      table. The first field in the temporary table is a pointer to the
-      original row so that we can find and update it. For the updatable
-      VIEW a few following fields are rowids of tables used in the CHECK ...
-    */
-    List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
-      Field_varstring *field= new Field_varstring(tbl->file->ref_length, 0,
-                                                  tbl->alias, tbl->s, &my_charset_bin);
-      /*
-        The field will be converted to varstring when creating tmp table if
-        table to be updated was created by mysql 4.1. Deny this.
-      */
-      Item_field *ifield= new Item_field((Field *) field);
-      ifield->maybe_null= 0;
-      if (temp_fields.push_back(ifield))
-    } while ((tbl= tbl_it++));
-
-    temp_fields.concat(fields_for_table[cnt]);
-
-    /* Make a unique key over the first field to avoid duplicated updates */
-    memset(&group, 0, sizeof(group));
-    group.item= (Item**) temp_fields.head_ref();
-
-    tmp_param->quick_group=1;
-    tmp_param->field_count=temp_fields.elements;
-    tmp_param->group_parts=1;
-    tmp_param->group_length= table->file->ref_length;
-    if (!(tmp_tables[cnt]=create_tmp_table(session,
-                                           (order_st*) &group, 0, 0,
-                                           TMP_TABLE_ALL_COLUMNS,
-    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
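The machinery above is a two-pass scheme: rows that cannot be updated on the fly are recorded as (rowid, new values) in a temporary table whose unique key on the rowid suppresses duplicate updates, and do_updates() later replays them. A hedged, self-contained sketch of that idea with hypothetical types (std::map plays the role of the unique key):

    #include <cstdint>
    #include <map>
    #include <string>

    using RowId= uint64_t;

    struct DeferredUpdates
    {
      std::map<RowId, std::string> changes;   // unique key over the rowid

      // Pass 1 (send_data): remember the first change per row; a second
      // insert for the same rowid is a no-op, like the unique-key trick.
      void record(RowId rid, const std::string &new_value)
      {
        changes.emplace(rid, new_value);
      }

      // Pass 2 (do_updates): re-read each rowid and apply the change once.
      template <class ApplyFn>
      void apply_all(ApplyFn apply)
      {
        for (const auto &change : changes)
          apply(change.first, change.second);
      }
    };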
-multi_update::~multi_update()
-{
-  for (list<TableList*>::iterator it= update_tables.begin();
-       it != update_tables.end();
-    table->table->no_keyread= table->table->no_cache= 0;
-      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
-
-  for (uint32_t cnt = 0; cnt < table_count; cnt++)
-    if (tmp_tables[cnt])
-      tmp_tables[cnt]->free_tmp_table(session);
-      tmp_table_param[cnt].cleanup();
-
-  delete [] copy_field;
-  session->count_cuted_fields= CHECK_FIELD_IGNORE;      // Restore this setting
-  assert(trans_safe || !updated ||
-         session->transaction.all.modified_non_trans_table);
-bool multi_update::send_data(List<Item> &)
-{
-  for (list<TableList*>::iterator it= update_tables.begin();
-       it != update_tables.end();
-    Table *table= (*it)->table;
-    uint32_t offset= (*it)->shared;
-    /*
-      Check if we are using outer join and we didn't find the row,
-      or if we have already updated this row in the previous call to this ...
-
-      The same row may be presented here several times in a join of type
-      UPDATE t1 FROM t1,t2 SET t1.a=t2.a
-
-      In this case we will do the update for the first found row combination.
-      The join algorithm guarantees that we will not find a row in ...
-    */
-    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
-
-    /*
-      We can use compare_record() to optimize away updates if
-      the table handler is returning all columns or if
-      all updated columns are read
-    */
-    if (table == table_to_update)
-      bool can_compare_record;
-      can_compare_record= (!(table->file->ha_table_flags() &
-                             HA_PARTIAL_COLUMN_READ) ||
-                           bitmap_is_subset(table->write_set,
-                                            table->read_set));
-      table->status|= STATUS_UPDATED;
-      table->storeRecord();
-      if (fill_record(session, *fields_for_table[offset],
-                      *values_for_table[offset], 0))
-
-      if (!can_compare_record || table->compare_record())
-        /*
-          Inform the main table that we are going to update the table even
-          while we may be scanning it. This will flush the read cache ...
-        */
-        main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
-        if ((error=table->file->ha_update_row(table->record[1],
-                                              table->record[0])) &&
-            error != HA_ERR_RECORD_IS_THE_SAME)
-              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
-            /*
-              If (ignore && error is ignorable) we don't have to
-              do anything; otherwise...
-            */
-            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
-              flags|= ME_FATALERROR; /* Other handler errors are fatal */
-            prepare_record_for_error_message(error, table);
-            table->file->print_error(error,MYF(flags));
-
-          if (error == HA_ERR_RECORD_IS_THE_SAME)
-
-          /* non-transactional or transactional table got modified */
-          /* either multi_update class' flag is raised in its branch */
-          if (table->file->has_transactions())
-            transactional_tables= 1;
-            session->transaction.stmt.modified_non_trans_table= true;
-      Table *tmp_table= tmp_tables[offset];
-      /*
-        For updatable VIEW store the rowid of the updated table and
-        the rowids of tables used in the CHECK OPTION condition.
-      */
-      uint32_t field_num= 0;
-      List_iterator_fast<Table> tbl_it(unupdated_check_opt_tables);
-        tbl->file->position(tbl->record[0]);
-        Field_varstring *ref_field=
-          reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
-        ref_field->store((char *)tbl->file->ref, tbl->file->ref_length,
-      } while ((tbl= tbl_it++));
-
-      /* Store regular updated fields in the row. */
-      fill_record(session,
-                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
-                  *values_for_table[offset], 1);
-
-      /* Write the row, ignoring duplicated updates to a row */
-      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
-      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
-        create_myisam_from_heap(session, tmp_table,
-                                tmp_table_param[offset].start_recinfo,
-                                &tmp_table_param[offset].recinfo,
-          return(1);                    // Not a table_is_full error
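The duplicate-key errors tolerated here are expected: the temporary table was created with a unique key over the rowid precisely so that a second update to the same row is silently dropped. Any other ha_write_row failure is treated as the in-memory table running full, and create_myisam_from_heap() presumably converts it to an on-disk MyISAM table and retries the write; if even that fails, the whole update is aborted.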
-void multi_update::send_error(uint32_t errcode, const char *err)
-{
-  /* First send the error, whatever it is ... */
-  my_error(errcode, MYF(0), err);
-void multi_update::abort()
-{
-  /* the error was handled, or nothing was deleted and there are no side effects */
-  if (error_handled ||
-      (!session->transaction.stmt.modified_non_trans_table && !updated))
-
-  /*
-    If all tables that have been updated are trans safe then just do a rollback.
-    If not, attempt to do the remaining updates.
-  */
-  assert(session->transaction.stmt.modified_non_trans_table);
-  if (do_update && table_count > 1)
-    /* Add warning here */
-    /*
-      todo/fixme: do_update() is never called with the arg 1.
-      should it change the signature to become argless?
-    */
-  if (session->transaction.stmt.modified_non_trans_table)
-    session->transaction.all.modified_non_trans_table= true;
-  assert(trans_safe || !updated || session->transaction.stmt.modified_non_trans_table);
-int multi_update::do_updates()
-{
-  TableList *cur_table;
-  ha_rows org_updated;
-  Table *table, *tmp_table;
-  List_iterator_fast<Table> check_opt_it(unupdated_check_opt_tables);
-
-  do_update= 0;                         // Don't retry this function
-  for (list<TableList*>::iterator it= update_tables.begin();
-       it != update_tables.end();
-    bool can_compare_record;
-    uint32_t offset= cur_table->shared;
-
-    table= cur_table->table;
-    if (table == table_to_update)
-      continue;                         // Already updated
-    org_updated= updated;
-    tmp_table= tmp_tables[cur_table->shared];
-    tmp_table->file->extra(HA_EXTRA_CACHE);     // Change to read cache
-    (void) table->file->ha_rnd_init(0);
-    table->file->extra(HA_EXTRA_NO_CACHE);
-
-    check_opt_it.rewind();
-    while (Table *tbl= check_opt_it++)
-      if (tbl->file->ha_rnd_init(1))
-      tbl->file->extra(HA_EXTRA_CACHE);
-
-    /*
-      Setup copy functions to copy fields from the temporary table
-    */
-    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
-    Field **field= tmp_table->field +
-                   1 + unupdated_check_opt_tables.elements;     // Skip row pointers
-    CopyField *copy_field_ptr= copy_field, *copy_field_end;
-    for ( ; *field ; field++)
-      Item_field *item= (Item_field *) field_it++;
-      (copy_field_ptr++)->set(item->field, *field, 0);
-    copy_field_end= copy_field_ptr;
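The `1 + unupdated_check_opt_tables.elements` offset mirrors the row layout chosen in initialize_tables(): field 0 of each temporary table holds the updated table's rowid, followed by one rowid field per CHECK OPTION table, and only then the saved new values, so the copy functions are wired from those value fields back to the base table's fields.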
-    if ((local_error= tmp_table->file->ha_rnd_init(1)))
-
-    can_compare_record= (!(table->file->ha_table_flags() &
-                           HA_PARTIAL_COLUMN_READ) ||
-                         bitmap_is_subset(table->write_set,
-                                          table->read_set));
-      if (session->killed && trans_safe)
-      if ((local_error= tmp_table->file->rnd_next(tmp_table->record[0])))
-        if (local_error == HA_ERR_END_OF_FILE)
-        if (local_error == HA_ERR_RECORD_DELETED)
-          continue;                     // May happen on dup key
-
-      /* call rnd_pos() using rowids from the temporary table */
-      check_opt_it.rewind();
-        uint32_t field_num= 0;
-          Field_varstring *ref_field=
-            reinterpret_cast<Field_varstring *>(tmp_table->field[field_num]);
-          tbl->file->rnd_pos(tbl->record[0],
-                             (unsigned char *) ref_field->ptr
-                             + ref_field->length_bytes)))
-      } while ((tbl= check_opt_it++));
-
-      table->status|= STATUS_UPDATED;
-      table->storeRecord();
-
-      /* Copy data from the temporary table to the current table */
-      for (copy_field_ptr= copy_field;
-           copy_field_ptr != copy_field_end;
-        (*copy_field_ptr->do_copy)(copy_field_ptr);
-
-      if (!can_compare_record || table->compare_record())
-        if ((local_error= table->file->ha_update_row(table->record[1],
-                                                     table->record[0])) &&
-            local_error != HA_ERR_RECORD_IS_THE_SAME)
-              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
-        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
-
-    if (updated != org_updated)
-      if (table->file->has_transactions())
-        transactional_tables= 1;
-        trans_safe= 0;                  // Can't do safe rollback
-        session->transaction.stmt.modified_non_trans_table= true;
-    (void) table->file->ha_rnd_end();
-    (void) tmp_table->file->ha_rnd_end();
-    check_opt_it.rewind();
-    while (Table *tbl= check_opt_it++)
-      tbl->file->ha_rnd_end();
-
-  prepare_record_for_error_message(local_error, table);
-  table->file->print_error(local_error,MYF(ME_FATALERROR));
-
-  (void) table->file->ha_rnd_end();
-  (void) tmp_table->file->ha_rnd_end();
-  check_opt_it.rewind();
-  while (Table *tbl= check_opt_it++)
-    tbl->file->ha_rnd_end();
-
-  if (updated != org_updated)
-    if (table->file->has_transactions())
-      transactional_tables= 1;
-      session->transaction.stmt.modified_non_trans_table= true;
-/* out: 1 if error, 0 if success */
-bool multi_update::send_eof()
-{
-  char buff[STRING_BUFFER_USUAL_SIZE];
-  Session::killed_state killed_status= Session::NOT_KILLED;
-
-  session->set_proc_info("updating reference tables");
-
-  /*
-    Do the updates for the last n - 1 tables; returns 0 if ok.
-    error takes into account the killed status gained in do_updates()
-  */
-  int local_error= (table_count) ? do_updates() : 0;
-  /*
-    local_error is not set until after do_updates(), so a kill carried
-    out later must not affect binlogging.
-  */
-  killed_status= (local_error == 0) ? Session::NOT_KILLED : session->killed;
-  session->set_proc_info("end");
-
-  /*
-    Write the SQL statement to the binlog if we updated
-    rows and we succeeded, or if we updated some non-
-    transactional tables.
-
-    The query has to be binlogged because there is a modified
-    non-transactional table, either from the query's list or via
-    a stored routine: bug#13270,23333
-  */
-  assert(trans_safe || !updated ||
-         session->transaction.stmt.modified_non_trans_table);
-  if (local_error == 0 || session->transaction.stmt.modified_non_trans_table)
-    if (session->transaction.stmt.modified_non_trans_table)
-      session->transaction.all.modified_non_trans_table= true;
-
-  if (local_error != 0)
-    error_handled= true; // to force early leave from ::send_error()
-
-  if (local_error > 0) // if the above log write did not fail ...
-    /* Safety: If we haven't got an error before (can happen in do_updates) */
-    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
-
-  id= session->arg_of_last_insert_id_function ?
-      session->first_successful_insert_id_in_prev_stmt : 0;
-  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
-          (ulong) session->cuted_fields);
-  session->row_count_func=
-    (session->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
-  session->my_ok((ulong) session->row_count_func, id, buff);