1
/* Copyright (C) 2000-2006 MySQL AB
3
This program is free software; you can redistribute it and/or modify
4
it under the terms of the GNU General Public License as published by
5
the Free Software Foundation; version 2 of the License.
7
This program is distributed in the hope that it will be useful,
8
but WITHOUT ANY WARRANTY; without even the implied warranty of
9
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10
GNU General Public License for more details.
12
You should have received a copy of the GNU General Public License
13
along with this program; if not, write to the Free Software
14
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
18
Single table and multi table updates of tables.
19
Multi-table updates were introduced by Sinisa & Monty
22
#include "mysql_priv.h"
23
#include "sql_select.h"
25
/* Return 0 if row hasn't changed */
27
bool compare_record(TABLE *table)
29
if (table->s->blob_fields + table->s->varchar_fields == 0)
30
return cmp_record(table,record[1]);
31
/* Compare null bits */
32
if (memcmp(table->null_flags,
33
table->null_flags+table->s->rec_buff_length,
34
table->s->null_bytes))
35
return TRUE; // Diff in NULL value
36
/* Compare updated fields */
37
for (Field **ptr= table->field ; *ptr ; ptr++)
39
if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
40
(*ptr)->cmp_binary_offset(table->s->rec_buff_length))
48
check that all fields are real fields
56
TRUE Items can't be used in UPDATE
60
static bool check_fields(THD *thd, List<Item> &items)
62
List_iterator<Item> it(items);
68
if (!(field= item->filed_for_view_update()))
70
/* item has name, because it comes from VIEW SELECT list */
71
my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
75
we make temporary copy of Item_field, to avoid influence of changing
76
result_field on Item_ref which refer on this field
78
thd->change_item_tree(it.ref(), new Item_field(thd, field));
85
Re-read record if more columns are needed for error message.
87
If we got a duplicate key error, we want to write an error
88
message containing the value of the duplicate key. If we do not have
89
all fields of the key value in record[0], we need to re-read the
90
record with a proper read_set.
92
@param[in] error error number
93
@param[in] table table
96
static void prepare_record_for_error_message(int error, TABLE *table)
101
MY_BITMAP unique_map; /* Fields in offended unique. */
102
my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
103
DBUG_ENTER("prepare_record_for_error_message");
106
Only duplicate key errors print the key value.
107
If storage engine does always read all columns, we have the value alraedy.
109
if ((error != HA_ERR_FOUND_DUPP_KEY) ||
110
!(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
114
Get the number of the offended index.
115
We will see MAX_KEY if the engine cannot determine the affected index.
117
if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
120
/* Create unique_map with all fields used by that index. */
121
bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
122
table->mark_columns_used_by_index_no_reset(keynr, &unique_map);
124
/* Subtract read_set and write_set. */
125
bitmap_subtract(&unique_map, table->read_set);
126
bitmap_subtract(&unique_map, table->write_set);
129
If the unique index uses columns that are neither in read_set
130
nor in write_set, we must re-read the record.
131
Otherwise no need to do anything.
133
if (bitmap_is_clear_all(&unique_map))
136
/* Get identifier of last read record into table->file->ref. */
137
table->file->position(table->record[0]);
138
/* Add all fields used by unique index to read_set. */
139
bitmap_union(table->read_set, &unique_map);
140
/* Tell the engine about the new set. */
141
table->file->column_bitmaps_signal();
142
/* Read record that is identified by table->file->ref. */
143
(void) table->file->rnd_pos(table->record[1], table->file->ref);
144
/* Copy the newly read columns into the new record. */
145
for (field_p= table->field; (field= *field_p); field_p++)
146
if (bitmap_is_set(&unique_map, field->field_index))
147
field->copy_from_tmp(table->s->rec_buff_length);
159
fields fields for update
160
values values of fields for update
161
conds WHERE clause expression
162
order_num number of elemen in ORDER BY clause
163
order ORDER BY clause list
165
handle_duplicates how to handle duplicates
169
2 - privilege check and opening table passed, but we need to convert to
170
multi-update because of view substitution
174
int mysql_update(THD *thd,
175
TABLE_LIST *table_list,
179
uint order_num, ORDER *order,
181
enum enum_duplicates handle_duplicates, bool ignore)
183
bool using_limit= limit != HA_POS_ERROR;
184
bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
185
bool used_key_is_modified, transactional_table, will_batch;
186
bool can_compare_record;
187
int error, loc_error;
188
uint used_index= MAX_KEY, dup_key_found;
189
bool need_sort= TRUE;
191
ha_rows updated, found;
192
key_map old_covering_keys;
196
SELECT_LEX *select_lex= &thd->lex->select_lex;
199
List<Item> all_fields;
200
THD::killed_state killed_status= THD::NOT_KILLED;
201
DBUG_ENTER("mysql_update");
205
if (open_tables(thd, &table_list, &table_count, 0))
208
if (!lock_tables(thd, table_list, table_count, &need_reopen))
212
close_tables_for_reopen(thd, &table_list);
215
if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||
216
(thd->fill_derived_tables() &&
217
mysql_handle_derived(thd->lex, &mysql_derived_filling)))
220
MYSQL_UPDATE_START();
221
thd_proc_info(thd, "init");
222
table= table_list->table;
224
/* Calculate "table->covering_keys" based on the WHERE */
225
table->covering_keys= table->s->keys_in_use;
226
table->quick_keys.clear_all();
228
if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
231
old_covering_keys= table->covering_keys; // Keys used in WHERE
232
/* Check the fields we are going to modify */
233
if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
234
goto abort; /* purecov: inspected */
235
if (table->timestamp_field)
237
// Don't set timestamp column if this is modified
238
if (bitmap_is_set(table->write_set,
239
table->timestamp_field->field_index))
240
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
243
if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
244
table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
245
bitmap_set_bit(table->write_set,
246
table->timestamp_field->field_index);
250
if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
252
free_underlaid_joins(thd, select_lex);
253
goto abort; /* purecov: inspected */
256
if (select_lex->inner_refs_list.elements &&
257
fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
265
Item::cond_result cond_value;
266
conds= remove_eq_conds(thd, conds, &cond_value);
267
if (cond_value == Item::COND_FALSE)
268
limit= 0; // Impossible WHERE
272
If a timestamp field settable on UPDATE is present then to avoid wrong
273
update force the table handler to retrieve write-only fields to be able
274
to compare records and detect data change.
276
if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ &&
277
table->timestamp_field &&
278
(table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
279
table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
280
bitmap_union(table->read_set, table->write_set);
281
// Don't count on usage of 'only index' when calculating which key to use
282
table->covering_keys.clear_all();
284
/* Update the table->file->stats.records number */
285
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
287
select= make_select(table, 0, 0, conds, 0, &error);
288
if (error || !limit ||
289
(select && select->check_quick(thd, safe_update, limit)))
292
free_underlaid_joins(thd, select_lex);
294
goto abort; // Error in where
296
my_ok(thd); // No matching records
299
if (!select && limit != HA_POS_ERROR)
301
if ((used_index= get_index_for_order(table, order, limit)) != MAX_KEY)
304
/* If running in safe sql mode, don't allow updates without keys */
305
if (table->quick_keys.is_clear_all())
307
thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
308
if (safe_update && !using_limit)
310
my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
311
ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
316
table->mark_columns_needed_for_update();
318
/* Check if we are modifying a key that we are used to search with */
320
if (select && select->quick)
322
used_index= select->quick->index;
323
used_key_is_modified= (!select->quick->unique_key_range() &&
324
select->quick->is_keys_used(table->write_set));
328
used_key_is_modified= 0;
329
if (used_index == MAX_KEY) // no index for sort order
330
used_index= table->file->key_used_on_scan;
331
if (used_index != MAX_KEY)
332
used_key_is_modified= is_key_used(table, used_index, table->write_set);
336
if (used_key_is_modified || order)
339
We can't update table directly; We must first search after all
340
matching rows before updating the table!
342
if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
345
table->mark_columns_used_by_index(used_index);
349
table->use_all_columns();
352
/* note: We avoid sorting if we sort on the used index */
353
if (order && (need_sort || used_key_is_modified))
356
Doing an ORDER BY; Let filesort find and sort the rows we are going
358
NOTE: filesort will call table->prepare_for_position()
361
SORT_FIELD *sortorder;
362
ha_rows examined_rows;
364
table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
365
MYF(MY_FAE | MY_ZEROFILL));
366
if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
367
(table->sort.found_records= filesort(thd, table, sortorder, length,
375
Filesort has already found and selected the rows we want to update,
376
so we don't need the where clause
384
We are doing a search on a key that is updated. In this case
385
we go trough the matching rows, save a pointer to them and
386
update these in a separate loop based on the pointer.
390
if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
391
DISK_BUFFER_SIZE, MYF(MY_WME)))
394
/* If quick select is used, initialize it before retrieving rows. */
395
if (select && select->quick && select->quick->reset())
397
table->file->try_semi_consistent_read(1);
400
When we get here, we have one of the following options:
401
A. used_index == MAX_KEY
402
This means we should use full table scan, and start it with
403
init_read_record call
404
B. used_index != MAX_KEY
405
B.1 quick select is used, start the scan with init_read_record
406
B.2 quick select is not used, this is full index scan (with LIMIT)
407
Full index scan must be started with init_read_record_idx
410
if (used_index == MAX_KEY || (select && select->quick))
411
init_read_record(&info,thd,table,select,0,1);
413
init_read_record_idx(&info, thd, table, 1, used_index);
415
thd_proc_info(thd, "Searching rows for update");
416
ha_rows tmp_limit= limit;
418
while (!(error=info.read_record(&info)) && !thd->killed)
420
if (!(select && select->skip_record()))
422
if (table->file->was_semi_consistent_read())
423
continue; /* repeat the read of the same row if it still exists */
425
table->file->position(table->record[0]);
426
if (my_b_write(&tempfile,table->file->ref,
427
table->file->ref_length))
429
error=1; /* purecov: inspected */
430
break; /* purecov: inspected */
432
if (!--limit && using_limit)
439
table->file->unlock_row();
441
if (thd->killed && !error)
444
table->file->try_semi_consistent_read(0);
445
end_read_record(&info);
447
/* Change select to use tempfile */
450
delete select->quick;
451
if (select->free_cond)
458
select= new SQL_SELECT;
461
if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
462
error=1; /* purecov: inspected */
463
select->file=tempfile; // Read row ptrs from this file
468
table->restore_column_maps_after_mark_index();
472
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
474
if (select && select->quick && select->quick->reset())
476
table->file->try_semi_consistent_read(1);
477
init_read_record(&info,thd,table,select,0,1);
480
/* Generate an error when trying to set a NOT NULL field to NULL. */
481
thd->count_cuted_fields= ignore ? CHECK_FIELD_WARN
482
: CHECK_FIELD_ERROR_FOR_NULL;
483
thd->cuted_fields=0L;
484
thd_proc_info(thd, "Updating");
486
transactional_table= table->file->has_transactions();
487
thd->abort_on_warning= test(!ignore &&
488
(thd->variables.sql_mode &
489
(MODE_STRICT_TRANS_TABLES |
490
MODE_STRICT_ALL_TABLES)));
491
will_batch= !table->file->start_bulk_update();
494
Assure that we can use position()
495
if we need to create an error message.
497
if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
498
table->prepare_for_position();
501
We can use compare_record() to optimize away updates if
502
the table handler is returning all columns OR if
503
if all updated columns are read
505
can_compare_record= (!(table->file->ha_table_flags() &
506
HA_PARTIAL_COLUMN_READ) ||
507
bitmap_is_subset(table->write_set, table->read_set));
509
while (!(error=info.read_record(&info)) && !thd->killed)
511
if (!(select && select->skip_record()))
513
if (table->file->was_semi_consistent_read())
514
continue; /* repeat the read of the same row if it still exists */
516
store_record(table,record[1]);
517
if (fill_record(thd, fields, values, 0))
518
break; /* purecov: inspected */
522
if (!can_compare_record || compare_record(table))
527
Typically a batched handler can execute the batched jobs when:
528
1) When specifically told to do so
529
2) When it is not a good idea to batch anymore
530
3) When it is necessary to send batch for other reasons
531
(One such reason is when READ's must be performed)
533
1) is covered by exec_bulk_update calls.
534
2) and 3) is handled by the bulk_update_row method.
536
bulk_update_row can execute the updates including the one
537
defined in the bulk_update_row or not including the row
538
in the call. This is up to the handler implementation and can
539
vary from call to call.
541
The dup_key_found reports the number of duplicate keys found
542
in those updates actually executed. It only reports those if
543
the extra call with HA_EXTRA_IGNORE_DUP_KEY have been issued.
544
If this hasn't been issued it returns an error code and can
545
ignore this number. Thus any handler that implements batching
546
for UPDATE IGNORE must also handle this extra call properly.
548
If a duplicate key is found on the record included in this
549
call then it should be included in the count of dup_key_found
550
and error should be set to 0 (only if these errors are ignored).
552
error= table->file->ha_bulk_update_row(table->record[1],
555
limit+= dup_key_found;
556
updated-= dup_key_found;
560
/* Non-batched update */
561
error= table->file->ha_update_row(table->record[1],
564
if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
566
if (error != HA_ERR_RECORD_IS_THE_SAME)
572
table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
575
If (ignore && error is ignorable) we don't have to
576
do anything; otherwise...
580
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
581
flags|= ME_FATALERROR; /* Other handler errors are fatal */
583
prepare_record_for_error_message(error, table);
584
table->file->print_error(error,MYF(flags));
590
if (!--limit && using_limit)
593
We have reached end-of-file in most common situations where no
594
batching has occurred and if batching was supposed to occur but
595
no updates were made and finally when the batch execution was
596
performed without error and without finding any duplicate keys.
597
If the batched updates were performed with errors we need to
598
check and if no error but duplicate key's found we need to
599
continue since those are not counted for in limit.
602
((error= table->file->exec_bulk_update(&dup_key_found)) ||
607
/* purecov: begin inspected */
609
The handler should not report error of duplicate keys if they
610
are ignored. This is a requirement on batching handlers.
612
prepare_record_for_error_message(error, table);
613
table->file->print_error(error,MYF(0));
619
Either an error was found and we are ignoring errors or there
620
were duplicate keys found. In both cases we need to correct
621
the counters and continue the loop.
623
limit= dup_key_found; //limit is 0 when we get here so need to +
624
updated-= dup_key_found;
628
error= -1; // Simulate end of file
634
table->file->unlock_row();
639
Caching the killed status to pass as the arg to query event constructor;
640
The cached value can not change whereas the killed status can
641
(externally) since this point and change of the latter won't affect
643
It's assumed that if an error was set in combination with an effective
644
killed status then the error is due to killing.
646
killed_status= thd->killed; // get the status of the volatile
647
// simulated killing after the loop must be ineffective for binlogging
648
DBUG_EXECUTE_IF("simulate_kill_bug27571",
650
thd->killed= THD::KILL_QUERY;
652
error= (killed_status == THD::NOT_KILLED)? error : 1;
656
(loc_error= table->file->exec_bulk_update(&dup_key_found)))
658
An error has occurred when a batched update was performed and returned
659
an error indication. It cannot be an allowed duplicate key error since
660
we require the batching handler to treat this as a normal behavior.
662
Otherwise we simply remove the number of duplicate keys records found
663
in the batched update.
666
/* purecov: begin inspected */
667
prepare_record_for_error_message(loc_error, table);
668
table->file->print_error(loc_error,MYF(ME_FATALERROR));
673
updated-= dup_key_found;
675
table->file->end_bulk_update();
676
table->file->try_semi_consistent_read(0);
678
if (!transactional_table && updated > 0)
679
thd->transaction.stmt.modified_non_trans_table= TRUE;
681
end_read_record(&info);
683
thd_proc_info(thd, "end");
684
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
687
error < 0 means really no error at all: we processed all rows until the
688
last one without error. error > 0 means an error (e.g. unique key
689
violation and no IGNORE or REPLACE). error == 0 is also an error (if
690
preparing the record or invoking before triggers fails). See
691
ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
692
Sometimes we want to binlog even if we updated no rows, in case user used
693
it to be sure master and slave are in same state.
695
if ((error < 0) || thd->transaction.stmt.modified_non_trans_table)
697
if (mysql_bin_log.is_open())
701
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
702
thd->query, thd->query_length,
703
transactional_table, FALSE, killed_status) &&
706
error=1; // Rollback update
709
if (thd->transaction.stmt.modified_non_trans_table)
710
thd->transaction.all.modified_non_trans_table= TRUE;
712
DBUG_ASSERT(transactional_table || !updated || thd->transaction.stmt.modified_non_trans_table);
713
free_underlaid_joins(thd, select_lex);
715
/* If LAST_INSERT_ID(X) was used, report X */
716
id= thd->arg_of_last_insert_id_function ?
717
thd->first_successful_insert_id_in_prev_stmt : 0;
722
char buff[STRING_BUFFER_USUAL_SIZE];
723
sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
724
(ulong) thd->cuted_fields);
726
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
727
my_ok(thd, (ulong) thd->row_count_func, id, buff);
728
DBUG_PRINT("info",("%ld records updated", (long) updated));
730
thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
731
thd->abort_on_warning= 0;
732
DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
736
free_underlaid_joins(thd, select_lex);
740
table->file->extra(HA_EXTRA_NO_KEYREAD);
742
thd->abort_on_warning= 0;
750
Prepare items in UPDATE statement
753
mysql_prepare_update()
755
table_list - global/local table list
757
order_num - number of ORDER BY list entries
758
order - ORDER BY clause list
764
bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
765
Item **conds, uint order_num, ORDER *order)
768
List<Item> all_fields;
769
SELECT_LEX *select_lex= &thd->lex->select_lex;
770
DBUG_ENTER("mysql_prepare_update");
773
Statement-based replication of UPDATE ... LIMIT is not safe as order of
774
rows is not defined, so in mixed mode we go to row-based.
776
Note that we may consider a statement as safe if ORDER BY primary_key
777
is present. However it may confuse users to see very similar statements
778
replicated differently.
780
if (thd->lex->current_select->select_limit)
782
thd->lex->set_stmt_unsafe();
783
thd->set_current_stmt_binlog_row_based_if_mixed();
786
thd->lex->allow_sum_func= 0;
788
if (setup_tables_and_check_access(thd, &select_lex->context,
789
&select_lex->top_join_list,
791
&select_lex->leaf_tables,
793
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
794
select_lex->setup_ref_array(thd, order_num) ||
795
setup_order(thd, select_lex->ref_pointer_array,
796
table_list, all_fields, all_fields, order))
799
/* Check that we are not using table that we are updating in a sub select */
801
TABLE_LIST *duplicate;
802
if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
804
update_non_unique_table_error(table_list, "UPDATE", duplicate);
805
my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
809
select_lex->fix_prepare_information(thd, conds, &fake_conds);
814
/***************************************************************************
815
Update multiple tables from join
816
***************************************************************************/
819
Get table map for list of Item_field
822
static table_map get_table_map(List<Item> *items)
824
List_iterator_fast<Item> item_it(*items);
828
while ((item= (Item_field *) item_it++))
829
map|= item->used_tables();
830
DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
836
make update specific preparation and checks after opening tables
839
mysql_multi_update_prepare()
847
int mysql_multi_update_prepare(THD *thd)
850
TABLE_LIST *table_list= lex->query_tables;
851
TABLE_LIST *tl, *leaves;
852
List<Item> *fields= &lex->select_lex.item_list;
853
table_map tables_for_update;
856
if this multi-update was converted from usual update, here is table
857
counter else junk will be assigned here, but then replaced with real
858
count in open_tables()
860
uint table_count= lex->table_count;
861
const bool using_lock_tables= thd->locked_tables != 0;
862
bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
863
bool need_reopen= FALSE;
864
DBUG_ENTER("mysql_multi_update_prepare");
866
/* following need for prepared statements, to run next time multi-update */
867
thd->lex->sql_command= SQLCOM_UPDATE_MULTI;
871
/* open tables and create derived ones, but do not lock and fill them */
872
if (((original_multiupdate || need_reopen) &&
873
open_tables(thd, &table_list, &table_count, 0)) ||
874
mysql_handle_derived(lex, &mysql_derived_prepare))
877
setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables()
878
second time, but this call will do nothing (there are check for second
879
call in setup_tables()).
882
if (setup_tables_and_check_access(thd, &lex->select_lex.context,
883
&lex->select_lex.top_join_list,
885
&lex->select_lex.leaf_tables, false))
888
if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
891
if (update_view && check_fields(thd, *fields))
896
tables_for_update= get_table_map(fields);
899
Setup timestamp handling and locking mode
901
leaves= lex->select_lex.leaf_tables;
902
for (tl= leaves; tl; tl= tl->next_leaf)
904
TABLE *table= tl->table;
905
/* Only set timestamp column if this is not modified */
906
if (table->timestamp_field &&
907
bitmap_is_set(table->write_set,
908
table->timestamp_field->field_index))
909
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
911
/* if table will be updated then check that it is unique */
912
if (table->map & tables_for_update)
914
table->mark_columns_needed_for_update();
915
DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
917
If table will be updated we should not downgrade lock for it and
923
DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias));
925
If we are using the binary log, we need TL_READ_NO_INSERT to get
926
correct order of statements. Otherwise, we use a TL_READ lock to
929
tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ;
931
/* Update TABLE::lock_type accordingly. */
932
if (!tl->placeholder() && !using_lock_tables)
933
tl->table->reginfo.lock_type= tl->lock_type;
937
/* now lock and fill tables */
938
if (lock_tables(thd, table_list, table_count, &need_reopen))
944
We have to reopen tables since some of them were altered or dropped
945
during lock_tables() or something was done with their triggers.
946
Let us do some cleanups to be able do setup_table() and setup_fields()
949
List_iterator_fast<Item> it(*fields);
954
/* We have to cleanup translation tables of views. */
955
for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global)
956
tbl->cleanup_items();
958
close_tables_for_reopen(thd, &table_list);
963
Check that we are not using table that we are updating, but we should
964
skip all tables of UPDATE SELECT itself
966
lex->select_lex.exclude_from_table_unique_test= TRUE;
967
/* We only need SELECT privilege for columns in the values list */
968
for (tl= leaves; tl; tl= tl->next_leaf)
970
if (tl->lock_type != TL_READ &&
971
tl->lock_type != TL_READ_NO_INSERT)
973
TABLE_LIST *duplicate;
974
if ((duplicate= unique_table(thd, tl, table_list, 0)))
976
update_non_unique_table_error(table_list, "UPDATE", duplicate);
982
Set exclude_from_table_unique_test value back to FALSE. It is needed for
983
further check in multi_update::prepare whether to use record cache.
985
lex->select_lex.exclude_from_table_unique_test= FALSE;
987
if (thd->fill_derived_tables() &&
988
mysql_handle_derived(lex, &mysql_derived_filling))
996
Setup multi-update handling and call SELECT to do the join
999
bool mysql_multi_update(THD *thd,
1000
TABLE_LIST *table_list,
1005
enum enum_duplicates handle_duplicates, bool ignore,
1006
SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
1008
multi_update *result;
1010
DBUG_ENTER("mysql_multi_update");
1012
if (!(result= new multi_update(table_list,
1013
thd->lex->select_lex.leaf_tables,
1015
handle_duplicates, ignore)))
1018
thd->abort_on_warning= test(thd->variables.sql_mode &
1019
(MODE_STRICT_TRANS_TABLES |
1020
MODE_STRICT_ALL_TABLES));
1022
List<Item> total_list;
1023
res= mysql_select(thd, &select_lex->ref_pointer_array,
1024
table_list, select_lex->with_wild,
1026
conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
1028
options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
1029
OPTION_SETUP_TABLES_DONE,
1030
result, unit, select_lex);
1031
DBUG_PRINT("info",("res: %d report_error: %d", res,
1032
(int) thd->is_error()));
1033
res|= thd->is_error();
1036
/* If we had another error reported earlier then this will be ignored */
1037
result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
1041
thd->abort_on_warning= 0;
1046
multi_update::multi_update(TABLE_LIST *table_list,
1047
TABLE_LIST *leaves_list,
1048
List<Item> *field_list, List<Item> *value_list,
1049
enum enum_duplicates handle_duplicates_arg,
1051
:all_tables(table_list), leaves(leaves_list), update_tables(0),
1052
tmp_tables(0), updated(0), found(0), fields(field_list),
1053
values(value_list), table_count(0), copy_field(0),
1054
handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
1055
transactional_tables(0), ignore(ignore_arg), error_handled(0)
1060
Connect fields with tables and create list of tables that are updated
1063
int multi_update::prepare(List<Item> ¬_used_values,
1064
SELECT_LEX_UNIT *lex_unit)
1066
TABLE_LIST *table_ref;
1068
table_map tables_to_update;
1070
List_iterator_fast<Item> field_it(*fields);
1071
List_iterator_fast<Item> value_it(*values);
1073
uint leaf_table_count= 0;
1074
DBUG_ENTER("multi_update::prepare");
1076
thd->count_cuted_fields= CHECK_FIELD_WARN;
1077
thd->cuted_fields=0L;
1078
thd_proc_info(thd, "updating main table");
1080
tables_to_update= get_table_map(fields);
1082
if (!tables_to_update)
1084
my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
1089
We have to check values after setup_tables to get covering_keys right in
1093
if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
1097
Save tables being updated in update_tables
1098
update_table->shared is position for table
1099
Don't use key read on tables that are updated
1103
for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1105
/* TODO: add support of view of join support */
1106
TABLE *table=table_ref->table;
1108
if (tables_to_update & table->map)
1110
TABLE_LIST *tl= (TABLE_LIST*) thd->memdup((char*) table_ref,
1114
update.link_in_list((uchar*) tl, (uchar**) &tl->next_local);
1115
tl->shared= table_count++;
1116
table->no_keyread=1;
1117
table->covering_keys.clear_all();
1118
table->pos_in_table_list= tl;
1123
table_count= update.elements;
1124
update_tables= (TABLE_LIST*) update.first;
1126
tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
1127
tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
1129
fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1131
values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1133
if (thd->is_fatal_error)
1135
for (i=0 ; i < table_count ; i++)
1137
fields_for_table[i]= new List_item;
1138
values_for_table[i]= new List_item;
1140
if (thd->is_fatal_error)
1143
/* Split fields into fields_for_table[] and values_by_table[] */
1145
while ((item= (Item_field *) field_it++))
1147
Item *value= value_it++;
1148
uint offset= item->field->table->pos_in_table_list->shared;
1149
fields_for_table[offset]->push_back(item);
1150
values_for_table[offset]->push_back(value);
1152
if (thd->is_fatal_error)
1155
/* Allocate copy fields */
1157
for (i=0 ; i < table_count ; i++)
1158
set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
1159
copy_field= new Copy_field[max_fields];
1160
DBUG_RETURN(thd->is_fatal_error != 0);
1165
Check if table is safe to update on fly
1168
safe_update_on_fly()
1170
join_tab How table is used in join
1171
all_tables List of tables
1174
We can update the first table in join on the fly if we know that
1175
a row in this table will never be read twice. This is true under
1176
the following conditions:
1178
- We are doing a table scan and the data is in a separate file (MyISAM) or
1179
if we don't update a clustered key.
1181
- We are doing a range scan and we don't update the scan key or
1182
the primary key for a clustered table handler.
1184
- Table is not joined to itself.
1186
This function gets information about fields to be updated from
1187
the TABLE::write_set bitmap.
1190
This code is a bit dependent of how make_join_readinfo() works.
1193
0 Not safe to update
1197
static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
1198
TABLE_LIST *table_ref, TABLE_LIST *all_tables)
1200
TABLE *table= join_tab->table;
1201
if (unique_table(thd, table_ref, all_tables, 0))
1203
switch (join_tab->type) {
1207
return TRUE; // At most one matching row
1209
case JT_REF_OR_NULL:
1210
return !is_key_used(table, join_tab->ref.key, table->write_set);
1212
/* If range search on index */
1213
if (join_tab->quick)
1214
return !join_tab->quick->is_keys_used(table->write_set);
1215
/* If scanning in clustered key */
1216
if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
1217
table->s->primary_key < MAX_KEY)
1218
return !is_key_used(table, table->s->primary_key, table->write_set);
1221
break; // Avoid compler warning
1229
Initialize table for multi table
1232
- Update first table in join on the fly, if possible
1233
- Create temporary tables to store changed values for all other tables
1234
that are updated (and main_table if the above doesn't hold).
1238
multi_update::initialize_tables(JOIN *join)
1240
TABLE_LIST *table_ref;
1241
DBUG_ENTER("initialize_tables");
1243
if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
1245
main_table=join->join_tab->table;
1248
/* Any update has at least one pair (field, value) */
1249
DBUG_ASSERT(fields->elements);
1251
/* Create a temporary table for keys to all tables, except main table */
1252
for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
1254
TABLE *table=table_ref->table;
1255
uint cnt= table_ref->shared;
1256
List<Item> temp_fields;
1258
TMP_TABLE_PARAM *tmp_param;
1260
table->mark_columns_needed_for_update();
1262
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
1263
if (table == main_table) // First table in join
1265
if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
1267
table_to_update= main_table; // Update table on the fly
1271
table->prepare_for_position();
1273
tmp_param= tmp_table_param+cnt;
1276
Create a temporary table to store all fields that are changed for this
1277
table. The first field in the temporary table is a pointer to the
1278
original row so that we can find and update it. For the updatable
1279
VIEW a few following fields are rowids of tables used in the CHECK
1283
List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
1287
Field_string *field= new Field_string(tbl->file->ref_length, 0,
1288
tbl->alias, &my_charset_bin);
1293
The field will be converted to varstring when creating tmp table if
1294
table to be updated was created by mysql 4.1. Deny this.
1296
field->can_alter_field_type= 0;
1297
Item_field *ifield= new Item_field((Field *) field);
1300
ifield->maybe_null= 0;
1301
if (temp_fields.push_back(ifield))
1303
} while ((tbl= tbl_it++));
1305
temp_fields.concat(fields_for_table[cnt]);
1307
/* Make an unique key over the first field to avoid duplicated updates */
1308
bzero((char*) &group, sizeof(group));
1310
group.item= (Item**) temp_fields.head_ref();
1312
tmp_param->quick_group=1;
1313
tmp_param->field_count=temp_fields.elements;
1314
tmp_param->group_parts=1;
1315
tmp_param->group_length= table->file->ref_length;
1316
if (!(tmp_tables[cnt]=create_tmp_table(thd,
1319
(ORDER*) &group, 0, 0,
1320
TMP_TABLE_ALL_COLUMNS,
1324
tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
1330
multi_update::~multi_update()
1333
for (table= update_tables ; table; table= table->next_local)
1335
table->table->no_keyread= table->table->no_cache= 0;
1337
table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
1342
for (uint cnt = 0; cnt < table_count; cnt++)
1344
if (tmp_tables[cnt])
1346
free_tmp_table(thd, tmp_tables[cnt]);
1347
tmp_table_param[cnt].cleanup();
1352
delete [] copy_field;
1353
thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting
1354
DBUG_ASSERT(trans_safe || !updated ||
1355
thd->transaction.all.modified_non_trans_table);
1359
bool multi_update::send_data(List<Item> ¬_used_values)
1361
TABLE_LIST *cur_table;
1362
DBUG_ENTER("multi_update::send_data");
1364
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
1366
TABLE *table= cur_table->table;
1367
uint offset= cur_table->shared;
1369
Check if we are using outer join and we didn't find the row
1370
or if we have already updated this row in the previous call to this
1373
The same row may be presented here several times in a join of type
1374
UPDATE t1 FROM t1,t2 SET t1.a=t2.a
1376
In this case we will do the update for the first found row combination.
1377
The join algorithm guarantees that we will not find the a row in
1380
if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
1384
We can use compare_record() to optimize away updates if
1385
the table handler is returning all columns OR if
1386
if all updated columns are read
1388
if (table == table_to_update)
1390
bool can_compare_record;
1391
can_compare_record= (!(table->file->ha_table_flags() &
1392
HA_PARTIAL_COLUMN_READ) ||
1393
bitmap_is_subset(table->write_set,
1395
table->status|= STATUS_UPDATED;
1396
store_record(table,record[1]);
1397
if (fill_record(thd, *fields_for_table[offset],
1398
*values_for_table[offset], 0))
1402
if (!can_compare_record || compare_record(table))
1408
Inform the main table that we are going to update the table even
1409
while we may be scanning it. This will flush the read cache
1412
main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
1414
if ((error=table->file->ha_update_row(table->record[1],
1415
table->record[0])) &&
1416
error != HA_ERR_RECORD_IS_THE_SAME)
1420
table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
1423
If (ignore && error == is ignorable) we don't have to
1424
do anything; otherwise...
1428
if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
1429
flags|= ME_FATALERROR; /* Other handler errors are fatal */
1431
prepare_record_for_error_message(error, table);
1432
table->file->print_error(error,MYF(flags));
1438
if (error == HA_ERR_RECORD_IS_THE_SAME)
1443
/* non-transactional or transactional table got modified */
1444
/* either multi_update class' flag is raised in its branch */
1445
if (table->file->has_transactions())
1446
transactional_tables= 1;
1450
thd->transaction.stmt.modified_non_trans_table= TRUE;
1458
TABLE *tmp_table= tmp_tables[offset];
1460
For updatable VIEW store rowid of the updated table and
1461
rowids of tables used in the CHECK OPTION condition.
1464
List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
1468
tbl->file->position(tbl->record[0]);
1469
memcpy((char*) tmp_table->field[field_num]->ptr,
1470
(char*) tbl->file->ref, tbl->file->ref_length);
1472
} while ((tbl= tbl_it++));
1474
/* Store regular updated fields in the row. */
1476
tmp_table->field + 1 + unupdated_check_opt_tables.elements,
1477
*values_for_table[offset], 1);
1479
/* Write row, ignoring duplicated updates to a row */
1480
error= tmp_table->file->ha_write_row(tmp_table->record[0]);
1481
if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
1484
create_myisam_from_heap(thd, tmp_table,
1485
tmp_table_param[offset].start_recinfo,
1486
&tmp_table_param[offset].recinfo,
1490
DBUG_RETURN(1); // Not a table_is_full error
1500
void multi_update::send_error(uint errcode,const char *err)
1502
/* First send error what ever it is ... */
1503
my_error(errcode, MYF(0), err);
1507
void multi_update::abort()
1509
/* the error was handled or nothing deleted and no side effects return */
1510
if (error_handled ||
1511
(!thd->transaction.stmt.modified_non_trans_table && !updated))
1514
If all tables that has been updated are trans safe then just do rollback.
1515
If not attempt to do remaining updates.
1520
DBUG_ASSERT(thd->transaction.stmt.modified_non_trans_table);
1521
if (do_update && table_count > 1)
1523
/* Add warning here */
1525
todo/fixme: do_update() is never called with the arg 1.
1526
should it change the signature to become argless?
1531
if (thd->transaction.stmt.modified_non_trans_table)
1534
The query has to binlog because there's a modified non-transactional table
1535
either from the query's list or via a stored routine: bug#13270,23333
1537
if (mysql_bin_log.is_open())
1540
THD::killed status might not have been set ON at time of an error
1541
got caught and if happens later the killed error is written
1544
thd->binlog_query(THD::ROW_QUERY_TYPE,
1545
thd->query, thd->query_length,
1546
transactional_tables, FALSE);
1548
thd->transaction.all.modified_non_trans_table= TRUE;
1550
DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
1554
int multi_update::do_updates()
1556
TABLE_LIST *cur_table;
1558
ha_rows org_updated;
1559
TABLE *table, *tmp_table;
1560
List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
1561
DBUG_ENTER("multi_update::do_updates");
1563
do_update= 0; // Don't retry this function
1566
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
1568
bool can_compare_record;
1569
uint offset= cur_table->shared;
1571
table = cur_table->table;
1572
if (table == table_to_update)
1573
continue; // Already updated
1574
org_updated= updated;
1575
tmp_table= tmp_tables[cur_table->shared];
1576
tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
1577
(void) table->file->ha_rnd_init(0);
1578
table->file->extra(HA_EXTRA_NO_CACHE);
1580
check_opt_it.rewind();
1581
while(TABLE *tbl= check_opt_it++)
1583
if (tbl->file->ha_rnd_init(1))
1585
tbl->file->extra(HA_EXTRA_CACHE);
1589
Setup copy functions to copy fields from temporary table
1591
List_iterator_fast<Item> field_it(*fields_for_table[offset]);
1592
Field **field= tmp_table->field +
1593
1 + unupdated_check_opt_tables.elements; // Skip row pointers
1594
Copy_field *copy_field_ptr= copy_field, *copy_field_end;
1595
for ( ; *field ; field++)
1597
Item_field *item= (Item_field* ) field_it++;
1598
(copy_field_ptr++)->set(item->field, *field, 0);
1600
copy_field_end=copy_field_ptr;
1602
if ((local_error = tmp_table->file->ha_rnd_init(1)))
1605
can_compare_record= (!(table->file->ha_table_flags() &
1606
HA_PARTIAL_COLUMN_READ) ||
1607
bitmap_is_subset(table->write_set,
1612
if (thd->killed && trans_safe)
1614
if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
1616
if (local_error == HA_ERR_END_OF_FILE)
1618
if (local_error == HA_ERR_RECORD_DELETED)
1619
continue; // May happen on dup key
1623
/* call rnd_pos() using rowids from temporary table */
1624
check_opt_it.rewind();
1630
tbl->file->rnd_pos(tbl->record[0],
1631
(uchar *) tmp_table->field[field_num]->ptr)))
1634
} while((tbl= check_opt_it++));
1636
table->status|= STATUS_UPDATED;
1637
store_record(table,record[1]);
1639
/* Copy data from temporary table to current table */
1640
for (copy_field_ptr=copy_field;
1641
copy_field_ptr != copy_field_end;
1643
(*copy_field_ptr->do_copy)(copy_field_ptr);
1645
if (!can_compare_record || compare_record(table))
1647
if ((local_error=table->file->ha_update_row(table->record[1],
1648
table->record[0])) &&
1649
local_error != HA_ERR_RECORD_IS_THE_SAME)
1652
table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
1655
if (local_error != HA_ERR_RECORD_IS_THE_SAME)
1662
if (updated != org_updated)
1664
if (table->file->has_transactions())
1665
transactional_tables= 1;
1668
trans_safe= 0; // Can't do safe rollback
1669
thd->transaction.stmt.modified_non_trans_table= TRUE;
1672
(void) table->file->ha_rnd_end();
1673
(void) tmp_table->file->ha_rnd_end();
1674
check_opt_it.rewind();
1675
while (TABLE *tbl= check_opt_it++)
1676
tbl->file->ha_rnd_end();
1683
prepare_record_for_error_message(local_error, table);
1684
table->file->print_error(local_error,MYF(ME_FATALERROR));
1687
(void) table->file->ha_rnd_end();
1688
(void) tmp_table->file->ha_rnd_end();
1689
check_opt_it.rewind();
1690
while (TABLE *tbl= check_opt_it++)
1691
tbl->file->ha_rnd_end();
1693
if (updated != org_updated)
1695
if (table->file->has_transactions())
1696
transactional_tables= 1;
1700
thd->transaction.stmt.modified_non_trans_table= TRUE;
1707
/* out: 1 if error, 0 if success */
1709
bool multi_update::send_eof()
1711
char buff[STRING_BUFFER_USUAL_SIZE];
1713
THD::killed_state killed_status= THD::NOT_KILLED;
1714
DBUG_ENTER("multi_update::send_eof");
1715
thd_proc_info(thd, "updating reference tables");
1718
Does updates for the last n - 1 tables, returns 0 if ok;
1719
error takes into account killed status gained in do_updates()
1721
int local_error = (table_count) ? do_updates() : 0;
1723
if local_error is not set ON until after do_updates() then
1724
later carried out killing should not affect binlogging.
1726
killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
1727
thd_proc_info(thd, "end");
1730
Write the SQL statement to the binlog if we updated
1731
rows and we succeeded or if we updated some non
1732
transactional tables.
1734
The query has to binlog because there's a modified non-transactional table
1735
either from the query's list or via a stored routine: bug#13270,23333
1738
DBUG_ASSERT(trans_safe || !updated ||
1739
thd->transaction.stmt.modified_non_trans_table);
1740
if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table)
1742
if (mysql_bin_log.is_open())
1744
if (local_error == 0)
1746
if (thd->binlog_query(THD::ROW_QUERY_TYPE,
1747
thd->query, thd->query_length,
1748
transactional_tables, FALSE, killed_status) &&
1751
local_error= 1; // Rollback update
1754
if (thd->transaction.stmt.modified_non_trans_table)
1755
thd->transaction.all.modified_non_trans_table= TRUE;
1757
if (local_error != 0)
1758
error_handled= TRUE; // to force early leave from ::send_error()
1760
if (local_error > 0) // if the above log write did not fail ...
1762
/* Safety: If we haven't got an error before (can happen in do_updates) */
1763
my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update",
1768
id= thd->arg_of_last_insert_id_function ?
1769
thd->first_successful_insert_id_in_prev_stmt : 0;
1770
sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
1771
(ulong) thd->cuted_fields);
1772
thd->row_count_func=
1773
(thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
1774
::my_ok(thd, (ulong) thd->row_count_func, id, buff);