  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/*
  Single table and multi table updates of tables.
  Multi-table updates were introduced by Sinisa & Monty.
*/

#include <drizzled/sql_select.h>
#include <drizzled/error.h>
#include <drizzled/probes.h>
#include <drizzled/sql_base.h>
#include <drizzled/field/epoch.h>
#include <drizzled/sql_parse.h>
#include <drizzled/optimizer/range.h>
#include <drizzled/records.h>
#include <drizzled/internal/my_sys.h>
#include <drizzled/internal/iocache.h>
#include <drizzled/transaction_services.h>
#include <drizzled/filesort.h>
#include <drizzled/plugin/storage_engine.h>

#include <boost/dynamic_bitset.hpp>
#include "mysql_priv.h"
#include "sql_select.h"
#include <drizzled/drizzled_error_messages.h>

/* Return false if the row hasn't changed */

bool compare_record(TABLE *table)
{
  if (table->s->blob_fields + table->s->varchar_fields == 0)
    return cmp_record(table, record[1]);
  /* Compare null bits */
  if (memcmp(table->null_flags,
             table->null_flags + table->s->rec_buff_length,
             table->s->null_bytes))
    return true;                                // Diff in NULL value
  /* Compare updated fields */
  for (Field **ptr= table->field ; *ptr ; ptr++)
  {
    if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
        (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
      return true;
  }
  return false;
}
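
/*
  Illustrative sketch (not part of the server source; all names below are
  invented) of the idea behind compare_record(): keep a "before" image
  (the analogue of table->record[1]) and an "after" image (table->record[0])
  of the row, and only call the engine when something actually differs.
*/
#include <cstring>

struct row_images_sketch
{
  unsigned char before[64];                     // analogue of table->record[1]
  unsigned char after[64];                      // analogue of table->record[0]
};

static bool row_changed_sketch(const row_images_sketch &r, size_t row_length)
{
  /*
    With no BLOB or VARCHAR columns a single memcmp over the fixed-length
    row suffices, just as compare_record() falls back to cmp_record().
  */
  return memcmp(r.before, r.after, row_length) != 0;
}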

/*
  Check that all fields are real table fields.

  SYNOPSIS
    check_fields()
    thd             thread handler
    items           Items to check

  RETURN
    true    Items can't be used in UPDATE
    false   Items are OK
*/

static bool check_fields(THD *thd, List<Item> &items)
{
  List_iterator<Item> it(items);
  Item *item;
  Item_field *field;

  while ((item= it++))
  {
    if (!(field= item->filed_for_view_update()))
    {
      /* item has a name, because it comes from a VIEW SELECT list */
      my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
      return true;
    }
    /*
      we make a temporary copy of Item_field, to avoid influence of changing
      result_field on Item_ref objects which refer to this field
    */
    thd->change_item_tree(it.ref(), new Item_field(thd, field));
  }
  return false;
}
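
/*
  Illustrative example (editorial, not server code) of what check_fields()
  guards against: in a view such as

    CREATE VIEW v AS SELECT a + 1 AS b FROM t;

  the statement

    UPDATE v SET b = 10;

  must fail with ER_NONUPDATEABLE_COLUMN, because b maps to an expression,
  not to a real table field (filed_for_view_update() returns NULL for it).
*/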

/*
  Re-read record if more columns are needed for error message.

  If we got a duplicate key error, we want to write an error
  message containing the value of the duplicate key. If we do not have
  all fields of the key value in record[0], we need to re-read the
  record with a proper read_set.

  @param[in] error   error number
  @param[in] table   table
*/
static void prepare_record_for_error_message(int error, TABLE *table)
{
  Field **field_p;
  Field *field;
  uint keynr;
  MY_BITMAP unique_map;                         /* Fields in offended unique. */
  my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];

  /*
    Only duplicate key errors print the key value.
    If the storage engine always reads all columns, we have the value already.
  */
  if ((error != HA_ERR_FOUND_DUPP_KEY) ||
      !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
    return;

  /*
    Get the number of the offended index.
    We will see MAX_KEY if the engine cannot determine the affected index.
  */
  if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
    return;

  /* Create unique_map with all fields used by that index. */
  bitmap_init(&unique_map, unique_map_buf, table->s->fields, false);
  table->mark_columns_used_by_index_no_reset(keynr, &unique_map);

  /* Subtract read_set and write_set. */
  bitmap_subtract(&unique_map, table->read_set);
  bitmap_subtract(&unique_map, table->write_set);

  /*
    If the unique index uses columns that are neither in read_set
    nor in write_set, we must re-read the record.
    Otherwise there is no need to do anything.
  */
  if (bitmap_is_clear_all(&unique_map))
    return;

  /* Get identifier of last read record into table->file->ref. */
  table->file->position(table->record[0]);
  /* Add all fields used by unique index to read_set. */
  bitmap_union(table->read_set, &unique_map);
  /* Tell the engine about the new set. */
  table->file->column_bitmaps_signal();
  /* Read record that is identified by table->file->ref. */
  (void) table->file->rnd_pos(table->record[1], table->file->ref);
  /* Copy the newly read columns into the new record. */
  for (field_p= table->field; (field= *field_p); field_p++)
  {
    if (bitmap_is_set(&unique_map, field->field_index))
      field->copy_from_tmp(table->s->rec_buff_length);
  }
}
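
/*
  A minimal illustrative sketch (editorial; std::bitset stands in for
  MY_BITMAP, and none of these names are server APIs) of the bitmap
  arithmetic above: the fields needed by the offended index, minus
  everything already read or written, is exactly what must be fetched again.
*/
#include <bitset>

static bool must_reread_sketch()
{
  std::bitset<8> unique_map("00001111");        // fields used by the duplicate key
  std::bitset<8> read_set("00000011");          // columns the engine already read
  std::bitset<8> write_set("00000100");         // columns the UPDATE modified

  unique_map &= ~read_set;                      // bitmap_subtract(&unique_map, read_set)
  unique_map &= ~write_set;                     // bitmap_subtract(&unique_map, write_set)

  return unique_map.any();                      // !bitmap_is_clear_all(&unique_map)
}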

/*
  Process usual UPDATE

  SYNOPSIS
    mysql_update()
    thd                 thread handler
    fields              fields for update
    values              values of fields for update
    conds               WHERE clause expression
    order_num           number of elements in ORDER BY clause
    order               ORDER BY clause list
    limit               limit clause
    handle_duplicates   how to handle duplicates

  RETURN
    0 - OK
    2 - privilege check and opening table passed, but we need to convert to
        multi-update because of view substitution
    1 - error
*/
int mysql_update(THD *thd,
                 TABLE_LIST *table_list,
                 List<Item> &fields,
                 List<Item> &values,
                 COND *conds,
                 uint order_num, ORDER *order,
                 ha_rows limit,
                 enum enum_duplicates handle_duplicates __attribute__((unused)),
                 bool ignore)
{
  bool using_limit= limit != HA_POS_ERROR;
  bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
  bool used_key_is_modified, transactional_table, will_batch;
  bool can_compare_record;
  int error, loc_error;
  uint used_index= MAX_KEY, dup_key_found;
  bool need_sort= true;
  uint table_count= 0;
  ha_rows updated, found;
  key_map old_covering_keys;
  TABLE *table;
  SQL_SELECT *select= NULL;
  READ_RECORD info;
  SELECT_LEX *select_lex= &thd->lex->select_lex;
  bool need_reopen;
  List<Item> all_fields;
  THD::killed_state killed_status= THD::NOT_KILLED;

  for ( ; ; )
  {
    if (open_tables(thd, &table_list, &table_count, 0))
      return 1;

    if (!lock_tables(thd, table_list, table_count, &need_reopen))
      break;
    if (!need_reopen)
      return 1;
    close_tables_for_reopen(thd, &table_list);
  }

  if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||
      (thd->fill_derived_tables() &&
       mysql_handle_derived(thd->lex, &mysql_derived_filling)))
    return 1;

  MYSQL_UPDATE_START();
  thd_proc_info(thd, "init");

  table= table_list->table;

  /* Calculate "table->covering_keys" based on the WHERE */
  table->covering_keys= table->s->keys_in_use;
  table->quick_keys.clear_all();

  if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
    goto abort;

  old_covering_keys= table->covering_keys;      // Keys used in WHERE
  /* Check the fields we are going to modify */
  if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
    goto abort;                                 /* purecov: inspected */

  if (table->timestamp_field)
  {
    // Don't set timestamp column if this is modified
    if (bitmap_is_set(table->write_set,
                      table->timestamp_field->field_index))
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
    else
    {
      if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
          table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
        bitmap_set_bit(table->write_set,
                       table->timestamp_field->field_index);
    }
  }

  if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
  {
    free_underlaid_joins(thd, select_lex);
    goto abort;                                 /* purecov: inspected */
  }

  if (select_lex->inner_refs_list.elements &&
      fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
    goto abort;

  if (conds)
  {
    Item::cond_result cond_value;
    conds= remove_eq_conds(thd, conds, &cond_value);
    if (cond_value == Item::COND_FALSE)
      limit= 0;                                 // Impossible WHERE
  }

  if (ignore)
    table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);

  if (select && select->quick && select->quick->reset())
    goto err;
  table->file->try_semi_consistent_read(1);
  init_read_record(&info, thd, table, select, 0, 1);

  updated= found= 0;
  /*
    Generate an error when trying to set a NOT NULL field to NULL: per the
    SQL standard, inserting NULL into a NOT NULL field requires an error to
    be thrown. The NULL check and handling occur in field_conv.cc.
  */
  thd->count_cuted_fields= ignore ? CHECK_FIELD_WARN
                                  : CHECK_FIELD_ERROR_FOR_NULL;
  thd->cuted_fields= 0L;
  thd_proc_info(thd, "Updating");

  transactional_table= table->file->has_transactions();
  thd->abort_on_warning= test(!ignore &&
                              (thd->variables.sql_mode &
                               (MODE_STRICT_TRANS_TABLES |
                                MODE_STRICT_ALL_TABLES)));
  will_batch= !table->file->start_bulk_update();

  /*
    Assure that we can use position()
    if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();

  /*
    We can use compare_record() to optimize away updates if
    the table handler is returning all columns OR if
    all updated columns are read.
  */
  can_compare_record= (!(table->file->ha_table_flags() &
                         HA_PARTIAL_COLUMN_READ) ||
                       bitmap_is_subset(table->write_set, table->read_set));

  while (!(error= info.read_record(&info)) && !thd->killed)
  {
    if (!(select && select->skip_record()))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      store_record(table, record[1]);
      if (fill_record(thd, fields, values, 0))
        break;                                  /* purecov: inspected */

      found++;

      if (!can_compare_record || compare_record(table))
      {
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) it is specifically told to do so
            2) it is not a good idea to batch anymore
            3) it is necessary to send the batch for other reasons
               (one such reason is when READs must be performed)

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the updates including the one
            defined in the bulk_update_row or not including the row
            in the call. This is up to the handler implementation and can
            vary from call to call.

            The dup_key_found reports the number of duplicate keys found
            in those updates actually executed. It only reports those if
            the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
            If this hasn't been issued it returns an error code and can
            ignore this number. Thus any handler that implements batching
            for UPDATE IGNORE must also handle this extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
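
        /*
          Illustrative sketch of the handler-side contract described above
          (editorial; invented engine name, not a real API):

            int example_handler::bulk_update_row(const uchar *old_data,
                                                 uchar *new_data,
                                                 uint *dup_key_found)
            {
              queue_row(old_data, new_data);  // 1) caller flushes via exec_bulk_update
              if (batch_full() || read_pending())
                return flush_batch(dup_key_found); // 2) and 3): flush now,
                                                   // counting ignored dup keys
              return 0;
            }
        */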

        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
            flags|= ME_FATALERROR; /* Other handler errors are fatal */

          prepare_record_for_error_message(error, table);
          table->file->print_error(error, MYF(flags));
          error= 1;
          break;
        }
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in most common situations where no
          batching has occurred and if batching was supposed to occur but
          no updates were made and finally when the batch execution was
          performed without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check, and if there is no error but duplicate keys were found we
          need to continue, since those are not counted for in limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report error of duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found;  // limit is 0 when we get here, so we must add
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    else
      table->file->unlock_row();
    thd->row_count++;
  }
  dup_key_found= 0;
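
  /*
    Editorial summary of the per-row loop above (simplified pseudocode with
    invented names, not server calls):

      while (read_next_row() == 0 && !killed)
      {
        if (!where_matches())       { unlock_row(); continue; }
        save_before_image();        // store_record(table, record[1])
        apply_set_clauses();        // fill_record(...)
        if (row_actually_changed()) // compare_record(table)
          update_or_batch_row();
        if (--limit == 0)           break;
      }
  */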

bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
                          Item **conds, uint order_num, ORDER *order)
{
  List<Item> all_fields;
  SELECT_LEX *select_lex= &thd->lex->select_lex;

  /*
    Statement-based replication of UPDATE ... LIMIT is not safe as the order
    of rows is not defined, so in mixed mode we go to row-based.

    Note that we may consider a statement as safe if ORDER BY primary_key
    is present. However it may confuse users to see very similar statements
    replicated differently.
  */
  if (thd->lex->current_select->select_limit)
  {
    thd->lex->set_stmt_unsafe();
    thd->set_current_stmt_binlog_row_based_if_mixed();
  }

  thd->lex->allow_sum_func= 0;

  if (setup_tables_and_check_access(thd, &select_lex->context,
                                    &select_lex->top_join_list,
                                    table_list,
                                    &select_lex->leaf_tables, false) ||
      setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
      select_lex->setup_ref_array(thd, order_num) ||
      setup_order(thd, select_lex->ref_pointer_array,
                  table_list, all_fields, all_fields, order))
    return true;

  /* Check that we are not using a table that we are updating in a sub select */
  {
    TABLE_LIST *duplicate;
    if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
    {
      update_non_unique_table_error(table_list, "UPDATE", duplicate);
      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
  }
  return false;
}

/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/

static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->used_tables();
  return map;
}
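
/*
  Illustrative sketch (editorial; invented names): a table_map is a bitmask
  with one bit per open table, so testing "is this table updated" is a
  single AND against the map built above.
*/
#include <stdint.h>

static bool table_is_updated_sketch(uint64_t tables_for_update,
                                    unsigned table_number)
{
  return (tables_for_update & (((uint64_t) 1) << table_number)) != 0;
}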

/*
  Make update-specific preparation and checks after opening tables.

  SYNOPSIS
    mysql_multi_update_prepare()
    thd         thread handler
*/

int mysql_multi_update_prepare(THD *thd)
{
  LEX *lex= thd->lex;
  TABLE_LIST *table_list= lex->query_tables;
  TABLE_LIST *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;
  /*
    if this multi-update was converted from a usual update, here is the table
    counter, else junk will be assigned here, but then replaced with the real
    count in open_tables()
  */
  uint table_count= lex->table_count;
  const bool using_lock_tables= thd->locked_tables != 0;
  bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
  bool need_reopen= false;

  /* following need for prepared statements, to run next time multi-update */
  thd->lex->sql_command= SQLCOM_UPDATE_MULTI;

reopen_tables:

  /* open tables and create derived ones, but do not lock and fill them */
  if (((original_multiupdate || need_reopen) &&
       open_tables(thd, &table_list, &table_count, 0)) ||
      mysql_handle_derived(lex, &mysql_derived_prepare))
    return true;

  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is a
    check for a second call in setup_tables()).
  */
  if (setup_tables_and_check_access(thd, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    &lex->select_lex.leaf_tables, false))
    return true;

  if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
    return true;
883
if (update_view && check_fields(thd, *fields))
888
tables_for_update= get_table_map(fields);
891
Setup timestamp handling and locking mode
893
leaves= lex->select_lex.leaf_tables;
894
for (tl= leaves; tl; tl= tl->next_leaf)
896
TABLE *table= tl->table;
897
/* Only set timestamp column if this is not modified */
898
if (table->timestamp_field &&
899
bitmap_is_set(table->write_set,
900
table->timestamp_field->field_index))
901
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
903
/* if table will be updated then check that it is unique */
904
if (table->map & tables_for_update)
906
table->mark_columns_needed_for_update();
908
If table will be updated we should not downgrade lock for it and
915
If we are using the binary log, we need TL_READ_NO_INSERT to get
916
correct order of statements. Otherwise, we use a TL_READ lock to
919
tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ;
921
/* Update TABLE::lock_type accordingly. */
922
if (!tl->placeholder() && !using_lock_tables)
923
tl->table->reginfo.lock_type= tl->lock_type;

  /* now lock and fill tables */
  if (lock_tables(thd, table_list, table_count, &need_reopen))
  {
    if (!need_reopen)
      return true;

    /*
      We have to reopen tables since some of them were altered or dropped
      during lock_tables() or something was done with their triggers.
      Let us do some cleanups to be able to do setup_tables() and
      setup_fields() correctly.
    */
    List_iterator_fast<Item> it(*fields);
    Item *item;
    while ((item= it++))
      item->cleanup();

    /* We have to cleanup translation tables of views. */
    for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global)
      tbl->cleanup_items();

    close_tables_for_reopen(thd, &table_list);
    goto reopen_tables;
  }

  /*
    Check that we are not using a table that we are updating, but we should
    skip all tables of the UPDATE SELECT itself
  */
  lex->select_lex.exclude_from_table_unique_test= true;
  /* We only need SELECT privilege for columns in the values list */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TABLE_LIST *duplicate;
      if ((duplicate= unique_table(thd, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        return true;
      }
    }
  }
  /*
    Set exclude_from_table_unique_test value back to false. It is needed for
    a further check in multi_update::prepare whether to use the record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= false;

  if (thd->fill_derived_tables() &&
      mysql_handle_derived(lex, &mysql_derived_filling))
    return true;

  return false;
}

/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(THD *thd,
                        TABLE_LIST *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        ulonglong options,
                        enum enum_duplicates handle_duplicates, bool ignore,
                        SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
  multi_update *result;
  bool res;

  if (!(result= new multi_update(table_list,
                                 thd->lex->select_lex.leaf_tables,
                                 fields, values,
                                 handle_duplicates, ignore)))
    return true;

  thd->abort_on_warning= test(thd->variables.sql_mode &
                              (MODE_STRICT_TRANS_TABLES |
                               MODE_STRICT_ALL_TABLES));

  List<Item> total_list;
  res= mysql_select(thd, &select_lex->ref_pointer_array,
                    table_list, select_lex->with_wild,
                    total_list,
                    conds, 0, (ORDER *) NULL, (ORDER *) NULL, (Item *) NULL,
                    (ORDER *) NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    result, unit, select_lex);
  res|= thd->is_error();
  if (res)
  {
    /* If we had another error reported earlier then this will be ignored */
    result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
    result->abort();
  }
  delete result;
  thd->abort_on_warning= 0;
  return false;
}

multi_update::multi_update(TABLE_LIST *table_list,
                           TABLE_LIST *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  :all_tables(table_list), leaves(leaves_list), update_tables(0),
   tmp_tables(0), updated(0), found(0), fields(field_list),
   values(value_list), table_count(0), copy_field(0),
   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
   transactional_tables(0), ignore(ignore_arg), error_handled(0)
{}

/*
  Connect fields with tables and create a list of the tables that are updated
*/

int multi_update::prepare(List<Item> &not_used_values __attribute__((unused)),
                          SELECT_LEX_UNIT *lex_unit __attribute__((unused)))
{
  TABLE_LIST *table_ref;
  SQL_LIST update;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint i, max_fields;
  uint leaf_table_count= 0;

  thd->count_cuted_fields= CHECK_FIELD_WARN;
  thd->cuted_fields= 0L;
  thd_proc_info(thd, "updating main table");

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    return 1;
  }

  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables
  */
  if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
    return 1;

  /*
    Save tables being updated in update_tables;
    update_table->shared is the position for the table.
    Don't use key read on tables that are updated.
  */
  update.empty();
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support of view of join support */
    TABLE *table= table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      TABLE_LIST *tl= (TABLE_LIST*) thd->memdup((char*) table_ref,
                                                sizeof(*tl));
      if (!tl)
        return 1;
      update.link_in_list((uchar*) tl, (uchar**) &tl->next_local);
      tl->shared= table_count++;
      table->no_keyread= 1;
      table->covering_keys.clear_all();
      table->pos_in_table_list= tl;
    }
  }

  table_count= update.elements;
  update_tables= (TABLE_LIST*) update.first;

  tmp_tables= (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
  tmp_table_param= (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
                                                  table_count);
  fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
                                              table_count);
  values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
                                              table_count);
  if (thd->is_fatal_error)
    return 1;
  for (i= 0; i < table_count; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (thd->is_fatal_error)
    return 1;

  /* Split fields into fields_for_table[] and values_for_table[] */
  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
  if (thd->is_fatal_error)
    return 1;

  /* Allocate copy fields */
  max_fields= 0;
  for (i= 0; i < table_count; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];
  return (thd->is_fatal_error != 0);
}
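
/*
  Illustrative sketch (editorial; simplified, invented types) of the split
  performed in multi_update::prepare() above: every (field, value) pair is
  routed to the slot of the table that owns the field, using the table's
  position ("shared") as the index.
*/
#include <vector>
#include <utility>

struct split_sketch
{
  /* one list of (field_id, value_id) pairs per updated table */
  std::vector< std::vector< std::pair<int, int> > > per_table;

  void route(unsigned table_slot, int field_id, int value_id)
  {
    per_table[table_slot].push_back(std::make_pair(field_id, value_id));
  }
};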

/*
  Check if a table is safe to update on the fly

  SYNOPSIS
    safe_update_on_fly()
    thd                 thread handler
    join_tab            how the table is used in the join
    all_tables          list of tables

  NOTES
    We can update the first table in a join on the fly if we know that
    a row in this table will never be read twice. This is true under
    the following conditions:

    - We are doing a table scan and the data is in a separate file (MyISAM) or
      if we don't update a clustered key.

    - We are doing a range scan and we don't update the scan key or
      the primary key for a clustered table handler.

    - Table is not joined to itself.

    This function gets information about fields to be updated from
    the TABLE::write_set bitmap.

  WARNING
    This code is a bit dependent on how make_join_readinfo() works.

  RETURN
    0   Not safe to update
    1   Safe to update
*/

static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
                               TABLE_LIST *table_ref, TABLE_LIST *all_tables)
{
  TABLE *table= join_tab->table;
  if (unique_table(thd, table_ref, all_tables, 0))
    return 0;
  switch (join_tab->type) {
  case JT_SYSTEM:
  case JT_CONST:
  case JT_EQ_REF:
    return true;                                // At most one matching row
  case JT_REF:
  case JT_REF_OR_NULL:
    return !is_key_used(table, join_tab->ref.key, table->write_set);
  case JT_ALL:
    /* If range search on index */
    if (join_tab->quick)
      return !join_tab->quick->is_keys_used(table->write_set);
    /* If scanning in clustered key */
    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
        table->s->primary_key < MAX_KEY)
      return !is_key_used(table, table->s->primary_key, table->write_set);
    return true;
  default:
    break;                                      // Avoid compiler warning
  }
  return false;
}
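
/*
  Illustrative example (editorial) of the safe_update_on_fly() rules above: in

    UPDATE t1, t2 SET t1.a = t2.b WHERE t1.key_col = t2.c

  t1 may be updated on the fly only if the update does not modify the index
  used for the scan (here potentially key_col); otherwise updated rows could
  move within the index and be visited twice.
*/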

/*
  Initialize tables for a multi-table update.

  IMPLEMENTATION
    - Update the first table in the join on the fly, if possible
    - Create temporary tables to store changed values for all other tables
      that are updated (and main_table if the above doesn't hold).
*/

int multi_update::initialize_tables(JOIN *join)
{
  TABLE_LIST *table_ref;

  if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;
  main_table= join->join_tab->table;
  table_to_update= 0;

  /* Any update has at least one pair (field, value) */
  assert(fields->elements);

  /* Create a temporary table for keys to all tables, except main table */
  for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
  {
    TABLE *table= table_ref->table;
    uint cnt= table_ref->shared;
    List<Item> temp_fields;
    ORDER group;
    TMP_TABLE_PARAM *tmp_param;

    table->mark_columns_needed_for_update();
    if (ignore)
      table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)                    // First table in join
    {
      if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
      {
        table_to_update= main_table;            // Update table on the fly
        continue;
      }
    }
    table->prepare_for_position();

    tmp_param= tmp_table_param + cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For the updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */
    List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
    TABLE *tbl= table;
    do
    {
      Field_string *field= new Field_string(tbl->file->ref_length, 0,
                                            tbl->alias, &my_charset_bin);
      if (!field)
        return 1;
      field->init(tbl);
      /*
        The field will be converted to varstring when creating the tmp table
        if the table to be updated was created by mysql 4.1. Deny this.
      */
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        return 1;
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        return 1;
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);

    /* Make an unique key over the first field to avoid duplicated updates */
    memset((char*) &group, 0, sizeof(group));
    group.asc= 1;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group= 1;
    tmp_param->field_count= temp_fields.elements;
    tmp_param->group_parts= 1;
    tmp_param->group_length= table->file->ref_length;
    if (!(tmp_tables[cnt]= create_tmp_table(thd, tmp_param,
                                            temp_fields,
                                            (ORDER*) &group, 0, 0,
                                            TMP_TABLE_ALL_COLUMNS,
                                            HA_POS_ERROR,
                                            (char *) "")))
      return 1;
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  return 0;
}
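
/*
  Editorial sketch of the temporary-table layout built above (simplified):

    +-------------+--------------------+-- ... --+-------------------+
    | rowid of    | rowids of CHECK    |         | new values for    |
    | target row  | OPTION tables      |         | updated columns   |
    +-------------+--------------------+-- ... --+-------------------+

  The unique key over the first field makes a second update of the same row
  within the join a duplicate-key no-op instead of a double update.
*/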

multi_update::~multi_update()
{
  TABLE_LIST *table;
  for (table= update_tables; table; table= table->next_local)
  {
    table->table->no_keyread= table->table->no_cache= 0;
    if (ignore)
      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
  }

  if (tmp_tables)
  {
    for (uint cnt= 0; cnt < table_count; cnt++)
    {
      if (tmp_tables[cnt])
      {
        free_tmp_table(thd, tmp_tables[cnt]);
        tmp_table_param[cnt].cleanup();
      }
    }
  }
  if (copy_field)
    delete [] copy_field;
  thd->count_cuted_fields= CHECK_FIELD_IGNORE;  // Restore this setting
  assert(trans_safe || !updated ||
         thd->transaction.all.modified_non_trans_table);
}

bool multi_update::send_data(List<Item> &not_used_values __attribute__((unused)))
{
  TABLE_LIST *cur_table;

  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    TABLE *table= cur_table->table;
    uint offset= cur_table->shared;
    /*
      Check if we are using outer join and we didn't find the row,
      or if we have already updated this row in the previous call to this
      function.

      The same row may be presented here several times in a join of type
        UPDATE t1 FROM t1, t2 SET t1.a = t2.a

      In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find a row in
      t1 several times.
    */
    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
      continue;

    /*
      We can use compare_record() to optimize away updates if
      the table handler is returning all columns OR if
      all updated columns are read.
    */
    if (table == table_to_update)
    {
      bool can_compare_record;
      can_compare_record= (!(table->file->ha_table_flags() &
                             HA_PARTIAL_COLUMN_READ) ||
                           bitmap_is_subset(table->write_set,
                                            table->read_set));
      table->status|= STATUS_UPDATED;
      store_record(table, record[1]);
      if (fill_record(thd, *fields_for_table[offset],
                      *values_for_table[offset], 0))
        return true;

      found++;
      if (!can_compare_record || compare_record(table))
      {
        int error;
        updated++;
        /*
          Inform the main table that we are going to update the table even
          while we may be scanning it. This will flush the read cache
          if it's used.
        */
        main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
        if ((error= table->file->ha_update_row(table->record[1],
                                               table->record[0])) &&
            error != HA_ERR_RECORD_IS_THE_SAME)
        {
          updated--;
          if (!ignore ||
              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          {
            /*
              If (ignore && error is ignorable) we don't have to
              do anything; otherwise...
            */
            myf flags= 0;

            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
              flags|= ME_FATALERROR; /* Other handler errors are fatal */

            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(flags));
            return true;
          }
        }
        else
        {
          if (error == HA_ERR_RECORD_IS_THE_SAME)
          {
            error= 0;
            updated--;
          }
          /* non-transactional or transactional table got modified */
          /* either multi_update class' flag is raised in its branch */
          if (table->file->has_transactions())
            transactional_tables= 1;
          else
          {
            trans_safe= 0;
            thd->transaction.stmt.modified_non_trans_table= true;
          }
        }
      }
    }
    else
    {
      int error;
      TABLE *tmp_table= tmp_tables[offset];
      /*
        For an updatable VIEW, store the rowid of the updated table and the
        rowids of tables used in the CHECK OPTION condition.
      */
      uint field_num= 0;
      List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
      TABLE *tbl= table;
      do
      {
        tbl->file->position(tbl->record[0]);
        memcpy((char*) tmp_table->field[field_num]->ptr,
               (char*) tbl->file->ref, tbl->file->ref_length);
        field_num++;
      } while ((tbl= tbl_it++));

      /* Store regular updated fields in the row. */
      fill_record(thd,
                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
                  *values_for_table[offset], 1);

      /* Write row, ignoring duplicated updates to a row */
      error= tmp_table->file->ha_write_row(tmp_table->record[0]);
      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
      {
        if (error &&
            create_myisam_from_heap(thd, tmp_table,
                                    tmp_table_param[offset].start_recinfo,
                                    &tmp_table_param[offset].recinfo,
                                    error, 1))
        {
          do_update= 0;
          return 1;                             // Not a table_is_full error
        }
        found++;
      }
    }
  }
  return false;
}

void multi_update::send_error(uint errcode, const char *err)
{
  /* First send error whatever it is ... */
  my_error(errcode, MYF(0), err);
}

void multi_update::abort()
{
  /* the error was handled or nothing was deleted and there are no side effects */
  if (error_handled ||
      (!thd->transaction.stmt.modified_non_trans_table && !updated))
    return;

  /*
    If all tables that have been updated are trans-safe then just do rollback.
    If not, attempt to do the remaining updates.
  */
  if (!trans_safe)
  {
    assert(thd->transaction.stmt.modified_non_trans_table);
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
        todo/fixme: do_update() is never called with the arg 1.
        should it change the signature to become argless?
      */
      (void) do_updates();
    }
  }
  if (thd->transaction.stmt.modified_non_trans_table)
  {
    /*
      The query has to binlog because there's a modified non-transactional table
      either from the query's list or via a stored routine: bug#13270,23333
    */
    if (mysql_bin_log.is_open())
    {
      /*
        THD::killed status might not have been set ON at the time an error
        got caught, and if it happens later the killed error is written
        into the replication event.
      */
      thd->binlog_query(THD::ROW_QUERY_TYPE,
                        thd->query, thd->query_length,
                        transactional_tables, false);
    }
    thd->transaction.all.modified_non_trans_table= true;
  }
  assert(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
}

int multi_update::do_updates()
{
  TABLE_LIST *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  TABLE *table, *tmp_table;
  List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);

  do_update= 0;                                 // Don't retry this function
  if (!found)
    return 0;
  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    bool can_compare_record;
    uint offset= cur_table->shared;

    table= cur_table->table;
    if (table == table_to_update)
      continue;                                 // Already updated
    org_updated= updated;
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);     // Change to read cache
    (void) table->file->ha_rnd_init(0);
    table->file->extra(HA_EXTRA_NO_CACHE);

    check_opt_it.rewind();
    while (TABLE *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }

    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements; // Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field; field++)
    {
      Item_field *item= (Item_field *) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end= copy_field_ptr;

    if ((local_error= tmp_table->file->ha_rnd_init(1)))
      goto err;

    can_compare_record= (!(table->file->ha_table_flags() &
                           HA_PARTIAL_COLUMN_READ) ||
                         bitmap_is_subset(table->write_set,
                                          table->read_set));

    for (;;)
    {
      if (thd->killed && trans_safe)
        goto err;
      if ((local_error= tmp_table->file->rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;                             // May happen on dup key
        goto err;
      }

      /* call rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      TABLE *tbl= table;
      uint field_num= 0;
      do
      {
        if ((local_error=
             tbl->file->rnd_pos(tbl->record[0],
                                (uchar *) tmp_table->field[field_num]->ptr)))
          goto err;
        field_num++;
      } while ((tbl= check_opt_it++));

      table->status|= STATUS_UPDATED;
      store_record(table, record[1]);

      /* Copy data from temporary table to current table */
      for (copy_field_ptr= copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);

      if (!can_compare_record || compare_record(table))
      {
        if ((local_error= table->file->ha_update_row(table->record[1],
                                                     table->record[0])) &&
            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          local_error= 0;
      }
    }

    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= 1;
      else
      {
        trans_safe= 0;                          // Can't do safe rollback
        thd->transaction.stmt.modified_non_trans_table= true;
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (TABLE *tbl= check_opt_it++)
      tbl->file->ha_rnd_end();
  }
  return 0;

err:
  {
    prepare_record_for_error_message(local_error, table);
    table->file->print_error(local_error, MYF(ME_FATALERROR));
  }

  (void) table->file->ha_rnd_end();
  (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (TABLE *tbl= check_opt_it++)
    tbl->file->ha_rnd_end();

  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= 1;
    else
    {
      trans_safe= 0;
      thd->transaction.stmt.modified_non_trans_table= true;
    }
  }
  return 1;
}

/* out: 1 if error, 0 if success */

bool multi_update::send_eof()
{
  char buff[STRING_BUFFER_USUAL_SIZE];
  ulonglong id;
  THD::killed_state killed_status= THD::NOT_KILLED;

  thd_proc_info(thd, "updating reference tables");

  /*
    Does updates for the last n - 1 tables, returns 0 if ok;
    error takes into account killed status gained in do_updates()
  */
  int local_error= (table_count) ? do_updates() : 0;
  /*
    if local_error is not set ON until after do_updates() then
    later carried out killing should not affect binlogging.
  */
  killed_status= (local_error == 0) ? THD::NOT_KILLED : thd->killed;
  thd_proc_info(thd, "end");

  /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded or if we updated some non-
    transactional tables.

    The query has to binlog because there's a modified non-transactional table
    either from the query's list or via a stored routine: bug#13270,23333
  */

  assert(trans_safe || !updated ||
         thd->transaction.stmt.modified_non_trans_table);
  if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table)
  {
    if (mysql_bin_log.is_open())
    {
      if (local_error == 0)
        thd->clear_error();
      if (thd->binlog_query(THD::ROW_QUERY_TYPE,
                            thd->query, thd->query_length,
                            transactional_tables, false, killed_status) &&
          trans_safe)
      {
        local_error= 1;                         // Rollback update
      }
    }
    if (thd->transaction.stmt.modified_non_trans_table)
      thd->transaction.all.modified_non_trans_table= true;
  }
  if (local_error != 0)
    error_handled= true; // to force early leave from ::send_error()

  if (local_error > 0)  // if the above log write did not fail ...
  {
    /* Safety: If we haven't got an error before (can happen in do_updates) */
    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
               MYF(0));
    return true;
  }

  id= thd->arg_of_last_insert_id_function ?
      thd->first_successful_insert_id_in_prev_stmt : 0;
  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
          (ulong) thd->cuted_fields);
  thd->row_count_func=
    (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
  ::my_ok(thd, (ulong) thd->row_count_func, id, buff);
  return false;
}
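
/*
  Editorial note on the row count above: with CLIENT_FOUND_ROWS the client is
  told how many rows matched the WHERE clause; without it, only how many rows
  were actually changed. For example, re-running

    UPDATE t SET a = 1 WHERE b > 10;

  on already-updated data reports N rows matched but 0 rows changed.
*/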