   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/*
  Single table and multi table updates of tables.
  Multi-table updates were introduced by Sinisa & Monty
*/

#include "mysql_priv.h"
#include "sql_select.h"

/* Return 0 if row hasn't changed */

bool compare_record(TABLE *table)
{
  if (table->s->blob_fields + table->s->varchar_fields == 0)
    return cmp_record(table,record[1]);
  /* Compare null bits */
  if (memcmp(table->null_flags,
             table->null_flags+table->s->rec_buff_length,
             table->s->null_bytes))
    return true;				// Diff in NULL value
  /* Compare updated fields */
  for (Field **ptr= table->field ; *ptr ; ptr++)
  {
    if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
        (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
      return true;
  }
  return false;
}
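
/*
  Note on the two record buffers (a summary added for readability, not part
  of the original comments): record[0] holds the row image with the new
  values applied, record[1] the image read from the handler. The raw
  cmp_record() fast path is only trusted when the row format has no
  blob/varchar fields, where unused padding bytes would make a byte-wise
  compare unreliable; otherwise only the NULL bits and the written fields
  are compared.
*/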

/**
  Check that all fields are real fields.

  @retval true   Items can't be used in UPDATE
  @retval false  Items are OK
*/
static bool check_fields(THD *thd, List<Item> &items)
{
  List_iterator<Item> it(items);
  Item *item;
  Item_field *field;

  while ((item= it++))
  {
    if (!(field= item->filed_for_view_update()))
    {
      /* item has name, because it comes from VIEW SELECT list */
      my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
      return true;
    }
    /*
      We make a temporary copy of Item_field, to avoid influencing the
      result_field of any Item_ref that refers to this field.
    */
    thd->change_item_tree(it.ref(), new Item_field(thd, field));
  }
  return false;
}

/**
  Re-read record if more columns are needed for error message.

  If we got a duplicate key error, we want to write an error
  message containing the value of the duplicate key. If we do not have
  all fields of the key value in record[0], we need to re-read the
  record with a proper read_set.

  @param[in] error   error number
  @param[in] table   table
*/

static void prepare_record_for_error_message(int error, TABLE *table)
{
  Field **field_p= NULL;
  Field *field;
  uint keynr;
  MY_BITMAP unique_map; /* Fields in offended unique. */
  my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];

  /*
    Only duplicate key errors print the key value.
    If the storage engine always reads all columns, we have the value already.
  */
  if ((error != HA_ERR_FOUND_DUPP_KEY) ||
      !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
    return;

  /*
    Get the number of the offended index.
    We will see MAX_KEY if the engine cannot determine the affected index.
  */
  if ((keynr= table->file->get_dup_key(error)) >= MAX_KEY)
    return;

  /* Create unique_map with all fields used by that index. */
  bitmap_init(&unique_map, unique_map_buf, table->s->fields, false);
  table->mark_columns_used_by_index_no_reset(keynr, &unique_map);

  /* Subtract read_set and write_set. */
  bitmap_subtract(&unique_map, table->read_set);
  bitmap_subtract(&unique_map, table->write_set);

  /*
    If the unique index uses columns that are neither in read_set
    nor in write_set, we must re-read the record.
    Otherwise no need to do anything.
  */
  if (bitmap_is_clear_all(&unique_map))
    return;

  /* Get identifier of last read record into table->file->ref. */
  table->file->position(table->record[0]);
  /* Add all fields used by unique index to read_set. */
  bitmap_union(table->read_set, &unique_map);
  /* Tell the engine about the new set. */
  table->file->column_bitmaps_signal();
  /* Read record that is identified by table->file->ref. */
  (void) table->file->rnd_pos(table->record[1], table->file->ref);
  /* Copy the newly read columns into the new record. */
  for (field_p= table->field; (field= *field_p); field_p++)
  {
    if (bitmap_is_set(&unique_map, field->field_index))
      field->copy_from_tmp(table->s->rec_buff_length);
  }
}
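
/*
  In short (summary comment, not from the original source): position()
  remembers the current row, the offended unique key's columns are merged
  into read_set, rnd_pos() re-reads the row into record[1], and
  copy_from_tmp() pulls the still-missing columns over, so the
  duplicate-key error message can print the complete key value.
*/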

/*
  Process usual UPDATE

  SYNOPSIS
    mysql_update()
      thd               thread handler
      fields            fields for update
      values            values of fields for update
      conds             WHERE clause expression
      order_num         number of elements in ORDER BY clause
      order             ORDER BY clause list
      limit             limit clause
      handle_duplicates how to handle duplicates

  RETURN
    2 - privilege check and opening table passed, but we need to convert to
        multi-update because of view substitution
*/
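
/*
  Example of a statement handled by this single-table path (illustration,
  not from the original source):
    UPDATE t1 SET a= a + 1 WHERE key_col = 5 ORDER BY b LIMIT 10;
  Multi-table forms such as "UPDATE t1, t2 SET ..." go through
  mysql_multi_update() further below instead.
*/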

int mysql_update(THD *thd,
                 TABLE_LIST *table_list,
                 List<Item> &fields, List<Item> &values, COND *conds,
                 uint order_num, ORDER *order,
                 ha_rows limit,
                 enum enum_duplicates handle_duplicates __attribute__((__unused__)),
                 bool ignore)
{
  bool using_limit= limit != HA_POS_ERROR;
  bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
  bool used_key_is_modified, transactional_table, will_batch;
  bool can_compare_record;
  int error, loc_error;
  uint used_index= MAX_KEY, dup_key_found;
  bool need_sort= true;
  ha_rows updated, found;
  key_map old_covering_keys;
  TABLE *table;
  SQL_SELECT *select= NULL;
  READ_RECORD info;
  SELECT_LEX *select_lex= &thd->lex->select_lex;
  List<Item> all_fields;
  THD::killed_state killed_status= THD::NOT_KILLED;

  uint table_count= 0;
  bool need_reopen= false;
  for ( ; ; )
  {
    if (open_tables(thd, &table_list, &table_count, 0))
      return 1;
    if (!lock_tables(thd, table_list, table_count, &need_reopen))
      break;
    if (!need_reopen)
      return 1;
    close_tables_for_reopen(thd, &table_list);
  }

  if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||
      (thd->fill_derived_tables() &&
       mysql_handle_derived(thd->lex, &mysql_derived_filling)))
    return 1;

  MYSQL_UPDATE_START();
  thd_proc_info(thd, "init");

  table= table_list->table;

  /* Calculate "table->covering_keys" based on the WHERE */
  table->covering_keys= table->s->keys_in_use;
  table->quick_keys.clear_all();

  if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
    goto abort;

  old_covering_keys= table->covering_keys;	// Keys used in WHERE
  /* Check the fields we are going to modify */
  if (setup_fields_with_no_wrap(thd, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
    goto abort;                               /* purecov: inspected */

  if (table->timestamp_field)
  {
    // Don't set timestamp column if this is modified
    if (bitmap_is_set(table->write_set,
                      table->timestamp_field->field_index))
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
    else
    {
      if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
          table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
        bitmap_set_bit(table->write_set,
                       table->timestamp_field->field_index);
    }
  }

  if (setup_fields(thd, 0, values, MARK_COLUMNS_READ, 0, 0))
  {
    free_underlaid_joins(thd, select_lex);
    goto abort;                               /* purecov: inspected */
  }

  if (select_lex->inner_refs_list.elements &&
      fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
    goto abort;

  if (conds)
  {
    Item::cond_result cond_value;
    conds= remove_eq_conds(thd, conds, &cond_value);
    if (cond_value == Item::COND_FALSE)
      limit= 0;                               // Impossible WHERE
  }
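  /*
    e.g. a WHERE clause that folds to a constant false (such as "WHERE 1=0")
    comes back as Item::COND_FALSE, and limit= 0 makes the scan below stop
    before reading any rows (illustration, not from the original source).
  */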

  table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);

  if (select && select->quick && select->quick->reset())
    goto err;

  table->file->try_semi_consistent_read(1);
  init_read_record(&info,thd,table,select,0,1);

  updated= found= 0;
  /* Generate an error when trying to set a NOT NULL field to NULL. */
  thd->count_cuted_fields= ignore ? CHECK_FIELD_WARN
                                  : CHECK_FIELD_ERROR_FOR_NULL;
  thd->cuted_fields=0L;
  thd_proc_info(thd, "Updating");

  transactional_table= table->file->has_transactions();
  thd->abort_on_warning= test(!ignore &&
                              (thd->variables.sql_mode &
                               (MODE_STRICT_TRANS_TABLES |
                                MODE_STRICT_ALL_TABLES)));
  will_batch= !table->file->start_bulk_update();

  /*
    Assure that we can use position()
    if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();

  /*
    We can use compare_record() to optimize away updates if
    the table handler is returning all columns OR if
    all updated columns are read.
  */
  can_compare_record= (!(table->file->ha_table_flags() &
                         HA_PARTIAL_COLUMN_READ) ||
                       bitmap_is_subset(table->write_set, table->read_set));
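  /*
    In other words (added note): skipping unchanged rows is only safe when
    the before-image is complete, i.e. the engine always returns whole rows,
    or every column being written was also read (write_set is a subset of
    read_set).
  */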
while (!(error=info.read_record(&info)) && !thd->killed)
  {
    if (!(select && select->skip_record()))
    {
      if (table->file->was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      store_record(table,record[1]);
      if (fill_record(thd, fields, values, 0))
        break;                                /* purecov: inspected */

      found++;

      if (!can_compare_record || compare_record(table))
      {
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs when:
            1) When specifically told to do so
            2) When it is not a good idea to batch anymore
            3) When it is necessary to send batch for other reasons
               (One such reason is when READ's must be performed)

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the updates including the one
            defined in the bulk_update_row or not including the row
            in the call. This is up to the handler implementation and can
            vary from call to call.

            The dup_key_found reports the number of duplicate keys found
            in those updates actually executed. It only reports those if
            the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
            If this hasn't been issued it returns an error code and can
            ignore this number. Thus any handler that implements batching
            for UPDATE IGNORE must also handle this extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are ignored).
          */
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
limit+= dup_key_found;
updated-= dup_key_found;
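          /*
            Added note: with UPDATE IGNORE, a duplicate found inside the
            batch neither consumes the LIMIT nor counts as an updated row,
            hence the two corrections above.
          */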
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }

        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
        {
          if (error != HA_ERR_RECORD_IS_THE_SAME)
            updated++;
          else
            error= 0;
        }
        else if (!ignore ||
                 table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
            flags|= ME_FATALERROR; /* Other handler errors are fatal */

          prepare_record_for_error_message(error, table);
          table->file->print_error(error,MYF(flags));
          error= 1;
          break;
        }
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in most common situations where no
          batching has occurred and if batching was supposed to occur but
          no updates were made and finally when the batch execution was
          performed without error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check and if no error but duplicate keys found we need to
          continue since those are not counted for in limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report error of duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error,MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found; // limit was 0 when we got here, so restore it
          updated-= dup_key_found;
        }
        else
        {
          error= -1;				// Simulate end of file
          break;
        }
      }
    }
    else
      table->file->unlock_row();
    thd->row_count++;
  }
  dup_key_found= 0;

bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
                          Item **conds, uint order_num, ORDER *order)
{
  List<Item> all_fields;
  SELECT_LEX *select_lex= &thd->lex->select_lex;

  /*
    Statement-based replication of UPDATE ... LIMIT is not safe as order of
    rows is not defined, so in mixed mode we go to row-based.

    Note that we may consider a statement as safe if ORDER BY primary_key
    is present. However it may confuse users to see very similar statements
    replicated differently.
  */
  if (thd->lex->current_select->select_limit)
  {
    thd->lex->set_stmt_unsafe();
    thd->set_current_stmt_binlog_row_based_if_mixed();
  }

  thd->lex->allow_sum_func= 0;

  if (setup_tables_and_check_access(thd, &select_lex->context,
                                    &select_lex->top_join_list,
                                    table_list,
                                    &select_lex->leaf_tables, false) ||
      setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
      select_lex->setup_ref_array(thd, order_num) ||
      setup_order(thd, select_lex->ref_pointer_array,
                  table_list, all_fields, all_fields, order))
    return true;

  /* Check that we are not using table that we are updating in a sub select */
  {
    TABLE_LIST *duplicate;
    if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
    {
      update_non_unique_table_error(table_list, "UPDATE", duplicate);
      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
  }
  return false;
}
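
/*
  Example of the statement shape rejected above (illustration, not from the
  original source):
    UPDATE t1 SET a= (SELECT MAX(a) FROM t1);   -- t1 also used in subquery
*/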


/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/
static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->used_tables();
  return map;
}
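
/*
  For example (illustration, not from the original source), for
  "UPDATE t1, t2 SET t1.a= 1, t2.b= 2" the returned map has the bits of
  both t1 and t2 set; each leaf table's table->map is later tested
  against it.
*/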

/*
  make update specific preparation and checks after opening tables

  SYNOPSIS
    mysql_multi_update_prepare()
*/

int mysql_multi_update_prepare(THD *thd)
{
  LEX *lex= thd->lex;
  TABLE_LIST *table_list= lex->query_tables;
  TABLE_LIST *tl, *leaves;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;

  /*
    If this multi-update was converted from a usual update, here is the table
    counter, else junk will be assigned here but then replaced with the real
    count in open_tables().
  */
  uint table_count= lex->table_count;
  const bool using_lock_tables= thd->locked_tables != 0;
  bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
  bool need_reopen= false;

  /* The following is needed for prepared statements, to run next time as a multi-update */
  thd->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /* open tables and create derived ones, but do not lock and fill them */
  if (((original_multiupdate || need_reopen) &&
       open_tables(thd, &table_list, &table_count, 0)) ||
      mysql_handle_derived(lex, &mysql_derived_prepare))
    return true;

  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is a
    check for a second call in setup_tables()).
  */
  if (setup_tables_and_check_access(thd, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    &lex->select_lex.leaf_tables, false))
    return true;

  if (setup_fields_with_no_wrap(thd, 0, *fields, MARK_COLUMNS_WRITE, 0, 0))
    return true;

  if (update_view && check_fields(thd, *fields))
    return true;

  tables_for_update= get_table_map(fields);

  /*
    Setup timestamp handling and locking mode
  */
  leaves= lex->select_lex.leaf_tables;
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    TABLE *table= tl->table;
    /* Only set timestamp column if this is not modified */
    if (table->timestamp_field &&
        bitmap_is_set(table->write_set,
                      table->timestamp_field->field_index))
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;

    /* if table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      table->mark_columns_needed_for_update();
      /*
        If table will be updated we should not downgrade lock for it and
        leave it as is.
      */
    }
    else
    {
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        improve performance.
      */
tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ;
      /* Update TABLE::lock_type accordingly. */
      if (!tl->placeholder() && !using_lock_tables)
        tl->table->reginfo.lock_type= tl->lock_type;
    }
  }

  /* now lock and fill tables */
  if (lock_tables(thd, table_list, table_count, &need_reopen))
  {
    if (!need_reopen)
      return true;

    /*
      We have to reopen tables since some of them were altered or dropped
      during lock_tables() or something was done with their triggers.
      Let us do some cleanups to be able to do setup_tables() and
      setup_fields() correctly.
    */
    List_iterator_fast<Item> it(*fields);
    Item *item;
    while ((item= it++))
      item->cleanup();

    /* We have to cleanup translation tables of views. */
    for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global)
      tbl->cleanup_items();

    close_tables_for_reopen(thd, &table_list);
  }

  /*
    Check that we are not using a table that we are updating, but we should
    skip all tables of the UPDATE SELECT itself
  */
  lex->select_lex.exclude_from_table_unique_test= true;
  /* We only need SELECT privilege for columns in the values list */
  for (tl= leaves; tl; tl= tl->next_leaf)
  {
    if (tl->lock_type != TL_READ &&
        tl->lock_type != TL_READ_NO_INSERT)
    {
      TABLE_LIST *duplicate;
      if ((duplicate= unique_table(thd, tl, table_list, 0)))
      {
        update_non_unique_table_error(table_list, "UPDATE", duplicate);
        return true;
      }
    }
  }

  /*
    Set exclude_from_table_unique_test value back to false. It is needed for
    the further check in multi_update::prepare whether to use record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= false;

  if (thd->fill_derived_tables() &&
      mysql_handle_derived(lex, &mysql_derived_filling))
    return true;

  return false;
}

/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(THD *thd,
                        TABLE_LIST *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        ulonglong options,
                        enum enum_duplicates handle_duplicates, bool ignore,
                        SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
  bool res;
  multi_update *result;

  if (!(result= new multi_update(table_list,
                                 thd->lex->select_lex.leaf_tables,
                                 fields, values,
                                 handle_duplicates, ignore)))
    return true;

  thd->abort_on_warning= test(thd->variables.sql_mode &
                              (MODE_STRICT_TRANS_TABLES |
                               MODE_STRICT_ALL_TABLES));

  List<Item> total_list;
  res= mysql_select(thd, &select_lex->ref_pointer_array,
                    table_list, select_lex->with_wild,
                    total_list,
                    conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL,
                    (ORDER *)NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    result, unit, select_lex);
  res|= thd->is_error();
  if (res)
  {
    /* If we had another error reported earlier then this will be ignored */
    result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
    result->abort();
  }
  delete result;
  thd->abort_on_warning= 0;
  return false;
}

multi_update::multi_update(TABLE_LIST *table_list,
                           TABLE_LIST *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg)
  :all_tables(table_list), leaves(leaves_list), update_tables(0),
   tmp_tables(0), updated(0), found(0), fields(field_list),
   values(value_list), table_count(0), copy_field(0),
   handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
   transactional_tables(0), ignore(ignore_arg), error_handled(0)
{}


/*
  Connect fields with tables and create list of tables that are updated
*/

int multi_update::prepare(List<Item> &not_used_values __attribute__((__unused__)),
                          SELECT_LEX_UNIT *lex_unit __attribute__((__unused__)))
{
  TABLE_LIST *table_ref;
  SQL_LIST update;
  table_map tables_to_update;
  Item_field *item;
  List_iterator_fast<Item> field_it(*fields);
  List_iterator_fast<Item> value_it(*values);
  uint i, max_fields;
  uint leaf_table_count= 0;

  thd->count_cuted_fields= CHECK_FIELD_WARN;
  thd->cuted_fields=0L;
  thd_proc_info(thd, "updating main table");

  tables_to_update= get_table_map(fields);

  if (!tables_to_update)
  {
    my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
    return 1;
  }

  /*
    We have to check values after setup_tables to get covering_keys right in
    reference tables
  */
  if (setup_fields(thd, 0, *values, MARK_COLUMNS_READ, 0, 0))
    return 1;

  /*
    Save tables being updated in update_tables
    update_table->shared is position for table
    Don't use key read on tables that are updated
  */
  update.empty();
  for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
  {
    /* TODO: add support for views over joins */
    TABLE *table=table_ref->table;
    leaf_table_count++;
    if (tables_to_update & table->map)
    {
      TABLE_LIST *tl= (TABLE_LIST*) thd->memdup((char*) table_ref,
                                                sizeof(*tl));
      if (!tl)
        return 1;
      update.link_in_list((uchar*) tl, (uchar**) &tl->next_local);
      tl->shared= table_count++;
      table->no_keyread=1;
      table->covering_keys.clear_all();
      table->pos_in_table_list= tl;
    }
  }
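
  /*
    Added note: tl->shared now holds this table's ordinal position among
    the updated tables; it is used below as the index into tmp_tables[],
    fields_for_table[] and values_for_table[].
  */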

  table_count= update.elements;
  update_tables= (TABLE_LIST*) update.first;

  tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
  tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
                                                   table_count);
  fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
                                              table_count);
  values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
                                              table_count);
  if (thd->is_fatal_error)
    return 1;
  for (i=0 ; i < table_count ; i++)
  {
    fields_for_table[i]= new List_item;
    values_for_table[i]= new List_item;
  }
  if (thd->is_fatal_error)
    return 1;

  /* Split fields into fields_for_table[] and values_for_table[] */
  while ((item= (Item_field *) field_it++))
  {
    Item *value= value_it++;
    uint offset= item->field->table->pos_in_table_list->shared;
    fields_for_table[offset]->push_back(item);
    values_for_table[offset]->push_back(value);
  }
  if (thd->is_fatal_error)
    return 1;

  /* Allocate copy fields */
  max_fields=0;
  for (i=0 ; i < table_count ; i++)
    set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
  copy_field= new Copy_field[max_fields];
  return(thd->is_fatal_error != 0);
}


/*
  Check if table is safe to update on fly

  SYNOPSIS
    safe_update_on_fly()
    join_tab		How table is used in join
    all_tables		List of tables

  NOTES
    We can update the first table in join on the fly if we know that
    a row in this table will never be read twice. This is true under
    the following conditions:

    - We are doing a table scan and the data is in a separate file (MyISAM) or
      if we don't update a clustered key.

    - We are doing a range scan and we don't update the scan key or
      the primary key for a clustered table handler.

    - Table is not joined to itself.

    This function gets information about fields to be updated from
    the TABLE::write_set bitmap.

  WARNING
    This code is a bit dependent on how make_join_readinfo() works.

  RETURN
    0		Not safe to update
    1		Safe to update
*/

static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
                               TABLE_LIST *table_ref, TABLE_LIST *all_tables)
{
  TABLE *table= join_tab->table;
  if (unique_table(thd, table_ref, all_tables, 0))
    return 0;
  switch (join_tab->type) {
  case JT_SYSTEM:
  case JT_CONST:
  case JT_EQ_REF:
    return true;				// At most one matching row
  case JT_REF:
  case JT_REF_OR_NULL:
    return !is_key_used(table, join_tab->ref.key, table->write_set);
  case JT_ALL:
    /* If range search on index */
    if (join_tab->quick)
      return !join_tab->quick->is_keys_used(table->write_set);
    /* If scanning in clustered key */
    if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
        table->s->primary_key < MAX_KEY)
      return !is_key_used(table, table->s->primary_key, table->write_set);
    return true;
  default:
    break;					// Avoid compiler warning
  }
  return false;
}
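
/*
  Rough example (illustration, not from the original source): in
    UPDATE t1, t2 SET t1.key_col= t2.a WHERE t1.key_col > t2.b
  a range scan over t1.key_col would update the very key being scanned, so
  t1 is not safe to update on the fly and its changes are buffered in a
  temporary table by initialize_tables() below.
*/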

/*
  Initialize table for multi table

  IMPLEMENTATION
    - Update first table in join on the fly, if possible
    - Create temporary tables to store changed values for all other tables
      that are updated (and main_table if the above doesn't hold).
*/

int multi_update::initialize_tables(JOIN *join)
{
  TABLE_LIST *table_ref;

  if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join))
    return 1;
  main_table=join->join_tab->table;
  /* Any update has at least one pair (field, value) */
  assert(fields->elements);

  /* Create a temporary table for keys to all tables, except main table */
  for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
  {
    TABLE *table=table_ref->table;
    uint cnt= table_ref->shared;
    List<Item> temp_fields;
    ORDER group;
    TMP_TABLE_PARAM *tmp_param;

    table->mark_columns_needed_for_update();
    table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
    if (table == main_table)			// First table in join
    {
      if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
      {
        table_to_update= main_table;		// Update table on the fly
        continue;
      }
    }
    table->prepare_for_position();

    tmp_param= tmp_table_param+cnt;

    /*
      Create a temporary table to store all fields that are changed for this
      table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For the updatable
      VIEW a few following fields are rowids of tables used in the CHECK
      OPTION condition.
    */
    List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
    TABLE *tbl= table;
    do
    {
      Field_string *field= new Field_string(tbl->file->ref_length, 0,
                                            tbl->alias, &my_charset_bin);
      if (!field)
        return 1;
      /*
        The field will be converted to varstring when creating tmp table if
        table to be updated was created by mysql 4.1. Deny this.
      */
      field->can_alter_field_type= 0;
      Item_field *ifield= new Item_field((Field *) field);
      if (!ifield)
        return 1;
      ifield->maybe_null= 0;
      if (temp_fields.push_back(ifield))
        return 1;
    } while ((tbl= tbl_it++));

    temp_fields.concat(fields_for_table[cnt]);

    /* Make a unique key over the first field to avoid duplicated updates */
    bzero((char*) &group, sizeof(group));
    group.asc= 1;
    group.item= (Item**) temp_fields.head_ref();

    tmp_param->quick_group=1;
    tmp_param->field_count=temp_fields.elements;
    tmp_param->group_parts=1;
    tmp_param->group_length= table->file->ref_length;
    if (!(tmp_tables[cnt]=create_tmp_table(thd,
                                           tmp_param,
                                           temp_fields,
                                           (ORDER*) &group, 0, 0,
                                           TMP_TABLE_ALL_COLUMNS,
                                           HA_POS_ERROR,
                                           (char *) "")))
      return 1;
    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
  }
  return 0;
}
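
/*
  Note on the temporary-table layout built above (summary, not from the
  original source): each row is [rowid of the updated table][rowids of the
  CHECK OPTION tables...][new values of the changed columns]; the unique
  group key over the first field suppresses duplicate updates to one row.
*/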

multi_update::~multi_update()
{
  TABLE_LIST *table;
  for (table= update_tables ; table; table= table->next_local)
  {
    table->table->no_keyread= table->table->no_cache= 0;
    if (ignore)
      table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
  }

  if (tmp_tables)
  {
    for (uint cnt = 0; cnt < table_count; cnt++)
    {
      if (tmp_tables[cnt])
      {
        free_tmp_table(thd, tmp_tables[cnt]);
        tmp_table_param[cnt].cleanup();
      }
    }
  }
  if (copy_field)
    delete [] copy_field;
  thd->count_cuted_fields= CHECK_FIELD_IGNORE;	// Restore this setting
  assert(trans_safe || !updated ||
         thd->transaction.all.modified_non_trans_table);
}

bool multi_update::send_data(List<Item> &not_used_values __attribute__((__unused__)))
{
  TABLE_LIST *cur_table;

  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    TABLE *table= cur_table->table;
    uint offset= cur_table->shared;
    /*
      Check if we are using outer join and we didn't find the row
      or if we have already updated this row in the previous call to this
      function.

      The same row may be presented here several times in a join of type
      UPDATE t1 FROM t1,t2 SET t1.a=t2.a

      In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find the row in t1
      several times.
    */
    if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
      continue;

    /*
      We can use compare_record() to optimize away updates if
      the table handler is returning all columns OR if
      all updated columns are read.
    */
    if (table == table_to_update)
    {
      bool can_compare_record;
      can_compare_record= (!(table->file->ha_table_flags() &
                             HA_PARTIAL_COLUMN_READ) ||
                           bitmap_is_subset(table->write_set,
                                            table->read_set));
      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);
      if (fill_record(thd, *fields_for_table[offset],
                      *values_for_table[offset], 0))
        return 1;

      found++;
      if (!can_compare_record || compare_record(table))
      {
        int error;
        /*
          Inform the main table that we are going to update the table even
          while we may be scanning it. This will flush the read cache
          if it's used.
        */
        main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
        if ((error=table->file->ha_update_row(table->record[1],
                                              table->record[0])) &&
            error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
          {
            /*
              If (ignore && error is ignorable) we don't have to
              do anything; otherwise...
            */
            myf flags= 0;
            if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY))
              flags|= ME_FATALERROR; /* Other handler errors are fatal */

            prepare_record_for_error_message(error, table);
            table->file->print_error(error,MYF(flags));
            return 1;
          }
        }
        else
        {
          if (error == HA_ERR_RECORD_IS_THE_SAME)
            error= 0;
          /* non-transactional or transactional table got modified */
          /* either multi_update class' flag is raised in its branch */
          if (table->file->has_transactions())
            transactional_tables= 1;
          else
          {
            trans_safe= 0;
            thd->transaction.stmt.modified_non_trans_table= true;
          }
        }
      }
    }
    else
    {
      TABLE *tmp_table= tmp_tables[offset];
      /*
        For updatable VIEW store rowid of the updated table and
        rowids of tables used in the CHECK OPTION condition.
      */
      uint field_num= 0;
      List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
      TABLE *tbl= table;
      do
      {
        tbl->file->position(tbl->record[0]);
        memcpy((char*) tmp_table->field[field_num]->ptr,
               (char*) tbl->file->ref, tbl->file->ref_length);
        field_num++;
      } while ((tbl= tbl_it++));

      /* Store regular updated fields in the row. */
      fill_record(thd,
                  tmp_table->field + 1 + unupdated_check_opt_tables.elements,
                  *values_for_table[offset], 1);

      /* Write row, ignoring duplicated updates to a row */
      int error= tmp_table->file->ha_write_row(tmp_table->record[0]);
      if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
      {
        if (error &&
            create_myisam_from_heap(thd, tmp_table,
                                    tmp_table_param[offset].start_recinfo,
                                    &tmp_table_param[offset].recinfo,
                                    error, 1))
        {
          do_update= 0;
          return(1);			// Not a table_is_full error
        }
        found++;
      }
    }
  }
  return 0;
}

void multi_update::send_error(uint errcode,const char *err)
{
  /* First send error whatever it is ... */
  my_error(errcode, MYF(0), err);
}

void multi_update::abort()
{
  /* The error was handled, or nothing was deleted and there are no side effects */
  if (error_handled ||
      (!thd->transaction.stmt.modified_non_trans_table && !updated))
    return;

  /*
    If all tables that have been updated are transaction safe, just do a
    rollback. If not, attempt to do the remaining updates.
  */
  if (! trans_safe)
  {
    assert(thd->transaction.stmt.modified_non_trans_table);
    if (do_update && table_count > 1)
    {
      /* Add warning here */
      /*
        todo/fixme: do_update() is never called with the arg 1.
        Should its signature change to become argless?
      */
      (void) do_updates();
    }
  }
  if (thd->transaction.stmt.modified_non_trans_table)
  {
    /*
      The query has to be binlogged because there's a modified
      non-transactional table, either from the query's list or via a stored
      routine: bug#13270, bug#23333.
    */
    if (mysql_bin_log.is_open())
    {
      /*
        THD::killed status might not have been set ON at the time the error
        was caught, and if that happens later the killed error is written
        into the replication event.
      */
      thd->binlog_query(THD::ROW_QUERY_TYPE,
                        thd->query, thd->query_length,
                        transactional_tables, false);
    }
    thd->transaction.all.modified_non_trans_table= true;
  }
  assert(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
}

int multi_update::do_updates()
{
  TABLE_LIST *cur_table;
  int local_error= 0;
  ha_rows org_updated;
  TABLE *table, *tmp_table;
  List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);

  do_update= 0;					// Don't retry this function

  for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
  {
    bool can_compare_record;
    uint offset= cur_table->shared;

    table = cur_table->table;
    if (table == table_to_update)
      continue;					// Already updated
    org_updated= updated;
    tmp_table= tmp_tables[cur_table->shared];
    tmp_table->file->extra(HA_EXTRA_CACHE);	// Change to read cache
    (void) table->file->ha_rnd_init(0);
    table->file->extra(HA_EXTRA_NO_CACHE);

    check_opt_it.rewind();
    while (TABLE *tbl= check_opt_it++)
    {
      if (tbl->file->ha_rnd_init(1))
        goto err;
      tbl->file->extra(HA_EXTRA_CACHE);
    }

    /*
      Setup copy functions to copy fields from temporary table
    */
    List_iterator_fast<Item> field_it(*fields_for_table[offset]);
    Field **field= tmp_table->field +
                   1 + unupdated_check_opt_tables.elements; // Skip row pointers
    Copy_field *copy_field_ptr= copy_field, *copy_field_end;
    for ( ; *field ; field++)
    {
      Item_field *item= (Item_field* ) field_it++;
      (copy_field_ptr++)->set(item->field, *field, 0);
    }
    copy_field_end=copy_field_ptr;

    if ((local_error = tmp_table->file->ha_rnd_init(1)))
      goto err;

    can_compare_record= (!(table->file->ha_table_flags() &
                           HA_PARTIAL_COLUMN_READ) ||
                         bitmap_is_subset(table->write_set,
                                          table->read_set));

    for (;;)
    {
      if (thd->killed && trans_safe)
        goto err;
      if ((local_error=tmp_table->file->rnd_next(tmp_table->record[0])))
      {
        if (local_error == HA_ERR_END_OF_FILE)
          break;
        if (local_error == HA_ERR_RECORD_DELETED)
          continue;				// May happen on dup key
        goto err;
      }

      /* call rnd_pos() using rowids from temporary table */
      check_opt_it.rewind();
      TABLE *tbl= table;
      uint field_num= 0;
      do
      {
        if ((local_error=
             tbl->file->rnd_pos(tbl->record[0],
                                (uchar *) tmp_table->field[field_num]->ptr)))
          goto err;
        field_num++;
      } while ((tbl= check_opt_it++));

      table->status|= STATUS_UPDATED;
      store_record(table,record[1]);

      /* Copy data from temporary table to current table */
      for (copy_field_ptr=copy_field;
           copy_field_ptr != copy_field_end;
           copy_field_ptr++)
        (*copy_field_ptr->do_copy)(copy_field_ptr);

      if (!can_compare_record || compare_record(table))
      {
        if ((local_error=table->file->ha_update_row(table->record[1],
                                                    table->record[0])) &&
            local_error != HA_ERR_RECORD_IS_THE_SAME)
        {
          if (!ignore ||
              table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY))
            goto err;
        }
        if (local_error != HA_ERR_RECORD_IS_THE_SAME)
          updated++;
        else
          local_error= 0;
      }
    }

    if (updated != org_updated)
    {
      if (table->file->has_transactions())
        transactional_tables= 1;
      else
      {
        trans_safe= 0;				// Can't do safe rollback
        thd->transaction.stmt.modified_non_trans_table= true;
      }
    }
    (void) table->file->ha_rnd_end();
    (void) tmp_table->file->ha_rnd_end();
    check_opt_it.rewind();
    while (TABLE *tbl= check_opt_it++)
      tbl->file->ha_rnd_end();
  }
  return 0;

err:
  {
    prepare_record_for_error_message(local_error, table);
    table->file->print_error(local_error,MYF(ME_FATALERROR));
  }

err2:
  (void) table->file->ha_rnd_end();
  (void) tmp_table->file->ha_rnd_end();
  check_opt_it.rewind();
  while (TABLE *tbl= check_opt_it++)
    tbl->file->ha_rnd_end();

  if (updated != org_updated)
  {
    if (table->file->has_transactions())
      transactional_tables= 1;
    else
    {
      trans_safe= 0;
      thd->transaction.stmt.modified_non_trans_table= true;
    }
  }
  return 1;
}

/* out: 1 if error, 0 if success */

bool multi_update::send_eof()
{
  char buff[STRING_BUFFER_USUAL_SIZE];
  ulonglong id;
  THD::killed_state killed_status= THD::NOT_KILLED;

  thd_proc_info(thd, "updating reference tables");

  /*
    Does updates for the last n - 1 tables, returns 0 if ok;
    error takes into account killed status gained in do_updates()
  */
  int local_error = (table_count) ? do_updates() : 0;
  /*
    If local_error is not set ON until after do_updates(), then
    a kill performed later should not affect the binlogging.
  */
  killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
  thd_proc_info(thd, "end");

  /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded or if we updated some non
    transactional tables.

    The query has to be binlogged because there's a modified
    non-transactional table, either from the query's list or via a stored
    routine: bug#13270, bug#23333.
  */
  assert(trans_safe || !updated ||
         thd->transaction.stmt.modified_non_trans_table);
  if (local_error == 0 || thd->transaction.stmt.modified_non_trans_table)
  {
    if (mysql_bin_log.is_open())
    {
      if (local_error == 0)
        thd->clear_error();
      if (thd->binlog_query(THD::ROW_QUERY_TYPE,
                            thd->query, thd->query_length,
                            transactional_tables, false, killed_status) &&
          trans_safe)
      {
        local_error= 1;				// Rollback update
      }
    }
    if (thd->transaction.stmt.modified_non_trans_table)
      thd->transaction.all.modified_non_trans_table= true;
  }

  if (local_error != 0)
    error_handled= true; // to force early leave from ::send_error()

  if (local_error > 0) // if the above log write did not fail ...
  {
    /* Safety: If we haven't got an error before (can happen in do_updates) */
    my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
               MYF(0));
    return true;
  }

  id= thd->arg_of_last_insert_id_function ?
      thd->first_successful_insert_id_in_prev_stmt : 0;
  sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated,
          (ulong) thd->cuted_fields);
  thd->row_count_func=
    (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
  ::my_ok(thd, (ulong) thd->row_count_func, id, buff);
  return false;
}