  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/*
  Single-table and multi-table updates of tables.
  Multi-table updates were introduced by Sinisa & Monty
*/
#include <drizzled/sql_select.h>
#include <drizzled/error.h>
#include <drizzled/probes.h>
#include <drizzled/sql_base.h>
#include <drizzled/field/epoch.h>
#include <drizzled/sql_parse.h>
#include <drizzled/optimizer/range.h>
#include <drizzled/records.h>
#include <drizzled/internal/my_sys.h>
#include <drizzled/internal/iocache.h>
#include <drizzled/transaction_services.h>
#include <drizzled/filesort.h>
#include <drizzled/plugin/storage_engine.h>

#include <boost/dynamic_bitset.hpp>
using namespace std;
/*
  If we got a duplicate key error, we want to write an error
  message containing the value of the duplicate key. If we do not have
  all fields of the key value in getInsertRecord(), we need to re-read the
  record with a proper read_set.

  @param[in] error   error number
  @param[in] table   table
*/

static void prepare_record_for_error_message(int error, Table *table)
  Field **field_p= NULL;
  /*
    Only duplicate key errors print the key value.
    If the storage engine always reads all columns, we already have the value.
  */
  if ((error != HA_ERR_FOUND_DUPP_KEY) ||
      ! (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ)))
  /* Create unique_map with all fields used by that index. */
  boost::dynamic_bitset<> unique_map(table->getShare()->sizeFields()); /* Fields in offended unique. */
  table->mark_columns_used_by_index_no_reset(keynr, unique_map);
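
  /*
    boost::dynamic_bitset's operator-= is set difference, so the two
    statements below clear every column bit that is already covered by the
    current read_set or write_set, leaving only the index columns whose
    values still have to be fetched.
  */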
  /* Subtract read_set and write_set. */
  unique_map-= *table->read_set;
  unique_map-= *table->write_set;
  /*
    If the unique index uses columns that are neither in read_set
    nor in write_set, we must re-read the record.
    Otherwise there is no need to do anything.
  */
  if (unique_map.none())
  /* Get identifier of last read record into table->cursor->ref. */
  table->cursor->position(table->getInsertRecord());
  /* Add all fields used by unique index to read_set. */
  *table->read_set|= unique_map;
  /* Read record that is identified by table->cursor->ref. */
  (void) table->cursor->rnd_pos(table->getUpdateRecord(), table->cursor->ref);
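
  /*
    The row is re-read into getUpdateRecord() (record[1]) so that the row
    image being built in getInsertRecord() is not overwritten; record[1]
    appears to live rec_buff_length bytes after record[0], which is why
    copy_from_tmp() below is given exactly that offset.
  */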
  /* Copy the newly read columns into the new record. */
  for (field_p= table->getFields(); (field= *field_p); field_p++)
    if (unique_map.test(field->position()))
      field->copy_from_tmp(table->getShare()->rec_buff_length);
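
/*
  Process an UPDATE statement for a single table.

  Summary of the flow below: open and lock the table, prepare the field and
  value lists, evaluate the WHERE condition (using a quick select, or a
  filesort/tempfile pass when the key used for the scan is itself modified),
  and update each matching row, honouring ORDER BY and LIMIT.

  @param session     current session
  @param table_list  table to update
  @param fields      columns being assigned
  @param values      values to assign to those columns
  @param conds       WHERE condition, if any
  @param order_num   number of elements in the ORDER BY list
  @param order       ORDER BY list
  @param limit       LIMIT value, or HA_POS_ERROR when there is no LIMIT

  @retval 0 success
  @retval 1 error
*/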
int update_query(Session *session, TableList *table_list,
                 List<Item> &fields, List<Item> &values, COND *conds,
                 uint32_t order_num, Order *order,
                 ha_rows limit, enum enum_duplicates,
  bool using_limit= limit != HA_POS_ERROR;
  bool used_key_is_modified;
  bool transactional_table;
  uint used_index= MAX_KEY, dup_key_found;
  bool need_sort= true;
  ha_rows updated, found;
  key_map old_covering_keys;
  optimizer::SqlSelect *select= NULL;
  Select_Lex *select_lex= &session->getLex()->select_lex;
  List<Item> all_fields;
  Session::killed_state_t killed_status= Session::NOT_KILLED;
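
  /*
    DRIZZLE_UPDATE_START / DRIZZLE_UPDATE_DONE are the probe points pulled in
    through drizzled/probes.h; DONE reports an error flag plus the found and
    updated row counts.
  */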
  DRIZZLE_UPDATE_START(session->getQueryString()->c_str());
  if (session->openTablesLock(table_list))
    DRIZZLE_UPDATE_DONE(1, 0, 0);
  table= table_list->table;

  /* Calculate "table->covering_keys" based on the WHERE */
  table->covering_keys= table->getShare()->keys_in_use;
  table->quick_keys.reset();
  if (prepare_update(session, table_list, &conds, order_num, order))
    DRIZZLE_UPDATE_DONE(1, 0, 0);
  old_covering_keys= table->covering_keys; // Keys used in WHERE

  /* Check the fields we are going to modify */
  if (setup_fields_with_no_wrap(session, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
    DRIZZLE_UPDATE_DONE(1, 0, 0);
  if (table->timestamp_field)
    // Don't auto-set the timestamp column if this statement modifies it explicitly
    if (table->timestamp_field->isWriteSet())
      table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
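
    /*
      When the timestamp is auto-set on UPDATE, the column has to be part of
      the write set so the engine stores the refreshed value.
    */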
    if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
        table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
      table->setWriteSet(table->timestamp_field->position());
  if (setup_fields(session, 0, values, MARK_COLUMNS_READ, 0, 0))
  {
    free_underlaid_joins(session, select_lex);
    DRIZZLE_UPDATE_DONE(1, 0, 0);
188
if (select_lex->inner_refs_list.elements &&
208
189
fix_inner_refs(session, all_fields, select_lex, select_lex->ref_pointer_array))
210
191
DRIZZLE_UPDATE_DONE(1, 0, 0);
      table->timestamp_field &&
      (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
       table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
    *table->read_set|= *table->write_set;
  // Don't count on usage of 'only index' when calculating which key to use
  table->covering_keys.reset();
  session->main_da.reset_diagnostics_area();
  free_underlaid_joins(session, select_lex);
  if (error || session->is_error())
  {
    DRIZZLE_UPDATE_DONE(1, 0, 0);
    goto abort; // Error in WHERE
  }
  DRIZZLE_UPDATE_DONE(0, 0, 0);
  session->my_ok(); // No matching records
  if (used_index == MAX_KEY) // no index for sort order
    used_index= table->cursor->key_used_on_scan;
  if (used_index != MAX_KEY)
    used_key_is_modified= is_key_used(table, used_index, *table->write_set);
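
  /*
    When the key used to scan the table is also being updated, the rows
    cannot safely be changed in scan order: they are either sorted up front
    with filesort, or their positions are buffered in a temporary file and
    updated in a second pass, as set up below.
  */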
    /* NOTE: filesort will call table->prepare_for_position() */
    uint32_t length= 0;
    SortField *sortorder;
    ha_rows examined_rows;
    FileSort filesort(*session);
    table->sort.io_cache= new internal::IO_CACHE;
    memset(table->sort.io_cache, 0, sizeof(internal::IO_CACHE));

    if (!(sortorder= make_unireg_sortorder(order, &length, NULL)) ||
        (table->sort.found_records= filesort.run(table, sortorder, length,
                                                 examined_rows)) == HA_POS_ERROR)
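
    /*
      First pass: store the position (cursor->ref) of every row matching the
      WHERE clause in a temporary cache; the positions are then read back
      below and the updates applied, so the scan that finds the rows is never
      disturbed by the updates themselves.
    */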
    internal::IO_CACHE tempfile;
    if (tempfile.open_cached_file(drizzle_tmpdir.c_str(), TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)))
    /* If quick select is used, initialize it before retrieving rows. */
    if (select && select->quick && select->quick->reset())
    if (used_index == MAX_KEY || (select && select->quick))
      if ((error= info.init_read_record(session, table, select, 0, true)))

    if ((error= info.init_read_record_idx(session, table, 1, used_index)))
    session->set_proc_info("Searching rows for update");
    ha_rows tmp_limit= limit;
    while (not (error= info.read_record(&info)) && not session->getKilled())
      if (!(select && select->skip_record()))
      {
        if (table->cursor->was_semi_consistent_read())
          continue; /* repeat the read of the same row if it still exists */
        table->cursor->position(table->getInsertRecord());
        if (my_b_write(&tempfile, table->cursor->ref,
                       table->cursor->ref_length))
        table->cursor->unlock_row();
    if (session->getKilled() && not error)
      error= 1; // Aborted
    limit= tmp_limit;
    table->cursor->try_semi_consistent_read(0);
    info.end_read_record();
    /* Change select to use tempfile */
    safe_delete(select->quick);
    if (select->free_cond)
      delete select->cond;
    select= new optimizer::SqlSelect();
    select->head= table;
    if (tempfile.reinit_io_cache(internal::READ_CACHE, 0L, 0, 0))

    // Read row ptrs from this cursor
    memcpy(select->file, &tempfile, sizeof(tempfile));
  if (select && select->quick && select->quick->reset())
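
  /*
    try_semi_consistent_read(1) lets the engine use a semi-consistent read
    for the update scan (e.g. InnoDB will not block on rows locked by other
    transactions); was_semi_consistent_read() inside the loop flags rows that
    have to be read again.
  */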
  table->cursor->try_semi_consistent_read(1);
  if ((error= info.init_read_record(session, table, select, 0, true)))
  updated= found= 0;
  session->set_proc_info("Updating");

  transactional_table= table->cursor->has_transactions();
  session->setAbortOnWarning(test(!ignore));
  /* Ensure that we can use position() */
  if (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ))
    table->prepare_for_position();

  while (not (error= info.read_record(&info)) && not session->getKilled())
    if (not (select && select->skip_record()))
    {
      if (table->cursor->was_semi_consistent_read())
        continue; /* repeat the read of the same row if it still exists */
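
      /*
        The update can be skipped when the before and after row images are
        comparable: either the engine always returns all columns, or every
        updated column has also been read.  records_are_comparable() makes
        that check and compare_records() then reports whether the row
        actually changed.
      */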
      if (! table->records_are_comparable() || table->compare_records())
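        /*
          getUpdateRecord() (record[1]) holds the row as it was read and
          getInsertRecord() (record[0]) holds the new values, which is the
          (old image, new image) order updateRecord() expects.
        */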
        /* Non-batched update */
        error= table->cursor->updateRecord(table->getUpdateRecord(),
                                           table->getInsertRecord());

        table->auto_increment_field_not_null= false;
        if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
          if (error != HA_ERR_RECORD_IS_THE_SAME)
                 table->cursor->is_fatal_error(error, HA_CHECK_DUP_KEY))
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          flags|= ME_FATALERROR; /* Other handler errors are fatal */
          prepare_record_for_error_message(error, table);
          table->print_error(error, MYF(flags));
      if (!--limit && using_limit)
  /*
    It's assumed that if an error was set in combination with an effective
    killed status then the error is due to killing.
  */
  killed_status= session->getKilled(); // get the status of the volatile
  // simulated killing after the loop must be ineffective for binlogging
  error= (killed_status == Session::NOT_KILLED) ? error : 1;
    session->main_da.reset_diagnostics_area();
    session->my_ok((ulong) session->rowCount(), found, id, buff);
    session->status_var.updated_row_count+= session->rowCount();
  session->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; /* calc cuted fields */
  session->setAbortOnWarning(false);
  DRIZZLE_UPDATE_DONE((error >= 0 || session->is_error()), found, updated);
  return ((error >= 0 || session->is_error()) ? 1 : 0);
  table->print_error(error, MYF(0));
  free_underlaid_joins(session, select_lex);
  if (table->key_read)
  {
    table->key_read= 0;
    table->cursor->extra(HA_EXTRA_NO_KEYREAD);
  session->setAbortOnWarning(false);
  DRIZZLE_UPDATE_DONE(1, 0, 0);
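
/*
  Prepare items in an UPDATE statement: resolve the target table and its
  columns, the WHERE condition and the ORDER BY list, and verify that the
  table being updated is not also used as a source elsewhere in the statement
  (ER_UPDATE_TABLE_USED).

  @param session     current session
  @param table_list  table to update
  @param conds       WHERE condition
  @param order_num   number of elements in the ORDER BY list
  @param order       ORDER BY list

  @retval false OK
  @retval true  error
*/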
bool prepare_update(Session *session, TableList *table_list,
                    Item **conds, uint32_t order_num, Order *order)
  List<Item> all_fields;
  Select_Lex *select_lex= &session->getLex()->select_lex;

  session->getLex()->allow_sum_func= 0;
  if (setup_tables_and_check_access(session, &select_lex->context,
                                    &select_lex->top_join_list,
611
TableList *duplicate;
641
612
if ((duplicate= unique_table(table_list, table_list->next_global)))
643
my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->getTableName());
614
my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);