@@ -30 +30 @@
 #include "drizzled/internal/my_sys.h"
 #include "drizzled/internal/iocache.h"
 #include "drizzled/transaction_services.h"
+#include "drizzled/filesort.h"
 #include <boost/dynamic_bitset.hpp>
@@ -97 +98 @@
   /* Copy the newly read columns into the new record. */
   for (field_p= table->getFields(); (field= *field_p); field_p++)
-    if (unique_map.test(field->field_index))
+    if (unique_map.test(field->position()))
       field->copy_from_tmp(table->getShare()->rec_buff_length);
@@ -135 +136 @@
   bool using_limit= limit != HA_POS_ERROR;
   bool used_key_is_modified;
   bool transactional_table;
-  bool can_compare_record;
   uint used_index= MAX_KEY, dup_key_found;
   bool need_sort= true;
@@ -147 +147 @@
   Select_Lex *select_lex= &session->lex->select_lex;
   List<Item> all_fields;
-  Session::killed_state killed_status= Session::NOT_KILLED;
+  Session::killed_state_t killed_status= Session::NOT_KILLED;

-  DRIZZLE_UPDATE_START(session->query.c_str());
+  DRIZZLE_UPDATE_START(session->getQueryString()->c_str());
   if (session->openTablesLock(table_list))
   {
     DRIZZLE_UPDATE_DONE(1, 0, 0);
@@ -189 +189 @@
   if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
       table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
-    table->setWriteSet(table->timestamp_field->field_index);
+    table->setWriteSet(table->timestamp_field->position());
@@ -313 +313 @@
     uint32_t length= 0;
     SortField *sortorder;
     ha_rows examined_rows;
+    FileSort filesort(*session);

-    table->sort.io_cache = new internal::IO_CACHE;
+    table->sort.io_cache= new internal::IO_CACHE;

     if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
-        (table->sort.found_records= filesort(session, table, sortorder, length,
+        (table->sort.found_records= filesort.run(table, sortorder, length,
+                                                 examined_rows)) == HA_POS_ERROR)
@@ -342 +342 @@
     internal::IO_CACHE tempfile;
-    if (open_cached_file(&tempfile, drizzle_tmpdir.c_str(),TEMP_PREFIX,
-                         DISK_BUFFER_SIZE, MYF(MY_WME)))
+    if (tempfile.open_cached_file(drizzle_tmpdir.c_str(),TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)))
@@ -347 +348 @@
     /* If quick select is used, initialize it before retrieving rows. */
     if (select && select->quick && select->quick->reset())
@@ -372 +373 @@
     session->set_proc_info("Searching rows for update");
     ha_rows tmp_limit= limit;

-    while (!(error=info.read_record(&info)) && !session->killed)
+    while (not(error= info.read_record(&info)) && not session->getKilled())
     {
       if (!(select && select->skip_record()))
@@ -415 +416 @@
     select= new optimizer::SqlSelect;
     select->head=table;
-    if (reinit_io_cache(&tempfile,internal::READ_CACHE,0L,0,0))
+    if (tempfile.reinit_io_cache(internal::READ_CACHE,0L,0,0))
       error= 1; /* purecov: inspected */
     // Read row ptrs from this cursor
     memcpy(select->file, &tempfile, sizeof(tempfile));
@@ -457 +458 @@
   if (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ))
     table->prepare_for_position();

-  /*
-    We can use compare_record() to optimize away updates if
-    the table handler is returning all columns OR if
-    all updated columns are read
-  */
-  can_compare_record= (! (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ)) ||
-                       table->write_set->is_subset_of(*table->read_set));
-
-  while (! (error=info.read_record(&info)) && !session->killed)
+  while (not (error=info.read_record(&info)) && not session->getKilled())
   {
-    if (! (select && select->skip_record()))
+    if (not (select && select->skip_record()))
     {
       if (table->cursor->was_semi_consistent_read())
         continue; /* repeat the read of the same row if it still exists */

       table->storeRecord();
       if (fill_record(session, fields, values))
-        /*
-         * If we updated some rows before this one failed (updated > 0),
-         * then we will need to undo adding those records to the
-         * replication Statement message.
-         */
-        TransactionServices &ts= TransactionServices::singleton();
-        ts.removeStatementRecords(session, updated);

-      if (!can_compare_record || table->compare_record())
+      if (! table->records_are_comparable() || table->compare_records())
       {
         /* Non-batched update */
         error= table->cursor->updateRecord(table->getUpdateRecord(),
@@ -544 +524 @@
   /*
     It's assumed that if an error was set in combination with an effective
     killed status then the error is due to killing.
   */
-  killed_status= session->killed; // get the status of the volatile
+  killed_status= session->getKilled(); // get the status of the volatile
   // simulated killing after the loop must be ineffective for binlogging
   error= (killed_status == Session::NOT_KILLED)? error : 1;