~drizzle-trunk/drizzle/development

Viewing changes to drizzled/sql_update.cc

  • Committer: Monty Taylor
  • Date: 2010-04-27 21:03:13 UTC
  • mto: This revision was merged to the branch mainline in revision 1510.
  • Revision ID: mordred@inaugust.com-20100427210313-6h4uz2553kapi196
Undid the libdrizzled.so.

@@ -11,30 +11,25 @@
 
    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
-   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA */
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
 
 
 /*
   Single table and multi table updates of tables.
+  Multi-table updates were introduced by Sinisa & Monty
 */
-
-#include <config.h>
-
-#include <drizzled/sql_select.h>
-#include <drizzled/error.h>
-#include <drizzled/probes.h>
-#include <drizzled/sql_base.h>
-#include <drizzled/field/epoch.h>
-#include <drizzled/sql_parse.h>
-#include <drizzled/optimizer/range.h>
-#include <drizzled/records.h>
-#include <drizzled/internal/my_sys.h>
-#include <drizzled/internal/iocache.h>
-#include <drizzled/transaction_services.h>
-#include <drizzled/filesort.h>
-#include <drizzled/plugin/storage_engine.h>
-
-#include <boost/dynamic_bitset.hpp>
+#include "config.h"
+#include "drizzled/sql_select.h"
+#include "drizzled/error.h"
+#include "drizzled/probes.h"
+#include "drizzled/sql_base.h"
+#include "drizzled/field/timestamp.h"
+#include "drizzled/sql_parse.h"
+#include "drizzled/optimizer/range.h"
+#include "drizzled/records.h"
+#include "drizzled/internal/my_sys.h"
+#include "drizzled/internal/iocache.h"
+
 #include <list>
 
 using namespace std;
@@ -47,7 +42,7 @@
 
   If we got a duplicate key error, we want to write an error
   message containing the value of the duplicate key. If we do not have
-  all fields of the key value in getInsertRecord(), we need to re-read the
+  all fields of the key value in record[0], we need to re-read the
   record with a proper read_set.
 
   @param[in] error   error number
@@ -56,16 +51,18 @@
 
 static void prepare_record_for_error_message(int error, Table *table)
 {
-  Field **field_p= NULL;
-  Field *field= NULL;
-  uint32_t keynr= 0;
+  Field **field_p;
+  Field *field;
+  uint32_t keynr;
+  MyBitmap unique_map; /* Fields in offended unique. */
+  my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
 
   /*
     Only duplicate key errors print the key value.
     If storage engine does always read all columns, we have the value alraedy.
   */
   if ((error != HA_ERR_FOUND_DUPP_KEY) ||
-      ! (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ)))
+      !(table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ)))
     return;
 
   /*
@@ -76,35 +73,31 @@
     return;
 
   /* Create unique_map with all fields used by that index. */
-  boost::dynamic_bitset<> unique_map(table->getShare()->sizeFields()); /* Fields in offended unique. */
-  table->mark_columns_used_by_index_no_reset(keynr, unique_map);
+  unique_map.init(unique_map_buf, table->s->fields);
+  table->mark_columns_used_by_index_no_reset(keynr, &unique_map);
 
   /* Subtract read_set and write_set. */
-  unique_map-= *table->read_set;
-  unique_map-= *table->write_set;
+  bitmap_subtract(&unique_map, table->read_set);
+  bitmap_subtract(&unique_map, table->write_set);
 
   /*
     If the unique index uses columns that are neither in read_set
    nor in write_set, we must re-read the record.
     Otherwise no need to do anything.
   */
-  if (unique_map.none())
+  if (unique_map.isClearAll())
    return;
 
   /* Get identifier of last read record into table->cursor->ref. */
-  table->cursor->position(table->getInsertRecord());
+  table->cursor->position(table->record[0]);
   /* Add all fields used by unique index to read_set. */
-  *table->read_set|= unique_map;
+  bitmap_union(table->read_set, &unique_map);
   /* Read record that is identified by table->cursor->ref. */
-  (void) table->cursor->rnd_pos(table->getUpdateRecord(), table->cursor->ref);
+  (void) table->cursor->rnd_pos(table->record[1], table->cursor->ref);
   /* Copy the newly read columns into the new record. */
-  for (field_p= table->getFields(); (field= *field_p); field_p++)
-  {
-    if (unique_map.test(field->position()))
-    {
-      field->copy_from_tmp(table->getShare()->rec_buff_length);
-    }
-  }
+  for (field_p= table->field; (field= *field_p); field_p++)
+    if (unique_map.isBitSet(field->field_index))
+      field->copy_from_tmp(table->s->rec_buff_length);
 
   return;
 }
@@ -114,7 +107,7 @@
   Process usual UPDATE
 
   SYNOPSIS
-    update_query()
+    mysql_update()
     session                     thread handler
     fields              fields for update
     values              values of fields for update
@@ -129,29 +122,30 @@
     1  - error
 */
 
-int update_query(Session *session, TableList *table_list,
+int mysql_update(Session *session, TableList *table_list,
                  List<Item> &fields, List<Item> &values, COND *conds,
-                 uint32_t order_num, Order *order,
+                 uint32_t order_num, order_st *order,
                  ha_rows limit, enum enum_duplicates,
                  bool ignore)
 {
   bool          using_limit= limit != HA_POS_ERROR;
   bool          used_key_is_modified;
   bool          transactional_table;
-  int           error= 0;
+  bool          can_compare_record;
+  int           error;
   uint          used_index= MAX_KEY, dup_key_found;
   bool          need_sort= true;
   ha_rows       updated, found;
   key_map       old_covering_keys;
   Table         *table;
   optimizer::SqlSelect *select= NULL;
-  ReadRecord    info;
-  Select_Lex    *select_lex= &session->getLex()->select_lex;
+  READ_RECORD   info;
+  Select_Lex    *select_lex= &session->lex->select_lex;
   uint64_t     id;
   List<Item> all_fields;
-  Session::killed_state_t killed_status= Session::NOT_KILLED;
+  Session::killed_state killed_status= Session::NOT_KILLED;
 
-  DRIZZLE_UPDATE_START(session->getQueryString()->c_str());
+  DRIZZLE_UPDATE_START(session->query.c_str());
   if (session->openTablesLock(table_list))
   {
     DRIZZLE_UPDATE_DONE(1, 0, 0);
@@ -162,53 +156,40 @@
   table= table_list->table;
 
   /* Calculate "table->covering_keys" based on the WHERE */
-  table->covering_keys= table->getShare()->keys_in_use;
+  table->covering_keys= table->s->keys_in_use;
   table->quick_keys.reset();
 
-  if (prepare_update(session, table_list, &conds, order_num, order))
-  {
-    DRIZZLE_UPDATE_DONE(1, 0, 0);
-    return 1;
-  }
+  if (mysql_prepare_update(session, table_list, &conds, order_num, order))
+    goto abort;
 
   old_covering_keys= table->covering_keys;              // Keys used in WHERE
   /* Check the fields we are going to modify */
   if (setup_fields_with_no_wrap(session, 0, fields, MARK_COLUMNS_WRITE, 0, 0))
-  {
-    DRIZZLE_UPDATE_DONE(1, 0, 0);
-    return 1;
-  }
-
+    goto abort;
   if (table->timestamp_field)
   {
     // Don't set timestamp column if this is modified
     if (table->timestamp_field->isWriteSet())
-    {
       table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET;
-    }
     else
     {
       if (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
          table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)
-      {
-        table->setWriteSet(table->timestamp_field->position());
-      }
+        table->setWriteSet(table->timestamp_field->field_index);
     }
   }
 
   if (setup_fields(session, 0, values, MARK_COLUMNS_READ, 0, 0))
   {
     free_underlaid_joins(session, select_lex);
-    DRIZZLE_UPDATE_DONE(1, 0, 0);
-
-    return 1;
+    goto abort;
   }
 
   if (select_lex->inner_refs_list.elements &&
    fix_inner_refs(session, all_fields, select_lex, select_lex->ref_pointer_array))
   {
     DRIZZLE_UPDATE_DONE(1, 0, 0);
-    return 1;
+    return -1;
   }
 
   if (conds)
@@ -228,9 +209,7 @@
       table->timestamp_field &&
       (table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_UPDATE ||
        table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH))
-  {
-    *table->read_set|= *table->write_set;
-  }
+    bitmap_union(table->read_set, table->write_set);
   // Don't count on usage of 'only index' when calculating which key to use
   table->covering_keys.reset();
 
@@ -248,11 +227,8 @@
      */
     session->main_da.reset_diagnostics_area();
     free_underlaid_joins(session, select_lex);
-    if (error || session->is_error())
-    {
-      DRIZZLE_UPDATE_DONE(1, 0, 0);
-      return 1;
-    }
+    if (error)
+      goto abort;                               // Error in where
    DRIZZLE_UPDATE_DONE(0, 0, 0);
     session->my_ok();                           // No matching records
     return 0;
@@ -276,7 +252,7 @@
   {
     used_index= select->quick->index;
     used_key_is_modified= (!select->quick->unique_key_range() &&
-                          select->quick->is_keys_used(*table->write_set));
+                          select->quick->is_keys_used(table->write_set));
   }
   else
   {
@@ -284,7 +260,7 @@
     if (used_index == MAX_KEY)                  // no index for sort order
       used_index= table->cursor->key_used_on_scan;
     if (used_index != MAX_KEY)
-      used_key_is_modified= is_key_used(table, used_index, *table->write_set);
+      used_key_is_modified= is_key_used(table, used_index, table->write_set);
   }
 
 
@@ -313,16 +289,17 @@
         NOTE: filesort will call table->prepare_for_position()
       */
       uint32_t         length= 0;
-      SortField  *sortorder;
+      SORT_FIELD  *sortorder;
       ha_rows examined_rows;
-      FileSort filesort(*session);
 
-      table->sort.io_cache= new internal::IO_CACHE;
+      table->sort.io_cache = new internal::IO_CACHE;
+      memset(table->sort.io_cache, 0, sizeof(internal::IO_CACHE));
 
       if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) ||
-          (table->sort.found_records= filesort.run(table, sortorder, length,
-                                                   select, limit, 1,
-                                                   examined_rows)) == HA_POS_ERROR)
+          (table->sort.found_records= filesort(session, table, sortorder, length,
+                                               select, limit, 1,
+                                               &examined_rows))
+          == HA_POS_ERROR)
      {
         goto err;
       }
@@ -330,7 +307,8 @@
         Filesort has already found and selected the rows we want to update,
         so we don't need the where clause
       */
-      safe_delete(select);
+      delete select;
+      select= 0;
     }
     else
     {
@@ -341,10 +319,9 @@
       */
 
       internal::IO_CACHE tempfile;
-      if (tempfile.open_cached_file(drizzle_tmpdir.c_str(),TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)))
-      {
+      if (open_cached_file(&tempfile, drizzle_tmpdir,TEMP_PREFIX,
+                           DISK_BUFFER_SIZE, MYF(MY_WME)))
         goto err;
-      }
 
       /* If quick select is used, initialize it before retrieving rows. */
       if (select && select->quick && select->quick->reset())
@@ -363,27 +340,21 @@
       */
 
       if (used_index == MAX_KEY || (select && select->quick))
-      {
-        if ((error= info.init_read_record(session, table, select, 0, true)))
-          goto err;
-      }
+        init_read_record(&info,session,table,select,0,1);
       else
-      {
-        if ((error= info.init_read_record_idx(session, table, 1, used_index)))
-          goto err;
-      }
+        init_read_record_idx(&info, session, table, 1, used_index);
 
       session->set_proc_info("Searching rows for update");
       ha_rows tmp_limit= limit;
 
-      while (not(error= info.read_record(&info)) && not session->getKilled())
+      while (!(error=info.read_record(&info)) && !session->killed)
      {
         if (!(select && select->skip_record()))
         {
           if (table->cursor->was_semi_consistent_read())
             continue;  /* repeat the read of the same row if it still exists */
 
-          table->cursor->position(table->getInsertRecord());
+          table->cursor->position(table->record[0]);
           if (my_b_write(&tempfile,table->cursor->ref,
                          table->cursor->ref_length))
           {
@@ -399,26 +370,27 @@
         else
           table->cursor->unlock_row();
       }
-      if (session->getKilled() && not error)
+      if (session->killed && !error)
        error= 1;                               // Aborted
       limit= tmp_limit;
       table->cursor->try_semi_consistent_read(0);
-      info.end_read_record();
+      end_read_record(&info);
 
       /* Change select to use tempfile */
       if (select)
       {
-        safe_delete(select->quick);
+        delete select->quick;
         if (select->free_cond)
          delete select->cond;
+        select->quick=0;
         select->cond=0;
       }
       else
       {
-        select= new optimizer::SqlSelect();
+        select= new optimizer::SqlSelect;
         select->head=table;
       }
-      if (tempfile.reinit_io_cache(internal::READ_CACHE,0L,0,0))
+      if (reinit_io_cache(&tempfile,internal::READ_CACHE,0L,0,0))
        error=1;
       // Read row ptrs from this cursor
       memcpy(select->file, &tempfile, sizeof(tempfile));
@@ -435,10 +407,7 @@
   if (select && select->quick && select->quick->reset())
     goto err;
   table->cursor->try_semi_consistent_read(1);
-  if ((error= info.init_read_record(session, table, select, 0, true)))
-  {
-    goto err;
-  }
+  init_read_record(&info,session,table,select,0,1);
 
   updated= found= 0;
   /*
@@ -454,7 +423,7 @@
   session->set_proc_info("Updating");
 
   transactional_table= table->cursor->has_transactions();
-  session->setAbortOnWarning(test(!ignore));
+  session->abort_on_warning= test(!ignore);
 
   /*
     Assure that we can use position()
@@ -463,9 +432,17 @@
   if (table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ))
     table->prepare_for_position();
 
-  while (not (error=info.read_record(&info)) && not session->getKilled())
+  /*
+    We can use compare_record() to optimize away updates if
+    the table handler is returning all columns OR if
+    if all updated columns are read
+  */
+  can_compare_record= (!(table->cursor->getEngine()->check_flag(HTON_BIT_PARTIAL_COLUMN_READ)) ||
+                       bitmap_is_subset(table->write_set, table->read_set));
+
+  while (!(error=info.read_record(&info)) && !session->killed)
   {
-    if (not (select && select->skip_record()))
+    if (!(select && select->skip_record()))
     {
       if (table->cursor->was_semi_consistent_read())
         continue;  /* repeat the read of the same row if it still exists */
@@ -476,24 +453,21 @@
 
       found++;
 
-      if (! table->records_are_comparable() || table->compare_records())
+      if (!can_compare_record || table->compare_record())
      {
         /* Non-batched update */
-        error= table->cursor->updateRecord(table->getUpdateRecord(),
-                                            table->getInsertRecord());
-
-        table->auto_increment_field_not_null= false;
-
+        error= table->cursor->updateRecord(table->record[1],
+                                            table->record[0]);
         if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
-        {
+        {
           if (error != HA_ERR_RECORD_IS_THE_SAME)
             updated++;
           else
            error= 0;
-        }
-        else if (! ignore ||
+        }
+        else if (! ignore ||
                 table->cursor->is_fatal_error(error, HA_CHECK_DUP_KEY))
-        {
+        {
          /*
             If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
@@ -504,10 +478,10 @@
            flags|= ME_FATALERROR; /* Other handler errors are fatal */
 
           prepare_record_for_error_message(error, table);
-          table->print_error(error,MYF(flags));
-          error= 1;
-          break;
-        }
+          table->print_error(error,MYF(flags));
+          error= 1;
+          break;
+        }
       }
 
       if (!--limit && using_limit)
@@ -529,7 +503,7 @@
     It's assumed that if an error was set in combination with an effective
     killed status then the error is due to killing.
   */
-  killed_status= session->getKilled(); // get the status of the volatile
+  killed_status= session->killed; // get the status of the volatile
   // simulated killing after the loop must be ineffective for binlogging
   error= (killed_status == Session::NOT_KILLED)?  error : 1;
 
@@ -539,7 +513,7 @@
   if (!transactional_table && updated > 0)
     session->transaction.stmt.markModifiedNonTransData();
 
-  info.end_read_record();
+  end_read_record(&info);
   delete select;
   session->set_proc_info("end");
   table->cursor->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
@@ -576,18 +550,14 @@
      * lp bug# 439719
      */
     session->main_da.reset_diagnostics_area();
-    session->my_ok((ulong) session->rowCount(), found, id, buff);
-    session->status_var.updated_row_count+= session->rowCount();
+    session->my_ok((ulong) session->row_count_func, found, id, buff);
   }
-  session->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL;              /* calc cuted fields */
-  session->setAbortOnWarning(false);
+  session->count_cuted_fields= CHECK_FIELD_IGNORE;              /* calc cuted fields */
+  session->abort_on_warning= 0;
   DRIZZLE_UPDATE_DONE((error >= 0 || session->is_error()), found, updated);
   return ((error >= 0 || session->is_error()) ? 1 : 0);
 
 err:
-  if (error != 0)
-    table->print_error(error,MYF(0));
-
   delete select;
   free_underlaid_joins(session, select_lex);
   if (table->key_read)
@@ -595,8 +565,9 @@
     table->key_read=0;
     table->cursor->extra(HA_EXTRA_NO_KEYREAD);
   }
-  session->setAbortOnWarning(false);
+  session->abort_on_warning= 0;
 
+abort:
   DRIZZLE_UPDATE_DONE(1, 0, 0);
   return 1;
 }
@@ -605,7 +576,7 @@
   Prepare items in UPDATE statement
 
   SYNOPSIS
-    prepare_update()
+    mysql_prepare_update()
     session                     - thread handler
     table_list          - global/local table list
     conds               - conditions
@@ -616,13 +587,13 @@
     false OK
     true  error
 */
-bool prepare_update(Session *session, TableList *table_list,
-                         Item **conds, uint32_t order_num, Order *order)
+bool mysql_prepare_update(Session *session, TableList *table_list,
+                         Item **conds, uint32_t order_num, order_st *order)
 {
   List<Item> all_fields;
-  Select_Lex *select_lex= &session->getLex()->select_lex;
+  Select_Lex *select_lex= &session->lex->select_lex;
 
-  session->getLex()->allow_sum_func= 0;
+  session->lex->allow_sum_func= 0;
 
   if (setup_tables_and_check_access(session, &select_lex->context,
                                     &select_lex->top_join_list,
@@ -640,7 +611,7 @@
     TableList *duplicate;
     if ((duplicate= unique_table(table_list, table_list->next_global)))
     {
-      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->getTableName());
+      my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name);
      return true;
    }
   }
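
As a reader's aid for the bitmap hunk in prepare_record_for_error_message() above: the removed lines build the set of key columns that still need re-reading with boost::dynamic_bitset<>, while the restored lines do the same job with the MyBitmap helpers (bitmap_subtract, bitmap_union, isClearAll, isBitSet). The following is a minimal, self-contained sketch of that set logic only; it is not code from the Drizzle tree, and the field count and positions are made up for illustration.

// Hypothetical stand-alone illustration of the unique_map logic; the real code
// operates on Table metadata and field objects, not free-standing bitsets.
#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <cstddef>

int main()
{
  const std::size_t num_fields= 8;                 // stand-in for the table's field count
  boost::dynamic_bitset<> unique_map(num_fields);  // fields of the offended unique key
  boost::dynamic_bitset<> read_set(num_fields);    // fields already read
  boost::dynamic_bitset<> write_set(num_fields);   // fields being written

  unique_map.set(2);                               // pretend the unique key covers fields 2 and 5
  unique_map.set(5);
  read_set.set(2);                                 // field 2 is already in the read set

  unique_map-= read_set;                           // same effect as bitmap_subtract(&unique_map, table->read_set)
  unique_map-= write_set;                          // same effect as bitmap_subtract(&unique_map, table->write_set)

  if (unique_map.none())                           // counterpart of unique_map.isClearAll(): nothing left to re-read
    return 0;

  read_set|= unique_map;                           // counterpart of bitmap_union(table->read_set, &unique_map)
  assert(read_set.test(5));                        // field 5 is now marked for the re-read
  return 0;
}

dynamic_bitset's operator-= and operator|= give set difference and union directly, which is why the boost-based version of the function needs no explicit buffer such as unique_map_buf.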