~drizzle-trunk/drizzle/development

Viewing changes to drizzled/cursor.cc

  • Committer: Barry.Leslie at PrimeBase
  • Date: 2010-07-14 22:58:28 UTC
  • mto: This revision was merged to the branch mainline in revision 1662.
  • Revision ID: barry.leslie@primebase.com-20100714225828-lckjqjp4xre0n5ec
Fixed merger problem in plugin/pbms/plugin.am.

@@ -54,10 +54,9 @@
 ** General Cursor functions
 ****************************************************************************/
 Cursor::Cursor(plugin::StorageEngine &engine_arg,
-               Table &arg)
-  : table(arg),
-    engine(engine_arg),
-    estimation_rows_to_insert(0),
+               TableShare &share_arg)
+  : table_share(&share_arg), table(0),
+    estimation_rows_to_insert(0), engine(&engine_arg),
     ref(0),
     key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
     ref_length(sizeof(internal::my_off_t)),
@@ -80,7 +79,7 @@
  */
 Cursor *Cursor::clone(memory::Root *mem_root)
 {
-  Cursor *new_handler= getTable()->getMutableShare()->db_type()->getCursor(*getTable());
+  Cursor *new_handler= table->getMutableShare()->db_type()->getCursor(*table->getMutableShare(), mem_root);
 
   /*
     Allocate Cursor->ref here because otherwise ha_open will allocate it
@@ -90,12 +89,14 @@
   if (!(new_handler->ref= (unsigned char*) mem_root->alloc_root(ALIGN_SIZE(ref_length)*2)))
     return NULL;
 
-  TableIdentifier identifier(getTable()->getShare()->getSchemaName(),
-                             getTable()->getShare()->getTableName(),
-                             getTable()->getShare()->getType());
+  TableIdentifier identifier(table->getShare()->getSchemaName(),
+                             table->getShare()->getTableName(),
+                             table->getShare()->getType());
 
   if (new_handler && !new_handler->ha_open(identifier,
-                                           getTable()->getDBStat(),
+                                           table,
+                                           table->getMutableShare()->getNormalizedPath(),
+                                           table->getDBStat(),
                                            HA_OPEN_IGNORE_IF_LOCKED))
     return new_handler;
   return NULL;
@@ -111,8 +112,8 @@
   /* works only with key prefixes */
   assert(((keypart_map_arg + 1) & keypart_map_arg) == 0);
 
-  const KeyPartInfo *key_part_found= getTable()->getShare()->getKeyInfo(key_position).key_part;
-  const KeyPartInfo *end_key_part_found= key_part_found + getTable()->getShare()->getKeyInfo(key_position).key_parts;
+  const KeyPartInfo *key_part_found= table->getShare()->getKeyInfo(key_position).key_part;
+  const KeyPartInfo *end_key_part_found= key_part_found + table->getShare()->getKeyInfo(key_position).key_parts;
   uint32_t length= 0;
 
   while (key_part_found < end_key_part_found && keypart_map_arg)
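
The ((keypart_map_arg + 1) & keypart_map_arg) == 0 assertion in this hunk only passes when the bitmap is a contiguous run of low-order bits, i.e. when the caller really asked for a key prefix. A standalone sketch of the same bit trick, using made-up values rather than anything from this tree:

    #include <cassert>
    #include <cstdint>

    // A keypart map selects the first N parts of an index as a prefix:
    // parts (a), (a,b), (a,b,c) map to 0b001, 0b011, 0b111.
    static bool is_key_prefix(uint64_t keypart_map)
    {
      // For 0b0...011...1, adding 1 carries past every set bit,
      // so (map + 1) & map == 0.  A map with a hole (e.g. 0b101) fails.
      return ((keypart_map + 1) & keypart_map) == 0;
    }

    int main()
    {
      assert(is_key_prefix(0x1));   // first key part only
      assert(is_key_prefix(0x7));   // first three key parts
      assert(!is_key_prefix(0x5));  // parts 1 and 3 without 2: not a prefix
      return 0;
    }
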
@@ -175,6 +176,12 @@
   return end_bulk_insert();
 }
 
+void Cursor::change_table_ptr(Table *table_arg, TableShare *share)
+{
+  table= table_arg;
+  table_share= share;
+}
+
 const key_map *Cursor::keys_to_use_for_scanning()
 {
   return &key_map_empty;
@@ -182,17 +189,17 @@
 
 bool Cursor::has_transactions()
 {
-  return (getTable()->getShare()->db_type()->check_flag(HTON_BIT_DOES_TRANSACTIONS));
+  return (table->getShare()->db_type()->check_flag(HTON_BIT_DOES_TRANSACTIONS));
 }
 
 void Cursor::ha_statistic_increment(uint64_t system_status_var::*offset) const
 {
-  (getTable()->in_use->status_var.*offset)++;
+  status_var_increment(table->in_use->status_var.*offset);
 }
 
 void **Cursor::ha_data(Session *session) const
 {
-  return session->getEngineData(getEngine());
+  return session->getEngineData(engine);
 }
 
 bool Cursor::is_fatal_error(int error, uint32_t flags)
@@ -208,7 +215,7 @@
 
 ha_rows Cursor::records() { return stats.records; }
 uint64_t Cursor::tableSize() { return stats.index_file_length + stats.data_file_length; }
-uint64_t Cursor::rowSize() { return getTable()->getRecordLength() + getTable()->sizeFields(); }
+uint64_t Cursor::rowSize() { return table->getRecordLength() + table->sizeFields(); }
 
 int Cursor::doOpen(const TableIdentifier &identifier, int mode, uint32_t test_if_locked)
 {
@@ -222,17 +229,21 @@
   Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
 */
 int Cursor::ha_open(const TableIdentifier &identifier,
-                    int mode,
+                    Table *table_arg, const char *name, int mode,
                     int test_if_locked)
 {
   int error;
 
+  table= table_arg;
+  assert(table->getShare() == table_share);
+
+  assert(identifier.getPath().compare(name) == 0);
   if ((error= doOpen(identifier, mode, test_if_locked)))
   {
     if ((error == EACCES || error == EROFS) && mode == O_RDWR &&
-        (getTable()->db_stat & HA_TRY_READ_ONLY))
+        (table->db_stat & HA_TRY_READ_ONLY))
     {
-      getTable()->db_stat|=HA_READ_ONLY;
+      table->db_stat|=HA_READ_ONLY;
       error= doOpen(identifier, O_RDONLY,test_if_locked);
     }
   }
@@ -242,12 +253,12 @@
   }
   else
   {
-    if (getTable()->getShare()->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
-      getTable()->db_stat|=HA_READ_ONLY;
+    if (table->getShare()->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
+      table->db_stat|=HA_READ_ONLY;
     (void) extra(HA_EXTRA_NO_READCHECK);        // Not needed in SQL
 
     /* ref is already allocated for us if we're called from Cursor::clone() */
-    if (!ref && !(ref= (unsigned char*) getTable()->alloc_root(ALIGN_SIZE(ref_length)*2)))
+    if (!ref && !(ref= (unsigned char*) table->alloc_root(ALIGN_SIZE(ref_length)*2)))
     {
       close();
       error=HA_ERR_OUT_OF_MEM;
@@ -276,7 +287,7 @@
     TODO remove the test for HA_READ_ORDER
   */
   if (stats.deleted < 10 || primary_key >= MAX_KEY ||
-      !(getTable()->index_flags(primary_key) & HA_READ_ORDER))
+      !(table->index_flags(primary_key) & HA_READ_ORDER))
   {
     (void) startTableScan(1);
     while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
@@ -304,7 +315,7 @@
   @verbatim 1,5,15,25,35,... @endverbatim
 */
 inline uint64_t
-compute_next_insert_id(uint64_t nr, drizzle_system_variables *variables)
+compute_next_insert_id(uint64_t nr,struct system_variables *variables)
 {
   if (variables->auto_increment_increment == 1)
     return (nr+1); // optimization of the formula below
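
This hunk only shows the fast path for auto_increment_increment == 1; the general branch referred to by "the formula below" (not visible in this diff) has to find the next member of the sequence offset, offset+inc, offset+2*inc, ... that is strictly greater than nr. A rough standalone sketch of that idea, a simplification rather than the exact Drizzle formula:

    #include <cassert>
    #include <cstdint>

    // Next value > nr of the form offset + k*inc (k >= 0), assuming offset <= inc.
    static uint64_t next_insert_id_sketch(uint64_t nr, uint64_t inc, uint64_t offset)
    {
      if (inc == 1)
        return nr + 1;                        // the fast path shown above
      if (nr < offset)
        return offset;                        // sequence has not started yet
      uint64_t k= (nr - offset) / inc + 1;    // first step strictly above nr
      return offset + k * inc;
    }

    int main()
    {
      // With auto_increment_increment=10 and auto_increment_offset=5
      // the column produces 5, 15, 25, 35, ...
      assert(next_insert_id_sketch(5, 10, 5)  == 15);
      assert(next_insert_id_sketch(14, 10, 5) == 15);
      assert(next_insert_id_sketch(15, 10, 5) == 25);
      return 0;
    }
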
@@ -324,7 +335,7 @@
     Session::next_insert_id to be greater than the explicit value.
   */
   if ((next_insert_id > 0) && (nr >= next_insert_id))
-    set_next_insert_id(compute_next_insert_id(nr, &getTable()->in_use->variables));
+    set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables));
 }
 
 
@@ -344,7 +355,7 @@
     The number X if it exists, "nr" otherwise.
 */
 inline uint64_t
-prev_insert_id(uint64_t nr, drizzle_system_variables *variables)
+prev_insert_id(uint64_t nr, struct system_variables *variables)
 {
   if (unlikely(nr < variables->auto_increment_offset))
   {
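
prev_insert_id() goes the other way: given a value nr (typically one the column has just truncated), it returns the largest sequence member X <= nr, or nr itself when nr sits below the offset, which is the unlikely branch visible above. A hedged sketch of that arithmetic, again a simplification and not the code from this file:

    #include <cassert>
    #include <cstdint>

    // Largest value <= nr of the form offset + k*inc, or nr if there is none.
    static uint64_t prev_insert_id_sketch(uint64_t nr, uint64_t inc, uint64_t offset)
    {
      if (nr < offset)
        return nr;                            // no sequence member fits under nr
      return offset + ((nr - offset) / inc) * inc;
    }

    int main()
    {
      // Sequence 5, 15, 25, ... (inc=10, offset=5)
      assert(prev_insert_id_sketch(24, 10, 5) == 15);
      assert(prev_insert_id_sketch(25, 10, 5) == 25);
      assert(prev_insert_id_sketch(3, 10, 5)  == 3);   // below the offset: unchanged
      return 0;
    }
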
@@ -441,8 +452,8 @@
 {
   uint64_t nr, nb_reserved_values;
   bool append= false;
-  Session *session= getTable()->in_use;
-  drizzle_system_variables *variables= &session->variables;
+  Session *session= table->in_use;
+  struct system_variables *variables= &session->variables;
 
   /*
     next_insert_id is a "cursor" into the reserved interval, it may go greater
@@ -454,8 +465,8 @@
      for an auto increment column, not a magic value like NULL is.
      same as sql_mode=NO_AUTO_VALUE_ON_ZERO */
 
-  if ((nr= getTable()->next_number_field->val_int()) != 0
-      || getTable()->auto_increment_field_not_null)
+  if ((nr= table->next_number_field->val_int()) != 0
+      || table->auto_increment_field_not_null)
   {
     /*
       Update next_insert_id if we had already generated a value in this
@@ -533,19 +544,19 @@
       nr= compute_next_insert_id(nr-1, variables);
     }
 
-    if (getTable()->getShare()->next_number_keypart == 0)
+    if (table->getShare()->next_number_keypart == 0)
     {
       /* We must defer the appending until "nr" has been possibly truncated */
       append= true;
     }
   }
 
-  if (unlikely(getTable()->next_number_field->store((int64_t) nr, true)))
+  if (unlikely(table->next_number_field->store((int64_t) nr, true)))
   {
     /*
       first test if the query was aborted due to strict mode constraints
     */
-    if (session->getKilled() == Session::KILL_BAD_DATA)
+    if (session->killed == Session::KILL_BAD_DATA)
       return HA_ERR_AUTOINC_ERANGE;
 
     /*
@@ -556,9 +567,9 @@
       bother shifting the right bound (anyway any other value from this
       interval will cause a duplicate key).
     */
-    nr= prev_insert_id(getTable()->next_number_field->val_int(), variables);
-    if (unlikely(getTable()->next_number_field->store((int64_t) nr, true)))
-      nr= getTable()->next_number_field->val_int();
+    nr= prev_insert_id(table->next_number_field->val_int(), variables);
+    if (unlikely(table->next_number_field->store((int64_t) nr, true)))
+      nr= table->next_number_field->val_int();
   }
   if (append)
   {
@@ -612,7 +623,7 @@
       this statement used forced auto_increment values if there were some,
       wipe them away for other statements.
     */
-    getTable()->in_use->auto_inc_intervals_forced.empty();
+    table->in_use->auto_inc_intervals_forced.empty();
   }
 }
 
@@ -658,10 +669,10 @@
    * possible resource to gain (and if there is... then there is a bug such
    * that in_use should have been set.
  */
-  if (not getTable()->in_use)
+  if (not table || not table->in_use)
     return;
 
-  resource_context= getTable()->in_use->getResourceContext(getEngine());
+  resource_context= table->in_use->getResourceContext(engine);
   /*
     When a storage engine method is called, the transaction must
     have been started, unless it's a DDL call, for which the
@@ -702,9 +713,9 @@
      * @todo Make TransactionServices generic to AfterTriggerServices
      * or similar...
      */
-    Session *const session= getTable()->in_use;
+    Session *const session= table->in_use;
     TransactionServices &transaction_services= TransactionServices::singleton();
-    transaction_services.truncateTable(session, getTable());
+    transaction_services.truncateTable(session, table);
   }
 
   return result;
@@ -803,25 +814,25 @@
   int error;
   if (!(error=index_next(buf)))
   {
-    ptrdiff_t ptrdiff= buf - getTable()->getInsertRecord();
+    ptrdiff_t ptrdiff= buf - table->record[0];
     unsigned char *save_record_0= NULL;
     KeyInfo *key_info= NULL;
     KeyPartInfo *key_part;
     KeyPartInfo *key_part_end= NULL;
 
     /*
-      key_cmp_if_same() compares table->getInsertRecord() against 'key'.
-      In parts it uses table->getInsertRecord() directly, in parts it uses
-      field objects with their local pointers into table->getInsertRecord().
-      If 'buf' is distinct from table->getInsertRecord(), we need to move
-      all record references. This is table->getInsertRecord() itself and
+      key_cmp_if_same() compares table->record[0] against 'key'.
+      In parts it uses table->record[0] directly, in parts it uses
+      field objects with their local pointers into table->record[0].
+      If 'buf' is distinct from table->record[0], we need to move
+      all record references. This is table->record[0] itself and
       the field pointers of the fields used in this key.
     */
     if (ptrdiff)
     {
-      save_record_0= getTable()->getInsertRecord();
-      getTable()->record[0]= buf;
-      key_info= getTable()->key_info + active_index;
+      save_record_0= table->record[0];
+      table->record[0]= buf;
+      key_info= table->key_info + active_index;
       key_part= key_info->key_part;
       key_part_end= key_part + key_info->key_parts;
       for (; key_part < key_part_end; key_part++)
@@ -831,16 +842,16 @@
       }
     }
 
-    if (key_cmp_if_same(getTable(), key, active_index, keylen))
+    if (key_cmp_if_same(table, key, active_index, keylen))
     {
-      getTable()->status=STATUS_NOT_FOUND;
+      table->status=STATUS_NOT_FOUND;
       error=HA_ERR_END_OF_FILE;
     }
 
     /* Move back if necessary. */
     if (ptrdiff)
    {
-      getTable()->record[0]= save_record_0;
+      table->record[0]= save_record_0;
       for (key_part= key_info->key_part; key_part < key_part_end; key_part++)
         key_part->field->move_field_offset(-ptrdiff);
     }
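
The ptrdiff shuffle in this function exists because Field objects cache raw pointers into table->record[0]; comparing a key against a different buffer therefore means temporarily repointing record[0] and every key field by the same offset before calling key_cmp_if_same(), then moving them back. A toy illustration of that pointer-rebasing idea, with hypothetical stand-in types rather than the real Table/Field classes:

    #include <cassert>
    #include <cstddef>

    // Stand-in for a Field: it caches a pointer into its table's record buffer.
    struct ToyField
    {
      unsigned char *ptr;
      void move_field_offset(ptrdiff_t d) { ptr+= d; }
    };

    int main()
    {
      unsigned char arena[16]= {1,2,3,4,5,6,7,8,  9,9,9,9,55,6,7,8};
      unsigned char *record0= arena;        // plays the role of table->record[0]
      unsigned char *buf= arena + 8;        // a caller-supplied record buffer

      ToyField f= { record0 + 4 };          // field cached at offset 4 of record[0]
      ptrdiff_t diff= buf - record0;        // same trick as buf - table->record[0]

      f.move_field_offset(diff);            // rebase the field onto buf
      assert(*f.ptr == 55);                 // it now reads buf[4]
      f.move_field_offset(-diff);           // and move back, as the hunk above does
      assert(f.ptr == record0 + 4);
      return 0;
    }
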
@@ -877,7 +888,7 @@
 double Cursor::index_only_read_time(uint32_t keynr, double key_records)
 {
   uint32_t keys_per_block= (stats.block_size/2/
-                        (getTable()->key_info[keynr].key_length + ref_length) + 1);
+                        (table->key_info[keynr].key_length + ref_length) + 1);
   return ((double) (key_records + keys_per_block-1) /
           (double) keys_per_block);
 }
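
As a worked example of the cost model above (illustrative figures, not measurements from any engine): with stats.block_size = 16384, a 24-byte key and an 8-byte ref, keys_per_block = 16384/2/(24+8) + 1 = 257, so an estimated 10000 matching keys cost (10000 + 257 - 1)/257, roughly 40 block reads for an index-only scan.
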
@@ -906,7 +917,7 @@
 
   @note
     This method (or an overriding one in a derived class) must check for
-    session->getKilled() and return HA_POS_ERROR if it is not zero. This is required
+    session->killed and return HA_POS_ERROR if it is not zero. This is required
     for a user to be able to interrupt the calculation by killing the
     connection/query.
 
@@ -1159,7 +1170,7 @@
   @param sorted         Set to 1 if result should be sorted per key
 
   @note
-    Record is read into table->getInsertRecord()
+    Record is read into table->record[0]
 
   @retval
     0                   Found row
@@ -1184,12 +1195,12 @@
     key_compare_result_on_equal= ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
                                   (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
   }
-  range_key_part= getTable()->key_info[active_index].key_part;
+  range_key_part= table->key_info[active_index].key_part;
 
   if (!start_key)                       // Read first record
-    result= index_first(getTable()->getInsertRecord());
+    result= index_first(table->record[0]);
   else
-    result= index_read_map(getTable()->getInsertRecord(),
+    result= index_read_map(table->record[0],
                            start_key->key,
                            start_key->keypart_map,
                            start_key->flag);
@@ -1206,7 +1217,7 @@
   Read next row between two endpoints.
 
   @note
-    Record is read into table->getInsertRecord()
+    Record is read into table->record[0]
 
   @retval
     0                   Found row
@@ -1222,11 +1233,11 @@
   if (eq_range)
   {
     /* We trust that index_next_same always gives a row in range */
-    return(index_next_same(getTable()->getInsertRecord(),
+    return(index_next_same(table->record[0],
                                 end_range->key,
                                 end_range->length));
   }
-  result= index_next(getTable()->getInsertRecord());
+  result= index_next(table->record[0]);
   if (result)
     return result;
   return(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE);
@@ -1316,7 +1327,7 @@
      * called.  If it fails, then a call to deleteRecord()
      * is called, followed by a repeat of the original
     * call to insertRecord().  So, log_row_for_replication
-     * could be called multiple times for a REPLACE
+     * could be called either once or twice for a REPLACE
     * statement.  The below looks at the values of before_record
     * and after_record to determine which call to this
     * function is for the delete or the insert, since NULL
@@ -1329,12 +1340,7 @@
      */
     if (after_record == NULL)
     {
-      /*
-       * The storage engine is passed the record in table->record[1]
-       * as the row to delete (this is the conflicting row), so
-       * we need to notify TransactionService to use that row.
-       */
-      transaction_services.deleteRecord(session, table, true);
+      transaction_services.deleteRecord(session, table);
       /* 
       * We set the "current" statement message to NULL.  This triggers
       * the replication services component to generate a new statement
@@ -1352,7 +1358,6 @@
     break;
   case SQLCOM_INSERT:
   case SQLCOM_INSERT_SELECT:
-  case SQLCOM_LOAD:
     /*
      * The else block below represents an 
      * INSERT ... ON DUPLICATE KEY UPDATE that
@@ -1394,18 +1399,18 @@
   {
     if (lock_type == F_RDLCK)
     {
-      DRIZZLE_CURSOR_RDLOCK_START(getTable()->getShare()->getSchemaName(),
-                                  getTable()->getShare()->getTableName());
+      DRIZZLE_CURSOR_RDLOCK_START(table_share->getSchemaName(),
+                                  table_share->getTableName());
     }
     else if (lock_type == F_WRLCK)
     {
-      DRIZZLE_CURSOR_WRLOCK_START(getTable()->getShare()->getSchemaName(),
-                                  getTable()->getShare()->getTableName());
+      DRIZZLE_CURSOR_WRLOCK_START(table_share->getSchemaName(),
+                                  table_share->getTableName());
     }
     else if (lock_type == F_UNLCK)
    {
-      DRIZZLE_CURSOR_UNLOCK_START(getTable()->getShare()->getSchemaName(),
-                                  getTable()->getShare()->getTableName());
+      DRIZZLE_CURSOR_UNLOCK_START(table_share->getSchemaName(),
+                                  table_share->getTableName());
     }
   }
 
@@ -1444,14 +1449,17 @@
 int Cursor::ha_reset()
 {
   /* Check that we have called all proper deallocation functions */
-  assert(! getTable()->getShare()->all_set.none());
-  assert(getTable()->key_read == 0);
+  assert((unsigned char*) table->def_read_set.getBitmap() +
+              table->getShare()->column_bitmap_size ==
+              (unsigned char*) table->def_write_set.getBitmap());
+  assert(table->getShare()->all_set.isSetAll());
+  assert(table->key_read == 0);
   /* ensure that ha_index_end / endTableScan has been called */
   assert(inited == NONE);
   /* Free cache used by filesort */
-  getTable()->free_io_cache();
+  table->free_io_cache();
   /* reset the bitmaps to point to defaults */
-  getTable()->default_column_bitmaps();
+  table->default_column_bitmaps();
   return(reset());
 }
 
@@ -1466,22 +1474,22 @@
    * @TODO Technically, the below two lines can be take even further out of the
    * Cursor interface and into the fill_record() method.
    */
-  if (getTable()->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
   {
-    getTable()->timestamp_field->set_time();
+    table->timestamp_field->set_time();
   }
 
-  DRIZZLE_INSERT_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
+  DRIZZLE_INSERT_ROW_START(table_share->getSchemaName(), table_share->getTableName());
   setTransactionReadWrite();
 
-  if (unlikely(plugin::EventObserver::beforeInsertRecord(*getTable(), buf)))
+  if (unlikely(plugin::EventObserver::beforeInsertRecord(*table, buf)))
   {
     error= ER_EVENT_OBSERVER_PLUGIN;
   }
   else
   {
     error= doInsertRecord(buf);
-    if (unlikely(plugin::EventObserver::afterInsertRecord(*getTable(), buf, error))) 
+    if (unlikely(plugin::EventObserver::afterInsertRecord(*table, buf, error))) 
     {
       error= ER_EVENT_OBSERVER_PLUGIN;
     }
@@ -1496,7 +1504,7 @@
     return error;
   }
 
-  if (unlikely(log_row_for_replication(getTable(), NULL, buf)))
+  if (unlikely(log_row_for_replication(table, NULL, buf)))
     return HA_ERR_RBR_LOGGING_FAILED;
 
   return 0;
@@ -1508,26 +1516,26 @@
   int error;
 
   /*
-    Some storage engines require that the new record is in getInsertRecord()
-    (and the old record is in getUpdateRecord()).
+    Some storage engines require that the new record is in record[0]
+    (and the old record is in record[1]).
    */
-  assert(new_data == getTable()->getInsertRecord());
+  assert(new_data == table->record[0]);
 
-  DRIZZLE_UPDATE_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
+  DRIZZLE_UPDATE_ROW_START(table_share->getSchemaName(), table_share->getTableName());
   setTransactionReadWrite();
-  if (unlikely(plugin::EventObserver::beforeUpdateRecord(*getTable(), old_data, new_data)))
+  if (unlikely(plugin::EventObserver::beforeUpdateRecord(*table, old_data, new_data)))
   {
     error= ER_EVENT_OBSERVER_PLUGIN;
   }
   else
   {
-    if (getTable()->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
     {
-      getTable()->timestamp_field->set_time();
+      table->timestamp_field->set_time();
     }
 
     error= doUpdateRecord(old_data, new_data);
-    if (unlikely(plugin::EventObserver::afterUpdateRecord(*getTable(), old_data, new_data, error)))
+    if (unlikely(plugin::EventObserver::afterUpdateRecord(*table, old_data, new_data, error)))
     {
       error= ER_EVENT_OBSERVER_PLUGIN;
     }
@@ -1542,30 +1550,26 @@
     return error;
   }
 
-  if (unlikely(log_row_for_replication(getTable(), old_data, new_data)))
+  if (unlikely(log_row_for_replication(table, old_data, new_data)))
     return HA_ERR_RBR_LOGGING_FAILED;
 
   return 0;
 }
-TableShare *Cursor::getShare()
-{
-  return getTable()->getMutableShare();
-}
 
 int Cursor::deleteRecord(const unsigned char *buf)
 {
   int error;
 
-  DRIZZLE_DELETE_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
+  DRIZZLE_DELETE_ROW_START(table_share->getSchemaName(), table_share->getTableName());
   setTransactionReadWrite();
-  if (unlikely(plugin::EventObserver::beforeDeleteRecord(*getTable(), buf)))
+  if (unlikely(plugin::EventObserver::beforeDeleteRecord(*table, buf)))
  {
     error= ER_EVENT_OBSERVER_PLUGIN;
   }
   else
   {
     error= doDeleteRecord(buf);
-    if (unlikely(plugin::EventObserver::afterDeleteRecord(*getTable(), buf, error)))
+    if (unlikely(plugin::EventObserver::afterDeleteRecord(*table, buf, error)))
     {
       error= ER_EVENT_OBSERVER_PLUGIN;
     }
@@ -1578,7 +1582,7 @@
   if (unlikely(error))
     return error;
 
-  if (unlikely(log_row_for_replication(getTable(), buf, NULL)))
+  if (unlikely(log_row_for_replication(table, buf, NULL)))
     return HA_ERR_RBR_LOGGING_FAILED;
 
   return 0;