~drizzle-trunk/drizzle/development


Viewing changes to drizzled/cursor.cc

  • Committer: Brian Aker
  • Date: 2010-10-22 17:44:34 UTC
  • mto: This revision was merged to the branch mainline in revision 1873.
  • Revision ID: brian@tangent.org-20101022174434-q8fjovcpclzqer7n
TableShare is no longer in the house (i.e. we no longer directly have a copy
of it in cursor).

One more bit of the knot now gone.
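
In summary, the hunks below trade direct access to the cursor's `table` pointer (previously handed in through `ha_open()`) for the `getTable()` accessor on a `Table` reference bound in the constructor. A minimal before/after sketch, reconstructed from the diff below and the commit message — the real class lives in drizzled/cursor.h and carries many more members, so treat the member names here as illustrative:

// Before: the Table pointer was handed in through ha_open(), and (per the
// commit message) the cursor also kept its own TableShare pointer around.
class Table;
class TableShare;

class CursorBefore
{
protected:
  Table *table;            // set in ha_open(..., Table *table_arg, ...)
  TableShare *table_share; // the direct copy the commit message says is now gone
};

// After: the Table is bound at construction time and everything, including
// the share, is reached through getTable().
class CursorAfter
{
  Table &table;            // bound in Cursor::Cursor(engine_arg, Table &arg)
public:
  explicit CursorAfter(Table &arg) : table(arg) {}
  Table *getTable() const { return &table; } // share via getTable()->getShare()
};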

=== modified file 'drizzled/cursor.cc'
--- drizzled/cursor.cc (parent revision)
+++ drizzled/cursor.cc (this revision)
@@ ... @@
 ****************************************************************************/
 Cursor::Cursor(plugin::StorageEngine &engine_arg,
                Table &arg)
-  : table(&arg),
-    estimation_rows_to_insert(0), engine(engine_arg),
+  : table(arg),
+    engine(engine_arg),
+    estimation_rows_to_insert(0),
     ref(0),
     key_used_on_scan(MAX_KEY), active_index(MAX_KEY),
     ref_length(sizeof(internal::my_off_t)),
@@ ... @@
  */
 Cursor *Cursor::clone(memory::Root *mem_root)
 {
-  Cursor *new_handler= table->getMutableShare()->db_type()->getCursor(*table);
+  Cursor *new_handler= getTable()->getMutableShare()->db_type()->getCursor(*getTable());
 
   /*
     Allocate Cursor->ref here because otherwise ha_open will allocate it
@@ ... @@
   if (!(new_handler->ref= (unsigned char*) mem_root->alloc_root(ALIGN_SIZE(ref_length)*2)))
     return NULL;
 
-  TableIdentifier identifier(table->getShare()->getSchemaName(),
-                             table->getShare()->getTableName(),
-                             table->getShare()->getType());
+  TableIdentifier identifier(getTable()->getShare()->getSchemaName(),
+                             getTable()->getShare()->getTableName(),
+                             getTable()->getShare()->getType());
 
   if (new_handler && !new_handler->ha_open(identifier,
-                                           table,
-                                           table->getDBStat(),
+                                           getTable()->getDBStat(),
                                            HA_OPEN_IGNORE_IF_LOCKED))
     return new_handler;
   return NULL;
@@ ... @@
   /* works only with key prefixes */
   assert(((keypart_map_arg + 1) & keypart_map_arg) == 0);
 
-  const KeyPartInfo *key_part_found= table->getShare()->getKeyInfo(key_position).key_part;
-  const KeyPartInfo *end_key_part_found= key_part_found + table->getShare()->getKeyInfo(key_position).key_parts;
+  const KeyPartInfo *key_part_found= getTable()->getShare()->getKeyInfo(key_position).key_part;
+  const KeyPartInfo *end_key_part_found= key_part_found + getTable()->getShare()->getKeyInfo(key_position).key_parts;
   uint32_t length= 0;
 
   while (key_part_found < end_key_part_found && keypart_map_arg)
@@ ... @@
 
 bool Cursor::has_transactions()
 {
-  return (table->getShare()->db_type()->check_flag(HTON_BIT_DOES_TRANSACTIONS));
+  return (getTable()->getShare()->db_type()->check_flag(HTON_BIT_DOES_TRANSACTIONS));
 }
 
 void Cursor::ha_statistic_increment(uint64_t system_status_var::*offset) const
 {
-  (table->in_use->status_var.*offset)++;
+  (getTable()->in_use->status_var.*offset)++;
 }
 
 void **Cursor::ha_data(Session *session) const
@@ ... @@
 
 ha_rows Cursor::records() { return stats.records; }
 uint64_t Cursor::tableSize() { return stats.index_file_length + stats.data_file_length; }
-uint64_t Cursor::rowSize() { return table->getRecordLength() + table->sizeFields(); }
+uint64_t Cursor::rowSize() { return getTable()->getRecordLength() + getTable()->sizeFields(); }
 
 int Cursor::doOpen(const TableIdentifier &identifier, int mode, uint32_t test_if_locked)
 {
@@ ... @@
   Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
 */
 int Cursor::ha_open(const TableIdentifier &identifier,
-                    Table *table_arg,
                     int mode,
                     int test_if_locked)
 {
   int error;
 
-  table= table_arg;
-
   if ((error= doOpen(identifier, mode, test_if_locked)))
   {
     if ((error == EACCES || error == EROFS) && mode == O_RDWR &&
-        (table->db_stat & HA_TRY_READ_ONLY))
+        (getTable()->db_stat & HA_TRY_READ_ONLY))
     {
-      table->db_stat|=HA_READ_ONLY;
+      getTable()->db_stat|=HA_READ_ONLY;
       error= doOpen(identifier, O_RDONLY,test_if_locked);
     }
   }
@@ ... @@
   }
   else
   {
-    if (table->getShare()->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
-      table->db_stat|=HA_READ_ONLY;
+    if (getTable()->getShare()->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
+      getTable()->db_stat|=HA_READ_ONLY;
     (void) extra(HA_EXTRA_NO_READCHECK);        // Not needed in SQL
 
     /* ref is already allocated for us if we're called from Cursor::clone() */
-    if (!ref && !(ref= (unsigned char*) table->alloc_root(ALIGN_SIZE(ref_length)*2)))
+    if (!ref && !(ref= (unsigned char*) getTable()->alloc_root(ALIGN_SIZE(ref_length)*2)))
     {
       close();
       error=HA_ERR_OUT_OF_MEM;
@@ ... @@
     TODO remove the test for HA_READ_ORDER
   */
   if (stats.deleted < 10 || primary_key >= MAX_KEY ||
-      !(table->index_flags(primary_key) & HA_READ_ORDER))
+      !(getTable()->index_flags(primary_key) & HA_READ_ORDER))
   {
     (void) startTableScan(1);
     while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
@@ ... @@
   @verbatim 1,5,15,25,35,... @endverbatim
 */
 inline uint64_t
-compute_next_insert_id(uint64_t nr,struct system_variables *variables)
+compute_next_insert_id(uint64_t nr, drizzle_system_variables *variables)
 {
   if (variables->auto_increment_increment == 1)
     return (nr+1); // optimization of the formula below
@@ ... @@
     Session::next_insert_id to be greater than the explicit value.
   */
   if ((next_insert_id > 0) && (nr >= next_insert_id))
-    set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables));
+    set_next_insert_id(compute_next_insert_id(nr, &getTable()->in_use->variables));
 }
 
 
@@ ... @@
     The number X if it exists, "nr" otherwise.
 */
 inline uint64_t
-prev_insert_id(uint64_t nr, struct system_variables *variables)
+prev_insert_id(uint64_t nr, drizzle_system_variables *variables)
 {
   if (unlikely(nr < variables->auto_increment_offset))
   {
@@ ... @@
 {
   uint64_t nr, nb_reserved_values;
   bool append= false;
-  Session *session= table->in_use;
-  struct system_variables *variables= &session->variables;
+  Session *session= getTable()->in_use;
+  drizzle_system_variables *variables= &session->variables;
 
   /*
     next_insert_id is a "cursor" into the reserved interval, it may go greater
@@ ... @@
      for an auto increment column, not a magic value like NULL is.
      same as sql_mode=NO_AUTO_VALUE_ON_ZERO */
 
-  if ((nr= table->next_number_field->val_int()) != 0
-      || table->auto_increment_field_not_null)
+  if ((nr= getTable()->next_number_field->val_int()) != 0
+      || getTable()->auto_increment_field_not_null)
   {
     /*
       Update next_insert_id if we had already generated a value in this
@@ ... @@
       nr= compute_next_insert_id(nr-1, variables);
     }
 
-    if (table->getShare()->next_number_keypart == 0)
+    if (getTable()->getShare()->next_number_keypart == 0)
     {
       /* We must defer the appending until "nr" has been possibly truncated */
       append= true;
     }
   }
 
-  if (unlikely(table->next_number_field->store((int64_t) nr, true)))
+  if (unlikely(getTable()->next_number_field->store((int64_t) nr, true)))
   {
     /*
       first test if the query was aborted due to strict mode constraints
     */
-    if (session->killed == Session::KILL_BAD_DATA)
+    if (session->getKilled() == Session::KILL_BAD_DATA)
       return HA_ERR_AUTOINC_ERANGE;
 
     /*
@@ ... @@
       bother shifting the right bound (anyway any other value from this
       interval will cause a duplicate key).
     */
-    nr= prev_insert_id(table->next_number_field->val_int(), variables);
-    if (unlikely(table->next_number_field->store((int64_t) nr, true)))
-      nr= table->next_number_field->val_int();
+    nr= prev_insert_id(getTable()->next_number_field->val_int(), variables);
+    if (unlikely(getTable()->next_number_field->store((int64_t) nr, true)))
+      nr= getTable()->next_number_field->val_int();
   }
   if (append)
   {
@@ ... @@
       this statement used forced auto_increment values if there were some,
       wipe them away for other statements.
     */
-    table->in_use->auto_inc_intervals_forced.empty();
+    getTable()->in_use->auto_inc_intervals_forced.empty();
   }
 }
 
@@ ... @@
    * possible resource to gain (and if there is... then there is a bug such
    * that in_use should have been set.
  */
-  if (not table || not table->in_use)
+  if (not getTable()->in_use)
     return;
 
-  resource_context= table->in_use->getResourceContext(getEngine());
+  resource_context= getTable()->in_use->getResourceContext(getEngine());
   /*
     When a storage engine method is called, the transaction must
     have been started, unless it's a DDL call, for which the
@@ ... @@
      * @todo Make TransactionServices generic to AfterTriggerServices
      * or similar...
      */
-    Session *const session= table->in_use;
+    Session *const session= getTable()->in_use;
     TransactionServices &transaction_services= TransactionServices::singleton();
-    transaction_services.truncateTable(session, table);
+    transaction_services.truncateTable(session, getTable());
   }
 
   return result;
@@ ... @@
   int error;
   if (!(error=index_next(buf)))
   {
-    ptrdiff_t ptrdiff= buf - table->getInsertRecord();
+    ptrdiff_t ptrdiff= buf - getTable()->getInsertRecord();
     unsigned char *save_record_0= NULL;
     KeyInfo *key_info= NULL;
     KeyPartInfo *key_part;
@@ ... @@
     */
     if (ptrdiff)
     {
-      save_record_0= table->getInsertRecord();
-      table->record[0]= buf;
-      key_info= table->key_info + active_index;
+      save_record_0= getTable()->getInsertRecord();
+      getTable()->record[0]= buf;
+      key_info= getTable()->key_info + active_index;
       key_part= key_info->key_part;
       key_part_end= key_part + key_info->key_parts;
       for (; key_part < key_part_end; key_part++)
@@ ... @@
       }
     }
 
-    if (key_cmp_if_same(table, key, active_index, keylen))
+    if (key_cmp_if_same(getTable(), key, active_index, keylen))
     {
-      table->status=STATUS_NOT_FOUND;
+      getTable()->status=STATUS_NOT_FOUND;
       error=HA_ERR_END_OF_FILE;
     }
 
     /* Move back if necessary. */
     if (ptrdiff)
     {
-      table->record[0]= save_record_0;
+      getTable()->record[0]= save_record_0;
       for (key_part= key_info->key_part; key_part < key_part_end; key_part++)
         key_part->field->move_field_offset(-ptrdiff);
     }
@@ ... @@
 double Cursor::index_only_read_time(uint32_t keynr, double key_records)
 {
   uint32_t keys_per_block= (stats.block_size/2/
-                        (table->key_info[keynr].key_length + ref_length) + 1);
+                        (getTable()->key_info[keynr].key_length + ref_length) + 1);
   return ((double) (key_records + keys_per_block-1) /
           (double) keys_per_block);
 }
@@ ... @@
 
   @note
     This method (or an overriding one in a derived class) must check for
-    session->killed and return HA_POS_ERROR if it is not zero. This is required
+    session->getKilled() and return HA_POS_ERROR if it is not zero. This is required
     for a user to be able to interrupt the calculation by killing the
     connection/query.
 
@@ ... @@
     key_compare_result_on_equal= ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
                                   (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
   }
-  range_key_part= table->key_info[active_index].key_part;
+  range_key_part= getTable()->key_info[active_index].key_part;
 
   if (!start_key)                       // Read first record
-    result= index_first(table->getInsertRecord());
+    result= index_first(getTable()->getInsertRecord());
   else
-    result= index_read_map(table->getInsertRecord(),
+    result= index_read_map(getTable()->getInsertRecord(),
                            start_key->key,
                            start_key->keypart_map,
                            start_key->flag);
@@ ... @@
   if (eq_range)
   {
     /* We trust that index_next_same always gives a row in range */
-    return(index_next_same(table->getInsertRecord(),
+    return(index_next_same(getTable()->getInsertRecord(),
                                 end_range->key,
                                 end_range->length));
   }
-  result= index_next(table->getInsertRecord());
+  result= index_next(getTable()->getInsertRecord());
   if (result)
     return result;
   return(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE);
@@ ... @@
 int Cursor::ha_reset()
 {
   /* Check that we have called all proper deallocation functions */
-  assert(! table->getShare()->all_set.none());
-  assert(table->key_read == 0);
+  assert(! getTable()->getShare()->all_set.none());
+  assert(getTable()->key_read == 0);
   /* ensure that ha_index_end / endTableScan has been called */
   assert(inited == NONE);
   /* Free cache used by filesort */
-  table->free_io_cache();
+  getTable()->free_io_cache();
   /* reset the bitmaps to point to defaults */
-  table->default_column_bitmaps();
+  getTable()->default_column_bitmaps();
   return(reset());
 }
 
@@ ... @@
    * @TODO Technically, the below two lines can be take even further out of the
    * Cursor interface and into the fill_record() method.
    */
-  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+  if (getTable()->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
   {
-    table->timestamp_field->set_time();
+    getTable()->timestamp_field->set_time();
   }
 
   DRIZZLE_INSERT_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
   setTransactionReadWrite();
 
-  if (unlikely(plugin::EventObserver::beforeInsertRecord(*table, buf)))
+  if (unlikely(plugin::EventObserver::beforeInsertRecord(*getTable(), buf)))
   {
     error= ER_EVENT_OBSERVER_PLUGIN;
   }
   else
   {
     error= doInsertRecord(buf);
-    if (unlikely(plugin::EventObserver::afterInsertRecord(*table, buf, error)))
+    if (unlikely(plugin::EventObserver::afterInsertRecord(*getTable(), buf, error)))
     {
       error= ER_EVENT_OBSERVER_PLUGIN;
     }
@@ ... @@
     return error;
   }
 
-  if (unlikely(log_row_for_replication(table, NULL, buf)))
+  if (unlikely(log_row_for_replication(getTable(), NULL, buf)))
     return HA_ERR_RBR_LOGGING_FAILED;
 
   return 0;
@@ ... @@
     Some storage engines require that the new record is in getInsertRecord()
     (and the old record is in getUpdateRecord()).
    */
-  assert(new_data == table->getInsertRecord());
+  assert(new_data == getTable()->getInsertRecord());
 
   DRIZZLE_UPDATE_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
   setTransactionReadWrite();
-  if (unlikely(plugin::EventObserver::beforeUpdateRecord(*table, old_data, new_data)))
+  if (unlikely(plugin::EventObserver::beforeUpdateRecord(*getTable(), old_data, new_data)))
   {
     error= ER_EVENT_OBSERVER_PLUGIN;
   }
   else
   {
-    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+    if (getTable()->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
     {
-      table->timestamp_field->set_time();
+      getTable()->timestamp_field->set_time();
     }
 
     error= doUpdateRecord(old_data, new_data);
-    if (unlikely(plugin::EventObserver::afterUpdateRecord(*table, old_data, new_data, error)))
+    if (unlikely(plugin::EventObserver::afterUpdateRecord(*getTable(), old_data, new_data, error)))
     {
       error= ER_EVENT_OBSERVER_PLUGIN;
     }
@@ ... @@
     return error;
   }
 
-  if (unlikely(log_row_for_replication(table, old_data, new_data)))
+  if (unlikely(log_row_for_replication(getTable(), old_data, new_data)))
     return HA_ERR_RBR_LOGGING_FAILED;
 
   return 0;
@@ ... @@
 
   DRIZZLE_DELETE_ROW_START(getTable()->getShare()->getSchemaName(), getTable()->getShare()->getTableName());
   setTransactionReadWrite();
-  if (unlikely(plugin::EventObserver::beforeDeleteRecord(*table, buf)))
+  if (unlikely(plugin::EventObserver::beforeDeleteRecord(*getTable(), buf)))
   {
     error= ER_EVENT_OBSERVER_PLUGIN;
   }
   else
   {
     error= doDeleteRecord(buf);
-    if (unlikely(plugin::EventObserver::afterDeleteRecord(*table, buf, error)))
+    if (unlikely(plugin::EventObserver::afterDeleteRecord(*getTable(), buf, error)))
     {
       error= ER_EVENT_OBSERVER_PLUGIN;
     }
@@ ... @@
   if (unlikely(error))
     return error;
 
-  if (unlikely(log_row_for_replication(table, buf, NULL)))
+  if (unlikely(log_row_for_replication(getTable(), buf, NULL)))
     return HA_ERR_RBR_LOGGING_FAILED;
 
   return 0;
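
One of the functions touched above, compute_next_insert_id(), is the piece that produces the 1,5,15,25,35,... sequence quoted in its comment. A standalone sketch of that stepping, assuming the usual MySQL-heritage formula driven by auto_increment_increment and auto_increment_offset; the helper below is illustrative, not the Drizzle function itself:

#include <cstdint>
#include <iostream>

// Illustrative stand-in for compute_next_insert_id(): the smallest value
// greater than nr that lands on offset + k*increment (assumed formula).
static uint64_t next_auto_increment(uint64_t nr, uint64_t increment, uint64_t offset)
{
  if (increment == 1)
    return nr + 1;                      // same shortcut the real code takes
  uint64_t steps= (nr + increment - offset) / increment;
  return steps * increment + offset;
}

int main()
{
  // auto_increment_increment=10, auto_increment_offset=5 reproduces the
  // 1,5,15,25,35,... example from the function's doc comment.
  uint64_t id= 1;
  for (int i= 0; i < 5; ++i)
  {
    std::cout << id << ' ';
    id= next_auto_increment(id, 10, 5);
  }
  std::cout << '\n';                    // prints: 1 5 15 25 35
  return 0;
}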