~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to plugin/archive/ha_archive.cc

  • Committer: Brian Aker
  • Date: 2010-10-23 22:28:30 UTC
  • mto: This revision was merged to the branch mainline in revision 1875.
  • Revision ID: brian@tangent.org-20101023222830-em4idhbau96l37zt
Encapsulate schema_name in table_list.

Show diffs side-by-side

added added

removed removed

Lines of Context:
12
12
 
13
13
  You should have received a copy of the GNU General Public License
14
14
  along with this program; if not, write to the Free Software
15
 
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
 
15
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA */
16
16
 
17
17
 
18
18
#include "config.h"
196
196
 
197
197
 
198
198
ha_archive::ha_archive(drizzled::plugin::StorageEngine &engine_arg,
199
 
                       TableShare &table_arg)
 
199
                       Table &table_arg)
200
200
  :Cursor(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
201
201
{
202
202
  /* Set our original buffer from pre-allocated memory */
294
294
*/
295
295
ArchiveShare *ha_archive::get_share(const char *table_name, int *rc)
296
296
{
297
 
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(engine);
 
297
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());
298
298
 
299
299
  pthread_mutex_lock(&a_engine->mutex());
300
300
 
339
339
*/
340
340
int ha_archive::free_share()
341
341
{
342
 
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(engine);
 
342
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());
343
343
 
344
344
  pthread_mutex_lock(&a_engine->mutex());
345
345
  if (!--share->use_count)
438
438
 
439
439
  assert(share);
440
440
 
441
 
  record_buffer.resize(table->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);
 
441
  record_buffer.resize(getTable()->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);
442
442
 
443
443
  lock.init(&share->_lock);
444
444
 
624
624
 
625
625
uint32_t ha_archive::max_row_length(const unsigned char *)
626
626
{
627
 
  uint32_t length= (uint32_t)(table->getRecordLength() + table->sizeFields()*2);
 
627
  uint32_t length= (uint32_t)(getTable()->getRecordLength() + getTable()->sizeFields()*2);
628
628
  length+= ARCHIVE_ROW_HEADER_SIZE;
629
629
 
630
630
  uint32_t *ptr, *end;
631
 
  for (ptr= table->getBlobField(), end=ptr + table->sizeBlobFields();
 
631
  for (ptr= getTable()->getBlobField(), end=ptr + getTable()->sizeBlobFields();
632
632
       ptr != end ;
633
633
       ptr++)
634
634
  {
635
 
      length += 2 + ((Field_blob*)table->getField(*ptr))->get_length();
 
635
      length += 2 + ((Field_blob*)getTable()->getField(*ptr))->get_length();
636
636
  }
637
637
 
638
638
  return length;
647
647
    return(HA_ERR_OUT_OF_MEM);
648
648
 
649
649
  /* Copy null bits */
650
 
  memcpy(&record_buffer[0], record, table->getShare()->null_bytes);
651
 
  ptr= &record_buffer[0] + table->getShare()->null_bytes;
 
650
  memcpy(&record_buffer[0], record, getTable()->getShare()->null_bytes);
 
651
  ptr= &record_buffer[0] + getTable()->getShare()->null_bytes;
652
652
 
653
 
  for (Field **field=table->getFields() ; *field ; field++)
 
653
  for (Field **field=getTable()->getFields() ; *field ; field++)
654
654
  {
655
655
    if (!((*field)->is_null()))
656
656
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
674
674
  int rc;
675
675
  unsigned char *read_buf= NULL;
676
676
  uint64_t temp_auto;
677
 
  unsigned char *record=  table->getInsertRecord();
 
677
  unsigned char *record=  getTable()->getInsertRecord();
678
678
 
679
679
  if (share->crashed)
680
680
    return(HA_ERR_CRASHED_ON_USAGE);
686
686
      return(HA_ERR_CRASHED_ON_USAGE);
687
687
 
688
688
 
689
 
  if (table->next_number_field && record == table->getInsertRecord())
 
689
  if (getTable()->next_number_field && record == getTable()->getInsertRecord())
690
690
  {
691
691
    update_auto_increment();
692
 
    temp_auto= table->next_number_field->val_int();
 
692
    temp_auto= getTable()->next_number_field->val_int();
693
693
 
694
694
    /*
695
695
      We don't support decremening auto_increment. They make the performance
696
696
      just cry.
697
697
    */
698
698
    if (temp_auto <= share->archive_write.auto_increment &&
699
 
        table->getShare()->getKeyInfo(0).flags & HA_NOSAME)
 
699
        getTable()->getShare()->getKeyInfo(0).flags & HA_NOSAME)
700
700
    {
701
701
      rc= HA_ERR_FOUND_DUPP_KEY;
702
702
      goto error;
748
748
{
749
749
  int rc;
750
750
  bool found= 0;
751
 
  current_k_offset= table->getShare()->getKeyInfo(0).key_part->offset;
 
751
  current_k_offset= getTable()->getShare()->getKeyInfo(0).key_part->offset;
752
752
  current_key= key;
753
753
  current_key_len= key_len;
754
754
 
853
853
  }
854
854
 
855
855
  /* Copy null bits */
856
 
  memcpy(record, ptr, table->getNullBytes());
857
 
  ptr+= table->getNullBytes();
858
 
  for (Field **field= table->getFields() ; *field ; field++)
 
856
  memcpy(record, ptr, getTable()->getNullBytes());
 
857
  ptr+= getTable()->getNullBytes();
 
858
  for (Field **field= getTable()->getFields() ; *field ; field++)
859
859
  {
860
860
    if (!((*field)->is_null()))
861
861
    {
862
 
      ptr= (*field)->unpack(record + (*field)->offset(table->getInsertRecord()), ptr);
 
862
      ptr= (*field)->unpack(record + (*field)->offset(getTable()->getInsertRecord()), ptr);
863
863
    }
864
864
  }
865
865
  return(0);
894
894
  current_position= aztell(&archive);
895
895
  rc= get_row(&archive, buf);
896
896
 
897
 
  table->status=rc ? STATUS_NOT_FOUND: 0;
 
897
  getTable()->status=rc ? STATUS_NOT_FOUND: 0;
898
898
 
899
899
  return(rc);
900
900
}
1014
1014
 
1015
1015
      for (uint64_t x= 0; x < rows_restored ; x++)
1016
1016
      {
1017
 
        rc= get_row(&archive, table->getInsertRecord());
 
1017
        rc= get_row(&archive, getTable()->getInsertRecord());
1018
1018
 
1019
1019
        if (rc != 0)
1020
1020
          break;
1021
1021
 
1022
 
        real_write_row(table->getInsertRecord(), &writer);
 
1022
        real_write_row(getTable()->getInsertRecord(), &writer);
1023
1023
        /*
1024
1024
          Long term it should be possible to optimize this so that
1025
1025
          it is not called on each row.
1026
1026
        */
1027
 
        if (table->found_next_number_field)
 
1027
        if (getTable()->found_next_number_field)
1028
1028
        {
1029
 
          Field *field= table->found_next_number_field;
 
1029
          Field *field= getTable()->found_next_number_field;
1030
1030
 
1031
1031
          /* Since we will need to use field to translate, we need to flip its read bit */
1032
1032
          field->setReadSet();
1033
1033
 
1034
1034
          uint64_t auto_value=
1035
 
            (uint64_t) field->val_int(table->getInsertRecord() +
1036
 
                                       field->offset(table->getInsertRecord()));
 
1035
            (uint64_t) field->val_int(getTable()->getInsertRecord() +
 
1036
                                       field->offset(getTable()->getInsertRecord()));
1037
1037
          if (share->archive_write.auto_increment < auto_value)
1038
1038
            stats.auto_increment_value=
1039
1039
              (share->archive_write.auto_increment= auto_value) + 1;
1147
1147
 
1148
1148
    stat(share->data_file_name.c_str(), &file_stat);
1149
1149
 
1150
 
    stats.mean_rec_length= table->getRecordLength()+ buffer.alloced_length();
 
1150
    stats.mean_rec_length= getTable()->getRecordLength()+ buffer.alloced_length();
1151
1151
    stats.data_file_length= file_stat.st_size;
1152
1152
    stats.create_time= file_stat.st_ctime;
1153
1153
    stats.update_time= file_stat.st_mtime;
1229
1229
  read_data_header(&archive);
1230
1230
  for (uint64_t x= 0; x < share->archive_write.rows; x++)
1231
1231
  {
1232
 
    rc= get_row(&archive, table->getInsertRecord());
 
1232
    rc= get_row(&archive, getTable()->getInsertRecord());
1233
1233
 
1234
1234
    if (rc != 0)
1235
1235
      break;