void FunctionCursor::position(const unsigned char *record)
{
  /* Earlier per-row heap copies, since replaced by the flat byte buffer below:
       copy= (unsigned char *)calloc(table->getShare()->reclength, sizeof(unsigned char));
       memcpy(copy, record, table->getShare()->reclength);
       row_cache.push_back(copy); */

  if (row_cache.size() <= record_id * table->getShare()->reclength)
  {
    row_cache.resize(row_cache.size() + table->getShare()->reclength * 100); // Hardwired at adding an additional 100 rows of storage
  }

  std::cerr << " position() " << table->getShare()->reclength << " " << row_cache.size() << " " << record_id << " total " << record_id * table->getShare()->reclength << "\n";

  memcpy(&row_cache[record_id * table->getShare()->reclength], record, table->getShare()->reclength);
  internal::my_store_ptr(ref, ref_length, record_id);
  record_id++; // Next row lands in the next reclength-sized slot
}
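position() writes each row image at a fixed byte offset (record_id * reclength) into one flat buffer, growing it 100 rows at a time, and stores the row id in ref so the row can be refetched later. Below is a minimal, self-contained sketch of that scheme using plain STL and hypothetical names (FlatRowCache, store), not the Drizzle cursor API:

#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical stand-in for the flat row cache: one byte vector, fixed-length
// records, row i occupying bytes [i * reclength, (i + 1) * reclength).
struct FlatRowCache
{
  std::vector<unsigned char> bytes;
  size_t reclength;   // plays the role of table->getShare()->reclength
  size_t next_id;

  explicit FlatRowCache(size_t len) : reclength(len), next_id(0) {}

  // Mirrors position(): grow in 100-row chunks, copy the record into its slot,
  // and return the id a later lookup will use.
  size_t store(const unsigned char *record)
  {
    if (bytes.size() <= next_id * reclength)
      bytes.resize(bytes.size() + reclength * 100);
    std::memcpy(&bytes[next_id * reclength], record, reclength);
    return next_id++;
  }
};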
void FunctionCursor::wipeCache()
{
  if (rows_returned > estimate_of_rows)
    estimate_of_rows= rows_returned;

  row_cache.clear();  // drop the cached row bytes and start numbering over
  record_id= 0;
}
int FunctionCursor::extra(enum ha_extra_function operation)
{
  switch (operation)
  {
  case drizzled::HA_EXTRA_CACHE:
    break;
  case drizzled::HA_EXTRA_NO_CACHE:
    break;
  case drizzled::HA_EXTRA_RESET_STATE:
    /* The former inline teardown of the pointer-per-row cache:
         size_t length_of_vector= row_cache.size();
         for (size_t x= 0; x < length_of_vector; x++)
           ... free each calloc()ed row copy ...
         if (rows_returned > estimate_of_rows)
           estimate_of_rows= rows_returned;
       now lives in wipeCache(). */
    wipeCache();
    break;
  default:
    break;
  }

  return 0;
}
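wipeCache() keeps estimate_of_rows as a high-water mark of the rows a scan has returned before the cache is discarded, and extra() routes HA_EXTRA_RESET_STATE to it. A small sketch of that high-water-mark reset, with hypothetical names and none of the Drizzle types:

#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical reset helper: remember the largest row count any scan has
// produced (usable later as a row estimate), then drop the cached bytes.
struct ScanState
{
  std::vector<unsigned char> row_cache;
  size_t rows_returned;
  size_t estimate_of_rows;

  void wipe()
  {
    estimate_of_rows= std::max(estimate_of_rows, rows_returned);
    row_cache.clear();
  }
};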
delete generator; // Do this in case of an early exit from rnd_next()
int FunctionCursor::rnd_pos(unsigned char *buf, unsigned char *pos)
{
  ha_statistic_increment(&system_status_var::ha_read_rnd_count);
  size_t position_id= (size_t)internal::my_get_ptr(pos, ref_length);

  /* Earlier pointer-per-row lookup, since replaced by the byte-offset form below:
       assert(position_id < row_cache.size());
       memcpy(buf, row_cache[position_id], table->getShare()->reclength); */

  std::cerr << " rnd_pos() " << table->getShare()->reclength << " " << row_cache.size() << " " << position_id << " total " << position_id * table->getShare()->reclength << "\n";

  assert(position_id * table->getShare()->reclength < row_cache.size());
  memcpy(buf, &row_cache[position_id * table->getShare()->reclength], table->getShare()->reclength);

  return 0;
}
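rnd_pos() recovers the row id from the stored ref and copies from the same byte offset, guarded by the bounds assert. Extending the FlatRowCache sketch shown after position() (same hypothetical names), a fetch helper and a round trip might look like:

#include <cassert>
#include <cstring>

// Hypothetical companion to FlatRowCache::store(): copy a cached row back out.
// The check mirrors the assert in rnd_pos() above.
void fetch(const FlatRowCache &cache, size_t id, unsigned char *buf)
{
  assert(id * cache.reclength < cache.bytes.size());
  std::memcpy(buf, &cache.bytes[id * cache.reclength], cache.reclength);
}

// Round trip: store a row, then read it back by the id store() returned,
// just as position() and rnd_pos() do via ref.
//   FlatRowCache cache(8);
//   unsigned char row[8]= {1, 2, 3, 4, 5, 6, 7, 8}, out[8];
//   size_t id= cache.store(row);
//   fetch(cache, id, out);   // out now holds the same bytes as row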