 1110  if (exec_tmp_table1)
-1112    exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE);
-1113    exec_tmp_table1->file->ha_delete_all_rows();
+1112    exec_tmp_table1->cursor->extra(HA_EXTRA_RESET_STATE);
+1113    exec_tmp_table1->cursor->ha_delete_all_rows();
 1114    exec_tmp_table1->free_io_cache();
 1115    exec_tmp_table1->filesort_free_buffers();
 1117  if (exec_tmp_table2)
-1119    exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE);
-1120    exec_tmp_table2->file->ha_delete_all_rows();
+1119    exec_tmp_table2->cursor->extra(HA_EXTRA_RESET_STATE);
+1120    exec_tmp_table2->cursor->ha_delete_all_rows();
 1121    exec_tmp_table2->free_io_cache();
 1122    exec_tmp_table2->filesort_free_buffers();
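
The two blocks above repeat the same four-step reset for each executable temporary table. A minimal sketch of that sequence, using mock Cursor/Table types rather than the real drizzled classes (the constant value and the helper name reset_exec_tmp_table are invented for illustration):

// Sketch only: mock types that expose just the calls visible in the hunk.
#include <iostream>

enum { HA_EXTRA_RESET_STATE = 1 };            // assumed stand-in, value arbitrary

struct MockCursor {                           // stand-in for the renamed Cursor/handler
  void extra(int op)        { std::cout << "extra(" << op << ")\n"; }
  int  ha_delete_all_rows() { std::cout << "ha_delete_all_rows()\n"; return 0; }
};

struct MockTmpTable {                         // stand-in for Table
  MockCursor *cursor;
  void free_io_cache()         { std::cout << "free_io_cache()\n"; }
  void filesort_free_buffers() { std::cout << "filesort_free_buffers()\n"; }
};

// The four-step reset shown above, factored into one helper.
static void reset_exec_tmp_table(MockTmpTable *t)
{
  if (!t)
    return;
  t->cursor->extra(HA_EXTRA_RESET_STATE);     // tell the engine to drop cached state
  t->cursor->ha_delete_all_rows();            // empty the materialized rows
  t->free_io_cache();                         // release the write IO cache
  t->filesort_free_buffers();                 // release filesort buffers
}

int main()
{
  MockCursor c;
  MockTmpTable t1= { &c };
  reset_exec_tmp_table(&t1);                  // same sequence as exec_tmp_table1/2
  return 0;
}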

 2327      item->save_in_result_field(1);
 2329    copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
-2330    if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
+2330    if ((write_error= table_arg->cursor->ha_write_row(table_arg->record[0])))
 2332      if (create_myisam_from_heap(session, table_arg,
 2333                                  tmp_table_param.start_recinfo,

 2691  if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group
 2692      && !join->send_group_parts && !join->having && !jt->select_cond &&
 2693      !(jt->select && jt->select->quick) &&
-2694      (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
+2694      (jt->table->cursor->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
 2695      (jt->ref.key < 0))
 2697    /* Join over all rows in table; Return number of found rows */
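
This condition guards a shortcut: a single-table join with no conditions, grouping, or HAVING, on an engine whose row count is exact, can answer from the engine statistics without reading any rows. A hedged sketch of that test with mock types (MockStats, MockJoinTab and can_use_stats_count are illustrative names, not part of the patch):

#include <cstdint>
#include <iostream>

struct MockStats  { uint64_t records; };
struct MockCursor { MockStats stats; bool records_are_exact; };
struct MockJoinTab { MockCursor *cursor; bool has_condition; };

// Mirrors the spirit of the check above: one table, no filtering or grouping,
// and the engine guarantees HA_STATS_RECORDS_IS_EXACT-style counts.
static bool can_use_stats_count(const MockJoinTab &jt, unsigned table_count,
                                bool grouping, bool having)
{
  return table_count == 1 && !grouping && !having &&
         !jt.has_condition && jt.cursor->records_are_exact;
}

int main()
{
  MockCursor c{{12345}, true};
  MockJoinTab jt{&c, false};
  if (can_use_stats_count(jt, 1, false, false))
    std::cout << "found rows: " << c.stats.records << "\n";  // no table scan needed
  return 0;
}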

 2752  join->found_records++;
-2753  if ((error=table->file->ha_write_row(table->record[0])))
+2753  if ((error=table->cursor->ha_write_row(table->record[0])))
-2755    if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
+2755    if (!table->cursor->is_fatal_error(error, HA_CHECK_DUP))
 2757      if (create_myisam_from_heap(join->session, table,
 2758                                  join->tmp_table_param.start_recinfo,
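
The write path above treats a duplicate-key error as harmless and treats a full in-memory table as the cue to rebuild the temporary table on disk. A rough sketch of that control flow with mock types; the error codes are placeholder values, and is_fatal_error() here folds the HA_CHECK_DUP flag into a simple predicate:

#include <iostream>

enum WriteError { WRITE_OK= 0, DUP_KEY= 121, TABLE_FULL= 135 };  // illustrative values

struct MockCursor {
  WriteError next_result= WRITE_OK;
  WriteError ha_write_row(const char *) { return next_result; }
  // "Not fatal" for the duplicate-key case means the row is simply skipped.
  bool is_fatal_error(WriteError e)     { return e != DUP_KEY; }
};

struct MockTable { MockCursor cursor; const char *record0= "row"; };

static bool spill_to_disk(MockTable &)  // stand-in for create_myisam_from_heap()
{ std::cout << "converting in-memory tmp table to on-disk table\n"; return true; }

static bool write_tmp_row(MockTable &t, unsigned long &found_records)
{
  found_records++;
  WriteError e= t.cursor.ha_write_row(t.record0);
  if (e == WRITE_OK)
    return true;
  if (!t.cursor.is_fatal_error(e))      // duplicate row: nothing more to do
    return true;
  return spill_to_disk(t);              // e.g. heap table full: retry on disk
}

int main()
{
  MockTable t;
  unsigned long found= 0;
  t.cursor.next_result= DUP_KEY;
  write_tmp_row(t, found);
  t.cursor.next_result= TABLE_FULL;
  write_tmp_row(t, found);
  std::cout << "found_records=" << found << "\n";
  return 0;
}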

 2801  if (item->maybe_null)
 2802    group->buff[-1]= (char) group->field->is_null();
-2804  if (!table->file->index_read_map(table->record[1],
+2804  if (!table->cursor->index_read_map(table->record[1],
 2805                                   join->tmp_table_param.group_buff,
 2807                                   HA_READ_KEY_EXACT))
 2808  { /* Update old record */
 2809    table->restoreRecord();
 2810    update_tmptable_sum_func(join->sum_funcs,table);
-2811    if ((error= table->file->ha_update_row(table->record[1],
+2811    if ((error= table->cursor->ha_update_row(table->record[1],
 2812                                           table->record[0])))
-2814      table->file->print_error(error,MYF(0));
+2814      table->cursor->print_error(error,MYF(0));
 2815      return NESTED_LOOP_ERROR;
 2817    return NESTED_LOOP_OK;
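
The branch above is the "group already exists" half of an insert-or-update flow: the group key is looked up in the temporary table's index, the stored row is restored, the aggregate columns are merged, and the row is written back. A small stand-in using a std::map in place of the index; the names update_group and update_tmptable_sum_funcs are invented to mirror the calls in the hunk:

#include <iostream>
#include <map>
#include <string>

struct GroupRow { long cnt; long sum; };

// Merge one incoming row into the stored aggregates (like update_tmptable_sum_func).
static void update_tmptable_sum_funcs(GroupRow &stored, long value)
{
  stored.cnt += 1;                        // e.g. COUNT(*)
  stored.sum += value;                    // e.g. SUM(col)
}

// Returns true when the group already existed and was updated in place,
// mirroring the index_read_map()/ha_update_row() pair above.
static bool update_group(std::map<std::string, GroupRow> &tmp_table,
                         const std::string &group_key, long value)
{
  auto it= tmp_table.find(group_key);     // index_read_map(..., HA_READ_KEY_EXACT)
  if (it == tmp_table.end())
    return false;                         // caller falls through to the insert path
  update_tmptable_sum_funcs(it->second, value);
  return true;                            // ha_update_row() succeeded
}

int main()
{
  std::map<std::string, GroupRow> tmp{{"a", {1, 10}}};
  if (!update_group(tmp, "a", 5))
    tmp["a"]= GroupRow{1, 5};
  std::cout << "a: cnt=" << tmp["a"].cnt << " sum=" << tmp["a"].sum << "\n";
  return 0;
}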

 2833    init_tmptable_sum_functions(join->sum_funcs);
 2834    copy_funcs(join->tmp_table_param.items_to_copy);
-2835    if ((error=table->file->ha_write_row(table->record[0])))
+2835    if ((error=table->cursor->ha_write_row(table->record[0])))
 2837      if (create_myisam_from_heap(join->session, table,
 2838                                  join->tmp_table_param.start_recinfo,
 2841        return NESTED_LOOP_ERROR; // Not a table_is_full error
 2842      /* Change method to update rows */
-2843      table->file->ha_index_init(0, 0);
+2843      table->cursor->ha_index_init(0, 0);
 2844      join->join_tab[join->tables-1].next_select= end_unique_update;
 2846  join->send_records++;
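
When the in-memory table overflows while grouping, the code above converts it and switches the join's last step from the plain update routine to end_unique_update. A sketch of that strategy switch with a mock function-pointer field; the *_mock routines are placeholders, not the real end_update/end_unique_update:

#include <iostream>

struct Join;                                   // mock join state, defined below
typedef int (*next_select_fn)(Join &);

static int end_update_mock(Join &)        { std::cout << "end_update\n"; return 0; }
static int end_unique_update_mock(Join &) { std::cout << "end_unique_update\n"; return 0; }

struct Join { next_select_fn next_select= end_update_mock; };

static void switch_to_unique_update(Join &j)
{
  // ... a create_myisam_from_heap()-style conversion would happen here ...
  j.next_select= end_unique_update_mock;       // mirrors join_tab[tables-1].next_select=
}

int main()
{
  Join j;
  j.next_select(j);                            // rows still go through the plain path
  switch_to_unique_update(j);
  j.next_select(j);                            // later rows use the update-on-duplicate path
  return 0;
}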

 2865  copy_fields(&join->tmp_table_param); // Groups are copied twice.
 2866  copy_funcs(join->tmp_table_param.items_to_copy);
-2868  if (!(error= table->file->ha_write_row(table->record[0])))
+2868  if (!(error= table->cursor->ha_write_row(table->record[0])))
 2869    join->send_records++; // New group
-2872    if ((int) table->file->get_dup_key(error) < 0)
+2872    if ((int) table->cursor->get_dup_key(error) < 0)
-2874      table->file->print_error(error,MYF(0));
+2874      table->cursor->print_error(error,MYF(0));
 2875      return NESTED_LOOP_ERROR;
-2877    if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
+2877    if (table->cursor->rnd_pos(table->record[1],table->cursor->dup_ref))
-2879      table->file->print_error(error,MYF(0));
+2879      table->cursor->print_error(error,MYF(0));
 2880      return NESTED_LOOP_ERROR;
 2882    table->restoreRecord();
 2883    update_tmptable_sum_func(join->sum_funcs,table);
-2884    if ((error= table->file->ha_update_row(table->record[1],
+2884    if ((error= table->cursor->ha_update_row(table->record[1],
 2885                                           table->record[0])))
-2887      table->file->print_error(error,MYF(0));
+2887      table->cursor->print_error(error,MYF(0));
 2888      return NESTED_LOOP_ERROR;
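
Here the insert is attempted first; on a duplicate key the engine supplies dup_ref, the existing row is fetched with rnd_pos(), the aggregates are merged, and the row is updated. A self-contained mock of that sequence, with a vector standing in for the data file and a map for the unique index (none of these types are the real Cursor API):

#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct Row { std::string group; long cnt; };

struct MockCursor {                                // stand-in for Cursor
  std::vector<Row> rows;                           // "data file"
  std::unordered_map<std::string, size_t> key;     // unique index on group
  size_t dup_ref= 0;                               // set when a write collides

  bool ha_write_row(const Row &r)                  // false == duplicate key
  {
    auto it= key.find(r.group);
    if (it != key.end()) { dup_ref= it->second; return false; }
    key[r.group]= rows.size();
    rows.push_back(r);
    return true;
  }
  Row &rnd_pos(size_t ref) { return rows[ref]; }   // fetch the old row by position
  void ha_update_row(size_t ref, const Row &r) { rows[ref]= r; }
};

int main()
{
  MockCursor c;
  for (const char *g : {"a", "b", "a"})
  {
    Row incoming{g, 1};
    if (!c.ha_write_row(incoming))                 // duplicate: merge into the old row
    {
      Row old= c.rnd_pos(c.dup_ref);
      old.cnt += incoming.cnt;                     // update_tmptable_sum_func() analogue
      c.ha_update_row(c.dup_ref, old);
    }
  }
  for (const Row &r : c.rows)
    std::cout << r.group << ": " << r.cnt << "\n";
  return 0;
}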

 3458      if (table->covering_keys.test(key))
 3460        /* we can use only index tree */
-3461        tmp= record_count * table->file->index_only_read_time(key, tmp);
+3461        tmp= record_count * table->cursor->index_only_read_time(key, tmp);
 3464        tmp= record_count * min(tmp,s->worst_seeks);
 3472      Set tmp to (previous record count) * (records / combination)
 3474      if ((found_part & 1) &&
-3475          (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) ||
+3475          (!(table->cursor->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) ||
 3476           found_part == PREV_BITS(uint,keyinfo->key_parts)))
 3478        max_key_part= max_part_bit(found_part);
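
Both branches above price a ref lookup per incoming row: an index-only read estimate when the key covers the query, otherwise a per-lookup figure capped by worst_seeks, in each case scaled by record_count. A toy calculation with made-up numbers; the index_only_read_time() result below is just an assumed value, not the engine's formula:

#include <algorithm>
#include <iostream>

int main()
{
  double record_count= 200.0;      // rows feeding this table so far (example)
  double rows_per_lookup= 4.0;     // tmp: expected matches per lookup (example)
  double worst_seeks= 12.0;        // precomputed cap on seeks per lookup (example)
  double index_only_read= 1.5;     // assumed index_only_read_time() result

  double covering_cost= record_count * index_only_read;
  double noncover_cost= record_count * std::min(rows_per_lookup, worst_seeks);
  std::cout << "covering: " << covering_cost
            << "  non-covering: " << noncover_cost << "\n";
  return 0;
}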

-3555      Assume that the first key part matches 1% of the file
+3555      Assume that the first key part matches 1% of the cursor
 3556      and that the whole key matches 10 (duplicates) or 1
 3557      (unique) records.
 3558      Assume also that more key matches proportionally more
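
My reading of this comment, not code from the patch: the estimate interpolates linearly between a (rows matching the first key part, roughly 1% of the table) and b (rows matching the whole key, about 10 for a non-unique key or 1 for a unique one) as more key parts become usable. A worked sketch under those assumptions:

#include <iostream>

// a = rows matching the first key part, b = rows matching the whole key,
// c = key parts in the index, x = key parts actually usable (1 <= x <= c).
static double est_rows(double a, double b, unsigned c, unsigned x)
{
  if (c <= 1 || x >= c)
    return b;                      // whole key used: duplicate/unique estimate
  return a + (double)(x - 1) * (b - a) / (double)(c - 1);
}

int main()
{
  double table_rows= 10000.0;
  double a= table_rows / 100.0;    // first key part matches ~1% of the rows
  double b= 10.0;                  // whole (non-unique) key matches ~10 rows
  for (unsigned x= 1; x <= 3; x++)
    std::cout << "key parts used=" << x << " -> ~" << est_rows(a, b, 3, x)
              << " rows\n";
  return 0;
}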

 3623      if (table->covering_keys.test(key))
 3625        /* we can use only index tree */
-3626        tmp= record_count * table->file->index_only_read_time(key, tmp);
+3626        tmp= record_count * table->cursor->index_only_read_time(key, tmp);
 3629        tmp= record_count * min(tmp,s->worst_seeks);

 3677    if ((records >= s->found_records || best > s->read_time) && // (1)
 3678        ! (s->quick && best_key && s->quick->index == best_key->getKey() && // (2)
 3679           best_max_key_part >= s->table->quick_key_parts[best_key->getKey()]) &&// (2)
-3680        ! ((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
+3680        ! ((s->table->cursor->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
 3681           ! s->table->covering_keys.none() && best_key && !s->quick) && // (3)
 3682        ! (s->table->force_index && best_key && !s->quick)) // (4)
 3683    { // Check full join

 4815  Table *table=tab->table;
 4816  bool using_join_cache;
 4817  tab->read_record.table= table;
-4818  tab->read_record.file=table->file;
+4818  tab->read_record.cursor= table->cursor;
 4819  tab->next_select=sub_select; /* normal select */
 4821  TODO: don't always instruct first table's ref/range access method to

 4956      table->covering_keys.test(tab->select->quick->index))
 4958    table->key_read=1;
-4959    table->file->extra(HA_EXTRA_KEYREAD);
+4959    table->cursor->extra(HA_EXTRA_KEYREAD);
 4961  else if (!table->covering_keys.none() &&
 4962           !(tab->select && tab->select->quick))
 4968    is always faster than using a secondary index".
 4970    if (table->s->primary_key != MAX_KEY &&
-4971        table->file->primary_key_is_clustered())
+4971        table->cursor->primary_key_is_clustered())
 4972      tab->index= table->s->primary_key;
 4974      tab->index= table->find_shortest_key(&table->covering_keys);
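
The fallback above picks an index to scan when no quick select is in use but some key covers the query: the clustered primary key if the engine clusters data by it, otherwise the shortest covering key. A mock of that choice; find_shortest_key() itself is not shown in the hunk, so the sketch simply scans a candidate list (all names here are illustrative):

#include <algorithm>
#include <iostream>
#include <vector>

struct CoveringKey { unsigned key_no; unsigned key_length; };

// candidates is assumed non-empty when no clustered primary key applies.
static unsigned choose_scan_index(bool has_primary, bool primary_is_clustered,
                                  unsigned primary_key_no,
                                  const std::vector<CoveringKey> &candidates)
{
  if (has_primary && primary_is_clustered)
    return primary_key_no;                         // primary_key_is_clustered() case
  // Shortest covering key == least data read for an index-only scan.
  auto it= std::min_element(candidates.begin(), candidates.end(),
                            [](const CoveringKey &a, const CoveringKey &b)
                            { return a.key_length < b.key_length; });
  return it->key_no;
}

int main()
{
  std::vector<CoveringKey> covering{{1, 24}, {2, 8}};
  std::cout << "scan index: " << choose_scan_index(true, false, 0, covering) << "\n";
  return 0;
}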

 5462  reclength= entry->s->reclength-offset;
 5464  entry->free_io_cache(); // Safety
-5465  entry->file->info(HA_STATUS_VARIABLE);
+5465  entry->cursor->info(HA_STATUS_VARIABLE);
 5466  if (entry->s->db_type() == heap_engine ||
 5467      (!entry->s->blob_fields &&
-5468       ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records <
+5468       ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->cursor->stats.records <
 5469        session->variables.sortbuff_size)))
 5470    error= remove_dup_with_hash_index(join->session, entry,
 5471                                      field_count, first_field,
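
The test above opts for hash-based duplicate removal when the table lives in the heap engine, or when every row plus its per-row hash overhead fits in the session sort buffer and there are no blob fields. A back-of-the-envelope version with invented numbers; the real HASH_OVERHEAD value and the ALIGN_SIZE rounding are not reproduced here:

#include <cstdint>
#include <iostream>

int main()
{
  const uint64_t reclength= 120;             // bytes per tmp-table row (example)
  const uint64_t hash_overhead= 16;          // assumed per-row hash overhead
  const uint64_t records= 50000;             // rows in the tmp table (example)
  const uint64_t sortbuff_size= 8ULL << 20;  // 8 MiB sort buffer (example)

  bool use_hash= (reclength + hash_overhead) * records < sortbuff_size;
  std::cout << "bytes needed: " << (reclength + hash_overhead) * records
            << ", use hash dedup: " << (use_hash ? "yes" : "no") << "\n";
  return 0;
}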

 5567  s->needed_reg.reset();
 5568  table_vector[i]=s->table=table=tables->table;
 5569  table->pos_in_table_list= tables;
-5570  error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
+5570  error= table->cursor->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
-5573    table->file->print_error(error, MYF(0));
+5573    table->cursor->print_error(error, MYF(0));
 5576  table->quick_keys.reset();

 5585  s->dependent= tables->dep_tables;
 5586  s->key_dependent= 0;
 5587  if (tables->schema_table)
-5588    table->file->stats.records= 2;
-5589  table->quick_condition_rows= table->file->stats.records;
+5588    table->cursor->stats.records= 2;
+5589  table->quick_condition_rows= table->cursor->stats.records;
 5591  s->on_expr_ref= &tables->on_expr;
 5592  if (*s->on_expr_ref)
 5594    /* s is the only inner table of an outer join */
-5595    if (!table->file->stats.records && !embedding)
+5595    if (!table->cursor->stats.records && !embedding)
 5596    { // Empty table
 5597      s->dependent= 0; // Ignore LEFT JOIN depend.
 5598      set_position(join, const_count++, s, (optimizer::KeyUse*) 0);

 5619  while (embedding);
-5622  if ((table->file->stats.records <= 1) && !s->dependent &&
-5623      (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
+5622  if ((table->cursor->stats.records <= 1) && !s->dependent &&
+5623      (table->cursor->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
 5624      !join->no_const_tables)
 5626    set_position(join, const_count++, s, (optimizer::KeyUse*) 0);

 5745  // All dep. must be constants
 5746  if (s->dependent & ~(found_const_table_map))
-5748  if (table->file->stats.records <= 1L &&
-5749      (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
+5748  if (table->cursor->stats.records <= 1L &&
+5749      (table->cursor->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
 5750      !table->pos_in_table_list->embedding)
 5751  { // system table
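
Both this check and the one at 5622 treat a table as constant ("system") when the engine reports an exact row count of at most one and nothing makes the table dependent on other tables. A mock of that test, with illustrative names only:

#include <cstdint>
#include <iostream>

struct MockStats  { uint64_t records; };
struct MockCursor { MockStats stats; bool stats_exact; };

// True when the table can be read once during optimization and treated as a constant.
static bool is_system_table(const MockCursor &c, bool has_dependencies)
{
  return c.stats.records <= 1 && c.stats_exact && !has_dependencies;
}

int main()
{
  MockCursor one_row{{1}, true};
  std::cout << std::boolalpha
            << is_system_table(one_row, false) << "\n";   // true: read it up front
  return 0;
}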

 5861  /* Approximate found rows and time to read them */
-5862  s->found_records=s->records=s->table->file->stats.records;
-5863  s->read_time=(ha_rows) s->table->file->scan_time();
+5862  s->found_records=s->records=s->table->cursor->stats.records;
+5863  s->read_time=(ha_rows) s->table->cursor->scan_time();
 5866    Set a max range of how many seeks we can expect when using keys