~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to drizzled/optimizer/range.cc

  • Committer: Stewart Smith
  • Date: 2010-03-18 12:01:34 UTC
  • mto: (1666.2.3 build)
  • mto: This revision was merged to the branch mainline in revision 1596.
  • Revision ID: stewart@flamingspork.com-20100318120134-45fdnsw8g3j6c7oy
move RAND() into a plugin

Show diffs side-by-side

added added

removed removed

Lines of Context:
84
84
  keypart-value-bytes holds the value. Its format depends on the field type.
85
85
  The length of keypart-value-bytes may or may not depend on the value being
86
86
  stored. The default is that length is static and equal to
87
 
  KeyPartInfo::length.
 
87
  KEY_PART_INFO::length.
88
88
 
89
89
  Key parts with (key_part_flag & HA_BLOB_PART) have length depending of the
90
90
  value:
109
109
#include <vector>
110
110
#include <algorithm>
111
111
 
112
 
#include <boost/dynamic_bitset.hpp>
113
 
 
114
112
#include "drizzled/sql_base.h"
115
113
#include "drizzled/sql_select.h"
116
114
#include "drizzled/error.h"
117
 
#include "drizzled/optimizer/cost_vector.h"
 
115
#include "drizzled/cost_vect.h"
118
116
#include "drizzled/item/cmpfunc.h"
119
117
#include "drizzled/field/num.h"
120
118
#include "drizzled/check_stack_overrun.h"
137
135
 
138
136
#include "drizzled/temporal.h" /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */
139
137
 
 
138
#include "drizzled/memory/multi_malloc.h"
 
139
 
140
140
using namespace std;
141
141
namespace drizzled
142
142
{
201
201
static void get_sweep_read_cost(Table *table,
202
202
                                ha_rows nrows,
203
203
                                bool interrupted,
204
 
                                optimizer::CostVector *cost)
 
204
                                COST_VECT *cost)
205
205
{
206
206
  cost->zero();
207
207
  if (table->cursor->primary_key_is_clustered())
208
208
  {
209
 
    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
 
209
    cost->io_count= table->cursor->read_time(table->s->primary_key,
210
210
                                             static_cast<uint32_t>(nrows),
211
 
                                             nrows));
 
211
                                             nrows);
212
212
  }
213
213
  else
214
214
  {
219
219
    if (busy_blocks < 1.0)
220
220
      busy_blocks= 1.0;
221
221
 
222
 
    cost->setIOCount(busy_blocks);
 
222
    cost->io_count= busy_blocks;
223
223
 
224
224
    if (! interrupted)
225
225
    {
226
226
      /* Assume reading is done in one 'sweep' */
227
 
      cost->setAvgIOCost((DISK_SEEK_BASE_COST +
228
 
                          DISK_SEEK_PROP_COST*n_blocks/busy_blocks));
 
227
      cost->avg_io_cost= (DISK_SEEK_BASE_COST +
 
228
                          DISK_SEEK_PROP_COST*n_blocks/busy_blocks);
229
229
    }
230
230
  }
231
231
}
232
232
 
 
233
struct st_ror_scan_info;
 
234
 
233
235
static optimizer::SEL_TREE * get_mm_parts(optimizer::RangeParameter *param,
234
236
                               COND *cond_func,
235
237
                               Field *field,
248
250
 
249
251
static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);
250
252
 
251
 
static ha_rows check_quick_select(Session *session,
252
 
                                  optimizer::Parameter *param,
 
253
static ha_rows check_quick_select(optimizer::Parameter *param,
253
254
                                  uint32_t idx,
254
255
                                  bool index_only,
255
256
                                  optimizer::SEL_ARG *tree,
256
257
                                  bool update_tbl_stats,
257
258
                                  uint32_t *mrr_flags,
258
259
                                  uint32_t *bufsize,
259
 
                                  optimizer::CostVector *cost);
 
260
                                  COST_VECT *cost);
260
261
 
261
 
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
262
 
                                                      optimizer::Parameter *param,
 
262
static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
263
263
                                                      optimizer::SEL_TREE *tree,
264
264
                                                      bool index_read_must_be_used,
265
265
                                                      bool update_tbl_stats,
277
277
                                                                 double read_time);
278
278
 
279
279
static
280
 
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
281
 
                                                  optimizer::Parameter *param,
 
280
optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
282
281
                                                  optimizer::SEL_IMERGE *imerge,
283
282
                                                  double read_time);
284
283
 
385
384
 
386
385
void optimizer::SqlSelect::cleanup()
387
386
{
388
 
  if (quick)
389
 
  {
390
 
    delete quick;
391
 
    quick= NULL;
392
 
  }
393
 
 
 
387
  delete quick;
 
388
  quick= 0;
394
389
  if (free_cond)
395
390
  {
396
391
    free_cond= 0;
397
392
    delete cond;
398
393
    cond= 0;
399
394
  }
400
 
  file->close_cached_file();
 
395
  close_cached_file(file);
401
396
}
402
397
 
403
398
 
464
459
    MAX_KEY if no such index was found.
465
460
*/
466
461
 
467
 
uint32_t optimizer::get_index_for_order(Table *table, Order *order, ha_rows limit)
 
462
uint32_t optimizer::get_index_for_order(Table *table, order_st *order, ha_rows limit)
468
463
{
469
464
  uint32_t idx;
470
465
  uint32_t match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1;
471
 
  Order *ord;
 
466
  order_st *ord;
472
467
 
473
468
  for (ord= order; ord; ord= ord->next)
474
469
    if (!ord->asc)
475
470
      return MAX_KEY;
476
471
 
477
 
  for (idx= 0; idx < table->getShare()->sizeKeys(); idx++)
 
472
  for (idx= 0; idx < table->s->keys; idx++)
478
473
  {
479
474
    if (!(table->keys_in_use_for_query.test(idx)))
480
475
      continue;
481
 
    KeyPartInfo *keyinfo= table->key_info[idx].key_part;
 
476
    KEY_PART_INFO *keyinfo= table->key_info[idx].key_part;
482
477
    uint32_t n_parts=  table->key_info[idx].key_parts;
483
478
    uint32_t partno= 0;
484
479
 
543
538
static int fill_used_fields_bitmap(optimizer::Parameter *param)
544
539
{
545
540
  Table *table= param->table;
 
541
  my_bitmap_map *tmp;
546
542
  uint32_t pk;
547
 
  param->tmp_covered_fields.clear();
548
 
  param->needed_fields.resize(table->getShare()->sizeFields());
549
 
  param->needed_fields.reset();
550
 
 
551
 
  param->needed_fields|= *table->read_set;
552
 
  param->needed_fields|= *table->write_set;
553
 
 
554
 
  pk= param->table->getShare()->getPrimaryKey();
 
543
  param->tmp_covered_fields.setBitmap(0);
 
544
  param->fields_bitmap_size= table->s->column_bitmap_size;
 
545
  if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
 
546
                                  param->fields_bitmap_size)) ||
 
547
      param->needed_fields.init(tmp, table->s->fields))
 
548
    return 1;
 
549
 
 
550
  param->needed_fields= *table->read_set;
 
551
  bitmap_union(&param->needed_fields, table->write_set);
 
552
 
 
553
  pk= param->table->s->primary_key;
555
554
  if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
556
555
  {
557
556
    /* The table uses clustered PK and it is not internally generated */
558
 
    KeyPartInfo *key_part= param->table->key_info[pk].key_part;
559
 
    KeyPartInfo *key_part_end= key_part +
 
557
    KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
 
558
    KEY_PART_INFO *key_part_end= key_part +
560
559
                                 param->table->key_info[pk].key_parts;
561
560
    for (;key_part != key_part_end; ++key_part)
562
 
      param->needed_fields.reset(key_part->fieldnr-1);
 
561
      param->needed_fields.clearBit(key_part->fieldnr-1);
563
562
  }
564
563
  return 0;
565
564
}
640
639
{
641
640
  uint32_t idx;
642
641
  double scan_time;
643
 
  if (quick)
644
 
  {
645
 
    delete quick;
646
 
    quick= NULL;
647
 
  }
 
642
  delete quick;
 
643
  quick=0;
648
644
  needed_reg.reset();
649
645
  quick_keys.reset();
650
646
  if (keys_to_use.none())
667
663
    memory::Root alloc;
668
664
    optimizer::SEL_TREE *tree= NULL;
669
665
    KEY_PART *key_parts;
670
 
    KeyInfo *key_info;
 
666
    KEY *key_info;
671
667
    optimizer::Parameter param;
672
668
 
673
669
    if (check_stack_overrun(session, 2*STACK_MIN_SIZE, NULL))
690
686
 
691
687
    session->no_errors=1;                               // Don't warn about NULL
692
688
    memory::init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
693
 
    if (!(param.key_parts= (KEY_PART*) alloc.alloc_root( sizeof(KEY_PART) * head->getShare()->key_parts)) ||
 
689
    if (!(param.key_parts= (KEY_PART*) alloc_root(&alloc,
 
690
                                                  sizeof(KEY_PART)*
 
691
                                                  head->s->key_parts)) ||
694
692
        fill_used_fields_bitmap(&param))
695
693
    {
696
694
      session->no_errors=0;
697
 
      alloc.free_root(MYF(0));                  // Return memory & allocator
698
 
 
 
695
      free_root(&alloc,MYF(0));                 // Return memory & allocator
699
696
      return 0;                         // Can't use range
700
697
    }
701
698
    key_parts= param.key_parts;
706
703
      This is used in get_mm_parts function.
707
704
    */
708
705
    key_info= head->key_info;
709
 
    for (idx=0 ; idx < head->getShare()->sizeKeys() ; idx++, key_info++)
 
706
    for (idx=0 ; idx < head->s->keys ; idx++, key_info++)
710
707
    {
711
 
      KeyPartInfo *key_part_info;
 
708
      KEY_PART_INFO *key_part_info;
712
709
      if (! keys_to_use.test(idx))
713
710
        continue;
714
711
 
796
793
        bool can_build_covering= false;
797
794
 
798
795
        /* Get best 'range' plan and prepare data for making other plans */
799
 
        if ((range_trp= get_key_scans_params(session, &param, tree, false, true,
 
796
        if ((range_trp= get_key_scans_params(&param, tree, false, true,
800
797
                                             best_read_time)))
801
798
        {
802
799
          best_trp= range_trp;
823
820
              Try constructing covering ROR-intersect only if it looks possible
824
821
              and worth doing.
825
822
            */
826
 
            if (!rori_trp->is_covering && can_build_covering &&
 
823
            if (rori_trp->isRowRetrievalNecessary() && can_build_covering &&
827
824
                (rori_trp= get_best_covering_ror_intersect(&param, tree,
828
825
                                                           best_read_time)))
829
826
              best_trp= rori_trp;
839
836
        List_iterator_fast<optimizer::SEL_IMERGE> it(tree->merges);
840
837
        while ((imerge= it++))
841
838
        {
842
 
          new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);
 
839
          new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
843
840
          if (new_conj_trp)
844
841
            set_if_smaller(param.table->quick_condition_rows,
845
842
                           new_conj_trp->records);
860
857
      records= best_trp->records;
861
858
      if (! (quick= best_trp->make_quick(&param, true)) || quick->init())
862
859
      {
863
 
        /* quick can already be free here */
864
 
        if (quick)
865
 
        {
866
 
          delete quick;
867
 
          quick= NULL;
868
 
        }
 
860
        delete quick;
 
861
        quick= NULL;
869
862
      }
870
863
    }
871
864
 
872
865
  free_mem:
873
 
    alloc.free_root(MYF(0));                    // Return memory & allocator
 
866
    free_root(&alloc,MYF(0));                   // Return memory & allocator
874
867
    session->mem_root= param.old_root;
875
868
    session->no_errors=0;
876
869
  }
886
879
  Get best plan for a optimizer::SEL_IMERGE disjunctive expression.
887
880
  SYNOPSIS
888
881
    get_best_disjunct_quick()
889
 
      session
890
882
      param     Parameter from check_quick_select function
891
883
      imerge    Expression to use
892
884
      read_time Don't create scans with cost > read_time
949
941
*/
950
942
 
951
943
static
952
 
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
953
 
                                                  optimizer::Parameter *param,
 
944
optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
954
945
                                                  optimizer::SEL_IMERGE *imerge,
955
946
                                                  double read_time)
956
947
{
974
965
  ha_rows roru_total_records;
975
966
  double roru_intersect_part= 1.0;
976
967
 
977
 
  if (! (range_scans= (optimizer::RangeReadPlan**)param->mem_root->alloc_root(sizeof(optimizer::RangeReadPlan*)* n_child_scans)))
978
 
  {
 
968
  if (! (range_scans= (optimizer::RangeReadPlan**)alloc_root(param->mem_root,
 
969
                                                             sizeof(optimizer::RangeReadPlan*)*
 
970
                                                             n_child_scans)))
979
971
    return NULL;
980
 
  }
981
 
 
982
972
  /*
983
973
    Collect best 'range' scan for each of disjuncts, and, while doing so,
984
974
    analyze possibility of ROR scans. Also calculate some values needed by
988
978
       ptree != imerge->trees_next;
989
979
       ptree++, cur_child++)
990
980
  {
991
 
    if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
 
981
    if (!(*cur_child= get_key_scans_params(param, *ptree, true, false, read_time)))
992
982
    {
993
983
      /*
994
984
        One of index scans in this index_merge is more expensive than entire
1005
995
    all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
1006
996
    all_scans_rors &= (*cur_child)->is_ror;
1007
997
    if (pk_is_clustered &&
1008
 
        param->real_keynr[(*cur_child)->key_idx] ==
1009
 
        param->table->getShare()->getPrimaryKey())
 
998
        param->real_keynr[(*cur_child)->getKeyIndex()] ==
 
999
        param->table->s->primary_key)
1010
1000
    {
1011
1001
      cpk_scan= cur_child;
1012
1002
      cpk_scan_records= (*cur_child)->records;
1040
1030
 
1041
1031
  /* Calculate cost(rowid_to_row_scan) */
1042
1032
  {
1043
 
    optimizer::CostVector sweep_cost;
1044
 
    Join *join= param->session->lex->select_lex.join;
 
1033
    COST_VECT sweep_cost;
 
1034
    JOIN *join= param->session->lex->select_lex.join;
1045
1035
    bool is_interrupted= test(join && join->tables == 1);
1046
1036
    get_sweep_read_cost(param->table, non_cpk_scan_records, is_interrupted,
1047
1037
                        &sweep_cost);
1057
1047
                                    param->session->variables.sortbuff_size);
1058
1048
  if (param->imerge_cost_buff_size < unique_calc_buff_size)
1059
1049
  {
1060
 
    if (!(param->imerge_cost_buff= (uint*)param->mem_root->alloc_root(unique_calc_buff_size)))
1061
 
    {
 
1050
    if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root,
 
1051
                                                     unique_calc_buff_size)))
1062
1052
      return NULL;
1063
 
    }
1064
 
 
1065
1053
    param->imerge_cost_buff_size= unique_calc_buff_size;
1066
1054
  }
1067
1055
 
1090
1078
  /* Ok, it is possible to build a ROR-union, try it. */
1091
1079
  bool dummy;
1092
1080
  if (! (roru_read_plans=
1093
 
          (optimizer::TableReadPlan **) param->mem_root->alloc_root(sizeof(optimizer::TableReadPlan*) * n_child_scans)))
1094
 
  {
 
1081
          (optimizer::TableReadPlan **) alloc_root(param->mem_root,
 
1082
                                                   sizeof(optimizer::TableReadPlan*)*
 
1083
                                                   n_child_scans)))
1095
1084
    return imerge_trp;
1096
 
  }
1097
1085
skip_to_ror_scan:
1098
1086
  roru_index_costs= 0.0;
1099
1087
  roru_total_records= 0;
1115
1103
    {
1116
1104
      /* Ok, we have index_only cost, now get full rows scan cost */
1117
1105
      cost= param->table->cursor->
1118
 
              read_time(param->real_keynr[(*cur_child)->key_idx], 1,
 
1106
              read_time(param->real_keynr[(*cur_child)->getKeyIndex()], 1,
1119
1107
                        (*cur_child)->records) +
1120
1108
              rows2double((*cur_child)->records) / TIME_FOR_COMPARE;
1121
1109
    }
1133
1121
      roru_index_costs += (*cur_roru_plan)->read_cost;
1134
1122
    }
1135
1123
    else
 
1124
    {
1136
1125
      roru_index_costs +=
1137
 
        ((optimizer::RorIntersectReadPlan*)(*cur_roru_plan))->index_scan_costs;
 
1126
        ((optimizer::RorIntersectReadPlan *)(*cur_roru_plan))->getCostOfIndexScans();
 
1127
    }
1138
1128
    roru_total_records += (*cur_roru_plan)->records;
1139
1129
    roru_intersect_part *= (*cur_roru_plan)->records /
1140
1130
                           param->table->cursor->stats.records;
1158
1148
  */
1159
1149
  double roru_total_cost;
1160
1150
  {
1161
 
    optimizer::CostVector sweep_cost;
1162
 
    Join *join= param->session->lex->select_lex.join;
 
1151
    COST_VECT sweep_cost;
 
1152
    JOIN *join= param->session->lex->select_lex.join;
1163
1153
    bool is_interrupted= test(join && join->tables == 1);
1164
1154
    get_sweep_read_cost(param->table, roru_total_records, is_interrupted,
1165
1155
                        &sweep_cost);
1174
1164
  {
1175
1165
    if ((roru= new (param->mem_root) optimizer::RorUnionReadPlan))
1176
1166
    {
1177
 
      roru->first_ror= roru_read_plans;
1178
 
      roru->last_ror= roru_read_plans + n_child_scans;
 
1167
      roru->merged_scans.assign(roru_read_plans, roru_read_plans + n_child_scans);
1179
1168
      roru->read_cost= roru_total_cost;
1180
1169
      roru->records= roru_total_records;
1181
1170
      return roru;
1185
1174
}
1186
1175
 
1187
1176
 
 
1177
typedef struct st_ror_scan_info
 
1178
{
 
1179
  uint32_t      idx;      /* # of used key in param->keys */
 
1180
  uint32_t      keynr;    /* # of used key in table */
 
1181
  ha_rows   records;  /* estimate of # records this scan will return */
 
1182
 
 
1183
  /* Set of intervals over key fields that will be used for row retrieval. */
 
1184
  optimizer::SEL_ARG   *sel_arg;
 
1185
 
 
1186
  /* Fields used in the query and covered by this ROR scan. */
 
1187
  MyBitmap covered_fields;
 
1188
  uint32_t      used_fields_covered; /* # of set bits in covered_fields */
 
1189
  int       key_rec_length; /* length of key record (including rowid) */
 
1190
 
 
1191
  /*
 
1192
    Cost of reading all index records with values in sel_arg intervals set
 
1193
    (assuming there is no need to access full table records)
 
1194
  */
 
1195
  double    index_read_cost;
 
1196
  uint32_t      first_uncovered_field; /* first unused bit in covered_fields */
 
1197
  uint32_t      key_components; /* # of parts in the key */
 
1198
} ROR_SCAN_INFO;
 
1199
 
1188
1200
 
1189
1201
/*
1190
 
  Create optimizer::RorScanInfo* structure with a single ROR scan on index idx using
 
1202
  Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using
1191
1203
  sel_arg set of intervals.
1192
1204
 
1193
1205
  SYNOPSIS
1202
1214
*/
1203
1215
 
1204
1216
static
1205
 
optimizer::RorScanInfo *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
 
1217
ROR_SCAN_INFO *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
1206
1218
{
1207
 
  optimizer::RorScanInfo *ror_scan= NULL;
 
1219
  ROR_SCAN_INFO *ror_scan;
 
1220
  my_bitmap_map *bitmap_buf;
1208
1221
 
1209
1222
  uint32_t keynr;
1210
1223
 
1211
 
  if (!(ror_scan= (optimizer::RorScanInfo*)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo))))
 
1224
  if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root,
 
1225
                                             sizeof(ROR_SCAN_INFO))))
1212
1226
    return NULL;
1213
1227
 
1214
1228
  ror_scan->idx= idx;
1218
1232
  ror_scan->sel_arg= sel_arg;
1219
1233
  ror_scan->records= param->table->quick_rows[keynr];
1220
1234
 
1221
 
  ror_scan->covered_fields_size= param->table->getShare()->sizeFields();
1222
 
  boost::dynamic_bitset<> tmp_bitset(param->table->getShare()->sizeFields());
1223
 
  tmp_bitset.reset();
1224
 
 
1225
 
  KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
1226
 
  KeyPartInfo *key_part_end= key_part +
 
1235
  if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
 
1236
                                                param->fields_bitmap_size)))
 
1237
    return NULL;
 
1238
 
 
1239
  if (ror_scan->covered_fields.init(bitmap_buf,
 
1240
                                    param->table->s->fields))
 
1241
    return NULL;
 
1242
  ror_scan->covered_fields.clearAll();
 
1243
 
 
1244
  KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part;
 
1245
  KEY_PART_INFO *key_part_end= key_part +
1227
1246
                               param->table->key_info[keynr].key_parts;
1228
 
  for (; key_part != key_part_end; ++key_part)
 
1247
  for (;key_part != key_part_end; ++key_part)
1229
1248
  {
1230
 
    if (param->needed_fields.test(key_part->fieldnr-1))
1231
 
      tmp_bitset.set(key_part->fieldnr-1);
 
1249
    if (param->needed_fields.isBitSet(key_part->fieldnr-1))
 
1250
      ror_scan->covered_fields.setBit(key_part->fieldnr-1);
1232
1251
  }
1233
1252
  double rows= rows2double(param->table->quick_rows[ror_scan->keynr]);
1234
1253
  ror_scan->index_read_cost=
1235
1254
    param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
1236
 
  ror_scan->covered_fields= tmp_bitset.to_ulong();
1237
 
  return ror_scan;
 
1255
  return(ror_scan);
1238
1256
}
1239
1257
 
1240
1258
 
1241
1259
/*
1242
 
  Compare two optimizer::RorScanInfo** by  E(#records_matched) * key_record_length.
 
1260
  Compare two ROR_SCAN_INFO** by  E(#records_matched) * key_record_length.
1243
1261
  SYNOPSIS
1244
1262
    cmp_ror_scan_info()
1245
1263
      a ptr to first compared value
1251
1269
    1 a > b
1252
1270
*/
1253
1271
 
1254
 
static int cmp_ror_scan_info(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
 
1272
static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b)
1255
1273
{
1256
1274
  double val1= rows2double((*a)->records) * (*a)->key_rec_length;
1257
1275
  double val2= rows2double((*b)->records) * (*b)->key_rec_length;
1258
1276
  return (val1 < val2)? -1: (val1 == val2)? 0 : 1;
1259
1277
}
1260
1278
 
1261
 
 
1262
1279
/*
1263
 
  Compare two optimizer::RorScanInfo** by
 
1280
  Compare two ROR_SCAN_INFO** by
1264
1281
   (#covered fields in F desc,
1265
1282
    #components asc,
1266
1283
    number of first not covered component asc)
1276
1293
    1 a > b
1277
1294
*/
1278
1295
 
1279
 
static int cmp_ror_scan_info_covering(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
 
1296
static int cmp_ror_scan_info_covering(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b)
1280
1297
{
1281
1298
  if ((*a)->used_fields_covered > (*b)->used_fields_covered)
1282
1299
    return -1;
1293
1310
  return 0;
1294
1311
}
1295
1312
 
 
1313
 
1296
1314
/* Auxiliary structure for incremental ROR-intersection creation */
1297
 
typedef struct st_ror_intersect_info
 
1315
typedef struct
1298
1316
{
1299
 
  st_ror_intersect_info()
1300
 
    :
1301
 
      param(NULL),
1302
 
      covered_fields(),
1303
 
      out_rows(0.0),
1304
 
      is_covering(false),
1305
 
      index_records(0),
1306
 
      index_scan_costs(0.0),
1307
 
      total_cost(0.0)
1308
 
  {}
1309
 
 
1310
 
  st_ror_intersect_info(const optimizer::Parameter *in_param)
1311
 
    :
1312
 
      param(in_param),
1313
 
      covered_fields(in_param->table->getShare()->sizeFields()),
1314
 
      out_rows(in_param->table->cursor->stats.records),
1315
 
      is_covering(false),
1316
 
      index_records(0),
1317
 
      index_scan_costs(0.0),
1318
 
      total_cost(0.0)
1319
 
  {
1320
 
    covered_fields.reset();
1321
 
  }
1322
 
 
1323
1317
  const optimizer::Parameter *param;
1324
 
  boost::dynamic_bitset<> covered_fields; /* union of fields covered by all scans */
 
1318
  MyBitmap covered_fields; /* union of fields covered by all scans */
1325
1319
  /*
1326
1320
    Fraction of table records that satisfies conditions of all scans.
1327
1321
    This is the number of full records that will be retrieved if a
1337
1331
} ROR_INTERSECT_INFO;
1338
1332
 
1339
1333
 
 
1334
/*
 
1335
  Allocate a ROR_INTERSECT_INFO and initialize it to contain zero scans.
 
1336
 
 
1337
  SYNOPSIS
 
1338
    ror_intersect_init()
 
1339
      param         Parameter from test_quick_select
 
1340
 
 
1341
  RETURN
 
1342
    allocated structure
 
1343
    NULL on error
 
1344
*/
 
1345
 
 
1346
static
 
1347
ROR_INTERSECT_INFO* ror_intersect_init(const optimizer::Parameter *param)
 
1348
{
 
1349
  ROR_INTERSECT_INFO *info;
 
1350
  my_bitmap_map* buf;
 
1351
  if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
 
1352
                                              sizeof(ROR_INTERSECT_INFO))))
 
1353
    return NULL;
 
1354
  info->param= param;
 
1355
  if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
 
1356
                                         param->fields_bitmap_size)))
 
1357
    return NULL;
 
1358
  if (info->covered_fields.init(buf, param->table->s->fields))
 
1359
    return NULL;
 
1360
  info->is_covering= false;
 
1361
  info->index_scan_costs= 0.0;
 
1362
  info->index_records= 0;
 
1363
  info->out_rows= (double) param->table->cursor->stats.records;
 
1364
  info->covered_fields.clearAll();
 
1365
  return info;
 
1366
}
 
1367
 
1340
1368
static void ror_intersect_cpy(ROR_INTERSECT_INFO *dst,
1341
1369
                              const ROR_INTERSECT_INFO *src)
1342
1370
{
1441
1469
*/
1442
1470
 
1443
1471
static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
1444
 
                                   const optimizer::RorScanInfo *scan)
 
1472
                                   const ROR_SCAN_INFO *scan)
1445
1473
{
1446
1474
  double selectivity_mult= 1.0;
1447
 
  KeyPartInfo *key_part= info->param->table->key_info[scan->keynr].key_part;
 
1475
  KEY_PART_INFO *key_part= info->param->table->key_info[scan->keynr].key_part;
1448
1476
  unsigned char key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */
1449
1477
  unsigned char *key_ptr= key_val;
1450
1478
  optimizer::SEL_ARG *sel_arg= NULL;
1451
1479
  optimizer::SEL_ARG *tuple_arg= NULL;
1452
1480
  key_part_map keypart_map= 0;
1453
1481
  bool cur_covered;
1454
 
  bool prev_covered= test(info->covered_fields.test(key_part->fieldnr-1));
 
1482
  bool prev_covered= test(info->covered_fields.isBitSet(key_part->fieldnr-1));
1455
1483
  key_range min_range;
1456
1484
  key_range max_range;
1457
1485
  min_range.key= key_val;
1464
1492
       sel_arg= sel_arg->next_key_part)
1465
1493
  {
1466
1494
    cur_covered=
1467
 
      test(info->covered_fields.test(key_part[sel_arg->part].fieldnr-1));
 
1495
      test(info->covered_fields.isBitSet(key_part[sel_arg->part].fieldnr-1));
1468
1496
    if (cur_covered != prev_covered)
1469
1497
    {
1470
1498
      /* create (part1val, ..., part{n-1}val) tuple. */
1549
1577
*/
1550
1578
 
1551
1579
static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
1552
 
                              optimizer::RorScanInfo* ror_scan, bool is_cpk_scan)
 
1580
                              ROR_SCAN_INFO* ror_scan, bool is_cpk_scan)
1553
1581
{
1554
1582
  double selectivity_mult= 1.0;
1555
1583
 
1576
1604
  {
1577
1605
    info->index_records += info->param->table->quick_rows[ror_scan->keynr];
1578
1606
    info->index_scan_costs += ror_scan->index_read_cost;
1579
 
    boost::dynamic_bitset<> tmp_bitset= ror_scan->bitsToBitset();
1580
 
    info->covered_fields|= tmp_bitset;
1581
 
    if (! info->is_covering && info->param->needed_fields.is_subset_of(info->covered_fields))
 
1607
    bitmap_union(&info->covered_fields, &ror_scan->covered_fields);
 
1608
    if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields,
 
1609
                                               &info->covered_fields))
1582
1610
    {
1583
1611
      info->is_covering= true;
1584
1612
    }
1585
1613
  }
1586
1614
 
1587
1615
  info->total_cost= info->index_scan_costs;
1588
 
  if (! info->is_covering)
 
1616
  if (!info->is_covering)
1589
1617
  {
1590
 
    optimizer::CostVector sweep_cost;
1591
 
    Join *join= info->param->session->lex->select_lex.join;
 
1618
    COST_VECT sweep_cost;
 
1619
    JOIN *join= info->param->session->lex->select_lex.join;
1592
1620
    bool is_interrupted= test(join && join->tables == 1);
1593
1621
    get_sweep_read_cost(info->param->table, double2rows(info->out_rows),
1594
1622
                        is_interrupted, &sweep_cost);
1599
1627
 
1600
1628
 
1601
1629
/*
1602
 
  Get best covering ROR-intersection.
1603
 
  SYNOPSIS
1604
 
    get_best_covering_ror_intersect()
1605
 
      param     Parameter from test_quick_select function.
1606
 
      tree      optimizer::SEL_TREE with sets of intervals for different keys.
1607
 
      read_time Don't return table read plans with cost > read_time.
1608
 
 
1609
 
  RETURN
1610
 
    Best covering ROR-intersection plan
1611
 
    NULL if no plan found.
1612
 
 
1613
 
  NOTES
1614
 
    get_best_ror_intersect must be called for a tree before calling this
1615
 
    function for it.
1616
 
    This function invalidates tree->ror_scans member values.
1617
 
 
1618
 
  The following approximate algorithm is used:
1619
 
    I=set of all covering indexes
1620
 
    F=set of all fields to cover
1621
 
    S={}
1622
 
 
1623
 
    do
1624
 
    {
1625
 
      Order I by (#covered fields in F desc,
1626
 
                  #components asc,
1627
 
                  number of first not covered component asc);
1628
 
      F=F-covered by first(I);
1629
 
      S=S+first(I);
1630
 
      I=I-first(I);
1631
 
    } while F is not empty.
1632
 
*/
1633
 
 
1634
 
static
1635
 
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
1636
 
                                                            optimizer::SEL_TREE *tree,
1637
 
                                                            double read_time)
1638
 
{
1639
 
  optimizer::RorScanInfo **ror_scan_mark;
1640
 
  optimizer::RorScanInfo **ror_scans_end= tree->ror_scans_end;
1641
 
 
1642
 
  for (optimizer::RorScanInfo **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
1643
 
    (*scan)->key_components=
1644
 
      param->table->key_info[(*scan)->keynr].key_parts;
1645
 
 
1646
 
  /*
1647
 
    Run covering-ROR-search algorithm.
1648
 
    Assume set I is [ror_scan .. ror_scans_end)
1649
 
  */
1650
 
 
1651
 
  /*I=set of all covering indexes */
1652
 
  ror_scan_mark= tree->ror_scans;
1653
 
 
1654
 
  boost::dynamic_bitset<> *covered_fields= &param->tmp_covered_fields;
1655
 
  if (covered_fields->empty())
1656
 
  {
1657
 
    covered_fields->resize(param->table->getShare()->sizeFields());
1658
 
  }
1659
 
  covered_fields->reset();
1660
 
 
1661
 
  double total_cost= 0.0f;
1662
 
  ha_rows records=0;
1663
 
  bool all_covered;
1664
 
 
1665
 
  do
1666
 
  {
1667
 
    /*
1668
 
      Update changed sorting info:
1669
 
        #covered fields,
1670
 
        number of first not covered component
1671
 
      Calculate and save these values for each of remaining scans.
1672
 
    */
1673
 
    for (optimizer::RorScanInfo **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
1674
 
    {
1675
 
      /* subtract one bitset from the other */
1676
 
      (*scan)->subtractBitset(*covered_fields);
1677
 
      (*scan)->used_fields_covered=
1678
 
        (*scan)->getBitCount();
1679
 
      (*scan)->first_uncovered_field= (*scan)->findFirstNotSet();
1680
 
    }
1681
 
 
1682
 
    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
1683
 
                       sizeof(optimizer::RorScanInfo*),
1684
 
                       (qsort_cmp)cmp_ror_scan_info_covering);
1685
 
 
1686
 
    /* I=I-first(I) */
1687
 
    total_cost += (*ror_scan_mark)->index_read_cost;
1688
 
    records += (*ror_scan_mark)->records;
1689
 
    if (total_cost > read_time)
1690
 
      return NULL;
1691
 
    /* F=F-covered by first(I) */
1692
 
    boost::dynamic_bitset<> tmp_bitset= (*ror_scan_mark)->bitsToBitset();
1693
 
    *covered_fields|= tmp_bitset;
1694
 
    all_covered= param->needed_fields.is_subset_of(*covered_fields);
1695
 
  } while ((++ror_scan_mark < ror_scans_end) && ! all_covered);
1696
 
 
1697
 
  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
1698
 
    return NULL;
1699
 
 
1700
 
  /*
1701
 
    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
1702
 
    cost total_cost.
1703
 
  */
1704
 
  /* Add priority queue use cost. */
1705
 
  total_cost += rows2double(records)*
1706
 
                log((double)(ror_scan_mark - tree->ror_scans)) /
1707
 
                (TIME_FOR_COMPARE_ROWID * M_LN2);
1708
 
 
1709
 
  if (total_cost > read_time)
1710
 
    return NULL;
1711
 
 
1712
 
  optimizer::RorIntersectReadPlan *trp= NULL;
1713
 
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
1714
 
  {
1715
 
    return trp;
1716
 
  }
1717
 
 
1718
 
  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
1719
 
  if (!(trp->first_scan= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* best_num)))
1720
 
    return NULL;
1721
 
  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
1722
 
  trp->last_scan=  trp->first_scan + best_num;
1723
 
  trp->is_covering= true;
1724
 
  trp->read_cost= total_cost;
1725
 
  trp->records= records;
1726
 
  trp->cpk_scan= NULL;
1727
 
  set_if_smaller(param->table->quick_condition_rows, records);
1728
 
 
1729
 
  return(trp);
1730
 
}
1731
 
 
1732
 
 
1733
 
/*
1734
1630
  Get best ROR-intersection plan using non-covering ROR-intersection search
1735
1631
  algorithm. The returned plan may be covering.
1736
1632
 
1807
1703
    return NULL;
1808
1704
 
1809
1705
  /*
1810
 
    Step1: Collect ROR-able SEL_ARGs and create optimizer::RorScanInfo for each of
 
1706
    Step1: Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of
1811
1707
    them. Also find and save clustered PK scan if there is one.
1812
1708
  */
1813
 
  optimizer::RorScanInfo **cur_ror_scan= NULL;
1814
 
  optimizer::RorScanInfo *cpk_scan= NULL;
 
1709
  ROR_SCAN_INFO **cur_ror_scan= NULL;
 
1710
  ROR_SCAN_INFO *cpk_scan= NULL;
1815
1711
  uint32_t cpk_no= 0;
1816
1712
  bool cpk_scan_used= false;
1817
1713
 
1818
 
  if (! (tree->ror_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* param->keys)))
1819
 
  {
 
1714
  if (! (tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
 
1715
                                                      sizeof(ROR_SCAN_INFO*)*
 
1716
                                                      param->keys)))
1820
1717
    return NULL;
1821
 
  }
1822
1718
  cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
1823
 
           param->table->getShare()->getPrimaryKey() : MAX_KEY);
 
1719
           param->table->s->primary_key : MAX_KEY);
1824
1720
 
1825
1721
  for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
1826
1722
  {
1827
 
    optimizer::RorScanInfo *scan;
 
1723
    ROR_SCAN_INFO *scan;
1828
1724
    if (! tree->ror_scans_map.test(idx))
1829
1725
      continue;
1830
 
    if (! (scan= make_ror_scan(param, idx, tree->keys[idx])))
 
1726
    if (!(scan= make_ror_scan(param, idx, tree->keys[idx])))
1831
1727
      return NULL;
1832
1728
    if (param->real_keynr[idx] == cpk_no)
1833
1729
    {
1841
1737
  tree->ror_scans_end= cur_ror_scan;
1842
1738
  /*
1843
1739
    Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized
1844
 
    optimizer::RorScanInfo's.
 
1740
    ROR_SCAN_INFO's.
1845
1741
    Step 2: Get best ROR-intersection using an approximate algorithm.
1846
1742
  */
1847
 
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(optimizer::RorScanInfo*),
 
1743
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*),
1848
1744
                     (qsort_cmp)cmp_ror_scan_info);
1849
1745
 
1850
 
  optimizer::RorScanInfo **intersect_scans= NULL; /* ROR scans used in index intersection */
1851
 
  optimizer::RorScanInfo **intersect_scans_end= NULL;
1852
 
  if (! (intersect_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*) * tree->n_ror_scans)))
 
1746
  ROR_SCAN_INFO **intersect_scans= NULL; /* ROR scans used in index intersection */
 
1747
  ROR_SCAN_INFO **intersect_scans_end= NULL;
 
1748
  if (! (intersect_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
 
1749
                                                      sizeof(ROR_SCAN_INFO*)*
 
1750
                                                      tree->n_ror_scans)))
1853
1751
    return NULL;
1854
1752
  intersect_scans_end= intersect_scans;
1855
1753
 
1856
1754
  /* Create and incrementally update ROR intersection. */
1857
 
  ROR_INTERSECT_INFO intersect(param);
1858
 
  ROR_INTERSECT_INFO intersect_best(param);
 
1755
  ROR_INTERSECT_INFO *intersect= NULL;
 
1756
  ROR_INTERSECT_INFO *intersect_best= NULL;
 
1757
  if (! (intersect= ror_intersect_init(param)) ||
 
1758
      ! (intersect_best= ror_intersect_init(param)))
 
1759
    return NULL;
1859
1760
 
1860
1761
  /* [intersect_scans,intersect_scans_best) will hold the best intersection */
1861
 
  optimizer::RorScanInfo **intersect_scans_best= NULL;
 
1762
  ROR_SCAN_INFO **intersect_scans_best= NULL;
1862
1763
  cur_ror_scan= tree->ror_scans;
1863
1764
  intersect_scans_best= intersect_scans;
1864
 
  while (cur_ror_scan != tree->ror_scans_end && ! intersect.is_covering)
 
1765
  while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
1865
1766
  {
1866
1767
    /* S= S + first(R);  R= R - first(R); */
1867
 
    if (! ror_intersect_add(&intersect, *cur_ror_scan, false))
 
1768
    if (!ror_intersect_add(intersect, *cur_ror_scan, false))
1868
1769
    {
1869
1770
      cur_ror_scan++;
1870
1771
      continue;
1872
1773
 
1873
1774
    *(intersect_scans_end++)= *(cur_ror_scan++);
1874
1775
 
1875
 
    if (intersect.total_cost < min_cost)
 
1776
    if (intersect->total_cost < min_cost)
1876
1777
    {
1877
1778
      /* Local minimum found, save it */
1878
 
      ror_intersect_cpy(&intersect_best, &intersect);
 
1779
      ror_intersect_cpy(intersect_best, intersect);
1879
1780
      intersect_scans_best= intersect_scans_end;
1880
 
      min_cost = intersect.total_cost;
 
1781
      min_cost = intersect->total_cost;
1881
1782
    }
1882
1783
  }
1883
1784
 
1886
1787
    return NULL;
1887
1788
  }
1888
1789
 
1889
 
  *are_all_covering= intersect.is_covering;
 
1790
  *are_all_covering= intersect->is_covering;
1890
1791
  uint32_t best_num= intersect_scans_best - intersect_scans;
1891
 
  ror_intersect_cpy(&intersect, &intersect_best);
 
1792
  ror_intersect_cpy(intersect, intersect_best);
1892
1793
 
1893
1794
  /*
1894
1795
    Ok, found the best ROR-intersection of non-CPK key scans.
1895
1796
    Check if we should add a CPK scan. If the obtained ROR-intersection is
1896
1797
    covering, it doesn't make sense to add CPK scan.
1897
1798
  */
1898
 
  if (cpk_scan && ! intersect.is_covering)
 
1799
  if (cpk_scan && !intersect->is_covering)
1899
1800
  {
1900
 
    if (ror_intersect_add(&intersect, cpk_scan, true) &&
1901
 
        (intersect.total_cost < min_cost))
 
1801
    if (ror_intersect_add(intersect, cpk_scan, true) &&
 
1802
        (intersect->total_cost < min_cost))
1902
1803
    {
1903
1804
      cpk_scan_used= true;
1904
1805
      intersect_best= intersect; //just set pointer here
1912
1813
    if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
1913
1814
      return trp;
1914
1815
 
1915
 
    if (! (trp->first_scan=
1916
 
           (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)*best_num)))
1917
 
      return NULL;
1918
 
    memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
1919
 
    trp->last_scan=  trp->first_scan + best_num;
1920
 
    trp->is_covering= intersect_best.is_covering;
1921
 
    trp->read_cost= intersect_best.total_cost;
 
1816
    trp->ror_range_scans.assign(intersect_scans, intersect_scans + best_num);
 
1817
    trp->setRowRetrievalNecessary(intersect_best->is_covering);
 
1818
    trp->read_cost= intersect_best->total_cost;
1922
1819
    /* Prevent divisons by zero */
1923
 
    ha_rows best_rows = double2rows(intersect_best.out_rows);
 
1820
    ha_rows best_rows = double2rows(intersect_best->out_rows);
1924
1821
    if (! best_rows)
1925
1822
      best_rows= 1;
1926
1823
    set_if_smaller(param->table->quick_condition_rows, best_rows);
1927
1824
    trp->records= best_rows;
1928
 
    trp->index_scan_costs= intersect_best.index_scan_costs;
 
1825
    trp->setCostOfIndexScans(intersect_best->index_scan_costs);
1929
1826
    trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
1930
1827
  }
1931
1828
  return trp;
1933
1830
 
1934
1831
 
1935
1832
/*
 
1833
  Get best covering ROR-intersection.
 
1834
  SYNOPSIS
 
1835
    get_best_covering_ror_intersect()
 
1836
      param     Parameter from test_quick_select function.
 
1837
      tree      optimizer::SEL_TREE with sets of intervals for different keys.
 
1838
      read_time Don't return table read plans with cost > read_time.
 
1839
 
 
1840
  RETURN
 
1841
    Best covering ROR-intersection plan
 
1842
    NULL if no plan found.
 
1843
 
 
1844
  NOTES
 
1845
    get_best_ror_intersect must be called for a tree before calling this
 
1846
    function for it.
 
1847
    This function invalidates tree->ror_scans member values.
 
1848
 
 
1849
  The following approximate algorithm is used:
 
1850
    I=set of all covering indexes
 
1851
    F=set of all fields to cover
 
1852
    S={}
 
1853
 
 
1854
    do
 
1855
    {
 
1856
      Order I by (#covered fields in F desc,
 
1857
                  #components asc,
 
1858
                  number of first not covered component asc);
 
1859
      F=F-covered by first(I);
 
1860
      S=S+first(I);
 
1861
      I=I-first(I);
 
1862
    } while F is not empty.
 
1863
*/
 
1864
 
 
1865
static
 
1866
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
 
1867
                                                            optimizer::SEL_TREE *tree,
 
1868
                                                            double read_time)
 
1869
{
 
1870
  ROR_SCAN_INFO **ror_scan_mark;
 
1871
  ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
 
1872
 
 
1873
  for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
 
1874
    (*scan)->key_components=
 
1875
      param->table->key_info[(*scan)->keynr].key_parts;
 
1876
 
 
1877
  /*
 
1878
    Run covering-ROR-search algorithm.
 
1879
    Assume set I is [ror_scan .. ror_scans_end)
 
1880
  */
 
1881
 
 
1882
  /*I=set of all covering indexes */
 
1883
  ror_scan_mark= tree->ror_scans;
 
1884
 
 
1885
  MyBitmap *covered_fields= &param->tmp_covered_fields;
 
1886
  if (! covered_fields->getBitmap())
 
1887
  {
 
1888
    my_bitmap_map *tmp_bitmap= (my_bitmap_map*)alloc_root(param->mem_root,
 
1889
                                               param->fields_bitmap_size);
 
1890
    covered_fields->setBitmap(tmp_bitmap);
 
1891
  }
 
1892
  if (! covered_fields->getBitmap() ||
 
1893
      covered_fields->init(covered_fields->getBitmap(),
 
1894
                           param->table->s->fields))
 
1895
    return 0;
 
1896
  covered_fields->clearAll();
 
1897
 
 
1898
  double total_cost= 0.0f;
 
1899
  ha_rows records=0;
 
1900
  bool all_covered;
 
1901
 
 
1902
  do
 
1903
  {
 
1904
    /*
 
1905
      Update changed sorting info:
 
1906
        #covered fields,
 
1907
        number of first not covered component
 
1908
      Calculate and save these values for each of remaining scans.
 
1909
    */
 
1910
    for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
 
1911
    {
 
1912
      bitmap_subtract(&(*scan)->covered_fields, covered_fields);
 
1913
      (*scan)->used_fields_covered=
 
1914
        (*scan)->covered_fields.getBitsSet();
 
1915
      (*scan)->first_uncovered_field=
 
1916
        (*scan)->covered_fields.getFirst();
 
1917
    }
 
1918
 
 
1919
    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
 
1920
                       sizeof(ROR_SCAN_INFO*),
 
1921
                       (qsort_cmp)cmp_ror_scan_info_covering);
 
1922
 
 
1923
    /* I=I-first(I) */
 
1924
    total_cost += (*ror_scan_mark)->index_read_cost;
 
1925
    records += (*ror_scan_mark)->records;
 
1926
    if (total_cost > read_time)
 
1927
      return NULL;
 
1928
    /* F=F-covered by first(I) */
 
1929
    bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
 
1930
    all_covered= bitmap_is_subset(&param->needed_fields, covered_fields);
 
1931
  } while ((++ror_scan_mark < ror_scans_end) && !all_covered);
 
1932
 
 
1933
  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
 
1934
    return NULL;
 
1935
 
 
1936
  /*
 
1937
    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
 
1938
    cost total_cost.
 
1939
  */
 
1940
  /* Add priority queue use cost. */
 
1941
  total_cost += rows2double(records)*
 
1942
                log((double)(ror_scan_mark - tree->ror_scans)) /
 
1943
                (TIME_FOR_COMPARE_ROWID * M_LN2);
 
1944
 
 
1945
  if (total_cost > read_time)
 
1946
    return NULL;
 
1947
 
 
1948
  optimizer::RorIntersectReadPlan *trp= NULL;
 
1949
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
 
1950
  {
 
1951
    return trp;
 
1952
  }
 
1953
 
 
1954
  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
 
1955
  trp->ror_range_scans.assign(tree->ror_scans, tree->ror_scans + best_num);
 
1956
  trp->setRowRetrievalNecessary(true);
 
1957
  trp->read_cost= total_cost;
 
1958
  trp->records= records;
 
1959
  trp->cpk_scan= NULL;
 
1960
  set_if_smaller(param->table->quick_condition_rows, records);
 
1961
 
 
1962
  return(trp);
 
1963
}
 
1964
 
 
1965
 
 
1966
/*
1936
1967
  Get best "range" table read plan for given optimizer::SEL_TREE, also update some info
1937
1968
 
1938
1969
  SYNOPSIS
1939
1970
    get_key_scans_params()
1940
 
      session
1941
1971
      param                    Parameters from test_quick_select
1942
1972
      tree                     Make range select for this optimizer::SEL_TREE
1943
1973
      index_read_must_be_used  true <=> assume 'index only' option will be set
1959
1989
    NULL if no plan found or error occurred
1960
1990
*/
1961
1991
 
1962
 
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
1963
 
                                                      optimizer::Parameter *param,
 
1992
static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
1964
1993
                                                      optimizer::SEL_TREE *tree,
1965
1994
                                                      bool index_read_must_be_used,
1966
1995
                                                      bool update_tbl_stats,
1986
2015
    if (*key)
1987
2016
    {
1988
2017
      ha_rows found_records;
1989
 
      optimizer::CostVector cost;
 
2018
      COST_VECT cost;
1990
2019
      double found_read_time= 0.0;
1991
2020
      uint32_t mrr_flags, buf_size;
1992
2021
      uint32_t keynr= param->real_keynr[idx];
1997
2026
      bool read_index_only= index_read_must_be_used ||
1998
2027
                            param->table->covering_keys.test(keynr);
1999
2028
 
2000
 
      found_records= check_quick_select(session, param, idx, read_index_only, *key,
 
2029
      found_records= check_quick_select(param, idx, read_index_only, *key,
2001
2030
                                        update_tbl_stats, &mrr_flags,
2002
2031
                                        &buf_size, &cost);
2003
2032
      found_read_time= cost.total_cost();
2026
2055
      read_plan->records= best_records;
2027
2056
      read_plan->is_ror= tree->ror_scans_map.test(idx);
2028
2057
      read_plan->read_cost= read_time;
2029
 
      read_plan->mrr_buf_size= best_buf_size;
 
2058
      read_plan->setMRRBufferSize(best_buf_size);
2030
2059
    }
2031
2060
  }
2032
2061
 
2077
2106
                                                parent_alloc)))
2078
2107
  {
2079
2108
    alloc= parent_alloc ? parent_alloc : &quick_intersect->alloc;
2080
 
    for (; first_scan != last_scan; ++first_scan)
 
2109
    for (vector<struct st_ror_scan_info *>::iterator it= ror_range_scans.begin();
 
2110
         it != ror_range_scans.end();
 
2111
         ++it)
2081
2112
    {
2082
2113
      if (! (quick= optimizer::get_quick_select(param,
2083
 
                                                (*first_scan)->idx,
2084
 
                                                (*first_scan)->sel_arg,
 
2114
                                                (*it)->idx,
 
2115
                                                (*it)->sel_arg,
2085
2116
                                                HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
2086
2117
                                                0,
2087
2118
                                                alloc)) ||
2116
2147
optimizer::QuickSelectInterface *optimizer::RorUnionReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
2117
2148
{
2118
2149
  optimizer::QuickRorUnionSelect *quick_roru= NULL;
2119
 
  optimizer::TableReadPlan **scan= NULL;
2120
2150
  optimizer::QuickSelectInterface *quick= NULL;
2121
2151
  /*
2122
2152
    It is impossible to construct a ROR-union that will not retrieve full
2124
2154
  */
2125
2155
  if ((quick_roru= new optimizer::QuickRorUnionSelect(param->session, param->table)))
2126
2156
  {
2127
 
    for (scan= first_ror; scan != last_ror; scan++)
 
2157
    for (vector<optimizer::TableReadPlan *>::iterator it= merged_scans.begin();
 
2158
         it != merged_scans.end();
 
2159
         ++it)
2128
2160
    {
2129
 
      if (! (quick= (*scan)->make_quick(param, false, &quick_roru->alloc)) ||
 
2161
      if (! (quick= (*it)->make_quick(param, false, &quick_roru->alloc)) ||
2130
2162
          quick_roru->push_quick_back(quick))
2131
2163
      {
2132
2164
        return NULL;
2528
2560
  field->setWriteSet();
2529
2561
 
2530
2562
  Item_result cmp_type= field->cmp_type();
2531
 
  if (!((ref_tables | field->getTable()->map) & param_comp))
 
2563
  if (!((ref_tables | field->table->map) & param_comp))
2532
2564
    ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv);
2533
2565
  Item_equal *item_equal= field_item->item_equal;
2534
2566
  if (item_equal)
2542
2574
 
2543
2575
      if (field->eq(f))
2544
2576
        continue;
2545
 
      if (!((ref_tables | f->getTable()->map) & param_comp))
 
2577
      if (!((ref_tables | f->table->map) & param_comp))
2546
2578
      {
2547
2579
        tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv);
2548
2580
        ftree= !ftree ? tree : tree_and(param, ftree, tree);
2600
2632
    }
2601
2633
    return(tree);
2602
2634
  }
2603
 
  /* Here when simple cond
2604
 
     There are limits on what kinds of const items we can evaluate, grep for
2605
 
     DontEvaluateMaterializedSubqueryTooEarly.
2606
 
  */
2607
 
  if (cond->const_item()  && !cond->is_expensive())
 
2635
  /* Here when simple cond */
 
2636
  if (cond->const_item())
2608
2637
  {
2609
2638
    /*
2610
2639
      During the cond->val_int() evaluation we can come across a subselect
2696
2725
      field->setWriteSet();
2697
2726
 
2698
2727
      Item_result cmp_type= field->cmp_type();
2699
 
      if (!((ref_tables | field->getTable()->map) & param_comp))
 
2728
      if (!((ref_tables | field->table->map) & param_comp))
2700
2729
      {
2701
2730
        tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC,
2702
2731
                           value,cmp_type);
2735
2764
                   Item_func::Functype type,
2736
2765
                   Item *value, Item_result)
2737
2766
{
2738
 
  if (field->getTable() != param->table)
 
2767
  if (field->table != param->table)
2739
2768
    return 0;
2740
2769
 
2741
2770
  KEY_PART *key_part = param->key_parts;
2805
2834
  param->session->mem_root= param->old_root;
2806
2835
  if (!value)                                   // IS NULL or IS NOT NULL
2807
2836
  {
2808
 
    if (field->getTable()->maybe_null)          // Can't use a key on this
 
2837
    if (field->table->maybe_null)               // Can't use a key on this
2809
2838
      goto end;
2810
2839
    if (!maybe_null)                            // Not null field
2811
2840
    {
2890
2919
    {
2891
2920
      if (unlikely(length < field_length))
2892
2921
      {
2893
 
        /*
2894
 
          This can only happen in a table created with UNIREG where one key
2895
 
          overlaps many fields
2896
 
        */
2897
 
        length= field_length;
 
2922
        /*
 
2923
          This can only happen in a table created with UNIREG where one key
 
2924
          overlaps many fields
 
2925
        */
 
2926
        length= field_length;
2898
2927
      }
2899
2928
      else
2900
 
        field_length= length;
 
2929
        field_length= length;
2901
2930
    }
2902
2931
    length+=offset;
2903
 
    if (!(min_str= (unsigned char*) alloc->alloc_root(length*2)))
2904
 
    {
 
2932
    if (!(min_str= (unsigned char*) alloc_root(alloc, length*2)))
2905
2933
      goto end;
2906
 
    }
2907
2934
 
2908
2935
    max_str=min_str+length;
2909
2936
    if (maybe_null)
3122
3149
    tree= &optimizer::null_element;                        // cmp with NULL is never true
3123
3150
    goto end;
3124
3151
  }
3125
 
 
3126
 
  /*
3127
 
    Any predicate except "<=>"(null-safe equality operator) involving NULL as a
3128
 
    constant is always FALSE
3129
 
    Put IMPOSSIBLE Tree(null_element) here.
3130
 
  */  
3131
 
  if (type != Item_func::EQUAL_FUNC && field->is_real_null())
3132
 
  {
3133
 
    tree= &optimizer::null_element;
3134
 
    goto end;
3135
 
  }
3136
 
 
3137
 
  str= (unsigned char*) alloc->alloc_root(key_part->store_length+1);
 
3152
  str= (unsigned char*) alloc_root(alloc, key_part->store_length+1);
3138
3153
  if (!str)
3139
3154
    goto end;
3140
3155
  if (maybe_null)
3734
3749
  /* Ok got a tuple */
3735
3750
  RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];
3736
3751
 
3737
 
  range->ptr= (char*)(size_t)(key_tree->part);
 
3752
  range->ptr= (char*)(int)(key_tree->part);
3738
3753
  {
3739
3754
    range->range_flag= cur->min_key_flag | cur->max_key_flag;
3740
3755
 
3810
3825
*/
3811
3826
 
3812
3827
static
3813
 
ha_rows check_quick_select(Session *session,
3814
 
                           optimizer::Parameter *param,
 
3828
ha_rows check_quick_select(optimizer::Parameter *param,
3815
3829
                           uint32_t idx,
3816
3830
                           bool index_only,
3817
3831
                           optimizer::SEL_ARG *tree,
3818
3832
                           bool update_tbl_stats,
3819
3833
                           uint32_t *mrr_flags,
3820
3834
                           uint32_t *bufsize,
3821
 
                           optimizer::CostVector *cost)
 
3835
                           COST_VECT *cost)
3822
3836
{
3823
3837
  SEL_ARG_RANGE_SEQ seq;
3824
3838
  RANGE_SEQ_IF seq_if = {sel_arg_range_seq_init, sel_arg_range_seq_next};
3852
3866
  bool pk_is_clustered= cursor->primary_key_is_clustered();
3853
3867
  if (index_only &&
3854
3868
      (param->table->index_flags(keynr) & HA_KEYREAD_ONLY) &&
3855
 
      !(pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
 
3869
      !(pk_is_clustered && keynr == param->table->s->primary_key))
3856
3870
     *mrr_flags |= HA_MRR_INDEX_ONLY;
3857
3871
 
3858
 
  if (session->lex->sql_command != SQLCOM_SELECT)
 
3872
  if (current_session->lex->sql_command != SQLCOM_SELECT)
3859
3873
    *mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;
3860
3874
 
3861
3875
  *bufsize= param->session->variables.read_rnd_buff_size;
3887
3901
  else
3888
3902
  {
3889
3903
    /* Clustered PK scan is always a ROR scan (TODO: same as above) */
3890
 
    if (param->table->getShare()->getPrimaryKey() == keynr && pk_is_clustered)
 
3904
    if (param->table->s->primary_key == keynr && pk_is_clustered)
3891
3905
      param->is_ror_scan= true;
3892
3906
  }
3893
3907
 
3934
3948
 
3935
3949
static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts)
3936
3950
{
3937
 
  KeyInfo *table_key= param->table->key_info + keynr;
3938
 
  KeyPartInfo *key_part= table_key->key_part + nparts;
3939
 
  KeyPartInfo *key_part_end= (table_key->key_part +
 
3951
  KEY *table_key= param->table->key_info + keynr;
 
3952
  KEY_PART_INFO *key_part= table_key->key_part + nparts;
 
3953
  KEY_PART_INFO *key_part_end= (table_key->key_part +
3940
3954
                                table_key->key_parts);
3941
3955
  uint32_t pk_number;
3942
3956
 
3943
 
  for (KeyPartInfo *kp= table_key->key_part; kp < key_part; kp++)
 
3957
  for (KEY_PART_INFO *kp= table_key->key_part; kp < key_part; kp++)
3944
3958
  {
3945
3959
    uint16_t fieldnr= param->table->key_info[keynr].
3946
3960
                    key_part[kp - table_key->key_part].fieldnr - 1;
3947
 
    if (param->table->getField(fieldnr)->key_length() != kp->length)
 
3961
    if (param->table->field[fieldnr]->key_length() != kp->length)
3948
3962
      return false;
3949
3963
  }
3950
3964
 
3952
3966
    return true;
3953
3967
 
3954
3968
  key_part= table_key->key_part + nparts;
3955
 
  pk_number= param->table->getShare()->getPrimaryKey();
 
3969
  pk_number= param->table->s->primary_key;
3956
3970
  if (!param->table->cursor->primary_key_is_clustered() || pk_number == MAX_KEY)
3957
3971
    return false;
3958
3972
 
3959
 
  KeyPartInfo *pk_part= param->table->key_info[pk_number].key_part;
3960
 
  KeyPartInfo *pk_part_end= pk_part +
 
3973
  KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part;
 
3974
  KEY_PART_INFO *pk_part_end= pk_part +
3961
3975
                              param->table->key_info[pk_number].key_parts;
3962
3976
  for (;(key_part!=key_part_end) && (pk_part != pk_part_end);
3963
3977
       ++key_part, ++pk_part)
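
For context: is_key_scan_ror() answers whether a scan of this index returns rows in rowid (primary key) order, which is what ROR-intersection plans require. The visible checks are (a) every key part used so far covers its whole field, and (b) on a clustered-PK engine the trailing key parts line up with the primary key. A simplified sketch of those two checks over plain structs; the types and the final-part handling are illustrative, not the drizzled ones:

#include <cstdint>
#include <cstdio>
#include <vector>

struct KeyPart { uint32_t field_id; uint32_t part_len; uint32_t field_len; };

bool is_ror(const std::vector<KeyPart>& index_parts,
            const std::vector<KeyPart>& pk_parts,
            size_t nparts, bool pk_is_clustered) {
  for (size_t i = 0; i < nparts; ++i)
    if (index_parts[i].part_len != index_parts[i].field_len)
      return false;                     // prefix key part: order not preserved
  if (nparts == index_parts.size())
    return true;                        // the whole key is used
  if (!pk_is_clustered)
    return false;
  size_t tail = index_parts.size() - nparts;
  if (tail != pk_parts.size())
    return false;
  for (size_t i = 0; i < tail; ++i)     // remaining parts must be the PK
    if (index_parts[nparts + i].field_id != pk_parts[i].field_id)
      return false;
  return true;
}

int main() {
  std::vector<KeyPart> idx = {{1, 4, 4}, {2, 8, 8}};   // (a, pk)
  std::vector<KeyPart> pk  = {{2, 8, 8}};
  std::printf("ROR: %d\n", (int) is_ror(idx, pk, 1, true));
  return 0;
}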
3978
3992
                            uint32_t mrr_buf_size,
3979
3993
                            memory::Root *parent_alloc)
3980
3994
{
3981
 
  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(param->session,
3982
 
                                                                      param->table,
3983
 
                                                                      param->real_keynr[idx],
3984
 
                                                                      test(parent_alloc),
3985
 
                                                                      NULL);
 
3995
  optimizer::QuickRangeSelect *quick= NULL;
 
3996
  bool create_err= false;
 
3997
 
 
3998
  quick= new optimizer::QuickRangeSelect(param->session,
 
3999
                                         param->table,
 
4000
                                         param->real_keynr[idx],
 
4001
                                         test(parent_alloc),
 
4002
                                         NULL,
 
4003
                                         &create_err);
3986
4004
 
3987
4005
  if (quick)
3988
4006
  {
3989
 
          if (get_quick_keys(param,
 
4007
    if (create_err ||
 
4008
              get_quick_keys(param,
3990
4009
                       quick,
3991
4010
                       param->key[idx],
3992
4011
                       key_tree,
4002
4021
    {
4003
4022
      quick->mrr_flags= mrr_flags;
4004
4023
      quick->mrr_buf_size= mrr_buf_size;
4005
 
      if (parent_alloc)
4006
 
      {
4007
 
        quick->key_parts=(KEY_PART*)
4008
 
          parent_alloc->memdup_root( (char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
4009
 
      }
4010
 
      else
4011
 
      {
4012
 
        quick->key_parts=(KEY_PART*)
4013
 
          quick->alloc.memdup_root((char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
4014
 
      }
 
4024
      quick->key_parts=(KEY_PART*)
 
4025
        memdup_root(parent_alloc? parent_alloc : &quick->alloc,
 
4026
                    (char*) param->key[idx],
 
4027
                    sizeof(KEY_PART)*
 
4028
                    param->table->key_info[param->real_keynr[idx]].key_parts);
4015
4029
    }
4016
4030
  }
4017
4031
  return quick;
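
Because a constructor cannot return an error code, one side of this hunk has QuickRangeSelect report allocation failure through the extra &create_err out-parameter, which get_quick_select() tests before trusting the object. A generic sketch of that out-parameter pattern with hypothetical names:

#include <cstdio>

// Hypothetical object whose constructor can fail but cannot return a status.
class QuickScan {
public:
  QuickScan(int key, bool* create_err) {
    bool setup_ok = (key >= 0);         // stand-in for real allocation work
    *create_err = !setup_ok;            // failure is reported via the flag
  }
};

QuickScan* make_scan(int key) {
  bool create_err = false;
  QuickScan* q = new QuickScan(key, &create_err);
  if (create_err) {                     // mirrors: if (create_err || ...)
    delete q;
    return nullptr;
  }
  return q;
}

int main() {
  QuickScan* ok  = make_scan(1);
  QuickScan* bad = make_scan(-1);       // returns nullptr
  std::printf("ok=%p bad=%p\n", (void*) ok, (void*) bad);
  delete ok;
  return 0;
}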
4125
4139
    if (length == (uint32_t) (tmp_max_key - param->max_key) &&
4126
4140
              ! memcmp(param->min_key,param->max_key,length))
4127
4141
    {
4128
 
      KeyInfo *table_key= quick->head->key_info+quick->index;
 
4142
      KEY *table_key= quick->head->key_info+quick->index;
4129
4143
      flag= EQ_RANGE;
4130
4144
      if ((table_key->flags & (HA_NOSAME)) == HA_NOSAME &&
4131
4145
                key->part == table_key->key_parts-1)
4207
4221
}
4208
4222
 
4209
4223
 
4210
 
bool optimizer::QuickSelectInterface::is_keys_used(const boost::dynamic_bitset<>& fields)
 
4224
bool optimizer::QuickSelectInterface::is_keys_used(const MyBitmap *fields)
4211
4225
{
4212
4226
  return is_key_used(head, index, fields);
4213
4227
}
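
is_keys_used() receives the set of updated fields as a bitmap, and the two sides of this hunk differ only in its representation: drizzled's MyBitmap versus boost::dynamic_bitset<>. For reference, the dynamic_bitset operations this file leans on, shown stand-alone (requires the Boost headers):

#include <boost/dynamic_bitset.hpp>
#include <iostream>

int main() {
  // Field numbers 0..7; a set bit means "this field is modified".
  boost::dynamic_bitset<> fields(8);
  fields.set(2);
  fields.set(5);

  std::cout << "field 2 used: " << fields.test(2) << '\n'    // membership
            << "fields used:  " << fields.count() << '\n';   // popcount

  // Set subtraction, as used by RorScanInfo::subtractBitset() further down.
  boost::dynamic_bitset<> covered(8);
  covered.set(2);
  fields -= covered;                     // drop fields already covered
  std::cout << "after -=, field 2 used: " << fields.test(2) << '\n';
  return 0;
}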
4237
4251
                                                                 table_reference_st *ref,
4238
4252
                                                                 ha_rows records)
4239
4253
{
4240
 
  memory::Root *old_root= NULL;
4241
 
  memory::Root *alloc= NULL;
4242
 
  KeyInfo *key_info = &table->key_info[ref->key];
 
4254
  memory::Root *old_root, *alloc;
 
4255
  optimizer::QuickRangeSelect *quick= NULL;
 
4256
  KEY *key_info = &table->key_info[ref->key];
4243
4257
  KEY_PART *key_part;
4244
4258
  optimizer::QuickRange *range= NULL;
4245
4259
  uint32_t part;
4246
 
  optimizer::CostVector cost;
 
4260
  bool create_err= false;
 
4261
  COST_VECT cost;
4247
4262
 
4248
4263
  old_root= session->mem_root;
4249
4264
  /* The following call may change session->mem_root */
4250
 
  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0);
 
4265
  quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0, &create_err);
4251
4266
  /* save mem_root set by QuickRangeSelect constructor */
4252
4267
  alloc= session->mem_root;
4253
4268
  /*
4256
4271
  */
4257
4272
  session->mem_root= old_root;
4258
4273
 
4259
 
  if (! quick)
 
4274
  if (!quick || create_err)
4260
4275
    return 0;                   /* no ranges found */
4261
4276
  if (quick->init())
4262
4277
    goto err;
4275
4290
 
4276
4291
 
4277
4292
  if (!(quick->key_parts=key_part=(KEY_PART *)
4278
 
        quick->alloc.alloc_root(sizeof(KEY_PART)*ref->key_parts)))
 
4293
        alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts)))
4279
4294
    goto err;
4280
4295
 
4281
4296
  for (part=0 ; part < ref->key_parts ;part++,key_part++)
4320
4335
 
4321
4336
  quick->mrr_buf_size= session->variables.read_rnd_buff_size;
4322
4337
  if (table->cursor->multi_range_read_info(quick->index, 1, (uint32_t)records,
4323
 
                                           &quick->mrr_buf_size,
4324
 
                                           &quick->mrr_flags, &cost))
 
4338
                                         &quick->mrr_buf_size,
 
4339
                                         &quick->mrr_flags, &cost))
4325
4340
    goto err;
4326
4341
 
4327
4342
  return quick;
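
Note the discipline in get_quick_select_for_ref() above: session->mem_root is saved before the QuickRangeSelect constructor (which repoints it at the new object's own allocator) and restored immediately after, so later allocations do not land in, and die with, the quick select. A compact sketch of that save/swap/restore pattern with hypothetical types:

#include <cstdio>

struct Arena { const char* name; };      // stand-in for memory::Root
struct Session { Arena* mem_root; };     // stand-in for drizzled::Session

Arena quick_arena{"quick"};

// Mimics the QuickRangeSelect constructor, which redirects session->mem_root
// to the object's own arena while it builds itself.
struct Quick {
  explicit Quick(Session* s) { s->mem_root = &quick_arena; }
};

int main() {
  Arena session_arena{"session"};
  Session session{&session_arena};

  Arena* old_root = session.mem_root;    // save: "old_root= session->mem_root"
  Quick quick(&session);                 // may change session.mem_root
  Arena* alloc = session.mem_root;       // keep the quick select's arena
  session.mem_root = old_root;           // restore before doing anything else

  std::printf("restored=%s quick-owned=%s\n", session.mem_root->name,
              alloc->name);
  return 0;
}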
4399
4414
}
4400
4415
 
4401
4416
 
4402
 
static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);
 
4417
static inline uint32_t get_field_keypart(KEY *index, Field *field);
4403
4418
 
4404
4419
static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
4405
4420
                                                        optimizer::SEL_TREE *range_tree,
4406
4421
                                                        optimizer::Parameter *param,
4407
4422
                                                        uint32_t *param_idx);
4408
4423
 
4409
 
static bool get_constant_key_infix(KeyInfo *index_info,
 
4424
static bool get_constant_key_infix(KEY *index_info,
4410
4425
                                   optimizer::SEL_ARG *index_range_tree,
4411
 
                                   KeyPartInfo *first_non_group_part,
4412
 
                                   KeyPartInfo *min_max_arg_part,
4413
 
                                   KeyPartInfo *last_part,
 
4426
                                   KEY_PART_INFO *first_non_group_part,
 
4427
                                   KEY_PART_INFO *min_max_arg_part,
 
4428
                                   KEY_PART_INFO *last_part,
4414
4429
                                   Session *session,
4415
4430
                                   unsigned char *key_infix,
4416
4431
                                   uint32_t *key_infix_len,
4417
 
                                   KeyPartInfo **first_non_infix_part);
 
4432
                                   KEY_PART_INFO **first_non_infix_part);
4418
4433
 
4419
4434
static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);
4420
4435
 
4421
4436
static void
4422
4437
cost_group_min_max(Table* table,
4423
 
                   KeyInfo *index_info,
 
4438
                   KEY *index_info,
4424
4439
                   uint32_t used_key_parts,
4425
4440
                   uint32_t group_key_parts,
4426
4441
                   optimizer::SEL_TREE *range_tree,
4563
4578
get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree)
4564
4579
{
4565
4580
  Session *session= param->session;
4566
 
  Join *join= session->lex->current_select->join;
 
4581
  JOIN *join= session->lex->current_select->join;
4567
4582
  Table *table= param->table;
4568
4583
  bool have_min= false;              /* true if there is a MIN function. */
4569
4584
  bool have_max= false;              /* true if there is a MAX function. */
4570
4585
  Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
4571
 
  KeyPartInfo *min_max_arg_part= NULL; /* The corresponding keypart. */
 
4586
  KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
4572
4587
  uint32_t group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
4573
 
  KeyInfo *index_info= NULL;    /* The index chosen for data access. */
 
4588
  KEY *index_info= NULL;    /* The index chosen for data access. */
4574
4589
  uint32_t index= 0;            /* The id of the chosen index. */
4575
4590
  uint32_t group_key_parts= 0;  // Number of index key parts in the group prefix.
4576
4591
  uint32_t used_key_parts= 0;   /* Number of index key parts used for access. */
4578
4593
  uint32_t key_infix_len= 0;          /* Length of key_infix. */
4579
4594
  optimizer::GroupMinMaxReadPlan *read_plan= NULL; /* The eventually constructed TRP. */
4580
4595
  uint32_t key_part_nr;
4581
 
  Order *tmp_group= NULL;
 
4596
  order_st *tmp_group= NULL;
4582
4597
  Item *item= NULL;
4583
4598
  Item_field *item_field= NULL;
4584
4599
 
4591
4606
       (! join->select_distinct)) ||
4592
4607
      (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
4593
4608
    return NULL;
4594
 
  if (table->getShare()->sizeKeys() == 0)        /* There are no indexes to use. */
 
4609
  if (table->s->keys == 0)        /* There are no indexes to use. */
4595
4610
    return NULL;
4596
4611
 
4597
4612
  /* Analyze the query in more detail. */
4650
4665
    (GA1,GA2) are all true. If there is more than one such index, select the
4651
4666
    first one. Here we set the variables: group_prefix_len and index_info.
4652
4667
  */
4653
 
  KeyInfo *cur_index_info= table->key_info;
4654
 
  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->sizeKeys();
4655
 
  KeyPartInfo *cur_part= NULL;
4656
 
  KeyPartInfo *end_part= NULL; /* Last part for loops. */
 
4668
  KEY *cur_index_info= table->key_info;
 
4669
  KEY *cur_index_info_end= cur_index_info + table->s->keys;
 
4670
  KEY_PART_INFO *cur_part= NULL;
 
4671
  KEY_PART_INFO *end_part= NULL; /* Last part for loops. */
4657
4672
  /* Last index part. */
4658
 
  KeyPartInfo *last_part= NULL;
4659
 
  KeyPartInfo *first_non_group_part= NULL;
4660
 
  KeyPartInfo *first_non_infix_part= NULL;
 
4673
  KEY_PART_INFO *last_part= NULL;
 
4674
  KEY_PART_INFO *first_non_group_part= NULL;
 
4675
  KEY_PART_INFO *first_non_infix_part= NULL;
4661
4676
  uint32_t key_infix_parts= 0;
4662
4677
  uint32_t cur_group_key_parts= 0;
4663
4678
  uint32_t cur_group_prefix_len= 0;
4676
4691
  uint32_t cur_key_infix_len= 0;
4677
4692
  unsigned char cur_key_infix[MAX_KEY_LENGTH];
4678
4693
  uint32_t cur_used_key_parts= 0;
4679
 
  uint32_t pk= param->table->getShare()->getPrimaryKey();
 
4694
  uint32_t pk= param->table->s->primary_key;
4680
4695
 
4681
4696
  for (uint32_t cur_index= 0;
4682
4697
       cur_index_info != cur_index_info_end;
4699
4714
        (table->cursor->getEngine()->check_flag(HTON_BIT_PRIMARY_KEY_IN_READ_INDEX)))
4700
4715
    {
4701
4716
      /* For each table field */
4702
 
      for (uint32_t i= 0; i < table->getShare()->sizeFields(); i++)
 
4717
      for (uint32_t i= 0; i < table->s->fields; i++)
4703
4718
      {
4704
 
        Field *cur_field= table->getField(i);
 
4719
        Field *cur_field= table->field[i];
4705
4720
        /*
4706
4721
          If the field is used in the current query ensure that it's
4707
4722
          part of 'cur_index'
4869
4884
          Store the first and last keyparts that need to be analyzed
4870
4885
          into one array that can be passed as a parameter.
4871
4886
        */
4872
 
        KeyPartInfo *key_part_range[2];
 
4887
        KEY_PART_INFO *key_part_range[2];
4873
4888
        key_part_range[0]= first_non_group_part;
4874
4889
        key_part_range[1]= last_part;
4875
4890
 
4910
4925
                                           param,
4911
4926
                                           &cur_param_idx);
4912
4927
      /* Check if this range tree can be used for prefix retrieval. */
4913
 
      optimizer::CostVector dummy_cost;
 
4928
      COST_VECT dummy_cost;
4914
4929
      uint32_t mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
4915
4930
      uint32_t mrr_bufsize= 0;
4916
 
      cur_quick_prefix_records= check_quick_select(session,
4917
 
                                                   param,
 
4931
      cur_quick_prefix_records= check_quick_select(param,
4918
4932
                                                   cur_param_idx,
4919
4933
                                                   false /*don't care*/,
4920
4934
                                                   cur_index_tree,
5162
5176
    false otherwise
5163
5177
*/
5164
5178
static bool
5165
 
get_constant_key_infix(KeyInfo *,
 
5179
get_constant_key_infix(KEY *,
5166
5180
                       optimizer::SEL_ARG *index_range_tree,
5167
 
                       KeyPartInfo *first_non_group_part,
5168
 
                       KeyPartInfo *min_max_arg_part,
5169
 
                       KeyPartInfo *last_part,
 
5181
                       KEY_PART_INFO *first_non_group_part,
 
5182
                       KEY_PART_INFO *min_max_arg_part,
 
5183
                       KEY_PART_INFO *last_part,
5170
5184
                       Session *,
5171
5185
                       unsigned char *key_infix,
5172
5186
                       uint32_t *key_infix_len,
5173
 
                       KeyPartInfo **first_non_infix_part)
 
5187
                       KEY_PART_INFO **first_non_infix_part)
5174
5188
{
5175
5189
  optimizer::SEL_ARG *cur_range= NULL;
5176
 
  KeyPartInfo *cur_part= NULL;
 
5190
  KEY_PART_INFO *cur_part= NULL;
5177
5191
  /* End part for the first loop below. */
5178
 
  KeyPartInfo *end_part= min_max_arg_part ? min_max_arg_part : last_part;
 
5192
  KEY_PART_INFO *end_part= min_max_arg_part ? min_max_arg_part : last_part;
5179
5193
 
5180
5194
  *key_infix_len= 0;
5181
5195
  unsigned char *key_ptr= key_infix;
5241
5255
    field  field that possibly references some key part in index
5242
5256
 
5243
5257
  NOTES
5244
 
    The return value can be used to get a KeyPartInfo pointer by
 
5258
    The return value can be used to get a KEY_PART_INFO pointer by
5245
5259
    part= index->key_part + get_field_keypart(...) - 1;
5246
5260
 
5247
5261
  RETURN
5249
5263
    0 if field does not reference any index field.
5250
5264
*/
5251
5265
static inline uint
5252
 
get_field_keypart(KeyInfo *index, Field *field)
 
5266
get_field_keypart(KEY *index, Field *field)
5253
5267
{
5254
 
  KeyPartInfo *part= NULL;
5255
 
  KeyPartInfo *end= NULL;
 
5268
  KEY_PART_INFO *part= NULL;
 
5269
  KEY_PART_INFO *end= NULL;
5256
5270
 
5257
5271
  for (part= index->key_part, end= part + index->key_parts; part < end; part++)
5258
5272
  {
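
As the NOTES above spell out, get_field_keypart() returns a 1-based key part number so that 0 can mean "field is not part of this index", and callers recover the pointer with index->key_part + result - 1. A stand-alone sketch of that convention over a plain array (the types are illustrative):

#include <cstdio>

struct Field { int id; };
struct KeyPart { const Field* field; };

// Returns the 1-based position of `field` among `parts`, or 0 if absent,
// mirroring get_field_keypart()'s convention.
unsigned field_keypart(const KeyPart* parts, unsigned nparts,
                       const Field* field) {
  for (unsigned i = 0; i < nparts; ++i)
    if (parts[i].field == field)
      return i + 1;
  return 0;
}

int main() {
  Field a{1}, b{2};
  KeyPart parts[] = {{&a}, {&b}};
  unsigned nr = field_keypart(parts, 2, &b);
  if (nr != 0) {
    // part= index->key_part + get_field_keypart(...) - 1;   (from the NOTES)
    const KeyPart* part = parts + nr - 1;
    std::printf("found at key part %u (field %d)\n", nr, part->field->id);
  }
  return 0;
}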
5362
5376
    None
5363
5377
*/
5364
5378
void cost_group_min_max(Table* table,
5365
 
                        KeyInfo *index_info,
 
5379
                        KEY *index_info,
5366
5380
                        uint32_t used_key_parts,
5367
5381
                        uint32_t group_key_parts,
5368
5382
                        optimizer::SEL_TREE *range_tree,
5567
5581
}
5568
5582
 
5569
5583
 
5570
 
uint32_t optimizer::RorScanInfo::findFirstNotSet() const
5571
 
{
5572
 
  boost::dynamic_bitset<> map= bitsToBitset();
5573
 
  for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
5574
 
  {
5575
 
    if (! map.test(i))
5576
 
    {
5577
 
      return i;
5578
 
    }
5579
 
  }
5580
 
  return map.size();
5581
 
}
5582
 
 
5583
 
 
5584
 
size_t optimizer::RorScanInfo::getBitCount() const
5585
 
{
5586
 
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
5587
 
  return tmp_bitset.count();
5588
 
}
5589
 
 
5590
 
 
5591
 
void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
5592
 
{
5593
 
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
5594
 
  tmp_bitset-= in_bitset;
5595
 
  covered_fields= tmp_bitset.to_ulong();
5596
 
}
5597
 
 
5598
 
 
5599
 
boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
5600
 
{
5601
 
  string res;
5602
 
  uint64_t conv= covered_fields;
5603
 
  while (conv)
5604
 
  {
5605
 
    res.push_back((conv & 1) + '0');
5606
 
    conv>>= 1;
5607
 
  }
5608
 
  if (! res.empty())
5609
 
  {
5610
 
    std::reverse(res.begin(), res.end());
5611
 
  }
5612
 
  string final(covered_fields_size - res.length(), '0');
5613
 
  final.append(res);
5614
 
  return (boost::dynamic_bitset<>(final));
5615
 
}
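
RorScanInfo::bitsToBitset() above converts the packed covered_fields word into a boost::dynamic_bitset<> by building a '0'/'1' string, since dynamic_bitset's string constructor reads bits most-significant first. The string detour is not required: dynamic_bitset can also be constructed directly from an integer value. A sketch showing both routes agree (Boost required; the values are made up for the demo):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <boost/dynamic_bitset.hpp>

int main() {
  uint64_t covered_fields = 0x29;        // packed field set, as in RorScanInfo
  size_t covered_fields_size = 8;

  // String route, as in bitsToBitset(): emit LSB-first, reverse, left-pad.
  std::string bits;
  for (uint64_t conv = covered_fields; conv; conv >>= 1)
    bits.push_back(char('0' + (conv & 1)));
  std::reverse(bits.begin(), bits.end());
  std::string padded(covered_fields_size - bits.length(), '0');
  padded.append(bits);
  boost::dynamic_bitset<> via_string(padded);

  // Direct route: construct from the integer value with an explicit size.
  boost::dynamic_bitset<> direct(covered_fields_size, covered_fields);

  std::cout << "equal: " << (via_string == direct) << '\n';   // prints 1
  return 0;
}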
5616
 
 
5617
 
 
5618
5584
} /* namespace drizzled */