~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to drizzled/optimizer/range.cc

  • Committer: Olaf van der Spek
  • Date: 2011-02-12 18:24:24 UTC
  • mto: (2167.1.2 build) (2172.1.4 build)
  • mto: This revision was merged to the branch mainline in revision 2168.
  • Revision ID: olafvdspek@gmail.com-20110212182424-kgnm9osi7qo97at2
casts

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
1
/* -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil; -*-
2
2
 *  vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
3
3
 *
4
 
 *  Copyright (C) 2008-2009 Sun Microsystems
 
4
 *  Copyright (C) 2008-2009 Sun Microsystems, Inc.
5
5
 *
6
6
 *  This program is free software; you can redistribute it and/or modify
7
7
 *  it under the terms of the GNU General Public License as published by
84
84
  keypart-value-bytes holds the value. Its format depends on the field type.
85
85
  The length of keypart-value-bytes may or may not depend on the value being
86
86
  stored. The default is that length is static and equal to
87
 
  KEY_PART_INFO::length.
 
87
  KeyPartInfo::length.
88
88
 
89
89
  Key parts with (key_part_flag & HA_BLOB_PART) have length depending of the
90
90
  value:
109
109
#include <vector>
110
110
#include <algorithm>
111
111
 
 
112
#include <boost/dynamic_bitset.hpp>
 
113
 
112
114
#include "drizzled/sql_base.h"
113
115
#include "drizzled/sql_select.h"
114
116
#include "drizzled/error.h"
135
137
 
136
138
#include "drizzled/temporal.h" /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */
137
139
 
138
 
#include "drizzled/memory/multi_malloc.h"
139
 
 
140
140
using namespace std;
141
141
namespace drizzled
142
142
{
206
206
  cost->zero();
207
207
  if (table->cursor->primary_key_is_clustered())
208
208
  {
209
 
    cost->setIOCount(table->cursor->read_time(table->s->primary_key,
 
209
    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
210
210
                                             static_cast<uint32_t>(nrows),
211
211
                                             nrows));
212
212
  }
213
213
  else
214
214
  {
215
215
    double n_blocks=
216
 
      ceil(uint64_t2double(table->cursor->stats.data_file_length) / IO_SIZE);
 
216
      ceil(static_cast<double>(table->cursor->stats.data_file_length) / IO_SIZE);
217
217
    double busy_blocks=
218
 
      n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(nrows)));
 
218
      n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, static_cast<double>(nrows)));
219
219
    if (busy_blocks < 1.0)
220
220
      busy_blocks= 1.0;
221
221
 
230
230
  }
231
231
}
232
232
 
233
 
struct st_ror_scan_info;
234
 
 
235
233
static optimizer::SEL_TREE * get_mm_parts(optimizer::RangeParameter *param,
236
234
                               COND *cond_func,
237
235
                               Field *field,
250
248
 
251
249
static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);
252
250
 
253
 
static ha_rows check_quick_select(optimizer::Parameter *param,
 
251
static ha_rows check_quick_select(Session *session,
 
252
                                  optimizer::Parameter *param,
254
253
                                  uint32_t idx,
255
254
                                  bool index_only,
256
255
                                  optimizer::SEL_ARG *tree,
259
258
                                  uint32_t *bufsize,
260
259
                                  optimizer::CostVector *cost);
261
260
 
262
 
static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
 
261
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
 
262
                                                      optimizer::Parameter *param,
263
263
                                                      optimizer::SEL_TREE *tree,
264
264
                                                      bool index_read_must_be_used,
265
265
                                                      bool update_tbl_stats,
277
277
                                                                 double read_time);
278
278
 
279
279
static
280
 
optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
 
280
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
 
281
                                                  optimizer::Parameter *param,
281
282
                                                  optimizer::SEL_IMERGE *imerge,
282
283
                                                  double read_time);
283
284
 
347
348
  {
348
349
    return 0;
349
350
  }
350
 
  if (! (select= new optimizer::SqlSelect))
 
351
  if (! (select= new optimizer::SqlSelect()))
351
352
  {
352
353
    *error= 1;                  // out of memory
353
354
    return 0;
384
385
 
385
386
void optimizer::SqlSelect::cleanup()
386
387
{
387
 
  delete quick;
388
 
  quick= 0;
 
388
  if (quick)
 
389
  {
 
390
    delete quick;
 
391
    quick= NULL;
 
392
  }
 
393
 
389
394
  if (free_cond)
390
395
  {
391
396
    free_cond= 0;
392
397
    delete cond;
393
398
    cond= 0;
394
399
  }
395
 
  close_cached_file(file);
 
400
  file->close_cached_file();
396
401
}
397
402
 
398
403
 
459
464
    MAX_KEY if no such index was found.
460
465
*/
461
466
 
462
 
uint32_t optimizer::get_index_for_order(Table *table, order_st *order, ha_rows limit)
 
467
uint32_t optimizer::get_index_for_order(Table *table, Order *order, ha_rows limit)
463
468
{
464
469
  uint32_t idx;
465
470
  uint32_t match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1;
466
 
  order_st *ord;
 
471
  Order *ord;
467
472
 
468
473
  for (ord= order; ord; ord= ord->next)
469
474
    if (!ord->asc)
470
475
      return MAX_KEY;
471
476
 
472
 
  for (idx= 0; idx < table->s->keys; idx++)
 
477
  for (idx= 0; idx < table->getShare()->sizeKeys(); idx++)
473
478
  {
474
479
    if (!(table->keys_in_use_for_query.test(idx)))
475
480
      continue;
476
 
    KEY_PART_INFO *keyinfo= table->key_info[idx].key_part;
 
481
    KeyPartInfo *keyinfo= table->key_info[idx].key_part;
477
482
    uint32_t n_parts=  table->key_info[idx].key_parts;
478
483
    uint32_t partno= 0;
479
484
 
538
543
static int fill_used_fields_bitmap(optimizer::Parameter *param)
539
544
{
540
545
  Table *table= param->table;
541
 
  my_bitmap_map *tmp;
542
546
  uint32_t pk;
543
 
  param->tmp_covered_fields.setBitmap(0);
544
 
  param->fields_bitmap_size= table->s->column_bitmap_size;
545
 
  if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
546
 
                                  param->fields_bitmap_size)) ||
547
 
      param->needed_fields.init(tmp, table->s->fields))
548
 
    return 1;
549
 
 
550
 
  param->needed_fields= *table->read_set;
551
 
  bitmap_union(&param->needed_fields, table->write_set);
552
 
 
553
 
  pk= param->table->s->primary_key;
 
547
  param->tmp_covered_fields.clear();
 
548
  param->needed_fields.resize(table->getShare()->sizeFields());
 
549
  param->needed_fields.reset();
 
550
 
 
551
  param->needed_fields|= *table->read_set;
 
552
  param->needed_fields|= *table->write_set;
 
553
 
 
554
  pk= param->table->getShare()->getPrimaryKey();
554
555
  if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
555
556
  {
556
557
    /* The table uses clustered PK and it is not internally generated */
557
 
    KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
558
 
    KEY_PART_INFO *key_part_end= key_part +
 
558
    KeyPartInfo *key_part= param->table->key_info[pk].key_part;
 
559
    KeyPartInfo *key_part_end= key_part +
559
560
                                 param->table->key_info[pk].key_parts;
560
561
    for (;key_part != key_part_end; ++key_part)
561
 
      param->needed_fields.clearBit(key_part->fieldnr-1);
 
562
      param->needed_fields.reset(key_part->fieldnr-1);
562
563
  }
563
564
  return 0;
564
565
}
639
640
{
640
641
  uint32_t idx;
641
642
  double scan_time;
642
 
  delete quick;
643
 
  quick=0;
 
643
  if (quick)
 
644
  {
 
645
    delete quick;
 
646
    quick= NULL;
 
647
  }
644
648
  needed_reg.reset();
645
649
  quick_keys.reset();
646
650
  if (keys_to_use.none())
663
667
    memory::Root alloc;
664
668
    optimizer::SEL_TREE *tree= NULL;
665
669
    KEY_PART *key_parts;
666
 
    KEY *key_info;
 
670
    KeyInfo *key_info;
667
671
    optimizer::Parameter param;
668
672
 
669
673
    if (check_stack_overrun(session, 2*STACK_MIN_SIZE, NULL))
686
690
 
687
691
    session->no_errors=1;                               // Don't warn about NULL
688
692
    memory::init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
689
 
    if (!(param.key_parts= (KEY_PART*) alloc_root(&alloc,
690
 
                                                  sizeof(KEY_PART)*
691
 
                                                  head->s->key_parts)) ||
 
693
    if (!(param.key_parts= (KEY_PART*) alloc.alloc_root( sizeof(KEY_PART) * head->getShare()->key_parts)) ||
692
694
        fill_used_fields_bitmap(&param))
693
695
    {
694
696
      session->no_errors=0;
695
 
      free_root(&alloc,MYF(0));                 // Return memory & allocator
 
697
      alloc.free_root(MYF(0));                  // Return memory & allocator
 
698
 
696
699
      return 0;                         // Can't use range
697
700
    }
698
701
    key_parts= param.key_parts;
703
706
      This is used in get_mm_parts function.
704
707
    */
705
708
    key_info= head->key_info;
706
 
    for (idx=0 ; idx < head->s->keys ; idx++, key_info++)
 
709
    for (idx=0 ; idx < head->getShare()->sizeKeys() ; idx++, key_info++)
707
710
    {
708
 
      KEY_PART_INFO *key_part_info;
 
711
      KeyPartInfo *key_part_info;
709
712
      if (! keys_to_use.test(idx))
710
713
        continue;
711
714
 
734
737
    {
735
738
      int key_for_use= head->find_shortest_key(&head->covering_keys);
736
739
      double key_read_time=
737
 
        param.table->cursor->index_only_read_time(key_for_use,
738
 
                                                rows2double(records)) +
 
740
        param.table->cursor->index_only_read_time(key_for_use, records) +
739
741
        (double) records / TIME_FOR_COMPARE;
740
742
      if (key_read_time < read_time)
741
743
        read_time= key_read_time;
793
795
        bool can_build_covering= false;
794
796
 
795
797
        /* Get best 'range' plan and prepare data for making other plans */
796
 
        if ((range_trp= get_key_scans_params(&param, tree, false, true,
 
798
        if ((range_trp= get_key_scans_params(session, &param, tree, false, true,
797
799
                                             best_read_time)))
798
800
        {
799
801
          best_trp= range_trp;
820
822
              Try constructing covering ROR-intersect only if it looks possible
821
823
              and worth doing.
822
824
            */
823
 
            if (rori_trp->isRowRetrievalNecessary() && can_build_covering &&
 
825
            if (!rori_trp->is_covering && can_build_covering &&
824
826
                (rori_trp= get_best_covering_ror_intersect(&param, tree,
825
827
                                                           best_read_time)))
826
828
              best_trp= rori_trp;
836
838
        List_iterator_fast<optimizer::SEL_IMERGE> it(tree->merges);
837
839
        while ((imerge= it++))
838
840
        {
839
 
          new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
 
841
          new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);
840
842
          if (new_conj_trp)
841
843
            set_if_smaller(param.table->quick_condition_rows,
842
844
                           new_conj_trp->records);
857
859
      records= best_trp->records;
858
860
      if (! (quick= best_trp->make_quick(&param, true)) || quick->init())
859
861
      {
860
 
        delete quick;
861
 
        quick= NULL;
 
862
        /* quick can already be free here */
 
863
        if (quick)
 
864
        {
 
865
          delete quick;
 
866
          quick= NULL;
 
867
        }
862
868
      }
863
869
    }
864
870
 
865
871
  free_mem:
866
 
    free_root(&alloc,MYF(0));                   // Return memory & allocator
 
872
    alloc.free_root(MYF(0));                    // Return memory & allocator
867
873
    session->mem_root= param.old_root;
868
874
    session->no_errors=0;
869
875
  }
879
885
  Get best plan for a optimizer::SEL_IMERGE disjunctive expression.
880
886
  SYNOPSIS
881
887
    get_best_disjunct_quick()
 
888
      session
882
889
      param     Parameter from check_quick_select function
883
890
      imerge    Expression to use
884
891
      read_time Don't create scans with cost > read_time
941
948
*/
942
949
 
943
950
static
944
 
optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
 
951
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
 
952
                                                  optimizer::Parameter *param,
945
953
                                                  optimizer::SEL_IMERGE *imerge,
946
954
                                                  double read_time)
947
955
{
965
973
  ha_rows roru_total_records;
966
974
  double roru_intersect_part= 1.0;
967
975
 
968
 
  if (! (range_scans= (optimizer::RangeReadPlan**)alloc_root(param->mem_root,
969
 
                                                             sizeof(optimizer::RangeReadPlan*)*
970
 
                                                             n_child_scans)))
 
976
  if (! (range_scans= (optimizer::RangeReadPlan**)param->mem_root->alloc_root(sizeof(optimizer::RangeReadPlan*)* n_child_scans)))
 
977
  {
971
978
    return NULL;
 
979
  }
 
980
 
972
981
  /*
973
982
    Collect best 'range' scan for each of disjuncts, and, while doing so,
974
983
    analyze possibility of ROR scans. Also calculate some values needed by
978
987
       ptree != imerge->trees_next;
979
988
       ptree++, cur_child++)
980
989
  {
981
 
    if (!(*cur_child= get_key_scans_params(param, *ptree, true, false, read_time)))
 
990
    if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
982
991
    {
983
992
      /*
984
993
        One of index scans in this index_merge is more expensive than entire
995
1004
    all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
996
1005
    all_scans_rors &= (*cur_child)->is_ror;
997
1006
    if (pk_is_clustered &&
998
 
        param->real_keynr[(*cur_child)->getKeyIndex()] ==
999
 
        param->table->s->primary_key)
 
1007
        param->real_keynr[(*cur_child)->key_idx] ==
 
1008
        param->table->getShare()->getPrimaryKey())
1000
1009
    {
1001
1010
      cpk_scan= cur_child;
1002
1011
      cpk_scan_records= (*cur_child)->records;
1031
1040
  /* Calculate cost(rowid_to_row_scan) */
1032
1041
  {
1033
1042
    optimizer::CostVector sweep_cost;
1034
 
    JOIN *join= param->session->lex->select_lex.join;
 
1043
    Join *join= param->session->lex->select_lex.join;
1035
1044
    bool is_interrupted= test(join && join->tables == 1);
1036
1045
    get_sweep_read_cost(param->table, non_cpk_scan_records, is_interrupted,
1037
1046
                        &sweep_cost);
1047
1056
                                    param->session->variables.sortbuff_size);
1048
1057
  if (param->imerge_cost_buff_size < unique_calc_buff_size)
1049
1058
  {
1050
 
    if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root,
1051
 
                                                     unique_calc_buff_size)))
 
1059
    if (!(param->imerge_cost_buff= (uint*)param->mem_root->alloc_root(unique_calc_buff_size)))
 
1060
    {
1052
1061
      return NULL;
 
1062
    }
 
1063
 
1053
1064
    param->imerge_cost_buff_size= unique_calc_buff_size;
1054
1065
  }
1055
1066
 
1078
1089
  /* Ok, it is possible to build a ROR-union, try it. */
1079
1090
  bool dummy;
1080
1091
  if (! (roru_read_plans=
1081
 
          (optimizer::TableReadPlan **) alloc_root(param->mem_root,
1082
 
                                                   sizeof(optimizer::TableReadPlan*)*
1083
 
                                                   n_child_scans)))
 
1092
          (optimizer::TableReadPlan **) param->mem_root->alloc_root(sizeof(optimizer::TableReadPlan*) * n_child_scans)))
 
1093
  {
1084
1094
    return imerge_trp;
 
1095
  }
1085
1096
skip_to_ror_scan:
1086
1097
  roru_index_costs= 0.0;
1087
1098
  roru_total_records= 0;
1103
1114
    {
1104
1115
      /* Ok, we have index_only cost, now get full rows scan cost */
1105
1116
      cost= param->table->cursor->
1106
 
              read_time(param->real_keynr[(*cur_child)->getKeyIndex()], 1,
 
1117
              read_time(param->real_keynr[(*cur_child)->key_idx], 1,
1107
1118
                        (*cur_child)->records) +
1108
 
              rows2double((*cur_child)->records) / TIME_FOR_COMPARE;
 
1119
              static_cast<double>((*cur_child)->records) / TIME_FOR_COMPARE;
1109
1120
    }
1110
1121
    else
1111
1122
      cost= read_time;
1121
1132
      roru_index_costs += (*cur_roru_plan)->read_cost;
1122
1133
    }
1123
1134
    else
1124
 
    {
1125
1135
      roru_index_costs +=
1126
 
        ((optimizer::RorIntersectReadPlan *)(*cur_roru_plan))->getCostOfIndexScans();
1127
 
    }
 
1136
        ((optimizer::RorIntersectReadPlan*)(*cur_roru_plan))->index_scan_costs;
1128
1137
    roru_total_records += (*cur_roru_plan)->records;
1129
1138
    roru_intersect_part *= (*cur_roru_plan)->records /
1130
1139
                           param->table->cursor->stats.records;
1149
1158
  double roru_total_cost;
1150
1159
  {
1151
1160
    optimizer::CostVector sweep_cost;
1152
 
    JOIN *join= param->session->lex->select_lex.join;
 
1161
    Join *join= param->session->lex->select_lex.join;
1153
1162
    bool is_interrupted= test(join && join->tables == 1);
1154
1163
    get_sweep_read_cost(param->table, roru_total_records, is_interrupted,
1155
1164
                        &sweep_cost);
1156
1165
    roru_total_cost= roru_index_costs +
1157
 
                     rows2double(roru_total_records)*log((double)n_child_scans) /
 
1166
                     static_cast<double>(roru_total_records)*log((double)n_child_scans) /
1158
1167
                     (TIME_FOR_COMPARE_ROWID * M_LN2) +
1159
1168
                     sweep_cost.total_cost();
1160
1169
  }
1164
1173
  {
1165
1174
    if ((roru= new (param->mem_root) optimizer::RorUnionReadPlan))
1166
1175
    {
1167
 
      roru->merged_scans.assign(roru_read_plans, roru_read_plans + n_child_scans);
 
1176
      roru->first_ror= roru_read_plans;
 
1177
      roru->last_ror= roru_read_plans + n_child_scans;
1168
1178
      roru->read_cost= roru_total_cost;
1169
1179
      roru->records= roru_total_records;
1170
1180
      return roru;
1174
1184
}
1175
1185
 
1176
1186
 
1177
 
typedef struct st_ror_scan_info
1178
 
{
1179
 
  uint32_t      idx;      /* # of used key in param->keys */
1180
 
  uint32_t      keynr;    /* # of used key in table */
1181
 
  ha_rows   records;  /* estimate of # records this scan will return */
1182
 
 
1183
 
  /* Set of intervals over key fields that will be used for row retrieval. */
1184
 
  optimizer::SEL_ARG   *sel_arg;
1185
 
 
1186
 
  /* Fields used in the query and covered by this ROR scan. */
1187
 
  MyBitmap covered_fields;
1188
 
  uint32_t      used_fields_covered; /* # of set bits in covered_fields */
1189
 
  int       key_rec_length; /* length of key record (including rowid) */
1190
 
 
1191
 
  /*
1192
 
    Cost of reading all index records with values in sel_arg intervals set
1193
 
    (assuming there is no need to access full table records)
1194
 
  */
1195
 
  double    index_read_cost;
1196
 
  uint32_t      first_uncovered_field; /* first unused bit in covered_fields */
1197
 
  uint32_t      key_components; /* # of parts in the key */
1198
 
} ROR_SCAN_INFO;
1199
 
 
1200
1187
 
1201
1188
/*
1202
 
  Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using
 
1189
  Create optimizer::RorScanInfo* structure with a single ROR scan on index idx using
1203
1190
  sel_arg set of intervals.
1204
1191
 
1205
1192
  SYNOPSIS
1214
1201
*/
1215
1202
 
1216
1203
static
1217
 
ROR_SCAN_INFO *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
 
1204
optimizer::RorScanInfo *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
1218
1205
{
1219
 
  ROR_SCAN_INFO *ror_scan;
1220
 
  my_bitmap_map *bitmap_buf;
 
1206
  optimizer::RorScanInfo *ror_scan= NULL;
1221
1207
 
1222
1208
  uint32_t keynr;
1223
1209
 
1224
 
  if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root,
1225
 
                                             sizeof(ROR_SCAN_INFO))))
 
1210
  if (!(ror_scan= (optimizer::RorScanInfo*)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo))))
1226
1211
    return NULL;
1227
1212
 
1228
1213
  ror_scan->idx= idx;
1232
1217
  ror_scan->sel_arg= sel_arg;
1233
1218
  ror_scan->records= param->table->quick_rows[keynr];
1234
1219
 
1235
 
  if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
1236
 
                                                param->fields_bitmap_size)))
1237
 
    return NULL;
1238
 
 
1239
 
  if (ror_scan->covered_fields.init(bitmap_buf,
1240
 
                                    param->table->s->fields))
1241
 
    return NULL;
1242
 
  ror_scan->covered_fields.clearAll();
1243
 
 
1244
 
  KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part;
1245
 
  KEY_PART_INFO *key_part_end= key_part +
 
1220
  ror_scan->covered_fields_size= param->table->getShare()->sizeFields();
 
1221
  boost::dynamic_bitset<> tmp_bitset(param->table->getShare()->sizeFields());
 
1222
  tmp_bitset.reset();
 
1223
 
 
1224
  KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
 
1225
  KeyPartInfo *key_part_end= key_part +
1246
1226
                               param->table->key_info[keynr].key_parts;
1247
 
  for (;key_part != key_part_end; ++key_part)
 
1227
  for (; key_part != key_part_end; ++key_part)
1248
1228
  {
1249
 
    if (param->needed_fields.isBitSet(key_part->fieldnr-1))
1250
 
      ror_scan->covered_fields.setBit(key_part->fieldnr-1);
 
1229
    if (param->needed_fields.test(key_part->fieldnr-1))
 
1230
      tmp_bitset.set(key_part->fieldnr-1);
1251
1231
  }
1252
 
  double rows= rows2double(param->table->quick_rows[ror_scan->keynr]);
 
1232
  double rows= param->table->quick_rows[ror_scan->keynr];
1253
1233
  ror_scan->index_read_cost=
1254
1234
    param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
1255
 
  return(ror_scan);
 
1235
  ror_scan->covered_fields= tmp_bitset.to_ulong();
 
1236
  return ror_scan;
1256
1237
}
1257
1238
 
1258
1239
 
1259
1240
/*
1260
 
  Compare two ROR_SCAN_INFO** by  E(#records_matched) * key_record_length.
 
1241
  Compare two optimizer::RorScanInfo** by  E(#records_matched) * key_record_length.
1261
1242
  SYNOPSIS
1262
1243
    cmp_ror_scan_info()
1263
1244
      a ptr to first compared value
1269
1250
    1 a > b
1270
1251
*/
1271
1252
 
1272
 
static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b)
 
1253
static int cmp_ror_scan_info(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
1273
1254
{
1274
 
  double val1= rows2double((*a)->records) * (*a)->key_rec_length;
1275
 
  double val2= rows2double((*b)->records) * (*b)->key_rec_length;
 
1255
  double val1= static_cast<double>((*a)->records) * (*a)->key_rec_length;
 
1256
  double val2= static_cast<double>((*b)->records) * (*b)->key_rec_length;
1276
1257
  return (val1 < val2)? -1: (val1 == val2)? 0 : 1;
1277
1258
}
1278
1259
 
 
1260
 
1279
1261
/*
1280
 
  Compare two ROR_SCAN_INFO** by
 
1262
  Compare two optimizer::RorScanInfo** by
1281
1263
   (#covered fields in F desc,
1282
1264
    #components asc,
1283
1265
    number of first not covered component asc)
1293
1275
    1 a > b
1294
1276
*/
1295
1277
 
1296
 
static int cmp_ror_scan_info_covering(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b)
 
1278
static int cmp_ror_scan_info_covering(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
1297
1279
{
1298
1280
  if ((*a)->used_fields_covered > (*b)->used_fields_covered)
1299
1281
    return -1;
1310
1292
  return 0;
1311
1293
}
1312
1294
 
1313
 
 
1314
1295
/* Auxiliary structure for incremental ROR-intersection creation */
1315
 
typedef struct
 
1296
typedef struct st_ror_intersect_info
1316
1297
{
 
1298
  st_ror_intersect_info()
 
1299
    :
 
1300
      param(NULL),
 
1301
      covered_fields(),
 
1302
      out_rows(0.0),
 
1303
      is_covering(false),
 
1304
      index_records(0),
 
1305
      index_scan_costs(0.0),
 
1306
      total_cost(0.0)
 
1307
  {}
 
1308
 
 
1309
  st_ror_intersect_info(const optimizer::Parameter *in_param)
 
1310
    :
 
1311
      param(in_param),
 
1312
      covered_fields(in_param->table->getShare()->sizeFields()),
 
1313
      out_rows(in_param->table->cursor->stats.records),
 
1314
      is_covering(false),
 
1315
      index_records(0),
 
1316
      index_scan_costs(0.0),
 
1317
      total_cost(0.0)
 
1318
  {
 
1319
    covered_fields.reset();
 
1320
  }
 
1321
 
1317
1322
  const optimizer::Parameter *param;
1318
 
  MyBitmap covered_fields; /* union of fields covered by all scans */
 
1323
  boost::dynamic_bitset<> covered_fields; /* union of fields covered by all scans */
1319
1324
  /*
1320
1325
    Fraction of table records that satisfies conditions of all scans.
1321
1326
    This is the number of full records that will be retrieved if a
1331
1336
} ROR_INTERSECT_INFO;
1332
1337
 
1333
1338
 
1334
 
/*
1335
 
  Allocate a ROR_INTERSECT_INFO and initialize it to contain zero scans.
1336
 
 
1337
 
  SYNOPSIS
1338
 
    ror_intersect_init()
1339
 
      param         Parameter from test_quick_select
1340
 
 
1341
 
  RETURN
1342
 
    allocated structure
1343
 
    NULL on error
1344
 
*/
1345
 
 
1346
 
static
1347
 
ROR_INTERSECT_INFO* ror_intersect_init(const optimizer::Parameter *param)
1348
 
{
1349
 
  ROR_INTERSECT_INFO *info;
1350
 
  my_bitmap_map* buf;
1351
 
  if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
1352
 
                                              sizeof(ROR_INTERSECT_INFO))))
1353
 
    return NULL;
1354
 
  info->param= param;
1355
 
  if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
1356
 
                                         param->fields_bitmap_size)))
1357
 
    return NULL;
1358
 
  if (info->covered_fields.init(buf, param->table->s->fields))
1359
 
    return NULL;
1360
 
  info->is_covering= false;
1361
 
  info->index_scan_costs= 0.0;
1362
 
  info->index_records= 0;
1363
 
  info->out_rows= (double) param->table->cursor->stats.records;
1364
 
  info->covered_fields.clearAll();
1365
 
  return info;
1366
 
}
1367
 
 
1368
1339
static void ror_intersect_cpy(ROR_INTERSECT_INFO *dst,
1369
1340
                              const ROR_INTERSECT_INFO *src)
1370
1341
{
1469
1440
*/
1470
1441
 
1471
1442
static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
1472
 
                                   const ROR_SCAN_INFO *scan)
 
1443
                                   const optimizer::RorScanInfo *scan)
1473
1444
{
1474
1445
  double selectivity_mult= 1.0;
1475
 
  KEY_PART_INFO *key_part= info->param->table->key_info[scan->keynr].key_part;
 
1446
  KeyPartInfo *key_part= info->param->table->key_info[scan->keynr].key_part;
1476
1447
  unsigned char key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */
1477
1448
  unsigned char *key_ptr= key_val;
1478
1449
  optimizer::SEL_ARG *sel_arg= NULL;
1479
1450
  optimizer::SEL_ARG *tuple_arg= NULL;
1480
1451
  key_part_map keypart_map= 0;
1481
1452
  bool cur_covered;
1482
 
  bool prev_covered= test(info->covered_fields.isBitSet(key_part->fieldnr-1));
 
1453
  bool prev_covered= test(info->covered_fields.test(key_part->fieldnr-1));
1483
1454
  key_range min_range;
1484
1455
  key_range max_range;
1485
1456
  min_range.key= key_val;
1492
1463
       sel_arg= sel_arg->next_key_part)
1493
1464
  {
1494
1465
    cur_covered=
1495
 
      test(info->covered_fields.isBitSet(key_part[sel_arg->part].fieldnr-1));
 
1466
      test(info->covered_fields.test(key_part[sel_arg->part].fieldnr-1));
1496
1467
    if (cur_covered != prev_covered)
1497
1468
    {
1498
1469
      /* create (part1val, ..., part{n-1}val) tuple. */
1518
1489
      if (cur_covered)
1519
1490
      {
1520
1491
        /* uncovered -> covered */
1521
 
        double tmp= rows2double(records)/rows2double(prev_records);
1522
 
        selectivity_mult *= tmp;
 
1492
        selectivity_mult *= static_cast<double>(records) / prev_records;
1523
1493
        prev_records= HA_POS_ERROR;
1524
1494
      }
1525
1495
      else
1532
1502
  }
1533
1503
  if (!prev_covered)
1534
1504
  {
1535
 
    double tmp= rows2double(info->param->table->quick_rows[scan->keynr]) /
1536
 
                rows2double(prev_records);
1537
 
    selectivity_mult *= tmp;
 
1505
    selectivity_mult *= static_cast<double>(info->param->table->quick_rows[scan->keynr]) / prev_records;
1538
1506
  }
1539
 
  return(selectivity_mult);
 
1507
  return selectivity_mult;
1540
1508
}
1541
1509
 
1542
1510
 
1577
1545
*/
1578
1546
 
1579
1547
static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
1580
 
                              ROR_SCAN_INFO* ror_scan, bool is_cpk_scan)
 
1548
                              optimizer::RorScanInfo* ror_scan, bool is_cpk_scan)
1581
1549
{
1582
1550
  double selectivity_mult= 1.0;
1583
1551
 
1597
1565
      each record of every scan. Assuming 1/TIME_FOR_COMPARE_ROWID
1598
1566
      per check this gives us:
1599
1567
    */
1600
 
    info->index_scan_costs += rows2double(info->index_records) /
 
1568
    info->index_scan_costs += static_cast<double>(info->index_records) /
1601
1569
                              TIME_FOR_COMPARE_ROWID;
1602
1570
  }
1603
1571
  else
1604
1572
  {
1605
1573
    info->index_records += info->param->table->quick_rows[ror_scan->keynr];
1606
1574
    info->index_scan_costs += ror_scan->index_read_cost;
1607
 
    bitmap_union(&info->covered_fields, &ror_scan->covered_fields);
1608
 
    if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields,
1609
 
                                               &info->covered_fields))
 
1575
    boost::dynamic_bitset<> tmp_bitset= ror_scan->bitsToBitset();
 
1576
    info->covered_fields|= tmp_bitset;
 
1577
    if (! info->is_covering && info->param->needed_fields.is_subset_of(info->covered_fields))
1610
1578
    {
1611
1579
      info->is_covering= true;
1612
1580
    }
1613
1581
  }
1614
1582
 
1615
1583
  info->total_cost= info->index_scan_costs;
1616
 
  if (!info->is_covering)
 
1584
  if (! info->is_covering)
1617
1585
  {
1618
1586
    optimizer::CostVector sweep_cost;
1619
 
    JOIN *join= info->param->session->lex->select_lex.join;
 
1587
    Join *join= info->param->session->lex->select_lex.join;
1620
1588
    bool is_interrupted= test(join && join->tables == 1);
1621
1589
    get_sweep_read_cost(info->param->table, double2rows(info->out_rows),
1622
1590
                        is_interrupted, &sweep_cost);
1627
1595
 
1628
1596
 
1629
1597
/*
 
1598
  Get best covering ROR-intersection.
 
1599
  SYNOPSIS
 
1600
    get_best_covering_ror_intersect()
 
1601
      param     Parameter from test_quick_select function.
 
1602
      tree      optimizer::SEL_TREE with sets of intervals for different keys.
 
1603
      read_time Don't return table read plans with cost > read_time.
 
1604
 
 
1605
  RETURN
 
1606
    Best covering ROR-intersection plan
 
1607
    NULL if no plan found.
 
1608
 
 
1609
  NOTES
 
1610
    get_best_ror_intersect must be called for a tree before calling this
 
1611
    function for it.
 
1612
    This function invalidates tree->ror_scans member values.
 
1613
 
 
1614
  The following approximate algorithm is used:
 
1615
    I=set of all covering indexes
 
1616
    F=set of all fields to cover
 
1617
    S={}
 
1618
 
 
1619
    do
 
1620
    {
 
1621
      Order I by (#covered fields in F desc,
 
1622
                  #components asc,
 
1623
                  number of first not covered component asc);
 
1624
      F=F-covered by first(I);
 
1625
      S=S+first(I);
 
1626
      I=I-first(I);
 
1627
    } while F is not empty.
 
1628
*/
 
1629
 
 
1630
static
 
1631
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
 
1632
                                                            optimizer::SEL_TREE *tree,
 
1633
                                                            double read_time)
 
1634
{
 
1635
  optimizer::RorScanInfo **ror_scan_mark;
 
1636
  optimizer::RorScanInfo **ror_scans_end= tree->ror_scans_end;
 
1637
 
 
1638
  for (optimizer::RorScanInfo **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
 
1639
    (*scan)->key_components=
 
1640
      param->table->key_info[(*scan)->keynr].key_parts;
 
1641
 
 
1642
  /*
 
1643
    Run covering-ROR-search algorithm.
 
1644
    Assume set I is [ror_scan .. ror_scans_end)
 
1645
  */
 
1646
 
 
1647
  /*I=set of all covering indexes */
 
1648
  ror_scan_mark= tree->ror_scans;
 
1649
 
 
1650
  boost::dynamic_bitset<> *covered_fields= &param->tmp_covered_fields;
 
1651
  if (covered_fields->empty())
 
1652
  {
 
1653
    covered_fields->resize(param->table->getShare()->sizeFields());
 
1654
  }
 
1655
  covered_fields->reset();
 
1656
 
 
1657
  double total_cost= 0.0f;
 
1658
  ha_rows records=0;
 
1659
  bool all_covered;
 
1660
 
 
1661
  do
 
1662
  {
 
1663
    /*
 
1664
      Update changed sorting info:
 
1665
        #covered fields,
 
1666
        number of first not covered component
 
1667
      Calculate and save these values for each of remaining scans.
 
1668
    */
 
1669
    for (optimizer::RorScanInfo **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
 
1670
    {
 
1671
      /* subtract one bitset from the other */
 
1672
      (*scan)->subtractBitset(*covered_fields);
 
1673
      (*scan)->used_fields_covered=
 
1674
        (*scan)->getBitCount();
 
1675
      (*scan)->first_uncovered_field= (*scan)->findFirstNotSet();
 
1676
    }
 
1677
 
 
1678
    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
 
1679
                       sizeof(optimizer::RorScanInfo*),
 
1680
                       (qsort_cmp)cmp_ror_scan_info_covering);
 
1681
 
 
1682
    /* I=I-first(I) */
 
1683
    total_cost += (*ror_scan_mark)->index_read_cost;
 
1684
    records += (*ror_scan_mark)->records;
 
1685
    if (total_cost > read_time)
 
1686
      return NULL;
 
1687
    /* F=F-covered by first(I) */
 
1688
    boost::dynamic_bitset<> tmp_bitset= (*ror_scan_mark)->bitsToBitset();
 
1689
    *covered_fields|= tmp_bitset;
 
1690
    all_covered= param->needed_fields.is_subset_of(*covered_fields);
 
1691
  } while ((++ror_scan_mark < ror_scans_end) && ! all_covered);
 
1692
 
 
1693
  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
 
1694
    return NULL;
 
1695
 
 
1696
  /*
 
1697
    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
 
1698
    cost total_cost.
 
1699
  */
 
1700
  /* Add priority queue use cost. */
 
1701
  total_cost += static_cast<double>(records) *
 
1702
                log((double)(ror_scan_mark - tree->ror_scans)) /
 
1703
                (TIME_FOR_COMPARE_ROWID * M_LN2);
 
1704
 
 
1705
  if (total_cost > read_time)
 
1706
    return NULL;
 
1707
 
 
1708
  optimizer::RorIntersectReadPlan *trp= NULL;
 
1709
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
 
1710
  {
 
1711
    return trp;
 
1712
  }
 
1713
 
 
1714
  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
 
1715
  if (!(trp->first_scan= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* best_num)))
 
1716
    return NULL;
 
1717
  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
 
1718
  trp->last_scan=  trp->first_scan + best_num;
 
1719
  trp->is_covering= true;
 
1720
  trp->read_cost= total_cost;
 
1721
  trp->records= records;
 
1722
  trp->cpk_scan= NULL;
 
1723
  set_if_smaller(param->table->quick_condition_rows, records);
 
1724
 
 
1725
  return(trp);
 
1726
}
 
1727
 
 
1728
 
 
1729
/*
1630
1730
  Get best ROR-intersection plan using non-covering ROR-intersection search
1631
1731
  algorithm. The returned plan may be covering.
1632
1732
 
1703
1803
    return NULL;
1704
1804
 
1705
1805
  /*
1706
 
    Step1: Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of
 
1806
    Step1: Collect ROR-able SEL_ARGs and create optimizer::RorScanInfo for each of
1707
1807
    them. Also find and save clustered PK scan if there is one.
1708
1808
  */
1709
 
  ROR_SCAN_INFO **cur_ror_scan= NULL;
1710
 
  ROR_SCAN_INFO *cpk_scan= NULL;
 
1809
  optimizer::RorScanInfo **cur_ror_scan= NULL;
 
1810
  optimizer::RorScanInfo *cpk_scan= NULL;
1711
1811
  uint32_t cpk_no= 0;
1712
1812
  bool cpk_scan_used= false;
1713
1813
 
1714
 
  if (! (tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
1715
 
                                                      sizeof(ROR_SCAN_INFO*)*
1716
 
                                                      param->keys)))
 
1814
  if (! (tree->ror_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* param->keys)))
 
1815
  {
1717
1816
    return NULL;
 
1817
  }
1718
1818
  cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
1719
 
           param->table->s->primary_key : MAX_KEY);
 
1819
           param->table->getShare()->getPrimaryKey() : MAX_KEY);
1720
1820
 
1721
1821
  for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
1722
1822
  {
1723
 
    ROR_SCAN_INFO *scan;
 
1823
    optimizer::RorScanInfo *scan;
1724
1824
    if (! tree->ror_scans_map.test(idx))
1725
1825
      continue;
1726
 
    if (!(scan= make_ror_scan(param, idx, tree->keys[idx])))
 
1826
    if (! (scan= make_ror_scan(param, idx, tree->keys[idx])))
1727
1827
      return NULL;
1728
1828
    if (param->real_keynr[idx] == cpk_no)
1729
1829
    {
1737
1837
  tree->ror_scans_end= cur_ror_scan;
1738
1838
  /*
1739
1839
    Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized
1740
 
    ROR_SCAN_INFO's.
 
1840
    optimizer::RorScanInfo's.
1741
1841
    Step 2: Get best ROR-intersection using an approximate algorithm.
1742
1842
  */
1743
 
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*),
 
1843
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(optimizer::RorScanInfo*),
1744
1844
                     (qsort_cmp)cmp_ror_scan_info);
1745
1845
 
1746
 
  ROR_SCAN_INFO **intersect_scans= NULL; /* ROR scans used in index intersection */
1747
 
  ROR_SCAN_INFO **intersect_scans_end= NULL;
1748
 
  if (! (intersect_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
1749
 
                                                      sizeof(ROR_SCAN_INFO*)*
1750
 
                                                      tree->n_ror_scans)))
 
1846
  optimizer::RorScanInfo **intersect_scans= NULL; /* ROR scans used in index intersection */
 
1847
  optimizer::RorScanInfo **intersect_scans_end= NULL;
 
1848
  if (! (intersect_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*) * tree->n_ror_scans)))
1751
1849
    return NULL;
1752
1850
  intersect_scans_end= intersect_scans;
1753
1851
 
1754
1852
  /* Create and incrementally update ROR intersection. */
1755
 
  ROR_INTERSECT_INFO *intersect= NULL;
1756
 
  ROR_INTERSECT_INFO *intersect_best= NULL;
1757
 
  if (! (intersect= ror_intersect_init(param)) ||
1758
 
      ! (intersect_best= ror_intersect_init(param)))
1759
 
    return NULL;
 
1853
  ROR_INTERSECT_INFO intersect(param);
 
1854
  ROR_INTERSECT_INFO intersect_best(param);
1760
1855
 
1761
1856
  /* [intersect_scans,intersect_scans_best) will hold the best intersection */
1762
 
  ROR_SCAN_INFO **intersect_scans_best= NULL;
 
1857
  optimizer::RorScanInfo **intersect_scans_best= NULL;
1763
1858
  cur_ror_scan= tree->ror_scans;
1764
1859
  intersect_scans_best= intersect_scans;
1765
 
  while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
 
1860
  while (cur_ror_scan != tree->ror_scans_end && ! intersect.is_covering)
1766
1861
  {
1767
1862
    /* S= S + first(R);  R= R - first(R); */
1768
 
    if (!ror_intersect_add(intersect, *cur_ror_scan, false))
 
1863
    if (! ror_intersect_add(&intersect, *cur_ror_scan, false))
1769
1864
    {
1770
1865
      cur_ror_scan++;
1771
1866
      continue;
1773
1868
 
1774
1869
    *(intersect_scans_end++)= *(cur_ror_scan++);
1775
1870
 
1776
 
    if (intersect->total_cost < min_cost)
 
1871
    if (intersect.total_cost < min_cost)
1777
1872
    {
1778
1873
      /* Local minimum found, save it */
1779
 
      ror_intersect_cpy(intersect_best, intersect);
 
1874
      ror_intersect_cpy(&intersect_best, &intersect);
1780
1875
      intersect_scans_best= intersect_scans_end;
1781
 
      min_cost = intersect->total_cost;
 
1876
      min_cost = intersect.total_cost;
1782
1877
    }
1783
1878
  }
1784
1879
 
1787
1882
    return NULL;
1788
1883
  }
1789
1884
 
1790
 
  *are_all_covering= intersect->is_covering;
 
1885
  *are_all_covering= intersect.is_covering;
1791
1886
  uint32_t best_num= intersect_scans_best - intersect_scans;
1792
 
  ror_intersect_cpy(intersect, intersect_best);
 
1887
  ror_intersect_cpy(&intersect, &intersect_best);
1793
1888
 
1794
1889
  /*
1795
1890
    Ok, found the best ROR-intersection of non-CPK key scans.
1796
1891
    Check if we should add a CPK scan. If the obtained ROR-intersection is
1797
1892
    covering, it doesn't make sense to add CPK scan.
1798
1893
  */
1799
 
  if (cpk_scan && !intersect->is_covering)
 
1894
  if (cpk_scan && ! intersect.is_covering)
1800
1895
  {
1801
 
    if (ror_intersect_add(intersect, cpk_scan, true) &&
1802
 
        (intersect->total_cost < min_cost))
 
1896
    if (ror_intersect_add(&intersect, cpk_scan, true) &&
 
1897
        (intersect.total_cost < min_cost))
1803
1898
    {
1804
1899
      cpk_scan_used= true;
1805
1900
      intersect_best= intersect; //just set pointer here
1813
1908
    if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
1814
1909
      return trp;
1815
1910
 
1816
 
    trp->ror_range_scans.assign(intersect_scans, intersect_scans + best_num);
1817
 
    trp->setRowRetrievalNecessary(intersect_best->is_covering);
1818
 
    trp->read_cost= intersect_best->total_cost;
 
1911
    if (! (trp->first_scan=
 
1912
           (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)*best_num)))
 
1913
      return NULL;
 
1914
    memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
 
1915
    trp->last_scan=  trp->first_scan + best_num;
 
1916
    trp->is_covering= intersect_best.is_covering;
 
1917
    trp->read_cost= intersect_best.total_cost;
1819
1918
    /* Prevent divisons by zero */
1820
 
    ha_rows best_rows = double2rows(intersect_best->out_rows);
 
1919
    ha_rows best_rows = double2rows(intersect_best.out_rows);
1821
1920
    if (! best_rows)
1822
1921
      best_rows= 1;
1823
1922
    set_if_smaller(param->table->quick_condition_rows, best_rows);
1824
1923
    trp->records= best_rows;
1825
 
    trp->setCostOfIndexScans(intersect_best->index_scan_costs);
 
1924
    trp->index_scan_costs= intersect_best.index_scan_costs;
1826
1925
    trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
1827
1926
  }
1828
1927
  return trp;
1830
1929
 
1831
1930
 
1832
1931
/*
1833
 
  Get best covering ROR-intersection.
1834
 
  SYNOPSIS
1835
 
    get_best_covering_ror_intersect()
1836
 
      param     Parameter from test_quick_select function.
1837
 
      tree      optimizer::SEL_TREE with sets of intervals for different keys.
1838
 
      read_time Don't return table read plans with cost > read_time.
1839
 
 
1840
 
  RETURN
1841
 
    Best covering ROR-intersection plan
1842
 
    NULL if no plan found.
1843
 
 
1844
 
  NOTES
1845
 
    get_best_ror_intersect must be called for a tree before calling this
1846
 
    function for it.
1847
 
    This function invalidates tree->ror_scans member values.
1848
 
 
1849
 
  The following approximate algorithm is used:
1850
 
    I=set of all covering indexes
1851
 
    F=set of all fields to cover
1852
 
    S={}
1853
 
 
1854
 
    do
1855
 
    {
1856
 
      Order I by (#covered fields in F desc,
1857
 
                  #components asc,
1858
 
                  number of first not covered component asc);
1859
 
      F=F-covered by first(I);
1860
 
      S=S+first(I);
1861
 
      I=I-first(I);
1862
 
    } while F is not empty.
1863
 
*/
1864
 
 
1865
 
static
1866
 
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
1867
 
                                                            optimizer::SEL_TREE *tree,
1868
 
                                                            double read_time)
1869
 
{
1870
 
  ROR_SCAN_INFO **ror_scan_mark;
1871
 
  ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
1872
 
 
1873
 
  for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
1874
 
    (*scan)->key_components=
1875
 
      param->table->key_info[(*scan)->keynr].key_parts;
1876
 
 
1877
 
  /*
1878
 
    Run covering-ROR-search algorithm.
1879
 
    Assume set I is [ror_scan .. ror_scans_end)
1880
 
  */
1881
 
 
1882
 
  /*I=set of all covering indexes */
1883
 
  ror_scan_mark= tree->ror_scans;
1884
 
 
1885
 
  MyBitmap *covered_fields= &param->tmp_covered_fields;
1886
 
  if (! covered_fields->getBitmap())
1887
 
  {
1888
 
    my_bitmap_map *tmp_bitmap= (my_bitmap_map*)alloc_root(param->mem_root,
1889
 
                                               param->fields_bitmap_size);
1890
 
    covered_fields->setBitmap(tmp_bitmap);
1891
 
  }
1892
 
  if (! covered_fields->getBitmap() ||
1893
 
      covered_fields->init(covered_fields->getBitmap(),
1894
 
                           param->table->s->fields))
1895
 
    return 0;
1896
 
  covered_fields->clearAll();
1897
 
 
1898
 
  double total_cost= 0.0f;
1899
 
  ha_rows records=0;
1900
 
  bool all_covered;
1901
 
 
1902
 
  do
1903
 
  {
1904
 
    /*
1905
 
      Update changed sorting info:
1906
 
        #covered fields,
1907
 
        number of first not covered component
1908
 
      Calculate and save these values for each of remaining scans.
1909
 
    */
1910
 
    for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
1911
 
    {
1912
 
      bitmap_subtract(&(*scan)->covered_fields, covered_fields);
1913
 
      (*scan)->used_fields_covered=
1914
 
        (*scan)->covered_fields.getBitsSet();
1915
 
      (*scan)->first_uncovered_field=
1916
 
        (*scan)->covered_fields.getFirst();
1917
 
    }
1918
 
 
1919
 
    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
1920
 
                       sizeof(ROR_SCAN_INFO*),
1921
 
                       (qsort_cmp)cmp_ror_scan_info_covering);
1922
 
 
1923
 
    /* I=I-first(I) */
1924
 
    total_cost += (*ror_scan_mark)->index_read_cost;
1925
 
    records += (*ror_scan_mark)->records;
1926
 
    if (total_cost > read_time)
1927
 
      return NULL;
1928
 
    /* F=F-covered by first(I) */
1929
 
    bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
1930
 
    all_covered= bitmap_is_subset(&param->needed_fields, covered_fields);
1931
 
  } while ((++ror_scan_mark < ror_scans_end) && !all_covered);
1932
 
 
1933
 
  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
1934
 
    return NULL;
1935
 
 
1936
 
  /*
1937
 
    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
1938
 
    cost total_cost.
1939
 
  */
1940
 
  /* Add priority queue use cost. */
1941
 
  total_cost += rows2double(records)*
1942
 
                log((double)(ror_scan_mark - tree->ror_scans)) /
1943
 
                (TIME_FOR_COMPARE_ROWID * M_LN2);
1944
 
 
1945
 
  if (total_cost > read_time)
1946
 
    return NULL;
1947
 
 
1948
 
  optimizer::RorIntersectReadPlan *trp= NULL;
1949
 
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
1950
 
  {
1951
 
    return trp;
1952
 
  }
1953
 
 
1954
 
  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
1955
 
  trp->ror_range_scans.assign(tree->ror_scans, tree->ror_scans + best_num);
1956
 
  trp->setRowRetrievalNecessary(true);
1957
 
  trp->read_cost= total_cost;
1958
 
  trp->records= records;
1959
 
  trp->cpk_scan= NULL;
1960
 
  set_if_smaller(param->table->quick_condition_rows, records);
1961
 
 
1962
 
  return(trp);
1963
 
}
1964
 
 
1965
 
 
1966
 
/*
1967
1932
  Get best "range" table read plan for given optimizer::SEL_TREE, also update some info
1968
1933
 
1969
1934
  SYNOPSIS
1970
1935
    get_key_scans_params()
 
1936
      session
1971
1937
      param                    Parameters from test_quick_select
1972
1938
      tree                     Make range select for this optimizer::SEL_TREE
1973
1939
      index_read_must_be_used  true <=> assume 'index only' option will be set
1989
1955
    NULL if no plan found or error occurred
1990
1956
*/
1991
1957
 
1992
 
static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
 
1958
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
 
1959
                                                      optimizer::Parameter *param,
1993
1960
                                                      optimizer::SEL_TREE *tree,
1994
1961
                                                      bool index_read_must_be_used,
1995
1962
                                                      bool update_tbl_stats,
2026
1993
      bool read_index_only= index_read_must_be_used ||
2027
1994
                            param->table->covering_keys.test(keynr);
2028
1995
 
2029
 
      found_records= check_quick_select(param, idx, read_index_only, *key,
 
1996
      found_records= check_quick_select(session, param, idx, read_index_only, *key,
2030
1997
                                        update_tbl_stats, &mrr_flags,
2031
1998
                                        &buf_size, &cost);
2032
1999
      found_read_time= cost.total_cost();
2055
2022
      read_plan->records= best_records;
2056
2023
      read_plan->is_ror= tree->ror_scans_map.test(idx);
2057
2024
      read_plan->read_cost= read_time;
2058
 
      read_plan->setMRRBufferSize(best_buf_size);
 
2025
      read_plan->mrr_buf_size= best_buf_size;
2059
2026
    }
2060
2027
  }
2061
2028
 
2106
2073
                                                parent_alloc)))
2107
2074
  {
2108
2075
    alloc= parent_alloc ? parent_alloc : &quick_intersect->alloc;
2109
 
    for (vector<struct st_ror_scan_info *>::iterator it= ror_range_scans.begin();
2110
 
         it != ror_range_scans.end();
2111
 
         ++it)
 
2076
    for (; first_scan != last_scan; ++first_scan)
2112
2077
    {
2113
2078
      if (! (quick= optimizer::get_quick_select(param,
2114
 
                                                (*it)->idx,
2115
 
                                                (*it)->sel_arg,
 
2079
                                                (*first_scan)->idx,
 
2080
                                                (*first_scan)->sel_arg,
2116
2081
                                                HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
2117
2082
                                                0,
2118
2083
                                                alloc)) ||
2147
2112
optimizer::QuickSelectInterface *optimizer::RorUnionReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
2148
2113
{
2149
2114
  optimizer::QuickRorUnionSelect *quick_roru= NULL;
 
2115
  optimizer::TableReadPlan **scan= NULL;
2150
2116
  optimizer::QuickSelectInterface *quick= NULL;
2151
2117
  /*
2152
2118
    It is impossible to construct a ROR-union that will not retrieve full
2154
2120
  */
2155
2121
  if ((quick_roru= new optimizer::QuickRorUnionSelect(param->session, param->table)))
2156
2122
  {
2157
 
    for (vector<optimizer::TableReadPlan *>::iterator it= merged_scans.begin();
2158
 
         it != merged_scans.end();
2159
 
         ++it)
 
2123
    for (scan= first_ror; scan != last_ror; scan++)
2160
2124
    {
2161
 
      if (! (quick= (*it)->make_quick(param, false, &quick_roru->alloc)) ||
 
2125
      if (! (quick= (*scan)->make_quick(param, false, &quick_roru->alloc)) ||
2162
2126
          quick_roru->push_quick_back(quick))
2163
2127
      {
2164
2128
        return NULL;
2560
2524
  field->setWriteSet();
2561
2525
 
2562
2526
  Item_result cmp_type= field->cmp_type();
2563
 
  if (!((ref_tables | field->table->map) & param_comp))
 
2527
  if (!((ref_tables | field->getTable()->map) & param_comp))
2564
2528
    ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv);
2565
2529
  Item_equal *item_equal= field_item->item_equal;
2566
2530
  if (item_equal)
2574
2538
 
2575
2539
      if (field->eq(f))
2576
2540
        continue;
2577
 
      if (!((ref_tables | f->table->map) & param_comp))
 
2541
      if (!((ref_tables | f->getTable()->map) & param_comp))
2578
2542
      {
2579
2543
        tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv);
2580
2544
        ftree= !ftree ? tree : tree_and(param, ftree, tree);
2632
2596
    }
2633
2597
    return(tree);
2634
2598
  }
2635
 
  /* Here when simple cond */
2636
 
  if (cond->const_item())
 
2599
  /* Here when simple cond
 
2600
     There are limits on what kinds of const items we can evaluate, grep for
 
2601
     DontEvaluateMaterializedSubqueryTooEarly.
 
2602
  */
 
2603
  if (cond->const_item()  && !cond->is_expensive())
2637
2604
  {
2638
2605
    /*
2639
2606
      During the cond->val_int() evaluation we can come across a subselect
2725
2692
      field->setWriteSet();
2726
2693
 
2727
2694
      Item_result cmp_type= field->cmp_type();
2728
 
      if (!((ref_tables | field->table->map) & param_comp))
 
2695
      if (!((ref_tables | field->getTable()->map) & param_comp))
2729
2696
      {
2730
2697
        tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC,
2731
2698
                           value,cmp_type);
2764
2731
                   Item_func::Functype type,
2765
2732
                   Item *value, Item_result)
2766
2733
{
2767
 
  if (field->table != param->table)
 
2734
  if (field->getTable() != param->table)
2768
2735
    return 0;
2769
2736
 
2770
2737
  KEY_PART *key_part = param->key_parts;
2834
2801
  param->session->mem_root= param->old_root;
2835
2802
  if (!value)                                   // IS NULL or IS NOT NULL
2836
2803
  {
2837
 
    if (field->table->maybe_null)               // Can't use a key on this
 
2804
    if (field->getTable()->maybe_null)          // Can't use a key on this
2838
2805
      goto end;
2839
2806
    if (!maybe_null)                            // Not null field
2840
2807
    {
2919
2886
    {
2920
2887
      if (unlikely(length < field_length))
2921
2888
      {
2922
 
        /*
2923
 
          This can only happen in a table created with UNIREG where one key
2924
 
          overlaps many fields
2925
 
        */
2926
 
        length= field_length;
 
2889
        /*
 
2890
          This can only happen in a table created with UNIREG where one key
 
2891
          overlaps many fields
 
2892
        */
 
2893
        length= field_length;
2927
2894
      }
2928
2895
      else
2929
 
        field_length= length;
 
2896
        field_length= length;
2930
2897
    }
2931
2898
    length+=offset;
2932
 
    if (!(min_str= (unsigned char*) alloc_root(alloc, length*2)))
 
2899
    if (!(min_str= (unsigned char*) alloc->alloc_root(length*2)))
 
2900
    {
2933
2901
      goto end;
 
2902
    }
2934
2903
 
2935
2904
    max_str=min_str+length;
2936
2905
    if (maybe_null)
3149
3118
    tree= &optimizer::null_element;                        // cmp with NULL is never true
3150
3119
    goto end;
3151
3120
  }
3152
 
  str= (unsigned char*) alloc_root(alloc, key_part->store_length+1);
 
3121
 
 
3122
  /*
 
3123
    Any predicate except "<=>"(null-safe equality operator) involving NULL as a
 
3124
    constant is always FALSE
 
3125
    Put IMPOSSIBLE Tree(null_element) here.
 
3126
  */  
 
3127
  if (type != Item_func::EQUAL_FUNC && field->is_real_null())
 
3128
  {
 
3129
    tree= &optimizer::null_element;
 
3130
    goto end;
 
3131
  }
 
3132
 
 
3133
  str= (unsigned char*) alloc->alloc_root(key_part->store_length+1);
3153
3134
  if (!str)
3154
3135
    goto end;
3155
3136
  if (maybe_null)
3272
3253
 
3273
3254
#define CLONE_KEY1_MAYBE 1
3274
3255
#define CLONE_KEY2_MAYBE 2
3275
 
#define swap_clone_flag(A) ((A & 1) << 1) | ((A & 2) >> 1)
3276
3256
 
 
3257
static uint32_t swap_clone_flag(uint32_t a)
 
3258
{
 
3259
  return ((a & 1) << 1) | ((a & 2) >> 1);
 
3260
}
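The replaced macro is a genuine hazard: its body is not wrapped in one outer pair of parentheses, so operator precedence can split it apart at the call site, while the static function always evaluates as a unit (and is inlined just as well by any modern compiler). A small demonstration of the difference, assuming the original macro text:

#include <cassert>
#include <stdint.h>

#define SWAP_CLONE_FLAG(A) ((A & 1) << 1) | ((A & 2) >> 1)

static uint32_t swap_clone_flag(uint32_t a)
{
  return ((a & 1) << 1) | ((a & 2) >> 1);
}

int main()
{
  /*
    & binds tighter than |, so the macro use below expands to
    ((1 & 1) << 1) | (((1 & 2) >> 1) & 1), which is 2, while the
    intended value of swap_clone_flag(1) & 1 is (2 | 0) & 1 == 0.
  */
  assert((SWAP_CLONE_FLAG(1) & 1) == 2);   // misparsed
  assert((swap_clone_flag(1) & 1) == 0);   // evaluates as a unit
  return 0;
}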
3277
3261
 
3278
3262
static optimizer::SEL_TREE *
3279
3263
tree_and(optimizer::RangeParameter *param, optimizer::SEL_TREE *tree1, optimizer::SEL_TREE *tree2)
3749
3733
  /* Ok got a tuple */
3750
3734
  RANGE_SEQ_ENTRY *cur= &seq->stack[seq->i];
3751
3735
 
3752
 
  range->ptr= (char*)(int)(key_tree->part);
 
3736
  range->ptr= (char*)(size_t)(key_tree->part);
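This revision is about casts, and the line above is representative: a small integer is being stashed in a pointer field, and widening through a pointer-sized integer first keeps the integer and pointer widths matched on LP64 platforms, where int is 32-bit but pointers are 64-bit, avoiding the "cast to pointer from integer of different size" diagnostic. A self-contained round-trip sketch:

#include <cassert>
#include <stdint.h>

int main()
{
  unsigned part= 3;    /* small integer to smuggle into a pointer slot */
  void* ptr= reinterpret_cast<void*>(static_cast<uintptr_t>(part));
  assert(reinterpret_cast<uintptr_t>(ptr) == 3);   /* comes back intact */
  return 0;
}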
3753
3737
  {
3754
3738
    range->range_flag= cur->min_key_flag | cur->max_key_flag;
3755
3739
 
3825
3809
*/
3826
3810
 
3827
3811
static
3828
 
ha_rows check_quick_select(optimizer::Parameter *param,
 
3812
ha_rows check_quick_select(Session *session,
 
3813
                           optimizer::Parameter *param,
3829
3814
                           uint32_t idx,
3830
3815
                           bool index_only,
3831
3816
                           optimizer::SEL_ARG *tree,
3866
3851
  bool pk_is_clustered= cursor->primary_key_is_clustered();
3867
3852
  if (index_only &&
3868
3853
      (param->table->index_flags(keynr) & HA_KEYREAD_ONLY) &&
3869
 
      !(pk_is_clustered && keynr == param->table->s->primary_key))
 
3854
      !(pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
3870
3855
     *mrr_flags |= HA_MRR_INDEX_ONLY;
3871
3856
 
3872
 
  if (current_session->lex->sql_command != SQLCOM_SELECT)
 
3857
  if (session->lex->sql_command != SQLCOM_SELECT)
3873
3858
    *mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;
3874
3859
 
3875
3860
  *bufsize= param->session->variables.read_rnd_buff_size;
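check_quick_select() now receives the Session explicitly instead of reaching for the thread-bound current_session global. A schematic contrast; the types here are stand-ins, not Drizzle's Session and LEX:

struct Lex { int sql_command; };
struct Session { Lex* lex; };

Session* current_session= 0;   /* thread-local in the real server */

/* implicit: hidden dependency on whichever thread is running */
int flags_implicit()
{
  return current_session->lex->sql_command;
}

/* explicit: the caller supplies the session, as the patch now does */
int flags_explicit(Session* session)
{
  return session->lex->sql_command;
}

int main()
{
  Lex lex= { 1 };
  Session s= { &lex };
  current_session= &s;
  return flags_implicit() == flags_explicit(&s) ? 0 : 1;
}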
3901
3886
  else
3902
3887
  {
3903
3888
    /* Clustered PK scan is always a ROR scan (TODO: same as above) */
3904
 
    if (param->table->s->primary_key == keynr && pk_is_clustered)
 
3889
    if (param->table->getShare()->getPrimaryKey() == keynr && pk_is_clustered)
3905
3890
      param->is_ror_scan= true;
3906
3891
  }
3907
3892
 
3948
3933
 
3949
3934
static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts)
3950
3935
{
3951
 
  KEY *table_key= param->table->key_info + keynr;
3952
 
  KEY_PART_INFO *key_part= table_key->key_part + nparts;
3953
 
  KEY_PART_INFO *key_part_end= (table_key->key_part +
 
3936
  KeyInfo *table_key= param->table->key_info + keynr;
 
3937
  KeyPartInfo *key_part= table_key->key_part + nparts;
 
3938
  KeyPartInfo *key_part_end= (table_key->key_part +
3954
3939
                                table_key->key_parts);
3955
3940
  uint32_t pk_number;
3956
3941
 
3957
 
  for (KEY_PART_INFO *kp= table_key->key_part; kp < key_part; kp++)
 
3942
  for (KeyPartInfo *kp= table_key->key_part; kp < key_part; kp++)
3958
3943
  {
3959
3944
    uint16_t fieldnr= param->table->key_info[keynr].
3960
3945
                    key_part[kp - table_key->key_part].fieldnr - 1;
3961
 
    if (param->table->field[fieldnr]->key_length() != kp->length)
 
3946
    if (param->table->getField(fieldnr)->key_length() != kp->length)
3962
3947
      return false;
3963
3948
  }
3964
3949
 
3966
3951
    return true;
3967
3952
 
3968
3953
  key_part= table_key->key_part + nparts;
3969
 
  pk_number= param->table->s->primary_key;
 
3954
  pk_number= param->table->getShare()->getPrimaryKey();
3970
3955
  if (!param->table->cursor->primary_key_is_clustered() || pk_number == MAX_KEY)
3971
3956
    return false;
3972
3957
 
3973
 
  KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part;
3974
 
  KEY_PART_INFO *pk_part_end= pk_part +
 
3958
  KeyPartInfo *pk_part= param->table->key_info[pk_number].key_part;
 
3959
  KeyPartInfo *pk_part_end= pk_part +
3975
3960
                              param->table->key_info[pk_number].key_parts;
3976
3961
  for (;(key_part!=key_part_end) && (pk_part != pk_part_end);
3977
3962
       ++key_part, ++pk_part)
3992
3977
                            uint32_t mrr_buf_size,
3993
3978
                            memory::Root *parent_alloc)
3994
3979
{
3995
 
  optimizer::QuickRangeSelect *quick= NULL;
3996
 
  bool create_err= false;
3997
 
 
3998
 
  quick= new optimizer::QuickRangeSelect(param->session,
3999
 
                                         param->table,
4000
 
                                         param->real_keynr[idx],
4001
 
                                         test(parent_alloc),
4002
 
                                         NULL,
4003
 
                                         &create_err);
 
3980
  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(param->session,
 
3981
                                                                      param->table,
 
3982
                                                                      param->real_keynr[idx],
 
3983
                                                                      test(parent_alloc),
 
3984
                                                                      NULL);
4004
3985
 
4005
3986
  if (quick)
4006
3987
  {
4007
 
    if (create_err ||
4008
 
              get_quick_keys(param,
 
3988
          if (get_quick_keys(param,
4009
3989
                       quick,
4010
3990
                       param->key[idx],
4011
3991
                       key_tree,
4021
4001
    {
4022
4002
      quick->mrr_flags= mrr_flags;
4023
4003
      quick->mrr_buf_size= mrr_buf_size;
4024
 
      quick->key_parts=(KEY_PART*)
4025
 
        memdup_root(parent_alloc? parent_alloc : &quick->alloc,
4026
 
                    (char*) param->key[idx],
4027
 
                    sizeof(KEY_PART)*
4028
 
                    param->table->key_info[param->real_keynr[idx]].key_parts);
 
4004
      if (parent_alloc)
 
4005
      {
 
4006
        quick->key_parts=(KEY_PART*)
 
4007
          parent_alloc->memdup_root( (char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
 
4008
      }
 
4009
      else
 
4010
      {
 
4011
        quick->key_parts=(KEY_PART*)
 
4012
          quick->alloc.memdup_root((char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
 
4013
      }
4029
4014
    }
4030
4015
  }
4031
4016
  return quick;
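The create_err out-parameter to the QuickRangeSelect constructor is gone; with it removed, failure appears to surface through the existing two-phase pattern in which the constructor cannot fail and a separate init() call reports errors (the quick->init() check shows up further down in this diff). A generic sketch of that pattern, with hypothetical names:

class QuickSelect
{
  bool ready;
public:
  QuickSelect() : ready(false) {}   /* never fails */
  int init()                        /* 0 on success, non-zero on error */
  {
    ready= true;
    return 0;
  }
};

QuickSelect* make_quick_select()
{
  QuickSelect* quick= new QuickSelect();
  if (quick->init())
  {
    delete quick;
    return 0;                       /* caller treats NULL as "no ranges found" */
  }
  return quick;
}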
4139
4124
    if (length == (uint32_t) (tmp_max_key - param->max_key) &&
4140
4125
              ! memcmp(param->min_key,param->max_key,length))
4141
4126
    {
4142
 
      KEY *table_key= quick->head->key_info+quick->index;
 
4127
      KeyInfo *table_key= quick->head->key_info+quick->index;
4143
4128
      flag= EQ_RANGE;
4144
4129
      if ((table_key->flags & (HA_NOSAME)) == HA_NOSAME &&
4145
4130
                key->part == table_key->key_parts-1)
4221
4206
}
4222
4207
 
4223
4208
 
4224
 
bool optimizer::QuickSelectInterface::is_keys_used(const MyBitmap *fields)
 
4209
bool optimizer::QuickSelectInterface::is_keys_used(const boost::dynamic_bitset<>& fields)
4225
4210
{
4226
4211
  return is_key_used(head, index, fields);
4227
4212
}
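is_keys_used() now takes a boost::dynamic_bitset<> of field numbers instead of the homegrown MyBitmap. A standalone example of the bitset operations this kind of "does the key touch an updated field" check relies on; the field numbers and sizes are invented for illustration:

#include <boost/dynamic_bitset.hpp>
#include <iostream>

int main()
{
  boost::dynamic_bitset<> key_fields(8);    // one bit per table field
  key_fields.set(2);                        // key covers fields #2 and #5
  key_fields.set(5);

  boost::dynamic_bitset<> updated(8);
  updated.set(5);                           // an UPDATE touches field #5

  std::cout << ((key_fields & updated).any()
                ? "key depends on an updated field\n"
                : "key unaffected\n");
  return 0;
}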
4251
4236
                                                                 table_reference_st *ref,
4252
4237
                                                                 ha_rows records)
4253
4238
{
4254
 
  memory::Root *old_root, *alloc;
4255
 
  optimizer::QuickRangeSelect *quick= NULL;
4256
 
  KEY *key_info = &table->key_info[ref->key];
 
4239
  memory::Root *old_root= NULL;
 
4240
  memory::Root *alloc= NULL;
 
4241
  KeyInfo *key_info = &table->key_info[ref->key];
4257
4242
  KEY_PART *key_part;
4258
4243
  optimizer::QuickRange *range= NULL;
4259
4244
  uint32_t part;
4260
 
  bool create_err= false;
4261
4245
  optimizer::CostVector cost;
4262
4246
 
4263
4247
  old_root= session->mem_root;
4264
4248
  /* The following call may change session->mem_root */
4265
 
  quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0, &create_err);
 
4249
  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0);
4266
4250
  /* save mem_root set by QuickRangeSelect constructor */
4267
4251
  alloc= session->mem_root;
4268
4252
  /*
4271
4255
  */
4272
4256
  session->mem_root= old_root;
4273
4257
 
4274
 
  if (!quick || create_err)
 
4258
  if (! quick)
4275
4259
    return 0;                   /* no ranges found */
4276
4260
  if (quick->init())
4277
4261
    goto err;
4290
4274
 
4291
4275
 
4292
4276
  if (!(quick->key_parts=key_part=(KEY_PART *)
4293
 
        alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts)))
 
4277
        quick->alloc.alloc_root(sizeof(KEY_PART)*ref->key_parts)))
4294
4278
    goto err;
4295
4279
 
4296
4280
  for (part=0 ; part < ref->key_parts ;part++,key_part++)
4335
4319
 
4336
4320
  quick->mrr_buf_size= session->variables.read_rnd_buff_size;
4337
4321
  if (table->cursor->multi_range_read_info(quick->index, 1, (uint32_t)records,
4338
 
                                         &quick->mrr_buf_size,
4339
 
                                         &quick->mrr_flags, &cost))
 
4322
                                           &quick->mrr_buf_size,
 
4323
                                           &quick->mrr_flags, &cost))
4340
4324
    goto err;
4341
4325
 
4342
4326
  return quick;
4414
4398
}
4415
4399
 
4416
4400
 
4417
 
static inline uint32_t get_field_keypart(KEY *index, Field *field);
 
4401
static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);
4418
4402
 
4419
4403
static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
4420
4404
                                                        optimizer::SEL_TREE *range_tree,
4421
4405
                                                        optimizer::Parameter *param,
4422
4406
                                                        uint32_t *param_idx);
4423
4407
 
4424
 
static bool get_constant_key_infix(KEY *index_info,
 
4408
static bool get_constant_key_infix(KeyInfo *index_info,
4425
4409
                                   optimizer::SEL_ARG *index_range_tree,
4426
 
                                   KEY_PART_INFO *first_non_group_part,
4427
 
                                   KEY_PART_INFO *min_max_arg_part,
4428
 
                                   KEY_PART_INFO *last_part,
 
4410
                                   KeyPartInfo *first_non_group_part,
 
4411
                                   KeyPartInfo *min_max_arg_part,
 
4412
                                   KeyPartInfo *last_part,
4429
4413
                                   Session *session,
4430
4414
                                   unsigned char *key_infix,
4431
4415
                                   uint32_t *key_infix_len,
4432
 
                                   KEY_PART_INFO **first_non_infix_part);
 
4416
                                   KeyPartInfo **first_non_infix_part);
4433
4417
 
4434
4418
static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);
4435
4419
 
4436
4420
static void
4437
4421
cost_group_min_max(Table* table,
4438
 
                   KEY *index_info,
 
4422
                   KeyInfo *index_info,
4439
4423
                   uint32_t used_key_parts,
4440
4424
                   uint32_t group_key_parts,
4441
4425
                   optimizer::SEL_TREE *range_tree,
4578
4562
get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree)
4579
4563
{
4580
4564
  Session *session= param->session;
4581
 
  JOIN *join= session->lex->current_select->join;
 
4565
  Join *join= session->lex->current_select->join;
4582
4566
  Table *table= param->table;
4583
4567
  bool have_min= false;              /* true if there is a MIN function. */
4584
4568
  bool have_max= false;              /* true if there is a MAX function. */
4585
4569
  Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
4586
 
  KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
 
4570
  KeyPartInfo *min_max_arg_part= NULL; /* The corresponding keypart. */
4587
4571
  uint32_t group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
4588
 
  KEY *index_info= NULL;    /* The index chosen for data access. */
 
4572
  KeyInfo *index_info= NULL;    /* The index chosen for data access. */
4589
4573
  uint32_t index= 0;            /* The id of the chosen index. */
4590
4574
  uint32_t group_key_parts= 0;  // Number of index key parts in the group prefix.
4591
4575
  uint32_t used_key_parts= 0;   /* Number of index key parts used for access. */
4593
4577
  uint32_t key_infix_len= 0;          /* Length of key_infix. */
4594
4578
  optimizer::GroupMinMaxReadPlan *read_plan= NULL; /* The eventually constructed TRP. */
4595
4579
  uint32_t key_part_nr;
4596
 
  order_st *tmp_group= NULL;
 
4580
  Order *tmp_group= NULL;
4597
4581
  Item *item= NULL;
4598
4582
  Item_field *item_field= NULL;
4599
4583
 
4606
4590
       (! join->select_distinct)) ||
4607
4591
      (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
4608
4592
    return NULL;
4609
 
  if (table->s->keys == 0)        /* There are no indexes to use. */
 
4593
  if (table->getShare()->sizeKeys() == 0)        /* There are no indexes to use. */
4610
4594
    return NULL;
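table->s->keys becoming table->getShare()->sizeKeys() is one instance of the accessor refactor running through this whole patch (primary_key to getPrimaryKey(), field[i] to getField(i), and so on): direct pokes at public members give way to getters. A schematic before/after with hypothetical types:

#include <stdint.h>

class TableShare
{
  uint32_t keys;                                /* was public and read directly */
public:
  TableShare() : keys(0) {}
  uint32_t sizeKeys() const { return keys; }    /* read through an accessor instead */
};

class Table
{
  TableShare share;
public:
  const TableShare* getShare() const { return &share; }
};

int main()
{
  Table table;
  return table.getShare()->sizeKeys() == 0 ? 0 : 1;   /* no indexes to use */
}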
4611
4595
 
4612
4596
  /* Analyze the query in more detail. */
4665
4649
    (GA1,GA2) are all true. If there is more than one such index, select the
4666
4650
    first one. Here we set the variables: group_prefix_len and index_info.
4667
4651
  */
4668
 
  KEY *cur_index_info= table->key_info;
4669
 
  KEY *cur_index_info_end= cur_index_info + table->s->keys;
4670
 
  KEY_PART_INFO *cur_part= NULL;
4671
 
  KEY_PART_INFO *end_part= NULL; /* Last part for loops. */
 
4652
  KeyInfo *cur_index_info= table->key_info;
 
4653
  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->sizeKeys();
 
4654
  KeyPartInfo *cur_part= NULL;
 
4655
  KeyPartInfo *end_part= NULL; /* Last part for loops. */
4672
4656
  /* Last index part. */
4673
 
  KEY_PART_INFO *last_part= NULL;
4674
 
  KEY_PART_INFO *first_non_group_part= NULL;
4675
 
  KEY_PART_INFO *first_non_infix_part= NULL;
 
4657
  KeyPartInfo *last_part= NULL;
 
4658
  KeyPartInfo *first_non_group_part= NULL;
 
4659
  KeyPartInfo *first_non_infix_part= NULL;
4676
4660
  uint32_t key_infix_parts= 0;
4677
4661
  uint32_t cur_group_key_parts= 0;
4678
4662
  uint32_t cur_group_prefix_len= 0;
4691
4675
  uint32_t cur_key_infix_len= 0;
4692
4676
  unsigned char cur_key_infix[MAX_KEY_LENGTH];
4693
4677
  uint32_t cur_used_key_parts= 0;
4694
 
  uint32_t pk= param->table->s->primary_key;
 
4678
  uint32_t pk= param->table->getShare()->getPrimaryKey();
4695
4679
 
4696
4680
  for (uint32_t cur_index= 0;
4697
4681
       cur_index_info != cur_index_info_end;
4714
4698
        (table->cursor->getEngine()->check_flag(HTON_BIT_PRIMARY_KEY_IN_READ_INDEX)))
4715
4699
    {
4716
4700
      /* For each table field */
4717
 
      for (uint32_t i= 0; i < table->s->fields; i++)
 
4701
      for (uint32_t i= 0; i < table->getShare()->sizeFields(); i++)
4718
4702
      {
4719
 
        Field *cur_field= table->field[i];
 
4703
        Field *cur_field= table->getField(i);
4720
4704
        /*
4721
4705
          If the field is used in the current query ensure that it's
4722
4706
          part of 'cur_index'
4884
4868
          Store the first and last keyparts that need to be analyzed
4885
4869
          into one array that can be passed as parameter.
4886
4870
        */
4887
 
        KEY_PART_INFO *key_part_range[2];
 
4871
        KeyPartInfo *key_part_range[2];
4888
4872
        key_part_range[0]= first_non_group_part;
4889
4873
        key_part_range[1]= last_part;
4890
4874
 
4928
4912
      optimizer::CostVector dummy_cost;
4929
4913
      uint32_t mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
4930
4914
      uint32_t mrr_bufsize= 0;
4931
 
      cur_quick_prefix_records= check_quick_select(param,
 
4915
      cur_quick_prefix_records= check_quick_select(session,
 
4916
                                                   param,
4932
4917
                                                   cur_param_idx,
4933
4918
                                                   false /*don't care*/,
4934
4919
                                                   cur_index_tree,
5176
5161
    false otherwise
5177
5162
*/
5178
5163
static bool
5179
 
get_constant_key_infix(KEY *,
 
5164
get_constant_key_infix(KeyInfo *,
5180
5165
                       optimizer::SEL_ARG *index_range_tree,
5181
 
                       KEY_PART_INFO *first_non_group_part,
5182
 
                       KEY_PART_INFO *min_max_arg_part,
5183
 
                       KEY_PART_INFO *last_part,
 
5166
                       KeyPartInfo *first_non_group_part,
 
5167
                       KeyPartInfo *min_max_arg_part,
 
5168
                       KeyPartInfo *last_part,
5184
5169
                       Session *,
5185
5170
                       unsigned char *key_infix,
5186
5171
                       uint32_t *key_infix_len,
5187
 
                       KEY_PART_INFO **first_non_infix_part)
 
5172
                       KeyPartInfo **first_non_infix_part)
5188
5173
{
5189
5174
  optimizer::SEL_ARG *cur_range= NULL;
5190
 
  KEY_PART_INFO *cur_part= NULL;
 
5175
  KeyPartInfo *cur_part= NULL;
5191
5176
  /* End part for the first loop below. */
5192
 
  KEY_PART_INFO *end_part= min_max_arg_part ? min_max_arg_part : last_part;
 
5177
  KeyPartInfo *end_part= min_max_arg_part ? min_max_arg_part : last_part;
5193
5178
 
5194
5179
  *key_infix_len= 0;
5195
5180
  unsigned char *key_ptr= key_infix;
5255
5240
    field  field that possibly references some key part in index
5256
5241
 
5257
5242
  NOTES
5258
 
    The return value can be used to get a KEY_PART_INFO pointer by
 
5243
    The return value can be used to get a KeyPartInfo pointer by
5259
5244
    part= index->key_part + get_field_keypart(...) - 1;
5260
5245
 
5261
5246
  RETURN
5263
5248
    0 if field does not reference any index field.
5264
5249
*/
5265
5250
static inline uint
5266
 
get_field_keypart(KEY *index, Field *field)
 
5251
get_field_keypart(KeyInfo *index, Field *field)
5267
5252
{
5268
 
  KEY_PART_INFO *part= NULL;
5269
 
  KEY_PART_INFO *end= NULL;
 
5253
  KeyPartInfo *part= NULL;
 
5254
  KeyPartInfo *end= NULL;
5270
5255
 
5271
5256
  for (part= index->key_part, end= part + index->key_parts; part < end; part++)
5272
5257
  {
5376
5361
    None
5377
5362
*/
5378
5363
void cost_group_min_max(Table* table,
5379
 
                        KEY *index_info,
 
5364
                        KeyInfo *index_info,
5380
5365
                        uint32_t used_key_parts,
5381
5366
                        uint32_t group_key_parts,
5382
5367
                        optimizer::SEL_TREE *range_tree,
5581
5566
}
5582
5567
 
5583
5568
 
 
5569
uint32_t optimizer::RorScanInfo::findFirstNotSet() const
 
5570
{
 
5571
  boost::dynamic_bitset<> map= bitsToBitset();
 
5572
  for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
 
5573
  {
 
5574
    if (! map.test(i))
 
5575
    {
 
5576
      return i;
 
5577
    }
 
5578
  }
 
5579
  return map.size();
 
5580
}
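boost::dynamic_bitset offers find_first() only for set bits, hence the hand-rolled scan above; complementing the bitset first turns "first zero" into "first one" and expresses the same thing directly. An equivalent free-function spelling, assuming the same bitsToBitset() semantics:

#include <boost/dynamic_bitset.hpp>
#include <cstddef>

std::size_t find_first_not_set(const boost::dynamic_bitset<>& map)
{
  boost::dynamic_bitset<>::size_type i= (~map).find_first();  /* first zero bit of map */
  return (i == boost::dynamic_bitset<>::npos) ? map.size() : i;
}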
 
5581
 
 
5582
 
 
5583
size_t optimizer::RorScanInfo::getBitCount() const
 
5584
{
 
5585
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
 
5586
  return tmp_bitset.count();
 
5587
}
 
5588
 
 
5589
 
 
5590
void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
 
5591
{
 
5592
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
 
5593
  tmp_bitset-= in_bitset;
 
5594
  covered_fields= tmp_bitset.to_ulong();
 
5595
}
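operator-= on dynamic_bitset is set difference (a = a & ~b), so the round-trip above removes in_bitset's fields from covered_fields. Since covered_fields lives in a plain integer, the same update can be sketched on raw words; the two bitsets must be the same size, as the member function already assumes:

#include <boost/dynamic_bitset.hpp>
#include <cstddef>

unsigned long subtract_covered(unsigned long covered_fields,
                               std::size_t nbits,
                               const boost::dynamic_bitset<>& in_bitset)
{
  boost::dynamic_bitset<> tmp(nbits, covered_fields);  /* value constructor */
  tmp-= in_bitset;                                     /* tmp= tmp & ~in_bitset */
  return tmp.to_ulong();
}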
 
5596
 
 
5597
 
 
5598
boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
 
5599
{
 
5600
  string res;
 
5601
  uint64_t conv= covered_fields;
 
5602
  while (conv)
 
5603
  {
 
5604
    res.push_back((conv & 1) + '0');
 
5605
    conv>>= 1;
 
5606
  }
 
5607
  if (! res.empty())
 
5608
  {
 
5609
    std::reverse(res.begin(), res.end());
 
5610
  }
 
5611
  string final(covered_fields_size - res.length(), '0');
 
5612
  final.append(res);
 
5613
  return (boost::dynamic_bitset<>(final));
 
5614
}
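The string detour exists because dynamic_bitset's string constructor reads the leftmost character as the highest bit, so the '0'/'1' text must be built most-significant-first and left-padded to covered_fields_size. Given that covered_fields already fits in an unsigned long (to_ulong() is used above), the integer-value constructor would build the same bitset in one step; a sketch with an example value:

#include <boost/dynamic_bitset.hpp>
#include <cassert>
#include <cstddef>

int main()
{
  unsigned long covered_fields= 0x2C;    /* example: fields 2, 3 and 5 covered */
  std::size_t covered_fields_size= 8;

  boost::dynamic_bitset<> direct(covered_fields_size, covered_fields);
  assert(direct.test(2) && direct.test(3) && direct.test(5));
  assert(direct.size() == covered_fields_size);
  return 0;
}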
 
5615
 
 
5616
 
5584
5617
} /* namespace drizzled */