~drizzle-trunk/drizzle/development

Viewing changes to drizzled/optimizer/range.cc

  • Committer: Monty Taylor
  • Date: 2010-09-19 17:42:57 UTC
  • mfrom: (1769.2.5 fix-dist-files)
  • mto: This revision was merged to the branch mainline in revision 1776.
  • Revision ID: mordred@inaugust.com-20100919174257-hj8lvngq0325udzp
Changed package name to drizzle7

@@ -84 +84 @@
   keypart-value-bytes holds the value. Its format depends on the field type.
   The length of keypart-value-bytes may or may not depend on the value being
   stored. The default is that length is static and equal to
-  KEY_PART_INFO::length.
+  KeyPartInfo::length.
 
   Key parts with (key_part_flag & HA_BLOB_PART) have length depending of the
   value:
@@ -135 +135 @@
 
 #include "drizzled/temporal.h" /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */
 
-#include "drizzled/memory/multi_malloc.h"
-
 using namespace std;
 namespace drizzled
 {
@@ -206 +204 @@
   cost->zero();
   if (table->cursor->primary_key_is_clustered())
   {
-    cost->setIOCount(table->cursor->read_time(table->s->primary_key,
+    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
                                              static_cast<uint32_t>(nrows),
                                              nrows));
   }
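Many hunks in this diff, starting with the one above, replace direct pokes at the table share (`table->s->primary_key`, `table->s->keys`, `table->s->fields`) with accessor calls (`table->getShare()->getPrimaryKey()`, `sizeKeys()`, `sizeFields()`). The sketch below illustrates that encapsulation move in isolation; the class names and members are simplified stand-ins, not Drizzle's real Table/TableShare layout. Routing reads through a getter gives the share one place to add validation or instrumentation later without touching every call site.

// Sketch of the accessor-encapsulation pattern; illustrative names only.
#include <cstdint>
#include <iostream>

static const uint32_t MAX_KEY= 64; // stand-in for the real constant

class TableShare
{
  uint32_t primary_key;            // was read directly as table->s->primary_key
public:
  TableShare() : primary_key(MAX_KEY) {}
  void setPrimaryKey(uint32_t k) { primary_key= k; }
  // Accessor gives the share a single choke point for reads.
  uint32_t getPrimaryKey() const { return primary_key; }
  bool hasPrimaryKey() const { return primary_key != MAX_KEY; }
};

class Table
{
  TableShare share;
public:
  TableShare *getShare() { return &share; } // replaces the raw 's' member
};

int main()
{
  Table t;
  t.getShare()->setPrimaryKey(0);
  if (t.getShare()->hasPrimaryKey())
    std::cout << "pk index: " << t.getShare()->getPrimaryKey() << "\n";
  return 0;
}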
@@ -250 +248 @@
 
 static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);
 
-static ha_rows check_quick_select(optimizer::Parameter *param,
+static ha_rows check_quick_select(Session *session,
+                                  optimizer::Parameter *param,
                                   uint32_t idx,
                                   bool index_only,
                                   optimizer::SEL_ARG *tree,
@@ -259 +258 @@
                                   uint32_t *bufsize,
                                   optimizer::CostVector *cost);
 
-static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
+static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
+                                                      optimizer::Parameter *param,
                                                       optimizer::SEL_TREE *tree,
                                                       bool index_read_must_be_used,
                                                       bool update_tbl_stats,
@@ -277 +277 @@
                                                                  double read_time);
 
 static
-optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
+optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
+                                                  optimizer::Parameter *param,
                                                   optimizer::SEL_IMERGE *imerge,
                                                   double read_time);
 
@@ -384 +385 @@
 
 void optimizer::SqlSelect::cleanup()
 {
-  delete quick;
-  quick= 0;
+  if (quick)
+  {
+    delete quick;
+    quick= NULL;
+  }
+
   if (free_cond)
   {
     free_cond= 0;
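The hunk above replaces the bare `delete quick; quick= 0;` pair with a guarded block. Since `delete` on a null pointer is already a no-op in C++, the guard itself is mostly defensive style; the useful part of the pattern is resetting the pointer to NULL so that cleanup stays safe if it runs again (a later hunk even adds the comment "quick can already be free here"). A minimal sketch, with a stand-in QuickSelect type:

// Minimal sketch of the idempotent-cleanup pattern adopted above.
#include <cstddef>
#include <iostream>

struct QuickSelect
{
  ~QuickSelect() { std::cout << "quick select destroyed\n"; }
};

struct SqlSelectSketch
{
  QuickSelect *quick;
  SqlSelectSketch() : quick(new QuickSelect) {}

  void cleanup()
  {
    // 'delete' on a null pointer is a no-op, so the guard is belt-and-braces;
    // the essential part is nulling the pointer so a second cleanup() call
    // (or the destructor) cannot double-delete.
    if (quick)
    {
      delete quick;
      quick= NULL;
    }
  }

  ~SqlSelectSketch() { cleanup(); }
};

int main()
{
  SqlSelectSketch sel;
  sel.cleanup();  // destroys once
  sel.cleanup();  // safe: pointer is already NULL
  return 0;       // destructor runs cleanup() again, still safe
}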
@@ -469 +474 @@
     if (!ord->asc)
       return MAX_KEY;
 
-  for (idx= 0; idx < table->s->keys; idx++)
+  for (idx= 0; idx < table->getShare()->sizeKeys(); idx++)
   {
     if (!(table->keys_in_use_for_query.test(idx)))
       continue;
-    KEY_PART_INFO *keyinfo= table->key_info[idx].key_part;
+    KeyPartInfo *keyinfo= table->key_info[idx].key_part;
     uint32_t n_parts=  table->key_info[idx].key_parts;
     uint32_t partno= 0;
 
@@ -541 +546 @@
   my_bitmap_map *tmp;
   uint32_t pk;
   param->tmp_covered_fields.setBitmap(0);
-  param->fields_bitmap_size= table->s->column_bitmap_size;
-  if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
-                                  param->fields_bitmap_size)) ||
-      param->needed_fields.init(tmp, table->s->fields))
+  param->fields_bitmap_size= table->getShare()->column_bitmap_size;
+  if (!(tmp= (my_bitmap_map*) param->mem_root->alloc_root(param->fields_bitmap_size)) ||
+      param->needed_fields.init(tmp, table->getShare()->sizeFields()))
+  {
     return 1;
+  }
 
   param->needed_fields= *table->read_set;
   bitmap_union(&param->needed_fields, table->write_set);
 
-  pk= param->table->s->primary_key;
+  pk= param->table->getShare()->getPrimaryKey();
   if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
   {
     /* The table uses clustered PK and it is not internally generated */
-    KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
-    KEY_PART_INFO *key_part_end= key_part +
+    KeyPartInfo *key_part= param->table->key_info[pk].key_part;
+    KeyPartInfo *key_part_end= key_part +
                                  param->table->key_info[pk].key_parts;
     for (;key_part != key_part_end; ++key_part)
       param->needed_fields.clearBit(key_part->fieldnr-1);
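From here on, every allocation site in the diff switches from the free function `alloc_root(root, size)` to the member call `root->alloc_root(size)` on `memory::Root` (and likewise `free_root(&alloc, MYF(0))` becomes `alloc.free_root(MYF(0))` and `memdup_root(root, ...)` becomes `root->memdup_root(...)`). The toy arena below sketches that API move only; it omits the block reuse, alignment, and flag handling of the real allocator.

// Sketch of moving a C-style allocator API onto the arena object itself.
// This toy Root is illustrative, not Drizzle's memory::Root.
#include <cstddef>
#include <cstring>
#include <vector>

namespace memory
{
class Root
{
  std::vector<char *> blocks;
public:
  // Member form: call sites read root->alloc_root(size) instead of the
  // old free function alloc_root(&root, size).
  void *alloc_root(size_t size)
  {
    char *p= new char[size];
    blocks.push_back(p);
    return p;
  }

  // Duplicate a buffer into the arena (mirrors memdup_root in the diff).
  void *memdup_root(const void *src, size_t size)
  {
    void *p= alloc_root(size);
    memcpy(p, src, size);
    return p;
  }

  // Everything allocated from the root is released in one shot.
  void free_root()
  {
    for (size_t i= 0; i < blocks.size(); ++i)
      delete[] blocks[i];
    blocks.clear();
  }

  ~Root() { free_root(); }
};
} // namespace memory

int main()
{
  memory::Root alloc;
  int *scans= (int *) alloc.alloc_root(sizeof(int) * 8);
  scans[0]= 42;
  int *copy= (int *) alloc.memdup_root(scans, sizeof(int) * 8);
  (void) copy;
  alloc.free_root();   // "Return memory & allocator", as the diff's comment says
  return 0;
}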
@@ -639 +645 @@
 {
   uint32_t idx;
   double scan_time;
-  delete quick;
-  quick=0;
+  if (quick)
+  {
+    delete quick;
+    quick= NULL;
+  }
   needed_reg.reset();
   quick_keys.reset();
   if (keys_to_use.none())
@@ -663 +672 @@
     memory::Root alloc;
     optimizer::SEL_TREE *tree= NULL;
     KEY_PART *key_parts;
-    KEY *key_info;
+    KeyInfo *key_info;
     optimizer::Parameter param;
 
     if (check_stack_overrun(session, 2*STACK_MIN_SIZE, NULL))
@@ -686 +695 @@
 
     session->no_errors=1;                               // Don't warn about NULL
     memory::init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
-    if (!(param.key_parts= (KEY_PART*) alloc_root(&alloc,
-                                                  sizeof(KEY_PART)*
-                                                  head->s->key_parts)) ||
+    if (!(param.key_parts= (KEY_PART*) alloc.alloc_root( sizeof(KEY_PART) * head->getShare()->key_parts)) ||
         fill_used_fields_bitmap(&param))
     {
       session->no_errors=0;
-      free_root(&alloc,MYF(0));                 // Return memory & allocator
+      alloc.free_root(MYF(0));                  // Return memory & allocator
+
       return 0;                         // Can't use range
     }
     key_parts= param.key_parts;
@@ -703 +711 @@
       This is used in get_mm_parts function.
     */
     key_info= head->key_info;
-    for (idx=0 ; idx < head->s->keys ; idx++, key_info++)
+    for (idx=0 ; idx < head->getShare()->sizeKeys() ; idx++, key_info++)
     {
-      KEY_PART_INFO *key_part_info;
+      KeyPartInfo *key_part_info;
       if (! keys_to_use.test(idx))
         continue;
 
@@ -793 +801 @@
         bool can_build_covering= false;
 
         /* Get best 'range' plan and prepare data for making other plans */
-        if ((range_trp= get_key_scans_params(&param, tree, false, true,
+        if ((range_trp= get_key_scans_params(session, &param, tree, false, true,
                                              best_read_time)))
         {
           best_trp= range_trp;
@@ -820 +828 @@
               Try constructing covering ROR-intersect only if it looks possible
               and worth doing.
             */
-            if (rori_trp->isRowRetrievalNecessary() && can_build_covering &&
+            if (!rori_trp->is_covering && can_build_covering &&
                 (rori_trp= get_best_covering_ror_intersect(&param, tree,
                                                            best_read_time)))
              best_trp= rori_trp;
@@ -836 +844 @@
         List_iterator_fast<optimizer::SEL_IMERGE> it(tree->merges);
         while ((imerge= it++))
         {
-          new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
+          new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);
           if (new_conj_trp)
             set_if_smaller(param.table->quick_condition_rows,
                            new_conj_trp->records);
@@ -857 +865 @@
       records= best_trp->records;
       if (! (quick= best_trp->make_quick(&param, true)) || quick->init())
       {
-        delete quick;
-        quick= NULL;
+        /* quick can already be free here */
+        if (quick)
+        {
+          delete quick;
+          quick= NULL;
+        }
       }
     }
 
  free_mem:
-    free_root(&alloc,MYF(0));                   // Return memory & allocator
+    alloc.free_root(MYF(0));                    // Return memory & allocator
     session->mem_root= param.old_root;
     session->no_errors=0;
   }
@@ -879 +891 @@
   Get best plan for a optimizer::SEL_IMERGE disjunctive expression.
   SYNOPSIS
     get_best_disjunct_quick()
+      session
       param     Parameter from check_quick_select function
       imerge    Expression to use
       read_time Don't create scans with cost > read_time
@@ -941 +954 @@
 */
 
 static
-optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
+optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
+                                                  optimizer::Parameter *param,
                                                   optimizer::SEL_IMERGE *imerge,
                                                   double read_time)
 {
@@ -965 +979 @@
   ha_rows roru_total_records;
   double roru_intersect_part= 1.0;
 
-  if (! (range_scans= (optimizer::RangeReadPlan**)alloc_root(param->mem_root,
-                                                             sizeof(optimizer::RangeReadPlan*)*
-                                                             n_child_scans)))
+  if (! (range_scans= (optimizer::RangeReadPlan**)param->mem_root->alloc_root(sizeof(optimizer::RangeReadPlan*)* n_child_scans)))
+  {
     return NULL;
+  }
+
   /*
     Collect best 'range' scan for each of disjuncts, and, while doing so,
     analyze possibility of ROR scans. Also calculate some values needed by
@@ -978 +993 @@
        ptree != imerge->trees_next;
        ptree++, cur_child++)
   {
-    if (!(*cur_child= get_key_scans_params(param, *ptree, true, false, read_time)))
+    if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
     {
       /*
         One of index scans in this index_merge is more expensive than entire
@@ -995 +1010 @@
     all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
     all_scans_rors &= (*cur_child)->is_ror;
     if (pk_is_clustered &&
-        param->real_keynr[(*cur_child)->getKeyIndex()] ==
-        param->table->s->primary_key)
+        param->real_keynr[(*cur_child)->key_idx] ==
+        param->table->getShare()->getPrimaryKey())
     {
       cpk_scan= cur_child;
       cpk_scan_records= (*cur_child)->records;
@@ -1031 +1046 @@
   /* Calculate cost(rowid_to_row_scan) */
   {
     optimizer::CostVector sweep_cost;
-    JOIN *join= param->session->lex->select_lex.join;
+    Join *join= param->session->lex->select_lex.join;
     bool is_interrupted= test(join && join->tables == 1);
     get_sweep_read_cost(param->table, non_cpk_scan_records, is_interrupted,
                         &sweep_cost);
@@ -1047 +1062 @@
                                     param->session->variables.sortbuff_size);
   if (param->imerge_cost_buff_size < unique_calc_buff_size)
   {
-    if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root,
-                                                     unique_calc_buff_size)))
+    if (!(param->imerge_cost_buff= (uint*)param->mem_root->alloc_root(unique_calc_buff_size)))
+    {
       return NULL;
+    }
+
     param->imerge_cost_buff_size= unique_calc_buff_size;
   }
 
@@ -1078 +1095 @@
   /* Ok, it is possible to build a ROR-union, try it. */
   bool dummy;
   if (! (roru_read_plans=
-          (optimizer::TableReadPlan **) alloc_root(param->mem_root,
-                                                   sizeof(optimizer::TableReadPlan*)*
-                                                   n_child_scans)))
+          (optimizer::TableReadPlan **) param->mem_root->alloc_root(sizeof(optimizer::TableReadPlan*) * n_child_scans)))
+  {
     return imerge_trp;
+  }
 skip_to_ror_scan:
   roru_index_costs= 0.0;
   roru_total_records= 0;
@@ -1103 +1120 @@
     {
       /* Ok, we have index_only cost, now get full rows scan cost */
       cost= param->table->cursor->
-              read_time(param->real_keynr[(*cur_child)->getKeyIndex()], 1,
+              read_time(param->real_keynr[(*cur_child)->key_idx], 1,
                         (*cur_child)->records) +
               rows2double((*cur_child)->records) / TIME_FOR_COMPARE;
     }
@@ -1121 +1138 @@
       roru_index_costs += (*cur_roru_plan)->read_cost;
     }
     else
-    {
       roru_index_costs +=
-        ((optimizer::RorIntersectReadPlan *)(*cur_roru_plan))->getCostOfIndexScans();
-    }
+        ((optimizer::RorIntersectReadPlan*)(*cur_roru_plan))->index_scan_costs;
     roru_total_records += (*cur_roru_plan)->records;
     roru_intersect_part *= (*cur_roru_plan)->records /
                            param->table->cursor->stats.records;
@@ -1149 +1164 @@
   double roru_total_cost;
   {
     optimizer::CostVector sweep_cost;
-    JOIN *join= param->session->lex->select_lex.join;
+    Join *join= param->session->lex->select_lex.join;
     bool is_interrupted= test(join && join->tables == 1);
     get_sweep_read_cost(param->table, roru_total_records, is_interrupted,
                         &sweep_cost);
@@ -1164 +1179 @@
   {
     if ((roru= new (param->mem_root) optimizer::RorUnionReadPlan))
     {
-      roru->merged_scans.assign(roru_read_plans, roru_read_plans + n_child_scans);
+      roru->first_ror= roru_read_plans;
+      roru->last_ror= roru_read_plans + n_child_scans;
      roru->read_cost= roru_total_cost;
      roru->records= roru_total_records;
      return roru;
@@ -1221 +1237 @@
 
   uint32_t keynr;
 
-  if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root,
-                                             sizeof(ROR_SCAN_INFO))))
+  if (!(ror_scan= (ROR_SCAN_INFO*)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO))))
     return NULL;
 
   ror_scan->idx= idx;
@@ -1232 +1247 @@
   ror_scan->sel_arg= sel_arg;
   ror_scan->records= param->table->quick_rows[keynr];
 
-  if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
-                                                param->fields_bitmap_size)))
+  if (!(bitmap_buf= (my_bitmap_map*) param->mem_root->alloc_root(param->fields_bitmap_size)))
+  {
     return NULL;
+  }
 
-  if (ror_scan->covered_fields.init(bitmap_buf,
-                                    param->table->s->fields))
+  if (ror_scan->covered_fields.init(bitmap_buf, param->table->getShare()->sizeFields()))
+  {
     return NULL;
+  }
   ror_scan->covered_fields.clearAll();
 
-  KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part;
-  KEY_PART_INFO *key_part_end= key_part +
+  KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
+  KeyPartInfo *key_part_end= key_part +
                                param->table->key_info[keynr].key_parts;
   for (;key_part != key_part_end; ++key_part)
   {
@@ -1276 +1293 @@
   return (val1 < val2)? -1: (val1 == val2)? 0 : 1;
 }
 
+
 /*
   Compare two ROR_SCAN_INFO** by
    (#covered fields in F desc,
@@ -1310 +1328 @@
   return 0;
 }
 
-
 /* Auxiliary structure for incremental ROR-intersection creation */
 typedef struct
 {
@@ -1348 +1365 @@
 {
   ROR_INTERSECT_INFO *info;
   my_bitmap_map* buf;
-  if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
-                                              sizeof(ROR_INTERSECT_INFO))))
+  if (!(info= (ROR_INTERSECT_INFO*)param->mem_root->alloc_root(sizeof(ROR_INTERSECT_INFO))))
     return NULL;
   info->param= param;
-  if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
-                                         param->fields_bitmap_size)))
+  if (!(buf= (my_bitmap_map*) param->mem_root->alloc_root(param->fields_bitmap_size)))
     return NULL;
-  if (info->covered_fields.init(buf, param->table->s->fields))
+  if (info->covered_fields.init(buf, param->table->getShare()->sizeFields()))
     return NULL;
   info->is_covering= false;
   info->index_scan_costs= 0.0;
@@ -1472 +1487 @@
                                    const ROR_SCAN_INFO *scan)
 {
   double selectivity_mult= 1.0;
-  KEY_PART_INFO *key_part= info->param->table->key_info[scan->keynr].key_part;
+  KeyPartInfo *key_part= info->param->table->key_info[scan->keynr].key_part;
   unsigned char key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */
   unsigned char *key_ptr= key_val;
   optimizer::SEL_ARG *sel_arg= NULL;
@@ -1616 +1631 @@
   if (!info->is_covering)
   {
     optimizer::CostVector sweep_cost;
-    JOIN *join= info->param->session->lex->select_lex.join;
+    Join *join= info->param->session->lex->select_lex.join;
     bool is_interrupted= test(join && join->tables == 1);
     get_sweep_read_cost(info->param->table, double2rows(info->out_rows),
                         is_interrupted, &sweep_cost);
@@ -1627 +1642 @@
 
 
 /*
+  Get best covering ROR-intersection.
+  SYNOPSIS
+    get_best_covering_ror_intersect()
+      param     Parameter from test_quick_select function.
+      tree      optimizer::SEL_TREE with sets of intervals for different keys.
+      read_time Don't return table read plans with cost > read_time.
+
+  RETURN
+    Best covering ROR-intersection plan
+    NULL if no plan found.
+
+  NOTES
+    get_best_ror_intersect must be called for a tree before calling this
+    function for it.
+    This function invalidates tree->ror_scans member values.
+
+  The following approximate algorithm is used:
+    I=set of all covering indexes
+    F=set of all fields to cover
+    S={}
+
+    do
+    {
+      Order I by (#covered fields in F desc,
+                  #components asc,
+                  number of first not covered component asc);
+      F=F-covered by first(I);
+      S=S+first(I);
+      I=I-first(I);
+    } while F is not empty.
+*/
+
+static
+optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
+                                                            optimizer::SEL_TREE *tree,
+                                                            double read_time)
+{
+  ROR_SCAN_INFO **ror_scan_mark;
+  ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
+
+  for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
+    (*scan)->key_components=
+      param->table->key_info[(*scan)->keynr].key_parts;
+
+  /*
+    Run covering-ROR-search algorithm.
+    Assume set I is [ror_scan .. ror_scans_end)
+  */
+
+  /*I=set of all covering indexes */
+  ror_scan_mark= tree->ror_scans;
+
+  MyBitmap *covered_fields= &param->tmp_covered_fields;
+  if (! covered_fields->getBitmap())
+  {
+    my_bitmap_map *tmp_bitmap= (my_bitmap_map*)param->mem_root->alloc_root(param->fields_bitmap_size);
+    covered_fields->setBitmap(tmp_bitmap);
+  }
+  if (! covered_fields->getBitmap() ||
+      covered_fields->init(covered_fields->getBitmap(),
+                           param->table->getShare()->sizeFields()))
+    return 0;
+  covered_fields->clearAll();
+
+  double total_cost= 0.0f;
+  ha_rows records=0;
+  bool all_covered;
+
+  do
+  {
+    /*
+      Update changed sorting info:
+        #covered fields,
+        number of first not covered component
+      Calculate and save these values for each of remaining scans.
+    */
+    for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
+    {
+      bitmap_subtract(&(*scan)->covered_fields, covered_fields);
+      (*scan)->used_fields_covered=
+        (*scan)->covered_fields.getBitsSet();
+      (*scan)->first_uncovered_field=
+        (*scan)->covered_fields.getFirst();
+    }
+
+    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
+                       sizeof(ROR_SCAN_INFO*),
+                       (qsort_cmp)cmp_ror_scan_info_covering);
+
+    /* I=I-first(I) */
+    total_cost += (*ror_scan_mark)->index_read_cost;
+    records += (*ror_scan_mark)->records;
+    if (total_cost > read_time)
+      return NULL;
+    /* F=F-covered by first(I) */
+    bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
+    all_covered= bitmap_is_subset(&param->needed_fields, covered_fields);
+  } while ((++ror_scan_mark < ror_scans_end) && !all_covered);
+
+  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
+    return NULL;
+
+  /*
+    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
+    cost total_cost.
+  */
+  /* Add priority queue use cost. */
+  total_cost += rows2double(records)*
+                log((double)(ror_scan_mark - tree->ror_scans)) /
+                (TIME_FOR_COMPARE_ROWID * M_LN2);
+
+  if (total_cost > read_time)
+    return NULL;
+
+  optimizer::RorIntersectReadPlan *trp= NULL;
+  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
+  {
+    return trp;
+  }
+
+  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
+  if (!(trp->first_scan= (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*)* best_num)))
+    return NULL;
+  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(ROR_SCAN_INFO*));
+  trp->last_scan=  trp->first_scan + best_num;
+  trp->is_covering= true;
+  trp->read_cost= total_cost;
+  trp->records= records;
+  trp->cpk_scan= NULL;
+  set_if_smaller(param->table->quick_condition_rows, records);
+
+  return(trp);
+}
+
+
+/*
   Get best ROR-intersection plan using non-covering ROR-intersection search
   algorithm. The returned plan may be covering.
 
@@ -1711 +1862 @@
   uint32_t cpk_no= 0;
   bool cpk_scan_used= false;
 
-  if (! (tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
-                                                      sizeof(ROR_SCAN_INFO*)*
-                                                      param->keys)))
+  if (! (tree->ror_scans= (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*)* param->keys)))
+  {
     return NULL;
+  }
   cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
-           param->table->s->primary_key : MAX_KEY);
+           param->table->getShare()->getPrimaryKey() : MAX_KEY);
 
   for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
   {
@@ -1745 +1896 @@
 
   ROR_SCAN_INFO **intersect_scans= NULL; /* ROR scans used in index intersection */
   ROR_SCAN_INFO **intersect_scans_end= NULL;
-  if (! (intersect_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
-                                                      sizeof(ROR_SCAN_INFO*)*
-                                                      tree->n_ror_scans)))
+  if (! (intersect_scans= (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*) * tree->n_ror_scans)))
     return NULL;
   intersect_scans_end= intersect_scans;
 
@@ -1813 +1962 @@
     if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
       return trp;
 
-    trp->ror_range_scans.assign(intersect_scans, intersect_scans + best_num);
-    trp->setRowRetrievalNecessary(intersect_best->is_covering);
+    if (! (trp->first_scan=
+           (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*)*best_num)))
+      return NULL;
+    memcpy(trp->first_scan, intersect_scans, best_num*sizeof(ROR_SCAN_INFO*));
+    trp->last_scan=  trp->first_scan + best_num;
+    trp->is_covering= intersect_best->is_covering;
     trp->read_cost= intersect_best->total_cost;
     /* Prevent divisons by zero */
     ha_rows best_rows = double2rows(intersect_best->out_rows);
@@ -1822 +1975 @@
       best_rows= 1;
     set_if_smaller(param->table->quick_condition_rows, best_rows);
     trp->records= best_rows;
-    trp->setCostOfIndexScans(intersect_best->index_scan_costs);
+    trp->index_scan_costs= intersect_best->index_scan_costs;
     trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
   }
   return trp;
@@ -1830 +1983 @@
 
 
 /*
-  Get best covering ROR-intersection.
-  SYNOPSIS
-    get_best_covering_ror_intersect()
-      param     Parameter from test_quick_select function.
-      tree      optimizer::SEL_TREE with sets of intervals for different keys.
-      read_time Don't return table read plans with cost > read_time.
-
-  RETURN
-    Best covering ROR-intersection plan
-    NULL if no plan found.
-
-  NOTES
-    get_best_ror_intersect must be called for a tree before calling this
-    function for it.
-    This function invalidates tree->ror_scans member values.
-
-  The following approximate algorithm is used:
-    I=set of all covering indexes
-    F=set of all fields to cover
-    S={}
-
-    do
-    {
-      Order I by (#covered fields in F desc,
-                  #components asc,
-                  number of first not covered component asc);
-      F=F-covered by first(I);
-      S=S+first(I);
-      I=I-first(I);
-    } while F is not empty.
-*/
-
-static
-optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
-                                                            optimizer::SEL_TREE *tree,
-                                                            double read_time)
-{
-  ROR_SCAN_INFO **ror_scan_mark;
-  ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
-
-  for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
-    (*scan)->key_components=
-      param->table->key_info[(*scan)->keynr].key_parts;
-
-  /*
-    Run covering-ROR-search algorithm.
-    Assume set I is [ror_scan .. ror_scans_end)
-  */
-
-  /*I=set of all covering indexes */
-  ror_scan_mark= tree->ror_scans;
-
-  MyBitmap *covered_fields= &param->tmp_covered_fields;
-  if (! covered_fields->getBitmap())
-  {
-    my_bitmap_map *tmp_bitmap= (my_bitmap_map*)alloc_root(param->mem_root,
-                                               param->fields_bitmap_size);
-    covered_fields->setBitmap(tmp_bitmap);
-  }
-  if (! covered_fields->getBitmap() ||
-      covered_fields->init(covered_fields->getBitmap(),
-                           param->table->s->fields))
-    return 0;
-  covered_fields->clearAll();
-
-  double total_cost= 0.0f;
-  ha_rows records=0;
-  bool all_covered;
-
-  do
-  {
-    /*
-      Update changed sorting info:
-        #covered fields,
-        number of first not covered component
-      Calculate and save these values for each of remaining scans.
-    */
-    for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
-    {
-      bitmap_subtract(&(*scan)->covered_fields, covered_fields);
-      (*scan)->used_fields_covered=
-        (*scan)->covered_fields.getBitsSet();
-      (*scan)->first_uncovered_field=
-        (*scan)->covered_fields.getFirst();
-    }
-
-    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
-                       sizeof(ROR_SCAN_INFO*),
-                       (qsort_cmp)cmp_ror_scan_info_covering);
-
-    /* I=I-first(I) */
-    total_cost += (*ror_scan_mark)->index_read_cost;
-    records += (*ror_scan_mark)->records;
-    if (total_cost > read_time)
-      return NULL;
-    /* F=F-covered by first(I) */
-    bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
-    all_covered= bitmap_is_subset(&param->needed_fields, covered_fields);
-  } while ((++ror_scan_mark < ror_scans_end) && !all_covered);
-
-  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
-    return NULL;
-
-  /*
-    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
-    cost total_cost.
-  */
-  /* Add priority queue use cost. */
-  total_cost += rows2double(records)*
-                log((double)(ror_scan_mark - tree->ror_scans)) /
-                (TIME_FOR_COMPARE_ROWID * M_LN2);
-
-  if (total_cost > read_time)
-    return NULL;
-
-  optimizer::RorIntersectReadPlan *trp= NULL;
-  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
-  {
-    return trp;
-  }
-
-  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
-  trp->ror_range_scans.assign(tree->ror_scans, tree->ror_scans + best_num);
-  trp->setRowRetrievalNecessary(true);
-  trp->read_cost= total_cost;
-  trp->records= records;
-  trp->cpk_scan= NULL;
-  set_if_smaller(param->table->quick_condition_rows, records);
-
-  return(trp);
-}
-
-
-/*
   Get best "range" table read plan for given optimizer::SEL_TREE, also update some info
 
   SYNOPSIS
     get_key_scans_params()
+      session
       param                    Parameters from test_quick_select
       tree                     Make range select for this optimizer::SEL_TREE
       index_read_must_be_used  true <=> assume 'index only' option will be set
@@ -1989 +2009 @@
     NULL if no plan found or error occurred
 */
 
-static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
+static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
+                                                      optimizer::Parameter *param,
                                                       optimizer::SEL_TREE *tree,
                                                       bool index_read_must_be_used,
                                                       bool update_tbl_stats,
@@ -2026 +2047 @@
       bool read_index_only= index_read_must_be_used ||
                             param->table->covering_keys.test(keynr);
 
-      found_records= check_quick_select(param, idx, read_index_only, *key,
+      found_records= check_quick_select(session, param, idx, read_index_only, *key,
                                         update_tbl_stats, &mrr_flags,
                                         &buf_size, &cost);
       found_read_time= cost.total_cost();
@@ -2055 +2076 @@
       read_plan->records= best_records;
       read_plan->is_ror= tree->ror_scans_map.test(idx);
       read_plan->read_cost= read_time;
-      read_plan->setMRRBufferSize(best_buf_size);
+      read_plan->mrr_buf_size= best_buf_size;
     }
   }
 
@@ -2106 +2127 @@
                                                 parent_alloc)))
   {
     alloc= parent_alloc ? parent_alloc : &quick_intersect->alloc;
-    for (vector<struct st_ror_scan_info *>::iterator it= ror_range_scans.begin();
-         it != ror_range_scans.end();
-         ++it)
+    for (; first_scan != last_scan; ++first_scan)
     {
       if (! (quick= optimizer::get_quick_select(param,
-                                                (*it)->idx,
-                                                (*it)->sel_arg,
+                                                (*first_scan)->idx,
+                                                (*first_scan)->sel_arg,
                                                 HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
                                                 0,
                                                 alloc)) ||
@@ -2147 +2166 @@
 optimizer::QuickSelectInterface *optimizer::RorUnionReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
 {
   optimizer::QuickRorUnionSelect *quick_roru= NULL;
+  optimizer::TableReadPlan **scan= NULL;
   optimizer::QuickSelectInterface *quick= NULL;
   /*
     It is impossible to construct a ROR-union that will not retrieve full
@@ -2154 +2174 @@
   */
   if ((quick_roru= new optimizer::QuickRorUnionSelect(param->session, param->table)))
   {
-    for (vector<optimizer::TableReadPlan *>::iterator it= merged_scans.begin();
-         it != merged_scans.end();
-         ++it)
+    for (scan= first_ror; scan != last_ror; scan++)
     {
-      if (! (quick= (*it)->make_quick(param, false, &quick_roru->alloc)) ||
+      if (! (quick= (*scan)->make_quick(param, false, &quick_roru->alloc)) ||
           quick_roru->push_quick_back(quick))
       {
         return NULL;
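The two make_quick() hunks above replace std::vector members (`ror_range_scans`, `merged_scans`) and their iterators with plain begin/end pointer pairs (`first_scan`/`last_scan`, `first_ror`/`last_ror`) whose storage comes from the plan's `memory::Root`. A plausible motivation, sketched below with simplified stand-in types: arena-allocated plan objects are freed wholesale without running destructors, so a member that owns heap storage (like a vector) would leak, while a raw [first, last) range is trivially disposable.

// Sketch of the pointer-range member pattern the merge adopts.
#include <cstddef>
#include <cstring>
#include <iostream>

struct ScanInfo { int idx; };

struct RorIntersectPlanSketch
{
  ScanInfo **first_scan;   // [first_scan, last_scan) replaces
  ScanInfo **last_scan;    // std::vector<ScanInfo*> ror_range_scans

  // memcpy into caller-provided (arena-owned) storage, mirroring the
  // memcpy-into-alloc_root style visible in the diff.
  void assign(ScanInfo **storage, ScanInfo **src, size_t n)
  {
    memcpy(storage, src, n * sizeof(ScanInfo *));
    first_scan= storage;
    last_scan= storage + n;
  }
};

int main()
{
  ScanInfo a= {0}, b= {2}, c= {5};
  ScanInfo *scans[]= {&a, &b, &c};
  ScanInfo *storage[3];            // stand-in for memory::Root storage

  RorIntersectPlanSketch plan;
  plan.assign(storage, scans, 3);

  // Iteration is a plain pointer walk, as in make_quick() above.
  for (ScanInfo **scan= plan.first_scan; scan != plan.last_scan; ++scan)
    std::cout << "scan over index " << (*scan)->idx << "\n";
  return 0;
}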
@@ -2560 +2578 @@
   field->setWriteSet();
 
   Item_result cmp_type= field->cmp_type();
-  if (!((ref_tables | field->table->map) & param_comp))
+  if (!((ref_tables | field->getTable()->map) & param_comp))
     ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv);
   Item_equal *item_equal= field_item->item_equal;
   if (item_equal)
@@ -2574 +2592 @@
 
       if (field->eq(f))
         continue;
-      if (!((ref_tables | f->table->map) & param_comp))
+      if (!((ref_tables | f->getTable()->map) & param_comp))
       {
         tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv);
         ftree= !ftree ? tree : tree_and(param, ftree, tree);
@@ -2632 +2650 @@
     }
     return(tree);
   }
-  /* Here when simple cond */
-  if (cond->const_item())
+  /* Here when simple cond
+     There are limits on what kinds of const items we can evaluate, grep for
+     DontEvaluateMaterializedSubqueryTooEarly.
+  */
+  if (cond->const_item()  && !cond->is_expensive())
   {
     /*
      During the cond->val_int() evaluation we can come across a subselect
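The `!cond->is_expensive()` guard added above narrows constant folding: an item can be constant for the duration of the query yet expensive to evaluate (the new comment points at materialized subqueries), and the range optimizer should not pay that cost at plan time. The sketch below models the check with a hypothetical Item hierarchy; it is not Drizzle's actual class design.

// Illustrative sketch of checking cost before folding a "constant"
// condition during planning.
#include <iostream>

struct Item
{
  virtual ~Item() {}
  virtual bool const_item() const { return false; }
  virtual bool is_expensive() const { return false; }
  virtual long val_int()= 0;
};

struct IntLiteral : Item
{
  long v;
  explicit IntLiteral(long v) : v(v) {}
  bool const_item() const { return true; }
  long val_int() { return v; }
};

struct MaterializedSubquery : Item
{
  bool const_item() const { return true; }    // same value every row...
  bool is_expensive() const { return true; }  // ...but costly to compute
  long val_int() { /* would execute the subquery here */ return 1; }
};

// Fold at plan time only when the item is both constant and cheap.
bool try_fold_at_plan_time(Item *cond, long *out)
{
  if (cond->const_item() && !cond->is_expensive())
  {
    *out= cond->val_int();
    return true;
  }
  return false;  // defer evaluation to execution time
}

int main()
{
  IntLiteral lit(0);
  MaterializedSubquery sub;
  long v;
  std::cout << try_fold_at_plan_time(&lit, &v) << "\n";  // 1: folded
  std::cout << try_fold_at_plan_time(&sub, &v) << "\n";  // 0: deferred
  return 0;
}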
@@ -2725 +2746 @@
       field->setWriteSet();
 
       Item_result cmp_type= field->cmp_type();
-      if (!((ref_tables | field->table->map) & param_comp))
+      if (!((ref_tables | field->getTable()->map) & param_comp))
       {
         tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC,
                            value,cmp_type);
@@ -2764 +2785 @@
                    Item_func::Functype type,
                    Item *value, Item_result)
 {
-  if (field->table != param->table)
+  if (field->getTable() != param->table)
     return 0;
 
   KEY_PART *key_part = param->key_parts;
@@ -2834 +2855 @@
   param->session->mem_root= param->old_root;
   if (!value)                                   // IS NULL or IS NOT NULL
   {
-    if (field->table->maybe_null)               // Can't use a key on this
+    if (field->getTable()->maybe_null)          // Can't use a key on this
       goto end;
     if (!maybe_null)                            // Not null field
     {
@@ -2919 +2940 @@
     {
       if (unlikely(length < field_length))
       {
-        /*
-          This can only happen in a table created with UNIREG where one key
-          overlaps many fields
-        */
-        length= field_length;
+        /*
+          This can only happen in a table created with UNIREG where one key
+          overlaps many fields
+        */
+        length= field_length;
       }
       else
-        field_length= length;
+        field_length= length;
     }
     length+=offset;
-    if (!(min_str= (unsigned char*) alloc_root(alloc, length*2)))
+    if (!(min_str= (unsigned char*) alloc->alloc_root(length*2)))
+    {
       goto end;
+    }
 
     max_str=min_str+length;
     if (maybe_null)
@@ -3149 +3172 @@
     tree= &optimizer::null_element;                        // cmp with NULL is never true
     goto end;
   }
-  str= (unsigned char*) alloc_root(alloc, key_part->store_length+1);
+
+  /*
+    Any predicate except "<=>"(null-safe equality operator) involving NULL as a
+    constant is always FALSE
+    Put IMPOSSIBLE Tree(null_element) here.
+  */
+  if (type != Item_func::EQUAL_FUNC && field->is_real_null())
+  {
+    tree= &optimizer::null_element;
+    goto end;
+  }
+
+  str= (unsigned char*) alloc->alloc_root(key_part->store_length+1);
   if (!str)
     goto end;
   if (maybe_null)
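The block added above encodes SQL's three-valued logic: with a NULL constant on one side, every comparison except the null-safe `<=>` (Item_func::EQUAL_FUNC) yields UNKNOWN and can never be true, so the optimizer can emit the impossible-range `null_element` tree immediately. A toy model of just that rule:

// Toy model of the three-valued-logic rule behind the new early exit.
#include <iostream>

enum Func { EQ_FUNC, LT_FUNC, GT_FUNC, EQUAL_FUNC /* the "<=>" operator */ };

struct Value
{
  bool is_null;
  long v;
};

// Returns true when the predicate could ever be satisfied.
bool predicate_can_be_true(Func type, const Value &constant)
{
  if (constant.is_null)
    return type == EQUAL_FUNC;  // only "<=>" matches NULL
  return true;
}

int main()
{
  Value null_const= {true, 0};
  Value five= {false, 5};
  std::cout << predicate_can_be_true(EQ_FUNC, null_const) << "\n";    // 0: impossible range
  std::cout << predicate_can_be_true(EQUAL_FUNC, null_const) << "\n"; // 1: "<=> NULL" can match
  std::cout << predicate_can_be_true(LT_FUNC, five) << "\n";          // 1
  return 0;
}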
@@ -3825 +3860 @@
 */
 
 static
-ha_rows check_quick_select(optimizer::Parameter *param,
+ha_rows check_quick_select(Session *session,
+                           optimizer::Parameter *param,
                            uint32_t idx,
                            bool index_only,
                            optimizer::SEL_ARG *tree,
@@ -3866 +3902 @@
   bool pk_is_clustered= cursor->primary_key_is_clustered();
   if (index_only &&
       (param->table->index_flags(keynr) & HA_KEYREAD_ONLY) &&
-      !(pk_is_clustered && keynr == param->table->s->primary_key))
+      !(pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
      *mrr_flags |= HA_MRR_INDEX_ONLY;
 
-  if (current_session->lex->sql_command != SQLCOM_SELECT)
+  if (session->lex->sql_command != SQLCOM_SELECT)
     *mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;
 
   *bufsize= param->session->variables.read_rnd_buff_size;
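This hunk is the payoff of the `Session *session` parameter threaded through `check_quick_select()` and its callers earlier in the diff: the function no longer reaches for the thread-local `current_session`. Passing the session explicitly makes the dependency visible in the signature and lets callers (and tests) supply their own. A minimal sketch with a stand-in Session type:

// Sketch of the refactor from an implicit thread-local context to an
// explicitly threaded parameter; Session here is a stand-in, not the
// real drizzled::Session.
#include <iostream>

struct Session
{
  int sql_command;
};

static const int SQLCOM_SELECT= 0;

// Before: the helper silently depended on a global/thread-local, e.g.
//   bool use_default_mrr_impl() { return current_session->sql_command != SQLCOM_SELECT; }

// After: the dependency is part of the signature, so callers decide
// which session the decision is made against.
bool use_default_mrr_impl(Session *session)
{
  return session->sql_command != SQLCOM_SELECT;
}

int main()
{
  Session select_session= {SQLCOM_SELECT};
  Session update_session= {SQLCOM_SELECT + 1};
  std::cout << use_default_mrr_impl(&select_session) << "\n"; // 0
  std::cout << use_default_mrr_impl(&update_session) << "\n"; // 1
  return 0;
}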
@@ -3901 +3937 @@
   else
   {
     /* Clustered PK scan is always a ROR scan (TODO: same as above) */
-    if (param->table->s->primary_key == keynr && pk_is_clustered)
+    if (param->table->getShare()->getPrimaryKey() == keynr && pk_is_clustered)
       param->is_ror_scan= true;
   }
 
@@ -3948 +3984 @@
 
 static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts)
 {
-  KEY *table_key= param->table->key_info + keynr;
-  KEY_PART_INFO *key_part= table_key->key_part + nparts;
-  KEY_PART_INFO *key_part_end= (table_key->key_part +
+  KeyInfo *table_key= param->table->key_info + keynr;
+  KeyPartInfo *key_part= table_key->key_part + nparts;
+  KeyPartInfo *key_part_end= (table_key->key_part +
                                 table_key->key_parts);
   uint32_t pk_number;
 
-  for (KEY_PART_INFO *kp= table_key->key_part; kp < key_part; kp++)
+  for (KeyPartInfo *kp= table_key->key_part; kp < key_part; kp++)
   {
     uint16_t fieldnr= param->table->key_info[keynr].
                     key_part[kp - table_key->key_part].fieldnr - 1;
-    if (param->table->field[fieldnr]->key_length() != kp->length)
+    if (param->table->getField(fieldnr)->key_length() != kp->length)
       return false;
   }
 
@@ -3966 +4002 @@
     return true;
 
   key_part= table_key->key_part + nparts;
-  pk_number= param->table->s->primary_key;
+  pk_number= param->table->getShare()->getPrimaryKey();
   if (!param->table->cursor->primary_key_is_clustered() || pk_number == MAX_KEY)
     return false;
 
-  KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part;
-  KEY_PART_INFO *pk_part_end= pk_part +
+  KeyPartInfo *pk_part= param->table->key_info[pk_number].key_part;
+  KeyPartInfo *pk_part_end= pk_part +
                               param->table->key_info[pk_number].key_parts;
   for (;(key_part!=key_part_end) && (pk_part != pk_part_end);
        ++key_part, ++pk_part)
@@ -4021 +4057 @@
     {
       quick->mrr_flags= mrr_flags;
       quick->mrr_buf_size= mrr_buf_size;
-      quick->key_parts=(KEY_PART*)
-        memdup_root(parent_alloc? parent_alloc : &quick->alloc,
-                    (char*) param->key[idx],
-                    sizeof(KEY_PART)*
-                    param->table->key_info[param->real_keynr[idx]].key_parts);
+      if (parent_alloc)
+      {
+        quick->key_parts=(KEY_PART*)
+          parent_alloc->memdup_root( (char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
+      }
+      else
+      {
+        quick->key_parts=(KEY_PART*)
+          quick->alloc.memdup_root((char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
+      }
     }
   }
   return quick;
@@ -4139 +4180 @@
     if (length == (uint32_t) (tmp_max_key - param->max_key) &&
               ! memcmp(param->min_key,param->max_key,length))
     {
-      KEY *table_key= quick->head->key_info+quick->index;
+      KeyInfo *table_key= quick->head->key_info+quick->index;
       flag= EQ_RANGE;
       if ((table_key->flags & (HA_NOSAME)) == HA_NOSAME &&
                 key->part == table_key->key_parts-1)
@@ -4253 +4294 @@
 {
   memory::Root *old_root, *alloc;
   optimizer::QuickRangeSelect *quick= NULL;
-  KEY *key_info = &table->key_info[ref->key];
+  KeyInfo *key_info = &table->key_info[ref->key];
   KEY_PART *key_part;
   optimizer::QuickRange *range= NULL;
   uint32_t part;
@@ -4290 +4331 @@
 
 
   if (!(quick->key_parts=key_part=(KEY_PART *)
-        alloc_root(&quick->alloc,sizeof(KEY_PART)*ref->key_parts)))
+        quick->alloc.alloc_root(sizeof(KEY_PART)*ref->key_parts)))
     goto err;
 
   for (part=0 ; part < ref->key_parts ;part++,key_part++)
@@ -4335 +4376 @@
 
   quick->mrr_buf_size= session->variables.read_rnd_buff_size;
   if (table->cursor->multi_range_read_info(quick->index, 1, (uint32_t)records,
-                                         &quick->mrr_buf_size,
-                                         &quick->mrr_flags, &cost))
+                                           &quick->mrr_buf_size,
+                                           &quick->mrr_flags, &cost))
     goto err;
 
   return quick;
@@ -4414 +4455 @@
 }
 
 
-static inline uint32_t get_field_keypart(KEY *index, Field *field);
+static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);
 
 static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
                                                         optimizer::SEL_TREE *range_tree,
                                                         optimizer::Parameter *param,
                                                         uint32_t *param_idx);
 
-static bool get_constant_key_infix(KEY *index_info,
+static bool get_constant_key_infix(KeyInfo *index_info,
                                    optimizer::SEL_ARG *index_range_tree,
-                                   KEY_PART_INFO *first_non_group_part,
-                                   KEY_PART_INFO *min_max_arg_part,
-                                   KEY_PART_INFO *last_part,
+                                   KeyPartInfo *first_non_group_part,
+                                   KeyPartInfo *min_max_arg_part,
+                                   KeyPartInfo *last_part,
                                    Session *session,
                                    unsigned char *key_infix,
                                    uint32_t *key_infix_len,
-                                   KEY_PART_INFO **first_non_infix_part);
+                                   KeyPartInfo **first_non_infix_part);
 
 static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);
 
 static void
 cost_group_min_max(Table* table,
-                   KEY *index_info,
+                   KeyInfo *index_info,
                    uint32_t used_key_parts,
                    uint32_t group_key_parts,
                    optimizer::SEL_TREE *range_tree,
@@ -4578 +4619 @@
 get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree)
 {
   Session *session= param->session;
-  JOIN *join= session->lex->current_select->join;
+  Join *join= session->lex->current_select->join;
   Table *table= param->table;
   bool have_min= false;              /* true if there is a MIN function. */
   bool have_max= false;              /* true if there is a MAX function. */
   Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
-  KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
+  KeyPartInfo *min_max_arg_part= NULL; /* The corresponding keypart. */
   uint32_t group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
-  KEY *index_info= NULL;    /* The index chosen for data access. */
+  KeyInfo *index_info= NULL;    /* The index chosen for data access. */
   uint32_t index= 0;            /* The id of the chosen index. */
   uint32_t group_key_parts= 0;  // Number of index key parts in the group prefix.
   uint32_t used_key_parts= 0;   /* Number of index key parts used for access. */
@@ -4606 +4647 @@
        (! join->select_distinct)) ||
       (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
     return NULL;
-  if (table->s->keys == 0)        /* There are no indexes to use. */
+  if (table->getShare()->sizeKeys() == 0)        /* There are no indexes to use. */
     return NULL;
 
   /* Analyze the query in more detail. */
@@ -4665 +4706 @@
     (GA1,GA2) are all true. If there is more than one such index, select the
     first one. Here we set the variables: group_prefix_len and index_info.
   */
-  KEY *cur_index_info= table->key_info;
-  KEY *cur_index_info_end= cur_index_info + table->s->keys;
-  KEY_PART_INFO *cur_part= NULL;
-  KEY_PART_INFO *end_part= NULL; /* Last part for loops. */
+  KeyInfo *cur_index_info= table->key_info;
+  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->sizeKeys();
+  KeyPartInfo *cur_part= NULL;
+  KeyPartInfo *end_part= NULL; /* Last part for loops. */
   /* Last index part. */
-  KEY_PART_INFO *last_part= NULL;
-  KEY_PART_INFO *first_non_group_part= NULL;
-  KEY_PART_INFO *first_non_infix_part= NULL;
+  KeyPartInfo *last_part= NULL;
+  KeyPartInfo *first_non_group_part= NULL;
+  KeyPartInfo *first_non_infix_part= NULL;
   uint32_t key_infix_parts= 0;
   uint32_t cur_group_key_parts= 0;
   uint32_t cur_group_prefix_len= 0;
@@ -4691 +4732 @@
   uint32_t cur_key_infix_len= 0;
   unsigned char cur_key_infix[MAX_KEY_LENGTH];
   uint32_t cur_used_key_parts= 0;
-  uint32_t pk= param->table->s->primary_key;
+  uint32_t pk= param->table->getShare()->getPrimaryKey();
 
   for (uint32_t cur_index= 0;
        cur_index_info != cur_index_info_end;
@@ -4714 +4755 @@
         (table->cursor->getEngine()->check_flag(HTON_BIT_PRIMARY_KEY_IN_READ_INDEX)))
     {
       /* For each table field */
-      for (uint32_t i= 0; i < table->s->fields; i++)
+      for (uint32_t i= 0; i < table->getShare()->sizeFields(); i++)
       {
-        Field *cur_field= table->field[i];
+        Field *cur_field= table->getField(i);
         /*
           If the field is used in the current query ensure that it's
           part of 'cur_index'
@@ -4884 +4925 @@
           Store the first and last keyparts that need to be analyzed
           into one array that can be passed as parameter.
         */
-        KEY_PART_INFO *key_part_range[2];
+        KeyPartInfo *key_part_range[2];
         key_part_range[0]= first_non_group_part;
         key_part_range[1]= last_part;
 
@@ -4928 +4969 @@
       optimizer::CostVector dummy_cost;
       uint32_t mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
       uint32_t mrr_bufsize= 0;
-      cur_quick_prefix_records= check_quick_select(param,
+      cur_quick_prefix_records= check_quick_select(session,
+                                                   param,
                                                    cur_param_idx,
                                                    false /*don't care*/,
                                                    cur_index_tree,
@@ -5176 +5218 @@
     false o/w
 */
 static bool
-get_constant_key_infix(KEY *,
+get_constant_key_infix(KeyInfo *,
                        optimizer::SEL_ARG *index_range_tree,
-                       KEY_PART_INFO *first_non_group_part,
-                       KEY_PART_INFO *min_max_arg_part,
-                       KEY_PART_INFO *last_part,
+                       KeyPartInfo *first_non_group_part,
+                       KeyPartInfo *min_max_arg_part,
+                       KeyPartInfo *last_part,
                        Session *,
                        unsigned char *key_infix,
                        uint32_t *key_infix_len,
-                       KEY_PART_INFO **first_non_infix_part)
+                       KeyPartInfo **first_non_infix_part)
 {
   optimizer::SEL_ARG *cur_range= NULL;
-  KEY_PART_INFO *cur_part= NULL;
+  KeyPartInfo *cur_part= NULL;
   /* End part for the first loop below. */
-  KEY_PART_INFO *end_part= min_max_arg_part ? min_max_arg_part : last_part;
+  KeyPartInfo *end_part= min_max_arg_part ? min_max_arg_part : last_part;
 
   *key_infix_len= 0;
   unsigned char *key_ptr= key_infix;
@@ -5255 +5297 @@
     field  field that possibly references some key part in index
 
   NOTES
-    The return value can be used to get a KEY_PART_INFO pointer by
+    The return value can be used to get a KeyPartInfo pointer by
     part= index->key_part + get_field_keypart(...) - 1;
 
   RETURN
@@ -5263 +5305 @@
     0 if field does not reference any index field.
 */
 static inline uint
-get_field_keypart(KEY *index, Field *field)
+get_field_keypart(KeyInfo *index, Field *field)
 {
-  KEY_PART_INFO *part= NULL;
-  KEY_PART_INFO *end= NULL;
+  KeyPartInfo *part= NULL;
+  KeyPartInfo *end= NULL;
 
   for (part= index->key_part, end= part + index->key_parts; part < end; part++)
   {
@@ -5376 +5418 @@
     None
 */
 void cost_group_min_max(Table* table,
-                        KEY *index_info,
+                        KeyInfo *index_info,
                         uint32_t used_key_parts,
                         uint32_t group_key_parts,
                         optimizer::SEL_TREE *range_tree,
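The remaining hunks are the mechanical `KEY` → `KeyInfo` and `KEY_PART_INFO` → `KeyPartInfo` renames. One low-risk way to stage such a rename, sketched below with hypothetical simplified definitions (not Drizzle's actual ones), is to make the new name the real type and keep the old spelling as a typedef until every call site, like the ones in this diff, has been converted:

// Hypothetical sketch of a staged type rename.
#include <cstdint>

class KeyPartInfo
{
public:
  uint16_t fieldnr;
  uint16_t length;
};

class KeyInfo
{
public:
  KeyPartInfo *key_part;
  uint32_t key_parts;
};

// Transitional aliases let old and new spellings coexist in one tree.
typedef KeyInfo KEY;
typedef KeyPartInfo KEY_PART_INFO;

int main()
{
  KeyPartInfo part= {1, 4};
  KeyInfo key= {&part, 1};
  KEY *legacy= &key;                       // old spelling still compiles...
  return legacy->key_parts == 1 ? 0 : 1;   // ...while new code uses KeyInfo
}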