~drizzle-trunk/drizzle/development

Viewing changes to drizzled/optimizer/range.cc

  • Committer: Olaf van der Spek
  • Date: 2011-07-07 13:41:07 UTC
  • mto: This revision was merged to the branch mainline in revision 2385.
  • Revision ID: olafvdspek@gmail.com-20110707134107-6mi7pauiatxtf4oe
Rename strmake to strdup (standard name)
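
Most of the mechanical changes in the diff below follow two patterns: session->lex-> becomes the session->lex() accessor, and explicit mem_root->alloc_root() calls with per-call out-of-memory checks become placement new on a memory::Root. The following sketch only illustrates how such an arena placement new can be wired up, assuming allocation failure is reported inside the allocator rather than returned as NULL (presumably why the removed NULL checks become unnecessary); the Root class and names here are illustrative stand-ins, not Drizzle's actual memory::Root.

  // Illustrative sketch only: a minimal arena allocator with a placement
  // operator new, assuming allocation failure never escapes as NULL.
  #include <cstddef>
  #include <cstdlib>

  namespace sketch {

  class Root                            // stand-in for an arena/memory-root class
  {
  public:
    void* alloc(std::size_t size)
    {
      void* ptr= std::malloc(size);     // real code would carve from arena blocks
      if (ptr == NULL)
        std::abort();                   // assumption: failure handled here, not by callers
      return ptr;
    }
  };

  } // namespace sketch

  // Placement overloads so "new (root) T" and "new (root) T[n]" draw from the arena.
  inline void* operator new(std::size_t size, sketch::Root& root) { return root.alloc(size); }
  inline void* operator new[](std::size_t size, sketch::Root& root) { return root.alloc(size); }

  struct KEY_PART { unsigned fieldnr; unsigned length; };

  int main()
  {
    sketch::Root root;
    // Old style: if (!(parts= (KEY_PART*) root.alloc(sizeof(KEY_PART) * 4))) return 1;
    // New style used throughout this diff:
    KEY_PART* parts= new (root) KEY_PART[4];
    return parts ? 0 : 1;
  }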

 100  100              subject and may omit some details.
 101  101   */
 102  102   
 103       - #include "config.h"
      103 + #include <config.h>
 104  104   
 105  105   #include <math.h>
 106  106   #include <float.h>

 111  111   
 112  112   #include <boost/dynamic_bitset.hpp>
 113  113   
 114       - #include "drizzled/sql_base.h"
 115       - #include "drizzled/sql_select.h"
 116       - #include "drizzled/error.h"
 117       - #include "drizzled/optimizer/cost_vector.h"
 118       - #include "drizzled/item/cmpfunc.h"
 119       - #include "drizzled/field/num.h"
 120       - #include "drizzled/check_stack_overrun.h"
 121       - #include "drizzled/optimizer/sum.h"
 122       - #include "drizzled/optimizer/range.h"
 123       - #include "drizzled/optimizer/quick_range.h"
 124       - #include "drizzled/optimizer/quick_range_select.h"
 125       - #include "drizzled/optimizer/quick_group_min_max_select.h"
 126       - #include "drizzled/optimizer/quick_index_merge_select.h"
 127       - #include "drizzled/optimizer/quick_ror_intersect_select.h"
 128       - #include "drizzled/optimizer/quick_ror_union_select.h"
 129       - #include "drizzled/optimizer/table_read_plan.h"
 130       - #include "drizzled/optimizer/sel_arg.h"
 131       - #include "drizzled/optimizer/sel_imerge.h"
 132       - #include "drizzled/optimizer/sel_tree.h"
 133       - #include "drizzled/optimizer/range_param.h"
 134       - #include "drizzled/records.h"
 135       - #include "drizzled/internal/my_sys.h"
 136       - #include "drizzled/internal/iocache.h"
 137       - 
 138       - #include "drizzled/temporal.h" /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */
      114 + #include <drizzled/check_stack_overrun.h>
      115 + #include <drizzled/error.h>
      116 + #include <drizzled/field/num.h>
      117 + #include <drizzled/internal/iocache.h>
      118 + #include <drizzled/internal/my_sys.h>
      119 + #include <drizzled/item/cmpfunc.h>
      120 + #include <drizzled/optimizer/cost_vector.h>
      121 + #include <drizzled/optimizer/quick_group_min_max_select.h>
      122 + #include <drizzled/optimizer/quick_index_merge_select.h>
      123 + #include <drizzled/optimizer/quick_range.h>
      124 + #include <drizzled/optimizer/quick_range_select.h>
      125 + #include <drizzled/optimizer/quick_ror_intersect_select.h>
      126 + #include <drizzled/optimizer/quick_ror_union_select.h>
      127 + #include <drizzled/optimizer/range.h>
      128 + #include <drizzled/optimizer/range_param.h>
      129 + #include <drizzled/optimizer/sel_arg.h>
      130 + #include <drizzled/optimizer/sel_imerge.h>
      131 + #include <drizzled/optimizer/sel_tree.h>
      132 + #include <drizzled/optimizer/sum.h>
      133 + #include <drizzled/optimizer/table_read_plan.h>
      134 + #include <drizzled/plugin/storage_engine.h>
      135 + #include <drizzled/records.h>
      136 + #include <drizzled/sql_base.h>
      137 + #include <drizzled/sql_select.h>
      138 + #include <drizzled/table_reference.h>
      139 + #include <drizzled/session.h>
      140 + #include <drizzled/key.h>
      141 + #include <drizzled/unique.h>
      142 + #include <drizzled/temporal.h> /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */
      143 + #include <drizzled/sql_lex.h>
      144 + #include <drizzled/system_variables.h>
 139  145   
 140  146   using namespace std;
 141       - namespace drizzled
 142       - {
      147 + 
      148 + namespace drizzled {
 143  149   
 144  150   #define HA_END_SPACE_KEY 0
 145  151   
 213  219     else
 214  220     {
 215  221       double n_blocks=
 216       -       ceil(uint64_t2double(table->cursor->stats.data_file_length) / IO_SIZE);
      222 +       ceil(static_cast<double>(table->cursor->stats.data_file_length) / IO_SIZE);
 217  223       double busy_blocks=
 218       -       n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(nrows)));
      224 +       n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, static_cast<double>(nrows)));
 219  225       if (busy_blocks < 1.0)
 220  226         busy_blocks= 1.0;
 221  227   

 285  291   static
 286  292   optimizer::GroupMinMaxReadPlan *get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree);
 287  293   
 288       - static optimizer::SEL_TREE *tree_and(optimizer::RangeParameter *param, 
 289       -                                      optimizer::SEL_TREE *tree1, 
      294 + static optimizer::SEL_TREE *tree_and(optimizer::RangeParameter *param,
      295 +                                      optimizer::SEL_TREE *tree1,
 290  296                                        optimizer::SEL_TREE *tree2);
 291  297   
 292  298   static optimizer::SEL_ARG *sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2);

 304  310                                const unsigned char *key,
 305  311                                uint32_t length);
 306  312   
 307       - bool sel_trees_can_be_ored(optimizer::SEL_TREE *tree1, 
 308       -                            optimizer::SEL_TREE *tree2, 
      313 + bool sel_trees_can_be_ored(optimizer::SEL_TREE *tree1,
      314 +                            optimizer::SEL_TREE *tree2,
 309  315                              optimizer::RangeParameter *param);
 310  316   
 311  317   

 340  346                                                bool allow_null_cond,
 341  347                                                int *error)
 342  348   {
 343       -   optimizer::SqlSelect *select= NULL;
 344       - 
 345  349     *error= 0;
 346  350   
 347  351     if (! conds && ! allow_null_cond)
 348  352     {
 349  353       return 0;
 350  354     }
 351       -   if (! (select= new optimizer::SqlSelect))
 352       -   {
 353       -     *error= 1;                  // out of memory
 354       -     return 0;
 355       -   }
      355 +   optimizer::SqlSelect* select= new optimizer::SqlSelect;
 356  356     select->read_tables=read_tables;
 357  357     select->const_tables=const_tables;
 358  358     select->head=head;

 360  360   
 361  361     if (head->sort.io_cache)
 362  362     {
 363       -     memcpy(select->file, head->sort.io_cache, sizeof(internal::IO_CACHE));
      363 +     memcpy(select->file, head->sort.io_cache, sizeof(internal::io_cache_st));
 364  364       select->records=(ha_rows) (select->file->end_of_file/
 365  365                                  head->cursor->ref_length);
 366  366       delete head->sort.io_cache;

 370  370   }
 371  371   
 372  372   
 373       - optimizer::SqlSelect::SqlSelect() 
      373 + optimizer::SqlSelect::SqlSelect()
 374  374     :
 375  375       quick(NULL),
 376  376       cond(NULL),
 377       -     file(static_cast<internal::IO_CACHE *>(memory::sql_calloc(sizeof(internal::IO_CACHE)))),
      377 +     file(static_cast<internal::io_cache_st *>(memory::sql_calloc(sizeof(internal::io_cache_st)))),
 378  378       free_cond(false)
 379  379   {
 380  380     quick_keys.reset();
 385  385   
 386  386   void optimizer::SqlSelect::cleanup()
 387  387   {
 388       -   if (quick)
 389       -   {
 390       -     delete quick;
 391       -     quick= NULL;
 392       -   }
      388 + 
      389 +   delete quick;
      390 +   quick= NULL;
 393  391   
 394  392     if (free_cond)
 395  393     {

 407  405   }
 408  406   
 409  407   
 410       - bool optimizer::SqlSelect::check_quick(Session *session, 
      408 + bool optimizer::SqlSelect::check_quick(Session *session,
 411  409                                          bool force_quick_range,
 412  410                                          ha_rows limit)
 413  411   {
 414  412     key_map tmp;
 415  413     tmp.set();
 416       -   return (test_quick_select(session, 
 417       -                            tmp, 
 418       -                            0, 
      414 +   return (test_quick_select(session,
      415 +                            tmp,
      416 +                            0,
 419  417                              limit,
 420       -                            force_quick_range, 
      418 +                            force_quick_range,
 421  419                              false) < 0);
 422  420   }
 423  421   

 640  638   {
 641  639     uint32_t idx;
 642  640     double scan_time;
 643       -   if (quick)
 644       -   {
 645       -     delete quick;
 646       -     quick= NULL;
 647       -   }
      641 + 
      642 +   delete quick;
      643 +   quick= NULL;
      644 + 
 648  645     needed_reg.reset();
 649  646     quick_keys.reset();
 650  647     if (keys_to_use.none())
 689  686       param.force_default_mrr= ordered_output;
 690  687   
 691  688       session->no_errors=1;                               // Don't warn about NULL
 692       -     memory::init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
 693       -     if (!(param.key_parts= (KEY_PART*) alloc.alloc_root( sizeof(KEY_PART) * head->getShare()->key_parts)) ||
 694       -         fill_used_fields_bitmap(&param))
      689 +     alloc.init(session->variables.range_alloc_block_size);
      690 +     param.key_parts= new (alloc) KEY_PART[head->getShare()->key_parts];
      691 +     if (fill_used_fields_bitmap(&param))
 695  692       {
 696  693         session->no_errors=0;
 697  694         alloc.free_root(MYF(0));                  // Return memory & allocator

 737  734       {
 738  735         int key_for_use= head->find_shortest_key(&head->covering_keys);
 739  736         double key_read_time=
 740       -         param.table->cursor->index_only_read_time(key_for_use,
 741       -                                                 rows2double(records)) +
      737 +         param.table->cursor->index_only_read_time(key_for_use, records) +
 742  738           (double) records / TIME_FOR_COMPARE;
 743  739         if (key_read_time < read_time)
 744  740           read_time= key_read_time;

 808  804             objects are not allowed so don't use ROR-intersection for
 809  805             table deletes.
 810  806           */
 811       -         if ((session->lex->sql_command != SQLCOM_DELETE))
      807 +         if ((session->lex().sql_command != SQLCOM_DELETE))
 812  808           {
 813  809             /*
 814  810               Get best non-covering ROR-intersection plan and prepare data for

 836  832           optimizer::SEL_IMERGE *imerge= NULL;
 837  833           optimizer::TableReadPlan *best_conj_trp= NULL;
 838  834           optimizer::TableReadPlan *new_conj_trp= NULL;
 839       -         List_iterator_fast<optimizer::SEL_IMERGE> it(tree->merges);
      835 +         List<optimizer::SEL_IMERGE>::iterator it(tree->merges.begin());
 840  836           while ((imerge= it++))
 841  837           {
 842  838             new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);

 860  856         records= best_trp->records;
 861  857         if (! (quick= best_trp->make_quick(&param, true)) || quick->init())
 862  858         {
 863       -         /* quick can already be free here */
 864       -         if (quick)
 865       -         {
 866       -           delete quick;
 867       -           quick= NULL;
 868       -         }
      859 +         delete quick;
      860 +         quick= NULL;
 869  861         }
 870  862       }
 871  863   
 974  966     ha_rows roru_total_records;
 975  967     double roru_intersect_part= 1.0;
 976  968   
 977       -   if (! (range_scans= (optimizer::RangeReadPlan**)param->mem_root->alloc_root(sizeof(optimizer::RangeReadPlan*)* n_child_scans)))
 978       -   {
 979       -     return NULL;
 980       -   }
      969 +   range_scans= new (*param->mem_root) optimizer::RangeReadPlan*[n_child_scans];
 981  970   
 982  971     /*
 983  972       Collect best 'range' scan for each of disjuncts, and, while doing so,
 984  973       analyze possibility of ROR scans. Also calculate some values needed by
 985  974       other parts of the code.
 986  975     */
 987       -   for (ptree= imerge->trees, cur_child= range_scans;
 988       -        ptree != imerge->trees_next;
 989       -        ptree++, cur_child++)
      976 +   for (ptree= imerge->trees, cur_child= range_scans; ptree != imerge->trees_next; ptree++, cur_child++)
 990  977     {
 991  978       if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
 992  979       {

1041 1028     /* Calculate cost(rowid_to_row_scan) */
1042 1029     {
1043 1030       optimizer::CostVector sweep_cost;
1044      -     Join *join= param->session->lex->select_lex.join;
     1031 +     Join *join= param->session->lex().select_lex.join;
1045 1032       bool is_interrupted= test(join && join->tables == 1);
1046 1033       get_sweep_read_cost(param->table, non_cpk_scan_records, is_interrupted,
1047 1034                           &sweep_cost);

1057 1044                                       param->session->variables.sortbuff_size);
1058 1045     if (param->imerge_cost_buff_size < unique_calc_buff_size)
1059 1046     {
1060      -     if (!(param->imerge_cost_buff= (uint*)param->mem_root->alloc_root(unique_calc_buff_size)))
1061      -     {
1062      -       return NULL;
1063      -     }
1064      - 
     1047 +     param->imerge_cost_buff= (uint*)param->mem_root->alloc(unique_calc_buff_size);
1065 1048       param->imerge_cost_buff_size= unique_calc_buff_size;
1066 1049     }
1067 1050   

1071 1054                            param->session->variables.sortbuff_size);
1072 1055     if (imerge_cost < read_time)
1073 1056     {
1074      -     if ((imerge_trp= new (param->mem_root) optimizer::IndexMergeReadPlan))
1075      -     {
1076      -       imerge_trp->read_cost= imerge_cost;
1077      -       imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
1078      -       imerge_trp->records= min(imerge_trp->records,
1079      -                                param->table->cursor->stats.records);
1080      -       imerge_trp->range_scans= range_scans;
1081      -       imerge_trp->range_scans_end= range_scans + n_child_scans;
1082      -       read_time= imerge_cost;
1083      -     }
     1057 +     imerge_trp= new (*param->mem_root) optimizer::IndexMergeReadPlan;
     1058 +     imerge_trp->read_cost= imerge_cost;
     1059 +     imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
     1060 +     imerge_trp->records= min(imerge_trp->records, param->table->cursor->stats.records);
     1061 +     imerge_trp->range_scans= range_scans;
     1062 +     imerge_trp->range_scans_end= range_scans + n_child_scans;
     1063 +     read_time= imerge_cost;
1084 1064     }
1085 1065   
1086 1066   build_ror_index_merge:
1087      -   if (!all_scans_ror_able || param->session->lex->sql_command == SQLCOM_DELETE)
     1067 +   if (!all_scans_ror_able || param->session->lex().sql_command == SQLCOM_DELETE)
1088 1068       return(imerge_trp);
1089 1069   
1090 1070     /* Ok, it is possible to build a ROR-union, try it. */
1091 1071     bool dummy;
1092      -   if (! (roru_read_plans=
1093      -           (optimizer::TableReadPlan **) param->mem_root->alloc_root(sizeof(optimizer::TableReadPlan*) * n_child_scans)))
1094      -   {
1095      -     return imerge_trp;
1096      -   }
     1072 +   roru_read_plans= new (*param->mem_root) optimizer::TableReadPlan*[n_child_scans];
1097 1073   skip_to_ror_scan:
1098 1074     roru_index_costs= 0.0;
1099 1075     roru_total_records= 0;
1117 1093         cost= param->table->cursor->
1118 1094                 read_time(param->real_keynr[(*cur_child)->key_idx], 1,
1119 1095                           (*cur_child)->records) +
1120      -               rows2double((*cur_child)->records) / TIME_FOR_COMPARE;
     1096 +               static_cast<double>((*cur_child)->records) / TIME_FOR_COMPARE;
1121 1097       }
1122 1098       else
1123 1099         cost= read_time;

1159 1135     double roru_total_cost;
1160 1136     {
1161 1137       optimizer::CostVector sweep_cost;
1162      -     Join *join= param->session->lex->select_lex.join;
     1138 +     Join *join= param->session->lex().select_lex.join;
1163 1139       bool is_interrupted= test(join && join->tables == 1);
1164 1140       get_sweep_read_cost(param->table, roru_total_records, is_interrupted,
1165 1141                           &sweep_cost);
1166 1142       roru_total_cost= roru_index_costs +
1167      -                      rows2double(roru_total_records)*log((double)n_child_scans) /
     1143 +                      static_cast<double>(roru_total_records)*log((double)n_child_scans) /
1168 1144                        (TIME_FOR_COMPARE_ROWID * M_LN2) +
1169 1145                        sweep_cost.total_cost();
1170 1146     }

1172 1148     optimizer::RorUnionReadPlan *roru= NULL;
1173 1149     if (roru_total_cost < read_time)
1174 1150     {
1175      -     if ((roru= new (param->mem_root) optimizer::RorUnionReadPlan))
     1151 +     if ((roru= new (*param->mem_root) optimizer::RorUnionReadPlan))
1176 1152       {
1177 1153         roru->first_ror= roru_read_plans;
1178 1154         roru->last_ror= roru_read_plans + n_child_scans;

1204 1180   static
1205 1181   optimizer::RorScanInfo *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
1206 1182   {
1207      -   optimizer::RorScanInfo *ror_scan= NULL;
1208      - 
1209 1183     uint32_t keynr;
1210      - 
1211      -   if (!(ror_scan= (optimizer::RorScanInfo*)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo))))
1212      -     return NULL;
     1184 +   optimizer::RorScanInfo* ror_scan= new (*param->mem_root) optimizer::RorScanInfo;
1213 1185   
1214 1186     ror_scan->idx= idx;
1215 1187     ror_scan->keynr= keynr= param->real_keynr[idx];

1230 1202       if (param->needed_fields.test(key_part->fieldnr-1))
1231 1203         tmp_bitset.set(key_part->fieldnr-1);
1232 1204     }
1233      -   double rows= rows2double(param->table->quick_rows[ror_scan->keynr]);
     1205 +   double rows= param->table->quick_rows[ror_scan->keynr];
1234 1206     ror_scan->index_read_cost=
1235 1207       param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
1236 1208     ror_scan->covered_fields= tmp_bitset.to_ulong();

1253 1225   
1254 1226   static int cmp_ror_scan_info(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
1255 1227   {
1256      -   double val1= rows2double((*a)->records) * (*a)->key_rec_length;
1257      -   double val2= rows2double((*b)->records) * (*b)->key_rec_length;
     1228 +   double val1= static_cast<double>((*a)->records) * (*a)->key_rec_length;
     1229 +   double val2= static_cast<double>((*b)->records) * (*b)->key_rec_length;
1258 1230     return (val1 < val2)? -1: (val1 == val2)? 0 : 1;
1259 1231   }
1260 1232   
1490 1462         if (cur_covered)
1491 1463         {
1492 1464           /* uncovered -> covered */
1493      -         double tmp= rows2double(records)/rows2double(prev_records);
1494      -         selectivity_mult *= tmp;
     1465 +         selectivity_mult *= static_cast<double>(records) / prev_records;
1495 1466           prev_records= HA_POS_ERROR;
1496 1467         }
1497 1468         else

1504 1475     }
1505 1476     if (!prev_covered)
1506 1477     {
1507      -     double tmp= rows2double(info->param->table->quick_rows[scan->keynr]) /
1508      -                 rows2double(prev_records);
1509      -     selectivity_mult *= tmp;
     1478 +     selectivity_mult *= static_cast<double>(info->param->table->quick_rows[scan->keynr]) / prev_records;
1510 1479     }
1511      -   return(selectivity_mult);
     1480 +   return selectivity_mult;
1512 1481   }
1513 1482   
1514 1483   

1569 1538         each record of every scan. Assuming 1/TIME_FOR_COMPARE_ROWID
1570 1539         per check this gives us:
1571 1540       */
1572      -     info->index_scan_costs += rows2double(info->index_records) /
     1541 +     info->index_scan_costs += static_cast<double>(info->index_records) /
1573 1542                                 TIME_FOR_COMPARE_ROWID;
1574 1543     }
1575 1544     else

1588 1557     if (! info->is_covering)
1589 1558     {
1590 1559       optimizer::CostVector sweep_cost;
1591      -     Join *join= info->param->session->lex->select_lex.join;
     1560 +     Join *join= info->param->session->lex().select_lex.join;
1592 1561       bool is_interrupted= test(join && join->tables == 1);
1593 1562       get_sweep_read_cost(info->param->table, double2rows(info->out_rows),
1594 1563                           is_interrupted, &sweep_cost);

1702 1671       cost total_cost.
1703 1672     */
1704 1673     /* Add priority queue use cost. */
1705      -   total_cost += rows2double(records)*
     1674 +   total_cost += static_cast<double>(records) *
1706 1675                   log((double)(ror_scan_mark - tree->ror_scans)) /
1707 1676                   (TIME_FOR_COMPARE_ROWID * M_LN2);
1708 1677   
1709 1678     if (total_cost > read_time)
1710 1679       return NULL;
1711 1680   
1712      -   optimizer::RorIntersectReadPlan *trp= NULL;
1713      -   if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
1714      -   {
1715      -     return trp;
1716      -   }
     1681 +   optimizer::RorIntersectReadPlan* trp= new (*param->mem_root) optimizer::RorIntersectReadPlan;
1717 1682   
1718 1683     uint32_t best_num= (ror_scan_mark - tree->ror_scans);
1719      -   if (!(trp->first_scan= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* best_num)))
1720      -     return NULL;
     1684 +   trp->first_scan= new (*param->mem_root) optimizer::RorScanInfo*[best_num];
1721 1685     memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
1722 1686     trp->last_scan=  trp->first_scan + best_num;
1723 1687     trp->is_covering= true;
1815 1779     uint32_t cpk_no= 0;
1816 1780     bool cpk_scan_used= false;
1817 1781   
1818      -   if (! (tree->ror_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* param->keys)))
1819      -   {
1820      -     return NULL;
1821      -   }
1822      -   cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
1823      -            param->table->getShare()->getPrimaryKey() : MAX_KEY);
     1782 +   tree->ror_scans= new (*param->mem_root) optimizer::RorScanInfo*[param->keys];
     1783 +   cpk_no= ((param->table->cursor->primary_key_is_clustered()) ? param->table->getShare()->getPrimaryKey() : MAX_KEY);
1824 1784   
1825 1785     for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
1826 1786     {

1848 1808                        (qsort_cmp)cmp_ror_scan_info);
1849 1809   
1850 1810     optimizer::RorScanInfo **intersect_scans= NULL; /* ROR scans used in index intersection */
1851      -   optimizer::RorScanInfo **intersect_scans_end= NULL;
1852      -   if (! (intersect_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*) * tree->n_ror_scans)))
1853      -     return NULL;
     1811 +   optimizer::RorScanInfo **intersect_scans_end= intersect_scans=  new (*param->mem_root) optimizer::RorScanInfo*[tree->n_ror_scans];
1854 1812     intersect_scans_end= intersect_scans;
1855 1813   
1856 1814     /* Create and incrementally update ROR intersection. */

1909 1867     optimizer::RorIntersectReadPlan *trp= NULL;
1910 1868     if (min_cost < read_time && (cpk_scan_used || best_num > 1))
1911 1869     {
1912      -     if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
1913      -       return trp;
1914      - 
1915      -     if (! (trp->first_scan=
1916      -            (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)*best_num)))
1917      -       return NULL;
     1870 +     trp= new (*param->mem_root) optimizer::RorIntersectReadPlan;
     1871 +     trp->first_scan= new (*param->mem_root) optimizer::RorScanInfo*[best_num];
1918 1872       memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
1919 1873       trp->last_scan=  trp->first_scan + best_num;
1920 1874       trp->is_covering= intersect_best.is_covering;
2020 1974     if (key_to_read)
2021 1975     {
2022 1976       idx= key_to_read - tree->keys;
2023      -     if ((read_plan= new (param->mem_root) optimizer::RangeReadPlan(*key_to_read, idx,
2024      -                                                                    best_mrr_flags)))
2025      -     {
2026      -       read_plan->records= best_records;
2027      -       read_plan->is_ror= tree->ror_scans_map.test(idx);
2028      -       read_plan->read_cost= read_time;
2029      -       read_plan->mrr_buf_size= best_buf_size;
2030      -     }
     1977 +     read_plan= new (*param->mem_root) optimizer::RangeReadPlan(*key_to_read, idx, best_mrr_flags);
     1978 +     read_plan->records= best_records;
     1979 +     read_plan->is_ror= tree->ror_scans_map.test(idx);
     1980 +     read_plan->read_cost= read_time;
     1981 +     read_plan->mrr_buf_size= best_buf_size;
2031 1982     }
2032      - 
2033      -   return(read_plan);
     1983 +   return read_plan;
2034 1984   }
2035 1985   
2036 1986   
2037 1987   optimizer::QuickSelectInterface *optimizer::IndexMergeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
2038 1988   {
2039      -   optimizer::QuickIndexMergeSelect *quick_imerge;
2040      -   optimizer::QuickRangeSelect *quick= NULL;
2041 1989     /* index_merge always retrieves full rows, ignore retrieve_full_rows */
2042      -   if (! (quick_imerge= new optimizer::QuickIndexMergeSelect(param->session, param->table)))
2043      -   {
2044      -     return NULL;
2045      -   }
2046      - 
     1990 +   optimizer::QuickIndexMergeSelect* quick_imerge= new optimizer::QuickIndexMergeSelect(param->session, param->table);
2047 1991     quick_imerge->records= records;
2048 1992     quick_imerge->read_time= read_cost;
2049      -   for (optimizer::RangeReadPlan **range_scan= range_scans; 
2050      -        range_scan != range_scans_end;
2051      -        range_scan++)
     1993 +   for (optimizer::RangeReadPlan **range_scan= range_scans; range_scan != range_scans_end; range_scan++)
2052 1994     {
2053      -     if (! (quick= (optimizer::QuickRangeSelect*)
2054      -           ((*range_scan)->make_quick(param, false, &quick_imerge->alloc))) ||
2055      -         quick_imerge->push_quick_back(quick))
     1995 +     optimizer::QuickRangeSelect* quick= (optimizer::QuickRangeSelect*)((*range_scan)->make_quick(param, false, &quick_imerge->alloc));
     1996 +     if (not quick)
2056 1997       {
2057 1998         delete quick;
2058 1999         delete quick_imerge;
2059 2000         return NULL;
2060 2001       }
     2002 +     quick_imerge->push_quick_back(quick);
2061 2003     }
2062 2004     return quick_imerge;
2063 2005   }
2084 2026                                                   (*first_scan)->sel_arg,
2085 2027                                                   HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
2086 2028                                                   0,
2087      -                                                 alloc)) ||
2088      -           quick_intersect->push_quick_back(quick))
     2029 +                                                 alloc)))
2089 2030         {
2090 2031           delete quick_intersect;
2091 2032           return NULL;
2092 2033         }
     2034 +       quick_intersect->push_quick_back(quick);
2093 2035       }
2094 2036       if (cpk_scan)
2095 2037       {

2115 2057   
2116 2058   optimizer::QuickSelectInterface *optimizer::RorUnionReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *)
2117 2059   {
2118      -   optimizer::QuickRorUnionSelect *quick_roru= NULL;
2119      -   optimizer::TableReadPlan **scan= NULL;
2120      -   optimizer::QuickSelectInterface *quick= NULL;
2121 2060     /*
2122 2061       It is impossible to construct a ROR-union that will not retrieve full
2123 2062       rows, ignore retrieve_full_rows parameter.
2124 2063     */
2125      -   if ((quick_roru= new optimizer::QuickRorUnionSelect(param->session, param->table)))
     2064 +   optimizer::QuickRorUnionSelect* quick_roru= new optimizer::QuickRorUnionSelect(param->session, param->table);
     2065 +   for (optimizer::TableReadPlan** scan= first_ror; scan != last_ror; scan++)
2126 2066     {
2127      -     for (scan= first_ror; scan != last_ror; scan++)
2128      -     {
2129      -       if (! (quick= (*scan)->make_quick(param, false, &quick_roru->alloc)) ||
2130      -           quick_roru->push_quick_back(quick))
2131      -       {
2132      -         return NULL;
2133      -       }
2134      -     }
2135      -     quick_roru->records= records;
2136      -     quick_roru->read_time= read_cost;
     2067 +     optimizer::QuickSelectInterface* quick= (*scan)->make_quick(param, false, &quick_roru->alloc);
     2068 +     if (not quick)
     2069 +       return NULL;
     2070 +     quick_roru->push_quick_back(quick);
2137 2071     }
     2072 +   quick_roru->records= records;
     2073 +   quick_roru->read_time= read_cost;
2138 2074     return quick_roru;
2139 2075   }
2140 2076   
2195 2131   */
2196 2132   static optimizer::SEL_TREE *get_func_mm_tree(optimizer::RangeParameter *param,
2197 2133                                     Item_func *cond_func,
2198      -                                   Field *field, 
     2134 +                                   Field *field,
2199 2135                                     Item *value,
2200      -                                   Item_result cmp_type, 
     2136 +                                   Item_result cmp_type,
2201 2137                                     bool inv)
2202 2138   {
2203 2139     optimizer::SEL_TREE *tree= NULL;
2204 2140   
2205      -   switch (cond_func->functype()) 
     2141 +   switch (cond_func->functype())
2206 2142     {
2207 2143   
2208 2144     case Item_func::NE_FUNC:

2215 2151       {
2216 2152         if (inv)
2217 2153         {
2218      -         tree= get_ne_mm_tree(param, 
2219      -                              cond_func, 
2220      -                              field, 
     2154 +         tree= get_ne_mm_tree(param,
     2155 +                              cond_func,
     2156 +                              field,
2221 2157                                cond_func->arguments()[1],
2222      -                              cond_func->arguments()[2], 
     2158 +                              cond_func->arguments()[2],
2223 2159                                cmp_type);
2224 2160         }
2225 2161         else
2226 2162         {
2227      -         tree= get_mm_parts(param, 
2228      -                            cond_func, 
2229      -                            field, 
     2163 +         tree= get_mm_parts(param,
     2164 +                            cond_func,
     2165 +                            field,
2230 2166                              Item_func::GE_FUNC,
2231 2167                                          cond_func->arguments()[1],
2232 2168                              cmp_type);
2233 2169           if (tree)
2234 2170           {
2235      -           tree= tree_and(param, 
2236      -                          tree, 
     2171 +           tree= tree_and(param,
     2172 +                          tree,
2237 2173                            get_mm_parts(param, cond_func, field,
2238 2174                                                          Item_func::LE_FUNC,
2239 2175                                                          cond_func->arguments()[2],

2242 2178         }
2243 2179       }
2244 2180       else
2245      -       tree= get_mm_parts(param, 
2246      -                          cond_func, 
     2181 +       tree= get_mm_parts(param,
     2182 +                          cond_func,
2247 2183                            field,
2248 2184                            (inv ?
2249 2185                             (value == (Item*)1 ? Item_func::GT_FUNC :
2250 2186                                                  Item_func::LT_FUNC):
2251 2187                             (value == (Item*)1 ? Item_func::LE_FUNC :
2252 2188                                                  Item_func::GE_FUNC)),
2253      -                          cond_func->arguments()[0], 
     2189 +                          cond_func->arguments()[0],
2254 2190                            cmp_type);
2255 2191       break;
2256 2192     }
2319 2255           do
2320 2256           {
2321 2257             func->array->value_to_item(i, value_item);
2322      -           tree= get_mm_parts(param, 
2323      -                              cond_func, 
     2258 +           tree= get_mm_parts(param,
     2259 +                              cond_func,
2324 2260                                field, Item_func::LT_FUNC,
2325      -                              value_item, 
     2261 +                              value_item,
2326 2262                                cmp_type);
2327 2263             if (! tree)
2328 2264               break;

2533 2469     Item_equal *item_equal= field_item->item_equal;
2534 2470     if (item_equal)
2535 2471     {
2536      -     Item_equal_iterator it(*item_equal);
     2472 +     Item_equal_iterator it(item_equal->begin());
2537 2473       Item_field *item;
2538 2474       while ((item= it++))
2539 2475       {

2564 2500   
2565 2501     if (cond->type() == Item::COND_ITEM)
2566 2502     {
2567      -     List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
     2503 +     List<Item>::iterator li(((Item_cond*) cond)->argument_list()->begin());
2568 2504   
2569 2505       if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
2570 2506       {
2571 2507         tree=0;
2572      -       Item *item;
2573      -       while ((item=li++))
     2508 +       while (Item* item=li++)
2574 2509         {
2575 2510           optimizer::SEL_TREE *new_tree= get_mm_tree(param,item);
2576 2511           if (param->session->is_fatal_error ||
2586 2521         tree= get_mm_tree(param,li++);
2587 2522         if (tree)
2588 2523         {
2589      -         Item *item;
2590      -         while ((item=li++))
     2524 +         while (Item* item= li++)
2591 2525           {
2592 2526             optimizer::SEL_TREE *new_tree= get_mm_tree(param,item);
2593 2527             if (!new_tree)

2688 2622       Item_equal *item_equal= (Item_equal *) cond;
2689 2623       if (!(value= item_equal->get_const()))
2690 2624         return 0;
2691      -     Item_equal_iterator it(*item_equal);
     2625 +     Item_equal_iterator it(item_equal->begin());
2692 2626       ref_tables= value->used_tables();
2693 2627       while ((field_item= it++))
2694 2628       {

2749 2683       if (field->eq(key_part->field))
2750 2684       {
2751 2685         optimizer::SEL_ARG *sel_arg=0;
2752      -       if (!tree && !(tree=new optimizer::SEL_TREE()))
2753      -         return 0;                               // OOM
     2686 +       if (!tree)
     2687 +         tree= new optimizer::SEL_TREE;
2754 2688         if (!value || !(value->used_tables() & ~param->read_tables))
2755 2689         {
2756      -         sel_arg= get_mm_leaf(param,cond_func,
2757      -             key_part->field,key_part,type,value);
     2690 +         sel_arg= get_mm_leaf(param,cond_func, key_part->field,key_part,type,value);
2758 2691           if (! sel_arg)
2759 2692             continue;
2760 2693           if (sel_arg->type == optimizer::SEL_ARG::IMPOSSIBLE)

2766 2699         else
2767 2700         {
2768 2701           // This key may be used later
2769      -         if (! (sel_arg= new optimizer::SEL_ARG(optimizer::SEL_ARG::MAYBE_KEY)))
2770      -           return 0;                     // OOM
     2702 +         sel_arg= new optimizer::SEL_ARG(optimizer::SEL_ARG::MAYBE_KEY);
2771 2703         }
2772 2704         sel_arg->part=(unsigned char) key_part->part;
2773 2705         tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg);
2813 2745           tree= &optimizer::null_element;
2814 2746         goto end;
2815 2747       }
2816      -     if (!(tree= new (alloc) optimizer::SEL_ARG(field,is_null_string,is_null_string)))
2817      -       goto end;                                 // out of memory
     2748 +     tree= new (*alloc) optimizer::SEL_ARG(field,is_null_string,is_null_string);
2818 2749       if (type == Item_func::ISNOTNULL_FUNC)
2819 2750       {
2820 2751         tree->min_flag=NEAR_MIN;              /* IS NOT NULL ->  X > NULL */

2842 2773       goto end;
2843 2774   
2844 2775     if (param->using_real_indexes)
2845      -     optimize_range= field->optimize_range(param->real_keynr[key_part->key],
2846      -                                           key_part->part);
     2776 +     optimize_range= field->optimize_range(param->real_keynr[key_part->key], key_part->part);
2847 2777     else
2848 2778       optimize_range= true;
2849 2779   

2900 2830           field_length= length;
2901 2831       }
2902 2832       length+=offset;
2903      -     if (!(min_str= (unsigned char*) alloc->alloc_root(length*2)))
2904      -     {
2905      -       goto end;
2906      -     }
2907      - 
     2833 +     min_str= alloc->alloc(length*2);
2908 2834       max_str=min_str+length;
2909 2835       if (maybe_null)
2910 2836         max_str[0]= min_str[0]=0;

2976 2902      * it is, then we must convert to the highest Timestamp value (or lowest,
2977 2903      * depending on whether the datetime is before or after the epoch.
2978 2904      */
2979      -   if (field->type() == DRIZZLE_TYPE_TIMESTAMP)
     2905 +   if (field->is_timestamp())
2980 2906     {
2981 2907       /*
2982 2908        * The left-side of the range comparison is a timestamp field.  Therefore,
3115 3041                  (value->val_int() < 0))
3116 3042           type = Item_func::GE_FUNC;
3117 3043       }
     3044 +     else if (err == 1)
     3045 +     {
     3046 +       tree= new (alloc) optimizer::SEL_ARG(field, 0, 0);
     3047 +       tree->type= optimizer::SEL_ARG::IMPOSSIBLE;
     3048 +       goto end;
     3049 +     }
3118 3050     }
3119 3051     else if (err < 0)
3120 3052     {

3127 3059       Any predicate except "<=>"(null-safe equality operator) involving NULL as a
3128 3060       constant is always FALSE
3129 3061       Put IMPOSSIBLE Tree(null_element) here.
3130      -   */  
     3062 +   */
3131 3063     if (type != Item_func::EQUAL_FUNC && field->is_real_null())
3132 3064     {
3133 3065       tree= &optimizer::null_element;
3134 3066       goto end;
3135 3067     }
3136 3068   
3137      -   str= (unsigned char*) alloc->alloc_root(key_part->store_length+1);
3138      -   if (!str)
3139      -     goto end;
     3069 +   str= alloc->alloc(key_part->store_length+1);
3140 3070     if (maybe_null)
3141      -     *str= (unsigned char) field->is_real_null();        // Set to 1 if null
     3071 +     *str= field->is_real_null();        // Set to 1 if null
3142 3072     field->get_key_image(str+maybe_null, key_part->length);
3143      -   if (! (tree= new (alloc) optimizer::SEL_ARG(field, str, str)))
3144      -     goto end; // out of memory
     3073 +   tree= new (alloc) optimizer::SEL_ARG(field, str, str);
3145 3074   
3146 3075     /*
3147 3076       Check if we are comparing an UNSIGNED integer with a negative constant.
3257 3186   
3258 3187   #define CLONE_KEY1_MAYBE 1
3259 3188   #define CLONE_KEY2_MAYBE 2
3260      - #define swap_clone_flag(A) ((A & 1) << 1) | ((A & 2) >> 1)
3261 3189   
     3190 + static uint32_t swap_clone_flag(uint32_t a)
     3191 + {
     3192 +   return ((a & 1) << 1) | ((a & 2) >> 1);
     3193 + }
3262 3194   
3263 3195   static optimizer::SEL_TREE *
3264 3196   tree_and(optimizer::RangeParameter *param, optimizer::SEL_TREE *tree1, optimizer::SEL_TREE *tree2)

3310 3242     /* dispose index_merge if there is a "range" option */
3311 3243     if (result_keys.any())
3312 3244     {
3313      -     tree1->merges.empty();
     3245 +     tree1->merges.clear();
3314 3246       return(tree1);
3315 3247     }
3316 3248   

3332 3264     optimizer::SEL_ARG *next= NULL;
3333 3265     ulong use_count=key1->use_count;
3334 3266   
3335      -   if (key1->elements != 1)
     3267 +   if (key1->size() != 1)
3336 3268     {
3337      -     key2->use_count+=key1->elements-1; //psergey: why we don't count that key1 has n-k-p?
3338      -     key2->increment_use_count((int) key1->elements-1);
     3269 +     key2->use_count+=key1->size()-1; //psergey: why we don't count that key1 has n-k-p?
     3270 +     key2->increment_use_count((int) key1->size()-1);
3339 3271     }
3340 3272     if (key1->type == optimizer::SEL_ARG::MAYBE_KEY)
3341 3273     {

3475 3407       if (! next || next->type != optimizer::SEL_ARG::IMPOSSIBLE)
3476 3408       {
3477 3409         optimizer::SEL_ARG *new_arg= e1->clone_and(e2);
3478      -       if (! new_arg)
3479      -         return &optimizer::null_element;                        // End of memory
3480 3410         new_arg->next_key_part=next;
3481 3411         if (! new_tree)
3482 3412         {
3855 3785         !(pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
3856 3786        *mrr_flags |= HA_MRR_INDEX_ONLY;
3857 3787   
3858      -   if (session->lex->sql_command != SQLCOM_SELECT)
     3788 +   if (session->lex().sql_command != SQLCOM_SELECT)
3859 3789       *mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;
3860 3790   
3861 3791     *bufsize= param->session->variables.read_rnd_buff_size;

4002 3932       {
4003 3933         quick->mrr_flags= mrr_flags;
4004 3934         quick->mrr_buf_size= mrr_buf_size;
4005      -       if (parent_alloc)
4006      -       {
4007      -         quick->key_parts=(KEY_PART*)
4008      -           parent_alloc->memdup_root( (char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
4009      -       }
4010      -       else
4011      -       {
4012      -         quick->key_parts=(KEY_PART*)
4013      -           quick->alloc.memdup_root((char*) param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
4014      -       }
     3935 +       quick->key_parts= parent_alloc
     3936 +         ? (KEY_PART*)parent_alloc->memdup(param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts)
     3937 +         : (KEY_PART*)quick->alloc.memdup(param->key[idx], sizeof(KEY_PART)* param->table->key_info[param->real_keynr[idx]].key_parts);
4015 3938       }
4016 3939     }
4017 3940     return quick;

4146 4069     }
4147 4070   
4148 4071     /* Get range for retrieving rows in QUICK_SELECT::get_next */
4149      -   if (! (range= new optimizer::QuickRange(param->min_key,
     4072 +   range= new optimizer::QuickRange(param->min_key,
4150 4073                                                                (uint32_t) (tmp_min_key - param->min_key),
4151 4074                                              min_part >=0 ? make_keypart_map(min_part) : 0,
4152 4075                                                                param->max_key,
4153 4076                                                                (uint32_t) (tmp_max_key - param->max_key),
4154 4077                                              max_part >=0 ? make_keypart_map(max_part) : 0,
4155      -                                                              flag)))
4156      -   {
4157      -     return 1;                   // out of memory
4158      -   }
     4078 +                                                              flag);
4159 4079   
4160 4080     set_if_bigger(quick->max_used_key_length, (uint32_t)range->min_length);
4161 4081     set_if_bigger(quick->max_used_key_length, (uint32_t)range->max_length);
4162 4082     set_if_bigger(quick->used_key_parts, (uint32_t) key_tree->part+1);
4163      -   if (insert_dynamic(&quick->ranges, (unsigned char*) &range))
4164      -   {
4165      -     return 1;
4166      -   }
     4083 +   quick->ranges.push_back(&range);
4167 4084   
4168 4085    end:
4169 4086     if (key_tree->right != &optimizer::null_element)
4262 4179       goto err;
4263 4180     quick->records= records;
4264 4181   
4265      -   if ((cp_buffer_from_ref(session, ref) && session->is_fatal_error) ||
4266      -       !(range= new(alloc) optimizer::QuickRange()))
     4182 +   if (cp_buffer_from_ref(session, ref) && session->is_fatal_error)
4267 4183       goto err;                                   // out of memory
     4184 +   range= new (*alloc) optimizer::QuickRange;
4268 4185   
4269 4186     range->min_key= range->max_key= ref->key_buff;
4270 4187     range->min_length= range->max_length= ref->key_length;
4271 4188     range->min_keypart_map= range->max_keypart_map=
4272 4189       make_prev_keypart_map(ref->key_parts);
4273      -   range->flag= ((ref->key_length == key_info->key_length &&
4274      -                  (key_info->flags & HA_END_SPACE_KEY) == 0) ? EQ_RANGE : 0);
4275      - 
4276      - 
4277      -   if (!(quick->key_parts=key_part=(KEY_PART *)
4278      -         quick->alloc.alloc_root(sizeof(KEY_PART)*ref->key_parts)))
4279      -     goto err;
     4190 +   range->flag= (ref->key_length == key_info->key_length && (key_info->flags & HA_END_SPACE_KEY) == 0) ? EQ_RANGE : 0;
     4191 + 
     4192 +   quick->key_parts=key_part= new (quick->alloc) KEY_PART[ref->key_parts];
4280 4193   
4281 4194     for (part=0 ; part < ref->key_parts ;part++,key_part++)
4282 4195     {

4287 4200       key_part->null_bit=     key_info->key_part[part].null_bit;
4288 4201       key_part->flag=         (uint8_t) key_info->key_part[part].key_part_flag;
4289 4202     }
4290      -   if (insert_dynamic(&quick->ranges,(unsigned char*)&range))
4291      -     goto err;
     4203 +   quick->ranges.push_back(&range);
4292 4204   
4293 4205     /*
4294 4206        Add a NULL range if REF_OR_NULL optimization is used.

4301 4213       optimizer::QuickRange *null_range= NULL;
4302 4214   
4303 4215       *ref->null_ref_key= 1;              // Set null byte then create a range
4304      -     if (!(null_range= new (alloc)
     4216 +     null_range= new (alloc)
4305 4217             optimizer::QuickRange(ref->key_buff, ref->key_length,
4306 4218                                    make_prev_keypart_map(ref->key_parts),
4307 4219                                    ref->key_buff, ref->key_length,
4308      -                                  make_prev_keypart_map(ref->key_parts), EQ_RANGE)))
4309      -       goto err;
     4220 +                                  make_prev_keypart_map(ref->key_parts), EQ_RANGE);
4310 4221       *ref->null_ref_key= 0;              // Clear null byte
4311      -     if (insert_dynamic(&quick->ranges,(unsigned char*)&null_range))
4312      -       goto err;
     4222 +     quick->ranges.push_back(&null_range);
4313 4223     }
4314 4224   
4315 4225     /* Call multi_range_read_info() to get the MRR flags and buffer size */
4316 4226     quick->mrr_flags= HA_MRR_NO_ASSOCIATION |
4317 4227                       (table->key_read ? HA_MRR_INDEX_ONLY : 0);
4318      -   if (session->lex->sql_command != SQLCOM_SELECT)
     4228 +   if (session->lex().sql_command != SQLCOM_SELECT)
4319 4229       quick->mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;
4320 4230   
4321 4231     quick->mrr_buf_size= session->variables.read_rnd_buff_size;
4322      -   if (table->cursor->multi_range_read_info(quick->index, 1, (uint32_t)records,
4323      -                                            &quick->mrr_buf_size,
4324      -                                            &quick->mrr_flags, &cost))
     4232 +   if (table->cursor->multi_range_read_info(quick->index, 1, (uint32_t)records, &quick->mrr_buf_size, &quick->mrr_flags, &cost))
4325 4233       goto err;
4326 4234   
4327 4235     return quick;
4350 4258     quick->qr_traversal_ctx.first=  (optimizer::QuickRange**)quick->ranges.buffer;
4351 4259     quick->qr_traversal_ctx.cur=    (optimizer::QuickRange**)quick->ranges.buffer;
4352 4260     quick->qr_traversal_ctx.last=   quick->qr_traversal_ctx.cur +
4353      -                                   quick->ranges.elements;
     4261 +                                   quick->ranges.size();
4354 4262     return &quick->qr_traversal_ctx;
4355 4263   }
4356 4264   

4563 4471   get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree)
4564 4472   {
4565 4473     Session *session= param->session;
4566      -   Join *join= session->lex->current_select->join;
     4474 +   Join *join= session->lex().current_select->join;
4567 4475     Table *table= param->table;
4568 4476     bool have_min= false;              /* true if there is a MIN function. */
4569 4477     bool have_max= false;              /* true if there is a MAX function. */

4595 4503       return NULL;
4596 4504   
4597 4505     /* Analyze the query in more detail. */
4598      -   List_iterator<Item> select_items_it(join->fields_list);
     4506 +   List<Item>::iterator select_items_it(join->fields_list.begin());
4599 4507   
4600 4508     /* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/
4601 4509     if (join->make_sum_func_list(join->all_fields, join->fields_list, 1))

4751 4659       */
4752 4660       else if (join->select_distinct)
4753 4661       {
4754      -       select_items_it.rewind();
     4662 +       select_items_it= join->fields_list.begin();
4755 4663         used_key_parts_map.reset();
4756 4664         uint32_t max_key_part= 0;
4757 4665         while ((item= select_items_it++))

4765 4673           */
4766 4674           if (used_key_parts_map.test(key_part_nr))
4767 4675             continue;
4768      -         if (key_part_nr < 1 || key_part_nr > join->fields_list.elements)
     4676 +         if (key_part_nr < 1 || key_part_nr > join->fields_list.size())
4769 4677           goto next_index;
4770 4678           cur_part= cur_index_info->key_part + key_part_nr - 1;
4771 4679           cur_group_prefix_len+= cur_part->store_length;
4975 4883       return NULL;
4976 4884   
4977 4885     /* The query passes all tests, so construct a new TRP object. */
4978      -   read_plan=
4979      -     new(param->mem_root) optimizer::GroupMinMaxReadPlan(have_min,
     4886 +   read_plan= new (*param->mem_root) optimizer::GroupMinMaxReadPlan(have_min,
4980 4887                                                           have_max,
4981 4888                                                           min_max_arg_part,
4982 4889                                                           group_prefix_len,

4990 4897                                                           best_index_tree,
4991 4898                                                           best_param_idx,
4992 4899                                                           best_quick_prefix_records);
4993      -   if (read_plan)
4994      -   {
4995      -     if (tree && read_plan->quick_prefix_records == 0)
4996      -       return NULL;
4997      - 
4998      -     read_plan->read_cost= best_read_cost;
4999      -     read_plan->records= best_records;
5000      -   }
5001      - 
     4900 +   if (tree && read_plan->quick_prefix_records == 0)
     4901 +     return NULL;
     4902 +   read_plan->read_cost= best_read_cost;
     4903 +   read_plan->records= best_records;
5002 4904     return read_plan;
5003 4905   }
5004 4906   

5032 4934     Item::Type cond_type= cond->type();
5033 4935     if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */
5034 4936     {
5035      -     List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list());
     4937 +     List<Item>::iterator li(((Item_cond*) cond)->argument_list()->begin());
5036 4938       Item *and_or_arg= NULL;
5037 4939       while ((and_or_arg= li++))
5038 4940       {
5464 5366   optimizer::QuickSelectInterface *
5465 5367   optimizer::GroupMinMaxReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
5466 5368   {
5467      -   optimizer::QuickGroupMinMaxSelect *quick= NULL;
5468      - 
5469      -   quick= new optimizer::QuickGroupMinMaxSelect(param->table,
5470      -                                                param->session->lex->current_select->join,
     5369 +   optimizer::QuickGroupMinMaxSelect *quick= new optimizer::QuickGroupMinMaxSelect(param->table,
     5370 +                                                param->session->lex().current_select->join,
5471 5371                                                  have_min,
5472 5372                                                  have_max,
5473 5373                                                  min_max_arg_part,

5481 5381                                                  key_infix_len,
5482 5382                                                  key_infix,
5483 5383                                                  parent_alloc);
5484      -   if (! quick)
5485      -   {
5486      -     return NULL;
5487      -   }
5488      - 
5489 5384     if (quick->init())
5490 5385     {
5491 5386       delete quick;

5552 5447   
5553 5448   optimizer::QuickSelectInterface *optimizer::RangeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
5554 5449   {
5555      -   optimizer::QuickRangeSelect *quick= NULL;
5556      -   if ((quick= optimizer::get_quick_select(param,
5557      -                                           key_idx,
5558      -                                           key,
5559      -                                           mrr_flags,
5560      -                                           mrr_buf_size,
5561      -                                           parent_alloc)))
     5450 +   optimizer::QuickRangeSelect *quick= optimizer::get_quick_select(param, key_idx, key, mrr_flags, mrr_buf_size, parent_alloc);
     5451 +   if (quick)
5562 5452     {
5563 5453       quick->records= records;
5564 5454       quick->read_time= read_cost;

5572 5462     boost::dynamic_bitset<> map= bitsToBitset();
5573 5463     for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
5574 5464     {
5575      -     if (! map.test(i))
5576      -     {
     5465 +     if (not map.test(i))
5577 5466         return i;
5578      -     }
5579 5467     }
5580 5468     return map.size();
5581 5469   }
5611 5499     }
5612 5500     string final(covered_fields_size - res.length(), '0');
5613 5501     final.append(res);
5614      -   return (boost::dynamic_bitset<>(final));
     5502 +   return boost::dynamic_bitset<>(final);
5615 5503   }
5616 5504   
5617 5505   