#include <vector>
#include <algorithm>

#include <boost/dynamic_bitset.hpp>

#include "drizzled/check_stack_overrun.h"
#include "drizzled/error.h"
#include "drizzled/field/num.h"
#include "drizzled/internal/iocache.h"
#include "drizzled/internal/my_sys.h"
#include "drizzled/item/cmpfunc.h"
#include "drizzled/memory/multi_malloc.h"
#include "drizzled/optimizer/cost_vector.h"
#include "drizzled/optimizer/quick_group_min_max_select.h"
#include "drizzled/optimizer/quick_index_merge_select.h"
#include "drizzled/optimizer/quick_range.h"
#include "drizzled/optimizer/quick_range_select.h"
#include "drizzled/optimizer/quick_ror_intersect_select.h"
#include "drizzled/optimizer/quick_ror_union_select.h"
#include "drizzled/optimizer/range.h"
#include "drizzled/optimizer/range_param.h"
#include "drizzled/optimizer/sel_arg.h"
#include "drizzled/optimizer/sum.h"
#include "drizzled/optimizer/table_read_plan.h"
#include "drizzled/plugin/storage_engine.h"
#include "drizzled/records.h"
#include "drizzled/sql_base.h"
#include "drizzled/sql_select.h"
#include "drizzled/table_reference.h"
#include "drizzled/temporal.h" /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */

using namespace std;

namespace drizzled
{
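/*
  From get_sweep_read_cost() (excerpt; the enclosing function name is
  assumed from the surrounding optimizer code): estimate the cost of
  reading busy_blocks table blocks in a single disk sweep.
*/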
  if (busy_blocks < 1.0)
    busy_blocks= 1.0;

  cost->setIOCount(busy_blocks);
  if (! interrupted)
  {
    /* Assume reading is done in one 'sweep' */
    cost->setAvgIOCost((DISK_SEEK_BASE_COST +
                        DISK_SEEK_PROP_COST*n_blocks/busy_blocks));
  }
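/*
  Worked example for the sweep model above (constant values assumed for
  illustration only): with DISK_SEEK_BASE_COST= 0.9, DISK_SEEK_PROP_COST=
  0.05, n_blocks= 1000 and busy_blocks= 100, each of the 100 I/Os is
  charged 0.9 + 0.05 * 1000/100 = 1.4, i.e. the per-read seek cost grows
  with the average gap between the blocks the sweep actually fetches.
*/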
class SEL_TREE :public memory::SqlAlloc
{
public:
  /*
    Starting an effort to document this field:
    (for some i, keys[i]->type == optimizer::SEL_ARG::IMPOSSIBLE) =>
      (type == SEL_TREE::IMPOSSIBLE)
  */
  enum Type
  {
    IMPOSSIBLE,
    ALWAYS,
    MAYBE,
    KEY,
    KEY_SMALLER
  } type;

  SEL_TREE(enum Type type_arg) :type(type_arg) {}
  SEL_TREE() :type(KEY)
  {
    keys_map.reset();
    memset(keys, 0, sizeof(keys));
  }
  /*
    Note: there may exist SEL_TREE objects with sel_tree->type=KEY and
    keys[i]=0 for all i. (SergeyP: it is not clear whether there is any
    merit in range analyzer functions (e.g. get_mm_parts) returning a
    pointer to such SEL_TREE instead of NULL)
  */
  optimizer::SEL_ARG *keys[MAX_KEY];
  key_map keys_map; /* bitmask of non-NULL elements in keys */

  /*
    Possible ways to read rows using index_merge. The list is non-empty only
    if type==KEY. Currently can be non-empty only if keys_map.none().
  */
  List<SEL_IMERGE> merges;

  /* The members below are filled/used only after get_mm_tree is done */
  key_map ror_scans_map; /* bitmask of ROR scan-able elements in keys */
  uint32_t n_ror_scans; /* number of set bits in ror_scans_map */

  optimizer::RorScanInfo **ror_scans; /* list of ROR key scans */
  optimizer::RorScanInfo **ror_scans_end; /* last ROR scan */
  /* Note that #records for each key scan is stored in table->quick_rows */
};
class SEL_IMERGE;

static SEL_TREE *get_mm_parts(optimizer::RangeParameter *param,
                              COND *cond_func,
                              Field *field,
                              Item_func::Functype type,
                              Item *value,
                              Item_result cmp_type);

static optimizer::SEL_ARG *get_mm_leaf(optimizer::RangeParameter *param,
                                       COND *cond_func,
                                       Field *field,
                                       KEY_PART *key_part,
                                       Item_func::Functype type,
                                       Item *value);

static SEL_TREE *get_mm_tree(optimizer::RangeParameter *param, COND *cond);

static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);

static ha_rows check_quick_select(Session *session,
                                  optimizer::Parameter *param,
                                  uint32_t idx,
                                  bool index_only,
                                  optimizer::SEL_ARG *tree,
                                  bool update_tbl_stats,
                                  uint32_t *mrr_flags,
                                  uint32_t *bufsize,
                                  optimizer::CostVector *cost);

static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
                                                      optimizer::Parameter *param,
                                                      SEL_TREE *tree,
                                                      bool index_read_must_be_used,
                                                      bool update_tbl_stats,
                                                      double read_time);

static
optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
                                                        SEL_TREE *tree,
                                                        double read_time,
                                                        bool *are_all_covering);

static
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
                                                                 SEL_TREE *tree,
                                                                 double read_time);

static
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
                                                  optimizer::Parameter *param,
                                                  SEL_IMERGE *imerge,
                                                  double read_time);

static
optimizer::GroupMinMaxReadPlan *get_best_group_min_max(optimizer::Parameter *param, SEL_TREE *tree);

static void print_sel_tree(optimizer::Parameter *param,
                           SEL_TREE *tree,
                           key_map *tree_map,
                           const char *msg);

static void print_ror_scans_arr(Table *table,
                                const char *msg,
                                optimizer::RorScanInfo **start,
                                optimizer::RorScanInfo **end);

static void print_ror_scans_vector(Table *table,
                                   const char *msg,
                                   vector<optimizer::RorScanInfo *> &vec);

static SEL_TREE *tree_and(optimizer::RangeParameter *param, SEL_TREE *tree1, SEL_TREE *tree2);

static SEL_TREE *tree_or(optimizer::RangeParameter *param, SEL_TREE *tree1, SEL_TREE *tree2);

static optimizer::SEL_ARG *sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2);

static optimizer::SEL_ARG *key_or(optimizer::RangeParameter *param, optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2);

static optimizer::SEL_ARG *key_and(optimizer::RangeParameter *param,
                                   optimizer::SEL_ARG *key1,
                                   optimizer::SEL_ARG *key2,
                                   uint32_t clone_flag);

static bool get_range(optimizer::SEL_ARG **e1, optimizer::SEL_ARG **e2, optimizer::SEL_ARG *root1);

static bool eq_tree(optimizer::SEL_ARG *a, optimizer::SEL_ARG *b);

optimizer::SEL_ARG optimizer::null_element(optimizer::SEL_ARG::IMPOSSIBLE);

static bool null_part_in_key(KEY_PART *key_part,
                             const unsigned char *key,
                             uint32_t length);

bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, optimizer::RangeParameter *param);
/*
  SEL_IMERGE is a list of possible ways to do index merge, i.e. it is
  a condition in the following form:
   (t_1||t_2||...||t_N) && (next)

  where all t_i are SEL_TREEs, next is another SEL_IMERGE and no pair
  (t_i,t_j) contains SEL_ARGs for the same index.

  SEL_TREE contained in SEL_IMERGE always has merges=NULL.

  This class relies on memory manager to do the cleanup.
*/
class SEL_IMERGE : public memory::SqlAlloc
{
  enum { PREALLOCED_TREES= 10};
  SEL_TREE *trees_prealloced[PREALLOCED_TREES];
public:
  SEL_TREE **trees; /* trees used to do index_merge */
  SEL_TREE **trees_next; /* last of these trees */
  SEL_TREE **trees_end; /* end of allocated space */

  optimizer::SEL_ARG ***best_keys; /* best keys to read in SEL_TREEs */

  SEL_IMERGE() :
    trees(&trees_prealloced[0]),
    trees_next(trees),
    trees_end(trees + PREALLOCED_TREES)
  {}
  int or_sel_tree(optimizer::RangeParameter *param, SEL_TREE *tree);
  int or_sel_tree_with_checks(optimizer::RangeParameter *param, SEL_TREE *new_tree);
  int or_sel_imerge_with_checks(optimizer::RangeParameter *param, SEL_IMERGE* imerge);
};
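/*
  Illustrative example (table and index names assumed): for
    WHERE (key1 < 10 OR key2 = 5) AND (key3 > 7 OR key4 = 1)
  one SEL_IMERGE holds the two SEL_TREEs for (key1 < 10) and (key2 = 5),
  and its "next" chains to a second SEL_IMERGE holding the SEL_TREEs for
  (key3 > 7) and (key4 = 1).
*/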
/*
  Add SEL_TREE to this index_merge without any checks,

  NOTES
    This function implements the following:
      (x_1||...||x_N) || t = (x_1||...||x_N||t), where x_i, t are SEL_TREEs

  RETURN
     0 - OK
    -1 - Out of memory.
*/
int SEL_IMERGE::or_sel_tree(optimizer::RangeParameter *param, SEL_TREE *tree)
{
  if (trees_next == trees_end)
  {
    const int realloc_ratio= 2; /* Double size for next round */
    uint32_t old_elements= (trees_end - trees);
    uint32_t old_size= sizeof(SEL_TREE**) * old_elements;
    uint32_t new_size= old_size * realloc_ratio;
    SEL_TREE **new_trees;
    if (!(new_trees= (SEL_TREE**)alloc_root(param->mem_root, new_size)))
      return -1;
    memcpy(new_trees, trees, old_size);
    trees= new_trees;
    trees_next= trees + old_elements;
    trees_end= trees + old_elements * realloc_ratio;
  }
  *(trees_next++)= tree;
  return 0;
}
/*
  Perform OR operation on this SEL_IMERGE and supplied SEL_TREE new_tree,
  combining new_tree with one of the trees in this SEL_IMERGE if they both
  have SEL_ARGs for the same key.

  SYNOPSIS
    or_sel_tree_with_checks()
      param    Parameter from SqlSelect::test_quick_select
      new_tree SEL_TREE with type KEY or KEY_SMALLER.

  NOTES
    This does the following:
    (t_1||...||t_k)||new_tree =
     either
       = (t_1||...||t_k||new_tree)
     or
       = (t_1||....||(t_j|| new_tree)||...||t_k),

    where t_i and new_tree are SEL_TREEs.
    new_tree is combined with the first t_j it has a SEL_ARG on common
    key with. As a consequence of this, choice of keys to do index_merge
    read may depend on the order of conditions in WHERE part of the query.

  RETURN
    0  OK
    1  One of the trees was combined with new_tree to SEL_TREE::ALWAYS,
       and (*this) should be discarded.
   -1  An error occurred.
*/
int SEL_IMERGE::or_sel_tree_with_checks(optimizer::RangeParameter *param, SEL_TREE *new_tree)
{
  for (SEL_TREE** tree = trees;
       tree != trees_next;
       tree++)
  {
    if (sel_trees_can_be_ored(*tree, new_tree, param))
    {
      *tree = tree_or(param, *tree, new_tree);
      if (! *tree)
        return 1;
      if (((*tree)->type == SEL_TREE::MAYBE) ||
          ((*tree)->type == SEL_TREE::ALWAYS))
        return 1;
      /* SEL_TREE::IMPOSSIBLE is impossible here */
      return 0;
    }
  }

  /* New tree cannot be combined with any of existing trees. */
  return or_sel_tree(param, new_tree);
}
/*
  Perform OR operation on this index_merge and supplied index_merge list.

  RETURN
    0 - OK
    1 - One of conditions in result is always true and this SEL_IMERGE
        should be discarded.
   -1 - An error occurred
*/
int SEL_IMERGE::or_sel_imerge_with_checks(optimizer::RangeParameter *param, SEL_IMERGE* imerge)
{
  for (SEL_TREE** tree= imerge->trees;
       tree != imerge->trees_next;
       tree++)
  {
    if (or_sel_tree_with_checks(param, *tree))
      return 1;
  }
  return 0;
}
/*
  Perform AND operation on two index_merge lists and store result in *im1.
*/
inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2)
{
  im1->concat(im2);
}


/*
  Perform OR operation on 2 index_merge lists, storing result in first list.

  NOTES
    The following conversion is implemented:
      (a_1 &&...&& a_N)||(b_1 &&...&& b_K) = AND_i,j(a_i || b_j) =>
       => (a_1||b_1)

    i.e. all conjuncts except the first one are currently dropped.
    This is done to avoid producing N*K ways to do index_merge.

    If (a_1||b_1) produce a condition that is always true, NULL is returned
    and index_merge is discarded (while it is actually possible to try
    harder).

    As a consequence of this, choice of keys to do index_merge read may depend
    on the order of conditions in WHERE part of the query.

  RETURN
    0     OK, result is stored in *im1
    other Error, both passed lists are unusable
*/
static int imerge_list_or_list(optimizer::RangeParameter *param,
                               List<SEL_IMERGE> *im1,
                               List<SEL_IMERGE> *im2)
{
  SEL_IMERGE *imerge= im1->head();
  im1->empty();
  im1->push_back(imerge);

  return imerge->or_sel_imerge_with_checks(param, im2->head());
}


/*
  Perform OR operation on index_merge list and key tree.

  RETURN
    0     OK, result is stored in *im1.
    other Error
*/
static int imerge_list_or_tree(optimizer::RangeParameter *param,
                               List<SEL_IMERGE> *im1,
                               SEL_TREE *tree)
{
  SEL_IMERGE *imerge;
  List_iterator<SEL_IMERGE> it(*im1);
  while ((imerge= it++))
  {
    if (imerge->or_sel_tree_with_checks(param, tree))
      it.remove();
  }
  return im1->is_empty();
}
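/*
  Example of the conversion implemented by imerge_list_or_list() (index
  names assumed): ORing (a_1 && a_2) with (b_1 && b_2), e.g.
  (key1<3 && key2<3) || (key3<3 && key4<3), keeps only (a_1 || b_1) =
  (key1<3 || key3<3); the conjuncts a_2 and b_2 are dropped rather than
  producing all four (a_i || b_j) combinations.
*/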
/***************************************************************************
** Basic functions for SqlSelect and QuickRangeSelect
***************************************************************************/
805
static int fill_used_fields_bitmap(optimizer::Parameter *param)
547
807
Table *table= param->table;
549
param->tmp_covered_fields.clear();
550
param->needed_fields.resize(table->getShare()->sizeFields());
551
param->needed_fields.reset();
553
param->needed_fields|= *table->read_set;
554
param->needed_fields|= *table->write_set;
556
pk= param->table->getShare()->getPrimaryKey();
810
param->tmp_covered_fields.setBitmap(0);
811
param->fields_bitmap_size= table->s->column_bitmap_size;
812
if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
813
param->fields_bitmap_size)) ||
814
param->needed_fields.init(tmp, table->s->fields))
817
param->needed_fields= *table->read_set;
818
bitmap_union(¶m->needed_fields, table->write_set);
820
pk= param->table->s->primary_key;
557
821
if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
559
823
/* The table uses clustered PK and it is not internally generated */
560
KeyPartInfo *key_part= param->table->key_info[pk].key_part;
561
KeyPartInfo *key_part_end= key_part +
824
KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
825
KEY_PART_INFO *key_part_end= key_part +
562
826
param->table->key_info[pk].key_parts;
563
827
for (;key_part != key_part_end; ++key_part)
564
param->needed_fields.reset(key_part->fieldnr-1);
828
param->needed_fields.clearBit(key_part->fieldnr-1);
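/*
  From make_ror_scan() (excerpted): set up a RorScanInfo for index 'keynr',
  recording which needed fields the index covers and the cost of an
  index-only scan.
*/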
  ror_scan->sel_arg= sel_arg;
  ror_scan->records= param->table->quick_rows[keynr];

  ror_scan->covered_fields_size= param->table->getShare()->sizeFields();
  boost::dynamic_bitset<> tmp_bitset(param->table->getShare()->sizeFields());

  KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
  KeyPartInfo *key_part_end= key_part +
               param->table->key_info[keynr].key_parts;
  for (; key_part != key_part_end; ++key_part)
  {
    if (param->needed_fields.test(key_part->fieldnr-1))
      tmp_bitset.set(key_part->fieldnr-1);
  }
  double rows= rows2double(param->table->quick_rows[ror_scan->keynr]);
  ror_scan->index_read_cost=
    param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
  ror_scan->covered_fields= tmp_bitset.to_ulong();
  return ror_scan;
}
/*
  Compare two optimizer::RorScanInfo** by E(#records_matched) * key_record_length.

  SYNOPSIS
    cmp_ror_scan_info()
      a ptr to first compared value
      b ptr to second compared value
*/

/* ... */
/*
  Get best covering ROR-intersection.

  SYNOPSIS
    get_best_covering_ror_intersect()
      param     Parameter from test_quick_select function.
      tree      SEL_TREE with sets of intervals for different keys.
      read_time Don't return table read plans with cost > read_time.

  RETURN
    Best covering ROR-intersection plan
    NULL if no plan found.

  NOTES
    get_best_ror_intersect must be called for a tree before calling this
    function for it.
    This function invalidates tree->ror_scans member values.

  The following approximate algorithm is used:
    I=set of all covering indexes
    F=set of all fields to cover
    S={}

    do
    {
      Order I by (#covered fields in F desc,
                  #components asc,
                  number of first not covered component asc);
      F=F-covered by first(I);
      S=S+first(I);
      I=I-first(I);
    } while F is not empty.
*/
static
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
                                                                 SEL_TREE *tree,
                                                                 double read_time)
{
  optimizer::RorScanInfo **ror_scan_mark;
  optimizer::RorScanInfo **ror_scans_end= tree->ror_scans_end;

  for (optimizer::RorScanInfo **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
    (*scan)->key_components=
      param->table->key_info[(*scan)->keynr].key_parts;

  /*
    Run covering-ROR-search algorithm.
    Assume set I is [ror_scan .. ror_scans_end)
  */

  /*I=set of all covering indexes */
  ror_scan_mark= tree->ror_scans;

  boost::dynamic_bitset<> *covered_fields= &param->tmp_covered_fields;
  if (covered_fields->empty())
  {
    covered_fields->resize(param->table->getShare()->sizeFields());
  }
  covered_fields->reset();

  double total_cost= 0.0f;
  ha_rows records= 0;
  bool all_covered;

  print_ror_scans_arr(param->table,
                      "building covering ROR-I",
                      ror_scan_mark, ror_scans_end);
  do
  {
    /*
      Update changed sorting info:
        #covered fields,
        number of first not covered component
      Calculate and save these values for each of remaining scans.
    */
    for (optimizer::RorScanInfo **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
    {
      /* subtract one bitset from the other */
      (*scan)->subtractBitset(*covered_fields);
      (*scan)->used_fields_covered=
        (*scan)->getBitCount();
      (*scan)->first_uncovered_field= (*scan)->findFirstNotSet();
    }

    internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
                       sizeof(optimizer::RorScanInfo*),
                       (qsort_cmp)cmp_ror_scan_info_covering);

    print_ror_scans_arr(param->table,
                        "remaining scans",
                        ror_scan_mark, ror_scans_end);

    /* I=I-first(I) */
    total_cost += (*ror_scan_mark)->index_read_cost;
    records += (*ror_scan_mark)->records;
    if (total_cost > read_time)
      return NULL;
    /* F=F-covered by first(I) */
    boost::dynamic_bitset<> tmp_bitset= (*ror_scan_mark)->bitsToBitset();
    *covered_fields|= tmp_bitset;
    all_covered= param->needed_fields.is_subset_of(*covered_fields);
  } while ((++ror_scan_mark < ror_scans_end) && ! all_covered);

  if (! all_covered || (ror_scan_mark - tree->ror_scans) == 1)
    return NULL;

  /*
    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
    cost total_cost.
  */
  print_ror_scans_arr(param->table,
                      "creating covering ROR-intersect",
                      tree->ror_scans, ror_scan_mark);

  /* Add priority queue use cost. */
  total_cost += rows2double(records)*
                log((double)(ror_scan_mark - tree->ror_scans)) /
                (TIME_FOR_COMPARE_ROWID * M_LN2);

  if (total_cost > read_time)
    return NULL;

  optimizer::RorIntersectReadPlan *trp= NULL;
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
    return trp;

  uint32_t best_num= (ror_scan_mark - tree->ror_scans);
  if (!(trp->first_scan= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* best_num)))
    return NULL;
  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
  trp->last_scan= trp->first_scan + best_num;
  trp->is_covering= true;
  trp->read_cost= total_cost;
  trp->records= records;
  trp->cpk_scan= NULL;
  set_if_smaller(param->table->quick_condition_rows, records);

  return trp;
}
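/*
  Example run of the covering search in get_best_covering_ror_intersect()
  (indexes and fields assumed): F= {f1,f2,f3}; idx1 covers {f1,f2}, idx2
  covers {f2,f3}, idx3 covers {f2}. Iteration 1 picks idx1 (2 uncovered
  fields), leaving F= {f3}; iteration 2 picks idx2, leaving F empty, so the
  plan intersects idx1 and idx2 and idx3 is never read.
*/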
/*
  Get best ROR-intersection plan using non-covering ROR-intersection search
  algorithm. The returned plan may be covering.
*/
static
optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
                                                        SEL_TREE *tree,
                                                        double read_time,
                                                        bool *are_all_covering)
{
  uint32_t idx;
  double min_cost= DBL_MAX;

  if ((tree->n_ror_scans < 2) || ! param->table->cursor->stats.records)
    return NULL;

  /*
    Step1: Collect ROR-able SEL_ARGs and create optimizer::RorScanInfo for each of
    them. Also find and save clustered PK scan if there is one.
  */
  optimizer::RorScanInfo **cur_ror_scan= NULL;
  optimizer::RorScanInfo *cpk_scan= NULL;
  uint32_t cpk_no= 0;
  bool cpk_scan_used= false;

  if (! (tree->ror_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* param->keys)))
    return NULL;
  cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
           param->table->getShare()->getPrimaryKey() : MAX_KEY);

  for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
  {
    optimizer::RorScanInfo *scan;
    if (! tree->ror_scans_map.test(idx))
      continue;
    if (! (scan= make_ror_scan(param, idx, tree->keys[idx])))
      return NULL;
    if (param->real_keynr[idx] == cpk_no)
    {
      cpk_scan= scan;
      tree->n_ror_scans--;
    }
    else
      *(cur_ror_scan++)= scan;
  }

  tree->ror_scans_end= cur_ror_scan;
  print_ror_scans_arr(param->table, "original",
                      tree->ror_scans,
                      tree->ror_scans_end);
  /*
    Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized
    optimizer::RorScanInfo's.
    Step 2: Get best ROR-intersection using an approximate algorithm.
  */
  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(optimizer::RorScanInfo*),
                     (qsort_cmp)cmp_ror_scan_info);
  print_ror_scans_arr(param->table, "ordered",
                      tree->ror_scans,
                      tree->ror_scans_end);

  optimizer::RorScanInfo **intersect_scans= NULL; /* ROR scans used in index intersection */
  optimizer::RorScanInfo **intersect_scans_end= NULL;
  if (! (intersect_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*) * tree->n_ror_scans)))
    return NULL;
  intersect_scans_end= intersect_scans;

  /* Create and incrementally update ROR intersection. */
  ROR_INTERSECT_INFO intersect(param);
  ROR_INTERSECT_INFO intersect_best(param);

  /* [intersect_scans,intersect_scans_best) will hold the best intersection */
  optimizer::RorScanInfo **intersect_scans_best= NULL;
  cur_ror_scan= tree->ror_scans;
  intersect_scans_best= intersect_scans;
  while (cur_ror_scan != tree->ror_scans_end && ! intersect.is_covering)
  {
    /* S= S + first(R); R= R - first(R); */
    if (! ror_intersect_add(&intersect, *cur_ror_scan, false))
    {
      cur_ror_scan++;
      continue;
    }
    /* ... */
  }
  /* ... */

  optimizer::RorIntersectReadPlan *trp= NULL;
  uint32_t best_num= (intersect_scans_best - intersect_scans);
  if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
    return trp;
  if (! (trp->first_scan=
         (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)*best_num)))
    return NULL;
  memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
  trp->last_scan= trp->first_scan + best_num;
  trp->is_covering= intersect_best.is_covering;
  trp->read_cost= intersect_best.total_cost;
  /* Prevent divisions by zero */
  ha_rows best_rows= double2rows(intersect_best.out_rows);
  if (! best_rows)
    best_rows= 1;
  set_if_smaller(param->table->quick_condition_rows, best_rows);
  trp->records= best_rows;
  trp->index_scan_costs= intersect_best.index_scan_costs;
  trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
  return trp;
}
Get best "range" table read plan for given optimizer::SEL_TREE, also update some info
2111
Get best covering ROR-intersection.
2113
get_best_covering_ror_intersect()
2114
param Parameter from test_quick_select function.
2115
tree SEL_TREE with sets of intervals for different keys.
2116
read_time Don't return table read plans with cost > read_time.
2119
Best covering ROR-intersection plan
2120
NULL if no plan found.
2123
get_best_ror_intersect must be called for a tree before calling this
2125
This function invalidates tree->ror_scans member values.
2127
The following approximate algorithm is used:
2128
I=set of all covering indexes
2129
F=set of all fields to cover
2134
Order I by (#covered fields in F desc,
2136
number of first not covered component asc);
2137
F=F-covered by first(I);
2140
} while F is not empty.
2144
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
2148
ROR_SCAN_INFO **ror_scan_mark;
2149
ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
2151
for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
2152
(*scan)->key_components=
2153
param->table->key_info[(*scan)->keynr].key_parts;
2156
Run covering-ROR-search algorithm.
2157
Assume set I is [ror_scan .. ror_scans_end)
2160
/*I=set of all covering indexes */
2161
ror_scan_mark= tree->ror_scans;
2163
MyBitmap *covered_fields= ¶m->tmp_covered_fields;
2164
if (! covered_fields->getBitmap())
2166
my_bitmap_map *tmp_bitmap= (my_bitmap_map*)alloc_root(param->mem_root,
2167
param->fields_bitmap_size);
2168
covered_fields->setBitmap(tmp_bitmap);
2170
if (! covered_fields->getBitmap() ||
2171
covered_fields->init(covered_fields->getBitmap(),
2172
param->table->s->fields))
2174
covered_fields->clearAll();
2176
double total_cost= 0.0f;
2180
print_ror_scans_arr(param->table,
2181
"building covering ROR-I",
2182
ror_scan_mark, ror_scans_end);
2186
Update changed sorting info:
2188
number of first not covered component
2189
Calculate and save these values for each of remaining scans.
2191
for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
2193
bitmap_subtract(&(*scan)->covered_fields, covered_fields);
2194
(*scan)->used_fields_covered=
2195
(*scan)->covered_fields.getBitsSet();
2196
(*scan)->first_uncovered_field=
2197
(*scan)->covered_fields.getFirst();
2200
internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
2201
sizeof(ROR_SCAN_INFO*),
2202
(qsort_cmp)cmp_ror_scan_info_covering);
2204
print_ror_scans_arr(param->table,
2206
ror_scan_mark, ror_scans_end);
2209
total_cost += (*ror_scan_mark)->index_read_cost;
2210
records += (*ror_scan_mark)->records;
2211
if (total_cost > read_time)
2213
/* F=F-covered by first(I) */
2214
bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
2215
all_covered= bitmap_is_subset(¶m->needed_fields, covered_fields);
2216
} while ((++ror_scan_mark < ror_scans_end) && !all_covered);
2218
if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
2222
Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
2225
print_ror_scans_arr(param->table,
2226
"creating covering ROR-intersect",
2227
tree->ror_scans, ror_scan_mark);
2229
/* Add priority queue use cost. */
2230
total_cost += rows2double(records)*
2231
log((double)(ror_scan_mark - tree->ror_scans)) /
2232
(TIME_FOR_COMPARE_ROWID * M_LN2);
2234
if (total_cost > read_time)
2237
optimizer::RorIntersectReadPlan *trp= NULL;
2238
if (! (trp= new (param->mem_root) optimizer::RorIntersectReadPlan))
2243
uint32_t best_num= (ror_scan_mark - tree->ror_scans);
2244
trp->ror_range_scans.assign(tree->ror_scans, tree->ror_scans + best_num);
2245
trp->setRowRetrievalNecessary(true);
2246
trp->read_cost= total_cost;
2247
trp->records= records;
2248
trp->cpk_scan= NULL;
2249
set_if_smaller(param->table->quick_condition_rows, records);
2256
Get best "range" table read plan for given SEL_TREE, also update some info

  SYNOPSIS
    get_key_scans_params()
      param                    Parameters from test_quick_select
      tree                     Make range select for this SEL_TREE
      index_read_must_be_used  true <=> assume 'index only' option will be set
                               (except for clustered PK indexes)
      update_tbl_stats         true <=> update table->quick_* with information
                               about range scans we've evaluated.
*/

/* ... */
#define CLONE_KEY1_MAYBE 1
#define CLONE_KEY2_MAYBE 2

static uint32_t swap_clone_flag(uint32_t a)
{
  return ((a & 1) << 1) | ((a & 2) >> 1);
}
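/*
  Example: swap_clone_flag() just exchanges the two MAYBE bits, so
  swap_clone_flag(CLONE_KEY1_MAYBE) == CLONE_KEY2_MAYBE and a mask with
  both bits set maps to itself; it is applied when key1 and key2 swap
  roles during key_and().
*/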
static SEL_TREE *
tree_and(optimizer::RangeParameter *param, SEL_TREE *tree1, SEL_TREE *tree2)
{
  if (! tree1)
    return tree2;
  if (! tree2)
    return tree1;
  if (tree1->type == SEL_TREE::IMPOSSIBLE || tree2->type == SEL_TREE::ALWAYS)
    return tree1;
  if (tree2->type == SEL_TREE::IMPOSSIBLE || tree1->type == SEL_TREE::ALWAYS)
    return tree2;
  if (tree1->type == SEL_TREE::MAYBE)
  {
    if (tree2->type == SEL_TREE::KEY)
      tree2->type=SEL_TREE::KEY_SMALLER;
    return tree2;
  }
  if (tree2->type == SEL_TREE::MAYBE)
  {
    tree1->type=SEL_TREE::KEY_SMALLER;
    return tree1;
  }

  key_map result_keys;
  result_keys.reset();
  /* ... */
}
Check if two SEL_TREES can be combined into one (i.e. a single key range
3634
read can be constructed for "cond_of_tree1 OR cond_of_tree2" ) without
3638
bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2,
3639
optimizer::RangeParameter* param)
3641
key_map common_keys= tree1->keys_map;
3642
common_keys&= tree2->keys_map;
3644
if (common_keys.none())
3647
/* trees have a common key, check if they refer to same key part */
3648
optimizer::SEL_ARG **key1,**key2;
3649
for (uint32_t key_no=0; key_no < param->keys; key_no++)
3651
if (common_keys.test(key_no))
3653
key1= tree1->keys + key_no;
3654
key2= tree2->keys + key_no;
3655
if ((*key1)->part == (*key2)->part)
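/*
  Example (keys assumed): "key1part1 < 5" and "key1part1 > 10" share key1
  and both constrain part 0, so the trees can be ORed into one range scan;
  "key1part1 < 5" OR "key2part1 = 3" have no common key and would need
  index_merge instead.
*/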
/*
  Remove the trees that are not suitable for record retrieval.

  SYNOPSIS
    param  Range analysis parameter
    tree   Tree to be processed, tree->type is KEY or KEY_SMALLER

  DESCRIPTION
    This function walks through tree->keys[] and removes the SEL_ARG* trees
    that are not "maybe" trees (*) and cannot be used to construct quick range
    selects.
    (*) - have type MAYBE or MAYBE_KEY. Perhaps we should remove trees of
          these types here as well.

    A SEL_ARG* tree cannot be used to construct quick select if it has
    tree->part != 0. (e.g. it could represent "keypart2 < const").

    WHY THIS FUNCTION IS NEEDED

    Normally we allow construction of SEL_TREE objects that have SEL_ARG
    trees that do not allow quick range select construction. For example for
    " keypart1=1 AND keypart2=2 " the execution will proceed as follows:
    tree1= SEL_TREE { SEL_ARG{keypart1=1} }
    tree2= SEL_TREE { SEL_ARG{keypart2=2} } -- can't make quick range select
                                               from this
    call tree_and(tree1, tree2) -- this joins SEL_ARGs into a usable SEL_ARG
                                   tree.

    There is an exception though: when we construct index_merge SEL_TREE,
    any SEL_ARG* tree that cannot be used to construct quick range select can
    be removed, because current range analysis code doesn't provide any way
    that tree could be later combined with another tree.
    Consider an example: we should not construct
    st1 = SEL_TREE {
      merges = SEL_IMERGE {
                            SEL_TREE(t.key1part1 = 1),
                            SEL_TREE(t.key2part2 = 2) -- (*)
                          }
          };
    because
     - (*) cannot be used to construct quick range select,
     - There is no execution path that would cause (*) to be converted to
       a tree that could be used.

    The latter is easy to verify: first, notice that the only way to convert
    (*) into a usable tree is to call tree_and(something, (*)).

    Second look at what tree_and/tree_or function would do when passed a
    SEL_TREE that has the structure like st1 tree has, and conclude that
    tree_and(something, (*)) will not be called.

  RETURN
    0  Ok, some suitable trees left
    1  No tree->keys[] left.
*/
static bool remove_nonrange_trees(optimizer::RangeParameter *param, SEL_TREE *tree)
{
  bool res= false;
  for (uint32_t i=0; i < param->keys; i++)
  {
    if (tree->keys[i])
    {
      if (tree->keys[i]->part)
      {
        tree->keys[i]= NULL;
        tree->keys_map.reset(i);
      }
      else
        res= true;
    }
  }
  return !res;
}
tree_or(optimizer::RangeParameter *param,SEL_TREE *tree1,SEL_TREE *tree2)
3743
if (!tree1 || !tree2)
3745
if (tree1->type == SEL_TREE::IMPOSSIBLE || tree2->type == SEL_TREE::ALWAYS)
3747
if (tree2->type == SEL_TREE::IMPOSSIBLE || tree1->type == SEL_TREE::ALWAYS)
3749
if (tree1->type == SEL_TREE::MAYBE)
3750
return(tree1); // Can't use this
3751
if (tree2->type == SEL_TREE::MAYBE)
3754
SEL_TREE *result= 0;
3755
key_map result_keys;
3756
result_keys.reset();
3757
if (sel_trees_can_be_ored(tree1, tree2, param))
3759
/* Join the trees key per key */
3760
optimizer::SEL_ARG **key1,**key2,**end;
3761
for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ;
3762
key1 != end ; key1++,key2++)
3764
*key1=key_or(param, *key1, *key2);
3767
result=tree1; // Added to tree1
3768
result_keys.set(key1 - tree1->keys);
3772
result->keys_map= result_keys;
3776
/* ok, two trees have KEY type but cannot be used without index merge */
3777
if (tree1->merges.is_empty() && tree2->merges.is_empty())
3779
if (param->remove_jump_scans)
3781
bool no_trees= remove_nonrange_trees(param, tree1);
3782
no_trees= no_trees || remove_nonrange_trees(param, tree2);
3784
return(new SEL_TREE(SEL_TREE::ALWAYS));
3787
/* both trees are "range" trees, produce new index merge structure */
3788
if (!(result= new SEL_TREE()) || !(merge= new SEL_IMERGE()) ||
3789
(result->merges.push_back(merge)) ||
3790
(merge->or_sel_tree(param, tree1)) ||
3791
(merge->or_sel_tree(param, tree2)))
3794
result->type= tree1->type;
3796
else if (!tree1->merges.is_empty() && !tree2->merges.is_empty())
3798
if (imerge_list_or_list(param, &tree1->merges, &tree2->merges))
3799
result= new SEL_TREE(SEL_TREE::ALWAYS);
3805
/* one tree is index merge tree and another is range tree */
3806
if (tree1->merges.is_empty())
3807
std::swap(tree1, tree2);
3809
if (param->remove_jump_scans && remove_nonrange_trees(param, tree2))
3810
return(new SEL_TREE(SEL_TREE::ALWAYS));
3811
/* add tree2 to tree1->merges, checking if it collapses to ALWAYS */
3812
if (imerge_list_or_tree(param, &tree1->merges, tree2))
3813
result= new SEL_TREE(SEL_TREE::ALWAYS);
/* And key trees where key1->part < key2->part */

/* ... */

static optimizer::SEL_ARG *
key_or(optimizer::RangeParameter *param, optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2)
{
  if (! key1)
  {
    if (key2)
    {
      key2->use_count--;
      key2->free_tree();
    }
    return 0;
  }
  if (! key2)
  {
    key1->use_count--;
    key1->free_tree();
    return 0;
  }
  key1->use_count--;
  key2->use_count--;

  if (key1->part != key2->part)
  {
    key1->free_tree();
    key2->free_tree();
    return 0; // Can't optimize this
  }

  // If one of the key is MAYBE_KEY then the found region may be bigger
  if (key1->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    key2->free_tree();
    key1->use_count++;
    return key1;
  }
  if (key2->type == optimizer::SEL_ARG::MAYBE_KEY)
  {
    key1->free_tree();
    key2->use_count++;
    return key2;
  }

  if (key1->use_count > 0)
  {
    if (key2->use_count == 0 || key1->elements > key2->elements)
    {
      std::swap(key1,key2);
    }
    if (key1->use_count > 0 || !(key1=key1->clone_tree(param)))
      return 0; // OOM
  }

  // Add tree at key2 to tree at key1
  bool key2_shared= key2->use_count != 0;
  key1->maybe_flag|= key2->maybe_flag;

  for (key2=key2->first(); key2; )
  {
    optimizer::SEL_ARG *tmp= key1->find_range(key2); // Find key1.min <= key2.min
    int cmp;

    if (! tmp)
    {
      tmp=key1->first(); // tmp.min > key2.min
      cmp= -1;
    }
    else if ((cmp=tmp->cmp_max_to_min(key2)) < 0)
    { // Found tmp.max < key2.min
      optimizer::SEL_ARG *next= tmp->next;
      if (cmp == -2 && eq_tree(tmp->next_key_part,key2->next_key_part))
      {
        // Join near ranges like tmp.max < 0 and key2.min >= 0
        optimizer::SEL_ARG *key2_next=key2->next;
        if (key2_shared)
        {
          if (! (key2=new optimizer::SEL_ARG(*key2)))
            return 0; // out of memory
          key2->increment_use_count(key1->use_count+1);
          key2->next= key2_next; // New copy of key2
        }
        key2->copy_min(tmp);
        if (! (key1=key1->tree_delete(tmp)))
        { // Only one key in tree
          key1= key2;
          key1->make_root();
          key2= key2_next;
          break;
        }
      }
      if (! (tmp= next)) // tmp.min > key2.min
        break; // Copy rest of key2
    }
    if (cmp >= 0)
    { // tmp.min > key2.min
      int tmp_cmp;
      if ((tmp_cmp= tmp->cmp_min_to_max(key2)) > 0) // if tmp.min > key2.max
      {
        if (tmp_cmp == 2 && eq_tree(tmp->next_key_part,key2->next_key_part))
        { // ranges are connected
          tmp->copy_min_to_min(key2);
          key1->merge_flags(key2);
          if (tmp->min_flag & NO_MIN_RANGE &&
              tmp->max_flag & NO_MAX_RANGE)
          {
            if (key1->maybe_flag)
              return new optimizer::SEL_ARG(optimizer::SEL_ARG::MAYBE_KEY);
            return 0;
          }
          key2->increment_use_count(-1); // Free not used tree
          key2= key2->next;
          continue;
        }
        else
        {
          optimizer::SEL_ARG *next= key2->next; // Keys are not overlapping
          if (key2_shared)
          {
            optimizer::SEL_ARG *cpy= new optimizer::SEL_ARG(*key2); // Must make copy
            if (! cpy)
              return 0; // OOM
            key1= key1->insert(cpy);
            key2->increment_use_count(key1->use_count+1);
          }
          else
            key1= key1->insert(key2); // Will destroy key2_root
          key2= next;
          continue;
        }
      }
    }

    // tmp.max >= key2.min && tmp.min <= key2.max (overlapping ranges)
    if (eq_tree(tmp->next_key_part,key2->next_key_part))
    {
      if (tmp->is_same(key2))
      {
        tmp->merge_flags(key2); // Copy maybe flags
        key2->increment_use_count(-1); // Free not used tree
      }
      else
      {
        optimizer::SEL_ARG *last= tmp;
        while (last->next && last->next->cmp_min_to_max(key2) <= 0 &&
               eq_tree(last->next->next_key_part,key2->next_key_part))
        {
          optimizer::SEL_ARG *save= last;
          last= last->next;
          key1= key1->tree_delete(save);
        }
        last->copy_min(tmp);
        if (last->copy_min(key2) || last->copy_max(key2))
        { // Full range
          key1->free_tree();
          for (; key2; key2= key2->next)
            key2->increment_use_count(-1); // Free not used tree
          if (key1->maybe_flag)
            return new optimizer::SEL_ARG(optimizer::SEL_ARG::MAYBE_KEY);
          return 0;
        }
      }
      key2= key2->next;
      continue;
    }

    if (cmp >= 0 && tmp->cmp_min_to_min(key2) < 0)
    { // tmp.min <= x < key2.min
      optimizer::SEL_ARG *new_arg= tmp->clone_first(key2);
      if (! new_arg)
        return 0; // OOM
      if ((new_arg->next_key_part= key1->next_key_part))
        new_arg->increment_use_count(key1->use_count+1);
      tmp->copy_min_to_min(key2);
      key1= key1->insert(new_arg);
    }

    // tmp.min >= key2.min && tmp.min <= key2.max
    optimizer::SEL_ARG key(*key2); // Get copy we can modify
    for (;;)
    {
      if (tmp->cmp_min_to_min(&key) > 0)
      { // key.min <= x < tmp.min
        optimizer::SEL_ARG *new_arg= key.clone_first(tmp);
        if (! new_arg)
          return 0; // OOM
        if ((new_arg->next_key_part=key.next_key_part))
          new_arg->increment_use_count(key1->use_count+1);
        key1= key1->insert(new_arg);
      }
      if ((cmp=tmp->cmp_max_to_max(&key)) <= 0)
      { // tmp.min. <= x <= tmp.max
        tmp->maybe_flag|= key.maybe_flag;
        key.increment_use_count(key1->use_count+1);
        tmp->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part);
        if (! cmp) // Key2 is ready
          break;
        key.copy_max_to_min(tmp);
        if (! (tmp= tmp->next))
        {
          optimizer::SEL_ARG *tmp2= new optimizer::SEL_ARG(key);
          if (! tmp2)
            return 0; // OOM
          key1= key1->insert(tmp2);
          key2= key2->next;
          goto end;
        }
        if (tmp->cmp_min_to_max(&key) > 0)
        {
          optimizer::SEL_ARG *tmp2= new optimizer::SEL_ARG(key);
          if (! tmp2)
            return 0; // OOM
          key1= key1->insert(tmp2);
          break;
        }
      }
      else
      {
        optimizer::SEL_ARG *new_arg= tmp->clone_last(&key); // tmp.min <= x <= key.max
        if (! new_arg)
          return 0; // OOM
        tmp->copy_max_to_min(&key);
        tmp->increment_use_count(key1->use_count+1);
        /* Increment key count as it may be used for next loop */
        key.increment_use_count(1);
        new_arg->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part);
        key1= key1->insert(new_arg);
        break;
      }
    }
    key2= key2->next;
  }

end:
  while (key2)
  {
    optimizer::SEL_ARG *next= key2->next;
    if (key2_shared)
    {
      optimizer::SEL_ARG *tmp= new optimizer::SEL_ARG(*key2); // Must make copy
      if (! tmp)
        return 0;
      key2->increment_use_count(key1->use_count+1);
      key1= key1->insert(tmp);
    }
    else
      key1= key1->insert(key2); // Will destroy key2_root
    key2= next;
  }
  key1->use_count++;
  return key1;
}
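/*
  Example of what the loop above produces (single key part assumed):
  ORing the range lists (1 < a < 5) and (3 < a < 10) yields the single
  merged range (1 < a < 10); adjacent ranges such as (a < 0) and (a >= 0)
  are likewise joined when their next_key_part trees compare equal under
  eq_tree(), collapsing to a no-bounds range.
*/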
/* Compare if two trees are equal */
static bool eq_tree(optimizer::SEL_ARG *a, optimizer::SEL_ARG *b)
{
  if (a == b)
    return true;

  if (! a || ! b || ! a->is_same(b))
    return false;

  if (a->left != &optimizer::null_element && b->left != &optimizer::null_element)
  {
    if (! eq_tree(a->left,b->left))
      return false;
  }
  else if (a->left != &optimizer::null_element || b->left != &optimizer::null_element)
    return false;

  if (a->right != &optimizer::null_element && b->right != &optimizer::null_element)
  {
    if (! eq_tree(a->right,b->right))
      return false;
  }
  else if (a->right != &optimizer::null_element || b->right != &optimizer::null_element)
    return false;

  if (a->next_key_part != b->next_key_part)
  { /* Sub range is not equal */
    if (! a->next_key_part != ! b->next_key_part ||
        ! eq_tree(a->next_key_part, b->next_key_part))
      return false;
  }
  return true;
}
/****************************************************************************
  MRR Range Sequence Interface implementation that walks a SEL_ARG* tree.
 ****************************************************************************/

/* ... */
static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);

static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
                                                        SEL_TREE *range_tree,
                                                        optimizer::Parameter *param,
                                                        uint32_t *param_idx);

static bool get_constant_key_infix(KeyInfo *index_info,
                                   optimizer::SEL_ARG *index_range_tree,
                                   KeyPartInfo *first_non_group_part,
                                   KeyPartInfo *min_max_arg_part,
                                   KeyPartInfo *last_part,
                                   Session *session,
                                   unsigned char *key_infix,
                                   uint32_t *key_infix_len,
                                   KeyPartInfo **first_non_infix_part);

static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);

static void
cost_group_min_max(Table* table,
                   KeyInfo *index_info,
                   uint32_t used_key_parts,
                   uint32_t group_key_parts,
                   SEL_TREE *range_tree,
                   optimizer::SEL_ARG *index_tree,
                   ha_rows quick_prefix_records,
                   bool have_min,
                   bool have_max,
                   double *read_cost,
                   ha_rows *records);

/* ... */
static bool
get_constant_key_infix(KeyInfo *,
                       optimizer::SEL_ARG *index_range_tree,
                       KeyPartInfo *first_non_group_part,
                       KeyPartInfo *min_max_arg_part,
                       KeyPartInfo *last_part,
                       Session *,
                       unsigned char *key_infix,
                       uint32_t *key_infix_len,
                       KeyPartInfo **first_non_infix_part)
{
  optimizer::SEL_ARG *cur_range= NULL;
  KeyPartInfo *cur_part= NULL;
  /* End part for the first loop below. */
  KeyPartInfo *end_part= min_max_arg_part ? min_max_arg_part : last_part;

  *key_infix_len= 0;
  unsigned char *key_ptr= key_infix;
  /* ... */
}
uint32_t optimizer::RorScanInfo::findFirstNotSet() const
{
  boost::dynamic_bitset<> map= bitsToBitset();
  for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
  {
    if (! map.test(i))
      return i;
  }
  return map.size();
}


size_t optimizer::RorScanInfo::getBitCount() const
{
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
  return tmp_bitset.count();
}


void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
{
  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
  tmp_bitset-= in_bitset;
  covered_fields= tmp_bitset.to_ulong();
}


boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
{
  string res;
  uint64_t conv= covered_fields;
  while (conv)
  {
    res.push_back((conv & 1) + '0');
    conv>>= 1;
  }
  if (! res.empty())
  {
    std::reverse(res.begin(), res.end());
  }
  string final(covered_fields_size - res.length(), '0');
  final.append(res);
  return (boost::dynamic_bitset<>(final));
}
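/*
  Worked example for bitsToBitset() above: with covered_fields= 5 (binary
  101) and covered_fields_size= 4, the loop builds res= "101", the pad
  string is "0", and the result is dynamic_bitset("0101"), i.e. bits 0 and
  2 set out of four field positions.
*/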
static void print_sel_tree(optimizer::Parameter *param, SEL_TREE *tree, key_map *tree_map, const char *)
{
  optimizer::SEL_ARG **key= NULL;
  optimizer::SEL_ARG **end= NULL;
  int idx;
  char buff[1024];

  String tmp(buff,sizeof(buff),&my_charset_bin);
  tmp.length(0);
  for (idx= 0, key=tree->keys, end= key + param->keys;
       key != end;
       key++, idx++)
  {
    if (tree_map->test(idx))
    {
      uint32_t keynr= param->real_keynr[idx];
      if (tmp.length())
        tmp.append(',');
      tmp.append(param->table->key_info[keynr].name);
    }
  }
  if (! tmp.length())
    tmp.append(STRING_WITH_LEN("(empty)"));
}


static void print_ror_scans_arr(Table *table,
                                const char *,
                                optimizer::RorScanInfo **start,
                                optimizer::RorScanInfo **end)
{
  char buff[1024];
  String tmp(buff,sizeof(buff),&my_charset_bin);
  tmp.length(0);
  for (; start != end; start++)
  {
    if (tmp.length())
      tmp.append(',');
    tmp.append(table->key_info[(*start)->keynr].name);
  }
  if (! tmp.length())
    tmp.append(STRING_WITH_LEN("(empty)"));
}


static void print_ror_scans_vector(Table *table,
                                   const char *,
                                   vector<optimizer::RorScanInfo *> &vec)
{
  char buff[1024];
  String tmp(buff,sizeof(buff),&my_charset_bin);
  tmp.length(0);
  for (vector<optimizer::RorScanInfo *>::iterator it= vec.begin();
       it != vec.end();
       ++it)
  {
    if (tmp.length())
      tmp.append(',');
    tmp.append(table->key_info[(*it)->keynr].name);
  }
  if (! tmp.length())
    tmp.append(STRING_WITH_LEN("(empty)"));
}
} /* namespace drizzled */