@@ -205 +207 @@
   if (table->cursor->primary_key_is_clustered())
-    cost->setIOCount(table->cursor->read_time(table->getShare()->primary_key,
+    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
                                               static_cast<uint32_t>(nrows),
@@ -249 +249 @@
 static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);

-static ha_rows check_quick_select(optimizer::Parameter *param,
+static ha_rows check_quick_select(Session *session,
+                                  optimizer::Parameter *param,
                                   optimizer::SEL_ARG *tree,
                                   uint32_t *bufsize,
                                   optimizer::CostVector *cost);
-static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
+static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
+                                                      optimizer::Parameter *param,
                                                       optimizer::SEL_TREE *tree,
                                                       bool index_read_must_be_used,
                                                       bool update_tbl_stats,
@@ -275 +277 @@
                                                       double read_time);

-optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
+optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
+                                                  optimizer::Parameter *param,
                                                   optimizer::SEL_IMERGE *imerge,
                                                   double read_time);
@@ -457 +464 @@
   MAX_KEY if no such index was found.

-uint32_t optimizer::get_index_for_order(Table *table, order_st *order, ha_rows limit)
+uint32_t optimizer::get_index_for_order(Table *table, Order *order, ha_rows limit)
   uint32_t match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1;
   for (ord= order; ord; ord= ord->next)
@@ -470 +477 @@
-  for (idx= 0; idx < table->getShare()->keys; idx++)
+  for (idx= 0; idx < table->getShare()->sizeKeys(); idx++)
     if (!(table->keys_in_use_for_query.test(idx)))
@@ -536 +543 @@
 static int fill_used_fields_bitmap(optimizer::Parameter *param)
   Table *table= param->table;
-  param->tmp_covered_fields.setBitmap(0);
-  param->fields_bitmap_size= table->getShare()->column_bitmap_size;
-  if (!(tmp= (my_bitmap_map*) param->mem_root->alloc_root(param->fields_bitmap_size)) ||
-      param->needed_fields.init(tmp, table->getShare()->fields))
-  param->needed_fields= *table->read_set;
-  bitmap_union(&param->needed_fields, table->write_set);
-  pk= param->table->getShare()->primary_key;
+  param->tmp_covered_fields.clear();
+  param->needed_fields.resize(table->getShare()->sizeFields());
+  param->needed_fields.reset();
+  param->needed_fields|= *table->read_set;
+  param->needed_fields|= *table->write_set;
+  pk= param->table->getShare()->getPrimaryKey();
   if (pk != MAX_KEY && param->table->cursor->primary_key_is_clustered())
     /* The table uses clustered PK and it is not internally generated */
     KeyPartInfo *key_part_end= key_part +
                                param->table->key_info[pk].key_parts;
     for (;key_part != key_part_end; ++key_part)
-      param->needed_fields.clearBit(key_part->fieldnr-1);
+      param->needed_fields.reset(key_part->fieldnr-1);
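Reviewer note on the hunk above: the hand-managed MyBitmap (an alloc_root buffer plus init()/clearAll()) becomes a boost::dynamic_bitset<>, so sizing, clearing, union, and per-bit clears map onto resize(), reset(), operator|=, and reset(pos). A minimal self-contained sketch of the same bookkeeping, with illustrative sizes and field numbers (not the Drizzle sources):

    #include <boost/dynamic_bitset.hpp>
    #include <cassert>

    int main()
    {
      // Hypothetical stand-ins for table->read_set / table->write_set.
      boost::dynamic_bitset<> read_set(8), write_set(8);
      read_set.set(1); read_set.set(3);
      write_set.set(3); write_set.set(5);

      // Mirrors fill_used_fields_bitmap(): size, clear, then union in both sets.
      boost::dynamic_bitset<> needed_fields;
      needed_fields.resize(8);      // cf. table->getShare()->sizeFields()
      needed_fields.reset();
      needed_fields|= read_set;
      needed_fields|= write_set;

      // Clustered-PK parts are always available, so drop them from the set,
      // as the loop over key parts does with needed_fields.reset(fieldnr - 1).
      needed_fields.reset(3);

      assert(needed_fields.test(1) && needed_fields.test(5) && !needed_fields.test(3));
      return 0;
    }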
@@ -701 +706 @@
   This is used in get_mm_parts function.

   key_info= head->key_info;
-  for (idx=0 ; idx < head->getShare()->keys ; idx++, key_info++)
+  for (idx=0 ; idx < head->getShare()->sizeKeys() ; idx++, key_info++)
     KeyPartInfo *key_part_info;
     if (! keys_to_use.test(idx))
@@ -791 +796 @@
   bool can_build_covering= false;

   /* Get best 'range' plan and prepare data for making other plans */
-  if ((range_trp= get_key_scans_params(&param, tree, false, true,
+  if ((range_trp= get_key_scans_params(session, &param, tree, false, true,
                                        best_read_time)))
     best_trp= range_trp;
@@ -834 +839 @@
   List_iterator_fast<optimizer::SEL_IMERGE> it(tree->merges);
   while ((imerge= it++))
-    new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
+    new_conj_trp= get_best_disjunct_quick(session, &param, imerge, best_read_time);
     if (new_conj_trp)
       set_if_smaller(param.table->quick_condition_rows,
                      new_conj_trp->records);
@@ -942 +952 @@
-optimizer::TableReadPlan *get_best_disjunct_quick(optimizer::Parameter *param,
+optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
+                                                  optimizer::Parameter *param,
                                                   optimizer::SEL_IMERGE *imerge,
                                                   double read_time)
@@ -977 +988 @@
        ptree != imerge->trees_next;
        ptree++, cur_child++)
-    if (!(*cur_child= get_key_scans_params(param, *ptree, true, false, read_time)))
+    if (!(*cur_child= get_key_scans_params(session, param, *ptree, true, false, read_time)))
       One of index scans in this index_merge is more expensive than entire
@@ -995 +1006 @@
     all_scans_rors &= (*cur_child)->is_ror;
     if (pk_is_clustered &&
         param->real_keynr[(*cur_child)->key_idx] ==
-        param->table->getShare()->primary_key)
+        param->table->getShare()->getPrimaryKey())
       cpk_scan= cur_child;
       cpk_scan_records= (*cur_child)->records;
@@ -1177 @@
-typedef struct st_ror_scan_info
-{
-  uint32_t idx;                   /* # of used key in param->keys */
-  uint32_t keynr;                 /* # of used key in table */
-  ha_rows records;                /* estimate of # records this scan will return */
-
-  /* Set of intervals over key fields that will be used for row retrieval. */
-  optimizer::SEL_ARG *sel_arg;
-
-  /* Fields used in the query and covered by this ROR scan. */
-  MyBitmap covered_fields;
-  uint32_t used_fields_covered;   /* # of set bits in covered_fields */
-  int key_rec_length;             /* length of key record (including rowid) */
-
-  /*
-    Cost of reading all index records with values in sel_arg intervals set
-    (assuming there is no need to access full table records)
-  */
-  double index_read_cost;
-  uint32_t first_uncovered_field; /* first unused bit in covered_fields */
-  uint32_t key_components;        /* # of parts in the key */
-} ROR_SCAN_INFO;
@@ -1202 +1190 @@
-  Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using
+  Create optimizer::RorScanInfo* structure with a single ROR scan on index idx using
   sel_arg set of intervals.
@@ -1217 +1205 @@
-ROR_SCAN_INFO *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
+optimizer::RorScanInfo *make_ror_scan(const optimizer::Parameter *param, int idx, optimizer::SEL_ARG *sel_arg)
-  ROR_SCAN_INFO *ror_scan;
-  my_bitmap_map *bitmap_buf;
+  optimizer::RorScanInfo *ror_scan= NULL;
   uint32_t keynr;

-  if (!(ror_scan= (ROR_SCAN_INFO*)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO))))
+  if (!(ror_scan= (optimizer::RorScanInfo*)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo))))

   ror_scan->idx= idx;
   ror_scan->sel_arg= sel_arg;
   ror_scan->records= param->table->quick_rows[keynr];
-  if (!(bitmap_buf= (my_bitmap_map*) param->mem_root->alloc_root(param->fields_bitmap_size)))
-  if (ror_scan->covered_fields.init(bitmap_buf, param->table->getShare()->fields))
-  ror_scan->covered_fields.clearAll();
+  ror_scan->covered_fields_size= param->table->getShare()->sizeFields();
+  boost::dynamic_bitset<> tmp_bitset(param->table->getShare()->sizeFields());

   KeyPartInfo *key_part= param->table->key_info[keynr].key_part;
   KeyPartInfo *key_part_end= key_part +
                              param->table->key_info[keynr].key_parts;
-  for (;key_part != key_part_end; ++key_part)
-    if (param->needed_fields.isBitSet(key_part->fieldnr-1))
-      ror_scan->covered_fields.setBit(key_part->fieldnr-1);
+  for (; key_part != key_part_end; ++key_part)
+    if (param->needed_fields.test(key_part->fieldnr-1))
+      tmp_bitset.set(key_part->fieldnr-1);
   double rows= rows2double(param->table->quick_rows[ror_scan->keynr]);
   ror_scan->index_read_cost=
     param->table->cursor->index_only_read_time(ror_scan->keynr, rows);
+  ror_scan->covered_fields= tmp_bitset.to_ulong();
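make_ror_scan() now builds the coverage set in a temporary dynamic_bitset and stores it packed as an integer (covered_fields= tmp_bitset.to_ulong()). One hedged note on that choice: boost's to_ulong() throws std::overflow_error if a set bit does not fit in unsigned long, so the packed representation implicitly assumes the covered field numbers stay within one machine word. A sketch with illustrative values:

    #include <boost/dynamic_bitset.hpp>
    #include <iostream>

    int main()
    {
      boost::dynamic_bitset<> tmp_bitset(16);   // stand-in for sizeFields()
      tmp_bitset.set(0);
      tmp_bitset.set(2);

      // Pack the mask into an integer, as RorScanInfo::covered_fields does.
      unsigned long covered_fields= tmp_bitset.to_ulong();   // 0b101 == 5
      std::cout << covered_fields << '\n';

      // to_ulong() would throw std::overflow_error if a bit above the width
      // of unsigned long were set, so this packing assumes the table's
      // covered fields fit in one word.
      return 0;
    }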
@@ -1261 +1242 @@
-  Compare two ROR_SCAN_INFO** by E(#records_matched) * key_record_length.
+  Compare two optimizer::RorScanInfo** by E(#records_matched) * key_record_length.

     cmp_ror_scan_info()
       a ptr to first compared value

-static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b)
+static int cmp_ror_scan_info(optimizer::RorScanInfo** a, optimizer::RorScanInfo** b)
   double val1= rows2double((*a)->records) * (*a)->key_rec_length;
   double val2= rows2double((*b)->records) * (*b)->key_rec_length;
@@ -1315 +1296 @@
 /* Auxiliary structure for incremental ROR-intersection creation */
+typedef struct st_ror_intersect_info
+  st_ror_intersect_info()
+    index_scan_costs(0.0),
+  st_ror_intersect_info(const optimizer::Parameter *in_param)
+    covered_fields(in_param->table->getShare()->sizeFields()),
+    out_rows(in_param->table->cursor->stats.records),
+    index_scan_costs(0.0),
+    covered_fields.reset();
   const optimizer::Parameter *param;
-  MyBitmap covered_fields;                /* union of fields covered by all scans */
+  boost::dynamic_bitset<> covered_fields; /* union of fields covered by all scans */
     Fraction of table records that satisfies conditions of all scans.
     This is the number of full records that will be retrieved if a
 } ROR_INTERSECT_INFO;
@@ -1336 @@
-  Allocate a ROR_INTERSECT_INFO and initialize it to contain zero scans.
-
-    ror_intersect_init()
-      param   Parameter from test_quick_select
-
-ROR_INTERSECT_INFO* ror_intersect_init(const optimizer::Parameter *param)
-  ROR_INTERSECT_INFO *info;
-  if (!(info= (ROR_INTERSECT_INFO*)param->mem_root->alloc_root(sizeof(ROR_INTERSECT_INFO))))
-  if (!(buf= (my_bitmap_map*) param->mem_root->alloc_root(param->fields_bitmap_size)))
-  if (info->covered_fields.init(buf, param->table->getShare()->fields))
-  info->is_covering= false;
-  info->index_scan_costs= 0.0;
-  info->index_records= 0;
-  info->out_rows= (double) param->table->cursor->stats.records;
-  info->covered_fields.clearAll();

@@ -1367 +1340 @@
 static void ror_intersect_cpy(ROR_INTERSECT_INFO *dst,
                               const ROR_INTERSECT_INFO *src)
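The deleted ror_intersect_init() pattern (allocate from the mem_root, then set every member by hand) is replaced by constructors on st_ror_intersect_info, so callers can hold the object by value. A minimal sketch of the after shape, with illustrative names and types rather than the Drizzle ones:

    #include <boost/dynamic_bitset.hpp>
    #include <cstddef>

    struct RorIntersectInfo
    {
      // One constructor establishes the invariants ror_intersect_init()
      // used to set up manually, so a stack instance is ready to use.
      explicit RorIntersectInfo(std::size_t n_fields, double table_records)
        : covered_fields(n_fields),
          out_rows(table_records),
          is_covering(false),
          index_records(0),
          index_scan_costs(0.0)
      {}

      boost::dynamic_bitset<> covered_fields;
      double out_rows;
      bool is_covering;
      unsigned long index_records;
      double index_scan_costs;
    };

    int main()
    {
      RorIntersectInfo intersect(32, 1000.0);      // no mem_root, no init call
      RorIntersectInfo intersect_best= intersect;  // value copy, cf. intersect_best= intersect
      (void) intersect_best;
      return 0;
    }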
@@ -1470 +1443 @@
 static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
-                                   const ROR_SCAN_INFO *scan)
+                                   const optimizer::RorScanInfo *scan)
   double selectivity_mult= 1.0;
   KeyPartInfo *key_part= info->param->table->key_info[scan->keynr].key_part;
@@ -1491 +1464 @@
        sel_arg= sel_arg->next_key_part)
-    test(info->covered_fields.isBitSet(key_part[sel_arg->part].fieldnr-1));
+    test(info->covered_fields.test(key_part[sel_arg->part].fieldnr-1));
     if (cur_covered != prev_covered)
       /* create (part1val, ..., part{n-1}val) tuple. */
@@ -1578 +1551 @@
 static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
-                              ROR_SCAN_INFO* ror_scan, bool is_cpk_scan)
+                              optimizer::RorScanInfo* ror_scan, bool is_cpk_scan)
   double selectivity_mult= 1.0;
@@ -1604 +1577 @@
   info->index_records += info->param->table->quick_rows[ror_scan->keynr];
   info->index_scan_costs += ror_scan->index_read_cost;
-  bitmap_union(&info->covered_fields, &ror_scan->covered_fields);
-  if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields,
-                                             &info->covered_fields))
+  boost::dynamic_bitset<> tmp_bitset= ror_scan->bitsToBitset();
+  info->covered_fields|= tmp_bitset;
+  if (! info->is_covering && info->param->needed_fields.is_subset_of(info->covered_fields))
     info->is_covering= true;

   info->total_cost= info->index_scan_costs;
-  if (!info->is_covering)
+  if (! info->is_covering)
     optimizer::CostVector sweep_cost;
     Join *join= info->param->session->lex->select_lex.join;
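ror_intersect_add() now tests covering with needed_fields.is_subset_of(covered_fields) instead of bitmap_is_subset(). is_subset_of(b) is true exactly when every set bit of the receiver is also set in b, which is the "all needed fields are covered" condition. A small sketch of the covering check, with illustrative field numbers:

    #include <boost/dynamic_bitset.hpp>
    #include <cassert>

    int main()
    {
      boost::dynamic_bitset<> needed_fields(8), covered_fields(8);
      needed_fields.set(1);
      needed_fields.set(4);

      covered_fields.set(1);                 // first ROR scan covers field 1 only
      assert(!needed_fields.is_subset_of(covered_fields));

      boost::dynamic_bitset<> scan_fields(8);
      scan_fields.set(4);
      covered_fields|= scan_fields;          // cf. info->covered_fields|= tmp_bitset
      assert(needed_fields.is_subset_of(covered_fields));  // intersection is covering
      return 0;
    }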
@@ -1663 +1636 @@
                                  optimizer::SEL_TREE *tree,
                                  double read_time)
-  ROR_SCAN_INFO **ror_scan_mark;
-  ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
+  optimizer::RorScanInfo **ror_scan_mark;
+  optimizer::RorScanInfo **ror_scans_end= tree->ror_scans_end;
-  for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
+  for (optimizer::RorScanInfo **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
     (*scan)->key_components=
       param->table->key_info[(*scan)->keynr].key_parts;
@@ -1678 +1651 @@
   /* I=set of all covering indexes */
   ror_scan_mark= tree->ror_scans;
-  MyBitmap *covered_fields= &param->tmp_covered_fields;
-  if (! covered_fields->getBitmap())
-    my_bitmap_map *tmp_bitmap= (my_bitmap_map*)param->mem_root->alloc_root(param->fields_bitmap_size);
-    covered_fields->setBitmap(tmp_bitmap);
-  if (! covered_fields->getBitmap() ||
-      covered_fields->init(covered_fields->getBitmap(),
-                           param->table->getShare()->fields))
-  covered_fields->clearAll();
+  boost::dynamic_bitset<> *covered_fields= &param->tmp_covered_fields;
+  if (covered_fields->empty())
+    covered_fields->resize(param->table->getShare()->sizeFields());
+  covered_fields->reset();
@@ -1693 +1661 @@
   double total_cost= 0.0f;
   ha_rows records=0;
@@ -1702 +1670 @@
     number of first not covered component
     Calculate and save these values for each of remaining scans.
-  for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
+  for (optimizer::RorScanInfo **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
-    bitmap_subtract(&(*scan)->covered_fields, covered_fields);
+    /* subtract one bitset from the other */
+    (*scan)->subtractBitset(*covered_fields);
     (*scan)->used_fields_covered=
-      (*scan)->covered_fields.getBitsSet();
-    (*scan)->first_uncovered_field=
-      (*scan)->covered_fields.getFirst();
+      (*scan)->getBitCount();
+    (*scan)->first_uncovered_field= (*scan)->findFirstNotSet();

   internal::my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark,
-                     sizeof(ROR_SCAN_INFO*),
+                     sizeof(optimizer::RorScanInfo*),
                      (qsort_cmp)cmp_ror_scan_info_covering);

     /* I=I-first(I) */
     if (total_cost > read_time)
     /* F=F-covered by first(I) */
-    bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
-    all_covered= bitmap_is_subset(&param->needed_fields, covered_fields);
-  } while ((++ror_scan_mark < ror_scans_end) && !all_covered);
+    boost::dynamic_bitset<> tmp_bitset= (*ror_scan_mark)->bitsToBitset();
+    *covered_fields|= tmp_bitset;
+    all_covered= param->needed_fields.is_subset_of(*covered_fields);
+  } while ((++ror_scan_mark < ror_scans_end) && ! all_covered);

   if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
@@ -1749 +1718 @@
   uint32_t best_num= (ror_scan_mark - tree->ror_scans);
-  if (!(trp->first_scan= (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*)* best_num)))
+  if (!(trp->first_scan= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* best_num)))
-  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(ROR_SCAN_INFO*));
+  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(optimizer::RorScanInfo*));
   trp->last_scan= trp->first_scan + best_num;
   trp->is_covering= true;
   trp->read_cost= total_cost;
@@ -1841 +1810 @@
-    Step1: Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of
+    Step1: Collect ROR-able SEL_ARGs and create optimizer::RorScanInfo for each of
     them. Also find and save clustered PK scan if there is one.

-  ROR_SCAN_INFO **cur_ror_scan= NULL;
-  ROR_SCAN_INFO *cpk_scan= NULL;
+  optimizer::RorScanInfo **cur_ror_scan= NULL;
+  optimizer::RorScanInfo *cpk_scan= NULL;
   uint32_t cpk_no= 0;
   bool cpk_scan_used= false;

-  if (! (tree->ror_scans= (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*)* param->keys)))
+  if (! (tree->ror_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)* param->keys)))
   cpk_no= ((param->table->cursor->primary_key_is_clustered()) ?
-           param->table->getShare()->primary_key : MAX_KEY);
+           param->table->getShare()->getPrimaryKey() : MAX_KEY);

   for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++)
-    ROR_SCAN_INFO *scan;
+    optimizer::RorScanInfo *scan;
     if (! tree->ror_scans_map.test(idx))
-    if (!(scan= make_ror_scan(param, idx, tree->keys[idx])))
+    if (! (scan= make_ror_scan(param, idx, tree->keys[idx])))
     if (param->real_keynr[idx] == cpk_no)
   tree->ror_scans_end= cur_ror_scan;
@@ -1874 +1843 @@
     Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized
-    ROR_SCAN_INFO's.
+    optimizer::RorScanInfo's.
     Step 2: Get best ROR-intersection using an approximate algorithm.

-  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*),
+  internal::my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(optimizer::RorScanInfo*),
                      (qsort_cmp)cmp_ror_scan_info);
@@ -1881 +1850 @@
-  ROR_SCAN_INFO **intersect_scans= NULL; /* ROR scans used in index intersection */
-  ROR_SCAN_INFO **intersect_scans_end= NULL;
-  if (! (intersect_scans= (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*) * tree->n_ror_scans)))
+  optimizer::RorScanInfo **intersect_scans= NULL; /* ROR scans used in index intersection */
+  optimizer::RorScanInfo **intersect_scans_end= NULL;
+  if (! (intersect_scans= (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*) * tree->n_ror_scans)))
   intersect_scans_end= intersect_scans;

   /* Create and incrementally update ROR intersection. */
-  ROR_INTERSECT_INFO *intersect= NULL;
-  ROR_INTERSECT_INFO *intersect_best= NULL;
-  if (! (intersect= ror_intersect_init(param)) ||
-      ! (intersect_best= ror_intersect_init(param)))
+  ROR_INTERSECT_INFO intersect(param);
+  ROR_INTERSECT_INFO intersect_best(param);

   /* [intersect_scans,intersect_scans_best) will hold the best intersection */
-  ROR_SCAN_INFO **intersect_scans_best= NULL;
+  optimizer::RorScanInfo **intersect_scans_best= NULL;
   cur_ror_scan= tree->ror_scans;
   intersect_scans_best= intersect_scans;
-  while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
+  while (cur_ror_scan != tree->ror_scans_end && ! intersect.is_covering)
     /* S= S + first(R); R= R - first(R); */
-    if (!ror_intersect_add(intersect, *cur_ror_scan, false))
+    if (! ror_intersect_add(&intersect, *cur_ror_scan, false))
       cur_ror_scan++;
     *(intersect_scans_end++)= *(cur_ror_scan++);
-    if (intersect->total_cost < min_cost)
+    if (intersect.total_cost < min_cost)
       /* Local minimum found, save it */
-      ror_intersect_cpy(intersect_best, intersect);
+      ror_intersect_cpy(&intersect_best, &intersect);
      intersect_scans_best= intersect_scans_end;
-      min_cost = intersect->total_cost;
+      min_cost = intersect.total_cost;
@@ -1923 +1889 @@
-  *are_all_covering= intersect->is_covering;
+  *are_all_covering= intersect.is_covering;
   uint32_t best_num= intersect_scans_best - intersect_scans;
-  ror_intersect_cpy(intersect, intersect_best);
+  ror_intersect_cpy(&intersect, &intersect_best);

     Ok, found the best ROR-intersection of non-CPK key scans.
     Check if we should add a CPK scan. If the obtained ROR-intersection is
     covering, it doesn't make sense to add CPK scan.

-  if (cpk_scan && !intersect->is_covering)
+  if (cpk_scan && ! intersect.is_covering)
-    if (ror_intersect_add(intersect, cpk_scan, true) &&
-        (intersect->total_cost < min_cost))
+    if (ror_intersect_add(&intersect, cpk_scan, true) &&
+        (intersect.total_cost < min_cost))
       cpk_scan_used= true;
       intersect_best= intersect; //just set pointer here
@@ -1949 +1915 @@
   if (! (trp->first_scan=
-        (ROR_SCAN_INFO**)param->mem_root->alloc_root(sizeof(ROR_SCAN_INFO*)*best_num)))
+        (optimizer::RorScanInfo**)param->mem_root->alloc_root(sizeof(optimizer::RorScanInfo*)*best_num)))
-  memcpy(trp->first_scan, intersect_scans, best_num*sizeof(ROR_SCAN_INFO*));
+  memcpy(trp->first_scan, intersect_scans, best_num*sizeof(optimizer::RorScanInfo*));
   trp->last_scan= trp->first_scan + best_num;
-  trp->is_covering= intersect_best->is_covering;
-  trp->read_cost= intersect_best->total_cost;
+  trp->is_covering= intersect_best.is_covering;
+  trp->read_cost= intersect_best.total_cost;
   /* Prevent divisions by zero */
-  ha_rows best_rows = double2rows(intersect_best->out_rows);
+  ha_rows best_rows = double2rows(intersect_best.out_rows);
   if (! best_rows)
   set_if_smaller(param->table->quick_condition_rows, best_rows);
   trp->records= best_rows;
-  trp->index_scan_costs= intersect_best->index_scan_costs;
+  trp->index_scan_costs= intersect_best.index_scan_costs;
   trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
@@ -1992 +1959 @@
     NULL if no plan found or error occurred

-static optimizer::RangeReadPlan *get_key_scans_params(optimizer::Parameter *param,
+static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
+                                                      optimizer::Parameter *param,
                                                       optimizer::SEL_TREE *tree,
                                                       bool index_read_must_be_used,
                                                       bool update_tbl_stats,
@@ -2029 +1997 @@
     bool read_index_only= index_read_must_be_used ||
                           param->table->covering_keys.test(keynr);
-    found_records= check_quick_select(param, idx, read_index_only, *key,
+    found_records= check_quick_select(session, param, idx, read_index_only, *key,
                                       update_tbl_stats, &mrr_flags,
                                       &buf_size, &cost);
     found_read_time= cost.total_cost();
@@ -2560 +2528 @@
       field->setWriteSet();
       Item_result cmp_type= field->cmp_type();
-      if (!((ref_tables | field->table->map) & param_comp))
+      if (!((ref_tables | field->getTable()->map) & param_comp))
         ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv);
       Item_equal *item_equal= field_item->item_equal;
       if (item_equal)
@@ -2575 +2543 @@
         if (field->eq(f))
-        if (!((ref_tables | f->table->map) & param_comp))
+        if (!((ref_tables | f->getTable()->map) & param_comp))
         tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv);
         ftree= !ftree ? tree : tree_and(param, ftree, tree);
@@ -2635 +2603 @@
-  /* Here when simple cond */
-  if (cond->const_item())
+  /* Here when simple cond
+     There are limits on what kinds of const items we can evaluate, grep for
+     DontEvaluateMaterializedSubqueryTooEarly.
+  */
+  if (cond->const_item() && !cond->is_expensive())
     During the cond->val_int() evaluation we can come across a subselect
@@ -2725 +2696 @@
       field->setWriteSet();
       Item_result cmp_type= field->cmp_type();
-      if (!((ref_tables | field->table->map) & param_comp))
+      if (!((ref_tables | field->getTable()->map) & param_comp))
         tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC,
                            value,cmp_type);
@@ -2764 +2735 @@
                          Item_func::Functype type,
                          Item *value, Item_result)
-  if (field->table != param->table)
+  if (field->getTable() != param->table)
   KEY_PART *key_part = param->key_parts;
@@ -2834 +2805 @@
   param->session->mem_root= param->old_root;
   if (!value) // IS NULL or IS NOT NULL
-    if (field->table->maybe_null) // Can't use a key on this
+    if (field->getTable()->maybe_null) // Can't use a key on this
     if (!maybe_null) // Not null field
@@ -3151 +3122 @@
   tree= &optimizer::null_element; // cmp with NULL is never true
+  /*
+    Any predicate except "<=>" (null-safe equality operator) involving NULL as a
+    constant is always FALSE.
+    Put IMPOSSIBLE Tree(null_element) here.
+  */
+  if (type != Item_func::EQUAL_FUNC && field->is_real_null())
+    tree= &optimizer::null_element;
   str= (unsigned char*) alloc->alloc_root(key_part->store_length+1);
@@ -3830 +3813 @@
-ha_rows check_quick_select(optimizer::Parameter *param,
+ha_rows check_quick_select(Session *session,
+                           optimizer::Parameter *param,
                            bool index_only,
                            optimizer::SEL_ARG *tree,
@@ -3868 +3852 @@
   bool pk_is_clustered= cursor->primary_key_is_clustered();
   if (index_only &&
       (param->table->index_flags(keynr) & HA_KEYREAD_ONLY) &&
-      !(pk_is_clustered && keynr == param->table->getShare()->primary_key))
+      !(pk_is_clustered && keynr == param->table->getShare()->getPrimaryKey()))
     *mrr_flags |= HA_MRR_INDEX_ONLY;

-  if (current_session->lex->sql_command != SQLCOM_SELECT)
+  if (session->lex->sql_command != SQLCOM_SELECT)
     *mrr_flags |= HA_MRR_USE_DEFAULT_IMPL;

   *bufsize= param->session->variables.read_rnd_buff_size;
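This hunk shows the point of threading Session through the call chain: check_quick_select() now tests session->lex->sql_command on the argument it was handed instead of reaching for the thread-local current_session. A hedged illustration of the pattern with stand-in types (not the Drizzle API):

    #include <iostream>

    struct Session { int sql_command; };          // stand-in for drizzled::Session

    // Before: the callee fetched per-thread global state itself.
    thread_local Session *current_session= nullptr;
    bool use_default_impl_before()
    {
      return current_session->sql_command != 0;   // hidden dependency
    }

    // After: the caller passes the session down the call chain, making the
    // dependency visible in the signature and testable without TLS setup.
    bool use_default_impl_after(Session *session)
    {
      return session->sql_command != 0;
    }

    int main()
    {
      Session s= { 1 };
      std::cout << use_default_impl_after(&s) << '\n';  // no global needed
      current_session= &s;                              // old style needs setup first
      std::cout << use_default_impl_before() << '\n';
      return 0;
    }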
@@ -3994 +3978 @@
                               uint32_t mrr_buf_size,
                               memory::Root *parent_alloc)
-  optimizer::QuickRangeSelect *quick= NULL;
-  bool create_err= false;
-  quick= new optimizer::QuickRangeSelect(param->session,
-                                         param->real_keynr[idx],
+  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(param->session,
+                                                                      param->real_keynr[idx],
@@ -4010 +3989 @@
-  get_quick_keys(param,
+  if (get_quick_keys(param,
         param->key[idx],
@@ -4258 +4237 @@
                                 table_reference_st *ref,
                                 ha_rows records)
-  memory::Root *old_root, *alloc;
-  optimizer::QuickRangeSelect *quick= NULL;
+  memory::Root *old_root= NULL;
+  memory::Root *alloc= NULL;
   KeyInfo *key_info = &table->key_info[ref->key];
   KEY_PART *key_part;
   optimizer::QuickRange *range= NULL;
-  bool create_err= false;
   optimizer::CostVector cost;

   old_root= session->mem_root;
   /* The following call may change session->mem_root */
-  quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0, &create_err);
+  optimizer::QuickRangeSelect *quick= new optimizer::QuickRangeSelect(session, table, ref->key, 0, 0);
   /* save mem_root set by QuickRangeSelect constructor */
   alloc= session->mem_root;
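Both call sites drop the bool create_err out-parameter from the QuickRangeSelect constructor. The general shape of that cleanup, sketched with hypothetical names (this is an assumption about the intent, not the actual QuickRangeSelect internals): instead of reporting construction failure through a by-reference flag, the object records its own state and callers query it afterwards.

    #include <memory>

    struct QuickSelectSketch
    {
      // Hypothetical stand-in: the constructor records success internally
      // instead of writing through a bool *create_err argument.
      explicit QuickSelectSketch(bool resources_ok)
        : valid(resources_ok)
      {}

      bool isValid() const { return valid; }

    private:
      bool valid;
    };

    int main()
    {
      // Before-style: QuickSelectSketch q(..., &create_err); if (create_err) ...
      // After-style: construct, then ask the object.
      std::unique_ptr<QuickSelectSketch> quick(new QuickSelectSketch(true));
      if (!quick->isValid())
        return 1;
      return 0;
    }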
@@ -4613 +4591 @@
        (! join->select_distinct)) ||
       (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
-  if (table->getShare()->keys == 0)        /* There are no indexes to use. */
+  if (table->getShare()->sizeKeys() == 0)  /* There are no indexes to use. */
   /* Analyze the query in more detail. */
@@ -4673 +4651 @@
     first one. Here we set the variables: group_prefix_len and index_info.
   KeyInfo *cur_index_info= table->key_info;
-  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->keys;
+  KeyInfo *cur_index_info_end= cur_index_info + table->getShare()->sizeKeys();
   KeyPartInfo *cur_part= NULL;
   KeyPartInfo *end_part= NULL; /* Last part for loops. */
   /* Last index part. */
@@ -4698 +4676 @@
   uint32_t cur_key_infix_len= 0;
   unsigned char cur_key_infix[MAX_KEY_LENGTH];
   uint32_t cur_used_key_parts= 0;
-  uint32_t pk= param->table->getShare()->primary_key;
+  uint32_t pk= param->table->getShare()->getPrimaryKey();

   for (uint32_t cur_index= 0;
        cur_index_info != cur_index_info_end;
@@ -4721 +4699 @@
        (table->cursor->getEngine()->check_flag(HTON_BIT_PRIMARY_KEY_IN_READ_INDEX)))
     /* For each table field */
-    for (uint32_t i= 0; i < table->getShare()->fields; i++)
+    for (uint32_t i= 0; i < table->getShare()->sizeFields(); i++)
-      Field *cur_field= table->field[i];
+      Field *cur_field= table->getField(i);
        If the field is used in the current query ensure that it's
        part of 'cur_index'
@@ -4935 +4913 @@
     optimizer::CostVector dummy_cost;
     uint32_t mrr_flags= HA_MRR_USE_DEFAULT_IMPL;
     uint32_t mrr_bufsize= 0;
-    cur_quick_prefix_records= check_quick_select(param,
+    cur_quick_prefix_records= check_quick_select(session,
                                                  false /*don't care*/,
                                                  cur_index_tree,
@@ +5570 @@
+uint32_t optimizer::RorScanInfo::findFirstNotSet() const
+  boost::dynamic_bitset<> map= bitsToBitset();
+  for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
+
+size_t optimizer::RorScanInfo::getBitCount() const
+  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
+  return tmp_bitset.count();
+
+void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
+  boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
+  tmp_bitset-= in_bitset;
+  covered_fields= tmp_bitset.to_ulong();
+
+boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
+  uint64_t conv= covered_fields;
+    res.push_back((conv & 1) + '0');
+  std::reverse(res.begin(), res.end());
+  string final(covered_fields_size - res.length(), '0');
+  return (boost::dynamic_bitset<>(final));
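The new RorScanInfo helpers round-trip the packed covered_fields word through a dynamic_bitset: bitsToBitset() rebuilds the full-width bitset (here via a '0'/'1' string sized to covered_fields_size), subtractBitset() uses operator-= (set difference), and getBitCount()/findFirstNotSet() reduce to count() and a scan. An equivalent, slightly more direct construction that avoids the intermediate string, offered as a hedged sketch rather than the committed implementation:

    #include <boost/dynamic_bitset.hpp>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Illustrative equivalent of RorScanInfo::bitsToBitset(): expand a packed
    // word into a bitset of the full field-count width, bit by bit.
    static boost::dynamic_bitset<> bits_to_bitset(uint64_t mask, std::size_t width)
    {
      boost::dynamic_bitset<> res(width);
      for (std::size_t i= 0; i < width && mask; i++, mask>>= 1)
        if (mask & 1)
          res.set(i);
      return res;
    }

    int main()
    {
      uint64_t covered_fields= 0x15;                     // bits 0, 2, 4 set
      boost::dynamic_bitset<> b= bits_to_bitset(covered_fields, 10);
      assert(b.count() == 3);                            // cf. getBitCount()

      boost::dynamic_bitset<> other(10);
      other.set(2);
      b-= other;                                         // cf. subtractBitset()
      assert(b.count() == 2 && !b.test(2));
      return 0;
    }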
@@ -5591 +5618 @@
 } /* namespace drizzled */