  subject and may omit some details.
*/

#include <drizzled/server_includes.h>
#include <drizzled/cost_vect.h>

#include <string>
#include <boost/dynamic_bitset.hpp>

#include "drizzled/check_stack_overrun.h"
#include "drizzled/error.h"
#include "drizzled/field/num.h"
#include "drizzled/internal/iocache.h"
#include "drizzled/internal/my_sys.h"
#include "drizzled/item/cmpfunc.h"
#include "drizzled/optimizer/cost_vector.h"
#include "drizzled/optimizer/quick_group_min_max_select.h"
#include "drizzled/optimizer/quick_index_merge_select.h"
#include "drizzled/optimizer/quick_range.h"
#include "drizzled/optimizer/quick_range_select.h"
#include "drizzled/optimizer/quick_ror_intersect_select.h"
#include "drizzled/optimizer/quick_ror_union_select.h"
#include "drizzled/optimizer/range.h"
#include "drizzled/optimizer/range_param.h"
#include "drizzled/optimizer/sel_arg.h"
#include "drizzled/optimizer/sel_imerge.h"
#include "drizzled/optimizer/sel_tree.h"
#include "drizzled/optimizer/sum.h"
#include "drizzled/optimizer/table_read_plan.h"
#include "drizzled/plugin/storage_engine.h"
#include "drizzled/records.h"
#include "drizzled/sql_base.h"
#include "drizzled/sql_select.h"
#include "drizzled/table_reference.h"
#include "drizzled/temporal.h" /* Needed in get_mm_leaf() for timestamp -> datetime comparisons */
using namespace std;

#define HA_END_SPACE_KEY 0

#if defined(CMATH_NAMESPACE)
using namespace CMATH_NAMESPACE;
#endif

/*
  Convert double value to #rows. Currently this does floor(), and we
  might consider using round() instead.
*/
static inline ha_rows double2rows(double x)
{
  return static_cast<ha_rows>(x);
}

static int sel_cmp(Field *f, unsigned char *a, unsigned char *b, uint8_t a_flag, uint8_t b_flag);

static unsigned char is_null_string[2]= {1,0};
/*
  Get cost of reading nrows table records in a "disk sweep"

  A disk sweep read is a sequence of Cursor->rnd_pos(rowid) calls that are
  made for an ordered sequence of rowids.

  We assume hard disk IO. The read is performed as follows:

   1. The disk head is moved to the needed cylinder
   2. The controller waits for the plate to rotate
   3. The data is transferred

  Time to do #3 is insignificant compared to #2+#1.

  Time to move the disk head is proportional to head travel distance.

  Time to wait for the plate to rotate depends on whether the disk head
  was moved or not.

  If disk head wasn't moved, the wait time is proportional to distance
  between the previous block and the block we're reading.

  If the head was moved, we don't know how much we'll need to wait for the
  plate to rotate. We assume the wait time to be a variate with a mean of
  0.5 of full rotation time.

  Our cost units are "random disk seeks". The cost of a random disk seek is
  actually not a constant, it depends on the range of cylinders we're going
  to access. We make it constant by introducing a fuzzy concept of "typical
  datafile length" (it's fuzzy as it's hard to tell whether it should
  include index cursors, temp. tables, etc). Then random seek cost is:

    1 = half_rotation_cost + move_cost * 1/3 * typical_data_file_length

  We define half_rotation_cost as DISK_SEEK_BASE_COST=0.9.

  @param table       Table to be accessed
  @param nrows       Number of rows to retrieve
  @param interrupted true <=> Assume that the disk sweep will be
                     interrupted by other disk IO. false - otherwise.
  @param cost        OUT  The cost.
*/
static void get_sweep_read_cost(Table *table, ha_rows nrows, bool interrupted,
                                optimizer::CostVector *cost)
{
  if (table->cursor->primary_key_is_clustered())
  {
    cost->setIOCount(table->cursor->read_time(table->getShare()->getPrimaryKey(),
                                              static_cast<uint32_t>(nrows),
                                              nrows));
  }
  else
  {
    double n_blocks=
      ceil(uint64_t2double(table->cursor->stats.data_file_length) / IO_SIZE);
    double busy_blocks=
      n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(nrows)));
    if (busy_blocks < 1.0)
      busy_blocks= 1.0;
    cost->setIOCount(busy_blocks);
    if (! interrupted)
    {
      /* Assume reading is done in one 'sweep' */
      cost->setAvgIOCost((DISK_SEEK_BASE_COST +
                          DISK_SEEK_PROP_COST*n_blocks/busy_blocks));
    }
  }
}
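/*
  Worked example for the formula above (illustrative numbers, not values
  taken from a real table): with a 40MB data file and IO_SIZE = 4096 there
  are n_blocks = 10000 blocks; fetching nrows = 100 random rows touches
  busy_blocks = 10000 * (1 - (1 - 1/10000)^100) ~= 99.5 blocks, so an
  uninterrupted sweep costs about
  busy_blocks * (DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * 10000/99.5)
  random-seek units.
*/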
235
static optimizer::SEL_TREE * get_mm_parts(optimizer::RangeParameter *param,
238
Item_func::Functype type,
240
Item_result cmp_type);
242
static optimizer::SEL_ARG *get_mm_leaf(optimizer::RangeParameter *param,
246
Item_func::Functype type,
249
static optimizer::SEL_TREE *get_mm_tree(optimizer::RangeParameter *param, COND *cond);
251
static bool is_key_scan_ror(optimizer::Parameter *param, uint32_t keynr, uint8_t nparts);
253
static ha_rows check_quick_select(Session *session,
254
optimizer::Parameter *param,
257
optimizer::SEL_ARG *tree,
258
bool update_tbl_stats,
261
optimizer::CostVector *cost);
263
static optimizer::RangeReadPlan *get_key_scans_params(Session *session,
264
optimizer::Parameter *param,
265
optimizer::SEL_TREE *tree,
266
bool index_read_must_be_used,
267
bool update_tbl_stats,
271
optimizer::RorIntersectReadPlan *get_best_ror_intersect(const optimizer::Parameter *param,
272
optimizer::SEL_TREE *tree,
274
bool *are_all_covering);
277
optimizer::RorIntersectReadPlan *get_best_covering_ror_intersect(optimizer::Parameter *param,
278
optimizer::SEL_TREE *tree,
282
optimizer::TableReadPlan *get_best_disjunct_quick(Session *session,
283
optimizer::Parameter *param,
284
optimizer::SEL_IMERGE *imerge,
288
optimizer::GroupMinMaxReadPlan *get_best_group_min_max(optimizer::Parameter *param, optimizer::SEL_TREE *tree);
290
static optimizer::SEL_TREE *tree_and(optimizer::RangeParameter *param,
291
optimizer::SEL_TREE *tree1,
292
optimizer::SEL_TREE *tree2);
294
static optimizer::SEL_ARG *sel_add(optimizer::SEL_ARG *key1, optimizer::SEL_ARG *key2);
296
static optimizer::SEL_ARG *key_and(optimizer::RangeParameter *param,
297
optimizer::SEL_ARG *key1,
298
optimizer::SEL_ARG *key2,
299
uint32_t clone_flag);
301
static bool get_range(optimizer::SEL_ARG **e1, optimizer::SEL_ARG **e2, optimizer::SEL_ARG *root1);
303
optimizer::SEL_ARG optimizer::null_element(optimizer::SEL_ARG::IMPOSSIBLE);
305
static bool null_part_in_key(KEY_PART *key_part,
306
const unsigned char *key,
132
class RANGE_OPT_PARAM;
134
A construction block of the SEL_ARG-graph.
136
The following description only covers graphs of SEL_ARG objects with
137
sel_arg->type==KEY_RANGE:
139
One SEL_ARG object represents an "elementary interval" in form
141
min_value <=? table.keypartX <=? max_value
143
The interval is a non-empty interval of any kind: with[out] minimum/maximum
144
bound, [half]open/closed, single-point interval, etc.
146
1. SEL_ARG GRAPH STRUCTURE
148
SEL_ARG objects are linked together in a graph. The meaning of the graph
149
is better demonstrated by an example:
154
| part=1 $ part=2 $ part=3
156
| +-------+ $ +-------+ $ +--------+
157
| | kp1<1 |--$-->| kp2=5 |--$-->| kp3=10 |
158
| +-------+ $ +-------+ $ +--------+
164
\->| kp1=2 |--$--------------$-+
165
+-------+ $ $ | +--------+
167
+-------+ $ $ | +--------+
168
| kp1=3 |--$--------------$-+ |
169
+-------+ $ $ +--------+
173
The entire graph is partitioned into "interval lists".
175
An interval list is a sequence of ordered disjoint intervals over the same
176
key part. SEL_ARG are linked via "next" and "prev" pointers. Additionally,
177
all intervals in the list form an RB-tree, linked via left/right/parent
178
pointers. The RB-tree root SEL_ARG object will be further called "root of the
181
In the example pic, there are 4 interval lists:
182
"kp<1 OR kp1=2 OR kp1=3", "kp2=5", "kp3=10 OR kp3=12", "kp3=11 OR kp3=13".
183
The vertical lines represent SEL_ARG::next/prev pointers.
185
In an interval list, each member X may have SEL_ARG::next_key_part pointer
186
pointing to the root of another interval list Y. The pointed interval list
187
must cover a key part with greater number (i.e. Y->part > X->part).
189
In the example pic, the next_key_part pointers are represented by
192
2. SEL_ARG GRAPH SEMANTICS
194
It represents a condition in a special form (we don't have a name for it ATM)
195
The SEL_ARG::next/prev is "OR", and next_key_part is "AND".
197
For example, the picture represents the condition in form:
198
(kp1 < 1 AND kp2=5 AND (kp3=10 OR kp3=12)) OR
199
(kp1=2 AND (kp3=11 OR kp3=14)) OR
200
(kp1=3 AND (kp3=11 OR kp3=14))
205
Use get_mm_tree() to construct SEL_ARG graph from WHERE condition.
206
Then walk the SEL_ARG graph and get a list of disjoint ordered key
207
intervals (i.e. intervals in form
209
(constA1, .., const1_K) < (keypart1,.., keypartK) < (constB1, .., constB_K)
211
Those intervals can be used to access the index. The uses are in:
212
- check_quick_select() - Walk the SEL_ARG graph and find an estimate of
213
how many table records are contained within all
215
- get_quick_select() - Walk the SEL_ARG, materialize the key intervals,
216
and create QUICK_RANGE_SELECT object that will
217
read records within these intervals.
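    An illustrative case (not from the original text): for
    "kp1 IN (1,2) AND kp2=5" this walk yields two disjoint single-point
    intervals, (1,5) <= (kp1,kp2) <= (1,5) and (2,5) <= (kp1,kp2) <= (2,5).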
219
4. SPACE COMPLEXITY NOTES
221
SEL_ARG graph is a representation of an ordered disjoint sequence of
222
intervals over the ordered set of index tuple values.
224
For multi-part keys, one can construct a WHERE expression such that its
225
list of intervals will be of combinatorial size. Here is an example:
227
(keypart1 IN (1,2, ..., n1)) AND
228
(keypart2 IN (1,2, ..., n2)) AND
229
(keypart3 IN (1,2, ..., n3))
231
For this WHERE clause the list of intervals will have n1*n2*n3 intervals
234
(keypart1, keypart2, keypart3) = (k1, k2, k3), where 1 <= k{i} <= n{i}
236
SEL_ARG graph structure aims to reduce the amount of required space by
237
"sharing" the elementary intervals when possible (the pic at the
238
beginning of this comment has examples of such sharing). The sharing may
239
prevent combinatorial blowup:
241
There are WHERE clauses that have combinatorial-size interval lists but
242
will be represented by a compact SEL_ARG graph.
244
(keypartN IN (1,2, ..., n1)) AND
246
(keypart2 IN (1,2, ..., n2)) AND
247
(keypart1 IN (1,2, ..., n3))
249
but not in all cases:
251
- There are WHERE clauses that do have a compact SEL_ARG-graph
252
representation but get_mm_tree() and its callees will construct a
253
graph of combinatorial size.
255
(keypart1 IN (1,2, ..., n1)) AND
256
(keypart2 IN (1,2, ..., n2)) AND
258
(keypartN IN (1,2, ..., n3))
260
- There are WHERE clauses for which the minimal possible SEL_ARG graph
261
representation will have combinatorial size.
263
By induction: Let's take any interval on some keypart in the middle:
267
Then let's AND it with this interval 'structure' from preceding and
270
((kp14=c1 AND kp16=c3) OR keypart14=c2)   (*)
272
We will obtain this SEL_ARG graph:
276
+---------+ $ +---------+ $ +---------+
277
| kp14=c1 |--$-->| kp15=c0 |--$-->| kp16=c3 |
278
+---------+ $ +---------+ $ +---------+
280
+---------+ $ +---------+ $
281
| kp14=c2 |--$-->| kp15=c0 | $
282
+---------+ $ +---------+ $
285
Note that we had to duplicate "kp15=c0" and there was no way to avoid
287
The induction step: AND the obtained expression with another "wrapping"
289
When the process ends because of the limit on max. number of keyparts
292
WHERE clause length is O(3*#max_keyparts)
293
SEL_ARG graph size is O(2^(#max_keyparts/2))
295
(it is also possible to construct a case where instead of 2 in 2^n we
296
have a bigger constant, e.g. 4, and get a graph with 4^(31/2)= 2^31
299
We avoid consuming too much memory by setting a limit on the number of
300
SEL_ARG object we can construct during one range analysis invocation.
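  (In the code below this limit is SEL_ARG::MAX_SEL_ARGS, checked in
  SEL_ARG::clone() via param->alloced_sel_args.)
*/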
303
class SEL_ARG :public Sql_alloc
306
uint8_t min_flag,max_flag,maybe_flag;
307
uint8_t part; // Which key part
310
Number of children of this element in the RB-tree, plus 1 for this
315
Valid only for elements which are RB-tree roots: Number of times this
316
RB-tree is referred to (it is referred by SEL_ARG::next_key_part or by
317
SEL_TREE::keys[i] or by a temporary SEL_ARG* variable)
322
unsigned char *min_value,*max_value; // Pointer to range
325
eq_tree() requires that left == right == 0 if the type is MAYBE_KEY.
327
SEL_ARG *left,*right; /* R-B tree children */
328
SEL_ARG *next,*prev; /* Links for bi-directional interval list */
329
SEL_ARG *parent; /* R-B tree parent */
330
SEL_ARG *next_key_part;
331
enum leaf_color { BLACK,RED } color;
332
enum Type { IMPOSSIBLE, MAYBE, MAYBE_KEY, KEY_RANGE } type;
334
enum { MAX_SEL_ARGS = 16000 };
338
SEL_ARG(Field *,const unsigned char *, const unsigned char *);
339
SEL_ARG(Field *field, uint8_t part, unsigned char *min_value, unsigned char *max_value,
340
uint8_t min_flag, uint8_t max_flag, uint8_t maybe_flag);
341
SEL_ARG(enum Type type_arg)
342
:min_flag(0),elements(1),use_count(1),left(0),right(0),next_key_part(0),
343
color(BLACK), type(type_arg)
345
inline bool is_same(SEL_ARG *arg)
347
if (type != arg->type || part != arg->part)
349
if (type != KEY_RANGE)
351
return cmp_min_to_min(arg) == 0 && cmp_max_to_max(arg) == 0;
353
inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; }
354
inline void maybe_smaller() { maybe_flag=1; }
355
/* Return true iff it's a single-point null interval */
356
inline bool is_null_interval() { return maybe_null && max_value[0] == 1; }
357
inline int cmp_min_to_min(SEL_ARG* arg)
359
return sel_cmp(field,min_value, arg->min_value, min_flag, arg->min_flag);
361
inline int cmp_min_to_max(SEL_ARG* arg)
363
return sel_cmp(field,min_value, arg->max_value, min_flag, arg->max_flag);
365
inline int cmp_max_to_max(SEL_ARG* arg)
367
return sel_cmp(field,max_value, arg->max_value, max_flag, arg->max_flag);
369
inline int cmp_max_to_min(SEL_ARG* arg)
371
return sel_cmp(field,max_value, arg->min_value, max_flag, arg->min_flag);
373
SEL_ARG *clone_and(SEL_ARG* arg)
374
{ // Get overlapping range
375
unsigned char *new_min,*new_max;
376
uint8_t flag_min,flag_max;
377
if (cmp_min_to_min(arg) >= 0)
379
new_min=min_value; flag_min=min_flag;
383
new_min=arg->min_value; flag_min=arg->min_flag; /* purecov: deadcode */
385
if (cmp_max_to_max(arg) <= 0)
387
new_max=max_value; flag_max=max_flag;
391
new_max=arg->max_value; flag_max=arg->max_flag;
393
return new SEL_ARG(field, part, new_min, new_max, flag_min, flag_max,
394
test(maybe_flag && arg->maybe_flag));
396
SEL_ARG *clone_first(SEL_ARG *arg)
397
{ // min <= X < arg->min
398
return new SEL_ARG(field,part, min_value, arg->min_value,
399
min_flag, arg->min_flag & NEAR_MIN ? 0 : NEAR_MAX,
400
maybe_flag | arg->maybe_flag);
402
SEL_ARG *clone_last(SEL_ARG *arg)
403
{ // min <= X <= key_max
404
return new SEL_ARG(field, part, min_value, arg->max_value,
405
min_flag, arg->max_flag, maybe_flag | arg->maybe_flag);
407
SEL_ARG *clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent, SEL_ARG **next);
409
bool copy_min(SEL_ARG* arg)
410
{ // Get overlapping range
411
if (cmp_min_to_min(arg) > 0)
413
min_value=arg->min_value; min_flag=arg->min_flag;
414
if ((max_flag & (NO_MAX_RANGE | NO_MIN_RANGE)) ==
415
(NO_MAX_RANGE | NO_MIN_RANGE))
416
return 1; // Full range
418
maybe_flag|=arg->maybe_flag;
421
bool copy_max(SEL_ARG* arg)
422
{ // Get overlapping range
423
if (cmp_max_to_max(arg) <= 0)
425
max_value=arg->max_value; max_flag=arg->max_flag;
426
if ((max_flag & (NO_MAX_RANGE | NO_MIN_RANGE)) ==
427
(NO_MAX_RANGE | NO_MIN_RANGE))
428
return 1; // Full range
430
maybe_flag|=arg->maybe_flag;
434
void copy_min_to_min(SEL_ARG *arg)
436
min_value=arg->min_value; min_flag=arg->min_flag;
438
void copy_min_to_max(SEL_ARG *arg)
440
max_value=arg->min_value;
441
max_flag=arg->min_flag & NEAR_MIN ? 0 : NEAR_MAX;
443
void copy_max_to_min(SEL_ARG *arg)
445
min_value=arg->max_value;
446
min_flag=arg->max_flag & NEAR_MAX ? 0 : NEAR_MIN;
448
/* returns a number of keypart values (0 or 1) appended to the key buffer */
449
int store_min(uint32_t length, unsigned char **min_key,uint32_t min_key_flag)
451
/* "(kp1 > c1) AND (kp2 OP c2) AND ..." -> (kp1 > c1) */
452
if ((!(min_flag & NO_MIN_RANGE) &&
453
!(min_key_flag & (NO_MIN_RANGE | NEAR_MIN))))
455
if (maybe_null && *min_value)
458
memset(*min_key+1, 0, length-1);
461
memcpy(*min_key,min_value,length);
467
/* returns a number of keypart values (0 or 1) appended to the key buffer */
468
int store_max(uint32_t length, unsigned char **max_key, uint32_t max_key_flag)
470
if (!(max_flag & NO_MAX_RANGE) &&
471
!(max_key_flag & (NO_MAX_RANGE | NEAR_MAX)))
473
if (maybe_null && *max_value)
476
memset(*max_key+1, 0, length-1);
479
memcpy(*max_key,max_value,length);
486
/* returns a number of keypart values appended to the key buffer */
487
int store_min_key(KEY_PART *key, unsigned char **range_key, uint32_t *range_key_flag)
489
SEL_ARG *key_tree= first();
490
uint32_t res= key_tree->store_min(key[key_tree->part].store_length,
491
range_key, *range_key_flag);
492
*range_key_flag|= key_tree->min_flag;
494
if (key_tree->next_key_part &&
495
key_tree->next_key_part->part == key_tree->part+1 &&
496
!(*range_key_flag & (NO_MIN_RANGE | NEAR_MIN)) &&
497
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE)
498
res+= key_tree->next_key_part->store_min_key(key, range_key,
503
/* returns a number of keypart values appended to the key buffer */
504
int store_max_key(KEY_PART *key, unsigned char **range_key, uint32_t *range_key_flag)
506
SEL_ARG *key_tree= last();
507
uint32_t res=key_tree->store_max(key[key_tree->part].store_length,
508
range_key, *range_key_flag);
509
(*range_key_flag)|= key_tree->max_flag;
510
if (key_tree->next_key_part &&
511
key_tree->next_key_part->part == key_tree->part+1 &&
512
!(*range_key_flag & (NO_MAX_RANGE | NEAR_MAX)) &&
513
key_tree->next_key_part->type == SEL_ARG::KEY_RANGE)
514
res+= key_tree->next_key_part->store_max_key(key, range_key,
519
SEL_ARG *insert(SEL_ARG *key);
520
SEL_ARG *tree_delete(SEL_ARG *key);
521
SEL_ARG *find_range(SEL_ARG *key);
522
SEL_ARG *rb_insert(SEL_ARG *leaf);
523
friend SEL_ARG *rb_delete_fixup(SEL_ARG *root,SEL_ARG *key, SEL_ARG *par);
525
friend int test_rb_tree(SEL_ARG *element,SEL_ARG *parent);
526
void test_use_count(SEL_ARG *root);
531
inline bool simple_key()
533
return !next_key_part && elements == 1;
535
void increment_use_count(long count)
539
next_key_part->use_count+=count;
540
count*= (next_key_part->use_count-count);
541
for (SEL_ARG *pos=next_key_part->first(); pos ; pos=pos->next)
542
if (pos->next_key_part)
543
pos->increment_use_count(count);
548
for (SEL_ARG *pos=first(); pos ; pos=pos->next)
549
if (pos->next_key_part)
551
pos->next_key_part->use_count--;
552
pos->next_key_part->free_tree();
556
inline SEL_ARG **parent_ptr()
558
return parent->left == this ? &parent->left : &parent->right;
563
Check if this SEL_ARG object represents a single-point interval
569
Check if this SEL_ARG object (not tree) represents a single-point
570
interval, i.e. if it represents a "keypart = const" or
574
true This SEL_ARG object represents a singlepoint interval
578
bool is_singlepoint()
581
Check for NEAR_MIN ("strictly less") and NO_MIN_RANGE (-inf < field)
582
flags, and the same for right edge.
584
if (min_flag || max_flag)
586
unsigned char *min_val= min_value;
587
unsigned char *max_val= max_value;
591
/* First byte is a NULL value indicator */
592
if (*min_val != *max_val)
596
return true; /* This "x IS NULL" */
600
return !field->key_cmp(min_val, max_val);
602
SEL_ARG *clone_tree(RANGE_OPT_PARAM *param);
608
class SEL_TREE :public Sql_alloc
612
Starting an effort to document this field:
613
(for some i, keys[i]->type == SEL_ARG::IMPOSSIBLE) =>
614
(type == SEL_TREE::IMPOSSIBLE)
616
enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type;
617
SEL_TREE(enum Type type_arg) :type(type_arg) {}
618
SEL_TREE() :type(KEY)
620
keys_map.clear_all();
621
memset(keys, 0, sizeof(keys));
624
Note: there may exist SEL_TREE objects with sel_tree->type=KEY and
625
keys[i]=0 for all i. (SergeyP: it is not clear whether there is any
626
merit in range analyzer functions (e.g. get_mm_parts) returning a
627
pointer to such SEL_TREE instead of NULL)
629
SEL_ARG *keys[MAX_KEY];
630
key_map keys_map; /* bitmask of non-NULL elements in keys */
633
Possible ways to read rows using index_merge. The list is non-empty only
634
if type==KEY. Currently can be non empty only if keys_map.is_clear_all().
636
List<SEL_IMERGE> merges;
638
/* The members below are filled/used only after get_mm_tree is done */
639
key_map ror_scans_map; /* bitmask of ROR scan-able elements in keys */
640
uint32_t n_ror_scans; /* number of set bits in ror_scans_map */
642
struct st_ror_scan_info **ror_scans; /* list of ROR key scans */
643
struct st_ror_scan_info **ror_scans_end; /* last ROR scan */
644
/* Note that #records for each key scan is stored in table->quick_rows */
647
class RANGE_OPT_PARAM
650
Session *session; /* Current thread handle */
651
Table *table; /* Table being analyzed */
652
COND *cond; /* Used inside get_mm_tree(). */
653
table_map prev_tables;
654
table_map read_tables;
655
table_map current_table; /* Bit of the table being analyzed */
657
/* Array of parts of all keys for which range analysis is performed */
659
KEY_PART *key_parts_end;
660
MEM_ROOT *mem_root; /* Memory that will be freed when range analysis completes */
661
MEM_ROOT *old_root; /* Memory that will last until the query end */
663
Number of indexes used in range analysis (In SEL_TREE::keys only first
664
#keys elements are not empty)
669
If true, the index descriptions describe real indexes (and it is ok to
670
call field->optimize_range(real_keynr[...], ...).
671
Otherwise index description describes fake indexes.
673
bool using_real_indexes;
675
bool remove_jump_scans;
678
used_key_no -> table_key_no translation table. Only makes sense if
679
using_real_indexes==true
681
uint32_t real_keynr[MAX_KEY];
682
/* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */
683
uint32_t alloced_sel_args;
684
bool force_default_mrr;
687
class PARAM : public RANGE_OPT_PARAM
690
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
692
uint32_t max_key_part;
693
/* Number of ranges in the last checked tree->key */
694
uint32_t range_count;
695
unsigned char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
696
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
697
bool quick; // Don't calculate possible keys
699
uint32_t fields_bitmap_size;
700
MY_BITMAP needed_fields; /* bitmask of fields needed by the query */
701
MY_BITMAP tmp_covered_fields;
703
key_map *needed_reg; /* ptr to SQL_SELECT::needed_reg */
705
uint32_t *imerge_cost_buff; /* buffer for index_merge cost estimates */
706
uint32_t imerge_cost_buff_size; /* size of the buffer */
708
/* true if last checked tree->key can be used for ROR-scan */
710
/* Number of ranges in the last checked tree->key */
714
class TABLE_READ_PLAN;
716
class TRP_ROR_INTERSECT;
718
class TRP_ROR_INDEX_MERGE;
719
class TRP_GROUP_MIN_MAX;
721
struct st_ror_scan_info;
723
static SEL_TREE * get_mm_parts(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
724
Item_func::Functype type,Item *value,
725
Item_result cmp_type);
726
static SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
728
Item_func::Functype type,Item *value);
729
static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond);
731
static bool is_key_scan_ror(PARAM *param, uint32_t keynr, uint8_t nparts);
732
static ha_rows check_quick_select(PARAM *param, uint32_t idx, bool index_only,
733
SEL_ARG *tree, bool update_tbl_stats,
734
uint32_t *mrr_flags, uint32_t *bufsize,
736
//bool update_tbl_stats);
737
/*static ha_rows check_quick_keys(PARAM *param,uint32_t index,SEL_ARG *key_tree,
738
unsigned char *min_key, uint32_t min_key_flag, int,
739
unsigned char *max_key, uint32_t max_key_flag, int);
742
QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint32_t index,
743
SEL_ARG *key_tree, uint32_t mrr_flags,
744
uint32_t mrr_buf_size, MEM_ROOT *alloc);
745
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
746
bool index_read_must_be_used,
747
bool update_tbl_stats,
750
TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
752
bool *are_all_covering);
754
TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
758
TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
761
TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree);
763
static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
765
static void print_ror_scans_arr(Table *table, const char *msg,
766
struct st_ror_scan_info **start,
767
struct st_ror_scan_info **end);
769
static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
770
static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
771
static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2);
772
static SEL_ARG *key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2);
773
static SEL_ARG *key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2,
774
uint32_t clone_flag);
775
static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1);
776
bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key,
777
SEL_ARG *key_tree, unsigned char *min_key,uint32_t min_key_flag,
778
unsigned char *max_key,uint32_t max_key_flag);
779
static bool eq_tree(SEL_ARG* a,SEL_ARG *b);
781
static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE);
782
static bool null_part_in_key(KEY_PART *key_part, const unsigned char *key,
307
783
uint32_t length);
309
bool sel_trees_can_be_ored(optimizer::SEL_TREE *tree1,
310
optimizer::SEL_TREE *tree2,
311
optimizer::RangeParameter *param);
784
bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, RANGE_OPT_PARAM* param);
788
SEL_IMERGE is a list of possible ways to do index merge, i.e. it is
789
a condition in the following form:
790
(t_1||t_2||...||t_N) && (next)
792
where all t_i are SEL_TREEs, next is another SEL_IMERGE and no pair
793
(t_i,t_j) contains SEL_ARGS for the same index.
795
SEL_TREE contained in SEL_IMERGE always has merges=NULL.
797
This class relies on memory manager to do the cleanup.
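  An illustrative example (not part of the original comment): for
  "WHERE key1_col < 10 OR key2_col = 7" the optimizer builds one SEL_IMERGE
  holding two SEL_TREEs, one with a SEL_ARG interval on the index covering
  key1_col and one on the index covering key2_col, describing an index_merge
  union of the two range scans.
*/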
800
class SEL_IMERGE : public Sql_alloc
802
enum { PREALLOCED_TREES= 10};
804
SEL_TREE *trees_prealloced[PREALLOCED_TREES];
805
SEL_TREE **trees; /* trees used to do index_merge */
806
SEL_TREE **trees_next; /* last of these trees */
807
SEL_TREE **trees_end; /* end of allocated space */
809
SEL_ARG ***best_keys; /* best keys to read in SEL_TREEs */
812
trees(&trees_prealloced[0]),
814
trees_end(trees + PREALLOCED_TREES)
816
int or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree);
817
int or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree);
818
int or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge);
823
Add SEL_TREE to this index_merge without any checks,
826
This function implements the following:
827
(x_1||...||x_N) || t = (x_1||...||x_N||t), where x_i, t are SEL_TREEs
834
int SEL_IMERGE::or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree)
836
if (trees_next == trees_end)
838
const int realloc_ratio= 2; /* Double size for next round */
839
uint32_t old_elements= (trees_end - trees);
840
uint32_t old_size= sizeof(SEL_TREE**) * old_elements;
841
uint32_t new_size= old_size * realloc_ratio;
842
SEL_TREE **new_trees;
843
if (!(new_trees= (SEL_TREE**)alloc_root(param->mem_root, new_size)))
845
memcpy(new_trees, trees, old_size);
847
trees_next= trees + old_elements;
848
trees_end= trees + old_elements * realloc_ratio;
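  /*
    The previous trees[] storage is not freed here: it either points at
    trees_prealloced[] or was carved from param->mem_root, which is
    released as a whole when range analysis finishes.
  */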
850
*(trees_next++)= tree;
856
Perform OR operation on this SEL_IMERGE and supplied SEL_TREE new_tree,
857
combining new_tree with one of the trees in this SEL_IMERGE if they both
858
have SEL_ARGs for the same key.
861
or_sel_tree_with_checks()
862
param PARAM from SQL_SELECT::test_quick_select
863
new_tree SEL_TREE with type KEY or KEY_SMALLER.
866
This does the following:
867
(t_1||...||t_k)||new_tree =
869
= (t_1||...||t_k||new_tree)
871
= (t_1||....||(t_j|| new_tree)||...||t_k),
873
where t_i, y are SEL_TREEs.
874
new_tree is combined with the first t_j it has a SEL_ARG on common
875
key with. As a consequence of this, choice of keys to do index_merge
876
read may depend on the order of conditions in WHERE part of the query.
880
1 One of the trees was combined with new_tree to SEL_TREE::ALWAYS,
881
and (*this) should be discarded.
882
-1 An error occurred.
885
int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree)
887
for (SEL_TREE** tree = trees;
891
if (sel_trees_can_be_ored(*tree, new_tree, param))
893
*tree = tree_or(param, *tree, new_tree);
896
if (((*tree)->type == SEL_TREE::MAYBE) ||
897
((*tree)->type == SEL_TREE::ALWAYS))
899
/* SEL_TREE::IMPOSSIBLE is impossible here */
904
/* New tree cannot be combined with any of existing trees. */
905
return or_sel_tree(param, new_tree);
910
Perform OR operation on this index_merge and supplied index_merge list.
914
1 - One of conditions in result is always true and this SEL_IMERGE
916
-1 - An error occurred
919
int SEL_IMERGE::or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge)
921
for (SEL_TREE** tree= imerge->trees;
922
tree != imerge->trees_next;
925
if (or_sel_tree_with_checks(param, *tree))
319
933
Perform AND operation on two index_merge lists and store result in *im1.
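  The result is simply the concatenation of the two lists: an AND of two
  index_merge conditions, (a_1 && ... && a_N) && (b_1 && ... && b_K), is
  represented by the list of all conjuncts.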
322
inline void imerge_list_and_list(List<optimizer::SEL_IMERGE> *im1, List<optimizer::SEL_IMERGE> *im2)
936
inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2)
324
938
im1->concat(im2);
943
Perform OR operation on 2 index_merge lists, storing result in first list.
946
The following conversion is implemented:
947
(a_1 &&...&& a_N)||(b_1 &&...&& b_K) = AND_i,j(a_i || b_j) =>
950
i.e. all conjuncts except the first one are currently dropped.
951
This is done to avoid producing N*K ways to do index_merge.
953
If (a_1||b_1) produces a condition that is always true, NULL is returned
954
and index_merge is discarded (while it is actually possible to try
957
As a consequence of this, choice of keys to do index_merge read may depend
958
on the order of conditions in WHERE part of the query.
961
0 OK, result is stored in *im1
962
other Error, both passed lists are unusable
965
int imerge_list_or_list(RANGE_OPT_PARAM *param,
966
List<SEL_IMERGE> *im1,
967
List<SEL_IMERGE> *im2)
969
SEL_IMERGE *imerge= im1->head();
971
im1->push_back(imerge);
973
return imerge->or_sel_imerge_with_checks(param, im2->head());
978
Perform OR operation on index_merge list and key tree.
981
0 OK, result is stored in *im1.
985
int imerge_list_or_tree(RANGE_OPT_PARAM *param,
986
List<SEL_IMERGE> *im1,
990
List_iterator<SEL_IMERGE> it(*im1);
991
while ((imerge= it++))
993
if (imerge->or_sel_tree_with_checks(param, tree))
996
return im1->is_empty();
328
1000
/***************************************************************************
** Basic functions for SqlSelect and QuickRangeSelect
***************************************************************************/
332
1004
/* make a select from mysql info
375
optimizer::SqlSelect::SqlSelect()
379
file(static_cast<internal::IO_CACHE *>(memory::sql_calloc(sizeof(internal::IO_CACHE)))),
1043
SQL_SELECT::SQL_SELECT() :quick(0),cond(0),free_cond(0)
1045
quick_keys.clear_all(); needed_reg.clear_all();
388
void optimizer::SqlSelect::cleanup()
1050
void SQL_SELECT::cleanup()
402
file->close_cached_file();
1060
close_cached_file(&file);
406
optimizer::SqlSelect::~SqlSelect()
1064
SQL_SELECT::~SQL_SELECT()
412
bool optimizer::SqlSelect::check_quick(Session *session,
413
bool force_quick_range,
1070
bool SQL_SELECT::check_quick(Session *session, bool force_quick_range,
418
return (test_quick_select(session,
427
bool optimizer::SqlSelect::skip_record()
429
return (cond ? cond->val_int() == 0 : 0);
433
optimizer::QuickSelectInterface::QuickSelectInterface()
435
max_used_key_length(0),
1075
return test_quick_select(session, tmp, 0, limit,
1076
force_quick_range, false) < 0;
1080
bool SQL_SELECT::skip_record()
1082
return cond ? cond->val_int() == 0 : 0;
1086
QUICK_SELECT_I::QUICK_SELECT_I()
1087
:max_used_key_length(0),
1091
QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(Session *session, Table *table, uint32_t key_nr,
1092
bool no_alloc, MEM_ROOT *parent_alloc,
1094
:free_file(0),cur_range(NULL),last_range(0),dont_free(0)
1096
my_bitmap_map *bitmap;
1098
in_ror_merged_scan= 0;
1102
key_part_info= head->key_info[index].key_part;
1103
my_init_dynamic_array(&ranges, sizeof(QUICK_RANGE*), 16, 16);
1105
/* 'session' is not accessible in QUICK_RANGE_SELECT::reset(). */
1106
mrr_buf_size= session->variables.read_rnd_buff_size;
1109
if (!no_alloc && !parent_alloc)
1111
// Allocates everything through the internal memroot
1112
init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
1113
session->mem_root= &alloc;
1116
memset(&alloc, 0, sizeof(alloc));
1118
record= head->record[0];
1119
save_read_set= head->read_set;
1120
save_write_set= head->write_set;
1122
/* Allocate a bitmap for used columns (Q: why not on MEM_ROOT?) */
1123
if (!(bitmap= (my_bitmap_map*) malloc(head->s->column_bitmap_size)))
1125
column_bitmap.bitmap= 0;
1129
bitmap_init(&column_bitmap, bitmap, head->s->fields, false);
1134
int QUICK_RANGE_SELECT::init()
1136
if (file->inited != handler::NONE)
1137
file->ha_index_or_rnd_end();
1138
return(file->ha_index_init(index, 1));
1142
void QUICK_RANGE_SELECT::range_end()
1144
if (file->inited != handler::NONE)
1145
file->ha_index_or_rnd_end();
1149
QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
1153
/* file is NULL for CPK scan on covering ROR-intersection */
1160
file->extra(HA_EXTRA_NO_KEYREAD);
1164
file->ha_external_lock(current_session, F_UNLCK);
1169
delete_dynamic(&ranges); /* ranges are allocated in alloc */
1170
free_root(&alloc,MYF(0));
1171
free((char*) column_bitmap.bitmap);
1173
head->column_bitmaps_set(save_read_set, save_write_set);
1180
QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(Session *session_param,
1182
:pk_quick_select(NULL), session(session_param)
1186
memset(&read_record, 0, sizeof(read_record));
1187
init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
1191
int QUICK_INDEX_MERGE_SELECT::init()
1196
int QUICK_INDEX_MERGE_SELECT::reset()
1198
return(read_keys_and_merge());
1202
QUICK_INDEX_MERGE_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range)
1205
Save quick_select that does scan on clustered primary key as it will be
1206
processed separately.
1208
if (head->file->primary_key_is_clustered() &&
1209
quick_sel_range->index == head->s->primary_key)
1210
pk_quick_select= quick_sel_range;
1212
return quick_selects.push_back(quick_sel_range);
1216
QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT()
1218
List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects);
1219
QUICK_RANGE_SELECT* quick;
1221
while ((quick= quick_it++))
1223
quick_selects.delete_elements();
1224
delete pk_quick_select;
1225
free_root(&alloc,MYF(0));
1230
QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(Session *session_param,
1232
bool retrieve_full_rows,
1233
MEM_ROOT *parent_alloc)
1234
: cpk_quick(NULL), session(session_param), need_to_fetch_row(retrieve_full_rows),
1239
record= head->record[0];
1241
init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
1243
memset(&alloc, 0, sizeof(MEM_ROOT));
1244
last_rowid= (unsigned char*) alloc_root(parent_alloc? parent_alloc : &alloc,
1245
head->file->ref_length);
1250
Do post-constructor initialization.
1252
QUICK_ROR_INTERSECT_SELECT::init()
1259
int QUICK_ROR_INTERSECT_SELECT::init()
1261
/* Check if last_rowid was successfully allocated in ctor */
1262
return(!last_rowid);
1267
Initialize this quick select to be a ROR-merged scan.
1270
QUICK_RANGE_SELECT::init_ror_merged_scan()
1271
reuse_handler If true, use head->file, otherwise create a separate
1275
This function creates and prepares for subsequent use a separate handler
1276
object if it can't reuse head->file. The reason for this is that during
1277
ROR-merge several key scans are performed simultaneously, and a single
1278
handler is only capable of preserving context of a single key scan.
1280
In ROR-merge the quick select doing merge does full records retrieval,
1281
merged quick selects read only keys.
1284
0 ROR child scan initialized, ok to use.
1288
int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
1290
handler *save_file= file, *org_file;
1293
in_ror_merged_scan= 1;
1296
if (init() || reset())
1300
head->column_bitmaps_set(&column_bitmap, &column_bitmap);
1304
/* Create a separate handler object for this quick select */
1307
/* already have own 'handler' object. */
1311
session= head->in_use;
1312
if (!(file= head->file->clone(session->mem_root)))
1315
Manually set the error flag. Note: there seems to be quite a few
1316
places where a failure could cause the server to "hang" the client by
1317
sending no response to a query. ATM those are not real errors because
1318
the storage engine calls in question happen to never fail with the
1319
existing storage engines.
1321
my_error(ER_OUT_OF_RESOURCES, MYF(0)); /* purecov: inspected */
1322
/* Caller will free the memory */
1323
goto failure; /* purecov: inspected */
1326
head->column_bitmaps_set(&column_bitmap, &column_bitmap);
1328
if (file->ha_external_lock(session, F_RDLCK))
1331
if (init() || reset())
1333
file->ha_external_lock(session, F_UNLCK);
1338
last_rowid= file->ref;
1342
We are only going to read key fields and call position() on 'file'
1343
The following sets head->tmp_set to only use this key and then updates
1344
head->read_set and head->write_set to use this bitmap.
1345
The new bitmap is stored in 'column_bitmap' which is used in ::get_next()
1347
org_file= head->file;
1349
/* We don't have to set 'head->keyread' here as the 'file' is unique */
1350
if (!head->no_keyread)
1353
head->mark_columns_used_by_index(index);
1355
head->prepare_for_position();
1356
head->file= org_file;
1357
bitmap_copy(&column_bitmap, head->read_set);
1358
head->column_bitmaps_set(&column_bitmap, &column_bitmap);
1363
head->column_bitmaps_set(save_read_set, save_write_set);
1370
void QUICK_RANGE_SELECT::save_last_pos()
1372
file->position(record);
1377
Initialize this quick select to be a part of a ROR-merged scan.
1379
QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan()
1380
reuse_handler If true, use head->file, otherwise create separate
1386
int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler)
1388
List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects);
1389
QUICK_RANGE_SELECT* quick;
1391
/* Initialize all merged "children" quick selects */
1392
assert(!need_to_fetch_row || reuse_handler);
1393
if (!need_to_fetch_row && reuse_handler)
1397
There is no use of this->file. Use it for the first of merged range
1400
if (quick->init_ror_merged_scan(true))
1402
quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS);
1404
while ((quick= quick_it++))
1406
if (quick->init_ror_merged_scan(false))
1408
quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS);
1409
/* All merged scans share the same record buffer in intersection. */
1410
quick->record= head->record[0];
1413
if (need_to_fetch_row && head->file->ha_rnd_init(1))
1422
Initialize quick select for row retrieval.
1430
int QUICK_ROR_INTERSECT_SELECT::reset()
1432
if (!scans_inited && init_ror_merged_scan(true))
1435
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
1436
QUICK_RANGE_SELECT *quick;
1437
while ((quick= it++))
1444
Add a merged quick select to this ROR-intersection quick select.
1447
QUICK_ROR_INTERSECT_SELECT::push_quick_back()
1448
quick Quick select to be added. The quick select must return
1449
rows in rowid order.
1451
This call can only be made before init() is called.
1459
QUICK_ROR_INTERSECT_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick)
1461
return quick_selects.push_back(quick);
1464
QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT()
1466
quick_selects.delete_elements();
1468
free_root(&alloc,MYF(0));
1469
if (need_to_fetch_row && head->file->inited != handler::NONE)
1470
head->file->ha_rnd_end();
1475
QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(Session *session_param,
1477
: session(session_param), scans_inited(false)
1481
rowid_length= table->file->ref_length;
1482
record= head->record[0];
1483
init_sql_alloc(&alloc, session->variables.range_alloc_block_size, 0);
1484
session_param->mem_root= &alloc;
1489
Do post-constructor initialization.
1491
QUICK_ROR_UNION_SELECT::init()
1498
int QUICK_ROR_UNION_SELECT::init()
1500
if (init_queue(&queue, quick_selects.elements, 0,
1501
false , QUICK_ROR_UNION_SELECT::queue_cmp,
1504
memset(&queue, 0, sizeof(QUEUE));
1508
if (!(cur_rowid= (unsigned char*) alloc_root(&alloc, 2*head->file->ref_length)))
1510
prev_rowid= cur_rowid + head->file->ref_length;
1516
Comparison function to be used by the QUICK_ROR_UNION_SELECT::queue priority queue.
1520
QUICK_ROR_UNION_SELECT::queue_cmp()
1521
arg Pointer to QUICK_ROR_UNION_SELECT
1522
val1 First merged select
1523
val2 Second merged select
1526
int QUICK_ROR_UNION_SELECT::queue_cmp(void *arg, unsigned char *val1, unsigned char *val2)
1528
QUICK_ROR_UNION_SELECT *self= (QUICK_ROR_UNION_SELECT*)arg;
1529
return self->head->file->cmp_ref(((QUICK_SELECT_I*)val1)->last_rowid,
1530
((QUICK_SELECT_I*)val2)->last_rowid);
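/*
  Ordering the priority queue by the scans' current rowids lets the
  ROR-union always pop the smallest rowid next, so rows are returned in
  rowid order and duplicates produced by different merged scans can be
  detected via prev_rowid and skipped.
*/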
1535
Initialize quick select for row retrieval.
1544
int QUICK_ROR_UNION_SELECT::reset()
1546
QUICK_SELECT_I *quick;
1548
have_prev_rowid= false;
1551
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
1552
while ((quick= it++))
1554
if (quick->init_ror_merged_scan(false))
1559
queue_remove_all(&queue);
1561
Initialize scans for merged quick selects and put all merged quick
1562
selects into the queue.
1564
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
1565
while ((quick= it++))
1569
if ((error= quick->get_next()))
1571
if (error == HA_ERR_END_OF_FILE)
1575
quick->save_last_pos();
1576
queue_insert(&queue, (unsigned char*)quick);
1579
if (head->file->ha_rnd_init(1))
1589
QUICK_ROR_UNION_SELECT::push_quick_back(QUICK_SELECT_I *quick_sel_range)
1591
return quick_selects.push_back(quick_sel_range);
1594
QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT()
1596
delete_queue(&queue);
1597
quick_selects.delete_elements();
1598
if (head->file->inited != handler::NONE)
1599
head->file->ha_rnd_end();
1600
free_root(&alloc,MYF(0));
1605
QUICK_RANGE::QUICK_RANGE()
1606
:min_key(0),max_key(0),min_length(0),max_length(0),
1607
flag(NO_MIN_RANGE | NO_MAX_RANGE),
1608
min_keypart_map(0), max_keypart_map(0)
1611
SEL_ARG::SEL_ARG(SEL_ARG &arg) :Sql_alloc()
1614
min_flag=arg.min_flag;
1615
max_flag=arg.max_flag;
1616
maybe_flag=arg.maybe_flag;
1617
maybe_null=arg.maybe_null;
1620
min_value=arg.min_value;
1621
max_value=arg.max_value;
1622
next_key_part=arg.next_key_part;
1623
use_count=1; elements=1;
1627
inline void SEL_ARG::make_root()
1629
left=right= &null_element;
1632
use_count=0; elements=1;
1635
SEL_ARG::SEL_ARG(Field *f,const unsigned char *min_value_arg,
1636
const unsigned char *max_value_arg)
1637
:min_flag(0), max_flag(0), maybe_flag(0), maybe_null(f->real_maybe_null()),
1638
elements(1), use_count(1), field(f), min_value((unsigned char*) min_value_arg),
1639
max_value((unsigned char*) max_value_arg), next(0),prev(0),
1640
next_key_part(0),color(BLACK),type(KEY_RANGE)
1642
left=right= &null_element;
1645
SEL_ARG::SEL_ARG(Field *field_,uint8_t part_,
1646
unsigned char *min_value_, unsigned char *max_value_,
1647
uint8_t min_flag_,uint8_t max_flag_,uint8_t maybe_flag_)
1648
:min_flag(min_flag_),max_flag(max_flag_),maybe_flag(maybe_flag_),
1649
part(part_),maybe_null(field_->real_maybe_null()), elements(1),use_count(1),
1650
field(field_), min_value(min_value_), max_value(max_value_),
1651
next(0),prev(0),next_key_part(0),color(BLACK),type(KEY_RANGE)
1653
left=right= &null_element;
1656
SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent,
1661
/* Bail out if we have already generated too many SEL_ARGs */
1662
if (++param->alloced_sel_args > MAX_SEL_ARGS)
1665
if (type != KEY_RANGE)
1667
if (!(tmp= new (param->mem_root) SEL_ARG(type)))
1668
return 0; // out of memory
1669
tmp->prev= *next_arg; // Link into next/prev chain
1670
(*next_arg)->next=tmp;
1675
if (!(tmp= new (param->mem_root) SEL_ARG(field,part, min_value,max_value,
1676
min_flag, max_flag, maybe_flag)))
1678
tmp->parent=new_parent;
1679
tmp->next_key_part=next_key_part;
1680
if (left != &null_element)
1681
if (!(tmp->left=left->clone(param, tmp, next_arg)))
1684
tmp->prev= *next_arg; // Link into next/prev chain
1685
(*next_arg)->next=tmp;
1688
if (right != &null_element)
1689
if (!(tmp->right= right->clone(param, tmp, next_arg)))
1692
increment_use_count(1);
1694
tmp->elements= this->elements;
1698
SEL_ARG *SEL_ARG::first()
1700
SEL_ARG *next_arg=this;
1701
if (!next_arg->left)
1702
return 0; // MAYBE_KEY
1703
while (next_arg->left != &null_element)
1704
next_arg=next_arg->left;
1708
SEL_ARG *SEL_ARG::last()
1710
SEL_ARG *next_arg=this;
1711
if (!next_arg->right)
1712
return 0; // MAYBE_KEY
1713
while (next_arg->right != &null_element)
1714
next_arg=next_arg->right;
1720
  Check if a compare is ok, when one takes ranges into account
  Returns -2 or 2 if the ranges were 'joined' like < 2 and >= 2
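  For example (illustrative): comparing the max bound of "x < 2" with the
  min bound of "x >= 2" finds equal key values, but the first bound is open
  (NEAR_MAX), so -2 is returned and key_or() knows the two intervals are
  adjacent and can be joined.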
1724
static int sel_cmp(Field *field, unsigned char *a, unsigned char *b, uint8_t a_flag,
1728
/* First check if there was a compare to a min or max element */
1729
if (a_flag & (NO_MIN_RANGE | NO_MAX_RANGE))
1731
if ((a_flag & (NO_MIN_RANGE | NO_MAX_RANGE)) ==
1732
(b_flag & (NO_MIN_RANGE | NO_MAX_RANGE)))
1734
return (a_flag & NO_MIN_RANGE) ? -1 : 1;
1736
if (b_flag & (NO_MIN_RANGE | NO_MAX_RANGE))
1737
return (b_flag & NO_MIN_RANGE) ? 1 : -1;
1739
if (field->real_maybe_null()) // If null is part of key
1746
goto end; // NULL where equal
1747
a++; b++; // Skip NULL marker
1749
cmp=field->key_cmp(a , b);
1750
if (cmp) return cmp < 0 ? -1 : 1; // The values differed
1752
// Check if the compared equal arguments were defined with open/closed range
1754
if (a_flag & (NEAR_MIN | NEAR_MAX))
1756
if ((a_flag & (NEAR_MIN | NEAR_MAX)) == (b_flag & (NEAR_MIN | NEAR_MAX)))
1758
if (!(b_flag & (NEAR_MIN | NEAR_MAX)))
1759
return (a_flag & NEAR_MIN) ? 2 : -2;
1760
return (a_flag & NEAR_MIN) ? 1 : -1;
1762
if (b_flag & (NEAR_MIN | NEAR_MAX))
1763
return (b_flag & NEAR_MIN) ? -2 : 2;
1764
return 0; // The elements were equal
1768
SEL_ARG *SEL_ARG::clone_tree(RANGE_OPT_PARAM *param)
1770
SEL_ARG tmp_link,*next_arg,*root;
1771
next_arg= &tmp_link;
1772
if (!(root= clone(param, (SEL_ARG *) 0, &next_arg)))
1774
next_arg->next=0; // Fix last link
1775
tmp_link.next->prev=0; // Fix first link
1776
if (root) // If not OOM
5136
key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
5156
if (key1->part != key2->part)
5160
return 0; // Can't optimize this
5163
// If one of the key is MAYBE_KEY then the found region may be bigger
5164
if (key1->type == SEL_ARG::MAYBE_KEY)
5170
if (key2->type == SEL_ARG::MAYBE_KEY)
5177
if (key1->use_count > 0)
5179
if (key2->use_count == 0 || key1->elements > key2->elements)
5181
std::swap(key1,key2);
5183
if (key1->use_count > 0 || !(key1=key1->clone_tree(param)))
5187
// Add tree at key2 to tree at key1
5188
bool key2_shared=key2->use_count != 0;
5189
key1->maybe_flag|=key2->maybe_flag;
5191
for (key2=key2->first(); key2; )
5193
SEL_ARG *tmp=key1->find_range(key2); // Find key1.min <= key2.min
5198
tmp=key1->first(); // tmp.min > key2.min
5201
else if ((cmp=tmp->cmp_max_to_min(key2)) < 0)
5202
{ // Found tmp.max < key2.min
5203
SEL_ARG *next=tmp->next;
5204
if (cmp == -2 && eq_tree(tmp->next_key_part,key2->next_key_part))
5206
// Join near ranges like tmp.max < 0 and key2.min >= 0
5207
SEL_ARG *key2_next=key2->next;
5210
if (!(key2=new SEL_ARG(*key2)))
5211
return 0; // out of memory
5212
key2->increment_use_count(key1->use_count+1);
5213
key2->next=key2_next; // New copy of key2
5215
key2->copy_min(tmp);
5216
if (!(key1=key1->tree_delete(tmp)))
5217
{ // Only one key in tree
5224
if (!(tmp=next)) // tmp.min > key2.min
5225
break; // Copy rest of key2
5228
{ // tmp.min > key2.min
5230
if ((tmp_cmp=tmp->cmp_min_to_max(key2)) > 0) // if tmp.min > key2.max
5232
if (tmp_cmp == 2 && eq_tree(tmp->next_key_part,key2->next_key_part))
5233
{ // ranges are connected
5234
tmp->copy_min_to_min(key2);
5235
key1->merge_flags(key2);
5236
if (tmp->min_flag & NO_MIN_RANGE &&
5237
tmp->max_flag & NO_MAX_RANGE)
5239
if (key1->maybe_flag)
5240
return new SEL_ARG(SEL_ARG::MAYBE_KEY);
5243
key2->increment_use_count(-1); // Free not used tree
5249
SEL_ARG *next=key2->next; // Keys are not overlapping
5252
SEL_ARG *cpy= new SEL_ARG(*key2); // Must make copy
5255
key1=key1->insert(cpy);
5256
key2->increment_use_count(key1->use_count+1);
5259
key1=key1->insert(key2); // Will destroy key2_root
5266
// tmp.max >= key2.min && tmp.min <= key2.max (overlapping ranges)
5267
if (eq_tree(tmp->next_key_part,key2->next_key_part))
5269
if (tmp->is_same(key2))
5271
tmp->merge_flags(key2); // Copy maybe flags
5272
key2->increment_use_count(-1); // Free not used tree
5277
while (last->next && last->next->cmp_min_to_max(key2) <= 0 &&
5278
eq_tree(last->next->next_key_part,key2->next_key_part))
5282
key1=key1->tree_delete(save);
5284
last->copy_min(tmp);
5285
if (last->copy_min(key2) || last->copy_max(key2))
5288
for (; key2 ; key2=key2->next)
5289
key2->increment_use_count(-1); // Free not used tree
5290
if (key1->maybe_flag)
5291
return new SEL_ARG(SEL_ARG::MAYBE_KEY);
5299
if (cmp >= 0 && tmp->cmp_min_to_min(key2) < 0)
5300
{ // tmp.min <= x < key2.min
5301
SEL_ARG *new_arg=tmp->clone_first(key2);
5304
if ((new_arg->next_key_part= key1->next_key_part))
5305
new_arg->increment_use_count(key1->use_count+1);
5306
tmp->copy_min_to_min(key2);
5307
key1=key1->insert(new_arg);
5310
// tmp.min >= key2.min && tmp.min <= key2.max
5311
SEL_ARG key(*key2); // Get copy we can modify
5314
if (tmp->cmp_min_to_min(&key) > 0)
5315
{ // key.min <= x < tmp.min
5316
SEL_ARG *new_arg=key.clone_first(tmp);
5319
if ((new_arg->next_key_part=key.next_key_part))
5320
new_arg->increment_use_count(key1->use_count+1);
5321
key1=key1->insert(new_arg);
5323
if ((cmp=tmp->cmp_max_to_max(&key)) <= 0)
5324
{ // tmp.min. <= x <= tmp.max
5325
tmp->maybe_flag|= key.maybe_flag;
5326
key.increment_use_count(key1->use_count+1);
5327
tmp->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part);
5328
if (!cmp) // Key2 is ready
5330
key.copy_max_to_min(tmp);
5331
if (!(tmp=tmp->next))
5333
SEL_ARG *tmp2= new SEL_ARG(key);
5336
key1=key1->insert(tmp2);
5340
if (tmp->cmp_min_to_max(&key) > 0)
5342
SEL_ARG *tmp2= new SEL_ARG(key);
5345
key1=key1->insert(tmp2);
5351
SEL_ARG *new_arg=tmp->clone_last(&key); // tmp.min <= x <= key.max
5354
tmp->copy_max_to_min(&key);
5355
tmp->increment_use_count(key1->use_count+1);
5356
/* Increment key count as it may be used for next loop */
5357
key.increment_use_count(1);
5358
new_arg->next_key_part= key_or(param, tmp->next_key_part, key.next_key_part);
5359
key1=key1->insert(new_arg);
5369
SEL_ARG *next=key2->next;
5372
SEL_ARG *tmp=new SEL_ARG(*key2); // Must make copy
5375
key2->increment_use_count(key1->use_count+1);
5376
key1=key1->insert(tmp);
5379
key1=key1->insert(key2); // Will destroy key2_root
5387
/* Compare if two trees are equal */
5389
static bool eq_tree(SEL_ARG* a,SEL_ARG *b)
5393
if (!a || !b || !a->is_same(b))
5395
if (a->left != &null_element && b->left != &null_element)
5397
if (!eq_tree(a->left,b->left))
5400
else if (a->left != &null_element || b->left != &null_element)
5402
if (a->right != &null_element && b->right != &null_element)
5404
if (!eq_tree(a->right,b->right))
5407
else if (a->right != &null_element || b->right != &null_element)
5409
if (a->next_key_part != b->next_key_part)
5411
if (!a->next_key_part != !b->next_key_part ||
5412
!eq_tree(a->next_key_part, b->next_key_part))
5420
SEL_ARG::insert(SEL_ARG *key)
5422
SEL_ARG *element, **par= NULL, *last_element= NULL;
5424
for (element= this; element != &null_element ; )
5426
last_element=element;
5427
if (key->cmp_min_to_min(element) > 0)
5429
par= &element->right; element= element->right;
5433
par = &element->left; element= element->left;
5437
key->parent=last_element;
5439
if (par == &last_element->left)
5441
key->next=last_element;
5442
if ((key->prev=last_element->prev))
5443
key->prev->next=key;
5444
last_element->prev=key;
5448
if ((key->next=last_element->next))
5449
key->next->prev=key;
5450
key->prev=last_element;
5451
last_element->next=key;
5453
key->left=key->right= &null_element;
5454
SEL_ARG *root=rb_insert(key); // rebalance tree
5455
root->use_count=this->use_count; // copy root info
5456
root->elements= this->elements+1;
5457
root->maybe_flag=this->maybe_flag;
5463
** Find best key with min <= given key
** Because of the call context, this should never return 0 to get_range
5468
SEL_ARG::find_range(SEL_ARG *key)
5470
SEL_ARG *element=this,*found=0;
5474
if (element == &null_element)
5476
int cmp=element->cmp_min_to_min(key);
5482
element=element->right;
5485
element=element->left;
5491
Remove an element from the tree
5495
key Key that is to be deleted from tree (this)
5498
This also frees all sub-trees that are used by the element
5501
root of new tree (with key deleted)
5505
SEL_ARG::tree_delete(SEL_ARG *key)
5507
enum leaf_color remove_color;
5508
SEL_ARG *root,*nod,**par,*fix_par;
5513
/* Unlink from list */
5515
key->prev->next=key->next;
5517
key->next->prev=key->prev;
5518
key->increment_use_count(-1);
5522
par=key->parent_ptr();
5524
if (key->left == &null_element)
5526
*par=nod=key->right;
5527
fix_par=key->parent;
5528
if (nod != &null_element)
5529
nod->parent=fix_par;
5530
remove_color= key->color;
5532
else if (key->right == &null_element)
5534
*par= nod=key->left;
5535
nod->parent=fix_par=key->parent;
5536
remove_color= key->color;
5540
SEL_ARG *tmp=key->next; // next bigger key (exist!)
5541
nod= *tmp->parent_ptr()= tmp->right; // unlink tmp from tree
5542
fix_par=tmp->parent;
5543
if (nod != &null_element)
5544
nod->parent=fix_par;
5545
remove_color= tmp->color;
5547
tmp->parent=key->parent; // Move node in place of key
5548
(tmp->left=key->left)->parent=tmp;
5549
if ((tmp->right=key->right) != &null_element)
5550
tmp->right->parent=tmp;
5551
tmp->color=key->color;
5553
if (fix_par == key) // key->right == key->next
5554
fix_par=tmp; // new parent of nod
5557
if (root == &null_element)
5558
return 0; // Maybe root later
5559
if (remove_color == BLACK)
5560
root=rb_delete_fixup(root,nod,fix_par);
5562
test_rb_tree(root,root->parent);
5563
#endif /* EXTRA_DEBUG */
5565
root->use_count=this->use_count; // Fix root counters
5566
root->elements=this->elements-1;
5567
root->maybe_flag=this->maybe_flag;
5572
/* Functions to fix up the tree after insert and delete */
5574
static void left_rotate(SEL_ARG **root,SEL_ARG *leaf)
5576
SEL_ARG *y=leaf->right;
5577
leaf->right=y->left;
5578
if (y->left != &null_element)
5579
y->left->parent=leaf;
5580
if (!(y->parent=leaf->parent))
5583
*leaf->parent_ptr()=y;
5588
static void right_rotate(SEL_ARG **root,SEL_ARG *leaf)
5590
SEL_ARG *y=leaf->left;
5591
leaf->left=y->right;
5592
if (y->right != &null_element)
5593
y->right->parent=leaf;
5594
if (!(y->parent=leaf->parent))
5597
*leaf->parent_ptr()=y;
5604
SEL_ARG::rb_insert(SEL_ARG *leaf)
5606
SEL_ARG *y,*par,*par2,*root;
5607
root= this; root->parent= 0;
5610
while (leaf != root && (par= leaf->parent)->color == RED)
5611
{ // This can't be root or 1 level under
5612
if (par == (par2= leaf->parent->parent)->left)
5615
if (y->color == RED)
5620
leaf->color=RED; /* And the loop continues */
5624
if (leaf == par->right)
5626
left_rotate(&root,leaf->parent);
5627
par=leaf; /* leaf is now parent to old leaf */
5631
right_rotate(&root,par2);
5638
if (y->color == RED)
5643
leaf->color=RED; /* And the loop continues */
5647
if (leaf == par->left)
5649
right_rotate(&root,par);
5654
left_rotate(&root,par2);
5661
test_rb_tree(root,root->parent);
5662
#endif /* EXTRA_DEBUG */
5668
SEL_ARG *rb_delete_fixup(SEL_ARG *root,SEL_ARG *key,SEL_ARG *par)
5674
while (x != root && x->color == SEL_ARG::BLACK)
5679
if (w->color == SEL_ARG::RED)
5681
w->color=SEL_ARG::BLACK;
5682
par->color=SEL_ARG::RED;
5683
left_rotate(&root,par);
5686
if (w->left->color == SEL_ARG::BLACK && w->right->color == SEL_ARG::BLACK)
5688
w->color=SEL_ARG::RED;
5693
if (w->right->color == SEL_ARG::BLACK)
5695
w->left->color=SEL_ARG::BLACK;
5696
w->color=SEL_ARG::RED;
5697
right_rotate(&root,w);
5700
w->color=par->color;
5701
par->color=SEL_ARG::BLACK;
5702
w->right->color=SEL_ARG::BLACK;
5703
left_rotate(&root,par);
5711
if (w->color == SEL_ARG::RED)
5713
w->color=SEL_ARG::BLACK;
5714
par->color=SEL_ARG::RED;
5715
right_rotate(&root,par);
5718
if (w->right->color == SEL_ARG::BLACK && w->left->color == SEL_ARG::BLACK)
5720
w->color=SEL_ARG::RED;
5725
if (w->left->color == SEL_ARG::BLACK)
5727
w->right->color=SEL_ARG::BLACK;
5728
w->color=SEL_ARG::RED;
5729
left_rotate(&root,w);
5732
w->color=par->color;
5733
par->color=SEL_ARG::BLACK;
5734
w->left->color=SEL_ARG::BLACK;
5735
right_rotate(&root,par);
5742
x->color=SEL_ARG::BLACK;
5747
/* Test that the properties for a red-black tree hold */
5750
int test_rb_tree(SEL_ARG *element,SEL_ARG *parent)
5752
int count_l,count_r;
5754
if (element == &null_element)
5755
return 0; // Found end of tree
5756
if (element->parent != parent)
5758
errmsg_printf(ERRMSG_LVL_ERROR, "Wrong tree: Parent doesn't point at parent");
5761
if (element->color == SEL_ARG::RED &&
5762
(element->left->color == SEL_ARG::RED ||
5763
element->right->color == SEL_ARG::RED))
5765
errmsg_printf(ERRMSG_LVL_ERROR, "Wrong tree: Found two red in a row");
5768
if (element->left == element->right && element->left != &null_element)
5770
errmsg_printf(ERRMSG_LVL_ERROR, "Wrong tree: Found right == left");
5773
count_l=test_rb_tree(element->left,element);
5774
count_r=test_rb_tree(element->right,element);
5775
if (count_l >= 0 && count_r >= 0)
5777
if (count_l == count_r)
5778
return count_l+(element->color == SEL_ARG::BLACK);
5779
errmsg_printf(ERRMSG_LVL_ERROR, "Wrong tree: Incorrect black-count: %d - %d",
5782
return -1; // Error, no more warnings
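/*
  A standalone sketch (not the SEL_ARG class) of the two red-black properties
  the checker above verifies: no red node has a red child, and every
  root-to-leaf path carries the same number of black nodes. The node type and
  function name below are illustrative only.
*/
struct RbNode
{
  enum Color { RED, BLACK } color;
  RbNode *left;
  RbNode *right;
  RbNode *parent;
};

/* Returns the black height of the subtree, or -1 if a property is violated. */
int check_rb_properties(const RbNode *node, const RbNode *parent)
{
  if (!node)
    return 0;                                  /* leaves contribute black height 0 */
  if (node->parent != parent)
    return -1;                                 /* broken parent pointer */
  if (node->color == RbNode::RED &&
      ((node->left && node->left->color == RbNode::RED) ||
       (node->right && node->right->color == RbNode::RED)))
    return -1;                                 /* two reds in a row */
  int count_l= check_rb_properties(node->left, node);
  int count_r= check_rb_properties(node->right, node);
  if (count_l < 0 || count_r < 0 || count_l != count_r)
    return -1;                                 /* unequal black counts */
  return count_l + (node->color == RbNode::BLACK);
}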
5787
Count how many times SEL_ARG graph "root" refers to its part "key"
5790
count_key_part_usage()
5791
root An RB-Root node in a SEL_ARG graph.
5792
key Another RB-Root node in that SEL_ARG graph.
5795
The passed "root" node may refer to "key" node via root->next_key_part,
5798
This function counts how many times the node "key" is referred (via
5799
SEL_ARG::next_key_part) by
5800
- intervals of RB-tree pointed by "root",
5801
- intervals of RB-trees that are pointed by SEL_ARG::next_key_part from
5802
intervals of RB-tree pointed by "root",
5805
Here is an example (horizontal links represent next_key_part pointers,
5806
vertical links - next/prev pointers):
5809
|root|-----------------+
5813
+----+ +---+ $ | +---+ Here the return value
5814
| |- ... -| |---$-+--+->|key| will be 4.
5815
+----+ +---+ $ | | +---+
5820
| |---| |---------+ |
5827
Number of links to "key" from nodes reachable from "root".
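/*
  A standalone sketch of the counting walk described above, assuming an
  acyclic graph (the real code additionally uses the key-part ordering to
  bound the recursion). The node type and function name are illustrative,
  not the SEL_ARG API.
*/
#include <vector>

struct GraphNodeSketch
{
  std::vector<GraphNodeSketch*> intervals;   /* intervals of this RB-tree, flattened */
  GraphNodeSketch *next_key_part;            /* tree for the next key part, or NULL */
};

unsigned long count_references(const GraphNodeSketch *root, const GraphNodeSketch *key)
{
  unsigned long count= 0;
  for (const GraphNodeSketch *iv : root->intervals)
  {
    if (!iv->next_key_part)
      continue;
    if (iv->next_key_part == key)
      count++;                                /* direct link to "key" */
    else
      count+= count_references(iv->next_key_part, key);  /* follow deeper trees */
  }
  return count;
}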
5830
static ulong count_key_part_usage(SEL_ARG *root, SEL_ARG *key)
5833
for (root=root->first(); root ; root=root->next)
5835
if (root->next_key_part)
5837
if (root->next_key_part == key)
5839
if (root->next_key_part->part < key->part)
5840
count+=count_key_part_usage(root->next_key_part,key);
5848
Check if SEL_ARG::use_count value is correct
5851
SEL_ARG::test_use_count()
5852
root The root node of the SEL_ARG graph (an RB-tree root node that
5853
has the least value of sel_arg->part in the entire graph, and
5854
thus is the "origin" of the graph)
5857
Check if SEL_ARG::use_count value is correct. See the definition of
5858
use_count for what is "correct".
5861
void SEL_ARG::test_use_count(SEL_ARG *root)
5864
if (this == root && use_count != 1)
5866
errmsg_printf(ERRMSG_LVL_INFO, "Use_count: Wrong count %lu for root",use_count);
5869
if (this->type != SEL_ARG::KEY_RANGE)
5871
for (SEL_ARG *pos=first(); pos ; pos=pos->next)
5874
if (pos->next_key_part)
5876
ulong count=count_key_part_usage(root,pos->next_key_part);
5877
if (count > pos->next_key_part->use_count)
5879
errmsg_printf(ERRMSG_LVL_INFO, "Use_count: Wrong count for key at 0x%lx, %lu "
5880
"should be %lu", (long unsigned int)pos,
5881
pos->next_key_part->use_count, count);
5884
pos->next_key_part->test_use_count(root);
5887
if (e_count != elements)
5888
errmsg_printf(ERRMSG_LVL_WARN, "Wrong use count: %u (should be %u) for tree at 0x%lx",
5889
e_count, elements, (long unsigned int) this);
3524
5894
/****************************************************************************
3525
5895
MRR Range Sequence Interface implementation that walks a SEL_ARG* tree.
3526
5896
****************************************************************************/
6722
Perform key scans for all used indexes (except CPK), get rowids and merge
6723
them into an ordered, duplicate-free sequence of rowids.
6725
The merge/duplicate removal is performed using Unique class. We put all
6726
rowids into Unique, get the sorted sequence and destroy the Unique.
6728
If table has a clustered primary key that covers all rows (true for bdb
6729
and innodb currently) and one of the index_merge scans is a scan on PK,
6730
then rows that will be retrieved by PK scan are not put into Unique and
6731
primary key scan is not performed here, it is performed later separately.
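/*
  A standalone sketch of the merge step described above: collect the rowids
  produced by every non-CPK index scan into one ordered, duplicate-free
  sequence, leaving out rows that a clustered PK scan would fetch anyway.
  std::set stands in for the Unique class; all names are illustrative.
*/
#include <set>
#include <vector>
#include <cstdint>

typedef uint64_t Rowid;                       /* assumption: fixed-size rowid */

struct IndexScanSketch
{
  std::vector<Rowid> rowids;                  /* rowids this range scan returns */
};

std::vector<Rowid> merge_rowids(const std::vector<IndexScanSketch> &scans,
                                bool (*covered_by_pk_scan)(Rowid))  /* may be NULL */
{
  std::set<Rowid> unique;                     /* plays the role of Unique */
  for (const IndexScanSketch &scan : scans)
  {
    for (Rowid rid : scan.rowids)
    {
      if (covered_by_pk_scan && covered_by_pk_scan(rid))
        continue;                             /* PK scan will retrieve this row later */
      unique.insert(rid);                     /* duplicates collapse automatically */
    }
  }
  return std::vector<Rowid>(unique.begin(), unique.end());  /* sorted, duplicate-free */
}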
6738
int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
6740
List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects);
6741
QUICK_RANGE_SELECT* cur_quick;
6744
handler *file= head->file;
6746
file->extra(HA_EXTRA_KEYREAD);
6747
head->prepare_for_position();
6749
cur_quick_it.rewind();
6750
cur_quick= cur_quick_it++;
6751
assert(cur_quick != 0);
6754
We reuse the same instance of handler so we need to call both init and
6757
if (cur_quick->init() || cur_quick->reset())
6760
unique= new Unique(refpos_order_cmp, (void *)file,
6762
session->variables.sortbuff_size);
6767
while ((result= cur_quick->get_next()) == HA_ERR_END_OF_FILE)
6769
cur_quick->range_end();
6770
cur_quick= cur_quick_it++;
6774
if (cur_quick->file->inited != handler::NONE)
6775
cur_quick->file->ha_index_end();
6776
if (cur_quick->init() || cur_quick->reset())
6782
if (result != HA_ERR_END_OF_FILE)
6784
cur_quick->range_end();
6790
if (session->killed)
6793
/* skip row if it will be retrieved by clustered PK scan */
6794
if (pk_quick_select && pk_quick_select->row_in_ranges())
6797
cur_quick->file->position(cur_quick->record);
6798
result= unique->unique_add((char*)cur_quick->file->ref);
6804
/* ok, all row ids are in Unique */
6805
result= unique->get(head);
6807
doing_pk_scan= false;
6808
/* index_merge currently doesn't support "using index" at all */
6809
file->extra(HA_EXTRA_NO_KEYREAD);
6810
/* start table scan */
6811
init_read_record(&read_record, session, head, (SQL_SELECT*) 0, 1, 1);
6817
Get next row for index_merge.
6819
The rows are read from
6820
1. rowids stored in Unique.
6821
2. QUICK_RANGE_SELECT with clustered primary key (if any).
6822
The sets of rows retrieved in 1) and 2) are guaranteed to be disjoint.
6825
int QUICK_INDEX_MERGE_SELECT::get_next()
6830
return(pk_quick_select->get_next());
6832
if ((result= read_record.read_record(&read_record)) == -1)
6834
result= HA_ERR_END_OF_FILE;
6835
end_read_record(&read_record);
6836
/* All rows from Unique have been retrieved, do a clustered PK scan */
6837
if (pk_quick_select)
6839
doing_pk_scan= true;
6840
if ((result= pk_quick_select->init()) ||
6841
(result= pk_quick_select->reset()))
6843
return(pk_quick_select->get_next());
6852
Retrieve next record.
6854
QUICK_ROR_INTERSECT_SELECT::get_next()
6857
Invariant on enter/exit: all intersected selects have retrieved all index
6858
records with rowid <= some_rowid_val and no intersected select has
6859
retrieved any index records with rowid > some_rowid_val.
6860
We start fresh and loop until we have retrieved the same rowid in each of
6861
the key scans or we got an error.
6863
If a Clustered PK scan is present, it is used only to check if row
6864
satisfies its condition (and never used for row retrieval).
6868
other - Error code if any error occurred.
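/*
  A standalone sketch of the rowid-intersection loop described above: hold a
  candidate rowid and make every other scan catch up to it; a scan that
  overshoots produces the new candidate. Assumes each scan yields rowids in
  ascending order and that there is at least one scan. Types and names are
  illustrative; the clustered-PK condition check is omitted.
*/
#include <vector>
#include <optional>
#include <cstdint>
#include <cstddef>

typedef uint64_t Rowid;

struct SortedScanSketch
{
  const std::vector<Rowid> *rowids;           /* sorted rowids of one key scan */
  size_t pos;
  std::optional<Rowid> next()
  {
    if (pos >= rowids->size())
      return std::nullopt;
    return (*rowids)[pos++];
  }
};

/* Returns the next rowid present in every scan, or nullopt when one runs out. */
std::optional<Rowid> ror_intersect_next(std::vector<SortedScanSketch> &scans)
{
  std::optional<Rowid> cand= scans[0].next();
  if (!cand)
    return std::nullopt;
  size_t matched= 1;                          /* scans[0] already agrees */
  size_t i= 1 % scans.size();
  while (matched < scans.size())
  {
    std::optional<Rowid> r= scans[i].next();
    while (r && *r < *cand)
      r= scans[i].next();                     /* catch up to the candidate */
    if (!r)
      return std::nullopt;
    if (*r == *cand)
      matched++;                              /* this scan confirms the candidate */
    else
    {
      cand= r;                                /* bigger rowid becomes the new candidate */
      matched= 1;
    }
    i= (i + 1) % scans.size();
  }
  return cand;
}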
6871
int QUICK_ROR_INTERSECT_SELECT::get_next()
6873
List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects);
6874
QUICK_RANGE_SELECT* quick;
6876
uint32_t last_rowid_count=0;
6880
/* Get a rowid for first quick and save it as a 'candidate' */
6882
error= quick->get_next();
6885
while (!error && !cpk_quick->row_in_ranges())
6886
error= quick->get_next();
6891
quick->file->position(quick->record);
6892
memcpy(last_rowid, quick->file->ref, head->file->ref_length);
6893
last_rowid_count= 1;
6895
while (last_rowid_count < quick_selects.elements)
6897
if (!(quick= quick_it++))
6905
if ((error= quick->get_next()))
6907
quick->file->position(quick->record);
6908
cmp= head->file->cmp_ref(quick->file->ref, last_rowid);
6911
/* Ok, current select 'caught up' and returned ref >= cur_ref */
6914
/* Found a row with ref > cur_ref. Make it a new 'candidate' */
6917
while (!cpk_quick->row_in_ranges())
6919
if ((error= quick->get_next()))
6923
memcpy(last_rowid, quick->file->ref, head->file->ref_length);
6924
last_rowid_count= 1;
6928
/* current 'candidate' row confirmed by this select */
6933
/* We get here if we got the same row ref in all scans. */
6934
if (need_to_fetch_row)
6935
error= head->file->rnd_pos(head->record[0], last_rowid);
6936
} while (error == HA_ERR_RECORD_DELETED);
6942
Retrieve next record.
6944
QUICK_ROR_UNION_SELECT::get_next()
6947
Enter/exit invariant:
6948
For each quick select in the queue a {key,rowid} tuple has been
6949
retrieved but the corresponding row hasn't been passed to output.
6953
other - Error code if any error occurred.
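/*
  A standalone sketch of the union step described above: a min-priority queue
  keyed on rowid holds the current position of every scan; pop the smallest,
  refill from the same stream, and skip rowids equal to the previously
  returned one. The queue must be pre-filled with the first rowid of every
  stream. Types and names are illustrative, not the actual queue/Cursor API.
*/
#include <queue>
#include <vector>
#include <functional>
#include <optional>
#include <cstdint>

typedef uint64_t Rowid;

struct StreamSketch
{
  std::vector<Rowid> rowids;                  /* sorted rowids of one scan */
  size_t pos;                                 /* next unread position */
};

struct QueueEntry
{
  Rowid rid;
  size_t stream;
  bool operator>(const QueueEntry &other) const { return rid > other.rid; }
};

typedef std::priority_queue<QueueEntry, std::vector<QueueEntry>,
                            std::greater<QueueEntry> > RowidQueue;

std::optional<Rowid> ror_union_next(std::vector<StreamSketch> &streams,
                                    RowidQueue &queue,
                                    std::optional<Rowid> &prev_rowid)
{
  while (!queue.empty())
  {
    QueueEntry top= queue.top();
    queue.pop();
    StreamSketch &s= streams[top.stream];
    if (s.pos < s.rowids.size())
      queue.push(QueueEntry{s.rowids[s.pos++], top.stream});  /* refill same stream */
    if (prev_rowid && *prev_rowid == top.rid)
      continue;                               /* duplicate of the last returned rowid */
    prev_rowid= top.rid;
    return top.rid;
  }
  return std::nullopt;                        /* all streams exhausted */
}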
6956
int QUICK_ROR_UNION_SELECT::get_next()
6959
QUICK_SELECT_I *quick;
6966
if (!queue.elements)
6967
return(HA_ERR_END_OF_FILE);
6968
/* Ok, we have a queue with >= 1 scans */
6970
quick= (QUICK_SELECT_I*)queue_top(&queue);
6971
memcpy(cur_rowid, quick->last_rowid, rowid_length);
6973
/* put into queue rowid from the same stream as top element */
6974
if ((error= quick->get_next()))
6976
if (error != HA_ERR_END_OF_FILE)
6978
queue_remove(&queue, 0);
6982
quick->save_last_pos();
6983
queue_replaced(&queue);
6986
if (!have_prev_rowid)
6988
/* No rows have been returned yet */
6990
have_prev_rowid= true;
6993
dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid);
6997
cur_rowid= prev_rowid;
7000
error= head->file->rnd_pos(quick->record, prev_rowid);
7001
} while (error == HA_ERR_RECORD_DELETED);
7006
int QUICK_RANGE_SELECT::reset()
7009
unsigned char *mrange_buff;
7011
HANDLER_BUFFER empty_buf;
7013
cur_range= (QUICK_RANGE**) ranges.buffer;
7015
if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
7018
/* Allocate buffer if we need one but haven't allocated it yet */
7019
if (mrr_buf_size && !mrr_buf_desc)
7021
buf_size= mrr_buf_size;
7022
while (buf_size && !my_multi_malloc(MYF(MY_WME),
7023
&mrr_buf_desc, sizeof(*mrr_buf_desc),
7024
&mrange_buff, buf_size,
7027
/* Try to shrink the buffers until both are 0. */
7031
return(HA_ERR_OUT_OF_MEM);
7033
/* Initialize the handler buffer. */
7034
mrr_buf_desc->buffer= mrange_buff;
7035
mrr_buf_desc->buffer_end= mrange_buff + buf_size;
7036
mrr_buf_desc->end_of_used_area= mrange_buff;
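/*
  A standalone sketch of the allocation strategy used above: try the requested
  MRR buffer size and halve it on failure, giving up only when the size
  reaches zero. std::malloc stands in for my_multi_malloc; names are
  illustrative.
*/
#include <cstdlib>
#include <cstddef>

struct MrrBufferSketch
{
  unsigned char *buffer;
  unsigned char *buffer_end;
  unsigned char *end_of_used_area;
};

/* Returns true on success, false when even the smallest buffer failed. */
bool allocate_mrr_buffer(MrrBufferSketch &buf, size_t wanted_size)
{
  size_t size= wanted_size;
  unsigned char *mem= NULL;
  while (size && !(mem= static_cast<unsigned char*>(std::malloc(size))))
    size/= 2;                                 /* try to shrink the buffer */
  if (!mem)
    return false;                             /* out of memory */
  buf.buffer= mem;
  buf.buffer_end= mem + size;
  buf.end_of_used_area= mem;                  /* nothing used yet */
  return true;
}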
7040
empty_buf.buffer= empty_buf.buffer_end= empty_buf.end_of_used_area= NULL;
7043
mrr_flags |= HA_MRR_SORTED;
7044
RANGE_SEQ_IF seq_funcs= {quick_range_seq_init, quick_range_seq_next};
7045
error= file->multi_range_read_init(&seq_funcs, (void*)this, ranges.elements,
7046
mrr_flags, mrr_buf_desc? mrr_buf_desc:
7053
Range sequence interface implementation for array<QUICK_RANGE>: initialize

quick_range_seq_init()
init_param Caller-opaque parameter: QUICK_RANGE_SELECT* pointer
n_ranges Number of ranges in the sequence (ignored)
flags MRR flags (currently not used)

1 No more ranges in the sequence

uint32_t quick_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range)
QUICK_RANGE_SEQ_CTX *ctx= (QUICK_RANGE_SEQ_CTX*) rseq;

if (ctx->cur == ctx->last)
return 1; /* no more ranges */

QUICK_RANGE *cur= *(ctx->cur);
key_range *start_key= &range->start_key;
key_range *end_key= &range->end_key;

start_key->key= cur->min_key;
start_key->length= cur->min_length;
start_key->keypart_map= cur->min_keypart_map;
start_key->flag= ((cur->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
(cur->flag & EQ_RANGE) ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
end_key->key= cur->max_key;
end_key->length= cur->max_length;
end_key->keypart_map= cur->max_keypart_map;

We use HA_READ_AFTER_KEY here because if we are reading on a key
prefix, we want to find all keys with this prefix.

end_key->flag= (cur->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
range->range_flag= cur->flag;
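/*
  A standalone sketch of the range-sequence contract implemented above: an
  opaque context plus an init/next pair that hands ranges to the storage
  engine one at a time. Plain structs; not the actual RANGE_SEQ_IF or
  KEY_MULTI_RANGE definitions.
*/
struct SimpleRange
{
  int min_key;
  int max_key;
};

struct SimpleRangeSeqCtx
{
  const SimpleRange *cur;                     /* next range to hand out */
  const SimpleRange *last;                    /* one past the final range */
};

void *simple_range_seq_init(void *init_param, unsigned, unsigned)
{
  return init_param;                          /* the caller prepared the context */
}

/* Returns 1 when the sequence is exhausted, otherwise fills *out and returns 0. */
unsigned simple_range_seq_next(void *seq, SimpleRange *out)
{
  SimpleRangeSeqCtx *ctx= static_cast<SimpleRangeSeqCtx*>(seq);
  if (ctx->cur == ctx->last)
    return 1;                                 /* no more ranges */
  *out= *ctx->cur++;
  return 0;
}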
4407
static inline uint32_t get_field_keypart(KeyInfo *index, Field *field);
4409
static inline optimizer::SEL_ARG * get_index_range_tree(uint32_t index,
4410
optimizer::SEL_TREE *range_tree,
4411
optimizer::Parameter *param,
4412
uint32_t *param_idx);
4414
static bool get_constant_key_infix(KeyInfo *index_info,
4415
optimizer::SEL_ARG *index_range_tree,
4416
KeyPartInfo *first_non_group_part,
4417
KeyPartInfo *min_max_arg_part,
4418
KeyPartInfo *last_part,
4420
unsigned char *key_infix,
4421
uint32_t *key_infix_len,
4422
KeyPartInfo **first_non_infix_part);
4424
static bool check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item);
7122
MRR range sequence interface: array<QUICK_RANGE> impl: utility func for NDB
7125
mrr_persistent_flag_storage()
7126
seq Range sequence being traversed
7130
MRR/NDB implementation needs to store some bits for each range. This
7131
function returns a reference to the "range_flag" associated with the
7134
This function should be removed when we get a proper MRR/NDB
7138
Reference to range_flag associated with range number #idx
7141
uint16_t &mrr_persistent_flag_storage(range_seq_t seq, uint32_t idx)
7143
QUICK_RANGE_SEQ_CTX *ctx= (QUICK_RANGE_SEQ_CTX*)seq;
7144
return ctx->first[idx]->flag;
7149
MRR range sequence interface: array<QUICK_RANGE> impl: utility func for NDB
7152
mrr_get_ptr_by_idx()
7153
seq Range sequence being traversed
7154
idx Number of the range
7157
An extension of MRR range sequence interface needed by NDB: return the
7158
data associated with the given range.
7160
A proper MRR interface implementer is supposed to store and return
7161
range-associated data. NDB stores number of the range instead. So this
7162
is a helper function that translates range number to range associated
7165
This function does nothing, as currently there is only one user of the
7166
MRR interface - the quick range select code, and this user doesn't need
7167
to use range-associated data.
7170
Reference to range-associated data
7173
char* &mrr_get_ptr_by_idx(range_seq_t, uint32_t)
7181
Get next possible record using quick-struct.
7184
QUICK_RANGE_SELECT::get_next()
7187
Record is read into table->record[0]
7191
HA_ERR_END_OF_FILE No (more) rows in range
7195
int QUICK_RANGE_SELECT::get_next()
7198
if (in_ror_merged_scan)
7201
We don't need to signal the bitmap change as the bitmap is always the
7202
same for this head->file
7204
head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
7207
int result= file->multi_range_read_next(&dummy);
7209
if (in_ror_merged_scan)
7211
/* Restore bitmaps set on entry */
7212
head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
7219
Get the next record with a different prefix.
7222
QUICK_RANGE_SELECT::get_next_prefix()
7223
prefix_length length of cur_prefix
7224
cur_prefix prefix of a key to be searched for
7227
Each subsequent call to the method retrieves the first record that has a
7228
prefix with length prefix_length different from cur_prefix, such that the
7229
record with the new prefix is within the ranges described by
7230
this->ranges. The record found is stored into the buffer pointed by
7232
The method is useful for GROUP-BY queries with range conditions to
7233
discover the prefix of the next group that satisfies the range conditions.
7236
This method is a modified copy of QUICK_RANGE_SELECT::get_next(), so both
7237
methods should be unified into a more general one to reduce code
7242
HA_ERR_END_OF_FILE if returned all keys
7243
other if some error occurred
7246
int QUICK_RANGE_SELECT::get_next_prefix(uint32_t prefix_length,
7247
key_part_map keypart_map,
7248
unsigned char *cur_prefix)
7253
key_range start_key, end_key;
7256
/* Read the next record in the same range with prefix after cur_prefix. */
7257
assert(cur_prefix != 0);
7258
result= file->index_read_map(record, cur_prefix, keypart_map,
7260
if (result || (file->compare_key(file->end_range) <= 0))
7264
uint32_t count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer);
7267
/* Ranges have already been used up before. None is left for read. */
7269
return HA_ERR_END_OF_FILE;
7271
last_range= *(cur_range++);
7273
start_key.key= (const unsigned char*) last_range->min_key;
7274
start_key.length= cmin(last_range->min_length, (uint16_t)prefix_length);
7275
start_key.keypart_map= last_range->min_keypart_map & keypart_map;
7276
start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY :
7277
(last_range->flag & EQ_RANGE) ?
7278
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT);
7279
end_key.key= (const unsigned char*) last_range->max_key;
7280
end_key.length= cmin(last_range->max_length, (uint16_t)prefix_length);
7281
end_key.keypart_map= last_range->max_keypart_map & keypart_map;
7283
We use READ_AFTER_KEY here because if we are reading on a key
7284
prefix we want to find all keys with this prefix
7286
end_key.flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY :
7289
result= file->read_range_first(last_range->min_keypart_map ? &start_key : 0,
7290
last_range->max_keypart_map ? &end_key : 0,
7291
test(last_range->flag & EQ_RANGE),
7293
if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE))
7294
last_range= 0; // Stop searching
7296
if (result != HA_ERR_END_OF_FILE)
7298
last_range= 0; // No matching rows; go to next range
7304
Check if current row will be retrieved by this QUICK_RANGE_SELECT
7307
It is assumed that currently a scan is being done on another index
7308
which reads all necessary parts of the index that is scanned by this
7310
The implementation does a binary search on sorted array of disjoint
7311
ranges, without taking size of range into account.
7313
This function is used to filter out clustered PK scan rows in
7314
index_merge quick select.
7317
true if current row will be retrieved by this quick select
7321
bool QUICK_RANGE_SELECT::row_in_ranges()
7325
uint32_t max= ranges.elements - 1;
7326
uint32_t mid= (max + min)/2;
7330
if (cmp_next(*(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid)))
7332
/* current row value > mid->max */
7337
mid= (min + max) / 2;
7339
res= *(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid);
7340
return (!cmp_next(res) && !cmp_prev(res));
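/*
  A standalone sketch of the binary search performed by row_in_ranges(): the
  ranges are disjoint and sorted, so locate the only candidate range and then
  test both of its bounds. Illustrative types; bounds are treated as closed.
*/
#include <vector>
#include <cstdint>
#include <cstddef>

struct ClosedRange
{
  int64_t min_key;
  int64_t max_key;
};

bool row_in_ranges_sketch(const std::vector<ClosedRange> &ranges, int64_t row_key)
{
  size_t lo= 0, hi= ranges.size();
  while (lo < hi)                             /* find first range with max_key >= row_key */
  {
    size_t mid= (lo + hi) / 2;
    if (ranges[mid].max_key < row_key)
      lo= mid + 1;
    else
      hi= mid;
  }
  if (lo == ranges.size())
    return false;                             /* row is above every range */
  return row_key >= ranges[lo].min_key;       /* row_key <= max_key already holds */
}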
7344
This is a hack: we inherit from QUICK_SELECT so that we can use the
7345
get_next() interface, but we have to hold a pointer to the original
7346
QUICK_SELECT because its data are used all over the place. What
7347
should be done is to factor out the data that is needed into a base
7348
class (QUICK_SELECT), and then have two subclasses (_ASC and _DESC)
7349
which handle the ranges and implement the get_next() function. But
7350
for now, this seems to work right at least.
7353
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, uint32_t, bool *)
7354
:QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
7358
QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer;
7359
QUICK_RANGE **end_range= pr + ranges.elements;
7360
for (; pr!=end_range; pr++)
7361
rev_ranges.push_front(*pr);
7363
/* Remove EQ_RANGE flag for keys that are not using the full key */
7364
for (r = rev_it++; r; r = rev_it++)
7366
if ((r->flag & EQ_RANGE) &&
7367
head->key_info[index].key_length != r->max_length)
7368
r->flag&= ~EQ_RANGE;
7371
q->dont_free=1; // Don't free shared mem
7376
int QUICK_SELECT_DESC::get_next()
7378
/* The max key is handled as follows:
7379
* - if there is NO_MAX_RANGE, start at the end and move backwards
7380
* - if it is an EQ_RANGE, which means that max key covers the entire
7381
* key, go directly to the key and read through it (sorting backwards is
7382
* same as sorting forwards)
7383
* - if it is NEAR_MAX, go to the key or next, step back once, and
7385
* - otherwise (not NEAR_MAX == include the key), go after the key,
7386
* step back once, and move backwards
7393
{ // Already read through key
7394
result = ((last_range->flag & EQ_RANGE)
7395
? file->index_next_same(record, last_range->min_key,
7396
last_range->min_length) :
7397
file->index_prev(record));
7400
if (cmp_prev(*rev_it.ref()) == 0)
7403
else if (result != HA_ERR_END_OF_FILE)
7407
if (!(last_range= rev_it++))
7408
return HA_ERR_END_OF_FILE; // All ranges used
7410
if (last_range->flag & NO_MAX_RANGE) // Read last record
7413
if ((local_error=file->index_last(record)))
7414
return(local_error); // Empty table
7415
if (cmp_prev(last_range) == 0)
7417
last_range= 0; // No match; go to next range
7421
if (last_range->flag & EQ_RANGE)
7423
result = file->index_read_map(record, last_range->max_key,
7424
last_range->max_keypart_map,
7429
assert(last_range->flag & NEAR_MAX ||
7430
range_reads_after_key(last_range));
7431
result=file->index_read_map(record, last_range->max_key,
7432
last_range->max_keypart_map,
7433
((last_range->flag & NEAR_MAX) ?
7434
HA_READ_BEFORE_KEY :
7435
HA_READ_PREFIX_LAST_OR_PREV));
7439
if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
7441
last_range= 0; // Not found, to next range
7444
if (cmp_prev(last_range) == 0)
7446
if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE))
7447
last_range= 0; // Stop searching
7448
return 0; // Found key is in range
7450
last_range= 0; // To next range
7456
Compare if found key is over max-value
7457
Returns 0 if key <= range->max_key
7458
TODO: Figure out why this function can't be as simple as cmp_prev().
7461
int QUICK_RANGE_SELECT::cmp_next(QUICK_RANGE *range_arg)
7463
if (range_arg->flag & NO_MAX_RANGE)
7464
return 0; /* key can't be too large */
7466
KEY_PART *key_part=key_parts;
7467
uint32_t store_length;
7469
for (unsigned char *key=range_arg->max_key, *end=key+range_arg->max_length;
7471
key+= store_length, key_part++)
7474
store_length= key_part->store_length;
7475
if (key_part->null_bit)
7479
if (!key_part->field->is_null())
7483
else if (key_part->field->is_null())
7485
key++; // Skip null byte
7488
if ((cmp=key_part->field->key_cmp(key, key_part->length)) < 0)
7493
return (range_arg->flag & NEAR_MAX) ? 1 : 0; // Exact match
7498
Returns 0 if found key is inside range (found key >= range->min_key).
7501
int QUICK_RANGE_SELECT::cmp_prev(QUICK_RANGE *range_arg)
7504
if (range_arg->flag & NO_MIN_RANGE)
7505
return 0; /* key can't be too small */
7507
cmp= key_cmp(key_part_info, range_arg->min_key,
7508
range_arg->min_length);
7509
if (cmp > 0 || (cmp == 0 && (range_arg->flag & NEAR_MIN) == false))
7511
return 1; // outside of range
7516
* true if this range will require using HA_READ_AFTER_KEY
7517
See comment in get_next() about this
7520
bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range_arg)
7522
return ((range_arg->flag & (NO_MAX_RANGE | NEAR_MAX)) ||
7523
!(range_arg->flag & EQ_RANGE) ||
7524
head->key_info[index].key_length != range_arg->max_length) ? 1 : 0;
7528
void QUICK_RANGE_SELECT::add_info_string(String *str)
7530
KEY *key_info= head->key_info + index;
7531
str->append(key_info->name);
7534
void QUICK_INDEX_MERGE_SELECT::add_info_string(String *str)
7536
QUICK_RANGE_SELECT *quick;
7538
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
7539
str->append(STRING_WITH_LEN("sort_union("));
7540
while ((quick= it++))
7546
quick->add_info_string(str);
7548
if (pk_quick_select)
7551
pk_quick_select->add_info_string(str);
7556
void QUICK_ROR_INTERSECT_SELECT::add_info_string(String *str)
7559
QUICK_RANGE_SELECT *quick;
7560
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
7561
str->append(STRING_WITH_LEN("intersect("));
7562
while ((quick= it++))
7564
KEY *key_info= head->key_info + quick->index;
7569
str->append(key_info->name);
7573
KEY *key_info= head->key_info + cpk_quick->index;
7575
str->append(key_info->name);
7580
void QUICK_ROR_UNION_SELECT::add_info_string(String *str)
7583
QUICK_SELECT_I *quick;
7584
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
7585
str->append(STRING_WITH_LEN("union("));
7586
while ((quick= it++))
7592
quick->add_info_string(str);
7598
void QUICK_RANGE_SELECT::add_keys_and_lengths(String *key_names,
7599
String *used_lengths)
7603
KEY *key_info= head->key_info + index;
7604
key_names->append(key_info->name);
7605
length= int64_t2str(max_used_key_length, buf, 10) - buf;
7606
used_lengths->append(buf, length);
7609
void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names,
7610
String *used_lengths)
7615
QUICK_RANGE_SELECT *quick;
7617
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
7618
while ((quick= it++))
7624
key_names->append(',');
7625
used_lengths->append(',');
7628
KEY *key_info= head->key_info + quick->index;
7629
key_names->append(key_info->name);
7630
length= int64_t2str(quick->max_used_key_length, buf, 10) - buf;
7631
used_lengths->append(buf, length);
7633
if (pk_quick_select)
7635
KEY *key_info= head->key_info + pk_quick_select->index;
7636
key_names->append(',');
7637
key_names->append(key_info->name);
7638
length= int64_t2str(pk_quick_select->max_used_key_length, buf, 10) - buf;
7639
used_lengths->append(',');
7640
used_lengths->append(buf, length);
7644
void QUICK_ROR_INTERSECT_SELECT::add_keys_and_lengths(String *key_names,
7645
String *used_lengths)
7650
QUICK_RANGE_SELECT *quick;
7651
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
7652
while ((quick= it++))
7654
KEY *key_info= head->key_info + quick->index;
7659
key_names->append(',');
7660
used_lengths->append(',');
7662
key_names->append(key_info->name);
7663
length= int64_t2str(quick->max_used_key_length, buf, 10) - buf;
7664
used_lengths->append(buf, length);
7669
KEY *key_info= head->key_info + cpk_quick->index;
7670
key_names->append(',');
7671
key_names->append(key_info->name);
7672
length= int64_t2str(cpk_quick->max_used_key_length, buf, 10) - buf;
7673
used_lengths->append(',');
7674
used_lengths->append(buf, length);
7678
void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names,
7679
String *used_lengths)
7682
QUICK_SELECT_I *quick;
7683
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
7684
while ((quick= it++))
7690
used_lengths->append(',');
7691
key_names->append(',');
7693
quick->add_keys_and_lengths(key_names, used_lengths);
7698
/*******************************************************************************
7699
* Implementation of QUICK_GROUP_MIN_MAX_SELECT
7700
*******************************************************************************/
7702
static inline uint32_t get_field_keypart(KEY *index, Field *field);
7703
static inline SEL_ARG * get_index_range_tree(uint32_t index, SEL_TREE* range_tree,
7704
PARAM *param, uint32_t *param_idx);
7705
static bool get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
7706
KEY_PART_INFO *first_non_group_part,
7707
KEY_PART_INFO *min_max_arg_part,
7708
KEY_PART_INFO *last_part, Session *session,
7709
unsigned char *key_infix, uint32_t *key_infix_len,
7710
KEY_PART_INFO **first_non_infix_part);
7712
check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item,
7713
Field::imagetype image_type);
4427
cost_group_min_max(Table* table, KEY *index_info, uint32_t used_key_parts,
uint32_t group_key_parts, SEL_TREE *range_tree,
SEL_ARG *index_tree, ha_rows quick_prefix_records,
bool have_min, bool have_max,
double *read_cost, ha_rows *records);
5558
optimizer::QuickSelectInterface *optimizer::RangeReadPlan::make_quick(optimizer::Parameter *param, bool, memory::Root *parent_alloc)
5560
optimizer::QuickRangeSelect *quick= NULL;
5561
if ((quick= optimizer::get_quick_select(param,
5568
quick->records= records;
5569
quick->read_time= read_cost;
5575
uint32_t optimizer::RorScanInfo::findFirstNotSet() const
5577
boost::dynamic_bitset<> map= bitsToBitset();
5578
for (boost::dynamic_bitset<>::size_type i= 0; i < map.size(); i++)
5589
size_t optimizer::RorScanInfo::getBitCount() const
5591
boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
5592
return tmp_bitset.count();
5596
void optimizer::RorScanInfo::subtractBitset(const boost::dynamic_bitset<>& in_bitset)
5598
boost::dynamic_bitset<> tmp_bitset= bitsToBitset();
5599
tmp_bitset-= in_bitset;
5600
covered_fields= tmp_bitset.to_ulong();
5604
boost::dynamic_bitset<> optimizer::RorScanInfo::bitsToBitset() const
5607
uint64_t conv= covered_fields;
5610
res.push_back((conv & 1) + '0');
5615
std::reverse(res.begin(), res.end());
5617
string final(covered_fields_size - res.length(), '0');
5619
return (boost::dynamic_bitset<>(final));
5623
} /* namespace drizzled */
8775
Construct new quick select for group queries with min/max.
8778
QUICK_GROUP_MIN_MAX_SELECT::QUICK_GROUP_MIN_MAX_SELECT()
8779
table The table being accessed
8780
join Descriptor of the current query
8781
have_min true if the query selects a MIN function
8782
have_max true if the query selects a MAX function
8783
min_max_arg_part The only argument field of all MIN/MAX functions
8784
group_prefix_len Length of all key parts in the group prefix
8785
prefix_key_parts All key parts in the group prefix
8786
index_info The index chosen for data access
8787
use_index The id of index_info
8788
read_cost Cost of this access method
8789
records Number of records returned
8790
key_infix_len Length of the key infix appended to the group prefix
8791
key_infix Infix of constants from equality predicates
8792
parent_alloc Memory pool for this and quick_prefix_select data
8798
QUICK_GROUP_MIN_MAX_SELECT::
8799
QUICK_GROUP_MIN_MAX_SELECT(Table *table, JOIN *join_arg, bool have_min_arg,
8801
KEY_PART_INFO *min_max_arg_part_arg,
8802
uint32_t group_prefix_len_arg, uint32_t group_key_parts_arg,
8803
uint32_t used_key_parts_arg, KEY *index_info_arg,
8804
uint32_t use_index, double read_cost_arg,
8805
ha_rows records_arg, uint32_t key_infix_len_arg,
8806
unsigned char *key_infix_arg, MEM_ROOT *parent_alloc)
8807
:join(join_arg), index_info(index_info_arg),
8808
group_prefix_len(group_prefix_len_arg),
8809
group_key_parts(group_key_parts_arg), have_min(have_min_arg),
8810
have_max(have_max_arg), seen_first_key(false),
8811
min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg),
8812
key_infix_len(key_infix_len_arg), min_functions_it(NULL),
8813
max_functions_it(NULL)
8818
record= head->record[0];
8819
tmp_record= head->record[1];
8820
read_time= read_cost_arg;
8821
records= records_arg;
8822
used_key_parts= used_key_parts_arg;
8823
real_key_parts= used_key_parts_arg;
8824
real_prefix_len= group_prefix_len + key_infix_len;
8826
min_max_arg_len= min_max_arg_part ? min_max_arg_part->store_length : 0;
8829
We can't have parent_alloc set as the init function can't handle this case
8832
assert(!parent_alloc);
8835
init_sql_alloc(&alloc, join->session->variables.range_alloc_block_size, 0);
8836
join->session->mem_root= &alloc;
8839
memset(&alloc, 0, sizeof(MEM_ROOT)); // ensure that it's not used
8844
Do post-constructor initialization.
8847
QUICK_GROUP_MIN_MAX_SELECT::init()
8850
The method performs initialization that cannot be done in the constructor
8851
such as memory allocations that may fail. It allocates memory for the
8852
group prefix and infix buffers, and for the lists of MIN/MAX items to be
8853
updated during execution.
8860
int QUICK_GROUP_MIN_MAX_SELECT::init()
8862
if (group_prefix) /* Already initialized. */
8865
if (!(last_prefix= (unsigned char*) alloc_root(&alloc, group_prefix_len)))
8868
We may use group_prefix to store keys with all select fields, so allocate
8869
enough space for it.
8871
if (!(group_prefix= (unsigned char*) alloc_root(&alloc,
8872
real_prefix_len + min_max_arg_len)))
8875
if (key_infix_len > 0)
8878
The memory location pointed to by key_infix will be deleted soon, so
8879
allocate a new buffer and copy the key_infix into it.
8881
unsigned char *tmp_key_infix= (unsigned char*) alloc_root(&alloc, key_infix_len);
8884
memcpy(tmp_key_infix, this->key_infix, key_infix_len);
8885
this->key_infix= tmp_key_infix;
8888
if (min_max_arg_part)
8890
if (my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16))
8895
if (!(min_functions= new List<Item_sum>))
8899
min_functions= NULL;
8902
if (!(max_functions= new List<Item_sum>))
8906
max_functions= NULL;
8908
Item_sum *min_max_item;
8909
Item_sum **func_ptr= join->sum_funcs;
8910
while ((min_max_item= *(func_ptr++)))
8912
if (have_min && (min_max_item->sum_func() == Item_sum::MIN_FUNC))
8913
min_functions->push_back(min_max_item);
8914
else if (have_max && (min_max_item->sum_func() == Item_sum::MAX_FUNC))
8915
max_functions->push_back(min_max_item);
8920
if (!(min_functions_it= new List_iterator<Item_sum>(*min_functions)))
8926
if (!(max_functions_it= new List_iterator<Item_sum>(*max_functions)))
8931
min_max_ranges.elements= 0;
8937
QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT()
8939
if (file->inited != handler::NONE)
8940
file->ha_index_end();
8941
if (min_max_arg_part)
8942
delete_dynamic(&min_max_ranges);
8943
free_root(&alloc,MYF(0));
8944
delete min_functions_it;
8945
delete max_functions_it;
8946
delete quick_prefix_select;
8951
Eventually create and add a new quick range object.
8954
QUICK_GROUP_MIN_MAX_SELECT::add_range()
8955
sel_range Range object from which a
8958
Construct a new QUICK_RANGE object from a SEL_ARG object, and
8959
add it to the array min_max_ranges. If sel_arg is an infinite
8960
range, e.g. (x < 5 or x > 4), then skip it and do not construct
8968
bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range)
8971
uint32_t range_flag= sel_range->min_flag | sel_range->max_flag;
8973
/* Skip (-inf,+inf) ranges, e.g. (x < 5 or x > 4). */
8974
if ((range_flag & NO_MIN_RANGE) && (range_flag & NO_MAX_RANGE))
8977
if (!(sel_range->min_flag & NO_MIN_RANGE) &&
8978
!(sel_range->max_flag & NO_MAX_RANGE))
8980
if (sel_range->maybe_null &&
8981
sel_range->min_value[0] && sel_range->max_value[0])
8982
range_flag|= NULL_RANGE; /* IS NULL condition */
8983
else if (memcmp(sel_range->min_value, sel_range->max_value,
8984
min_max_arg_len) == 0)
8985
range_flag|= EQ_RANGE; /* equality condition */
8987
range= new QUICK_RANGE(sel_range->min_value, min_max_arg_len,
8988
make_keypart_map(sel_range->part),
8989
sel_range->max_value, min_max_arg_len,
8990
make_keypart_map(sel_range->part),
8994
if (insert_dynamic(&min_max_ranges, (unsigned char*)&range))
9001
Opens the ranges if there are more conditions in quick_prefix_select than
9002
the ones used for jumping through the prefixes.
9005
QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges()
9008
quick_prefix_select is made over the conditions on the whole key.
9009
It defines a number of ranges of length x.
9010
However, when jumping through the prefixes we use only the first
9011
few most significant keyparts in the range key. However if there
9012
are more keyparts to follow the ones we are using we must make the
9013
condition on the key inclusive (because x < "ab" means
x[0] < 'a' OR (x[0] == 'a' AND x[1] < 'b')).
To achieve the above we must turn off the NEAR_MIN/NEAR_MAX
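/*
  A small sketch of the adjustment described above, under the assumption that
  each prefix range carries bit flags for strict ("near") bounds: when the
  range was built over more key parts than the prefix we jump on, the strict
  flags are cleared so the prefix bound becomes inclusive. The flag names and
  structure are illustrative, not the actual QUICK_RANGE layout.
*/
#include <cstdint>
#include <cstddef>

enum PrefixRangeFlagSketch { SKETCH_NEAR_MIN= 1, SKETCH_NEAR_MAX= 2 };

struct PrefixRangeSketch
{
  uint32_t flag;
};

void make_prefix_bounds_inclusive(PrefixRangeSketch *ranges, size_t count,
                                  size_t prefix_len, size_t full_range_key_len)
{
  if (prefix_len >= full_range_key_len)
    return;                                   /* the prefix already covers the whole key */
  for (size_t i= 0; i < count; i++)
    ranges[i].flag&= ~(SKETCH_NEAR_MIN | SKETCH_NEAR_MAX);
}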
9017
void QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges ()
9019
if (quick_prefix_select &&
9020
group_prefix_len < quick_prefix_select->max_used_key_length)
9025
for (inx= 0, arr= &quick_prefix_select->ranges; inx < arr->elements; inx++)
9029
get_dynamic(arr, (unsigned char*)&range, inx);
9030
range->flag &= ~(NEAR_MIN | NEAR_MAX);
9037
Determine the total number and length of the keys that will be used for
9041
QUICK_GROUP_MIN_MAX_SELECT::update_key_stat()
9044
The total length of the keys used for index lookup depends on whether
9045
there are any predicates referencing the min/max argument, and/or if
9046
the min/max argument field can be NULL.
9047
This function does an optimistic analysis whether the search key might
9048
be extended by a constant for the min/max keypart. It is 'optimistic'
9049
because during actual execution it may happen that a particular range
9050
is skipped, and then a shorter key will be used. However this is data
9051
dependent and can't be easily estimated here.
9057
void QUICK_GROUP_MIN_MAX_SELECT::update_key_stat()
9059
max_used_key_length= real_prefix_len;
9060
if (min_max_ranges.elements > 0)
9062
QUICK_RANGE *cur_range;
9064
{ /* Check if the right-most range has a lower boundary. */
9065
get_dynamic(&min_max_ranges, (unsigned char*)&cur_range,
9066
min_max_ranges.elements - 1);
9067
if (!(cur_range->flag & NO_MIN_RANGE))
9069
max_used_key_length+= min_max_arg_len;
9075
{ /* Check if the left-most range has an upper boundary. */
9076
get_dynamic(&min_max_ranges, (unsigned char*)&cur_range, 0);
9077
if (!(cur_range->flag & NO_MAX_RANGE))
9079
max_used_key_length+= min_max_arg_len;
9085
else if (have_min && min_max_arg_part &&
9086
min_max_arg_part->field->real_maybe_null())
9089
If a MIN/MAX argument value is NULL, we can quickly determine
9090
that we're in the beginning of the next group, because NULLs
9091
are always < any other value. This allows us to quickly
9092
determine the end of the current group and jump to the next
9093
group (see next_min()) and thus effectively increases the
9096
max_used_key_length+= min_max_arg_len;
9103
Initialize a quick group min/max select for key retrieval.
9106
QUICK_GROUP_MIN_MAX_SELECT::reset()
9109
Initialize the index chosen for access and find and store the prefix
9110
of the last group. The method is expensive since it performs disk access.
9117
int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
9121
file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
9122
if ((result= file->ha_index_init(index,1)))
9124
if (quick_prefix_select && quick_prefix_select->reset())
9126
result= file->index_last(record);
9127
if (result == HA_ERR_END_OF_FILE)
9129
/* Save the prefix of the last group. */
9130
key_copy(last_prefix, record, index_info, group_prefix_len);
9138
Get the next key containing the MIN and/or MAX key for the next group.
9141
QUICK_GROUP_MIN_MAX_SELECT::get_next()
9144
The method finds the next subsequent group of records that satisfies the
9145
query conditions and finds the keys that contain the MIN/MAX values for
9146
the key part referenced by the MIN/MAX function(s). Once a group and its
9147
MIN/MAX values are found, store these values in the Item_sum objects for
9148
the MIN/MAX functions. The rest of the values in the result row are stored
9149
in the Item_field::result_field of each select field. If the query does
9150
not contain MIN and/or MAX functions, then the function only finds the
9151
group prefix, which is a query answer itself.
9154
If both MIN and MAX are computed, then we use the fact that if there is
9155
no MIN key, there can't be a MAX key as well, so we can skip looking
9156
for a MAX key in this case.
9160
HA_ERR_END_OF_FILE if returned all keys
9161
other if some error occurred
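/*
  A standalone sketch of the loose index scan that get_next() performs: jump
  from group prefix to group prefix and read only the first (MIN) and last
  (MAX) index entry of each group instead of every row. An ordered std::set of
  (group_prefix, value) pairs stands in for the index; names are illustrative.
*/
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include <iterator>

typedef std::pair<std::string, int> IndexEntrySketch;   /* (group prefix, min/max column) */

struct GroupResultSketch
{
  std::string prefix;
  int min_value;
  int max_value;
};

std::vector<GroupResultSketch>
loose_index_scan(const std::set<IndexEntrySketch> &index)
{
  std::vector<GroupResultSketch> out;
  std::set<IndexEntrySketch>::const_iterator it= index.begin();
  while (it != index.end())
  {
    const std::string prefix= it->first;
    int min_value= it->second;                            /* first entry of the group */
    /* Jump directly past the group: first entry with a larger prefix. */
    std::set<IndexEntrySketch>::const_iterator next_group=
      index.upper_bound(IndexEntrySketch(prefix, std::numeric_limits<int>::max()));
    int max_value= std::prev(next_group)->second;         /* last entry of the group */
    out.push_back(GroupResultSketch{prefix, min_value, max_value});
    it= next_group;                                       /* jump to the next group */
  }
  return out;
}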
9164
int QUICK_GROUP_MIN_MAX_SELECT::get_next()
9169
int is_last_prefix= 0;
9172
Loop until a group is found that satisfies all query conditions or the last
9177
result= next_prefix();
9179
Check if this is the last group prefix. Notice that at this point
9180
this->record contains the current prefix in record format.
9184
is_last_prefix= key_cmp(index_info->key_part, last_prefix,
9186
assert(is_last_prefix <= 0);
9190
if (result == HA_ERR_KEY_NOT_FOUND)
9197
min_res= next_min();
9199
update_min_result();
9201
/* If there is no MIN in the group, there is no MAX either. */
9202
if ((have_max && !have_min) ||
9203
(have_max && have_min && (min_res == 0)))
9205
max_res= next_max();
9207
update_max_result();
9208
/* If a MIN was found, a MAX must have been found as well. */
9209
assert(((have_max && !have_min) ||
9210
(have_max && have_min && (max_res == 0))));
9213
If this is just a GROUP BY or DISTINCT without MIN or MAX and there
9214
are equality predicates for the key parts after the group, find the
9215
first sub-group with the extended prefix.
9217
if (!have_min && !have_max && key_infix_len > 0)
9218
result= file->index_read_map(record, group_prefix,
9219
make_prev_keypart_map(real_key_parts),
9222
result= have_min ? min_res : have_max ? max_res : result;
9223
} while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
9224
is_last_prefix != 0);
9229
Partially mimic the behavior of end_select_send. Copy the
9230
field data from Item_field::field into Item_field::result_field
9231
of each non-aggregated field (the group fields, and optionally
9232
other fields in non-ANSI SQL mode).
9234
copy_fields(&join->tmp_table_param);
9236
else if (result == HA_ERR_KEY_NOT_FOUND)
9237
result= HA_ERR_END_OF_FILE;
9244
Retrieve the minimal key in the next group.
9247
QUICK_GROUP_MIN_MAX_SELECT::next_min()
9250
Find the minimal key within this group such that the key satisfies the query
9251
conditions and NULL semantics. The found key is loaded into this->record.
9254
Depending on the values of min_max_ranges.elements, key_infix_len, and
9255
whether there is a NULL in the MIN field, this function may directly
9256
return without any data access. In this case we use the key loaded into
9257
this->record by the call to this->next_prefix() just before this call.
9261
HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions.
9262
HA_ERR_END_OF_FILE - "" -
9263
other if some error occurred
9266
int QUICK_GROUP_MIN_MAX_SELECT::next_min()
9270
/* Find the MIN key using the eventually extended group prefix. */
9271
if (min_max_ranges.elements > 0)
9273
if ((result= next_min_in_range()))
9278
/* Apply the constant equality conditions to the non-group select fields */
9279
if (key_infix_len > 0)
9281
if ((result= file->index_read_map(record, group_prefix,
9282
make_prev_keypart_map(real_key_parts),
9283
HA_READ_KEY_EXACT)))
9288
If the min/max argument field is NULL, skip subsequent rows in the same
9289
group with NULL in it. Notice that:
9290
- if the first row in a group doesn't have a NULL in the field, no row
9291
in the same group has (because NULL < any other value),
9292
- min_max_arg_part->field->ptr points to some place in 'record'.
9294
if (min_max_arg_part && min_max_arg_part->field->is_null())
9296
/* Find the first subsequent record without NULL in the MIN/MAX field. */
9297
key_copy(tmp_record, record, index_info, 0);
9298
result= file->index_read_map(record, tmp_record,
9299
make_keypart_map(real_key_parts),
9302
Check if the new record belongs to the current group by comparing its
9303
prefix with the group's prefix. If it is from the next group, then the
9304
whole group has NULLs in the MIN/MAX field, so use the first record in
9305
the group as a result.
9307
It is possible to reuse this new record as the result candidate for the
9308
next call to next_min(), and to save one lookup in the next call. For
9309
this add a new member 'this->next_group_prefix'.
9313
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
9314
key_restore(record, tmp_record, index_info, 0);
9316
else if (result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE)
9317
result= 0; /* There is a result in any case. */
9322
If the MIN attribute is non-nullable, this->record already contains the
9323
MIN key in the group, so just return.
9330
Retrieve the maximal key in the next group.
9333
QUICK_GROUP_MIN_MAX_SELECT::next_max()
9336
Lookup the maximal key of the group, and store it into this->record.
9340
HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions.
9341
HA_ERR_END_OF_FILE - "" -
9342
other if some error occurred
9345
int QUICK_GROUP_MIN_MAX_SELECT::next_max()
9349
/* Get the last key in the (possibly extended) group. */
9350
if (min_max_ranges.elements > 0)
9351
result= next_max_in_range();
9353
result= file->index_read_map(record, group_prefix,
9354
make_prev_keypart_map(real_key_parts),
9355
HA_READ_PREFIX_LAST);
9361
Determine the prefix of the next group.
9364
QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
9367
Determine the prefix of the next group that satisfies the query conditions.
9368
If there is a range condition referencing the group attributes, use a
9369
QUICK_RANGE_SELECT object to retrieve the *first* key that satisfies the
9370
condition. If there is a key infix of constants, append this infix
9371
immediately after the group attributes. The possibly extended prefix is
9372
stored in this->group_prefix. The first key of the found group is stored in
9373
this->record, on which relies this->next_min().
9377
HA_ERR_KEY_NOT_FOUND if there is no key with the formed prefix
9378
HA_ERR_END_OF_FILE if there are no more keys
9379
other if some error occurred
9381
int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
9385
if (quick_prefix_select)
9387
unsigned char *cur_prefix= seen_first_key ? group_prefix : NULL;
9388
if ((result= quick_prefix_select->get_next_prefix(group_prefix_len,
9389
make_prev_keypart_map(group_key_parts), cur_prefix)))
9391
seen_first_key= true;
9395
if (!seen_first_key)
9397
result= file->index_first(record);
9400
seen_first_key= true;
9404
/* Load the first key in this group into record. */
9405
result= file->index_read_map(record, group_prefix,
9406
make_prev_keypart_map(group_key_parts),
9413
/* Save the prefix of this group for subsequent calls. */
9414
key_copy(group_prefix, record, index_info, group_prefix_len);
9415
/* Append key_infix to group_prefix. */
9416
if (key_infix_len > 0)
9417
memcpy(group_prefix + group_prefix_len,
9418
key_infix, key_infix_len);
9425
Find the minimal key in a group that satisfies some range conditions for the
9426
min/max argument field.
9429
QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
9432
Given the sequence of ranges min_max_ranges, find the minimal key that is
9433
in the left-most possible range. If there is no such key, then the current
9434
group does not have a MIN key that satisfies the WHERE clause. If a key is
9435
found, its value is stored in this->record.
9439
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
9441
HA_ERR_END_OF_FILE - "" -
9445
int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
9447
ha_rkey_function find_flag;
9448
key_part_map keypart_map;
9449
QUICK_RANGE *cur_range;
9450
bool found_null= false;
9451
int result= HA_ERR_KEY_NOT_FOUND;
9452
basic_string<unsigned char> max_key;
9454
max_key.reserve(real_prefix_len + min_max_arg_len);
9456
assert(min_max_ranges.elements > 0);
9458
for (uint32_t range_idx= 0; range_idx < min_max_ranges.elements; range_idx++)
9459
{ /* Search from the left-most range to the right. */
9460
get_dynamic(&min_max_ranges, (unsigned char*)&cur_range, range_idx);
9463
If the current value for the min/max argument is bigger than the right
9464
boundary of cur_range, there is no need to check this range.
9466
if (range_idx != 0 && !(cur_range->flag & NO_MAX_RANGE) &&
9467
(key_cmp(min_max_arg_part, (const unsigned char*) cur_range->max_key,
9468
min_max_arg_len) == 1))
9471
if (cur_range->flag & NO_MIN_RANGE)
9473
keypart_map= make_prev_keypart_map(real_key_parts);
9474
find_flag= HA_READ_KEY_EXACT;
9478
/* Extend the search key with the lower boundary for this range. */
9479
memcpy(group_prefix + real_prefix_len, cur_range->min_key,
9480
cur_range->min_length);
9481
keypart_map= make_keypart_map(real_key_parts);
9482
find_flag= (cur_range->flag & (EQ_RANGE | NULL_RANGE)) ?
9483
HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MIN) ?
9484
HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT;
9487
result= file->index_read_map(record, group_prefix, keypart_map, find_flag);
9490
if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
9491
(cur_range->flag & (EQ_RANGE | NULL_RANGE)))
9492
continue; /* Check the next range. */
9495
In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE,
9496
HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this
9497
range, it can't succeed for any other subsequent range.
9502
/* A key was found. */
9503
if (cur_range->flag & EQ_RANGE)
9504
break; /* No need to perform the checks below for equal keys. */
9506
if (cur_range->flag & NULL_RANGE)
9509
Remember this key, and continue looking for a non-NULL key that
9510
satisfies some other condition.
9512
memcpy(tmp_record, record, head->s->rec_buff_length);
9517
/* Check if record belongs to the current group. */
9518
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
9520
result= HA_ERR_KEY_NOT_FOUND;
9524
/* If there is an upper limit, check if the found key is in the range. */
9525
if ( !(cur_range->flag & NO_MAX_RANGE) )
9527
/* Compose the MAX key for the range. */
9529
max_key.append(group_prefix, real_prefix_len);
9530
max_key.append(cur_range->max_key, cur_range->max_length);
9531
/* Compare the found key with max_key. */
9532
int cmp_res= key_cmp(index_info->key_part,
9534
real_prefix_len + min_max_arg_len);
9535
if ((!((cur_range->flag & NEAR_MAX) && (cmp_res == -1)) || (cmp_res <= 0)))
9537
result= HA_ERR_KEY_NOT_FOUND;
9541
/* If we got to this point, the current key qualifies as MIN. */
9542
assert(result == 0);
9546
If there was a key with NULL in the MIN/MAX field, and there was no other
9547
key without NULL from the same group that satisfies some other condition,
9548
then use the key with the NULL.
9550
if (found_null && result)
9552
memcpy(record, tmp_record, head->s->rec_buff_length);
9560
Find the maximal key in a group that satisfies some range conditions for the
9561
min/max argument field.
9564
QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
9567
Given the sequence of ranges min_max_ranges, find the maximal key that is
9568
in the right-most possible range. If there is no such key, then the current
9569
group does not have a MAX key that satisfies the WHERE clause. If a key is
9570
found, its value is stored in this->record.
9574
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
9576
HA_ERR_END_OF_FILE - "" -
9580
int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
9582
ha_rkey_function find_flag;
9583
key_part_map keypart_map;
9584
QUICK_RANGE *cur_range;
9586
basic_string<unsigned char> min_key;
9587
min_key.reserve(real_prefix_len + min_max_arg_len);
9589
assert(min_max_ranges.elements > 0);
9591
for (uint32_t range_idx= min_max_ranges.elements; range_idx > 0; range_idx--)
9592
{ /* Search from the right-most range to the left. */
9593
get_dynamic(&min_max_ranges, (unsigned char*)&cur_range, range_idx - 1);
9596
If the current value for the min/max argument is smaller than the left
9597
boundary of cur_range, there is no need to check this range.
9599
if (range_idx != min_max_ranges.elements &&
9600
!(cur_range->flag & NO_MIN_RANGE) &&
9601
(key_cmp(min_max_arg_part, (const unsigned char*) cur_range->min_key,
9602
min_max_arg_len) == -1))
9605
if (cur_range->flag & NO_MAX_RANGE)
9607
keypart_map= make_prev_keypart_map(real_key_parts);
9608
find_flag= HA_READ_PREFIX_LAST;
9612
/* Extend the search key with the upper boundary for this range. */
9613
memcpy(group_prefix + real_prefix_len, cur_range->max_key,
9614
cur_range->max_length);
9615
keypart_map= make_keypart_map(real_key_parts);
9616
find_flag= (cur_range->flag & EQ_RANGE) ?
9617
HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MAX) ?
9618
HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV;
9621
result= file->index_read_map(record, group_prefix, keypart_map, find_flag);
9625
if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
9626
(cur_range->flag & EQ_RANGE))
9627
continue; /* Check the next range. */
9630
If no key was found with this upper bound, there certainly are no keys
9631
in the ranges to the left.
9635
/* A key was found. */
9636
if (cur_range->flag & EQ_RANGE)
9637
return 0; /* No need to perform the checks below for equal keys. */
9639
/* Check if record belongs to the current group. */
9640
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
9641
continue; // Row not found
9643
/* If there is a lower limit, check if the found key is in the range. */
9644
if ( !(cur_range->flag & NO_MIN_RANGE) )
9646
/* Compose the MIN key for the range. */
9648
min_key.append(group_prefix, real_prefix_len);
9649
min_key.append(cur_range->min_key, cur_range->min_length);
9651
/* Compare the found key with min_key. */
9652
int cmp_res= key_cmp(index_info->key_part,
9654
real_prefix_len + min_max_arg_len);
9655
if ((!((cur_range->flag & NEAR_MIN) && (cmp_res == 1)) ||
9659
/* If we got to this point, the current key qualifies as MAX. */
9662
return HA_ERR_KEY_NOT_FOUND;
9667
Update all MIN function results with the newly found value.
9670
QUICK_GROUP_MIN_MAX_SELECT::update_min_result()
9673
The method iterates through all MIN functions and updates the result value
9674
of each function by calling Item_sum::reset(), which in turn picks the new
9675
result value from this->head->record[0], previously updated by
9676
next_min(). The updated value is stored in a member variable of each of the
9677
Item_sum objects, depending on the value type.
9680
The update must be done separately for MIN and MAX, immediately after
9681
next_min() was called and before next_max() is called, because both MIN and
9682
MAX take their result value from the same buffer this->head->record[0]
9683
(i.e. this->record).
9689
void QUICK_GROUP_MIN_MAX_SELECT::update_min_result()
9693
min_functions_it->rewind();
9694
while ((min_func= (*min_functions_it)++))
9700
Update all MAX function results with the newly found value.
9703
QUICK_GROUP_MIN_MAX_SELECT::update_max_result()
9706
The method iterates through all MAX functions and updates the result value
9707
of each function by calling Item_sum::reset(), which in turn picks the new
9708
result value from this->head->record[0], previously updated by
9709
next_max(). The updated value is stored in a member variable of each of the
9710
Item_sum objects, depending on the value type.
9713
The update must be done separately for MIN and MAX, immediately after
9714
next_max() was called, because both MIN and MAX take their result value
9715
from the same buffer this->head->record[0] (i.e. this->record).
9721
void QUICK_GROUP_MIN_MAX_SELECT::update_max_result()
9725
max_functions_it->rewind();
9726
while ((max_func= (*max_functions_it)++))
9732
Append comma-separated list of keys this quick select uses to key_names;
9733
append comma-separated list of corresponding used lengths to used_lengths.
9736
QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths()
9737
key_names [out] Names of used indexes
9738
used_lengths [out] Corresponding lengths of the index names
9741
This method is used by select_describe to extract the names of the
9742
indexes used by a quick select.
9746
void QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths(String *key_names,
9747
String *used_lengths)
9751
key_names->append(index_info->name);
9752
length= int64_t2str(max_used_key_length, buf, 10) - buf;
9753
used_lengths->append(buf, length);
9756
static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, const char *)
9758
SEL_ARG **key,**end;
9762
String tmp(buff,sizeof(buff),&my_charset_bin);
9764
for (idx= 0,key=tree->keys, end=key+param->keys ;
9768
if (tree_map->is_set(idx))
9770
uint32_t keynr= param->real_keynr[idx];
9773
tmp.append(param->table->key_info[keynr].name);
9777
tmp.append(STRING_WITH_LEN("(empty)"));
9781
static void print_ror_scans_arr(Table *table,
9782
const char *, struct st_ror_scan_info **start,
9783
struct st_ror_scan_info **end)
9786
String tmp(buff,sizeof(buff),&my_charset_bin);
9788
for (;start != end; start++)
9792
tmp.append(table->key_info[(*start)->keynr].name);
9795
tmp.append(STRING_WITH_LEN("(empty)"));
9798
/*****************************************************************************
9799
** Instantiate templates
9800
*****************************************************************************/
9802
#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION
9803
template class List<QUICK_RANGE>;
9804
template class List_iterator<QUICK_RANGE>;