/* Copyright (C) 2001 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

/*
  Functions to handle quick removal of duplicates.
  This code is used when doing multi-table deletes to find the rows in
  reference tables that need to be deleted.

  The basic idea is as follows:

  First, store all strings in a binary tree, ignoring duplicates.
  When the tree uses more memory than 'max_heap_table_size',
  write the tree (in sorted order) out to disk and start with a new tree.
  When all data has been generated, merge the trees (removing any found
  duplicates).

  The unique entries will be returned in sort order, to ensure that we do the
  deletes in disk order.
*/

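/*
  Illustrative usage sketch (a hypothetical caller, not code from this file;
  rowid_cmp, rowid_cmp_arg, rowid_length, max_heap_table_size, row and
  next_reference_row() are made-up names, put() is assumed from the class
  declaration, the other members are defined below):

    Unique deduper(rowid_cmp, rowid_cmp_arg, rowid_length, max_heap_table_size);
    while ((row= next_reference_row()))
      deduper.put(row->rowid);        // duplicates are silently dropped;
                                      // a full in-memory tree is flushed to disk
    deduper.get(table);               // merge the trees; rows can now be
                                      // read back in disk order
*/
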
#include <drizzled/server_includes.h>
#include <drizzled/sql_sort.h>
#include <drizzled/session.h>
#include <queue>

#if defined(CMATH_NAMESPACE)
using namespace CMATH_NAMESPACE;
#endif

using namespace std;

int unique_write_to_file(unsigned char* key, element_count,
                         Unique *unique)
{
  /*
    Use unique->size (size of element stored in the tree) and not
    unique->tree.size_of_element. The latter is different from unique->size
    when the tree implementation chooses to store a pointer to the key in
    TREE_ELEMENT (instead of storing the element itself there).
  */
  return my_b_write(&unique->file, key, unique->size) ? 1 : 0;
}

int unique_write_to_ptrs(unsigned char* key,
                         element_count, Unique *unique)
{
  memcpy(unique->record_pointers, key, unique->size);
  unique->record_pointers+= unique->size;
  return 0;
}

Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
               uint32_t size_arg, size_t max_in_memory_size_arg)
  :max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0)
{
  my_b_clear(&file);
  init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0,
            NULL, comp_func_fixed_arg);
  /* If the following fails, the next add will also fail */
  my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
  /*
    If you change the following, change it in the get_max_elements function, too.
  */
  max_elements= (ulong) (max_in_memory_size /
                         ALIGN_SIZE(sizeof(TREE_ELEMENT)+size));
  open_cached_file(&file, drizzle_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE,
                   MYF(MY_WME));
}


/*
  Calculate log2(n!)

  NOTES
    Stirling's approximate formula is used:

      n! ~= sqrt(2*M_PI*n) * (n/M_E)^n

    Derivation of formula used for calculations is as follows:

    log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) =

      = (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2).
*/

inline double log2_n_fact(double x)
{
  return (log(2*M_PI*x)/2 + x*log(x/M_E)) / M_LN2;
}

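/*
  Quick sanity check of the approximation (rounded figures): for n = 100,
  log2(100!) is about 524.8, while the expression above evaluates to

    (log(2*M_PI*100)/2 + 100*log(100/M_E)) / M_LN2
      ~= (3.22 + 360.5) / 0.6931 ~= 524.8,

  i.e. the Stirling-based estimate is accurate to well under one percent,
  which is plenty for cost estimation.
*/
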
/*
  Calculate the cost of a merge_buffers function call for a given sequence of
  input stream lengths and store the number of rows in the result stream
  in *last.

  SYNOPSIS
    get_merge_buffers_cost()
      buff_elems  Array of #s of elements in buffers
      elem_size   Size of element stored in buffer
      first       Pointer to first merged element size
      last        Pointer to last merged element size

  RETURN
    Cost of merge_buffers operation in disk seeks.

  NOTES
    It is assumed that no rows are eliminated during merge.
    The cost is calculated as

      cost(read_and_write) + cost(merge_comparisons).

    All bytes in the sequences are read and written back during merge, so the
    cost of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write).

    For the comparison cost we assume that all merged sequences have the same
    length, so each of the total_buf_elems elements will be added to a sort
    heap with (n_buffers-1) elements. This gives the comparison cost:

      total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID;
*/

static double get_merge_buffers_cost(uint32_t *, uint32_t elem_size,
                                     uint32_t *first, uint32_t *last)
{
  uint32_t total_buf_elems= 0;
  for (uint32_t *pbuf= first; pbuf <= last; pbuf++)
    total_buf_elems+= *pbuf;
  *last= total_buf_elems;

  int n_buffers= last - first + 1;

  /* Using the log2(n)=log(n)/log(2) formula */
  return 2*((double)total_buf_elems*elem_size) / IO_SIZE +
    total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2);
}

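/*
  Worked example (illustrative; assumes IO_SIZE is 4096 bytes): merging 7
  streams of 1000 8-byte elements each gives total_buf_elems = 7000, so

    io cost         = 2 * 7000 * 8 / 4096 ~= 27.3 "seeks"
    comparison cost = 7000 * log2(7) / TIME_FOR_COMPARE_ROWID
                    ~= 19650 / TIME_FOR_COMPARE_ROWID

  and *last is left holding 7000, the row count of the merged stream.
*/
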
/*
  Calculate the cost of merging buffers into one in Unique::get, i.e. calculate
  how long (in terms of disk seeks) the two calls
    merge_many_buffs(...);
    merge_buffers(...);
  will take.

  SYNOPSIS
    get_merge_many_buffs_cost()
      buffer        buffer space for temporary data, at least
                    Unique::get_cost_calc_buff_size bytes
      maxbuffer     # of full buffers
      max_n_elems   # of elements in first maxbuffer buffers
      last_n_elems  # of elements in last buffer
      elem_size     size of buffer element

  NOTES
    maxbuffer+1 buffers are merged, where the first maxbuffer buffers contain
    max_n_elems elements each and the last buffer contains last_n_elems elements.

    The current implementation does a dumb simulation of the merge_many_buffs
    function's actions.

  RETURN
    Cost of merge in disk seeks.
*/

static double get_merge_many_buffs_cost(uint32_t *buffer,
                                        uint32_t maxbuffer, uint32_t max_n_elems,
                                        uint32_t last_n_elems, int elem_size)
{
  register int i;
  double total_cost= 0.0;
  uint32_t *buff_elems= buffer; /* #s of elements in each of merged sequences */

  /*
    Set initial state: the first maxbuffer sequences contain max_n_elems
    elements each, the last sequence contains last_n_elems elements.
  */
  for (i = 0; i < (int)maxbuffer; i++)
    buff_elems[i]= max_n_elems;
  buff_elems[maxbuffer]= last_n_elems;

  /*
    Do it exactly as the merge_many_buff function does, calling
    get_merge_buffers_cost to get the cost of each merge_buffers call.
  */
  if (maxbuffer >= MERGEBUFF2)
  {
    while (maxbuffer >= MERGEBUFF2)
    {
      uint32_t lastbuff= 0;
      for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF)
      {
        total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
                                           buff_elems + i,
                                           buff_elems + i + MERGEBUFF-1);
        lastbuff++;
      }
      total_cost+=get_merge_buffers_cost(buff_elems, elem_size,
                                         buff_elems + i,
                                         buff_elems + maxbuffer);
      maxbuffer= lastbuff;
    }
  }

  /* Simulate the final merge_buff call. */
  total_cost += get_merge_buffers_cost(buff_elems, elem_size,
                                       buff_elems, buff_elems + maxbuffer);
  return total_cost;
}

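/*
  A quick trace of the simulation above (no specific constant values assumed):
  with maxbuffer+1 sequences where maxbuffer < MERGEBUFF2, the while loop is
  skipped entirely and

    total_cost = get_merge_buffers_cost(buff_elems, elem_size,
                                        buff_elems, buff_elems + maxbuffer),

  i.e. just the single final merge over all sequences; larger inputs are
  reduced MERGEBUFF sequences at a time, one extra pass per iteration of the
  outer while loop.
*/
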
/*
  Calculate the cost of using Unique for processing nkeys elements of size
  key_size using max_in_memory_size memory.

  SYNOPSIS
    Unique::get_use_cost()
      buffer    space for temporary data; use Unique::get_cost_calc_buff_size
                to get the # of bytes needed.
      nkeys     # of elements in Unique
      key_size  size of each element in bytes
      max_in_memory_size  amount of memory Unique will be allowed to use

  RETURN
    Cost in disk seeks.

  NOTES
    cost(using_unique) =
      cost(create_trees) +  (see #1)
      cost(merge) +         (see #2)
      cost(read_result)     (see #3)

    1. Cost of tree creation
      For each Unique::put operation there will be 2*log2(n+1) element
      comparisons, where n runs from 1 to tree_size (we assume that all added
      elements are different). Together this gives:

      n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!)

      then cost(tree_creation) = n_compares*ROWID_COMPARE_COST;

      Total cost of creating trees:
      (n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost.

      An approximate value of log2(N!) is calculated by the log2_n_fact function.

    2. Cost of merging.
      If only one tree is created by Unique, no merging will be necessary.
      Otherwise, we model execution of the merge_many_buff function and count
      the # of merges. (The reason behind this is that the number of buffers is
      small, while the size of the buffers is big, and we don't want to lose
      precision with an O(x)-style formula.)

    3. If only one tree is created by Unique, no disk io will happen.
      Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume
      these will be random seeks.
*/

double Unique::get_use_cost(uint32_t *buffer, uint32_t nkeys, uint32_t key_size,
                            size_t max_in_memory_size)
{
  ulong max_elements_in_tree;
  ulong last_tree_elems;
  int   n_full_trees; /* number of trees in unique - 1 */
  double result;

  max_elements_in_tree= ((ulong) max_in_memory_size /
                         ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));

  n_full_trees=    nkeys / max_elements_in_tree;
  last_tree_elems= nkeys % max_elements_in_tree;

  /* Calculate cost of creating trees */
  result= 2*log2_n_fact(last_tree_elems + 1.0);
  if (n_full_trees)
    result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0);
  result /= TIME_FOR_COMPARE_ROWID;

  if (!n_full_trees)
    return result;

  /*
    There is more than one tree and merging is necessary.
    First, add the cost of writing all trees to disk, assuming that all disk
    writes are sequential.
  */
  result += DISK_SEEK_BASE_COST * n_full_trees *
              ceil(((double) key_size)*max_elements_in_tree / IO_SIZE);
  result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE);

  /* Cost of merge */
  double merge_cost= get_merge_many_buffs_cost(buffer, n_full_trees,
                                               max_elements_in_tree,
                                               last_tree_elems, key_size);
  if (merge_cost < 0.0)
    return merge_cost;

  result += merge_cost;
  /*
    Add the cost of reading the resulting sequence, assuming there were no
    duplicate elements.
  */
  result += ceil((double)key_size*nkeys/IO_SIZE);

  return result;
}

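/*
  Worked example of the cost model (illustrative numbers only): with
  nkeys = 1,000,000, max_elements_in_tree = 100,000 and key_size = 8 we get
  n_full_trees = 10 and last_tree_elems = 0, so the code above adds up

    create_trees ~= (2*log2(1!) + 10*log2(100001!)) / TIME_FOR_COMPARE_ROWID
    write_trees  ~= DISK_SEEK_BASE_COST * 10 * ceil(8.0 * 100000 / IO_SIZE)
    merge         = get_merge_many_buffs_cost(...) over the 11 sequences
    read_result  ~= ceil(8.0 * 1000000 / IO_SIZE)

  which together cover the create/merge/read components described in the
  NOTES above.
*/
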
Unique::~Unique()
{
  close_cached_file(&file);
  delete_tree(&tree);
  delete_dynamic(&file_ptrs);
}


/* Write tree to disk; clear tree */
bool Unique::flush()
{
  BUFFPEK file_ptr;
  elements+= tree.elements_in_tree;
  file_ptr.count=tree.elements_in_tree;
  file_ptr.file_pos=my_b_tell(&file);

  if (tree_walk(&tree, (tree_walk_action) unique_write_to_file,
                (void*) this, left_root_right) ||
      insert_dynamic(&file_ptrs, (unsigned char*) &file_ptr))
    return 1;
  delete_tree(&tree);
  return 0;
}

/*
  Clear the tree and the file.
  You must call reset() if you want to reuse Unique after walk().
*/

void
Unique::reset()
{
  reset_tree(&tree);
  /*
    If elements != 0, some trees were stored in the file (see how
    flush() works). Note that we can not count on my_b_tell(&file) == 0
    here, because it can return 0 right after walk(), and walk() does not
    reset any Unique member.
  */
  if (elements)
  {
    reset_dynamic(&file_ptrs);
    reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1);
  }
  elements= 0;
}

/*
  The comparison function, passed to queue_init() in merge_walk() and in
  merge_buffers() when the latter is called from Uniques::get(), must
  use the comparison function of Uniques::tree, but compare members of struct
  BUFFPEK.
*/

#ifdef __cplusplus
extern "C" {
#endif

static int buffpek_compare(void *arg, unsigned char *key_ptr1, unsigned char *key_ptr2)
{
  BUFFPEK_COMPARE_CONTEXT *ctx= (BUFFPEK_COMPARE_CONTEXT *) arg;
  return ctx->key_compare(ctx->key_compare_arg,
                          *((unsigned char **) key_ptr1), *((unsigned char **)key_ptr2));
}

#ifdef __cplusplus
}
#endif

/*
  The comparison function object, passed to a priority_queue in merge_walk()
  as its sort function parameter.
*/

class buffpek_compare_functor
{
  qsort_cmp2 key_compare;
  void *key_compare_arg;
  public:
  buffpek_compare_functor(qsort_cmp2 in_key_compare, void *in_compare_arg)
    : key_compare(in_key_compare), key_compare_arg(in_compare_arg) { }
  inline bool operator()(const BUFFPEK *i, const BUFFPEK *j)
  {
    return key_compare(key_compare_arg,
                       i->key, j->key);
  }
};

/*
  DESCRIPTION

    This function is very similar to merge_buffers, but instead of writing
    sorted unique keys to the output file, it invokes walk_action for each key.
    This saves I/O if you need to pass through all unique keys only once.

  SYNOPSIS
    merge_walk()
      All params are 'IN' (but see comment for begin, end):
      merge_buffer       buffer to perform cached piece-by-piece loading
                         of trees; initially the buffer is empty
      merge_buffer_size  size of merge_buffer. Must be aligned with
                         key_length
      key_length         size of tree element; key_length * (end - begin)
                         must be less than or equal to merge_buffer_size.
      begin              pointer to BUFFPEK struct for the first tree.
      end                pointer to BUFFPEK struct for the last tree;
                         end > begin and [begin, end) form a consecutive
                         range. BUFFPEK structs in that range are used and
                         overwritten in merge_walk().
      walk_action        element visitor. Action is called for each unique
                         key.
      walk_action_arg    argument to walk action. Passed to it on each call.
      compare            elements comparison function
      compare_arg        comparison function argument
      file               file with all trees dumped. Trees in the file
                         must contain sorted unique values. Cache must be
                         initialized in read mode.
  RETURN VALUE
    0     ok
    <> 0  error
*/

static bool merge_walk(unsigned char *merge_buffer, ulong merge_buffer_size,
                       uint32_t key_length, BUFFPEK *begin, BUFFPEK *end,
                       tree_walk_action walk_action, void *walk_action_arg,
                       qsort_cmp2 compare, void *compare_arg,
                       IO_CACHE *file)
{
  if (end <= begin ||
      merge_buffer_size < (ulong) (key_length * (end - begin + 1)))
    return 1;
  priority_queue<BUFFPEK *, vector<BUFFPEK *>, buffpek_compare_functor >
    queue(buffpek_compare_functor(compare, compare_arg));
  /* we need space for one key when a piece of merge buffer is re-read */
  merge_buffer_size-= key_length;
  unsigned char *save_key_buff= merge_buffer + merge_buffer_size;
  uint32_t max_key_count_per_piece= (uint32_t) (merge_buffer_size/(end-begin) /
                                                key_length);
  /* if piece_size is aligned reuse_freed_buffer will always hit */
  uint32_t piece_size= max_key_count_per_piece * key_length;
  uint32_t bytes_read;               /* to hold return value of read_to_buffer */
  BUFFPEK *top;
  int res= 1;
  /*
    Invariant: the queue must contain the top element from each tree that has
    not yet been completely walked through.
    Here we're forcing the invariant, inserting one element from each tree
    into the queue.
  */
  for (top= begin; top != end; ++top)
  {
    top->base= merge_buffer + (top - begin) * piece_size;
    top->max_keys= max_key_count_per_piece;
    bytes_read= read_to_buffer(file, top, key_length);
    if (bytes_read == (uint32_t) (-1))
      goto end;
    assert(bytes_read);
    queue.push(top);
  }
  top= queue.top();
  while (queue.size() > 1)
  {
    /*
      Every iteration one element is removed from the queue, and one is
      inserted by the rules of the invariant. If two adjacent elements on
      the top of the queue are not equal, the biggest one is unique, because
      all elements in each tree are unique. The action is applied only to
      unique elements.
    */
    void *old_key= top->key;
    /*
      read the next key from the cache or from the file and push it to the
      queue; this gives a new top.
    */
    top->key+= key_length;
    if (--top->mem_count)
    {
      queue.pop();
      queue.push(top);
    }
    else /* next piece should be read */
    {
      /* save old_key not to overwrite it in read_to_buffer */
      memcpy(save_key_buff, old_key, key_length);
      old_key= save_key_buff;
      bytes_read= read_to_buffer(file, top, key_length);
      if (bytes_read == (uint32_t) (-1))
        goto end;
      else if (bytes_read > 0)      /* top->key, top->mem_count are reset */
      {                             /* in read_to_buffer */
        queue.pop();
        queue.push(top);
      }
      else
      {
        /*
          Tree for the old 'top' element is empty: remove it from the queue.
        */
        queue.pop();
      }
    }
    top= queue.top();
    /* new top has been obtained; if the old top is unique, apply the action */
    if (compare(compare_arg, old_key, top->key))
    {
      if (walk_action(old_key, 1, walk_action_arg))
        goto end;
    }
  }
  /*
    Applying walk_action to the tail of the last tree: this is safe because
    either we had only one tree to begin with, or we are working with the
    last tree in the queue.
  */
  do
  {
    do
    {
      if (walk_action(top->key, 1, walk_action_arg))
        goto end;
      top->key+= key_length;
    }
    while (--top->mem_count);
    bytes_read= read_to_buffer(file, top, key_length);
    if (bytes_read == (uint32_t) (-1))
      goto end;
  }
  while (bytes_read);
  res= 0;
end:
  return res;
}

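/*
  The loop above is a classic k-way merge driven by a priority queue. A
  self-contained sketch of the same technique over plain std::vectors (purely
  illustrative; it is not part of this translation unit and uses only
  standard C++):

    #include <queue>
    #include <utility>
    #include <vector>

    typedef std::pair<size_t, size_t> Pos;   // (run index, offset in run)

    struct PosGreater                        // "greater" => min-heap behaviour
    {
      const std::vector<std::vector<int> > *runs;
      explicit PosGreater(const std::vector<std::vector<int> > *r) : runs(r) {}
      bool operator()(const Pos &a, const Pos &b) const
      {
        return (*runs)[a.first][a.second] > (*runs)[b.first][b.second];
      }
    };

    static void kway_merge_unique(const std::vector<std::vector<int> > &runs,
                                  std::vector<int> &out)
    {
      PosGreater cmp(&runs);
      std::priority_queue<Pos, std::vector<Pos>, PosGreater> q(cmp);
      for (size_t i= 0; i < runs.size(); i++)    // seed: one entry per sorted run
        if (!runs[i].empty())
          q.push(Pos(i, 0));
      while (!q.empty())
      {
        Pos top= q.top();
        q.pop();
        int v= runs[top.first][top.second];
        if (out.empty() || out.back() != v)      // emit each value only once
          out.push_back(v);
        if (++top.second < runs[top.first].size())
          q.push(top);                           // restore the invariant
      }
    }
*/
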
/*
  DESCRIPTION
    Walks consecutively through all unique elements:
    if all elements are in memory, then it simply invokes 'tree_walk', else
    all flushed trees are loaded into memory piece-by-piece, the pieces are
    sorted, and the action is called for each unique value.
    Note: since merging resets the file_ptrs state, this method can change
    the internal Unique state to undefined: if you want to reuse Unique after
    walk() you must call reset() first!
  SYNOPSIS
    Unique::walk()
      All params are 'IN':
      action  function-visitor, typed in include/my_tree.h
              function is called for each unique element
      arg     argument for visitor, which is passed to it on each call
  RETURN VALUE
    0    OK
    <> 0 error
*/

bool Unique::walk(tree_walk_action action, void *walk_action_arg)
{
  int res;
  unsigned char *merge_buffer;

  if (elements == 0)                   /* the whole tree is in memory */
    return tree_walk(&tree, action, walk_action_arg, left_root_right);

  /* flush the current tree to the file to have some memory for merge buffer */
  if (flush())
    return 1;
  if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0))
    return 1;
  if (!(merge_buffer= (unsigned char *) malloc(max_in_memory_size)))
    return 1;
  res= merge_walk(merge_buffer, (ulong) max_in_memory_size, size,
                  (BUFFPEK *) file_ptrs.buffer,
                  (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
                  action, walk_action_arg,
                  tree.compare, tree.custom_arg, &file);
  free((char*) merge_buffer);
  return res;
}

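/*
  Illustrative caller sketch (hypothetical callback; the (key, count, arg)
  visitor shape is the same one unique_write_to_file above uses):

    static int count_key(void *, element_count, void *arg)
    {
      ++*(ulong *) arg;            // visited once per unique key, in sort order
      return 0;                    // a non-zero return aborts the walk
    }

    ulong n_unique= 0;
    unique.walk(count_key, &n_unique);
    unique.reset();                // required before reusing the Unique object
*/
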
/*
  Modify the Table element so that when one calls init_records()
  the rows will be read in priority order.
*/

bool Unique::get(Table *table)
{
  SORTPARAM sort_param;
  table->sort.found_records=elements+tree.elements_in_tree;

  if (my_b_tell(&file) == 0)
  {
    /* Whole tree is in memory; Don't use disk if you don't need to */
    if ((record_pointers=table->sort.record_pointers= (unsigned char*)
         malloc(size * tree.elements_in_tree)))
    {
      (void) tree_walk(&tree, (tree_walk_action) unique_write_to_ptrs,
                       this, left_root_right);
      return 0;
    }
  }
  /* Not enough memory; Save the result to file && free memory used by tree */
  if (flush())
    return 1;

  IO_CACHE *outfile=table->sort.io_cache;
  BUFFPEK *file_ptr= (BUFFPEK*) file_ptrs.buffer;
  uint32_t maxbuffer= file_ptrs.elements - 1;
  unsigned char *sort_buffer;
  my_off_t save_pos;
  bool error=1;

  /* Open cached file if it isn't open */
  outfile=table->sort.io_cache= new IO_CACHE;
  memset(outfile, 0, sizeof(IO_CACHE));

  if (!outfile || (! my_b_inited(outfile) &&
      open_cached_file(outfile,drizzle_tmpdir,TEMP_PREFIX,READ_RECORD_BUFFER, MYF(MY_WME))))
    return 1;
  reinit_io_cache(outfile, WRITE_CACHE, 0L, 0, 0);

  memset(&sort_param, 0, sizeof(sort_param));
  sort_param.max_rows= elements;
  sort_param.sort_form=table;
  sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
    size;
  sort_param.keys= (uint32_t) (max_in_memory_size / sort_param.sort_length);
  sort_param.not_killable=1;

  if (!(sort_buffer=(unsigned char*) malloc((sort_param.keys+1) *
                                            sort_param.sort_length)))
    return 1;
  sort_param.unique_buff= sort_buffer+(sort_param.keys*
                                       sort_param.sort_length);

  sort_param.compare= (qsort2_cmp) buffpek_compare;
  sort_param.cmp_context.key_compare= tree.compare;
  sort_param.cmp_context.key_compare_arg= tree.custom_arg;

  /* Merge the buffers to one file, removing duplicates */
  if (merge_many_buff(&sort_param,sort_buffer,file_ptr,&maxbuffer,&file))
    goto err;
  if (flush_io_cache(&file) ||
      reinit_io_cache(&file,READ_CACHE,0L,0,0))
    goto err;
  if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr,
                    file_ptr, file_ptr+maxbuffer,0))
    goto err;
  error=0;
err:
  if (sort_buffer)
    free(sort_buffer);
  if (flush_io_cache(outfile))
    error=1;

  /* Setup io_cache for reading */
  save_pos=outfile->pos_in_file;
  if (reinit_io_cache(outfile,READ_CACHE,0L,0,0))
    error=1;
  outfile->end_of_file=save_pos;
  return error;
}