1
by brian
clean slate |
1 |
/* Copyright (C) 2003 MySQL AB
|
2 |
||
3 |
This program is free software; you can redistribute it and/or modify
|
|
4 |
it under the terms of the GNU General Public License as published by
|
|
5 |
the Free Software Foundation; version 2 of the License.
|
|
6 |
||
7 |
This program is distributed in the hope that it will be useful,
|
|
8 |
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
9 |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
10 |
GNU General Public License for more details.
|
|
11 |
||
12 |
You should have received a copy of the GNU General Public License
|
|
13 |
along with this program; if not, write to the Free Software
|
|
14 |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
|
15 |
||
16 |
/*
|
|
17 |
Make sure to look at ha_tina.h for more details.
|
|
18 |
||
19 |
First off, this is a play thing for me, there are a number of things
|
|
20 |
wrong with it:
|
|
21 |
*) It was designed for csv and therefore its performance is highly
|
|
22 |
questionable.
|
|
23 |
*) Indexes have not been implemented. This is because the files can
|
|
24 |
be traded in and out of the table directory without having to worry
|
|
25 |
about rebuilding anything.
|
|
26 |
*) NULLs and "" are treated equally (like a spreadsheet).
|
|
27 |
*) There was in the beginning no point to anyone seeing this other
|
|
28 |
then me, so there is a good chance that I haven't quite documented
|
|
29 |
it well.
|
|
30 |
*) Less design, more "make it work"
|
|
31 |
||
32 |
Now there are a few cool things with it:
|
|
33 |
*) Errors can result in corrupted data files.
|
|
34 |
*) Data files can be read by spreadsheets directly.
|
|
35 |
||
36 |
TODO:
|
|
37 |
*) Move to a block system for larger files
|
|
38 |
*) Error recovery, its all there, just need to finish it
|
|
39 |
*) Document how the chains work.
|
|
40 |
||
41 |
-Brian
|
|
42 |
*/
|
|
43 |
||
44 |
#ifdef USE_PRAGMA_IMPLEMENTATION
|
|
45 |
#pragma implementation // gcc: Class implementation |
|
46 |
#endif
|
|
47 |
||
212.5.45
by Monty Taylor
Removed excess AM_CPPFLAGS from the tree. Now the only thing that should be in the include path should be -I${top_srcdir} and -I${top_builddir}w |
48 |
#include <drizzled/mysql_priv.h> |
1
by brian
clean slate |
49 |
#include "ha_tina.h" |
50 |
||
51 |
||
52 |
/*
|
|
53 |
uchar + uchar + uint64_t + uint64_t + uint64_t + uint64_t + uchar
|
|
54 |
*/
|
|
55 |
#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(uint64_t) \
|
|
56 |
+ sizeof(uint64_t) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(uchar)
|
|
57 |
#define TINA_CHECK_HEADER 254 // The number we use to determine corruption |
|
58 |
#define BLOB_MEMROOT_ALLOC_SIZE 8192
|
|
59 |
||
60 |
/* The file extension */
|
|
61 |
#define CSV_EXT ".CSV" // The data file |
|
62 |
#define CSN_EXT ".CSN" // Files used during repair and update |
|
63 |
#define CSM_EXT ".CSM" // Meta file |
|
64 |
||
65 |
||
66 |
static TINA_SHARE *get_share(const char *table_name, TABLE *table); |
|
67 |
static int free_share(TINA_SHARE *share); |
|
68 |
static int read_meta_file(File meta_file, ha_rows *rows); |
|
69 |
static int write_meta_file(File meta_file, ha_rows rows, bool dirty); |
|
70 |
||
71 |
extern "C" void tina_get_status(void* param, int concurrent_insert); |
|
72 |
extern "C" void tina_update_status(void* param); |
|
146
by Brian Aker
my_bool cleanup. |
73 |
extern "C" bool tina_check_status(void* param); |
1
by brian
clean slate |
74 |
|
75 |
/* Stuff for shares */
|
|
76 |
pthread_mutex_t tina_mutex; |
|
77 |
static HASH tina_open_tables; |
|
78 |
static handler *tina_create_handler(handlerton *hton, |
|
79 |
TABLE_SHARE *table, |
|
80 |
MEM_ROOT *mem_root); |
|
81 |
||
82 |
||
83 |
/*****************************************************************************
|
|
84 |
** TINA tables
|
|
85 |
*****************************************************************************/
|
|
86 |
||
87 |
/*
|
|
88 |
Used for sorting chains with qsort().
|
|
89 |
*/
|
|
90 |
/*
  qsort() comparator for delete-chain intervals.

  Chains never overlap, so comparing the interval start points alone
  yields a total order.
*/
int sort_set (tina_set *a, tina_set *b)
{
  if (a->begin > b->begin)
    return 1;
  if (a->begin < b->begin)
    return -1;
  return 0;
}
|
|
98 |
||
99 |
/*
  Hash callback for tina_open_tables: returns the key (the table name)
  and stores its length in *length for a TINA_SHARE entry.
*/
static uchar* tina_get_key(TINA_SHARE *share, size_t *length,
                           bool not_used __attribute__((unused)))
{
  *length=share->table_name_length;
  return (uchar*) share->table_name;
}
|
|
105 |
||
106 |
/*
  Plugin initialization: create the global mutex and the open-tables
  hash (keyed by table name via tina_get_key), then fill in the
  handlerton entry points and capability flags. Always returns 0.
*/
static int tina_init_func(void *p)
{
  handlerton *tina_hton;

  tina_hton= (handlerton *)p;
  VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST));
  (void) hash_init(&tina_open_tables,system_charset_info,32,0,0,
                   (hash_get_key) tina_get_key,0,0);
  tina_hton->state= SHOW_OPTION_YES;
  tina_hton->db_type= DB_TYPE_CSV_DB;
  tina_hton->create= tina_create_handler;
  tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES |
                     HTON_NO_PARTITION);
  return 0;
}
|
|
121 |
||
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
122 |
/*
  Plugin shutdown: release the open-tables hash and destroy the global
  mutex created in tina_init_func. Always returns 0.
*/
static int tina_done_func(void *p __attribute__((unused)))
{
  hash_free(&tina_open_tables);
  pthread_mutex_destroy(&tina_mutex);

  return 0;
}
|
|
129 |
||
130 |
||
131 |
/*
|
|
132 |
Simple lock controls.
|
|
133 |
*/
|
|
77.1.6
by Monty Taylor
CSV is clean. |
134 |
/*
  Look up (or create) the TINA_SHARE for table_name; serialized by
  tina_mutex.

  On the first open of a table this allocates the share (zero-filled,
  with the name stored in the same allocation), resolves the .CSV data
  and .CSM meta file names, records the current data file size, inserts
  the share into the hash, and opens/reads the meta file — marking the
  share crashed on meta-file failure so auto-repair can kick in later.

  Returns the share with use_count incremented, or NULL on error
  (allocation failure, or the data file does not exist).
*/
static TINA_SHARE *get_share(const char *table_name,
                             TABLE *table __attribute__((unused)))
{
  TINA_SHARE *share;
  char meta_file_name[FN_REFLEN];
  struct stat file_stat;
  char *tmp_name;
  uint length;

  pthread_mutex_lock(&tina_mutex);
  length=(uint) strlen(table_name);

  /*
    If share is not present in the hash, create a new share and
    initialize its members.
  */
  if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
                                        (uchar*) table_name,
                                        length)))
  {
    /* One allocation holds both the share and the name copy */
    if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                         &share, sizeof(*share),
                         &tmp_name, length+1,
                         NullS))
    {
      pthread_mutex_unlock(&tina_mutex);
      return NULL;
    }

    share->use_count= 0;
    share->is_log_table= false;
    share->table_name_length= length;
    share->table_name= tmp_name;
    share->crashed= false;
    share->rows_recorded= 0;
    share->update_file_opened= false;
    share->tina_write_opened= false;
    share->data_file_version= 0;
    strmov(share->table_name, table_name);
    fn_format(share->data_file_name, table_name, "", CSV_EXT,
              MY_REPLACE_EXT|MY_UNPACK_FILENAME);
    fn_format(meta_file_name, table_name, "", CSM_EXT,
              MY_REPLACE_EXT|MY_UNPACK_FILENAME);

    /* The data file must already exist; its size bounds later reads */
    if (stat(share->data_file_name, &file_stat))
      goto error;
    share->saved_data_file_length= file_stat.st_size;

    if (my_hash_insert(&tina_open_tables, (uchar*) share))
      goto error;
    thr_lock_init(&share->lock);
    pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);

    /*
      Open or create the meta file. In the latter case, we'll get
      an error during read_meta_file and mark the table as crashed.
      Usually this will result in auto-repair, and we will get a good
      meta-file in the end.
    */
    if ((share->meta_file= my_open(meta_file_name,
                                   O_RDWR|O_CREAT, MYF(0))) == -1)
      share->crashed= true;

    /*
      If the meta file will not open we assume it is crashed and
      mark it as such.
    */
    if (read_meta_file(share->meta_file, &share->rows_recorded))
      share->crashed= true;
  }
  share->use_count++;
  pthread_mutex_unlock(&tina_mutex);

  return share;

error:
  pthread_mutex_unlock(&tina_mutex);
  my_free((uchar*) share, MYF(0));

  return NULL;
}
|
|
215 |
||
216 |
||
217 |
/*
|
|
218 |
Read CSV meta-file
|
|
219 |
||
220 |
SYNOPSIS
|
|
221 |
read_meta_file()
|
|
222 |
meta_file The meta-file filedes
|
|
223 |
ha_rows Pointer to the var we use to store rows count.
|
|
224 |
These are read from the meta-file.
|
|
225 |
||
226 |
DESCRIPTION
|
|
227 |
||
228 |
Read the meta-file info. For now we are only interested in
|
|
229 |
rows count, crashed bit and magic number.
|
|
230 |
||
231 |
RETURN
|
|
232 |
0 - OK
|
|
233 |
non-zero - error occurred
|
|
234 |
*/
|
|
235 |
||
236 |
/*
  Read and validate the CSV meta-file, storing the recorded row count
  in *rows. Returns 0 on success, HA_ERR_CRASHED_ON_USAGE if the file
  is short, the magic byte is wrong, or the dirty flag is set.
*/
static int read_meta_file(File meta_file, ha_rows *rows)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0)
      != META_BUFFER_SIZE)
    return(HA_ERR_CRASHED_ON_USAGE);

  /*
    Parse out the meta data, we ignore version at the moment
  */

  ptr+= sizeof(uchar)*2; // Move past header and version bytes
  *rows= (ha_rows)uint8korr(ptr);
  ptr+= sizeof(uint64_t); // Move past rows
  /*
    Move past check_point, auto_increment and forced_flushes fields.
    They are present in the format, but we do not use them yet.
  */
  ptr+= 3*sizeof(uint64_t);

  /* check crashed bit and magic number; *ptr is now the dirty flag */
  if ((meta_buffer[0] != (uchar)TINA_CHECK_HEADER) ||
      ((bool)(*ptr)== true))
    return(HA_ERR_CRASHED_ON_USAGE);

  my_sync(meta_file, MYF(MY_WME));

  return(0);
}
|
268 |
||
269 |
||
270 |
/*
|
|
271 |
Write CSV meta-file
|
|
272 |
||
273 |
SYNOPSIS
|
|
274 |
write_meta_file()
|
|
275 |
meta_file The meta-file filedes
|
|
276 |
ha_rows The number of rows we have in the datafile.
|
|
277 |
dirty A flag, which marks whether we have a corrupt table
|
|
278 |
||
279 |
DESCRIPTION
|
|
280 |
||
281 |
Write meta-info to the file. Only rows count, crashed bit and
|
|
282 |
magic number matter now.
|
|
283 |
||
284 |
RETURN
|
|
285 |
0 - OK
|
|
286 |
non-zero - error occurred
|
|
287 |
*/
|
|
288 |
||
289 |
/*
  Serialize the meta-data (magic byte, version, row count, three
  reserved uint64 fields zeroed, dirty flag) into META_BUFFER_SIZE
  bytes and write it at offset 0, then sync. Returns 0 on success,
  -1 on a short write.
*/
static int write_meta_file(File meta_file, ha_rows rows, bool dirty)
{
  uchar meta_buffer[META_BUFFER_SIZE];
  uchar *ptr= meta_buffer;

  *ptr= (uchar)TINA_CHECK_HEADER;
  ptr+= sizeof(uchar);
  *ptr= (uchar)TINA_VERSION;
  ptr+= sizeof(uchar);
  int8store(ptr, (uint64_t)rows);
  ptr+= sizeof(uint64_t);
  memset(ptr, 0, 3*sizeof(uint64_t));
  /*
    Skip over checkpoint, autoincrement and forced_flushes fields.
    We'll need them later.
  */
  ptr+= 3*sizeof(uint64_t);
  *ptr= (uchar)dirty;

  VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
  if (my_write(meta_file, (uchar *)meta_buffer, META_BUFFER_SIZE, 0)
      != META_BUFFER_SIZE)
    return(-1);

  my_sync(meta_file, MYF(MY_WME));

  return(0);
}
|
317 |
||
318 |
/*
  Auto-repair hook, invoked when the table is flagged as crashed:
  runs repair() with default check options and returns its result.
*/
bool ha_tina::check_and_repair(THD *thd)
{
  HA_CHECK_OPT check_opt;

  check_opt.init();

  return(repair(thd, &check_opt));
}
|
326 |
||
327 |
||
328 |
/*
  Open the shared append-mode write descriptor for the data file.
  Returns 0 on success, 1 on open failure (share marked crashed).
*/
int ha_tina::init_tina_writer()
{
  /*
    Mark the file as crashed. We will set the flag back when we close
    the file. In the case of the crash it will remain marked crashed,
    which enforce recovery.
  */
  (void)write_meta_file(share->meta_file, share->rows_recorded, true);

  if ((share->tina_write_filedes=
        my_open(share->data_file_name, O_RDWR|O_APPEND, MYF(0))) == -1)
  {
    share->crashed= true;
    return(1);
  }
  share->tina_write_opened= true;

  return(0);
}
|
347 |
||
348 |
||
349 |
/* Report whether the shared state has this table marked as crashed. */
bool ha_tina::is_crashed() const
{
  return(share->crashed);
}
|
353 |
||
354 |
/*
|
|
355 |
Free lock controls.
|
|
356 |
*/
|
|
357 |
/*
  Drop one reference to the share (under tina_mutex). The last closer
  rewrites the meta file with the final crashed state, closes the meta
  and write descriptors, removes the share from the hash, and frees it.
  Returns non-zero if any close failed.
*/
static int free_share(TINA_SHARE *share)
{
  pthread_mutex_lock(&tina_mutex);
  int result_code= 0;
  if (!--share->use_count){
    /* Write the meta file. Mark it as crashed if needed. */
    (void)write_meta_file(share->meta_file, share->rows_recorded,
                          share->crashed ? true :false);
    if (my_close(share->meta_file, MYF(0)))
      result_code= 1;
    if (share->tina_write_opened)
    {
      if (my_close(share->tina_write_filedes, MYF(0)))
        result_code= 1;
      share->tina_write_opened= false;
    }

    hash_delete(&tina_open_tables, (uchar*) share);
    thr_lock_delete(&share->lock);
    pthread_mutex_destroy(&share->mutex);
    my_free((uchar*) share, MYF(0));
  }
  pthread_mutex_unlock(&tina_mutex);

  return(result_code);
}
|
383 |
||
384 |
||
385 |
/*
|
|
386 |
This function finds the end of a line and returns the length
|
|
387 |
of the line ending.
|
|
388 |
||
389 |
We support three kinds of line endings:
|
|
390 |
'\r' -- Old Mac OS line ending
|
|
391 |
'\n' -- Traditional Unix and Mac OS X line ending
|
|
392 |
'\r''\n' -- DOS\Windows line ending
|
|
393 |
*/
|
|
394 |
||
395 |
/*
  Scan [begin, end) for the next line terminator and return its
  position, storing the terminator length in *eoln_len.

  Recognized endings: '\n' (Unix / Mac OS X, length 1), bare '\r'
  (old Mac, length 1), and '\r''\n' (DOS/Windows, length 2).
  Returns 0 (with *eoln_len == 0) when no terminator is found.
*/
off_t find_eoln_buff(Transparent_file *data_buff, off_t begin,
                     off_t end, int *eoln_len)
{
  *eoln_len= 0;

  for (off_t pos= begin; pos < end; pos++)
  {
    char c= data_buff->get_value(pos);

    if (c == '\n')
      *eoln_len= 1;                        /* Unix / Mac OS X */
    else if (c == '\r')                    /* old Mac or DOS */
    {
      if (pos + 1 != end && data_buff->get_value(pos + 1) == '\n')
        *eoln_len= 2;                      /* DOS style ending */
      else
        *eoln_len= 1;                      /* old Mac line ending */
    }

    if (*eoln_len)                         /* end of line was found */
      return pos;
  }

  return 0;
}
|
|
421 |
||
422 |
||
423 |
/* Handlerton factory: placement-new a ha_tina instance on mem_root. */
static handler *tina_create_handler(handlerton *hton,
                                    TABLE_SHARE *table,
                                    MEM_ROOT *mem_root)
{
  return new (mem_root) ha_tina(hton, table);
}
|
|
429 |
||
430 |
||
431 |
/*
  Handler constructor: zero the scan-position and delete-chain
  bookkeeping, point the row buffer at the pre-allocated byte_buffer,
  and create the Transparent_file used to page through the data file.
*/
ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg)
  :handler(hton, table_arg),
  /*
    These definitions are found in handler.h
    They are not probably completely right.
  */
  current_position(0), next_position(0), local_saved_data_file_length(0),
  file_buff(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH),
  local_data_file_version(0), records_is_known(0)
{
  /* Set our original buffers from pre-allocated memory */
  buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin);
  chain= chain_buffer;
  file_buff= new Transparent_file();
}
|
|
446 |
||
447 |
||
448 |
/*
|
|
449 |
Encode a buffer into the quoted format.
|
|
450 |
*/
|
|
451 |
||
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
452 |
/*
  Encode the current row (read from table->field) into 'buffer' as one
  CSV line: fields that need quoting are wrapped in '"' with '"', '\',
  CR and LF backslash-escaped; fields are comma-separated and the line
  ends with '\n'. Returns the encoded length.

  Fix: the escape loop previously used statements of the form '*ptr++;'
  which dereference the pointer and discard the value; the character is
  now read once and dispatched through a switch.
*/
int ha_tina::encode_quote(uchar *buf __attribute__((unused)))
{
  char attribute_buffer[1024];
  String attribute(attribute_buffer, sizeof(attribute_buffer),
                   &my_charset_bin);

  my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
  buffer.length(0);

  for (Field **field=table->field ; *field ; field++)
  {
    const char *ptr;
    const char *end_ptr;
    const bool was_null= (*field)->is_null();

    /*
      assistance for backwards compatibility in production builds.
      note: this will not work for ENUM columns.
    */
    if (was_null)
    {
      (*field)->set_default();
      (*field)->set_notnull();
    }

    (*field)->val_str(&attribute,&attribute);

    if (was_null)
      (*field)->set_null();

    if ((*field)->str_needs_quotes())
    {
      ptr= attribute.ptr();
      end_ptr= attribute.length() + ptr;

      buffer.append('"');

      while (ptr < end_ptr)
      {
        char c= *ptr++;
        switch (c) {
        case '"':
          buffer.append('\\');
          buffer.append('"');
          break;
        case '\r':
          buffer.append('\\');
          buffer.append('r');
          break;
        case '\\':
          buffer.append('\\');
          buffer.append('\\');
          break;
        case '\n':
          buffer.append('\\');
          buffer.append('n');
          break;
        default:
          buffer.append(c);
          break;
        }
      }
      buffer.append('"');
    }
    else
    {
      buffer.append(attribute);
    }

    buffer.append(',');
  }
  // Remove the trailing comma, add a line feed
  buffer.length(buffer.length() - 1);
  buffer.append('\n');

  dbug_tmp_restore_column_map(table->read_set, org_bitmap);
  return (buffer.length());
}
|
|
536 |
||
537 |
/*
|
|
538 |
chain_append() adds delete positions to the chain that we use to keep
|
|
539 |
track of space. Then the chain will be used to cleanup "holes", occurred
|
|
540 |
due to deletes and updates.
|
|
541 |
*/
|
|
542 |
int ha_tina::chain_append() |
|
543 |
{
|
|
544 |
if ( chain_ptr != chain && (chain_ptr -1)->end == current_position) |
|
545 |
(chain_ptr -1)->end= next_position; |
|
546 |
else
|
|
547 |
{
|
|
548 |
/* We set up for the next position */
|
|
549 |
if ((off_t)(chain_ptr - chain) == (chain_size -1)) |
|
550 |
{
|
|
551 |
off_t location= chain_ptr - chain; |
|
552 |
chain_size += DEFAULT_CHAIN_LENGTH; |
|
553 |
if (chain_alloced) |
|
554 |
{
|
|
555 |
/* Must cast since my_malloc unlike malloc doesn't have a void ptr */
|
|
556 |
if ((chain= (tina_set *) my_realloc((uchar*)chain, |
|
557 |
chain_size, MYF(MY_WME))) == NULL) |
|
558 |
return -1; |
|
559 |
}
|
|
560 |
else
|
|
561 |
{
|
|
562 |
tina_set *ptr= (tina_set *) my_malloc(chain_size * sizeof(tina_set), |
|
563 |
MYF(MY_WME)); |
|
564 |
memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set)); |
|
565 |
chain= ptr; |
|
566 |
chain_alloced++; |
|
567 |
}
|
|
568 |
chain_ptr= chain + location; |
|
569 |
}
|
|
570 |
chain_ptr->begin= current_position; |
|
571 |
chain_ptr->end= next_position; |
|
572 |
chain_ptr++; |
|
573 |
}
|
|
574 |
||
575 |
return 0; |
|
576 |
}
|
|
577 |
||
578 |
||
579 |
/*
|
|
580 |
Scans for a row.
|
|
581 |
*/
|
|
582 |
/*
  Parse the CSV line starting at current_position into the table's
  Field objects (record image in buf). Quoted fields honor the escapes
  produced by encode_quote; blob values are copied into blobroot so
  they survive the next file_buff read. Sets next_position past the
  line terminator on success.

  Returns 0 on success, HA_ERR_END_OF_FILE when no complete line is
  available before local_saved_data_file_length, or
  HA_ERR_CRASHED_ON_USAGE on a malformed line or a failed store().
*/
int ha_tina::find_current_row(uchar *buf)
{
  off_t end_offset, curr_offset= current_position;
  int eoln_len;
  my_bitmap_map *org_bitmap;
  int error;
  bool read_all;

  /* Reuse blob memory from the previous row instead of reallocating */
  free_root(&blobroot, MYF(MY_MARK_BLOCKS_FREE));

  /*
    We do not read further then local_saved_data_file_length in order
    not to conflict with undergoing concurrent insert.
  */
  if ((end_offset=
        find_eoln_buff(file_buff, current_position,
                       local_saved_data_file_length, &eoln_len)) == 0)
    return(HA_ERR_END_OF_FILE);

  /* We must read all columns in case a table is opened for update */
  read_all= !bitmap_is_clear_all(table->write_set);
  /* Avoid asserts in ::store() for columns that are not going to be updated */
  org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
  error= HA_ERR_CRASHED_ON_USAGE;

  memset(buf, 0, table->s->null_bytes);

  for (Field **field=table->field ; *field ; field++)
  {
    char curr_char;

    buffer.length(0);
    /* Running out of line before running out of fields => damaged row */
    if (curr_offset >= end_offset)
      goto err;
    curr_char= file_buff->get_value(curr_offset);
    if (curr_char == '"')
    {
      curr_offset++; // Increment past the first quote

      for(; curr_offset < end_offset; curr_offset++)
      {
        curr_char= file_buff->get_value(curr_offset);
        /* A closing quote is one followed by ',' or at end of line */
        if (curr_char == '"' &&
            (curr_offset == end_offset - 1 ||
             file_buff->get_value(curr_offset + 1) == ','))
        {
          curr_offset+= 2; // Move past the , and the "
          break;
        }
        if (curr_char == '\\' && curr_offset != (end_offset - 1))
        {
          /* Undo the escapes written by encode_quote() */
          curr_offset++;
          curr_char= file_buff->get_value(curr_offset);
          if (curr_char == 'r')
            buffer.append('\r');
          else if (curr_char == 'n' )
            buffer.append('\n');
          else if (curr_char == '\\' || curr_char == '"')
            buffer.append(curr_char);
          else  /* This could only happen with an externally created file */
          {
            buffer.append('\\');
            buffer.append(curr_char);
          }
        }
        else // ordinary symbol
        {
          /*
            We are at final symbol and no last quote was found =>
            we are working with a damaged file.
          */
          if (curr_offset == end_offset - 1)
            goto err;
          buffer.append(curr_char);
        }
      }
    }
    else
    {
      /* Unquoted field: copy up to the next comma or end of line */
      for(; curr_offset < end_offset; curr_offset++)
      {
        curr_char= file_buff->get_value(curr_offset);
        if (curr_char == ',')
        {
          curr_offset++; // Skip the ,
          break;
        }
        buffer.append(curr_char);
      }
    }

    if (read_all || bitmap_is_set(table->read_set, (*field)->field_index))
    {
      if ((*field)->store(buffer.ptr(), buffer.length(), buffer.charset(),
                          CHECK_FIELD_WARN))
        goto err;
      if ((*field)->flags & BLOB_FLAG)
      {
        /*
          Blob fields store a pointer into 'buffer', which is reused per
          field; copy the payload into blobroot and repoint the field.
        */
        Field_blob *blob= *(Field_blob**) field;
        uchar *src, *tgt;
        uint length, packlength;

        packlength= blob->pack_length_no_ptr();
        length= blob->get_length(blob->ptr);
        memcpy(&src, blob->ptr + packlength, sizeof(char*));
        if (src)
        {
          tgt= (uchar*) alloc_root(&blobroot, length);
          memcpy(tgt, src, length);
          memcpy(blob->ptr + packlength, &tgt, sizeof(char*));
        }
      }
    }
  }
  next_position= end_offset + eoln_len;
  error= 0;

err:
  dbug_tmp_restore_column_map(table->write_set, org_bitmap);

  return(error);
}
|
705 |
||
706 |
/*
|
|
707 |
If frm_error() is called in table.cc this is called to find out what file
|
|
708 |
extensions exist for this handler.
|
|
709 |
*/
|
|
710 |
/* NULL-terminated list of the file extensions this engine owns. */
static const char *ha_tina_exts[] = {
  CSV_EXT,
  CSM_EXT,
  NullS
};

/* Return the extension list (used for file discovery and deletion). */
const char **ha_tina::bas_ext() const
{
  return ha_tina_exts;
}
|
|
720 |
||
721 |
/*
|
|
722 |
Three functions below are needed to enable concurrent insert functionality
|
|
723 |
for CSV engine. For more details see mysys/thr_lock.c
|
|
724 |
*/
|
|
725 |
||
77.1.6
by Monty Taylor
CSV is clean. |
726 |
/*
  thr_lock get_status callback: snapshot the shared data-file length
  into the handler before the lock is granted (concurrent insert).
*/
void tina_get_status(void* param,
                     int concurrent_insert __attribute__((unused)))
{
  ha_tina *tina= (ha_tina*) param;
  tina->get_status();
}
|
|
732 |
||
733 |
/*
  thr_lock update_status callback: publish the writer's new data-file
  length back to the share before the write lock is released.
*/
void tina_update_status(void* param)
{
  ha_tina *tina= (ha_tina*) param;
  tina->update_status();
}
|
|
738 |
||
739 |
/* this should exist and return 0 for concurrent insert to work */
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
740 |
/*
  thr_lock check_status callback. Concurrent insert is unconditionally
  allowed for CSV tables, so this always reports "no conflict".
*/
bool tina_check_status(void* param __attribute__((unused)))
{
  return false;
}
|
|
744 |
||
745 |
/*
|
|
746 |
Save the state of the table
|
|
747 |
||
748 |
SYNOPSIS
|
|
749 |
get_status()
|
|
750 |
||
751 |
DESCRIPTION
|
|
752 |
This function is used to retrieve the file length. During the lock
|
|
753 |
phase of concurrent insert. For more details see comment to
|
|
754 |
ha_tina::update_status below.
|
|
755 |
*/
|
|
756 |
||
757 |
/*
  Snapshot share->saved_data_file_length into this handler's
  local_saved_data_file_length; taken under share->mutex for log
  tables, where writers update the share outside of table locks.
*/
void ha_tina::get_status()
{
  if (share->is_log_table)
  {
    /*
      We have to use mutex to follow pthreads memory visibility
      rules for share->saved_data_file_length
    */
    pthread_mutex_lock(&share->mutex);
    local_saved_data_file_length= share->saved_data_file_length;
    pthread_mutex_unlock(&share->mutex);
    return;
  }
  local_saved_data_file_length= share->saved_data_file_length;
}
|
|
772 |
||
773 |
||
774 |
/*
|
|
775 |
Correct the state of the table. Called by unlock routines
|
|
776 |
before the write lock is released.
|
|
777 |
||
778 |
SYNOPSIS
|
|
779 |
update_status()
|
|
780 |
||
781 |
DESCRIPTION
|
|
782 |
When we employ concurrent insert lock, we save current length of the file
|
|
783 |
during the lock phase. We do not read further saved value, as we don't
|
|
784 |
want to interfere with undergoing concurrent insert. Writers update file
|
|
785 |
length info during unlock with update_status().
|
|
786 |
||
787 |
NOTE
|
|
788 |
For log tables concurrent insert works different. The reason is that
|
|
789 |
log tables are always opened and locked. And as they do not unlock
|
|
790 |
tables, the file length after writes should be updated in a different
|
|
791 |
way. For this purpose we need is_log_table flag. When this flag is set
|
|
792 |
we call update_status() explicitly after each row write.
|
|
793 |
*/
|
|
794 |
||
795 |
/*
  Publish this writer's local file length to the share; see the
  concurrent-insert discussion in the comment block above.
*/
void ha_tina::update_status()
{
  /* correct local_saved_data_file_length for writers */
  share->saved_data_file_length= local_saved_data_file_length;
}
|
|
800 |
||
801 |
||
802 |
/*
|
|
803 |
Open a database file. Keep in mind that tables are caches, so
|
|
804 |
this will not be called for every request. Any sort of positions
|
|
805 |
that need to be reset should be kept in the ::extra() call.
|
|
806 |
*/
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
807 |
/*
  Open the table: acquire the share, refuse a crashed table unless
  opened for repair, open a private read-only descriptor on the data
  file, and wire up the thr_lock callbacks that enable concurrent
  insert. Returns 0 on success or an HA_ERR_* code.
*/
int ha_tina::open(const char *name, int mode __attribute__((unused)),
                  uint open_options)
{
  if (!(share= get_share(name, table)))
    return(HA_ERR_OUT_OF_MEM);

  if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR))
  {
    free_share(share);
    return(HA_ERR_CRASHED_ON_USAGE);
  }

  local_data_file_version= share->data_file_version;
  /*
    NOTE(review): a failed my_open here returns 0 (success) with
    data_file == -1 and skips the lock setup — looks suspicious;
    confirm this is the intended behavior.
  */
  if ((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1)
    return(0);

  /*
    Init locking. Pass handler object to the locking routines,
    so that they could save/update local_saved_data_file_length value
    during locking. This is needed to enable concurrent inserts.
  */
  thr_lock_data_init(&share->lock, &lock, (void*) this);
  ref_length=sizeof(off_t);

  share->lock.get_status= tina_get_status;
  share->lock.update_status= tina_update_status;
  share->lock.check_status= tina_check_status;

  return(0);
}
|
837 |
||
838 |
||
839 |
/*
|
|
840 |
Close a database file. We remove ourselves from the shared structure.
|
|
841 |
If it is empty we destroy it.
|
|
842 |
*/
|
|
843 |
int ha_tina::close(void) |
|
844 |
{
|
|
845 |
int rc= 0; |
|
846 |
rc= my_close(data_file, MYF(0)); |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
847 |
return(free_share(share) || rc); |
1
by brian
clean slate |
848 |
}
|
849 |
||
850 |
/*
|
|
851 |
This is an INSERT. At the moment this handler just seeks to the end
|
|
852 |
of the file and appends the data. In an error case it really should
|
|
853 |
just truncate to the original position (this is not done yet).
|
|
854 |
*/
|
|
855 |
/*
  INSERT one row: encode it as a CSV line and append it to the data
  file via the shared write descriptor (opened lazily). Updates the
  local and shared length/row counters; for log tables the shared
  length is published immediately. Returns 0 on success, -1 on a
  write/open failure, HA_ERR_CRASHED_ON_USAGE if the table is crashed.
*/
int ha_tina::write_row(uchar * buf)
{
  int size;

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  ha_statistic_increment(&SSV::ha_write_count);

  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();

  size= encode_quote(buf);

  if (!share->tina_write_opened)
    if (init_tina_writer())
      return(-1);

  /* use pwrite, as concurrent reader could have changed the position */
  if (my_write(share->tina_write_filedes, (uchar*)buffer.ptr(), size,
               MYF(MY_WME | MY_NABP)))
    return(-1);

  /* update local copy of the max position to see our own changes */
  local_saved_data_file_length+= size;

  /* update shared info */
  pthread_mutex_lock(&share->mutex);
  share->rows_recorded++;
  /* update status for the log tables */
  if (share->is_log_table)
    update_status();
  pthread_mutex_unlock(&share->mutex);

  stats.records++;
  return(0);
}
|
892 |
||
893 |
||
894 |
int ha_tina::open_update_temp_file_if_needed() |
|
895 |
{
|
|
896 |
char updated_fname[FN_REFLEN]; |
|
897 |
||
898 |
if (!share->update_file_opened) |
|
899 |
{
|
|
900 |
if ((update_temp_file= |
|
901 |
my_create(fn_format(updated_fname, share->table_name, |
|
902 |
"", CSN_EXT, |
|
903 |
MY_REPLACE_EXT | MY_UNPACK_FILENAME), |
|
904 |
0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0) |
|
905 |
return 1; |
|
163
by Brian Aker
Merge Monty's code. |
906 |
share->update_file_opened= true; |
1
by brian
clean slate |
907 |
temp_file_length= 0; |
908 |
}
|
|
909 |
return 0; |
|
910 |
}
|
|
911 |
||
912 |
/*
|
|
913 |
This is called for an update.
|
|
914 |
Make sure you put in code to increment the auto increment, also
|
|
915 |
update any timestamp data. Currently auto increment is not being
|
|
916 |
fixed since autoincrements have yet to be added to this table handler.
|
|
917 |
This will be called in a table scan right before the previous ::rnd_next()
|
|
918 |
call.
|
|
919 |
*/
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
920 |
/*
  UPDATE one row. The old row is marked deleted via chain_append() and the
  new row is written to the temporary data file; rnd_end() later merges
  everything back and renames the temp file over the data file.
  Returns 0 on success, -1 on any failure.
*/
int ha_tina::update_row(const uchar * old_data __attribute__((unused)),
                        uchar * new_data)
{
  int size;
  int rc= -1;   /* pessimistic default; set to 0 only on full success */

  ha_statistic_increment(&SSV::ha_update_count);

  /* Honor TIMESTAMP ON UPDATE semantics before encoding. */
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
    table->timestamp_field->set_time();

  size= encode_quote(new_data);

  /*
    During update we mark each updating record as deleted
    (see the chain_append()) then write new one to the temporary data file.
    At the end of the sequence in the rnd_end() we append all non-marked
    records from the data file to the temporary data file then rename it.
    The temp_file_length is used to calculate new data file length.
  */
  if (chain_append())
    goto err;

  if (open_update_temp_file_if_needed())
    goto err;

  if (my_write(update_temp_file, (uchar*)buffer.ptr(), size,
               MYF(MY_WME | MY_NABP)))
    goto err;
  temp_file_length+= size;
  rc= 0;

  /* UPDATE should never happen on the log tables */
  assert(!share->is_log_table);

err:
  /* Success path falls through here with rc == 0. */
  return(rc);
}
|
958 |
||
959 |
||
960 |
/*
|
|
961 |
Deletes a row. First the database will find the row, and then call this
|
|
962 |
method. In the case of a table scan, the previous call to this will be
|
|
963 |
the ::rnd_next() that found this row.
|
|
964 |
The exception to this is an ORDER BY. This will cause the table handler
|
|
965 |
to walk the table noting the positions of all rows that match a query.
|
|
966 |
The table will then be deleted/positioned based on the ORDER (so RANDOM,
|
|
967 |
DESC, ASC).
|
|
968 |
*/
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
969 |
/*
  DELETE the current row by recording its extent in the deleted-rows chain;
  the physical removal happens later in rnd_end(). Returns 0 on success,
  -1 if the chain could not be extended.
*/
int ha_tina::delete_row(const uchar * buf __attribute__((unused)))
{
  ha_statistic_increment(&SSV::ha_delete_count);

  /* Mark the current row's byte range as a hole to be squeezed out. */
  if (chain_append())
    return(-1);

  stats.records--;
  /* Update shared info */
  assert(share->rows_recorded);
  pthread_mutex_lock(&share->mutex);
  share->rows_recorded--;
  pthread_mutex_unlock(&share->mutex);

  /* DELETE should never happen on the log table */
  assert(!share->is_log_table);

  return(0);
}
|
988 |
||
989 |
||
990 |
/**
|
|
991 |
@brief Initialize the data file.
|
|
992 |
|
|
993 |
@details Compare the local version of the data file with the shared one.
|
|
994 |
If they differ, there are some changes behind and we have to reopen
|
|
995 |
the data file to make the changes visible.
|
|
996 |
Call @c file_buff->init_buff() at the end to read the beginning of the
|
|
997 |
data file into buffer.
|
|
998 |
|
|
999 |
@retval 0 OK.
|
|
1000 |
@retval 1 There was an error.
|
|
1001 |
*/
|
|
1002 |
||
1003 |
int ha_tina::init_data_file() |
|
1004 |
{
|
|
1005 |
if (local_data_file_version != share->data_file_version) |
|
1006 |
{
|
|
1007 |
local_data_file_version= share->data_file_version; |
|
1008 |
if (my_close(data_file, MYF(0)) || |
|
1009 |
(data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1) |
|
1010 |
return 1; |
|
1011 |
}
|
|
1012 |
file_buff->init_buff(data_file); |
|
1013 |
return 0; |
|
1014 |
}
|
|
1015 |
||
1016 |
||
1017 |
/*
|
|
1018 |
All table scans call this first.
|
|
1019 |
The order of a table scan is:
|
|
1020 |
||
1021 |
ha_tina::store_lock
|
|
1022 |
ha_tina::external_lock
|
|
1023 |
ha_tina::info
|
|
1024 |
ha_tina::rnd_init
|
|
1025 |
ha_tina::extra
|
|
1026 |
ENUM HA_EXTRA_CACHE Cash record in HA_rrnd()
|
|
1027 |
ha_tina::rnd_next
|
|
1028 |
ha_tina::rnd_next
|
|
1029 |
ha_tina::rnd_next
|
|
1030 |
ha_tina::rnd_next
|
|
1031 |
ha_tina::rnd_next
|
|
1032 |
ha_tina::rnd_next
|
|
1033 |
ha_tina::rnd_next
|
|
1034 |
ha_tina::rnd_next
|
|
1035 |
ha_tina::rnd_next
|
|
1036 |
ha_tina::extra
|
|
1037 |
ENUM HA_EXTRA_NO_CACHE End cacheing of records (def)
|
|
1038 |
ha_tina::external_lock
|
|
1039 |
ha_tina::extra
|
|
1040 |
ENUM HA_EXTRA_RESET Reset database to after open
|
|
1041 |
||
1042 |
Each call to ::rnd_next() represents a row returned in the can. When no more
|
|
1043 |
rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE.
|
|
1044 |
The ::info() call is just for the optimizer.
|
|
1045 |
||
1046 |
*/
|
|
1047 |
||
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1048 |
/*
  Begin a table scan: reset scan positions and counters and prepare the
  blob memroot. Returns 0 on success or HA_ERR_CRASHED_ON_USAGE when the
  table is marked crashed or the data file cannot be (re)initialized.
*/
int ha_tina::rnd_init(bool scan __attribute__((unused)))
{
  /* set buffer to the beginning of the file */
  if (share->crashed || init_data_file())
    return(HA_ERR_CRASHED_ON_USAGE);

  /* Start the scan from byte offset 0 and recount rows from scratch. */
  current_position= next_position= 0;
  stats.records= 0;
  records_is_known= 0;
  chain_ptr= chain;   /* empty the deleted-rows chain */

  /* Memroot for blob values materialized during the scan;
     freed in rnd_end(). */
  init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0);

  return(0);
}
|
1063 |
||
1064 |
/*
|
|
1065 |
::rnd_next() does all the heavy lifting for a table scan. You will need to
|
|
1066 |
populate *buf with the correct field data. You can walk the field to
|
|
1067 |
determine at what position you should store the data (take a look at how
|
|
1068 |
::find_current_row() works). The structure is something like:
|
|
1069 |
0Foo Dog Friend
|
|
1070 |
The first offset is for the first attribute. All space before that is
|
|
1071 |
reserved for null count.
|
|
1072 |
Basically this works as a mask for which rows are nulled (compared to just
|
|
1073 |
empty).
|
|
1074 |
This table handler doesn't do nulls and does not know the difference between
|
|
1075 |
NULL and "". This is ok since this table handler is for spreadsheets and
|
|
1076 |
they don't know about them either :)
|
|
1077 |
*/
|
|
1078 |
/*
  Fetch the next row of the scan into buf. Returns 0 on success,
  HA_ERR_END_OF_FILE when the file is empty or exhausted,
  HA_ERR_CRASHED_ON_USAGE for a crashed table, or the error from
  find_current_row() on a parse failure.
*/
int ha_tina::rnd_next(uchar *buf)
{
  int rc;

  if (share->crashed)
      return(HA_ERR_CRASHED_ON_USAGE);

  ha_statistic_increment(&SSV::ha_read_rnd_next_count);

  /* Advance to the offset computed by the previous find_current_row(). */
  current_position= next_position;

  /* don't scan an empty file */
  if (!local_saved_data_file_length)
    return(HA_ERR_END_OF_FILE);

  /* Parse the row at current_position; also sets next_position. */
  if ((rc= find_current_row(buf)))
    return(rc);

  stats.records++;
  return(0);
}
|
1099 |
||
1100 |
/*
|
|
1101 |
In the case of an order by rows will need to be sorted.
|
|
1102 |
::position() is called after each call to ::rnd_next(),
|
|
1103 |
the data it stores is to a byte array. You can store this
|
|
1104 |
data via my_store_ptr(). ref_length is a variable defined to the
|
|
1105 |
class that is the sizeof() of position being stored. In our case
|
|
1106 |
its just a position. Look at the bdb code if you want to see a case
|
|
1107 |
where something other then a number is stored.
|
|
1108 |
*/
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1109 |
void ha_tina::position(const uchar *record __attribute__((unused))) |
1
by brian
clean slate |
1110 |
{
|
1111 |
my_store_ptr(ref, ref_length, current_position); |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1112 |
return; |
1
by brian
clean slate |
1113 |
}
|
1114 |
||
1115 |
||
1116 |
/*
|
|
1117 |
Used to fetch a row from a posiion stored with ::position().
|
|
1118 |
my_get_ptr() retrieves the data for you.
|
|
1119 |
*/
|
|
1120 |
||
1121 |
/*
  Fetch the row whose offset was previously saved by position().
  Returns the result of find_current_row() (0 on success).
*/
int ha_tina::rnd_pos(uchar * buf, uchar *pos)
{
  ha_statistic_increment(&SSV::ha_read_rnd_count);

  /* Decode the saved byte offset and make it the current scan position. */
  off_t saved_offset= (off_t) my_get_ptr(pos, ref_length);
  current_position= saved_offset;

  return (find_current_row(buf));
}
|
1127 |
||
1128 |
/*
|
|
1129 |
::info() is used to return information to the optimizer.
|
|
1130 |
Currently this table handler doesn't implement most of the fields
|
|
1131 |
really needed. SHOW also makes use of this data
|
|
1132 |
*/
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1133 |
/*
  Supply table statistics to the optimizer. Until a scan has established
  the true row count, report at least 2 rows: the optimizer special-cases
  tables it believes hold 0 or 1 rows.
*/
int ha_tina::info(uint flag __attribute__((unused)))
{
  const bool count_is_exact= records_is_known;

  if (!count_is_exact && stats.records < 2)
    stats.records= 2;

  return (0);
}
|
1140 |
||
1141 |
/*
|
|
1142 |
Set end_pos to the last valid byte of continuous area, closest
|
|
1143 |
to the given "hole", stored in the buffer. "Valid" here means,
|
|
1144 |
not listed in the chain of deleted records ("holes").
|
|
1145 |
*/
|
|
1146 |
/*
  Compute *end_pos: the last valid byte of the continuous writable area
  in the buffer, stopping before the next deleted-row hole. Returns true
  when the writable area runs exactly up to the hole (i.e. the caller
  must now skip that hole).
*/
bool ha_tina::get_write_pos(off_t *end_pos, tina_set *closest_hole)
{
  /* No holes remain: the entire buffered region can be written. */
  if (closest_hole == chain_ptr)
  {
    *end_pos= file_buff->end();
    return false;
  }

  /* Stop at whichever comes first: end of buffer or start of the hole. */
  *end_pos= min(file_buff->end(), closest_hole->begin);

  return (*end_pos == closest_hole->begin);
}
|
|
1155 |
||
1156 |
||
1157 |
/*
|
|
1158 |
Called after each table scan. In particular after deletes,
|
|
1159 |
and updates. In the last case we employ chain of deleted
|
|
1160 |
slots to clean up all of the dead space we have collected while
|
|
1161 |
performing deletes/updates.
|
|
1162 |
*/
|
|
1163 |
/*
  Finish a table scan. When UPDATE/DELETE collected holes in the chain,
  compact the data file: copy all live byte ranges into the temporary
  update file, then rename it over the data file and reopen everything.
  Returns 0 on success, -1 on any failure.
*/
int ha_tina::rnd_end()
{
  char updated_fname[FN_REFLEN];
  off_t file_buffer_start= 0;   /* becomes -1 at end of file */

  free_root(&blobroot, MYF(0));
  records_is_known= 1;

  /* Only rewrite the file if some rows were deleted/updated. */
  if ((chain_ptr - chain) > 0)
  {
    tina_set *ptr= chain;

    /*
      Re-read the beginning of a file (as the buffer should point to the
      end of file after the scan).
    */
    file_buff->init_buff(data_file);

    /*
      The sort is needed when there were updates/deletes with random orders.
      It sorts so that we move the firts blocks to the beginning.
    */
    my_qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set),
             (qsort_cmp)sort_set);

    off_t write_begin= 0, write_end;

    /* create the file to write updated table if it wasn't yet created */
    if (open_update_temp_file_if_needed())
      return(-1);

    /* write the file with updated info */
    while ((file_buffer_start != -1))     // while not end of file
    {
      bool in_hole= get_write_pos(&write_end, ptr);
      off_t write_length= write_end - write_begin;

      /* if there is something to write, write it */
      if (write_length)
      {
        /* Copy the live range [write_begin, write_end) from the buffer. */
        if (my_write(update_temp_file,
                     (uchar*) (file_buff->ptr() +
                               (write_begin - file_buff->start())),
                     write_length, MYF_RW))
          goto error;
        temp_file_length+= write_length;
      }
      if (in_hole)
      {
        /* skip hole: page the buffer forward past the deleted range */
        while (file_buff->end() <= ptr->end && file_buffer_start != -1)
          file_buffer_start= file_buff->read_next();
        write_begin= ptr->end;
        ptr++;   /* move on to the next hole in the sorted chain */
      }
      else
        write_begin= write_end;

      if (write_end == file_buff->end())
        file_buffer_start= file_buff->read_next(); /* shift the buffer */

    }

    /* Make sure the compacted data hits disk before the rename. */
    if (my_sync(update_temp_file, MYF(MY_WME)) ||
        my_close(update_temp_file, MYF(0)))
      return(-1);

    share->update_file_opened= false;

    if (share->tina_write_opened)
    {
      if (my_close(share->tina_write_filedes, MYF(0)))
        return(-1);
      /*
        Mark that the writer fd is closed, so that init_tina_writer()
        will reopen it later.
      */
      share->tina_write_opened= false;
    }

    /*
      Close opened fildes's. Then move updated file in place
      of the old datafile.
    */
    if (my_close(data_file, MYF(0)) ||
        my_rename(fn_format(updated_fname, share->table_name, "", CSN_EXT,
                            MY_REPLACE_EXT | MY_UNPACK_FILENAME),
                  share->data_file_name, MYF(0)))
      return(-1);

    /* Open the file again */
    if (((data_file= my_open(share->data_file_name, O_RDONLY, MYF(0))) == -1))
      return(-1);
    /*
      As we reopened the data file, increase share->data_file_version
      in order to force other threads waiting on a table lock and
      have already opened the table to reopen the data file.
      That makes the latest changes become visible to them.
      Update local_data_file_version as no need to reopen it in the
      current thread.
    */
    share->data_file_version++;
    local_data_file_version= share->data_file_version;
    /*
      The datafile is consistent at this point and the write filedes is
      closed, so nothing worrying will happen to it in case of a crash.
      Here we record this fact to the meta-file.
    */
    (void)write_meta_file(share->meta_file, share->rows_recorded, false);
    /*
      Update local_saved_data_file_length with the real length of the
      data file.
    */
    local_saved_data_file_length= temp_file_length;
  }

  return(0);
error:
  /* Abandon the partially-written temp file; it will be recreated. */
  my_close(update_temp_file, MYF(0));
  share->update_file_opened= false;
  return(-1);
}
|
1285 |
||
1286 |
||
1287 |
/*
|
|
1288 |
Repair CSV table in the case, it is crashed.
|
|
1289 |
||
1290 |
SYNOPSIS
|
|
1291 |
repair()
|
|
1292 |
thd The thread, performing repair
|
|
1293 |
check_opt The options for repair. We do not use it currently.
|
|
1294 |
||
1295 |
DESCRIPTION
|
|
1296 |
If the file is empty, change # of rows in the file and complete recovery.
|
|
1297 |
Otherwise, scan the table looking for bad rows. If none were found,
|
|
1298 |
we mark file as a good one and return. If a bad row was encountered,
|
|
1299 |
we truncate the datafile up to the last good row.
|
|
1300 |
||
1301 |
TODO: Make repair more clever - it should try to recover subsequent
|
|
1302 |
rows (after the first bad one) as well.
|
|
1303 |
*/
|
|
1304 |
||
77.1.6
by Monty Taylor
CSV is clean. |
1305 |
int ha_tina::repair(THD* thd, |
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1306 |
HA_CHECK_OPT* check_opt __attribute__((unused))) |
1
by brian
clean slate |
1307 |
{
|
1308 |
char repaired_fname[FN_REFLEN]; |
|
1309 |
uchar *buf; |
|
1310 |
File repair_file; |
|
1311 |
int rc; |
|
1312 |
ha_rows rows_repaired= 0; |
|
1313 |
off_t write_begin= 0, write_end; |
|
1314 |
||
1315 |
/* empty file */
|
|
1316 |
if (!share->saved_data_file_length) |
|
1317 |
{
|
|
1318 |
share->rows_recorded= 0; |
|
1319 |
goto end; |
|
1320 |
}
|
|
1321 |
||
1322 |
/* Don't assert in field::val() functions */
|
|
1323 |
table->use_all_columns(); |
|
1324 |
if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1325 |
return(HA_ERR_OUT_OF_MEM); |
1
by brian
clean slate |
1326 |
|
1327 |
/* position buffer to the start of the file */
|
|
1328 |
if (init_data_file()) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1329 |
return(HA_ERR_CRASHED_ON_REPAIR); |
1
by brian
clean slate |
1330 |
|
1331 |
/*
|
|
1332 |
Local_saved_data_file_length is initialized during the lock phase.
|
|
1333 |
Sometimes this is not getting executed before ::repair (e.g. for
|
|
1334 |
the log tables). We set it manually here.
|
|
1335 |
*/
|
|
1336 |
local_saved_data_file_length= share->saved_data_file_length; |
|
1337 |
/* set current position to the beginning of the file */
|
|
1338 |
current_position= next_position= 0; |
|
1339 |
||
1340 |
init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0); |
|
1341 |
||
1342 |
/* Read the file row-by-row. If everything is ok, repair is not needed. */
|
|
1343 |
while (!(rc= find_current_row(buf))) |
|
1344 |
{
|
|
1345 |
thd_inc_row_count(thd); |
|
1346 |
rows_repaired++; |
|
1347 |
current_position= next_position; |
|
1348 |
}
|
|
1349 |
||
1350 |
free_root(&blobroot, MYF(0)); |
|
1351 |
||
1352 |
my_free((char*)buf, MYF(0)); |
|
1353 |
||
1354 |
if (rc == HA_ERR_END_OF_FILE) |
|
1355 |
{
|
|
1356 |
/*
|
|
1357 |
All rows were read ok until end of file, the file does not need repair.
|
|
1358 |
If rows_recorded != rows_repaired, we should update rows_recorded value
|
|
1359 |
to the current amount of rows.
|
|
1360 |
*/
|
|
1361 |
share->rows_recorded= rows_repaired; |
|
1362 |
goto end; |
|
1363 |
}
|
|
1364 |
||
1365 |
/*
|
|
1366 |
Otherwise we've encountered a bad row => repair is needed.
|
|
1367 |
Let us create a temporary file.
|
|
1368 |
*/
|
|
1369 |
if ((repair_file= my_create(fn_format(repaired_fname, share->table_name, |
|
1370 |
"", CSN_EXT, |
|
1371 |
MY_REPLACE_EXT|MY_UNPACK_FILENAME), |
|
1372 |
0, O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1373 |
return(HA_ERR_CRASHED_ON_REPAIR); |
1
by brian
clean slate |
1374 |
|
1375 |
file_buff->init_buff(data_file); |
|
1376 |
||
1377 |
||
1378 |
/* we just truncated the file up to the first bad row. update rows count. */
|
|
1379 |
share->rows_recorded= rows_repaired; |
|
1380 |
||
1381 |
/* write repaired file */
|
|
1382 |
while (1) |
|
1383 |
{
|
|
1384 |
write_end= min(file_buff->end(), current_position); |
|
1385 |
if ((write_end - write_begin) && |
|
1386 |
(my_write(repair_file, (uchar*)file_buff->ptr(), |
|
1387 |
write_end - write_begin, MYF_RW))) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1388 |
return(-1); |
1
by brian
clean slate |
1389 |
|
1390 |
write_begin= write_end; |
|
1391 |
if (write_end== current_position) |
|
1392 |
break; |
|
1393 |
else
|
|
1394 |
file_buff->read_next(); /* shift the buffer */ |
|
1395 |
}
|
|
1396 |
||
1397 |
/*
|
|
1398 |
Close the files and rename repaired file to the datafile.
|
|
1399 |
We have to close the files, as on Windows one cannot rename
|
|
1400 |
a file, which descriptor is still open. EACCES will be returned
|
|
1401 |
when trying to delete the "to"-file in my_rename().
|
|
1402 |
*/
|
|
1403 |
if (my_close(data_file,MYF(0)) || my_close(repair_file, MYF(0)) || |
|
1404 |
my_rename(repaired_fname, share->data_file_name, MYF(0))) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1405 |
return(-1); |
1
by brian
clean slate |
1406 |
|
1407 |
/* Open the file again, it should now be repaired */
|
|
1408 |
if ((data_file= my_open(share->data_file_name, O_RDWR|O_APPEND, |
|
1409 |
MYF(0))) == -1) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1410 |
return(-1); |
1
by brian
clean slate |
1411 |
|
1412 |
/* Set new file size. The file size will be updated by ::update_status() */
|
|
1413 |
local_saved_data_file_length= (size_t) current_position; |
|
1414 |
||
1415 |
end: |
|
163
by Brian Aker
Merge Monty's code. |
1416 |
share->crashed= false; |
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1417 |
return(HA_ADMIN_OK); |
1
by brian
clean slate |
1418 |
}
|
1419 |
||
1420 |
/*
|
|
1421 |
DELETE without WHERE calls this
|
|
1422 |
*/
|
|
1423 |
||
1424 |
/*
  DELETE without WHERE: truncate the data file to zero length and reset
  the row counters. Returns 0 on success, HA_ERR_WRONG_COMMAND when
  called outside a completed scan (row count unknown), or -1/ftruncate's
  result on failure.
*/
int ha_tina::delete_all_rows()
{
  int rc;

  /* Only allowed when the row count is authoritative (after a scan). */
  if (!records_is_known)
    return(my_errno=HA_ERR_WRONG_COMMAND);

  /* Lazily open the shared write descriptor if needed. */
  if (!share->tina_write_opened)
    if (init_tina_writer())
      return(-1);

  /* Truncate the file to zero size */
  rc= ftruncate(share->tina_write_filedes, 0);

  stats.records=0;
  /* Update shared info */
  pthread_mutex_lock(&share->mutex);
  share->rows_recorded= 0;
  pthread_mutex_unlock(&share->mutex);
  local_saved_data_file_length= 0;
  return(rc);
}
|
1446 |
||
1447 |
/*
|
|
1448 |
Called by the database to lock the table. Keep in mind that this
|
|
1449 |
is an internal lock.
|
|
1450 |
*/
|
|
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1451 |
/*
  Register this handler's lock in the server's lock array. The requested
  lock type is adopted unless this is a TL_IGNORE probe or a type has
  already been chosen. Returns the advanced array cursor.
*/
THR_LOCK_DATA **ha_tina::store_lock(THD *thd __attribute__((unused)),
                                    THR_LOCK_DATA **to,
                                    enum thr_lock_type lock_type)
{
  const bool adopt_type= (lock_type != TL_IGNORE &&
                          lock.type == TL_UNLOCK);

  if (adopt_type)
    lock.type= lock_type;

  *to= &lock;
  return to + 1;
}
|
|
1460 |
||
1461 |
/*
|
|
1462 |
Create a table. You do not want to leave the table open after a call to
|
|
1463 |
this (the database will call ::open() if it needs to).
|
|
1464 |
*/
|
|
1465 |
||
1466 |
/*
  CREATE TABLE: verify no column is nullable (unsupported by CSV), then
  create an empty meta file (.CSM) and an empty data file (.CSV).
  Returns 0 on success, HA_ERR_UNSUPPORTED for nullable columns, or -1
  when either file cannot be created. The table is not left open.
*/
int ha_tina::create(const char *name, TABLE *table_arg,
                    HA_CREATE_INFO *create_info __attribute__((unused)))
{
  char name_buff[FN_REFLEN];
  File create_file;

  /*
    check columns: the CSV format has no NULL representation
  */
  for (Field **field= table_arg->s->field; *field; field++)
  {
    if ((*field)->real_maybe_null())
    {
      my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "nullable columns");
      return(HA_ERR_UNSUPPORTED);
    }
  }


  /* Create the meta file and write an initial header (0 rows, clean). */
  if ((create_file= my_create(fn_format(name_buff, name, "", CSM_EXT,
                                        MY_REPLACE_EXT|MY_UNPACK_FILENAME), 0,
                              O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
    return(-1);

  write_meta_file(create_file, 0, false);
  my_close(create_file, MYF(0));

  /* Create the (empty) data file. */
  if ((create_file= my_create(fn_format(name_buff, name, "", CSV_EXT,
                                        MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
                              O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
    return(-1);

  my_close(create_file, MYF(0));

  return(0);
}
|
1502 |
||
77.1.6
by Monty Taylor
CSV is clean. |
1503 |
int ha_tina::check(THD* thd, |
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1504 |
HA_CHECK_OPT* check_opt __attribute__((unused))) |
1
by brian
clean slate |
1505 |
{
|
1506 |
int rc= 0; |
|
1507 |
uchar *buf; |
|
1508 |
const char *old_proc_info; |
|
1509 |
ha_rows count= share->rows_recorded; |
|
1510 |
||
1511 |
old_proc_info= thd_proc_info(thd, "Checking table"); |
|
1512 |
if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1513 |
return(HA_ERR_OUT_OF_MEM); |
1
by brian
clean slate |
1514 |
|
1515 |
/* position buffer to the start of the file */
|
|
1516 |
if (init_data_file()) |
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1517 |
return(HA_ERR_CRASHED); |
1
by brian
clean slate |
1518 |
|
1519 |
/*
|
|
1520 |
Local_saved_data_file_length is initialized during the lock phase.
|
|
1521 |
Check does not use store_lock in certain cases. So, we set it
|
|
1522 |
manually here.
|
|
1523 |
*/
|
|
1524 |
local_saved_data_file_length= share->saved_data_file_length; |
|
1525 |
/* set current position to the beginning of the file */
|
|
1526 |
current_position= next_position= 0; |
|
1527 |
||
1528 |
init_alloc_root(&blobroot, BLOB_MEMROOT_ALLOC_SIZE, 0); |
|
1529 |
||
1530 |
/* Read the file row-by-row. If everything is ok, repair is not needed. */
|
|
1531 |
while (!(rc= find_current_row(buf))) |
|
1532 |
{
|
|
1533 |
thd_inc_row_count(thd); |
|
1534 |
count--; |
|
1535 |
current_position= next_position; |
|
1536 |
}
|
|
1537 |
||
1538 |
free_root(&blobroot, MYF(0)); |
|
1539 |
||
1540 |
my_free((char*)buf, MYF(0)); |
|
1541 |
thd_proc_info(thd, old_proc_info); |
|
1542 |
||
1543 |
if ((rc != HA_ERR_END_OF_FILE) || count) |
|
1544 |
{
|
|
163
by Brian Aker
Merge Monty's code. |
1545 |
share->crashed= true; |
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1546 |
return(HA_ADMIN_CORRUPT); |
1
by brian
clean slate |
1547 |
}
|
1548 |
else
|
|
51.3.8
by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines |
1549 |
return(HA_ADMIN_OK); |
1
by brian
clean slate |
1550 |
}
|
1551 |
||
1552 |
||
212.1.3
by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)). |
1553 |
/*
  Tell the server that ALTER TABLE options never make existing CSV data
  incompatible, so fast ALTER can be used instead of a copy.
*/
bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info __attribute__((unused)),
                                         uint table_changes __attribute__((unused)))
{
  return COMPATIBLE_DATA_YES;
}
|
|
1558 |
||
1559 |
/* Plugin registration for the CSV storage engine. */
mysql_declare_plugin(csv)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  "CSV",
  "1.0",
  "Brian Aker, MySQL AB",
  "CSV storage engine",
  PLUGIN_LICENSE_GPL,
  tina_init_func, /* Plugin Init */
  tina_done_func, /* Plugin Deinit */
  NULL,                       /* status variables                */
  NULL,                       /* system variables                */
  NULL                        /* config options                  */
}
mysql_declare_plugin_end;
|
1574 |