/* Copyright (C) 2006 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

   Library for providing TAP support for testing C and C++ was written
   by Mats Kindahl <mats@mysql.com>.
*/
#include "tap.h"

#include "my_global.h"

#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <signal.h>
/*
  Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
  We don't put this #define in config-win.h because we prefer
  my_vsnprintf everywhere instead, except when linking with libmysys
  is not desirable - the case here.
*/
#if defined(_MSC_VER) && ( _MSC_VER == 1310 )
#define vsnprintf _vsnprintf
#endif
/**
  @defgroup MyTAP_Internal MyTAP Internals

  Internal functions and data structures for the MyTAP implementation.
*/

/**
  Test data structure.

  Data structure containing all information about the test suite.

  @ingroup MyTAP_Internal
*/
static TEST_DATA g_test = { 0, 0, 0, "" };
/**
  Output stream for test report message.

  The macro is just a temporary solution.

  @ingroup MyTAP_Internal
*/
#define tapout stdout

/**
  Emit the beginning of a test line, that is: "(not) ok", test number,
  and description.

  To emit the directive, use the emit_dir() function

  @ingroup MyTAP_Internal

  @see emit_dir

  @param pass  'true' if test passed, 'false' otherwise
  @param fmt   Description of test in printf() format.
  @param ap    Vararg list for the description string above.
*/
static void |
|
79 |
vemit_tap(int pass, char const *fmt, va_list ap) |
|
80 |
{
|
|
81 |
fprintf(tapout, "%sok %d%s", |
|
82 |
pass ? "" : "not ", |
|
83 |
++g_test.last, |
|
84 |
(fmt && *fmt) ? " - " : ""); |
|
85 |
if (fmt && *fmt) |
|
86 |
vfprintf(tapout, fmt, ap); |
|
87 |
}
|
|
/**
  Emit a TAP directive.

  TAP directives are comments after that have the form:

  @code
  ok 1 # skip reason for skipping
  not ok 2 # todo some text explaining what remains
  @endcode

  @ingroup MyTAP_Internal

  @param dir  Directive as a string
  @param why  Explanation string
*/
static void |
|
106 |
emit_dir(const char *dir, const char *why) |
|
107 |
{
|
|
108 |
fprintf(tapout, " # %s %s", dir, why); |
|
109 |
}
|
|
/**
  Emit a newline to the TAP output stream.

  @ingroup MyTAP_Internal
*/
static void |
|
118 |
emit_endl() |
|
119 |
{
|
|
120 |
fprintf(tapout, "\n"); |
|
121 |
}
|
|
122 |
||
123 |
static void |
|
124 |
handle_core_signal(int signo) |
|
125 |
{
|
|
126 |
BAIL_OUT("Signal %d thrown", signo); |
|
127 |
}
|
|
128 |
||
129 |
void
|
|
130 |
BAIL_OUT(char const *fmt, ...) |
|
131 |
{
|
|
132 |
va_list ap; |
|
133 |
va_start(ap, fmt); |
|
134 |
fprintf(tapout, "Bail out! "); |
|
135 |
vfprintf(tapout, fmt, ap); |
|
136 |
emit_endl(); |
|
137 |
va_end(ap); |
|
138 |
exit(255); |
|
139 |
}
|
|
140 |
||
141 |
||
142 |
void
|
|
143 |
diag(char const *fmt, ...) |
|
144 |
{
|
|
145 |
va_list ap; |
|
146 |
va_start(ap, fmt); |
|
147 |
fprintf(tapout, "# "); |
|
148 |
vfprintf(tapout, fmt, ap); |
|
149 |
emit_endl(); |
|
150 |
va_end(ap); |
|
151 |
}
|
|
152 |
||
153 |
typedef struct signal_entry { |
|
154 |
int signo; |
|
155 |
void (*handler)(int); |
|
156 |
} signal_entry; |
|
157 |
||
158 |
static signal_entry install_signal[]= { |
|
159 |
{ SIGQUIT, handle_core_signal }, |
|
160 |
{ SIGILL, handle_core_signal }, |
|
161 |
{ SIGABRT, handle_core_signal }, |
|
162 |
{ SIGFPE, handle_core_signal }, |
|
163 |
{ SIGSEGV, handle_core_signal } |
|
164 |
#ifdef SIGBUS
|
|
165 |
, { SIGBUS, handle_core_signal } |
|
166 |
#endif
|
|
167 |
#ifdef SIGXCPU
|
|
168 |
, { SIGXCPU, handle_core_signal } |
|
169 |
#endif
|
|
170 |
#ifdef SIGXCPU
|
|
171 |
, { SIGXFSZ, handle_core_signal } |
|
172 |
#endif
|
|
173 |
#ifdef SIGXCPU
|
|
174 |
, { SIGSYS, handle_core_signal } |
|
175 |
#endif
|
|
176 |
#ifdef SIGXCPU
|
|
177 |
, { SIGTRAP, handle_core_signal } |
|
178 |
#endif
|
|
179 |
};
|
|
180 |
||
181 |
void
|
|
182 |
plan(int const count) |
|
183 |
{
|
|
184 |
/*
|
|
185 |
Install signal handler
|
|
186 |
*/
|
|
187 |
size_t i; |
|
188 |
for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i) |
|
189 |
signal(install_signal[i].signo, install_signal[i].handler); |
|
190 |
||
191 |
g_test.plan= count; |
|
192 |
switch (count) |
|
193 |
{
|
|
194 |
case NO_PLAN: |
|
195 |
break; |
|
196 |
default: |
|
197 |
if (count > 0) |
|
198 |
fprintf(tapout, "1..%d\n", count); |
|
199 |
break; |
|
200 |
}
|
|
201 |
}
|
|
202 |
||
203 |
||
204 |
void
|
|
205 |
skip_all(char const *reason, ...) |
|
206 |
{
|
|
207 |
va_list ap; |
|
208 |
va_start(ap, reason); |
|
209 |
fprintf(tapout, "1..0 # skip "); |
|
210 |
vfprintf(tapout, reason, ap); |
|
211 |
va_end(ap); |
|
212 |
exit(0); |
|
213 |
}
|
|
214 |
||
215 |
void
|
|
216 |
ok(int const pass, char const *fmt, ...) |
|
217 |
{
|
|
218 |
va_list ap; |
|
219 |
va_start(ap, fmt); |
|
220 |
||
221 |
if (!pass && *g_test.todo == '\0') |
|
222 |
++g_test.failed; |
|
223 |
||
224 |
vemit_tap(pass, fmt, ap); |
|
225 |
va_end(ap); |
|
226 |
if (*g_test.todo != '\0') |
|
227 |
emit_dir("todo", g_test.todo); |
|
228 |
emit_endl(); |
|
229 |
}
|
|
230 |
||
231 |
||
232 |
void
|
|
233 |
skip(int how_many, char const *const fmt, ...) |
|
234 |
{
|
|
235 |
char reason[80]; |
|
236 |
if (fmt && *fmt) |
|
237 |
{
|
|
238 |
va_list ap; |
|
239 |
va_start(ap, fmt); |
|
240 |
vsnprintf(reason, sizeof(reason), fmt, ap); |
|
241 |
va_end(ap); |
|
242 |
}
|
|
243 |
else
|
|
244 |
reason[0] = '\0'; |
|
245 |
||
246 |
while (how_many-- > 0) |
|
247 |
{
|
|
248 |
va_list ap; |
|
249 |
memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */ |
|
250 |
vemit_tap(1, NULL, ap); |
|
251 |
emit_dir("skip", reason); |
|
252 |
emit_endl(); |
|
253 |
}
|
|
254 |
}
|
|
255 |
||
256 |
void
|
|
257 |
todo_start(char const *message, ...) |
|
258 |
{
|
|
259 |
va_list ap; |
|
260 |
va_start(ap, message); |
|
261 |
vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap); |
|
262 |
va_end(ap); |
|
263 |
}
|
|
264 |
||
265 |
void
|
|
266 |
todo_end() |
|
267 |
{
|
|
268 |
*g_test.todo = '\0'; |
|
269 |
}
|
|
270 |
||
271 |
int exit_status() { |
|
272 |
/*
|
|
273 |
If there were no plan, we write one last instead.
|
|
274 |
*/
|
|
275 |
if (g_test.plan == NO_PLAN) |
|
276 |
plan(g_test.last); |
|
277 |
||
278 |
if (g_test.plan != g_test.last) |
|
279 |
{
|
|
280 |
diag("%d tests planned but%s %d executed", |
|
281 |
g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last); |
|
282 |
return EXIT_FAILURE; |
|
283 |
}
|
|
284 |
||
285 |
if (g_test.failed > 0) |
|
286 |
{
|
|
287 |
diag("Failed %d tests!", g_test.failed); |
|
288 |
return EXIT_FAILURE; |
|
289 |
}
|
|
290 |
||
291 |
return EXIT_SUCCESS; |
|
292 |
}
|
|
293 |
||
294 |
/**
|
|
295 |
@mainpage Testing C and C++ using MyTAP
|
|
296 |
||
297 |
@section IntroSec Introduction
|
|
298 |
||
299 |
Unit tests are used to test individual components of a system. In
|
|
300 |
contrast, functional tests usually test the entire system. The
|
|
301 |
rationale is that each component should be correct if the system is
|
|
302 |
to be correct. Unit tests are usually small pieces of code that
|
|
303 |
tests an individual function, class, a module, or other unit of the
|
|
304 |
code.
|
|
305 |
||
306 |
Observe that a correctly functioning system can be built from
|
|
307 |
"faulty" components. The problem with this approach is that as the
|
|
308 |
system evolves, the bugs surface in unexpected ways, making
|
|
309 |
maintenance harder.
|
|
310 |
||
311 |
The advantages of using unit tests to test components of the system
|
|
312 |
are several:
|
|
313 |
||
314 |
- The unit tests can make a more thorough testing than the
|
|
315 |
functional tests by testing correctness even for pathological use
|
|
316 |
(which shouldn't be present in the system). This increases the
|
|
317 |
overall robustness of the system and makes maintenance easier.
|
|
318 |
||
319 |
- It is easier and faster to find problems with a malfunctioning
|
|
320 |
component than to find problems in a malfunctioning system. This
|
|
321 |
shortens the compile-run-edit cycle and therefore improves the
|
|
322 |
overall performance of development.
|
|
323 |
||
324 |
- The component has to support at least two uses: in the system and
|
|
325 |
in a unit test. This leads to more generic and stable interfaces
|
|
326 |
and in addition promotes the development of reusable components.
|
|
327 |
||
328 |
For example, the following are typical functional tests:
|
|
329 |
- Does transactions work according to specifications?
|
|
330 |
- Can we connect a client to the server and execute statements?
|
|
331 |
||
332 |
In contrast, the following are typical unit tests:
|
|
333 |
||
334 |
- Can the 'String' class handle a specified list of character sets?
|
|
335 |
- Does all operations for 'my_bitmap' produce the correct result?
|
|
336 |
- Does all the NIST test vectors for the AES implementation encrypt
|
|
337 |
correctly?
|
|
338 |
||
339 |
||
340 |
@section UnitTest Writing unit tests
|
|
341 |
||
342 |
The purpose of writing unit tests is to use them to drive component
|
|
343 |
development towards a solution that passes the tests. This means that the
|
|
344 |
unit tests has to be as complete as possible, testing at least:
|
|
345 |
||
346 |
- Normal input
|
|
347 |
- Borderline cases
|
|
348 |
- Faulty input
|
|
349 |
- Error handling
|
|
350 |
- Bad environment
|
|
351 |
||
352 |
@subsection NormalSubSec Normal input
|
|
353 |
||
354 |
This is to test that the component have the expected behaviour.
|
|
355 |
This is just plain simple: test that it works. For example, test
|
|
356 |
that you can unpack what you packed, adding gives the sum, pincing
|
|
357 |
the duck makes it quack.
|
|
358 |
||
359 |
This is what everybody does when they write tests.
|
|
360 |
||
361 |
||
362 |
@subsection BorderlineTests Borderline cases
|
|
363 |
||
364 |
If you have a size anywhere for your component, does it work for
|
|
365 |
size 1? Size 0? Sizes close to <code>UINT_MAX</code>?
|
|
366 |
||
367 |
It might not be sensible to have a size 0, so in this case it is
|
|
368 |
not a borderline case, but rather a faulty input (see @ref
|
|
369 |
FaultyInputTests).
|
|
370 |
||
371 |
||
372 |
@subsection FaultyInputTests Faulty input
|
|
373 |
||
374 |
Does your bitmap handle 0 bits size? Well, it might not be designed
|
|
375 |
for it, but is should <em>not</em> crash the application, but
|
|
376 |
rather produce an error. This is called defensive programming.
|
|
377 |
||
378 |
Unfortunately, adding checks for values that should just not be
|
|
379 |
entered at all is not always practical: the checks cost cycles and
|
|
380 |
might cost more than it's worth. For example, some functions are
|
|
381 |
designed so that you may not give it a null pointer. In those
|
|
382 |
cases it's not sensible to pass it <code>NULL</code> just to see it
|
|
383 |
crash.
|
|
384 |
||
385 |
Since every experienced programmer add an <code>assert()</code> to
|
|
386 |
ensure that you get a proper failure for the debug builds when a
|
|
387 |
null pointer passed (you add asserts too, right?), you will in this
|
|
388 |
case instead have a controlled (early) crash in the debug build.
|
|
389 |
||
390 |
||
391 |
@subsection ErrorHandlingTests Error handling
|
|
392 |
||
393 |
This is testing that the errors your component is designed to give
|
|
394 |
actually are produced. For example, testing that trying to open a
|
|
395 |
non-existing file produces a sensible error code.
|
|
396 |
||
397 |
||
398 |
@subsection BadEnvironmentTests Environment
|
|
399 |
||
400 |
Sometimes, modules has to behave well even when the environment
|
|
401 |
fails to work correctly. Typical examples are when the computer is
|
|
402 |
out of dynamic memory or when the disk is full. You can emulate
|
|
403 |
this by replacing, e.g., <code>malloc()</code> with your own
|
|
404 |
version that will work for a while, but then fail. Some things are
|
|
405 |
worth to keep in mind here:
|
|
406 |
||
407 |
- Make sure to make the function fail deterministically, so that
|
|
408 |
you really can repeat the test.
|
|
409 |
||
410 |
- Make sure that it doesn't just fail immediately. The unit might
|
|
411 |
have checks for the first case, but might actually fail some time
|
|
412 |
in the near future.
|
|
413 |
||
414 |
||
415 |
@section UnitTest How to structure a unit test
|
|
416 |
||
417 |
In this section we will give some advice on how to structure the
|
|
418 |
unit tests to make the development run smoothly. The basic
|
|
419 |
structure of a test is:
|
|
420 |
||
421 |
- Plan
|
|
422 |
- Test
|
|
423 |
- Report
|
|
424 |
||
425 |
||
426 |
@subsection TestPlanning Plan the test
|
|
427 |
||
428 |
Planning the test means telling how many tests there are. In the
|
|
429 |
event that one of the tests causes a crash, it is then possible to
|
|
430 |
see that there are fewer tests than expected, and print a proper
|
|
431 |
error message.
|
|
432 |
||
433 |
To plan a test, use the @c plan() function in the following manner:
|
|
434 |
||
435 |
@code
|
|
436 |
int main(int argc, char *argv[])
|
|
437 |
{
|
|
438 |
plan(5);
|
|
439 |
.
|
|
440 |
.
|
|
441 |
.
|
|
442 |
}
|
|
443 |
@endcode
|
|
444 |
||
445 |
If you don't call the @c plan() function, the number of tests
|
|
446 |
executed will be printed at the end. This is intended to be used
|
|
447 |
while developing the unit and you are constantly adding tests. It
|
|
448 |
is not indented to be used after the unit has been released.
|
|
449 |
||
450 |
||
451 |
@subsection TestRunning Execute the test
|
|
452 |
||
453 |
To report the status of a test, the @c ok() function is used in the
|
|
454 |
following manner:
|
|
455 |
||
456 |
@code
|
|
457 |
int main(int argc, char *argv[])
|
|
458 |
{
|
|
459 |
plan(5);
|
|
460 |
ok(ducks == paddling_ducks,
|
|
461 |
"%d ducks did not paddle", ducks - paddling_ducks);
|
|
462 |
.
|
|
463 |
.
|
|
464 |
.
|
|
465 |
}
|
|
466 |
@endcode
|
|
467 |
||
468 |
This will print a test result line on the standard output in TAP
|
|
469 |
format, which allows TAP handling frameworks (like Test::Harness)
|
|
470 |
to parse the status of the test.
|
|
471 |
||
472 |
@subsection TestReport Report the result of the test
|
|
473 |
||
474 |
At the end, a complete test report should be written, with some
|
|
475 |
statistics. If the test returns EXIT_SUCCESS, all tests were
|
|
476 |
successfull, otherwise at least one test failed.
|
|
477 |
||
478 |
To get a TAP complient output and exit status, report the exit
|
|
479 |
status in the following manner:
|
|
480 |
||
481 |
@code
|
|
482 |
int main(int argc, char *argv[])
|
|
483 |
{
|
|
484 |
plan(5);
|
|
485 |
ok(ducks == paddling_ducks,
|
|
486 |
"%d ducks did not paddle", ducks - paddling_ducks);
|
|
487 |
.
|
|
488 |
.
|
|
489 |
.
|
|
490 |
return exit_status();
|
|
491 |
}
|
|
492 |
@endcode
|
|
493 |
||
494 |
@section DontDoThis Ways to not do unit testing
|
|
495 |
||
496 |
In this section, we'll go through some quite common ways to write
|
|
497 |
tests that are <em>not</em> a good idea.
|
|
498 |
||
499 |
@subsection BreadthFirstTests Doing breadth-first testing
|
|
500 |
||
501 |
If you're writing a library with several functions, don't test all
|
|
502 |
functions using size 1, then all functions using size 2, etc. If a
|
|
503 |
test for size 42 fails, you have no easy way of tracking down why
|
|
504 |
it failed.
|
|
505 |
||
506 |
It is better to concentrate on getting one function to work at a
|
|
507 |
time, which means that you test each function for all sizes that
|
|
508 |
you think is reasonable. Then you continue with the next function,
|
|
509 |
doing the same. This is usually also the way that a library is
|
|
510 |
developed (one function at a time) so stick to testing that is
|
|
511 |
appropriate for now the unit is developed.
|
|
512 |
||
513 |
@subsection JustToBeSafeTest Writing unnecessarily large tests
|
|
514 |
||
515 |
Don't write tests that use parameters in the range 1-1024 unless
|
|
516 |
you have a very good reason to belive that the component will
|
|
517 |
succeed for 562 but fail for 564 (the numbers picked are just
|
|
518 |
examples).
|
|
519 |
||
520 |
It is very common to write extensive tests "just to be safe."
|
|
521 |
Having a test suite with a lot of values might give you a warm
|
|
522 |
fuzzy feeling, but it doesn't really help you find the bugs. Good
|
|
523 |
tests fail; seriously, if you write a test that you expect to
|
|
524 |
succeed, you don't need to write it. If you think that it
|
|
525 |
<em>might</em> fail, <em>then</em> you should write it.
|
|
526 |
||
527 |
Don't take this as an excuse to avoid writing any tests at all
|
|
528 |
"since I make no mistakes" (when it comes to this, there are two
|
|
529 |
kinds of people: those who admit they make mistakes, and those who
|
|
530 |
don't); rather, this means that there is no reason to test that
|
|
531 |
using a buffer with size 100 works when you have a test for buffer
|
|
532 |
size 96.
|
|
533 |
||
534 |
The drawback is that the test suite takes longer to run, for little
|
|
535 |
or no benefit. It is acceptable to do a exhaustive test if it
|
|
536 |
doesn't take too long to run and it is quite common to do an
|
|
537 |
exhaustive test of a function for a small set of values.
|
|
538 |
Use your judgment to decide what is excessive: your milage may
|
|
539 |
vary.
|
|
540 |
*/
/**
|
|
543 |
@example simple.t.c
|
|
544 |
||
545 |
This is an simple example of how to write a test using the
|
|
546 |
library. The output of this program is:
|
|
547 |
||
548 |
@code
|
|
549 |
1..1
|
|
550 |
# Testing basic functions
|
|
551 |
ok 1 - Testing gcs()
|
|
552 |
@endcode
|
|
553 |
||
554 |
The basic structure is: plan the number of test points using the
|
|
555 |
plan() function, perform the test and write out the result of each
|
|
556 |
test point using the ok() function, print out a diagnostics message
|
|
557 |
using diag(), and report the result of the test by calling the
|
|
558 |
exit_status() function. Observe that this test does excessive
|
|
559 |
testing (see @ref JustToBeSafeTest), but the test point doesn't
|
|
560 |
take very long time.
|
|
561 |
*/
/**
|
|
564 |
@example todo.t.c
|
|
565 |
||
566 |
This example demonstrates how to use the <code>todo_start()</code>
|
|
567 |
and <code>todo_end()</code> function to mark a sequence of tests to
|
|
568 |
be done. Observe that the tests are assumed to fail: if any test
|
|
569 |
succeeds, it is considered a "bonus".
|
|
570 |
*/
/**
|
|
573 |
@example skip.t.c
|
|
574 |
||
575 |
This is an example of how the <code>SKIP_BLOCK_IF</code> can be
|
|
576 |
used to skip a predetermined number of tests. Observe that the
|
|
577 |
macro actually skips the following statement, but it's not sensible
|
|
578 |
to use anything than a block.
|
|
579 |
*/
/**
|
|
582 |
@example skip_all.t.c
|
|
583 |
||
584 |
Sometimes, you skip an entire test because it's testing a feature
|
|
585 |
that doesn't exist on the system that you're testing. To skip an
|
|
586 |
entire test, use the <code>skip_all()</code> function according to
|
|
587 |
this example.
|
|
588 |
*/