/* Copyright (C) 2006 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

   Library for providing TAP support for testing C and C++ was written
   by Mats Kindahl <mats@mysql.com>.
*/
22
#include "my_global.h"
/*
  Visual Studio 2003 does not know vsnprintf but knows _vsnprintf.
  We don't put this #define in config-win.h because we prefer
  my_vsnprintf everywhere instead, except when linking with libmysys
  is not desirable - the case here.
*/
36
#if defined(_MSC_VER) && ( _MSC_VER == 1310 )
37
#define vsnprintf _vsnprintf
/**
   @defgroup MyTAP_Internal MyTAP Internals

   Internal functions and data structures for the MyTAP implementation.
*/
/**
   Data structure containing all information about the test suite.

   @ingroup MyTAP_Internal
*/
53
static TEST_DATA g_test = { 0, 0, 0, "" };
/**
   Output stream for test report message.

   The macro is just a temporary solution.

   @ingroup MyTAP_Internal
*/
/**
   Emit the beginning of a test line, that is: "(not) ok", test number,
   and description.

   To emit the directive, use the emit_dir() function

   @ingroup MyTAP_Internal

   @param pass  'true' if test passed, 'false' otherwise
   @param fmt   Description of test in printf() format.
   @param ap    Vararg list for the description string above.
*/
79
vemit_tap(int pass, char const *fmt, va_list ap)
81
fprintf(tapout, "%sok %d%s",
84
(fmt && *fmt) ? " - " : "");
86
vfprintf(tapout, fmt, ap);
/**
   TAP directives are comments after that have the form:

     ok 1 # skip reason for skipping
     not ok 2 # todo some text explaining what remains

   @ingroup MyTAP_Internal

   @param dir  Directive as a string
   @param why  Explanation string
*/
106
emit_dir(const char *dir, const char *why)
108
fprintf(tapout, " # %s %s", dir, why);
/**
   Emit a newline to the TAP output stream.

   @ingroup MyTAP_Internal
*/
120
fprintf(tapout, "\n");
124
handle_core_signal(int signo)
126
BAIL_OUT("Signal %d thrown", signo);
130
BAIL_OUT(char const *fmt, ...)
134
fprintf(tapout, "Bail out! ");
135
vfprintf(tapout, fmt, ap);
143
diag(char const *fmt, ...)
147
fprintf(tapout, "# ");
148
vfprintf(tapout, fmt, ap);
153
typedef struct signal_entry {
155
void (*handler)(int);
158
static signal_entry install_signal[]= {
159
{ SIGQUIT, handle_core_signal },
160
{ SIGILL, handle_core_signal },
161
{ SIGABRT, handle_core_signal },
162
{ SIGFPE, handle_core_signal },
163
{ SIGSEGV, handle_core_signal }
165
, { SIGBUS, handle_core_signal }
168
, { SIGXCPU, handle_core_signal }
171
, { SIGXFSZ, handle_core_signal }
174
, { SIGSYS, handle_core_signal }
177
, { SIGTRAP, handle_core_signal }
182
plan(int const count)
185
Install signal handler
188
for (i= 0; i < sizeof(install_signal)/sizeof(*install_signal); ++i)
189
signal(install_signal[i].signo, install_signal[i].handler);
198
fprintf(tapout, "1..%d\n", count);
205
skip_all(char const *reason, ...)
208
va_start(ap, reason);
209
fprintf(tapout, "1..0 # skip ");
210
vfprintf(tapout, reason, ap);
216
ok(int const pass, char const *fmt, ...)
221
if (!pass && *g_test.todo == '\0')
224
vemit_tap(pass, fmt, ap);
226
if (*g_test.todo != '\0')
227
emit_dir("todo", g_test.todo);
233
skip(int how_many, char const *const fmt, ...)
240
vsnprintf(reason, sizeof(reason), fmt, ap);
246
while (how_many-- > 0)
249
memset((char*) &ap, 0, sizeof(ap)); /* Keep compiler happy */
250
vemit_tap(1, NULL, ap);
251
emit_dir("skip", reason);
257
todo_start(char const *message, ...)
260
va_start(ap, message);
261
vsnprintf(g_test.todo, sizeof(g_test.todo), message, ap);
273
If there were no plan, we write one last instead.
275
if (g_test.plan == NO_PLAN)
278
if (g_test.plan != g_test.last)
280
diag("%d tests planned but%s %d executed",
281
g_test.plan, (g_test.plan > g_test.last ? " only" : ""), g_test.last);
285
if (g_test.failed > 0)
287
diag("Failed %d tests!", g_test.failed);
/**
   @mainpage Testing C and C++ using MyTAP

   @section IntroSec Introduction

   Unit tests are used to test individual components of a system.  In
   contrast, functional tests usually test the entire system.  The
   rationale is that each component should be correct if the system is
   to be correct.  Unit tests are usually small pieces of code that
   tests an individual function, class, a module, or other unit of the
   code.

   Observe that a correctly functioning system can be built from
   "faulty" components.  The problem with this approach is that as the
   system evolves, the bugs surface in unexpected ways, making
   maintenance harder.

   The advantages of using unit tests to test components of the system
   are, among others:

   - The unit tests can make a more thorough testing than the
     functional tests by testing correctness even for pathological use
     (which shouldn't be present in the system).  This increases the
     overall robustness of the system and makes maintenance easier.

   - It is easier and faster to find problems with a malfunctioning
     component than to find problems in a malfunctioning system.  This
     shortens the compile-run-edit cycle and therefore improves the
     overall performance of development.

   - The component has to support at least two uses: in the system and
     in a unit test.  This leads to more generic and stable interfaces
     and in addition promotes the development of reusable components.
   For example, the following are typical functional tests:
   - Does transactions work according to specifications?
   - Can we connect a client to the server and execute statements?

   In contrast, the following are typical unit tests:

   - Can the 'String' class handle a specified list of character sets?
   - Does all operations for 'my_bitmap' produce the correct result?
   - Does all the NIST test vectors for the AES implementation encrypt
     and decrypt correctly?

   @section UnitTest Writing unit tests

   The purpose of writing unit tests is to use them to drive component
   development towards a solution that passes the tests.  This means
   that the unit tests has to be as complete as possible, testing at
   least:
   @subsection NormalSubSec Normal input

   This is to test that the component have the expected behaviour.
   This is just plain simple: test that it works.  For example, test
   that you can unpack what you packed, adding gives the sum, pincing
   the duck makes it quack.

   This is what everybody does when they write tests.

   @subsection BorderlineTests Borderline cases

   If you have a size anywhere for your component, does it work for
   size 1? Size 0? Sizes close to <code>UINT_MAX</code>?

   It might not be sensible to have a size 0, so in this case it is
   not a borderline case, but rather a faulty input (see @ref
   FaultyInputTests).

   @subsection FaultyInputTests Faulty input

   Does your bitmap handle 0 bits size? Well, it might not be designed
   for it, but is should <em>not</em> crash the application, but
   rather produce an error.  This is called defensive programming.

   Unfortunately, adding checks for values that should just not be
   entered at all is not always practical: the checks cost cycles and
   might cost more than it's worth.  For example, some functions are
   designed so that you may not give it a null pointer.  In those
   cases it's not sensible to pass it <code>NULL</code> just to see it
   crash.

   Since every experienced programmer add an <code>assert()</code> to
   ensure that you get a proper failure for the debug builds when a
   null pointer passed (you add asserts too, right?), you will in this
   case instead have a controlled (early) crash in the debug build.
   @subsection ErrorHandlingTests Error handling

   This is testing that the errors your component is designed to give
   actually are produced.  For example, testing that trying to open a
   non-existing file produces a sensible error code.

   @subsection BadEnvironmentTests Environment

   Sometimes, modules has to behave well even when the environment
   fails to work correctly.  Typical examples are when the computer is
   out of dynamic memory or when the disk is full.  You can emulate
   this by replacing, e.g., <code>malloc()</code> with your own
   version that will work for a while, but then fail.  Some things are
   worth to keep in mind here:

   - Make sure to make the function fail deterministically, so that
     you really can repeat the test.

   - Make sure that it doesn't just fail immediately.  The unit might
     have checks for the first case, but might actually fail some time
     later in the execution.

   @section UnitTest How to structure a unit test

   In this section we will give some advice on how to structure the
   unit tests to make the development run smoothly.  The basic
   structure of a test is:
   @subsection TestPlanning Plan the test

   Planning the test means telling how many tests there are.  In the
   event that one of the tests causes a crash, it is then possible to
   see that there are fewer tests than expected, and print a proper
   error message.

   To plan a test, use the @c plan() function in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     ...
   }
   @endcode

   If you don't call the @c plan() function, the number of tests
   executed will be printed at the end.  This is intended to be used
   while developing the unit and you are constantly adding tests.  It
   is not indented to be used after the unit has been released.

   @subsection TestRunning Execute the test

   To report the status of a test, the @c ok() function is used in the
   following manner:

   @code
   int main(int argc, char *argv[])
   {
     ...
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
     ...
   }
   @endcode

   This will print a test result line on the standard output in TAP
   format, which allows TAP handling frameworks (like Test::Harness)
   to parse the status of the test.

   @subsection TestReport Report the result of the test

   At the end, a complete test report should be written, with some
   statistics.  If the test returns EXIT_SUCCESS, all tests were
   successfull, otherwise at least one test failed.

   To get a TAP complient output and exit status, report the exit
   status in the following manner:

   @code
   int main(int argc, char *argv[])
   {
     ...
     ok(ducks == paddling_ducks,
        "%d ducks did not paddle", ducks - paddling_ducks);
     ...
     return exit_status();
   }
   @endcode
   @section DontDoThis Ways to not do unit testing

   In this section, we'll go through some quite common ways to write
   tests that are <em>not</em> a good idea.

   @subsection BreadthFirstTests Doing breadth-first testing

   If you're writing a library with several functions, don't test all
   functions using size 1, then all functions using size 2, etc.  If a
   test for size 42 fails, you have no easy way of tracking down why
   it failed.

   It is better to concentrate on getting one function to work at a
   time, which means that you test each function for all sizes that
   you think is reasonable.  Then you continue with the next function,
   doing the same.  This is usually also the way that a library is
   developed (one function at a time) so stick to testing that is
   appropriate for now the unit is developed.

   @subsection JustToBeSafeTest Writing unnecessarily large tests

   Don't write tests that use parameters in the range 1-1024 unless
   you have a very good reason to belive that the component will
   succeed for 562 but fail for 564 (the numbers picked are just
   examples).

   It is very common to write extensive tests "just to be safe."
   Having a test suite with a lot of values might give you a warm
   fuzzy feeling, but it doesn't really help you find the bugs.  Good
   tests fail; seriously, if you write a test that you expect to
   succeed, you don't need to write it.  If you think that it
   <em>might</em> fail, <em>then</em> you should write it.

   Don't take this as an excuse to avoid writing any tests at all
   "since I make no mistakes" (when it comes to this, there are two
   kinds of people: those who admit they make mistakes, and those who
   don't); rather, this means that there is no reason to test that
   using a buffer with size 100 works when you have a test for buffer
   sizes nearby.

   The drawback is that the test suite takes longer to run, for little
   or no benefit.  It is acceptable to do a exhaustive test if it
   doesn't take too long to run and it is quite common to do an
   exhaustive test of a function for a small set of values.
   Use your judgment to decide what is excessive: your milage may
   vary.
   This is an simple example of how to write a test using the
   library.  The output of this program is:

     # Testing basic functions

   The basic structure is: plan the number of test points using the
   plan() function, perform the test and write out the result of each
   test point using the ok() function, print out a diagnostics message
   using diag(), and report the result of the test by calling the
   exit_status() function.  Observe that this test does excessive
   testing (see @ref JustToBeSafeTest), but the test point doesn't
   take long to run.

   This example demonstrates how to use the <code>todo_start()</code>
   and <code>todo_end()</code> function to mark a sequence of tests to
   be done.  Observe that the tests are assumed to fail: if any test
   succeeds, it is considered a "bonus".

   This is an example of how the <code>SKIP_BLOCK_IF</code> can be
   used to skip a predetermined number of tests.  Observe that the
   macro actually skips the following statement, but it's not sensible
   to use anything than a block.

   @example skip_all.t.c

   Sometimes, you skip an entire test because it's testing a feature
   that doesn't exist on the system that you're testing.  To skip an
   entire test, use the <code>skip_all()</code> function according to
   this example.