Add more exhaustive db tests that include higher bucket sizes

christos 2015-11-18 18:35:35 +00:00
parent 14fcefd52e
commit 2bfc5fd1e4
3 changed files with 242 additions and 27 deletions

tests/lib/libc/db/Makefile

@ -1,4 +1,4 @@
# $NetBSD: Makefile,v 1.2 2015/06/22 19:06:05 christos Exp $
# $NetBSD: Makefile,v 1.3 2015/11/18 18:35:35 christos Exp $
.include <bsd.own.mk>
@ -12,6 +12,7 @@ BINDIR= ${TESTSDIR}
MKMAN= no
PROGS+= h_db
PROGS+= h_lfsr
FILESDIR= ${TESTSDIR}

tests/lib/libc/db/h_lfsr.c (new file, 179 lines)

@ -0,0 +1,179 @@
/*-
* Copyright (c) 2015 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Christos Zoulas.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: h_lfsr.c,v 1.1 2015/11/18 18:35:35 christos Exp $");

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

#include <db.h>

#define MAXKEY	0xffff

#ifdef DEBUG
#define DPRINTF(...)	printf(__VA_ARGS__)
#else
#define DPRINTF(...)
#endif

/*
 * 16-bit Galois LFSR (tap mask 0xB400); starting from a non-zero seed it
 * produces a deterministic pseudo-random sequence of key/value sizes and
 * fill bytes for the test.
 */
static uint16_t
next(uint16_t *cur)
{
	uint16_t lsb = *cur & 1;

	*cur >>= 1;
	*cur ^= (-lsb) & 0xB400u;
	return *cur;
}

int
main(int argc, char *argv[])
{
	char buf[65536];
	char kb[256];
	DBT key, val;
	DB *db;
	HASHINFO hi;
	uint8_t c;
	uint16_t len;
	uint32_t pagesize = atoi(argv[1]);	/* bucket size under test */

	memset(&hi, 0, sizeof(hi));
	memset(buf, 'a', sizeof(buf));
	hi.bsize = pagesize;
	hi.nelem = 65536;
	hi.ffactor = 128;

	key.data = kb;
	val.data = buf;

	/* In-memory hash database with the requested bucket size. */
	db = dbopen(NULL, O_CREAT|O_TRUNC|O_RDWR, 0, DB_HASH, &hi);
	if (db == NULL)
		err(EXIT_FAILURE, "dbopen");

	/* Insert MAXKEY records; every LFSR state yields a distinct key. */
	len = 0xaec1;
	for (size_t i = 0; i < MAXKEY; i++) {
		key.size = (len & 0xff) + 1;
		c = len >> 8;
		memset(kb, c, key.size);
		val.size = (next(&len) & 0xff) + 1;
		switch ((*db->put)(db, &key, &val, R_NOOVERWRITE)) {
		case 0:
			DPRINTF("put %zu %zu %#x\n",
			    key.size, val.size, c);
			break;
		case -1:
			err(EXIT_FAILURE, "put error %zu %zu %#x",
			    key.size, val.size, c);
		case 1:
			errx(EXIT_FAILURE, "put overwrite %zu %zu %#x",
			    key.size, val.size, c);
		default:
			abort();
		}
	}

	/* Replay the sequence and verify every record can be fetched. */
	len = 0xaec1;
	for (size_t i = 0; i < MAXKEY; i++) {
		key.size = (len & 0xff) + 1;
		c = len >> 8;
		memset(kb, c, key.size);
		next(&len);
		switch ((*db->get)(db, &key, &val, 0)) {
		case 0:
			DPRINTF("get %zu %zu %#x\n",
			    key.size, val.size, c);
			break;
		case -1:
			err(EXIT_FAILURE, "get %zu %zu %#x",
			    key.size, val.size, c);
		case 1:
			errx(EXIT_FAILURE, "get not found %zu %zu %#x",
			    key.size, val.size, c);
		default:
			abort();
		}
		if (memcmp(key.data, kb, key.size) != 0)
			errx(EXIT_FAILURE, "get badkey %zu %zu %#x",
			    key.size, val.size, c);
		if (val.size != (len & 0xff) + 1U)
			errx(EXIT_FAILURE, "get badvallen %zu %zu %#x",
			    key.size, val.size, c);
		if (memcmp(val.data, buf, val.size) != 0)
			errx(EXIT_FAILURE, "get badval %zu %zu %#x",
			    key.size, val.size, c);
	}

	/* Delete every record. */
	len = 0xaec1;
	for (size_t i = 0; i < MAXKEY; i++) {
		key.size = (len & 0xff) + 1;
		c = len >> 8;
		memset(kb, c, key.size);
		next(&len);
		switch ((*db->del)(db, &key, 0)) {
		case 0:
			DPRINTF("del %zu %zu %#x\n",
			    key.size, val.size, c);
			break;
		case -1:
			err(EXIT_FAILURE, "del %zu %zu %#x", key.size,
			    val.size, c);
		case 1:
			errx(EXIT_FAILURE, "del not found %zu %zu %#x",
			    key.size, val.size, c);
		default:
			abort();
		}
	}

	/* Finally make sure none of the deleted records can be found. */
	len = 0xaec1;
	for (size_t i = 0; i < MAXKEY; i++) {
		key.size = (len & 0xff) + 1;
		c = len >> 8;
		memset(kb, c, key.size);
		next(&len);
		switch ((*db->get)(db, &key, &val, 0)) {
		case 0:
			errx(EXIT_FAILURE, "get2 found %zu %zu %#x",
			    key.size, val.size, c);
			break;
		case -1:
			err(EXIT_FAILURE, "get2 %zu %zu %#x",
			    key.size, val.size, c);
		case 1:
			DPRINTF("get2 %zu %zu %#x\n",
			    key.size, val.size, c);
			break;
		default:
			abort();
		}
	}
	return 0;
}
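
Why R_NOOVERWRITE above can never legitimately fire: next() is a maximal-length 16-bit Galois LFSR (tap mask 0xB400), so starting from the non-zero seed 0xaec1 it visits all 65535 non-zero states before repeating, and each state maps to a distinct (key length, fill byte) pair, i.e. a distinct key. A minimal standalone sketch of that period check (not part of the commit; lfsr_next simply mirrors next() above):

#include <stdint.h>
#include <stdio.h>

/* Same generator as next() in h_lfsr.c: 16-bit Galois LFSR, taps 0xB400. */
static uint16_t
lfsr_next(uint16_t *cur)
{
	uint16_t lsb = *cur & 1;

	*cur >>= 1;
	*cur ^= (-lsb) & 0xB400u;
	return *cur;
}

int
main(void)
{
	uint16_t state = 0xaec1;	/* same seed as h_lfsr.c */
	unsigned period = 0;

	/* Count steps until the seed state recurs. */
	do {
		lfsr_next(&state);
		period++;
	} while (state != 0xaec1);

	printf("period = %u\n", period);	/* 65535 == MAXKEY */
	return 0;
}

Built as a scratch program, it prints period = 65535, which matches MAXKEY and is why each of the MAXKEY put operations inserts a previously unseen key.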

tests/lib/libc/db/t_db.sh

@ -1,4 +1,4 @@
# $NetBSD: t_db.sh,v 1.5 2015/02/26 13:00:26 martin Exp $
# $NetBSD: t_db.sh,v 1.6 2015/11/18 18:35:35 christos Exp $
#
# Copyright (c) 2008 The NetBSD Foundation, Inc.
# All rights reserved.
@ -25,11 +25,16 @@
# POSSIBILITY OF SUCH DAMAGE.
#
prog()
prog_db()
{
echo $(atf_get_srcdir)/h_db
}
prog_lfsr()
{
echo $(atf_get_srcdir)/h_lfsr
}
dict()
{
if [ -f /usr/share/dict/words ]; then
@ -66,7 +71,7 @@ small_btree_body()
echo k$i
done >in
atf_check -o file:exp "$(prog)" btree in
atf_check -o file:exp "$(prog_db)" btree in
}
atf_test_case small_hash
@ -92,7 +97,7 @@ small_hash_body()
echo k$i
done >in
atf_check -o file:exp "$(prog)" hash in
atf_check -o file:exp "$(prog_db)" hash in
}
atf_test_case small_recno
@ -116,7 +121,7 @@ small_recno_body()
printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
}' >in
atf_check -o file:exp "$(prog)" recno in
atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case medium_btree
@ -144,7 +149,7 @@ medium_btree_body()
echo k$i
done >in
atf_check -o file:exp "$(prog)" btree in
atf_check -o file:exp "$(prog_db)" btree in
}
atf_test_case medium_hash
@ -172,7 +177,7 @@ medium_hash_body()
echo k$i
done >in
atf_check -o file:exp "$(prog)" hash in
atf_check -o file:exp "$(prog_db)" hash in
}
atf_test_case medium_recno
@ -197,7 +202,7 @@ medium_recno_body()
printf("p\nk%d\nd%s\ng\nk%d\n", i, $0, i);
}' >in
atf_check -o file:exp "$(prog)" recno in
atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case big_btree
@ -226,7 +231,7 @@ big_btree_body()
echo k$i
done >in
atf_check "$(prog)" -o out btree in
atf_check "$(prog_db)" -o out btree in
cmp -s exp out || atf_fail "test failed for page size: $psize"
done
}
@ -254,7 +259,7 @@ big_hash_body()
echo k$i
done >in
atf_check "$(prog)" -o out hash in
atf_check "$(prog_db)" -o out hash in
cmp -s exp out || atf_fail "test failed"
}
@ -282,7 +287,7 @@ big_recno_body()
for psize in 512 16384 65536; do
echo "checking page size: $psize"
atf_check "$(prog)" -o out recno in
atf_check "$(prog_db)" -o out recno in
cmp -s exp out || atf_fail "test failed for page size: $psize"
done
}
@ -350,7 +355,7 @@ random_recno_body()
printf("g\nk%d\n", i);
}' >in
atf_check -o file:exp "$(prog)" recno in
atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case reverse_recno
@ -388,7 +393,7 @@ reverse_recno_body()
printf("g\nk%d\n", i);
}' >in
atf_check -o file:exp "$(prog)" recno in
atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case alternate_recno
@ -440,7 +445,7 @@ alternate_recno_body()
printf("g\nk%d\n", i);
}' >in
atf_check "$(prog)" -o out recno in
atf_check "$(prog_db)" -o out recno in
sort -o exp exp
sort -o out out
@ -509,7 +514,7 @@ h_delete()
}' >> exp
fi
atf_check "$(prog)" -o out $type in
atf_check "$(prog_db)" -o out $type in
atf_check -o file:exp cat out
}
@ -553,7 +558,7 @@ h_repeated()
}
}' >in
$(prog) btree in
$(prog_db) btree in
}
atf_test_case repeated_btree
@ -608,7 +613,7 @@ duplicate_btree_body()
printf("o\n");
}' >in
atf_check -o file:exp -x "$(prog) -iflags=1 btree in | sort"
atf_check -o file:exp -x "$(prog_db) -iflags=1 btree in | sort"
}
h_cursor_flags()
@ -637,7 +642,7 @@ h_cursor_flags()
printf("eR_CURSOR SHOULD HAVE FAILED\n");
}' >in
atf_check -o ignore -e ignore -s ne:0 "$(prog)" -o out $type in
atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
atf_check -s ne:0 test -s out
cat exp |
@ -651,7 +656,7 @@ h_cursor_flags()
printf("eR_CURSOR SHOULD HAVE FAILED\n");
}' >in
atf_check -o ignore -e ignore -s ne:0 "$(prog)" -o out $type in
atf_check -o ignore -e ignore -s ne:0 "$(prog_db)" -o out $type in
atf_check -s ne:0 test -s out
}
@ -707,7 +712,7 @@ reverse_order_recno_body()
printf("or\n");
}' >in
atf_check -o file:exp "$(prog)" recno in
atf_check -o file:exp "$(prog_db)" recno in
}
atf_test_case small_page_btree
@ -737,7 +742,7 @@ small_page_btree_body()
echo k$i
done >in
atf_check -o file:exp "$(prog)" -i psize=512 btree in
atf_check -o file:exp "$(prog_db)" -i psize=512 btree in
}
h_byte_orders()
@ -757,14 +762,14 @@ h_byte_orders()
echo k$i
done >in
atf_check -o file:exp "$(prog)" -ilorder=$order -f byte.file $type in
atf_check -o file:exp "$(prog_db)" -ilorder=$order -f byte.file $type in
for i in `sed 50q $(dict)`; do
echo g
echo k$i
done >in
atf_check -o file:exp "$(prog)" -s -ilorder=$order -f byte.file $type in
atf_check -o file:exp "$(prog_db)" -s -ilorder=$order -f byte.file $type in
done
}
@ -794,7 +799,7 @@ h_bsize_ffactor()
ffactor=$2
echo "bucketsize $bsize, fill factor $ffactor"
atf_check -o file:exp "$(prog)" "-ibsize=$bsize,\
atf_check -o file:exp "$(prog_db)" "-ibsize=$bsize,\
ffactor=$ffactor,nelem=25000,cachesize=65536" hash in
}
@ -864,9 +869,21 @@ bsize_ffactor_body()
h_bsize_ffactor 8192 341
h_bsize_ffactor 8192 455
h_bsize_ffactor 8192 683
h_bsize_ffactor 16384 341
h_bsize_ffactor 16384 455
h_bsize_ffactor 16384 683
h_bsize_ffactor 32768 341
h_bsize_ffactor 32768 455
h_bsize_ffactor 32768 683
h_bsize_ffactor 65536 341
h_bsize_ffactor 65536 455
h_bsize_ffactor 65536 683
}
# FIXME: what does it test?
# This tests 64K block size addition/removal
atf_test_case four_char_hash
four_char_hash_head()
{
@ -887,7 +904,24 @@ r
k1234
EOF
atf_check "$(prog)" -i bsize=65536 hash in
atf_check "$(prog_db)" -i bsize=65536 hash in
}
atf_test_case bsize_torture
bsize_torture_head()
{
	atf_set "timeout" "36000"
	atf_set "descr" "Checks hash database with various bucket sizes"
}

bsize_torture_body()
{
	TMPDIR="$(pwd)/db_dir"; export TMPDIR
	mkdir ${TMPDIR}
	for i in 2048 4096 8192 16384 32768 65536
	do
		atf_check "$(prog_lfsr)" $i
	done
}
atf_init_test_cases()
@ -917,4 +951,5 @@ atf_init_test_cases()
atf_add_test_case byte_orders_hash
atf_add_test_case bsize_ffactor
atf_add_test_case four_char_hash
atf_add_test_case bsize_torture
}