Fixed large file support under Linux. I'm unable to test under Windows.

Ticket #191. (CVS 790)

FossilOrigin-Name: 9864a1265b5a37c12b0dd8446d81b84c5a3acc43
This commit is contained in:
drh 2002-12-01 02:00:57 +00:00
parent 86bc1c8273
commit d0d006e29c
6 changed files with 228 additions and 21 deletions

View File

@ -1,5 +1,5 @@
C Add\sthe\s"space_used.tcl"\sscript\sto\sthe\stools\sdirectory.\s\sThis\sscript\sis\sused\nto\smeasure\show\smuch\sdisk\sspace\sis\sused\sby\seach\stable\sand\sindex\sof\sa\sdatabase.\s(CVS\s789)
D 2002-11-24T14:52:27
C Fixed\slarge\sfile\ssupport\sunder\sLinux.\s\sI'm\sunable\sto\stest\sunder\sWindows.\nTicket\s#191.\s(CVS\s790)
D 2002-12-01T02:00:58
F Makefile.in 868c17a1ae1c07603d491274cc8f86c04acf2a1e
F Makefile.linux-gcc b86a99c493a5bfb402d1d9178dcdc4bd4b32f906
F README f1de682fbbd94899d50aca13d387d1b3fd3be2dd
@ -30,9 +30,9 @@ F src/hash.h cd0433998bc1a3759d244e1637fe5a3c13b53bf8
F src/insert.c 764300a0bd8074a2174946c0bf8a550bd833397a
F src/main.c f04f93b8928d6d85976e5137fea146a46de1fd6e
F src/md5.c fe4f9c9c6f71dfc26af8da63e4d04489b1430565
F src/os.c caf5a34b35a2d99a58457517261c879ac29b0a05
F src/os.c b3160bb723aea31c12787aea70ee9161e8f18b72
F src/os.h b7b79563fc55c6d58b703c88ade9ab1504c48bba
F src/pager.c 691571c468e36212677ed0b1db475dba4dba46bf
F src/pager.c 1ef5be147f7d5923275c0500fbad8f7463a14f9f
F src/pager.h 6991c9c2dc5e4c7f2df4d4ba47d1c6458f763a32
F src/parse.y 469c9636ff713e63c00234662209f11668671ae9
F src/printf.c 5c50fc1da75c8f5bf432b1ad17d91d6653acd167
@ -45,7 +45,7 @@ F src/sqliteInt.h 74198ccddb3443514f0218de1f5a6668c3dbbe59
F src/table.c eed2098c9b577aa17f8abe89313a9c4413f57d63
F src/tclsqlite.c 9f2c00a92338c51171ded8943bd42d77f7e69e64
F src/test1.c a46e9f61915b32787c5d5a05a4b92e4dacc437d9
F src/test2.c 8f7d88895a49b3eed111469f2ab1071cb780ed12
F src/test2.c e3a2e08729b3727b82590b265288d3bbac3e3740
F src/test3.c 03d4f962f482599df9027a5814490c441a58cd99
F src/threadtest.c d641a5219e718e18a1a80a50eb9bb549f451f42e
F src/tokenize.c 75e3bb37305b64e118e709752066f494c4f93c30
@ -56,6 +56,7 @@ F src/vdbe.c 7d46f087c1da55ba9be38f70d92a03c52c8a2ccb
F src/vdbe.h b7584044223104ba7896a7f87b66daebdd6022ba
F src/where.c 615a0f0bed305bcb27073c69347ea75018e8b58d
F test/all.test efd958d048c70a3247997c482f0b33561f7759f0
F test/bigfile.test 38d1071817caceb636c613e3546082b90e749a49
F test/bigrow.test 8ab252dba108f12ad64e337b0f2ff31a807ac578
F test/btree.test 10e75aec120ecefc0edc4c912a0980a43db1b6c2
F test/btree2.test e3b81ec33dc2f89b3e6087436dfe605b870c9080
@ -150,7 +151,7 @@ F www/speed.tcl a20a792738475b68756ea7a19321600f23d1d803
F www/sqlite.tcl ae3dcfb077e53833b59d4fcc94d8a12c50a44098
F www/tclsqlite.tcl 1db15abeb446aad0caf0b95b8b9579720e4ea331
F www/vdbe.tcl 2013852c27a02a091d39a766bc87cff329f21218
P dd5396a73a782b6abe9da3de43a5ee11e0dddbb2
R f02620fdf50f08e680c6e4c887b23004
P 83b2c27a568cd67cd5162d513766b23ff9fc2227
R 525c357fc6616ffd36403b1889baba38
U drh
Z d6b1541e76d56cb79d446daffe01fa52
Z 7562882b3aaedd7e2c8b415c77774707

View File

@ -1 +1 @@
83b2c27a568cd67cd5162d513766b23ff9fc2227
9864a1265b5a37c12b0dd8446d81b84c5a3acc43

View File

@ -39,15 +39,17 @@
/*
** Macros for performance tracing. Normally turned off
*/
#if 0
#if 1
static int last_page = 0;
#define SEEK(X) last_page=(X)
#define TRACE1(X) fprintf(stderr,X)
#define TRACE2(X,Y) fprintf(stderr,X,Y)
#define SEEK(X) last_page=(X)
#define TRACE1(X) fprintf(stderr,X)
#define TRACE2(X,Y) fprintf(stderr,X,Y)
#define TRACE3(X,Y,Z) fprintf(stderr,X,Y,Z)
#else
#define SEEK(X)
#define TRACE1(X)
#define TRACE2(X,Y)
#define TRACE3(X,Y,Z)
#endif
@ -233,7 +235,7 @@ int sqliteOsFileExists(const char *zFilename){
** SQLITE_OK.
**
** On failure, the function returns SQLITE_CANTOPEN and leaves
** *pResulst and *pReadonly unchanged.
** *id and *pReadonly unchanged.
*/
int sqliteOsOpenReadWrite(
const char *zFilename,
@ -495,6 +497,7 @@ int sqliteOsRead(OsFile *id, void *pBuf, int amt){
#if OS_WIN
DWORD got;
SimulateIOError(SQLITE_IOERR);
TRACE2("READ %d\n", last_page);
if( !ReadFile(id->h, pBuf, amt, &got, 0) ){
got = 0;
}
@ -528,6 +531,7 @@ int sqliteOsWrite(OsFile *id, const void *pBuf, int amt){
int rc;
DWORD wrote;
SimulateIOError(SQLITE_IOERR);
TRACE2("WRITE %d\n", last_page);
while( amt>0 && (rc = WriteFile(id->h, pBuf, amt, &wrote, 0))!=0 && wrote>0 ){
amt -= wrote;
pBuf = &((char*)pBuf)[wrote];
@ -551,7 +555,10 @@ int sqliteOsSeek(OsFile *id, off_t offset){
#if OS_WIN
{
LONG upperBits = offset>>32;
SetFilePointer(id->h, offset, &upperBits, FILE_BEGIN);
LONG lowerBits = offset & 0xffffffff;
DWORD rc;
rc = SetFilePointer(id->h, lowerBits, &upperBits, FILE_BEGIN);
TRACE3("SEEK rc=0x%x upper=0x%x\n", rc, upperBits);
}
return SQLITE_OK;
#endif

View File

@ -18,7 +18,7 @@
** file simultaneously, or one process from reading the database while
** another is writing.
**
** @(#) $Id: pager.c,v 1.58 2002/11/11 01:04:48 drh Exp $
** @(#) $Id: pager.c,v 1.59 2002/12/01 02:00:58 drh Exp $
*/
#include "os.h" /* Must be first to enable large file support */
#include "sqliteInt.h"
@ -395,7 +395,7 @@ static int pager_playback_one_page(Pager *pPager, OsFile *jfd){
memcpy(PGHDR_TO_DATA(pPg), pgRec.aData, SQLITE_PAGE_SIZE);
memset(PGHDR_TO_EXTRA(pPg), 0, pPager->nExtra);
}
rc = sqliteOsSeek(&pPager->fd, (pgRec.pgno-1)*SQLITE_PAGE_SIZE);
rc = sqliteOsSeek(&pPager->fd, (pgRec.pgno-1)*(off_t)SQLITE_PAGE_SIZE);
if( rc==SQLITE_OK ){
rc = sqliteOsWrite(&pPager->fd, pgRec.aData, SQLITE_PAGE_SIZE);
}
@ -834,7 +834,7 @@ static int syncAllPages(Pager *pPager){
for(pPg=pPager->pFirst; pPg; pPg=pPg->pNextFree){
if( pPg->dirty ){
if( lastPgno==0 || pPg->pgno!=lastPgno+1 ){
sqliteOsSeek(&pPager->fd, (pPg->pgno-1)*SQLITE_PAGE_SIZE);
sqliteOsSeek(&pPager->fd, (pPg->pgno-1)*(off_t)SQLITE_PAGE_SIZE);
}
rc = sqliteOsWrite(&pPager->fd, PGHDR_TO_DATA(pPg), SQLITE_PAGE_SIZE);
if( rc!=SQLITE_OK ) break;
@ -1061,7 +1061,7 @@ int sqlitepager_get(Pager *pPager, Pgno pgno, void **ppPage){
memset(PGHDR_TO_DATA(pPg), 0, SQLITE_PAGE_SIZE);
}else{
int rc;
sqliteOsSeek(&pPager->fd, (pgno-1)*SQLITE_PAGE_SIZE);
sqliteOsSeek(&pPager->fd, (pgno-1)*(off_t)SQLITE_PAGE_SIZE);
rc = sqliteOsRead(&pPager->fd, PGHDR_TO_DATA(pPg), SQLITE_PAGE_SIZE);
if( rc!=SQLITE_OK ){
off_t fileSize;
@ -1457,7 +1457,7 @@ int sqlitepager_commit(Pager *pPager){
}
for(pPg=pPager->pAll; pPg; pPg=pPg->pNextAll){
if( pPg->dirty==0 ) continue;
rc = sqliteOsSeek(&pPager->fd, (pPg->pgno-1)*SQLITE_PAGE_SIZE);
rc = sqliteOsSeek(&pPager->fd, (pPg->pgno-1)*(off_t)SQLITE_PAGE_SIZE);
if( rc!=SQLITE_OK ) goto commit_abort;
rc = sqliteOsWrite(&pPager->fd, PGHDR_TO_DATA(pPg), SQLITE_PAGE_SIZE);
if( rc!=SQLITE_OK ) goto commit_abort;

View File

@ -13,8 +13,9 @@
** is not included in the SQLite library. It is used for automated
** testing of the SQLite library.
**
** $Id: test2.c,v 1.11 2002/11/09 00:33:17 drh Exp $
** $Id: test2.c,v 1.12 2002/12/01 02:00:58 drh Exp $
*/
#include "os.h"
#include "sqliteInt.h"
#include "pager.h"
#include "tcl.h"
@ -472,6 +473,53 @@ static int page_write(
return TCL_OK;
}
/*
** Usage: fake_big_file N FILENAME
**
** Write a few bytes at the N megabyte point of FILENAME. This will
** create a large file. If the file was a valid SQLite database, then
** the next time the database is opened, SQLite will begin allocating
** new pages after N. If N is 2096 or bigger, this will test the
** ability of SQLite to write to large files.
*/
static int fake_big_file(
  void *NotUsed,
  Tcl_Interp *interp,    /* The TCL interpreter that invoked this command */
  int argc,              /* Number of arguments */
  const char **argv      /* Text of each argument */
){
  int rc;                /* Result code from the sqliteOs* calls */
  int n;                 /* Megabyte offset parsed from argv[1] */
  off_t offset;          /* Byte offset at which the probe bytes are written */
  OsFile fd;             /* Handle on the file named by argv[2] */
  int readOnly = 0;      /* Out-param of sqliteOsOpenReadWrite; value unused */

  if( argc!=3 ){
    Tcl_AppendResult(interp, "wrong # args: should be \"", argv[0],
       " N-MEGABYTES FILE\"", 0);
    return TCL_ERROR;
  }
  if( Tcl_GetInt(interp, argv[1], &n) ) return TCL_ERROR;
  rc = sqliteOsOpenReadWrite(argv[2], &fd, &readOnly);
  if( rc ){
    Tcl_AppendResult(interp, "open failed: ", errorName(rc), 0);
    return TCL_ERROR;
  }
  /* Widen to off_t BEFORE multiplying: n*1024*1024 done in plain int
  ** arithmetic would overflow (UB) for n>=2048. */
  offset = n;
  offset *= 1024*1024;
  rc = sqliteOsSeek(&fd, offset);
  if( rc ){
    Tcl_AppendResult(interp, "seek failed: ", errorName(rc), 0);
    sqliteOsClose(&fd);   /* bug fix: fd was leaked on this error path */
    return TCL_ERROR;
  }
  /* 14 bytes: the 13 characters of the string plus its NUL terminator. */
  rc = sqliteOsWrite(&fd, "Hello, World!", 14);
  if( rc ){
    Tcl_AppendResult(interp, "write failed: ", errorName(rc), 0);
    sqliteOsClose(&fd);   /* bug fix: fd was leaked on this error path */
    return TCL_ERROR;
  }
  sqliteOsClose(&fd);
  return TCL_OK;
}
/*
** Register commands with the TCL interpreter.
*/
@ -496,6 +544,7 @@ int Sqlitetest2_Init(Tcl_Interp *interp){
{ "page_read", (Tcl_CmdProc*)page_read },
{ "page_write", (Tcl_CmdProc*)page_write },
{ "page_number", (Tcl_CmdProc*)page_number },
{ "fake_big_file", (Tcl_CmdProc*)fake_big_file },
};
int i;
for(i=0; i<sizeof(aCmd)/sizeof(aCmd[0]); i++){

150
test/bigfile.test Normal file
View File

@ -0,0 +1,150 @@
# 2002 November 30
#
# The author disclaims copyright to this source code.  In place of
# a legal notice, here is a blessing:
#
#    May you do good and not evil.
#    May you find forgiveness for yourself and forgive others.
#    May you share freely, never taking more than you give.
#
#***********************************************************************
# This file implements regression tests for SQLite library.  The
# focus of this script is testing the ability of SQLite to handle
# database files larger than 4GB.
#
# $Id: bigfile.test,v 1.1 2002/12/01 02:00:58 drh Exp $
#

set testdir [file dirname $argv0]
source $testdir/tester.tcl

# This is the md5 checksum of all the data in table t1 as created
# by the first test.  We will use this number to make sure that data
# never changes.
#
set MAGIC_SUM {593f1efcfdbe698c28b4b1b693f7e4cf}

# Create table t1 with 128 rows and record its checksum.
#
do_test bigfile-1.1 {
  execsql {
    BEGIN;
    CREATE TABLE t1(x);
    INSERT INTO t1 VALUES('abcdefghijklmnopqrstuvwxyz');
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    INSERT INTO t1 SELECT rowid || ' ' || x FROM t1;
    COMMIT;
  }
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM

# Grow the file past the 4GB mark and verify the data survives a
# close/reopen cycle.
#
do_test bigfile-1.2 {
  db close
  fake_big_file 4096 test.db
  sqlite db test.db
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM

# The previous test may fail on some systems because they are unable
# to handle large files.  If that is so, then skip all of the following
# tests.  We will know the above test failed because the "db" command
# does not exist.
#
if {[llength [info command db]]>0} {

# t2 is allocated in pages beyond the 4GB point.
#
do_test bigfile-1.3 {
  execsql {
    CREATE TABLE t2 AS SELECT * FROM t1;
    SELECT md5sum(x) FROM t2;
  }
} $::MAGIC_SUM
do_test bigfile-1.4 {
  db close
  sqlite db test.db
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM
do_test bigfile-1.5 {
  execsql {
    SELECT md5sum(x) FROM t2;
  }
} $::MAGIC_SUM

# Repeat at the 8GB point; t3 lands beyond 8GB.
#
do_test bigfile-1.6 {
  db close
  fake_big_file 8192 test.db
  sqlite db test.db
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM
do_test bigfile-1.7 {
  execsql {
    CREATE TABLE t3 AS SELECT * FROM t1;
    SELECT md5sum(x) FROM t3;
  }
} $::MAGIC_SUM
do_test bigfile-1.8 {
  db close
  sqlite db test.db
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM
do_test bigfile-1.9 {
  execsql {
    SELECT md5sum(x) FROM t2;
  }
} $::MAGIC_SUM
do_test bigfile-1.10 {
  execsql {
    SELECT md5sum(x) FROM t3;
  }
} $::MAGIC_SUM

# Repeat at the 16GB point; t4 lands beyond 16GB.
#
do_test bigfile-1.11 {
  db close
  fake_big_file 16384 test.db
  sqlite db test.db
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM
do_test bigfile-1.12 {
  execsql {
    CREATE TABLE t4 AS SELECT * FROM t1;
    SELECT md5sum(x) FROM t4;
  }
} $::MAGIC_SUM
do_test bigfile-1.13 {
  db close
  sqlite db test.db
  execsql {
    SELECT md5sum(x) FROM t1;
  }
} $::MAGIC_SUM
do_test bigfile-1.14 {
  execsql {
    SELECT md5sum(x) FROM t2;
  }
} $::MAGIC_SUM
do_test bigfile-1.15 {
  execsql {
    SELECT md5sum(x) FROM t3;
  }
} $::MAGIC_SUM
# Bug fix: this test previously re-checked t3 (identical to bigfile-1.15),
# leaving t4 unverified after the final close/reopen.  It must check t4.
do_test bigfile-1.16 {
  execsql {
    SELECT md5sum(x) FROM t4;
  }
} $::MAGIC_SUM

} ;# End of the "if( db command exists )"

finish_test