Integrate contentless delete with auto-merge.

FossilOrigin-Name: 85c1589ab1fc69d1eef4bbc1bdefa2b10af5f6b9c08e813130b93829b592f416
dan 2023-07-22 19:47:46 +00:00
parent 330e36c2c6
commit 2159292ce0
7 changed files with 282 additions and 174 deletions
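For context only (not part of the commit): a minimal sketch, against the public SQLite C API, of the behaviour the new fts5contentless4.test below exercises. The schema, the 'pgsz' setting and the DELETE are taken from that test; the document text, error handling and use of an in-memory database are illustrative, and the program assumes a build with SQLITE_ENABLE_FTS5.

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  char *zErr = 0;
  int rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return 1;

  rc = sqlite3_exec(db,
    /* A contentless table with contentless_delete=1 and a small 'pgsz',
    ** so that many leaves/segments are created (values from the new test). */
    "CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1);"
    "INSERT INTO ft(ft, rank) VALUES('pgsz', 240);"
    "WITH s(i) AS (SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000) "
      "INSERT INTO ft SELECT 'a b c d e f g h i j k l' FROM s;"
    /* Deleting rows from a contentless table only records tombstones. With
    ** this commit those deletes are counted and folded into the auto-merge
    ** decision on the next flush/commit. */
    "DELETE FROM ft WHERE rowid < 1000;",
    0, 0, &zErr);

  if( rc!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr ? zErr : "?");
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return rc!=SQLITE_OK;
}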

ext/fts5/fts5Int.h

@ -623,6 +623,11 @@ int sqlite3Fts5HashWrite(
*/
void sqlite3Fts5HashClear(Fts5Hash*);
/*
** Return true if the hash is empty, false otherwise.
*/
int sqlite3Fts5HashIsEmpty(Fts5Hash*);
int sqlite3Fts5HashQuery(
Fts5Hash*, /* Hash table to query */
int nPre,
@ -644,6 +649,7 @@ void sqlite3Fts5HashScanEntry(Fts5Hash *,
);
/*
** End of interface to code in fts5_hash.c.
**************************************************************************/

ext/fts5/fts5_hash.c

@ -529,6 +529,13 @@ int sqlite3Fts5HashScanInit(
return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan);
}
/*
** Return true if the hash table is empty, false otherwise.
*/
int sqlite3Fts5HashIsEmpty(Fts5Hash *pHash){
return pHash->nEntry==0;
}
void sqlite3Fts5HashScanNext(Fts5Hash *p){
assert( !sqlite3Fts5HashScanEof(p) );
p->pScan = p->pScan->pScanNext;

ext/fts5/fts5_index.c

@ -56,6 +56,8 @@
#define FTS5_MAX_LEVEL 64
#define FTS5_MERGE_TOMBSTONE_WEIGHT 5
/*
** There are two versions of the format used for the structure record:
**
@ -332,6 +334,12 @@ struct Fts5Data {
/*
** One object per %_data table.
**
** nContentlessDelete:
** The number of contentless delete operations since the most recent
** call to fts5IndexFlush() or fts5IndexDiscardData(). This is tracked
** so that extra auto-merge work can be done by fts5IndexFlush() to
** account for the delete operations.
*/
struct Fts5Index {
Fts5Config *pConfig; /* Virtual table configuration */
@ -346,6 +354,7 @@ struct Fts5Index {
int nPendingData; /* Current bytes of pending data */
i64 iWriteRowid; /* Rowid for current doc being written */
int bDelete; /* Current write is a delete */
int nContentlessDelete; /* Number of contentless delete ops */
/* Error state. */
int rc; /* Current error code */
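Illustrative only: a simplified, stand-alone model of how the new counter is consumed. DemoIndex and demoNeedsFlush are hypothetical stand-ins (in FTS5 the automerge setting lives on Fts5Config, not Fts5Index); the condition and the pgnoLast + nContentlessDelete arithmetic mirror the fts5IndexFlush() and fts5FlushOneHash() hunks later in this diff.

#include <stdio.h>

typedef struct DemoIndex DemoIndex;
struct DemoIndex {
  int nPendingData;        /* Bytes of buffered hash-table data */
  int nContentlessDelete;  /* Contentless delete ops since the last flush */
  int nAutomerge;          /* Value of the 'automerge' configuration option */
};

/* Before this commit a flush was only worthwhile when nPendingData!=0.
** Contentless deletes add tombstone pages without touching the in-memory
** hash, so they now force a flush too (when auto-merge is enabled), and the
** delete count is added to the page count passed to fts5IndexAutomerge(). */
static int demoNeedsFlush(DemoIndex *p){
  return p->nPendingData || (p->nContentlessDelete && p->nAutomerge>0);
}

int main(void){
  DemoIndex idx = {0, 250, 4};   /* no pending writes, 250 contentless deletes */
  int pgnoLast = 0;              /* no new level-0 segment written */
  if( demoNeedsFlush(&idx) ){
    /* fts5IndexFlush() -> fts5FlushOneHash() would run auto-merge with: */
    printf("automerge input = %d\n", pgnoLast + idx.nContentlessDelete);
    idx.nContentlessDelete = 0;  /* reset, as at the end of fts5FlushOneHash() */
  }
  return 0;
}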
@ -3980,6 +3989,7 @@ static void fts5IndexDiscardData(Fts5Index *p){
sqlite3Fts5HashClear(p->pHash);
p->nPendingData = 0;
}
p->nContentlessDelete = 0;
}
/*
@ -4722,6 +4732,7 @@ static int fts5IndexMerge(
int nRem = nPg;
int bRet = 0;
Fts5Structure *pStruct = *ppStruct;
int bTombstone = 0;
while( nRem>0 && p->rc==SQLITE_OK ){
int iLvl; /* To iterate through levels */
int iBestLvl = 0; /* Level offering the most input segments */
@ -4731,6 +4742,7 @@ static int fts5IndexMerge(
assert( pStruct->nLevel>0 );
for(iLvl=0; iLvl<pStruct->nLevel; iLvl++){
Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl];
int nThisSeg = 0;
if( pLvl->nMerge ){
if( pLvl->nMerge>nBest ){
iBestLvl = iLvl;
@ -4738,8 +4750,20 @@ static int fts5IndexMerge(
}
break;
}
if( pLvl->nSeg>nBest ){
nBest = pLvl->nSeg;
nThisSeg = pLvl->nSeg;
if( bTombstone && nThisSeg ){
int iSeg;
int nPg = 0;
int nTomb = 0;
for(iSeg=0; iSeg<pLvl->nSeg; iSeg++){
Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg];
nPg += pSeg->pgnoLast;
nTomb += pSeg->nPgTombstone;
}
nThisSeg += ((nTomb*FTS5_MERGE_TOMBSTONE_WEIGHT) / nPg);
}
if( nThisSeg>nBest ){
nBest = nThisSeg;
iBestLvl = iLvl;
}
}
@ -4752,7 +4776,12 @@ static int fts5IndexMerge(
#endif
if( nBest<nMin && pStruct->aLevel[iBestLvl].nMerge==0 ){
break;
if( bTombstone || p->pConfig->bContentlessDelete==0 ){
break;
}else{
bTombstone = 1;
continue;
}
}
bRet = 1;
fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem);
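A stand-alone sketch, with made-up numbers and a hypothetical helper name, of the second-pass weighting added above: when no level reaches nMin segments on the first pass and contentless_delete is enabled, the loop is rerun with bTombstone set and each level's segment count is inflated in proportion to its tombstone pages.

#include <stdio.h>

#define FTS5_MERGE_TOMBSTONE_WEIGHT 5   /* as #defined earlier in this commit */

/* Weighted segment count for one level, per the loop body in the hunk above.
** The guard against nLeafPg==0 is a simplification for this demo. */
static int weightedSegCount(int nSeg, int nLeafPg, int nTombPg){
  int nThisSeg = nSeg;
  if( nSeg>0 && nLeafPg>0 ){
    nThisSeg += (nTombPg*FTS5_MERGE_TOMBSTONE_WEIGHT) / nLeafPg;
  }
  return nThisSeg;
}

int main(void){
  /* Example: 3 segments, 600 leaf pages, 400 tombstone hash pages:
  ** 3 + (400*5)/600 = 3 + 3 = 6.  A heavy tombstone load can therefore push
  ** a level over the nMin threshold it would not otherwise reach. */
  printf("%d\n", weightedSegCount(3, 600, 400));
  return 0;
}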
@ -5273,192 +5302,195 @@ static void fts5FlushOneHash(Fts5Index *p){
/* Obtain a reference to the index structure and allocate a new segment-id
** for the new level-0 segment. */
pStruct = fts5StructureRead(p);
iSegid = fts5AllocateSegid(p, pStruct);
fts5StructureInvalidate(p);
if( iSegid ){
const int pgsz = p->pConfig->pgsz;
int eDetail = p->pConfig->eDetail;
int bSecureDelete = p->pConfig->bSecureDelete;
Fts5StructureSegment *pSeg; /* New segment within pStruct */
Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
Fts5SegWriter writer;
fts5WriteInit(p, &writer, iSegid);
pBuf = &writer.writer.buf;
pPgidx = &writer.writer.pgidx;
/* fts5WriteInit() should have initialized the buffers to (most likely)
** the maximum space required. */
assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
/* Begin scanning through hash table entries. This loop runs once for each
** term/doclist currently stored within the hash table. */
if( p->rc==SQLITE_OK ){
p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
}
while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
const char *zTerm; /* Buffer containing term */
int nTerm; /* Size of zTerm in bytes */
const u8 *pDoclist; /* Pointer to doclist for this term */
int nDoclist; /* Size of doclist in bytes */
/* Get the term and doclist for this entry. */
sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
nTerm = (int)strlen(zTerm);
if( bSecureDelete==0 ){
fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
if( p->rc!=SQLITE_OK ) break;
assert( writer.bFirstRowidInPage==0 );
if( sqlite3Fts5HashIsEmpty(pHash)==0 ){
iSegid = fts5AllocateSegid(p, pStruct);
if( iSegid ){
const int pgsz = p->pConfig->pgsz;
int eDetail = p->pConfig->eDetail;
int bSecureDelete = p->pConfig->bSecureDelete;
Fts5StructureSegment *pSeg; /* New segment within pStruct */
Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */
Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */
Fts5SegWriter writer;
fts5WriteInit(p, &writer, iSegid);
pBuf = &writer.writer.buf;
pPgidx = &writer.writer.pgidx;
/* fts5WriteInit() should have initialized the buffers to (most likely)
** the maximum space required. */
assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) );
assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) );
/* Begin scanning through hash table entries. This loop runs once for each
** term/doclist currently stored within the hash table. */
if( p->rc==SQLITE_OK ){
p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0);
}
if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
/* The entire doclist will fit on the current leaf. */
fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
}else{
int bTermWritten = !bSecureDelete;
i64 iRowid = 0;
i64 iPrev = 0;
int iOff = 0;
/* The entire doclist will not fit on this leaf. The following
** loop iterates through the poslists that make up the current
** doclist. */
while( p->rc==SQLITE_OK && iOff<nDoclist ){
u64 iDelta = 0;
iOff += fts5GetVarint(&pDoclist[iOff], &iDelta);
iRowid += iDelta;
/* If in secure delete mode, and if this entry in the poslist is
** in fact a delete, then edit the existing segments directly
** using fts5FlushSecureDelete(). */
if( bSecureDelete ){
if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
iOff++;
while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){
const char *zTerm; /* Buffer containing term */
int nTerm; /* Size of zTerm in bytes */
const u8 *pDoclist; /* Pointer to doclist for this term */
int nDoclist; /* Size of doclist in bytes */
/* Get the term and doclist for this entry. */
sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist);
nTerm = (int)strlen(zTerm);
if( bSecureDelete==0 ){
fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
if( p->rc!=SQLITE_OK ) break;
assert( writer.bFirstRowidInPage==0 );
}
if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){
/* The entire doclist will fit on the current leaf. */
fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist);
}else{
int bTermWritten = !bSecureDelete;
i64 iRowid = 0;
i64 iPrev = 0;
int iOff = 0;
/* The entire doclist will not fit on this leaf. The following
** loop iterates through the poslists that make up the current
** doclist. */
while( p->rc==SQLITE_OK && iOff<nDoclist ){
u64 iDelta = 0;
iOff += fts5GetVarint(&pDoclist[iOff], &iDelta);
iRowid += iDelta;
/* If in secure delete mode, and if this entry in the poslist is
** in fact a delete, then edit the existing segments directly
** using fts5FlushSecureDelete(). */
if( bSecureDelete ){
if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
iOff++;
if( iOff<nDoclist && pDoclist[iOff]==0x00 ){
iOff++;
nDoclist = 0;
}else{
continue;
}
}
}else if( (pDoclist[iOff] & 0x01) ){
fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
iOff++;
nDoclist = 0;
}else{
continue;
}
}
}else if( (pDoclist[iOff] & 0x01) ){
fts5FlushSecureDelete(p, pStruct, zTerm, iRowid);
if( p->rc!=SQLITE_OK || pDoclist[iOff]==0x01 ){
iOff++;
continue;
}
}
}
if( p->rc==SQLITE_OK && bTermWritten==0 ){
fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
bTermWritten = 1;
assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 );
}
if( writer.bFirstRowidInPage ){
fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
writer.bFirstRowidInPage = 0;
fts5WriteDlidxAppend(p, &writer, iRowid);
}else{
pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid-iPrev);
}
if( p->rc!=SQLITE_OK ) break;
assert( pBuf->n<=pBuf->nSpace );
iPrev = iRowid;
if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0 ){
pBuf->p[pBuf->n++] = 0;
iOff++;
if( p->rc==SQLITE_OK && bTermWritten==0 ){
fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm);
bTermWritten = 1;
assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 );
}
if( writer.bFirstRowidInPage ){
fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */
pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid);
writer.bFirstRowidInPage = 0;
fts5WriteDlidxAppend(p, &writer, iRowid);
}else{
pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid-iPrev);
}
if( p->rc!=SQLITE_OK ) break;
assert( pBuf->n<=pBuf->nSpace );
iPrev = iRowid;
if( eDetail==FTS5_DETAIL_NONE ){
if( iOff<nDoclist && pDoclist[iOff]==0 ){
pBuf->p[pBuf->n++] = 0;
iOff++;
if( iOff<nDoclist && pDoclist[iOff]==0 ){
pBuf->p[pBuf->n++] = 0;
iOff++;
}
}
if( (pBuf->n + pPgidx->n)>=pgsz ){
fts5WriteFlushLeaf(p, &writer);
}
}
if( (pBuf->n + pPgidx->n)>=pgsz ){
fts5WriteFlushLeaf(p, &writer);
}
}else{
int bDummy;
int nPos;
int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy);
nCopy += nPos;
if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
/* The entire poslist will fit on the current leaf. So copy
** it in one go. */
fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
}else{
/* The entire poslist will not fit on this leaf. So it needs
** to be broken into sections. The only qualification being
** that each varint must be stored contiguously. */
const u8 *pPoslist = &pDoclist[iOff];
int iPos = 0;
while( p->rc==SQLITE_OK ){
int nSpace = pgsz - pBuf->n - pPgidx->n;
int n = 0;
if( (nCopy - iPos)<=nSpace ){
n = nCopy - iPos;
}else{
n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
int bDummy;
int nPos;
int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy);
nCopy += nPos;
if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){
/* The entire poslist will fit on the current leaf. So copy
** it in one go. */
fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy);
}else{
/* The entire poslist will not fit on this leaf. So it needs
** to be broken into sections. The only qualification being
** that each varint must be stored contiguously. */
const u8 *pPoslist = &pDoclist[iOff];
int iPos = 0;
while( p->rc==SQLITE_OK ){
int nSpace = pgsz - pBuf->n - pPgidx->n;
int n = 0;
if( (nCopy - iPos)<=nSpace ){
n = nCopy - iPos;
}else{
n = fts5PoslistPrefix(&pPoslist[iPos], nSpace);
}
assert( n>0 );
fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
iPos += n;
if( (pBuf->n + pPgidx->n)>=pgsz ){
fts5WriteFlushLeaf(p, &writer);
}
if( iPos>=nCopy ) break;
}
assert( n>0 );
fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n);
iPos += n;
if( (pBuf->n + pPgidx->n)>=pgsz ){
fts5WriteFlushLeaf(p, &writer);
}
if( iPos>=nCopy ) break;
}
iOff += nCopy;
}
iOff += nCopy;
}
}
/* TODO2: Doclist terminator written here. */
/* pBuf->p[pBuf->n++] = '\0'; */
assert( pBuf->n<=pBuf->nSpace );
if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
}
/* TODO2: Doclist terminator written here. */
/* pBuf->p[pBuf->n++] = '\0'; */
assert( pBuf->n<=pBuf->nSpace );
if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash);
}
sqlite3Fts5HashClear(pHash);
fts5WriteFinish(p, &writer, &pgnoLast);
assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 );
if( pgnoLast>0 ){
/* Update the Fts5Structure. It is written back to the database by the
** fts5StructureRelease() call below. */
if( pStruct->nLevel==0 ){
fts5StructureAddLevel(&p->rc, &pStruct);
}
fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
if( p->rc==SQLITE_OK ){
pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
pSeg->iSegid = iSegid;
pSeg->pgnoFirst = 1;
pSeg->pgnoLast = pgnoLast;
if( pStruct->nOriginCntr>0 ){
pSeg->iOrigin1 = pStruct->nOriginCntr;
pSeg->iOrigin2 = pStruct->nOriginCntr;
pStruct->nOriginCntr++;
sqlite3Fts5HashClear(pHash);
fts5WriteFinish(p, &writer, &pgnoLast);
assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 );
if( pgnoLast>0 ){
/* Update the Fts5Structure. It is written back to the database by the
** fts5StructureRelease() call below. */
if( pStruct->nLevel==0 ){
fts5StructureAddLevel(&p->rc, &pStruct);
}
pStruct->nSegment++;
fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0);
if( p->rc==SQLITE_OK ){
pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ];
pSeg->iSegid = iSegid;
pSeg->pgnoFirst = 1;
pSeg->pgnoLast = pgnoLast;
if( pStruct->nOriginCntr>0 ){
pSeg->iOrigin1 = pStruct->nOriginCntr;
pSeg->iOrigin2 = pStruct->nOriginCntr;
pStruct->nOriginCntr++;
}
pStruct->nSegment++;
}
fts5StructurePromote(p, 0, pStruct);
}
fts5StructurePromote(p, 0, pStruct);
}
}
fts5IndexAutomerge(p, &pStruct, pgnoLast);
fts5IndexAutomerge(p, &pStruct, pgnoLast + p->nContentlessDelete);
fts5IndexCrisismerge(p, &pStruct);
fts5StructureWrite(p, pStruct);
fts5StructureRelease(pStruct);
p->nContentlessDelete = 0;
}
/*
@ -5466,7 +5498,7 @@ static void fts5FlushOneHash(Fts5Index *p){
*/
static void fts5IndexFlush(Fts5Index *p){
/* Unless it is empty, flush the hash table to disk */
if( p->nPendingData ){
if( p->nPendingData || (p->nContentlessDelete && p->pConfig->nAutomerge>0) ){
assert( p->pHash );
p->nPendingData = 0;
fts5FlushOneHash(p);
@ -6800,6 +6832,8 @@ static void fts5IndexTombstoneAdd(
int nHash = 0;
Fts5Data **apHash = 0;
p->nContentlessDelete++;
if( pSeg->nPgTombstone>0 ){
iPg = iRowid % pSeg->nPgTombstone;
pPg = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg));

ext/fts5/fts5_main.c

@ -1777,8 +1777,7 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){
Fts5FullTable *pTab = (Fts5FullTable*)pVtab;
fts5CheckTransactionState(pTab, FTS5_SYNC, 0);
pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg;
fts5TripCursors(pTab);
rc = sqlite3Fts5StorageSync(pTab->pStorage);
rc = sqlite3Fts5FlushToDisk(&pTab->p);
pTab->p.pConfig->pzErrmsg = 0;
return rc;
}

ext/fts5/test/fts5contentless4.test

@ -0,0 +1,61 @@
# 2023 July 21
#
# The author disclaims copyright to this source code. In place of
# a legal notice, here is a blessing:
#
# May you do good and not evil.
# May you find forgiveness for yourself and forgive others.
# May you share freely, never taking more than you give.
#
#***********************************************************************
#
# This file contains tests for the contentless_delete=1 option.
#
source [file join [file dirname [info script]] fts5_common.tcl]
set testprefix fts5contentless4
# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
ifcapable !fts5 {
  finish_test
  return
}

proc document {n} {
  set vocab [list A B C D E F G H I J K L M N O P Q R S T U V W X Y Z]
  set ret [list]
  for {set ii 0} {$ii < $n} {incr ii} {
    lappend ret [lindex $vocab [expr int(rand()*[llength $vocab])]]
  }
  set ret
}
db func document document
do_execsql_test 1.0 {
  CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1);
  INSERT INTO ft(ft, rank) VALUES('pgsz', 240);
  WITH s(i) AS (
    SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000
  )
  INSERT INTO ft SELECT document(12) FROM s;
}

do_execsql_test 1.1 {
  INSERT INTO ft(ft) VALUES('optimize');
}

do_execsql_test 1.2 {
  DELETE FROM ft WHERE rowid < 1000
}

execsql_pp {
  SELECT * FROM fts5_structure((
    SELECT block FROM ft_data WHERE id=10
  ))
}
finish_test

manifest

@ -1,5 +1,5 @@
C Fix\sfts5\sincremental\soptimization\sso\sthat\sit\stoo\scan\shandle\san\sindex\sthat\sconsists\sof\sa\ssingle\ssegment\swith\sone\sor\smore\stombstone\shash\spages.
D 2023-07-21T21:10:33.579
C Integrate\scontentless\sdelete\swith\sauto-merge.
D 2023-07-22T19:47:46.359
F .fossil-settings/empty-dirs dbb81e8fc0401ac46a1491ab34a7f2c7c0452f2f06b54ebb845d024ca8283ef1
F .fossil-settings/ignore-glob 35175cdfcf539b2318cb04a9901442804be81cd677d8b889fcc9149c21f239ea
F LICENSE.md df5091916dbb40e6e9686186587125e1b2ff51f022cc334e886c19a0e9982724
@ -86,14 +86,14 @@ F ext/fts3/unicode/mkunicode.tcl d5aebf022fa4577ee8cdf27468f0d847879993959101f6d
F ext/fts3/unicode/parseunicode.tcl a981bd6466d12dd17967515801c3ff23f74a281be1a03cf1e6f52a6959fc77eb
F ext/fts5/extract_api_docs.tcl a36e54ec777172ddd3f9a88daf593b00848368e0
F ext/fts5/fts5.h c132a9323f22a972c4c93a8d5a3d901113a6e612faf30ca8e695788438c5ca2a
F ext/fts5/fts5Int.h fa9dd8ecbda6340f406c6f21b9b524b4666817aa89850e60a42937eea87cef6a
F ext/fts5/fts5Int.h f59c14f725ad0fcb8a81b9bf012e5021c6501bf43e73aa00b00d728e2ac7efaf
F ext/fts5/fts5_aux.c 572d5ec92ba7301df2fea3258576332f2f4d2dfd66d8263afd157d9deceac480
F ext/fts5/fts5_buffer.c 3001fbabb585d6de52947b44b455235072b741038391f830d6b729225eeaf6a5
F ext/fts5/fts5_config.c 010fabcc0aaa0dfa76b19146e8bddf7de368933eeac01e294af6607447500caa
F ext/fts5/fts5_expr.c 2473c13542f463cae4b938c498d6193c90d38ea1a2a4f9849c0479736e50d24d
F ext/fts5/fts5_hash.c d4fb70940359f2120ccd1de7ffe64cc3efe65de9e8995b822cd536ff64c96982
F ext/fts5/fts5_index.c e500a5d33ae312c2ebab91123f0ef5f9bbc3eb555252c7d40ba4d8688780c7ca
F ext/fts5/fts5_main.c ede405f0f11db562653b988d043a531daa66093b46c1b35b8fcddb54819cba84
F ext/fts5/fts5_hash.c 60224220ccfb2846b741b6dbb1b8872094ec6d87b3118c04244dafc83e6f9c40
F ext/fts5/fts5_index.c 31b8c8dd6913d76d6d7755342e36816495e5ad177d253f6bf39e0efdb9dc31e0
F ext/fts5/fts5_main.c 2f87ee44fdb21539c264541149f07f70e065d58f37420063e5ddef80ba0f5ede
F ext/fts5/fts5_storage.c 3c9b41fce41b6410f2e8f82eb035c6a29b2560483f773e6dc98cf3cb2e4ddbb5
F ext/fts5/fts5_tcl.c b1445cbe69908c411df8084a10b2485500ac70a9c747cdc8cda175a3da59d8ae
F ext/fts5/fts5_test_mi.c 08c11ec968148d4cb4119d96d819f8c1f329812c568bac3684f5464be177d3ee
@ -135,6 +135,7 @@ F ext/fts5/test/fts5content.test 213506436fb2c87567b8e31f6d43ab30aab99354cec74ed
F ext/fts5/test/fts5contentless.test 9a42a86822670792ba632f5c57459addeb774d93b29d5e6ddae08faa64c2b6d9
F ext/fts5/test/fts5contentless2.test 12c778d134a121b8bad000fbf3ae900d53226fee840ce36fe941b92737f1fda7
F ext/fts5/test/fts5contentless3.test cd3b8332c737d1d6f28e04d6338876c79c22815b8ecd34fb677409a013a45224
F ext/fts5/test/fts5contentless4.test 3b11ccbbe928d45eb8f985c0137a8fe2c69b70b940b10de31540040de5674311
F ext/fts5/test/fts5corrupt.test 77ae6f41a7eba10620efb921cf7dbe218b0ef232b04519deb43581cb17a57ebe
F ext/fts5/test/fts5corrupt2.test 7453752ba12ce91690c469a6449d412561cc604b1dec994e16ab132952e7805f
F ext/fts5/test/fts5corrupt3.test 7da9895dafa404efd20728f66ff4b94399788bdc042c36fe2689801bba2ccd78
@ -2047,8 +2048,8 @@ F vsixtest/vsixtest.tcl 6a9a6ab600c25a91a7acc6293828957a386a8a93
F vsixtest/vsixtest.vcxproj.data 2ed517e100c66dc455b492e1a33350c1b20fbcdc
F vsixtest/vsixtest.vcxproj.filters 37e51ffedcdb064aad6ff33b6148725226cd608e
F vsixtest/vsixtest_TemporaryKey.pfx e5b1b036facdb453873e7084e1cae9102ccc67a0
P f4926006b371d9a1439a25384bd50a50c2f1c03f75a7c2c3134ae72abb971c91
R 8117856750f55ecca6e7594df2bcd37e
P e61c9b083f5e0b6b6ee18f9394581ad816f445dbfb72ed1fe954f4182755a576
R 223fbfa40ff414b8abee5011e5d1533a
U dan
Z 3675210f196caeb196ef6ee2658c36e9
Z 8cb202caf797f67b895f2a9a207e2618
# Remove this line to create a well-formed Fossil manifest.

manifest.uuid

@ -1 +1 @@
e61c9b083f5e0b6b6ee18f9394581ad816f445dbfb72ed1fe954f4182755a576
85c1589ab1fc69d1eef4bbc1bdefa2b10af5f6b9c08e813130b93829b592f416