From 80ff32f5be7f377468d629de765d5fbf6bfa0805 Mon Sep 17 00:00:00 2001
From: drh
Date: Sun, 4 Nov 2001 18:32:46 +0000
Subject: [PATCH] Increase maximum row size to 1MB. (CVS 300)
FossilOrigin-Name: 7dd58fad398253608f55867cf1c7749eef005657
---
manifest | 26 +++----
manifest.uuid | 2 +-
src/btree.c | 50 ++++++++------
src/sqliteInt.h | 18 +++--
src/tokenize.c | 4 +-
src/vdbe.c | 176 +++++++++++++++++++++++++++--------------------
src/vdbe.h | 4 +-
src/where.c | 4 +-
test/bigrow.test | 118 +++++++++++++++++++++++++++++--
www/changes.tcl | 5 ++
10 files changed, 279 insertions(+), 128 deletions(-)
diff --git a/manifest b/manifest
index a6069ea1ae..3b815255a8 100644
--- a/manifest
+++ b/manifest
@@ -1,5 +1,5 @@
-C Version\s2.0.8\s(CVS\s462)
-D 2001-11-04T00:00:00
+C Increase\smaximum\srow\ssize\sto\s1MB.\s(CVS\s300)
+D 2001-11-04T18:32:47
F Makefile.in 6801df952cb1df64aa32e4de85fed24511d28efd
F Makefile.template 1fdb891f14083ee0b63cf7282f91529634438e7a
F README a4c0ba11354ef6ba0776b400d057c59da47a4cc0
@@ -19,7 +19,7 @@ F libtool c56e618713c9510a103bda6b95f3ea3900dcacd6
F ltmain.sh e9ed72eb1d690f447c13945eaf69e28af531eda1
F publish.sh 33cbe6798969f637698044023c139080e5d772a6
F src/TODO af7f3cab0228e34149cf98e073aa83d45878e7e6
-F src/btree.c f5b3bf49c98a90754097e8f0a946931d9cc857ef
+F src/btree.c 2789f704777d29b1b38e62e4798381ce602dc0fb
F src/btree.h 57d653ef5137b91f2a068aaf71a2905468dd2cb7
F src/build.c 8857c16751a5e9c5ee845e1b3cf2da78935c8cb3
F src/delete.c a4c13c444544f315703d5fbed6419c8786f66581
@@ -40,20 +40,20 @@ F src/select.c c34b02eafaa69fde6b4428df7861c3417b3079f9
F src/shell.c 71597951753b56a97fea1c7a30908f31e635c00c
F src/shell.tcl 27ecbd63dd88396ad16d81ab44f73e6c0ea9d20e
F src/sqlite.h.in 934de9112747ad8d8e7d5fec44876246b24ca5a3
-F src/sqliteInt.h 9a18aebf42a805ba02f55eba2239beabe35f02b3
+F src/sqliteInt.h fa9f56b77e0790f0ec329195c2255e2d8e440b0a
F src/table.c c89698bd5bb4b8d14722d6ee7e9be014c383d24a
F src/tclsqlite.c 4896e078495bf868742f5394dcf01c5efe5bea02
F src/test1.c e4b31f62ea71963cbae44338acf477a04fc8fc49
F src/test2.c e9f99aa5ee73872819259d6612c11e55e1644321
F src/test3.c 4a0d7b882fdae731dbb759f512ad867122452f96
-F src/tokenize.c 8f4c2b5e7fb471ba194979fb4dd5f947402fd792
+F src/tokenize.c 9ede24b17630351d70258bf8fa4f70f5990d45ae
F src/update.c 4eeb154a2da8a934d180e2d9e4211ac0a7a4ce8b
F src/util.c aa4d2de60cb2445239b71c79c3a8c0b7c0d3336a
-F src/vdbe.c 9e4fd512dd3e66d37f5b53ae88138fda4f9aa227
-F src/vdbe.h 4a587ec56943d34698edf507ad5a746e87cb8cf4
-F src/where.c 22fe910c7c8e2736eb37e9861343e90c0b513c86
+F src/vdbe.c a71e73e9a4a63fe2f63546a1a43fce4de5136476
+F src/vdbe.h c29e6fdfa157b3cce18258c05d9d533eb9cd1377
+F src/where.c 601f096f2a37ca688a775ca36d33534b13b876cb
F test/all.test 2a51e5395ac7c2c539689b123b9782a05e3837fe
-F test/bigrow.test a35f2de9948b24e427fb292c35947795efe182d0
+F test/bigrow.test 9458134d67f81559845f934fdd6802fe19a68ad1
F test/btree.test 47952c7a0c22660566264c68c0664592b7da85ce
F test/btree2.test 08e9485619265cbaf5d11bd71f357cdc26bb87e0
F test/copy.test 768e6f1701a07d08090e1ca7f7dcce0a7a72b43e
@@ -102,7 +102,7 @@ F www/arch.fig d5f9752a4dbf242e9cfffffd3f5762b6c63b3bcf
F www/arch.png 82ef36db1143828a7abc88b1e308a5f55d4336f4
F www/arch.tcl 03b521d252575f93b9c52f7c8b0007011512fcfb
F www/c_interface.tcl d446234c1d3ed747fcefd30e972a19f2b2fc0e05
-F www/changes.tcl 13c447ca789c2ee8994ad827296c71b1dce628ba
+F www/changes.tcl 797653d0d9988c716beb8a33e54b682fe53a4b93
F www/crosscompile.tcl c99efacb3aefaa550c6e80d91b240f55eb9fd33e
F www/download.tcl 3e51c9ff1326b0a182846134987301310dff7d60
F www/dynload.tcl 02eb8273aa78cfa9070dd4501dca937fb22b466c
@@ -114,7 +114,7 @@ F www/speed.tcl 212a91d555384e01873160d6a189f1490c791bc2
F www/sqlite.tcl 6a21242a272e9c0939a04419a51c3d50cae33e3e
F www/tclsqlite.tcl 13d50723f583888fc80ae1a38247c0ab415066fa
F www/vdbe.tcl bb7d620995f0a987293e9d4fb6185a3b077e9b44
-P 0a8c2f4f9812ffa7d43be0e3b59648dca40fa83c
-R 20a1ec902a198db024868b1e2643b8b3
+P 0fd2874205f1a4b89fc069cb429c1b0c7a0b99c1
+R 810197be2f3f50401607f0c17a5fe11e
U drh
-Z cdd8ae8efa6d763d99acb5bacc9c14f3
+Z 863cf3e2c644c7eb0aa13f1cb68860b5
diff --git a/manifest.uuid b/manifest.uuid
index 1cce2bb0d9..1cb9881ffa 100644
--- a/manifest.uuid
+++ b/manifest.uuid
@@ -1 +1 @@
-0fd2874205f1a4b89fc069cb429c1b0c7a0b99c1
\ No newline at end of file
+7dd58fad398253608f55867cf1c7749eef005657
\ No newline at end of file
diff --git a/src/btree.c b/src/btree.c
index 78708657af..aea978b04f 100644
--- a/src/btree.c
+++ b/src/btree.c
@@ -9,7 +9,7 @@
** May you share freely, never taking more than you give.
**
*************************************************************************
-** $Id: btree.c,v 1.36 2001/11/01 13:52:53 drh Exp $
+** $Id: btree.c,v 1.37 2001/11/04 18:32:47 drh Exp $
**
** This file implements an external (disk-based) database using BTrees.
** For a detailed discussion of BTrees, refer to
@@ -99,7 +99,7 @@ typedef struct OverflowPage OverflowPage;
** SQLite database in order to identify the file as a real database.
*/
static const char zMagicHeader[] =
- "** This file contains an SQLite 2.0 database **";
+ "** This file contains an SQLite 2.1 database **";
#define MAGIC_SIZE (sizeof(zMagicHeader))
/*
@@ -175,8 +175,12 @@ struct CellHdr {
Pgno leftChild; /* Child page that comes before this cell */
u16 nKey; /* Number of bytes in the key */
u16 iNext; /* Index in MemPage.u.aDisk[] of next cell in sorted order */
- u32 nData; /* Number of bytes of data */
+ u8 nKeyHi; /* Upper 8 bits of key size for keys larger than 65535 bytes */
+ u8 nDataHi; /* Upper 8 bits of data size when the size is more than 65535 */
+ u16 nData; /* Number of bytes of data */
};
+#define NKEY(h) (h.nKey + h.nKeyHi*65536)
+#define NDATA(h) (h.nData + h.nDataHi*65536)
/*
** The minimum size of a complete Cell. The Cell must contain a header
@@ -340,7 +344,7 @@ struct BtCursor {
** is NOT included in the value returned from this routine.
*/
static int cellSize(Cell *pCell){
- int n = pCell->h.nKey + pCell->h.nData;
+ int n = NKEY(pCell->h) + NDATA(pCell->h);
if( n>MX_LOCAL_PAYLOAD ){
n = MX_LOCAL_PAYLOAD + sizeof(Pgno);
}else{
@@ -936,7 +940,7 @@ int sqliteBtreeKeySize(BtCursor *pCur, int *pSize){
*pSize = 0;
}else{
pCell = pPage->apCell[pCur->idx];
- *pSize = pCell->h.nKey;
+ *pSize = NKEY(pCell->h);
}
return SQLITE_OK;
}
@@ -1019,8 +1023,8 @@ int sqliteBtreeKey(BtCursor *pCur, int offset, int amt, char *zBuf){
return 0;
}
pCell = pPage->apCell[pCur->idx];
- if( amt+offset > pCell->h.nKey ){
- amt = pCell->h.nKey - offset;
+ if( amt+offset > NKEY(pCell->h) ){
+ amt = NKEY(pCell->h) - offset;
if( amt<=0 ){
return 0;
}
@@ -1045,7 +1049,7 @@ int sqliteBtreeDataSize(BtCursor *pCur, int *pSize){
*pSize = 0;
}else{
pCell = pPage->apCell[pCur->idx];
- *pSize = pCell->h.nData;
+ *pSize = NDATA(pCell->h);
}
return SQLITE_OK;
}
@@ -1070,13 +1074,13 @@ int sqliteBtreeData(BtCursor *pCur, int offset, int amt, char *zBuf){
return 0;
}
pCell = pPage->apCell[pCur->idx];
- if( amt+offset > pCell->h.nData ){
- amt = pCell->h.nData - offset;
+ if( amt+offset > NDATA(pCell->h) ){
+ amt = NDATA(pCell->h) - offset;
if( amt<=0 ){
return 0;
}
}
- getPayload(pCur, offset + pCell->h.nKey, amt, zBuf);
+ getPayload(pCur, offset + NKEY(pCell->h), amt, zBuf);
return amt;
}
@@ -1114,8 +1118,8 @@ int sqliteBtreeKeyCompare(
assert( pCur->pPage );
assert( pCur->idx>=0 && pCur->idx<pCur->pPage->nCell );
pCell = pCur->pPage->apCell[pCur->idx];
- if( nKey > pCell->h.nKey ){
- nKey = pCell->h.nKey;
+ if( nKey > NKEY(pCell->h) ){
+ nKey = NKEY(pCell->h);
}
n = nKey;
if( n>MX_LOCAL_PAYLOAD ){
@@ -1187,7 +1191,7 @@ static int compareKey(
assert( pCur->pPage );
assert( pCur->pPage->nCell>pCur->idx && pCur->idx>=0 );
pCell = pCur->pPage->apCell[pCur->idx];
- c = pCell->h.nKey - nKeyOrig;
+ c = NKEY(pCell->h) - nKeyOrig;
}
*pResult = c;
return SQLITE_OK;
@@ -1495,7 +1499,7 @@ static int clearCell(Btree *pBt, Cell *pCell){
Pgno ovfl, nextOvfl;
int rc;
- if( pCell->h.nKey + pCell->h.nData <= MX_LOCAL_PAYLOAD ){
+ if( NKEY(pCell->h) + NDATA(pCell->h) <= MX_LOCAL_PAYLOAD ){
return SQLITE_OK;
}
ovfl = pCell->ovfl;
@@ -1531,8 +1535,10 @@ static int fillInCell(
char *pSpace;
pCell->h.leftChild = 0;
- pCell->h.nKey = nKey;
- pCell->h.nData = nData;
+ pCell->h.nKey = nKey & 0xffff;
+ pCell->h.nKeyHi = nKey >> 16;
+ pCell->h.nData = nData & 0xffff;
+ pCell->h.nDataHi = nData >> 16;
pCell->h.iNext = 0;
pNext = &pCell->ovfl;
@@ -2413,7 +2419,7 @@ int sqliteBtreePageDump(Btree *pBt, int pgno, int recursive){
Cell *pCell = (Cell*)&pPage->u.aDisk[idx];
int sz = cellSize(pCell);
sprintf(range,"%d..%d", idx, idx+sz-1);
- sz = pCell->h.nKey + pCell->h.nData;
+ sz = NKEY(pCell->h) + NDATA(pCell->h);
if( sz>sizeof(payload)-1 ) sz = sizeof(payload)-1;
memcpy(payload, pCell->aPayload, sz);
for(j=0; j<sz; j++){
if( payload[j]<0x20 || payload[j]>0x7f ) payload[j] = '.';
}
payload[sz] = 0;
printf(
"cell %2d: i=%-10s chld=%-4d nk=%-4d nd=%-4d payload=%s\n",
- i, range, (int)pCell->h.leftChild, pCell->h.nKey, pCell->h.nData,
+ i, range, (int)pCell->h.leftChild, NKEY(pCell->h), NDATA(pCell->h),
payload
);
if( pPage->isInit && pPage->apCell[i]!=pCell ){
@@ -2652,7 +2658,7 @@ static int checkTreePage(
/* Check payload overflow pages
*/
- sz = pCell->h.nKey + pCell->h.nData;
+ sz = NKEY(pCell->h) + NDATA(pCell->h);
sprintf(zContext, "On page %d cell %d: ", iPage, i);
if( sz>MX_LOCAL_PAYLOAD ){
int nPage = (sz - MX_LOCAL_PAYLOAD + OVERFLOW_SIZE - 1)/OVERFLOW_SIZE;
@@ -2662,8 +2668,8 @@ static int checkTreePage(
/* Check that keys are in the right order
*/
cur.idx = i;
- zKey2 = sqliteMalloc( pCell->h.nKey+1 );
- getPayload(&cur, 0, pCell->h.nKey, zKey2);
+ zKey2 = sqliteMalloc( NKEY(pCell->h)+1 );
+ getPayload(&cur, 0, NKEY(pCell->h), zKey2);
if( zKey1 && strcmp(zKey1,zKey2)>=0 ){
checkAppendMsg(pCheck, zContext, "Key is out of order");
}
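The CellHdr change above widens the per-cell key and data sizes from 16 to 24
bits: the low 16 bits stay in the old u16 slot and a new u8 carries the high
byte, which the NKEY()/NDATA() macros reassemble. That is where the 16777215
byte (2^24-1) format ceiling mentioned in sqliteInt.h comes from. A minimal
standalone sketch of the packing arithmetic; the struct and helper names here
are illustrative, not part of the patch:

    #include <assert.h>

    typedef unsigned char u8;
    typedef unsigned short u16;
    typedef unsigned int u32;

    /* Model of the widened size fields in CellHdr (the real struct also
    ** carries leftChild, iNext, and the matching nData/nDataHi pair). */
    struct SizeFields {
      u16 nKey;     /* Lower 16 bits of the key size */
      u8 nKeyHi;    /* Upper 8 bits of the key size */
    };

    /* Split a 24-bit size the same way fillInCell() does. */
    static void packKeySize(struct SizeFields *p, u32 nKey){
      p->nKey = nKey & 0xffff;
      p->nKeyHi = nKey >> 16;
    }

    /* Reassemble, mirroring the NKEY() macro: nKey + nKeyHi*65536. */
    static u32 unpackKeySize(const struct SizeFields *p){
      return p->nKey + p->nKeyHi*65536;
    }

    int main(void){
      struct SizeFields s;
      packKeySize(&s, 1048576);             /* 1MB needs more than 16 bits */
      assert( unpackKeySize(&s)==1048576 );
      packKeySize(&s, 16777215);            /* 2^24-1: largest representable */
      assert( unpackKeySize(&s)==16777215 );
      return 0;
    }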
diff --git a/src/sqliteInt.h b/src/sqliteInt.h
index f54236401c..c1232e6845 100644
--- a/src/sqliteInt.h
+++ b/src/sqliteInt.h
@@ -11,7 +11,7 @@
*************************************************************************
** Internal interface definitions for SQLite.
**
-** @(#) $Id: sqliteInt.h,v 1.65 2001/10/22 02:58:10 drh Exp $
+** @(#) $Id: sqliteInt.h,v 1.66 2001/11/04 18:32:47 drh Exp $
*/
#include "sqlite.h"
#include "hash.h"
@@ -40,9 +40,15 @@ typedef unsigned char u8; /* 1-byte unsigned integer */
/*
** The maximum number of bytes of data that can be put into a single
-** row of a single table.
+** row of a single table. The upper bound on this limit is 16777215
+** bytes (or 16MB-1). We have arbitrarily set the limit to just 1MB
+** here because the overflow page chain is inefficient for really big
+** records and we want to discourage people from thinking that
+** multi-megabyte records are OK. If your needs are different, you can
+** change this define and recompile to increase or decrease the record
+** size.
*/
-#define MAX_BYTES_PER_ROW 65535
+#define MAX_BYTES_PER_ROW 1048576
/*
** If memory allocation problems are found, recompile with
@@ -237,8 +243,8 @@ struct Index {
** this structure.
*/
struct Token {
- char *z; /* Text of the token. Not NULL-terminated! */
- int n; /* Number of characters in this token */
+ const char *z; /* Text of the token. Not NULL-terminated! */
+ int n; /* Number of characters in this token */
};
/*
@@ -417,7 +423,7 @@ int sqliteSortCompare(const char *, const char *);
void sqliteSetString(char **, const char *, ...);
void sqliteSetNString(char **, ...);
void sqliteDequote(char*);
-int sqliteRunParser(Parse*, char*, char **);
+int sqliteRunParser(Parse*, const char*, char **);
void sqliteExec(Parse*);
Expr *sqliteExpr(int, Expr*, Expr*, Token*);
void sqliteExprSpan(Expr*,Token*,Token*);
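The new MAX_BYTES_PER_ROW of 1048576 is a policy limit, not a format limit:
the three-byte index entries and 24-bit cell sizes introduced elsewhere in
this patch could address up to 16777215 bytes. A small sketch of the
relationship, with the SQLITE_TOOBIG-style guard modeled as a plain
predicate (recordSizeOk is a hypothetical name, not from the patch):

    #include <stdio.h>

    #define MAX_BYTES_PER_ROW 1048576   /* the new 1MB default */
    #define FORMAT_CEILING 16777215     /* 2^24-1, what the header can express */

    /* In the spirit of the OP_MakeRecord guard: a record whose encoded
    ** size exceeds MAX_BYTES_PER_ROW is rejected before it is written. */
    static int recordSizeOk(int nByte){
      return nByte<=MAX_BYTES_PER_ROW;
    }

    int main(void){
      printf("headroom above the 1MB default: %d bytes\n",
             FORMAT_CEILING - MAX_BYTES_PER_ROW);        /* 15728639 */
      printf("1048577-byte record accepted? %d\n",
             recordSizeOk(1048577));                     /* 0 */
      return 0;
    }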
diff --git a/src/tokenize.c b/src/tokenize.c
index 6116f1c0e6..61edd6202e 100644
--- a/src/tokenize.c
+++ b/src/tokenize.c
@@ -15,7 +15,7 @@
** individual tokens and sends those tokens one-by-one over to the
** parser for analysis.
**
-** $Id: tokenize.c,v 1.30 2001/10/22 02:58:10 drh Exp $
+** $Id: tokenize.c,v 1.31 2001/11/04 18:32:48 drh Exp $
*/
#include "sqliteInt.h"
#include "os.h"
@@ -346,7 +346,7 @@ static int sqliteGetToken(const unsigned char *z, int *tokenType){
** memory obtained from malloc() and *pzErrMsg made to point to that
** error message. Or maybe not.
*/
-int sqliteRunParser(Parse *pParse, char *zSql, char **pzErrMsg){
+int sqliteRunParser(Parse *pParse, const char *zSql, char **pzErrMsg){
int nErr = 0;
int i;
void *pEngine;
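Const-qualifying Token.z and the zSql argument of sqliteRunParser() lets
callers hand read-only SQL text (string literals, tables of fixed scripts,
mapped buffers) to the parser without casting away const. A hypothetical
caller, assuming only the declarations shown in this patch:

    typedef struct Parse Parse;   /* opaque here; defined in sqliteInt.h */
    extern int sqliteRunParser(Parse*, const char*, char **);

    /* The caller's SQL often lives in read-only storage.  With the old
    ** char* signature, passing zScript required casting away const. */
    static const char zScript[] = "SELECT * FROM t1;";

    int runFixedSql(Parse *pParse){
      char *zErrMsg = 0;
      return sqliteRunParser(pParse, zScript, &zErrMsg);
    }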
diff --git a/src/vdbe.c b/src/vdbe.c
index 8f8ce00cf3..b121e186e4 100644
--- a/src/vdbe.c
+++ b/src/vdbe.c
@@ -30,7 +30,7 @@
** But other routines are also provided to help in building up
** a program instruction by instruction.
**
-** $Id: vdbe.c,v 1.91 2001/11/01 14:41:34 drh Exp $
+** $Id: vdbe.c,v 1.92 2001/11/04 18:32:48 drh Exp $
*/
#include "sqliteInt.h"
#include <ctype.h>
@@ -348,7 +348,7 @@ void sqliteVdbeChangeP1(Vdbe *p, int addr, int val){
**
** If addr<0 then change P3 on the most recently inserted instruction.
*/
-void sqliteVdbeChangeP3(Vdbe *p, int addr, char *zP3, int n){
+void sqliteVdbeChangeP3(Vdbe *p, int addr, const char *zP3, int n){
Op *pOp;
if( p==0 || p->aOp==0 ) return;
if( addr<0 || addr>=p->nOp ){
@@ -364,7 +364,7 @@ void sqliteVdbeChangeP3(Vdbe *p, int addr, char *zP3, int n){
pOp->p3 = 0;
pOp->p3type = P3_NOTUSED;
}else if( n<0 ){
- pOp->p3 = zP3;
+ pOp->p3 = (char*)zP3;
pOp->p3type = n;
}else{
sqliteSetNString(&pOp->p3, zP3, n, 0);
@@ -1910,27 +1910,37 @@ case OP_NotNull: {
/* Opcode: MakeRecord P1 * *
**
** Convert the top P1 entries of the stack into a single entry
-** suitable for use as a data record in a database table. To do this
-** all entries (except NULLs) are converted to strings and
-** concatenated. The null-terminators are included on all string
-** except for NULL columns which are represented by zero bytes.
-** The lowest entry
-** on the stack is the first in the concatenation and the top of
-** the stack is the last. After all columns are concatenated, an
-** index header is added. The index header consists of P1 16-bit integers
-** which hold the offset of the beginning of each column data from the
-** beginning of the completed record including the header.
-**
-** The Column opcode is used to unpack a record manufactured with
-** the opcode.
+** suitable for use as a data record in a database table. The
+** details of the format are irrelevant as long as the OP_Column
+** opcode can decode the record later. Refer to source code
+** comments for the details of the record format.
*/
case OP_MakeRecord: {
char *zNewRecord;
int nByte;
int nField;
int i, j;
- u16 addr;
+ int idxWidth;
+ u32 addr;
+ /* Assuming the record contains N fields, the record format looks
+ ** like this:
+ **
+ ** -------------------------------------------------------------------
+ ** | idx0 | idx1 | ... | idx(N-1) | idx(N) | data0 | ... | data(N-1) |
+ ** -------------------------------------------------------------------
+ **
+ ** All data fields are converted to strings before being stored and
+ ** are stored with their null terminators. NULL entries omit the
+ ** null terminator. Thus an empty string uses 1 byte and a NULL uses
+ ** zero bytes. Data(0) is taken from the lowest element of the stack
+ ** and data(N-1) is the top of the stack.
+ **
+ ** Each of the idx() entries is either 1, 2, or 3 bytes depending on
+ ** how big the total record is. Idx(0) contains the offset to the start
+ ** of data(0). Idx(k) contains the offset to the start of data(k).
+ ** Idx(N) contains the total number of bytes in the record.
+ */
nField = pOp->p1;
VERIFY( if( p->tos+1<nField ) goto not_enough_stack; )
- nByte = sizeof(addr)*nField;
+ nByte = 0;
for(i=p->tos-nField+1; i<=p->tos; i++){
if( (aStack[i].flags & STK_Null)==0 ){
if( Stringify(p, i) ) goto no_mem;
nByte += aStack[i].n;
}
}
+ if( nByte + nField + 1 < 256 ){
+ idxWidth = 1;
+ }else if( nByte + 2*(nField + 1) < 65536 ){
+ idxWidth = 2;
+ }else{
+ idxWidth = 3;
+ }
+ nByte += idxWidth*(nField + 1);
if( nByte>MAX_BYTES_PER_ROW ){
rc = SQLITE_TOOBIG;
goto abort_due_to_error;
}
@@ -1948,14 +1965,26 @@ case OP_MakeRecord: {
zNewRecord = sqliteMalloc( nByte );
if( zNewRecord==0 ) goto no_mem;
j = 0;
- addr = sizeof(addr)*nField;
+ addr = idxWidth*(nField+1);
for(i=p->tos-nField+1; i<=p->tos; i++){
- memcpy(&zNewRecord[j], (char*)&addr, sizeof(addr));
- j += sizeof(addr);
+ zNewRecord[j++] = addr & 0xff;
+ if( idxWidth>1 ){
+ zNewRecord[j++] = (addr>>8)&0xff;
+ if( idxWidth>2 ){
+ zNewRecord[j++] = (addr>>16)&0xff;
+ }
+ }
if( (aStack[i].flags & STK_Null)==0 ){
addr += aStack[i].n;
}
}
+ zNewRecord[j++] = addr & 0xff;
+ if( idxWidth>1 ){
+ zNewRecord[j++] = (addr>>8)&0xff;
+ if( idxWidth>2 ){
+ zNewRecord[j++] = (addr>>16)&0xff;
+ }
+ }
for(i=p->tos-nField+1; i<=p->tos; i++){
if( (aStack[i].flags & STK_Null)==0 ){
memcpy(&zNewRecord[j], zStack[i], aStack[i].n);
@@ -2420,19 +2449,20 @@ case OP_Close: {
case OP_MoveTo: {
int i = pOp->p1;
int tos = p->tos;
+ Cursor *pC;
+
VERIFY( if( tos<0 ) goto not_enough_stack; )
- if( i>=0 && i<p->nCursor && p->aCsr[i].pCursor ){
+ if( i>=0 && i<p->nCursor && (pC = &p->aCsr[i])->pCursor!=0 ){
int res;
if( aStack[tos].flags & STK_Int ){
int iKey = bigEndian(aStack[tos].i);
- sqliteBtreeMoveto(p->aCsr[i].pCursor,
- (char*)&iKey, sizeof(int), &res);
- p->aCsr[i].lastRecno = aStack[tos].i;
- p->aCsr[i].recnoIsValid = 1;
+ sqliteBtreeMoveto(pC->pCursor, (char*)&iKey, sizeof(int), &res);
+ pC->lastRecno = aStack[tos].i;
+ pC->recnoIsValid = 1;
}else{
if( Stringify(p, tos) ) goto no_mem;
- sqliteBtreeMoveto(p->aCsr[i].pCursor, zStack[tos], aStack[tos].n, &res);
- p->aCsr[i].recnoIsValid = 0;
+ sqliteBtreeMoveto(pC->pCursor, zStack[tos], aStack[tos].n, &res);
+ pC->recnoIsValid = 0;
}
p->nFetch++;
}
@@ -2496,17 +2526,16 @@ case OP_Found: {
int i = pOp->p1;
int tos = p->tos;
int alreadyExists = 0;
+ Cursor *pC;
VERIFY( if( tos<0 ) goto not_enough_stack; )
- if( VERIFY( i>=0 && i<p->nCursor && ) p->aCsr[i].pCursor ){
+ if( VERIFY( i>=0 && i<p->nCursor && ) (pC = &p->aCsr[i])->pCursor!=0 ){
int res, rx;
if( aStack[tos].flags & STK_Int ){
int iKey = bigEndian(aStack[tos].i);
- rx = sqliteBtreeMoveto(p->aCsr[i].pCursor,
- (char*)&iKey, sizeof(int), &res);
+ rx = sqliteBtreeMoveto(pC->pCursor, (char*)&iKey, sizeof(int), &res);
}else{
if( Stringify(p, tos) ) goto no_mem;
- rx = sqliteBtreeMoveto(p->aCsr[i].pCursor,
- zStack[tos], aStack[tos].n, &res);
+ rx = sqliteBtreeMoveto(pC->pCursor, zStack[tos], aStack[tos].n, &res);
}
alreadyExists = rx==SQLITE_OK && res==0;
}
@@ -2531,7 +2560,8 @@ case OP_Found: {
case OP_NewRecno: {
int i = pOp->p1;
int v = 0;
- if( VERIFY( i<0 || i>=p->nCursor || ) p->aCsr[i].pCursor==0 ){
+ Cursor *pC;
+ if( VERIFY( i<0 || i>=p->nCursor || ) (pC = &p->aCsr[i])->pCursor==0 ){
v = 0;
}else{
/* A probabilistic algorithm is used to locate an unused rowid.
@@ -2561,7 +2591,7 @@ case OP_NewRecno: {
}
if( v==0 ) continue;
x = bigEndian(v);
- rx = sqliteBtreeMoveto(p->aCsr[i].pCursor, &x, sizeof(int), &res);
+ rx = sqliteBtreeMoveto(pC->pCursor, &x, sizeof(int), &res);
cnt++;
}while( cnt<1000 && rx==SQLITE_OK && res==0 );
db->nextRowid = v;
@@ -2656,67 +2686,61 @@ case OP_KeyAsData: {
** data.
*/
case OP_Column: {
- int amt, offset, nCol, payloadSize;
- u16 aHdr[10];
- static const int mxHdr = sizeof(aHdr)/sizeof(aHdr[0]);
+ int amt, offset, end, nCol, payloadSize;
int i = pOp->p1;
int p2 = pOp->p2;
int tos = p->tos+1;
+ Cursor *pC;
BtCursor *pCrsr;
- char *z;
+ int idxWidth;
+ unsigned char aHdr[10];
+ int (*xRead)(BtCursor*, int, int, char*);
VERIFY( if( NeedStack(p, tos+1) ) goto no_mem; )
- if( VERIFY( i>=0 && i<p->nCursor && ) (pCrsr = p->aCsr[i].pCursor)!=0 ){
- int (*xSize)(BtCursor*, int*);
- int (*xRead)(BtCursor*, int, int, char*);
+ if( VERIFY( i>=0 && i<p->nCursor && ) (pC = &p->aCsr[i])->pCursor!=0 ){
/* Use different access functions depending on whether the information
** is coming from the key or the data of the record.
*/
- if( p->aCsr[i].keyAsData ){
- xSize = sqliteBtreeKeySize;
+ pCrsr = pC->pCursor;
+ if( pC->keyAsData ){
+ sqliteBtreeKeySize(pCrsr, &payloadSize);
xRead = sqliteBtreeKey;
}else{
- xSize = sqliteBtreeDataSize;
+ sqliteBtreeDataSize(pCrsr, &payloadSize);
xRead = sqliteBtreeData;
}
- /*
- ** The code is complicated by efforts to minimize the number
- ** of invocations of xRead() since that call can be expensive.
- ** For the common case where P2 is small, xRead() is invoked
- ** twice. For larger values of P2, it has to be called
- ** three times.
+ /* Figure out how many bytes in the column data and where the column
+ ** data begins.
*/
- (*xSize)(pCrsr, &payloadSize);
- if( payloadSize < sizeof(aHdr[0])*(p2+1) ){
+ if( payloadSize<256 ){
+ idxWidth = 1;
+ }else if( payloadSize<65536 ){
+ idxWidth = 2;
+ }else{
+ idxWidth = 3;
+ }
+
+ /* Figure out where the requested column is stored and how big it is.
+ */
+ if( payloadSize < idxWidth*(p2+1) ){
rc = SQLITE_CORRUPT;
goto abort_due_to_error;
}
- if( p2+1<mxHdr ){
- (*xRead)(pCrsr, 0, sizeof(aHdr[0])*(p2+2), (char*)aHdr);
- nCol = aHdr[0]/sizeof(aHdr[0]);
- offset = aHdr[p2];
- if( p2==nCol-1 ){
- amt = payloadSize - offset;
- }else{
- amt = aHdr[p2+1] - offset;
- }
- }else{
- (*xRead)(pCrsr, 0, sizeof(aHdr[0]), (char*)aHdr);
- nCol = aHdr[0]/sizeof(aHdr[0]);
- (*xRead)(pCrsr, sizeof(aHdr[0])*p2, sizeof(aHdr[0])*2, (char*)aHdr);
- offset = aHdr[0];
- if( p2==nCol-1 ){
- amt = payloadSize - offset;
- }else{
- amt = aHdr[1] - offset;
- }
+ (*xRead)(pCrsr, idxWidth*p2, idxWidth*2, (char*)&aHdr[0]);
+ offset = aHdr[0];
+ end = aHdr[idxWidth];
+ if( idxWidth>1 ){
+ offset |= aHdr[1]<<8;
+ end |= aHdr[idxWidth+1]<<8;
+ if( idxWidth>2 ){
+ offset |= aHdr[2]<<16;
+ end |= aHdr[idxWidth+2]<<16;
}
}
- if( payloadSize < nCol || amt<0 || offset<0 ){
+ amt = end - offset;
+ if( amt<0 || offset<0 || end>payloadSize ){
rc = SQLITE_CORRUPT;
goto abort_due_to_error;
}
@@ -2727,7 +2751,7 @@ case OP_Column: {
if( amt==0 ){
aStack[tos].flags = STK_Null;
}else{
- z = sqliteMalloc( amt );
+ char *z = sqliteMalloc( amt );
if( z==0 ) goto no_mem;
(*xRead)(pCrsr, offset, amt, z);
aStack[tos].flags = STK_Str | STK_Dyn;
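Taken together, OP_MakeRecord and OP_Column define the new record layout:
N+1 little-endian index entries of idxWidth bytes each, followed by the field
data, where idx(k) is the offset of data(k) and idx(N) is the total record
size. A self-contained sketch of that encode/decode round trip, using plain
C strings for the fields. The helper names are illustrative, and the
width-selection arithmetic on the encode side is an assumption chosen to stay
consistent with the decode side; the real opcodes work from the VDBE stack
and give NULLs zero bytes and no terminator:

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Write index entry k as a little-endian value of idxWidth bytes. */
    static void putIdx(unsigned char *z, int idxWidth, int k, unsigned addr){
      int j = k*idxWidth;
      z[j] = addr & 0xff;
      if( idxWidth>1 ){
        z[j+1] = (addr>>8)&0xff;
        if( idxWidth>2 ) z[j+2] = (addr>>16)&0xff;
      }
    }

    /* Read index entry k, mirroring the OP_Column offset/end decoding. */
    static unsigned getIdx(const unsigned char *z, int idxWidth, int k){
      int j = k*idxWidth;
      unsigned addr = z[j];
      if( idxWidth>1 ){
        addr |= z[j+1]<<8;
        if( idxWidth>2 ) addr |= z[j+2]<<16;
      }
      return addr;
    }

    /* Encode nField NUL-terminated strings into a record. */
    static unsigned char *makeRecord(const char **azField, int nField, int *pnByte){
      int i, nData = 0, idxWidth;
      unsigned addr;
      unsigned char *z;
      for(i=0; i<nField; i++) nData += strlen(azField[i]) + 1;
      if( nData + (nField+1) < 256 ) idxWidth = 1;
      else if( nData + 2*(nField+1) < 65536 ) idxWidth = 2;
      else idxWidth = 3;
      *pnByte = idxWidth*(nField+1) + nData;
      z = malloc( *pnByte );
      if( z==0 ) exit(1);
      addr = idxWidth*(nField+1);
      for(i=0; i<nField; i++){
        putIdx(z, idxWidth, i, addr);
        strcpy((char*)&z[addr], azField[i]);
        addr += strlen(azField[i]) + 1;
      }
      putIdx(z, idxWidth, nField, addr);   /* idx(N) = total record size */
      return z;
    }

    int main(void){
      const char *az[] = { "abc", "", "hello" };
      int n;
      unsigned char *z = makeRecord(az, 3, &n);
      int idxWidth = n<256 ? 1 : n<65536 ? 2 : 3;   /* as in OP_Column */
      unsigned off = getIdx(z, idxWidth, 2);        /* start of data(2) */
      unsigned end = getIdx(z, idxWidth, 3);        /* idx(N): record size */
      assert( end-off==6 && memcmp(&z[off], "hello", 6)==0 );
      free(z);
      return 0;
    }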
diff --git a/src/vdbe.h b/src/vdbe.h
index 81e2356d2a..30d955b784 100644
--- a/src/vdbe.h
+++ b/src/vdbe.h
@@ -15,7 +15,7 @@
** or VDBE. The VDBE implements an abstract machine that runs a
** simple program to access and modify the underlying database.
**
-** $Id: vdbe.h,v 1.31 2001/11/01 14:41:34 drh Exp $
+** $Id: vdbe.h,v 1.32 2001/11/04 18:32:48 drh Exp $
*/
#ifndef _SQLITE_VDBE_H_
#define _SQLITE_VDBE_H_
@@ -207,7 +207,7 @@ void sqliteVdbeCreateCallback(Vdbe*, int*);
int sqliteVdbeAddOp(Vdbe*,int,int,int);
int sqliteVdbeAddOpList(Vdbe*, int nOp, VdbeOp const *aOp);
void sqliteVdbeChangeP1(Vdbe*, int addr, int P1);
-void sqliteVdbeChangeP3(Vdbe*, int addr, char *zP1, int N);
+void sqliteVdbeChangeP3(Vdbe*, int addr, const char *zP1, int N);
void sqliteVdbeDequoteP3(Vdbe*, int addr);
int sqliteVdbeMakeLabel(Vdbe*);
void sqliteVdbeDelete(Vdbe*);
diff --git a/src/where.c b/src/where.c
index b63bc059a7..a18db244d3 100644
--- a/src/where.c
+++ b/src/where.c
@@ -13,7 +13,7 @@
** the WHERE clause of SQL statements. Also found here are subroutines
** to generate VDBE code to evaluate expressions.
**
-** $Id: where.c,v 1.23 2001/10/13 01:06:49 drh Exp $
+** $Id: where.c,v 1.24 2001/11/04 18:32:48 drh Exp $
*/
#include "sqliteInt.h"
@@ -101,7 +101,7 @@ static int exprTableUsage(int base, Expr *p){
** structure.
**
** "base" is the cursor number (the value of the iTable field) that
-** corresponds to the first entyr in the table list. This is the
+** corresponds to the first entry in the table list. This is the
** same as pParse->nTab.
*/
static void exprAnalyze(int base, ExprInfo *pInfo){
diff --git a/test/bigrow.test b/test/bigrow.test
index b4c61f8ae3..8851c2d0bb 100644
--- a/test/bigrow.test
+++ b/test/bigrow.test
@@ -12,7 +12,7 @@
# focus of this file is stressing the library by putting large amounts
# of data in a single row of a table.
#
-# $Id: bigrow.test,v 1.1 2001/09/24 03:12:40 drh Exp $
+# $Id: bigrow.test,v 1.2 2001/11/04 18:32:48 drh Exp $
set testdir [file dirname $argv0]
source $testdir/tester.tcl
@@ -50,11 +50,22 @@ do_test bigrow-1.3 {
execsql {SELECT b FROM t1}
} [list $::big1]
do_test bigrow-1.4 {
- set sql "INSERT INTO t1 VALUES('abc',"
- append sql "'[string range $::bigstr 0 65520]', 'xyz');"
+ set ::big2 [string range $::bigstr 0 65520]
+ set sql "INSERT INTO t1 VALUES('abc2',"
+ append sql "'$::big2', 'xyz2');"
set r [catch {execsql $sql} msg]
lappend r $msg
-} {1 {too much data for one table row}}
+} {0 {}}
+do_test bigrow-1.4.1 {
+ execsql {SELECT b FROM t1 ORDER BY c}
+} [list $::big1 $::big2]
+do_test bigrow-1.4.2 {
+ execsql {SELECT c FROM t1 ORDER BY c}
+} {xyz xyz2}
+do_test bigrow-1.4.3 {
+ execsql {DELETE FROM t1 WHERE a='abc2'}
+ execsql {SELECT c FROM t1}
+} {xyz}
do_test bigrow-1.5 {
execsql {
@@ -101,5 +112,104 @@ do_test bigrow-2.3 {
}
execsql "SELECT b FROM t1 WHERE a=='$::big1'"
} {abc}
+catch {unset ::bigstr}
+catch {unset ::big1}
+catch {unset ::big2}
+
+# Most of the tests above were created back when rows were limited in
+# size to 64K. Now rows can be much bigger. Test that logic. Also
+# make sure things work correctly at the transition boundaries between
+# row sizes of 256 to 257 bytes and from 65536 to 65537 bytes.
+#
+# We begin by testing the 256..257 transition.
+#
+do_test bigrow-3.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+do_test bigrow-3.2 {
+ execsql {
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 240 hi}
+for {set i 1} {$i<10} {incr i} {
+ do_test bigrow-3.3.$i {
+ execsql "UPDATE t1 SET b=b||'$i'"
+ execsql {SELECT a,length(b),c FROM t1}
+ } "one [expr {240+$i}] hi"
+}
+
+# Now test the 65536..65537 row-size transition.
+#
+do_test bigrow-4.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+do_test bigrow-4.2 {
+ execsql {
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ UPDATE t1 SET b=b||b;
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 122880 hi}
+do_test bigrow-4.3 {
+ execsql {
+ UPDATE t1 SET b=substr(b,0,65515)
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 65515 hi}
+for {set i 1} {$i<10} {incr i} {
+ do_test bigrow-4.4.$i {
+ execsql "UPDATE t1 SET b=b||'$i'"
+ execsql {SELECT a,length(b),c FROM t1}
+ } "one [expr {65515+$i}] hi"
+}
+
+# Check to make sure the library recovers safely if a row contains
+# too much data.
+#
+do_test bigrow-5.1 {
+ execsql {
+ DELETE FROM t1;
+ INSERT INTO t1(a,b,c) VALUES('one','abcdefghijklmnopqrstuvwxyz0123','hi');
+ }
+ execsql {SELECT a,length(b),c FROM t1}
+} {one 30 hi}
+set i 1
+for {set sz 60} {$sz<1048560} {incr sz $sz} {
+ do_test bigrow-5.2.$i {
+ execsql {
+ UPDATE t1 SET b=b||b;
+ SELECT a,length(b),c FROM t1;
+ }
+ } "one $sz hi"
+ incr i
+}
+do_test bigrow-5.3 {
+ set r [catch {execsql {UPDATE t1 SET b=b||b}} msg]
+ lappend r $msg
+} {1 {too much data for one table row}}
+do_test bigrow-5.4 {
+ execsql {DROP TABLE t1}
+} {}
finish_test
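The new bigrow-3.x and bigrow-4.x tests walk row sizes across 256 and 65536
bytes because those are the totals at which the record format's index entries
change width from 1 to 2 and from 2 to 3 bytes; an off-by-one in the width
selection would corrupt exactly those rows. The boundary logic, restated as
a small sketch (idxWidth as a free function here is for illustration only):

    #include <stdio.h>

    /* Width selection as in OP_Column, driven by total payload size. */
    static int idxWidth(int payloadSize){
      if( payloadSize<256 ) return 1;
      if( payloadSize<65536 ) return 2;
      return 3;
    }

    int main(void){
      printf("255 -> %d, 256 -> %d\n", idxWidth(255), idxWidth(256));         /* 1, 2 */
      printf("65535 -> %d, 65536 -> %d\n", idxWidth(65535), idxWidth(65536)); /* 2, 3 */
      return 0;
    }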
diff --git a/www/changes.tcl b/www/changes.tcl
index e9fb9bcda6..a437937515 100644
--- a/www/changes.tcl
+++ b/www/changes.tcl
@@ -17,6 +17,11 @@ proc chng {date desc} {
puts "
"
}
+chng {2001 ??? ?? (2.1.0)} {
+<li>Change the format of data records so that records up to 16MB in size
+ can be stored.</li>
+}
+
chng {2001 Nov 3 (2.0.8)} {
<li>Made selected parameters in API functions const. This should
be fully backwards compatible.</li>