| | @@ -325,11 +325,11 @@ |
| 325 | 325 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 326 | 326 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 327 | 327 | */ |
| 328 | 328 | #define SQLITE_VERSION "3.8.11" |
| 329 | 329 | #define SQLITE_VERSION_NUMBER 3008011 |
| 330 | | -#define SQLITE_SOURCE_ID "2015-07-03 17:54:49 030f60a7ba171650ce8c0ac32dc166eab80aca32" |
| 330 | +#define SQLITE_SOURCE_ID "2015-07-08 16:22:42 5348ffc3fda5168c1e9e14aa88b0c6aedbda7c94" |
| 331 | 331 | |
| 332 | 332 | /* |
| 333 | 333 | ** CAPI3REF: Run-Time Library Version Numbers |
| 334 | 334 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 335 | 335 | ** |
| | @@ -6503,10 +6503,13 @@ |
| 6503 | 6503 | #define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */ |
| 6504 | 6504 | #define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */ |
| 6505 | 6505 | #define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */ |
| 6506 | 6506 | #define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */ |
| 6507 | 6507 | #define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */ |
| 6508 | +#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */ |
| 6509 | +#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */ |
| 6510 | +#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */ |
| 6508 | 6511 | |
| 6509 | 6512 | /* |
| 6510 | 6513 | ** CAPI3REF: Retrieve the mutex for a database connection |
| 6511 | 6514 | ** METHOD: sqlite3 |
| 6512 | 6515 | ** |
| | @@ -8941,10 +8944,20 @@ |
| 8941 | 8944 | #if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS |
| 8942 | 8945 | # undef SQLITE_MAX_WORKER_THREADS |
| 8943 | 8946 | # define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS |
| 8944 | 8947 | #endif |
| 8945 | 8948 | |
| 8949 | +/* |
| 8950 | +** The default initial allocation for the pagecache when using separate |
| 8951 | +** pagecaches for each database connection. A positive number is the |
| 8952 | +** number of pages. A negative number N means that a buffer |
| 8953 | +** of -1024*N bytes is allocated and used for as many pages as it will hold. |
| 8954 | +*/ |
| 8955 | +#ifndef SQLITE_DEFAULT_PCACHE_INITSZ |
| 8956 | +# define SQLITE_DEFAULT_PCACHE_INITSZ 100 |
| 8957 | +#endif |
| 8958 | + |
| 8946 | 8959 | |
| 8947 | 8960 | /* |
| 8948 | 8961 | ** GCC does not define the offsetof() macro so we'll have to do it |
| 8949 | 8962 | ** ourselves. |
| 8950 | 8963 | */ |
| | @@ -14043,11 +14056,11 @@ |
| 14043 | 14056 | (void*)0, /* pScratch */ |
| 14044 | 14057 | 0, /* szScratch */ |
| 14045 | 14058 | 0, /* nScratch */ |
| 14046 | 14059 | (void*)0, /* pPage */ |
| 14047 | 14060 | 0, /* szPage */ |
| 14048 | | - 0, /* nPage */ |
| 14061 | + SQLITE_DEFAULT_PCACHE_INITSZ, /* nPage */ |
| 14049 | 14062 | 0, /* mxParserStack */ |
| 14050 | 14063 | 0, /* sharedCacheEnabled */ |
| 14051 | 14064 | SQLITE_SORTER_PMASZ, /* szPma */ |
| 14052 | 14065 | /* All the rest should always be initialized to zero */ |
| 14053 | 14066 | 0, /* isInit */ |
| | @@ -19454,11 +19467,11 @@ |
| 19454 | 19467 | ** The sqlite3_mutex_alloc() routine allocates a new |
| 19455 | 19468 | ** mutex and returns a pointer to it. If it returns NULL |
| 19456 | 19469 | ** that means that a mutex could not be allocated. |
| 19457 | 19470 | */ |
| 19458 | 19471 | static sqlite3_mutex *debugMutexAlloc(int id){ |
| 19459 | | - static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_APP3 - 1]; |
| 19472 | + static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1]; |
| 19460 | 19473 | sqlite3_debug_mutex *pNew = 0; |
| 19461 | 19474 | switch( id ){ |
| 19462 | 19475 | case SQLITE_MUTEX_FAST: |
| 19463 | 19476 | case SQLITE_MUTEX_RECURSIVE: { |
| 19464 | 19477 | pNew = sqlite3Malloc(sizeof(*pNew)); |
| | @@ -19669,10 +19682,13 @@ |
| 19669 | 19682 | ** <li> SQLITE_MUTEX_STATIC_LRU |
| 19670 | 19683 | ** <li> SQLITE_MUTEX_STATIC_PMEM |
| 19671 | 19684 | ** <li> SQLITE_MUTEX_STATIC_APP1 |
| 19672 | 19685 | ** <li> SQLITE_MUTEX_STATIC_APP2 |
| 19673 | 19686 | ** <li> SQLITE_MUTEX_STATIC_APP3 |
| 19687 | +** <li> SQLITE_MUTEX_STATIC_VFS1 |
| 19688 | +** <li> SQLITE_MUTEX_STATIC_VFS2 |
| 19689 | +** <li> SQLITE_MUTEX_STATIC_VFS3 |
| 19674 | 19690 | ** </ul> |
| 19675 | 19691 | ** |
| 19676 | 19692 | ** The first two constants cause sqlite3_mutex_alloc() to create |
| 19677 | 19693 | ** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE |
| 19678 | 19694 | ** is used but not necessarily so when SQLITE_MUTEX_FAST is used. |
| | @@ -19697,10 +19713,13 @@ |
| 19697 | 19713 | ** mutex types, the same mutex is returned on every call that has |
| 19698 | 19714 | ** the same type number. |
| 19699 | 19715 | */ |
| 19700 | 19716 | static sqlite3_mutex *pthreadMutexAlloc(int iType){ |
| 19701 | 19717 | static sqlite3_mutex staticMutexes[] = { |
| 19718 | + SQLITE3_MUTEX_INITIALIZER, |
| 19719 | + SQLITE3_MUTEX_INITIALIZER, |
| 19720 | + SQLITE3_MUTEX_INITIALIZER, |
| 19702 | 19721 | SQLITE3_MUTEX_INITIALIZER, |
| 19703 | 19722 | SQLITE3_MUTEX_INITIALIZER, |
| 19704 | 19723 | SQLITE3_MUTEX_INITIALIZER, |
| 19705 | 19724 | SQLITE3_MUTEX_INITIALIZER, |
| 19706 | 19725 | SQLITE3_MUTEX_INITIALIZER, |
| | @@ -20311,10 +20330,13 @@ |
| 20311 | 20330 | SQLITE3_MUTEX_INITIALIZER, |
| 20312 | 20331 | SQLITE3_MUTEX_INITIALIZER, |
| 20313 | 20332 | SQLITE3_MUTEX_INITIALIZER, |
| 20314 | 20333 | SQLITE3_MUTEX_INITIALIZER, |
| 20315 | 20334 | SQLITE3_MUTEX_INITIALIZER, |
| 20335 | + SQLITE3_MUTEX_INITIALIZER, |
| 20336 | + SQLITE3_MUTEX_INITIALIZER, |
| 20337 | + SQLITE3_MUTEX_INITIALIZER, |
| 20316 | 20338 | SQLITE3_MUTEX_INITIALIZER |
| 20317 | 20339 | }; |
| 20318 | 20340 | |
| 20319 | 20341 | static int winMutex_isInit = 0; |
| 20320 | 20342 | static int winMutex_isNt = -1; /* <0 means "need to query" */ |
| | @@ -20382,10 +20404,13 @@ |
| 20382 | 20404 | ** <li> SQLITE_MUTEX_STATIC_LRU |
| 20383 | 20405 | ** <li> SQLITE_MUTEX_STATIC_PMEM |
| 20384 | 20406 | ** <li> SQLITE_MUTEX_STATIC_APP1 |
| 20385 | 20407 | ** <li> SQLITE_MUTEX_STATIC_APP2 |
| 20386 | 20408 | ** <li> SQLITE_MUTEX_STATIC_APP3 |
| 20409 | +** <li> SQLITE_MUTEX_STATIC_VFS1 |
| 20410 | +** <li> SQLITE_MUTEX_STATIC_VFS2 |
| 20411 | +** <li> SQLITE_MUTEX_STATIC_VFS3 |
| 20387 | 20412 | ** </ul> |
| 20388 | 20413 | ** |
| 20389 | 20414 | ** The first two constants cause sqlite3_mutex_alloc() to create |
| 20390 | 20415 | ** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE |
| 20391 | 20416 | ** is used but not necessarily so when SQLITE_MUTEX_FAST is used. |
| | @@ -20793,14 +20818,13 @@ |
| 20793 | 20818 | sqlite3GlobalConfig.pScratch = 0; |
| 20794 | 20819 | sqlite3GlobalConfig.szScratch = 0; |
| 20795 | 20820 | sqlite3GlobalConfig.nScratch = 0; |
| 20796 | 20821 | } |
| 20797 | 20822 | if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512 |
| 20798 | | - || sqlite3GlobalConfig.nPage<1 ){ |
| 20823 | + || sqlite3GlobalConfig.nPage<=0 ){ |
| 20799 | 20824 | sqlite3GlobalConfig.pPage = 0; |
| 20800 | 20825 | sqlite3GlobalConfig.szPage = 0; |
| 20801 | | - sqlite3GlobalConfig.nPage = 0; |
| 20802 | 20826 | } |
| 20803 | 20827 | rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData); |
| 20804 | 20828 | if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0)); |
| 20805 | 20829 | return rc; |
| 20806 | 20830 | } |
| | @@ -26522,18 +26546,18 @@ |
| 26522 | 26546 | ** unixEnterMutex() |
| 26523 | 26547 | ** assert( unixMutexHeld() ); |
| 26524 | 26548 | ** unixEnterLeave() |
| 26525 | 26549 | */ |
| 26526 | 26550 | static void unixEnterMutex(void){ |
| 26527 | | - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); |
| 26551 | + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); |
| 26528 | 26552 | } |
| 26529 | 26553 | static void unixLeaveMutex(void){ |
| 26530 | | - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); |
| 26554 | + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); |
| 26531 | 26555 | } |
| 26532 | 26556 | #ifdef SQLITE_DEBUG |
| 26533 | 26557 | static int unixMutexHeld(void) { |
| 26534 | | - return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); |
| 26558 | + return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); |
| 26535 | 26559 | } |
| 26536 | 26560 | #endif |
| 26537 | 26561 | |
| 26538 | 26562 | |
| 26539 | 26563 | #ifdef SQLITE_HAVE_OS_TRACE |
| | @@ -37047,18 +37071,18 @@ |
| 37047 | 37071 | ** winShmEnterMutex() |
| 37048 | 37072 | ** assert( winShmMutexHeld() ); |
| 37049 | 37073 | ** winShmLeaveMutex() |
| 37050 | 37074 | */ |
| 37051 | 37075 | static void winShmEnterMutex(void){ |
| 37052 | | - sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); |
| 37076 | + sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); |
| 37053 | 37077 | } |
| 37054 | 37078 | static void winShmLeaveMutex(void){ |
| 37055 | | - sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); |
| 37079 | + sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); |
| 37056 | 37080 | } |
| 37057 | 37081 | #ifndef NDEBUG |
| 37058 | 37082 | static int winShmMutexHeld(void) { |
| 37059 | | - return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); |
| 37083 | + return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); |
| 37060 | 37084 | } |
| 37061 | 37085 | #endif |
| 37062 | 37086 | |
| 37063 | 37087 | /* |
| 37064 | 37088 | ** Object used to represent a single file opened and mmapped to provide |
| | @@ -40399,12 +40423,75 @@ |
| 40399 | 40423 | ** This file implements the default page cache implementation (the |
| 40400 | 40424 | ** sqlite3_pcache interface). It also contains part of the implementation |
| 40401 | 40425 | ** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features. |
| 40402 | 40426 | ** If the default page cache implementation is overridden, then neither of |
| 40403 | 40427 | ** these two features are available. |
| 40428 | +** |
| 40429 | +** A Page cache line looks like this: |
| 40430 | +** |
| 40431 | +** ------------------------------------------------------------- |
| 40432 | +** | database page content | PgHdr1 | MemPage | PgHdr | |
| 40433 | +** ------------------------------------------------------------- |
| 40434 | +** |
| 40435 | +** The database page content is up front (so that buffer overreads tend to |
| 40436 | +** flow harmlessly into the PgHdr1, MemPage, and PgHdr extensions). MemPage |
| 40437 | +** is the extension added by the btree.c module containing information such |
| 40438 | +** as the database page number and how that database page is used. PgHdr |
| 40439 | +** is added by the pcache.c layer and contains information used to keep track |
| 40440 | +** of which pages are "dirty". PgHdr1 is an extension added by this |
| 40441 | +** module (pcache1.c). The PgHdr1 header is a subclass of sqlite3_pcache_page. |
| 40442 | +** PgHdr1 contains information needed to look up a page by its page number. |
| 40443 | +** The superclass sqlite3_pcache_page.pBuf points to the start of the |
| 40444 | +** database page content and sqlite3_pcache_page.pExtra points to PgHdr. |
| 40445 | +** |
| 40446 | +** The size of the extension (MemPage+PgHdr+PgHdr1) can be determined at |
| 40447 | +** runtime using sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &size). The |
| 40448 | +** sizes of the extensions sum to 272 bytes on x64 for 3.8.10, but this |
| 40449 | +** size can vary according to architecture, compile-time options, and |
| 40450 | +** SQLite library version number. |
| 40451 | +** |
| 40452 | +** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained |
| 40453 | +** using a separate memory allocation from the database page content. This |
| 40454 | +** seeks to overcome the "clownshoe" problem (also called "internal |
| 40455 | +** fragmentation" in academic literature) of allocating a few bytes more |
| 40456 | +** than a power of two with the memory allocator rounding up to the next |
| 40457 | +** power of two, and leaving the rounded-up space unused. |
| 40458 | +** |
| 40459 | +** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates |
| 40460 | +** with this module. Information is passed back and forth as PgHdr1 pointers. |
| 40461 | +** |
| 40462 | +** The pcache.c and pager.c modules deal with pointers to PgHdr objects. |
| 40463 | +** The btree.c module deals with pointers to MemPage objects. |
| 40464 | +** |
| 40465 | +** SOURCE OF PAGE CACHE MEMORY: |
| 40466 | +** |
| 40467 | +** Memory for a page might come from any of three sources: |
| 40468 | +** |
| 40469 | +** (1) The general-purpose memory allocator - sqlite3Malloc() |
| 40470 | +** (2) Global page-cache memory provided using sqlite3_config() with |
| 40471 | +** SQLITE_CONFIG_PAGECACHE. |
| 40472 | +** (3) PCache-local bulk allocation. |
| 40473 | +** |
| 40474 | +** The third case is a chunk of heap memory (defaulting to 100 pages worth) |
| 40475 | +** that is allocated when the page cache is created. The size of the local |
| 40476 | +** bulk allocation can be adjusted using |
| 40477 | +** |
| 40478 | +** sqlite3_config(SQLITE_CONFIG_PAGECACHE, (void*)0, 0, N). |
| 40479 | +** |
| 40480 | +** If N is positive, then N pages worth of memory are allocated using a single |
| 40481 | +** sqlite3Malloc() call and that memory is used for the first N pages allocated. |
| 40482 | +** Or if N is negative, then -1024*N bytes of memory are allocated and used |
| 40483 | +** for as many pages as can be accommodated. |
| 40484 | +** |
| 40485 | +** Only one of (2) or (3) can be used. Once the memory available to (2) or |
| 40486 | +** (3) is exhausted, subsequent allocations fail over to the general-purpose |
| 40487 | +** memory allocator (1). |
| 40488 | +** |
| 40489 | +** Earlier versions of SQLite used only methods (1) and (2). But experiments |
| 40490 | +** show that method (3) with N==100 provides about a 5% performance boost for |
| 40491 | +** common workloads. |
| 40404 | 40492 | */ |
| 40405 | | - |
| 40406 | 40493 | |
| 40407 | 40494 | typedef struct PCache1 PCache1; |
| 40408 | 40495 | typedef struct PgHdr1 PgHdr1; |
| 40409 | 40496 | typedef struct PgFreeslot PgFreeslot; |
| 40410 | 40497 | typedef struct PGroup PGroup; |
| | @@ -40453,12 +40540,13 @@ |
| 40453 | 40540 | ** flag (bPurgeable) are set when the cache is created. nMax may be |
| 40454 | 40541 | ** modified at any time by a call to the pcache1Cachesize() method. |
| 40455 | 40542 | ** The PGroup mutex must be held when accessing nMax. |
| 40456 | 40543 | */ |
| 40457 | 40544 | PGroup *pGroup; /* PGroup this cache belongs to */ |
| 40458 | | - int szPage; /* Size of allocated pages in bytes */ |
| 40459 | | - int szExtra; /* Size of extra space in bytes */ |
| 40545 | + int szPage; /* Size of database content section */ |
| 40546 | + int szExtra; /* sizeof(MemPage)+sizeof(PgHdr) */ |
| 40547 | + int szAlloc; /* Total size of one pcache line */ |
| 40460 | 40548 | int bPurgeable; /* True if cache is purgeable */ |
| 40461 | 40549 | unsigned int nMin; /* Minimum number of pages reserved */ |
| 40462 | 40550 | unsigned int nMax; /* Configured "cache_size" value */ |
| 40463 | 40551 | unsigned int n90pct; /* nMax*9/10 */ |
| 40464 | 40552 | unsigned int iMaxKey; /* Largest key seen since xTruncate() */ |
| | @@ -40468,10 +40556,12 @@ |
| 40468 | 40556 | */ |
| 40469 | 40557 | unsigned int nRecyclable; /* Number of pages in the LRU list */ |
| 40470 | 40558 | unsigned int nPage; /* Total number of pages in apHash */ |
| 40471 | 40559 | unsigned int nHash; /* Number of slots in apHash[] */ |
| 40472 | 40560 | PgHdr1 **apHash; /* Hash table for fast lookup by key */ |
| 40561 | + PgHdr1 *pFree; /* List of unused pcache-local pages */ |
| 40562 | + void *pBulk; /* Bulk memory used by pcache-local */ |
| 40473 | 40563 | }; |
| 40474 | 40564 | |
| 40475 | 40565 | /* |
| 40476 | 40566 | ** Each cache entry is represented by an instance of the following |
| 40477 | 40567 | ** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of |
| | @@ -40480,19 +40570,20 @@ |
| 40480 | 40570 | */ |
| 40481 | 40571 | struct PgHdr1 { |
| 40482 | 40572 | sqlite3_pcache_page page; |
| 40483 | 40573 | unsigned int iKey; /* Key value (page number) */ |
| 40484 | 40574 | u8 isPinned; /* Page in use, not on the LRU list */ |
| 40575 | + u8 isBulkLocal; /* This page from bulk local storage */ |
| 40485 | 40576 | PgHdr1 *pNext; /* Next in hash table chain */ |
| 40486 | 40577 | PCache1 *pCache; /* Cache that currently owns this page */ |
| 40487 | 40578 | PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */ |
| 40488 | 40579 | PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ |
| 40489 | 40580 | }; |
| 40490 | 40581 | |
| 40491 | 40582 | /* |
| 40492 | | -** Free slots in the allocator used to divide up the buffer provided using |
| 40493 | | -** the SQLITE_CONFIG_PAGECACHE mechanism. |
| 40583 | +** Free slots in the allocator used to divide up the global page cache |
| 40584 | +** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism. |
| 40494 | 40585 | */ |
| 40495 | 40586 | struct PgFreeslot { |
| 40496 | 40587 | PgFreeslot *pNext; /* Next free slot */ |
| 40497 | 40588 | }; |
| 40498 | 40589 | |
| | @@ -40506,14 +40597,15 @@ |
| 40506 | 40597 | ** szSlot, nSlot, pStart, pEnd, nReserve, and isInit values are all |
| 40507 | 40598 | ** fixed at sqlite3_initialize() time and do not require mutex protection. |
| 40508 | 40599 | ** The nFreeSlot and pFree values do require mutex protection. |
| 40509 | 40600 | */ |
| 40510 | 40601 | int isInit; /* True if initialized */ |
| 40602 | + int separateCache; /* Use a new PGroup for each PCache */ |
| 40511 | 40603 | int szSlot; /* Size of each free slot */ |
| 40512 | 40604 | int nSlot; /* The number of pcache slots */ |
| 40513 | 40605 | int nReserve; /* Try to keep nFreeSlot above this */ |
| 40514 | | - void *pStart, *pEnd; /* Bounds of pagecache malloc range */ |
| 40606 | + void *pStart, *pEnd; /* Bounds of global page cache memory */ |
| 40515 | 40607 | /* Above requires no mutex. Use mutex below for variable that follow. */ |
| 40516 | 40608 | sqlite3_mutex *mutex; /* Mutex for accessing the following: */ |
| 40517 | 40609 | PgFreeslot *pFree; /* Free page blocks */ |
| 40518 | 40610 | int nFreeSlot; /* Number of unused pcache slots */ |
| 40519 | 40611 | /* The following value requires a mutex to change. We skip the mutex on |
| | @@ -40556,10 +40648,11 @@ |
| 40556 | 40648 | ** to be serialized already. There is no need for further mutexing. |
| 40557 | 40649 | */ |
| 40558 | 40650 | SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){ |
| 40559 | 40651 | if( pcache1.isInit ){ |
| 40560 | 40652 | PgFreeslot *p; |
| 40653 | + if( pBuf==0 ) sz = n = 0; |
| 40561 | 40654 | sz = ROUNDDOWN8(sz); |
| 40562 | 40655 | pcache1.szSlot = sz; |
| 40563 | 40656 | pcache1.nSlot = pcache1.nFreeSlot = n; |
| 40564 | 40657 | pcache1.nReserve = n>90 ? 10 : (n/10 + 1); |
| 40565 | 40658 | pcache1.pStart = pBuf; |
| | @@ -40620,13 +40713,13 @@ |
| 40620 | 40713 | } |
| 40621 | 40714 | |
| 40622 | 40715 | /* |
| 40623 | 40716 | ** Free an allocated buffer obtained from pcache1Alloc(). |
| 40624 | 40717 | */ |
| 40625 | | -static int pcache1Free(void *p){ |
| 40718 | +static void pcache1Free(void *p){ |
| 40626 | 40719 | int nFreed = 0; |
| 40627 | | - if( p==0 ) return 0; |
| 40720 | + if( p==0 ) return; |
| 40628 | 40721 | if( p>=pcache1.pStart && p<pcache1.pEnd ){ |
| 40629 | 40722 | PgFreeslot *pSlot; |
| 40630 | 40723 | sqlite3_mutex_enter(pcache1.mutex); |
| 40631 | 40724 | sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_USED, 1); |
| 40632 | 40725 | pSlot = (PgFreeslot*)p; |
| | @@ -40637,19 +40730,18 @@ |
| 40637 | 40730 | assert( pcache1.nFreeSlot<=pcache1.nSlot ); |
| 40638 | 40731 | sqlite3_mutex_leave(pcache1.mutex); |
| 40639 | 40732 | }else{ |
| 40640 | 40733 | assert( sqlite3MemdebugHasType(p, MEMTYPE_PCACHE) ); |
| 40641 | 40734 | sqlite3MemdebugSetType(p, MEMTYPE_HEAP); |
| 40642 | | - nFreed = sqlite3MallocSize(p); |
| 40643 | 40735 | #ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS |
| 40736 | + nFreed = sqlite3MallocSize(p); |
| 40644 | 40737 | sqlite3_mutex_enter(pcache1.mutex); |
| 40645 | 40738 | sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_OVERFLOW, nFreed); |
| 40646 | 40739 | sqlite3_mutex_leave(pcache1.mutex); |
| 40647 | 40740 | #endif |
| 40648 | 40741 | sqlite3_free(p); |
| 40649 | 40742 | } |
| 40650 | | - return nFreed; |
| 40651 | 40743 | } |
| 40652 | 40744 | |
| 40653 | 40745 | #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT |
| 40654 | 40746 | /* |
| 40655 | 40747 | ** Return the size of a pcache allocation |
| | @@ -40673,58 +40765,69 @@ |
| 40673 | 40765 | */ |
| 40674 | 40766 | static PgHdr1 *pcache1AllocPage(PCache1 *pCache){ |
| 40675 | 40767 | PgHdr1 *p = 0; |
| 40676 | 40768 | void *pPg; |
| 40677 | 40769 | |
| 40678 | | - /* The group mutex must be released before pcache1Alloc() is called. This |
| 40679 | | - ** is because it may call sqlite3_release_memory(), which assumes that |
| 40680 | | - ** this mutex is not held. */ |
| 40681 | 40770 | assert( sqlite3_mutex_held(pCache->pGroup->mutex) ); |
| 40682 | | - pcache1LeaveMutex(pCache->pGroup); |
| 40771 | + if( pCache->pFree ){ |
| 40772 | + p = pCache->pFree; |
| 40773 | + pCache->pFree = p->pNext; |
| 40774 | + p->pNext = 0; |
| 40775 | + }else{ |
| 40776 | +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT |
| 40777 | + /* The group mutex must be released before pcache1Alloc() is called. This |
| 40778 | + ** is because it might call sqlite3_release_memory(), which assumes that |
| 40779 | + ** this mutex is not held. */ |
| 40780 | + assert( pcache1.separateCache==0 ); |
| 40781 | + assert( pCache->pGroup==&pcache1.grp ); |
| 40782 | + pcache1LeaveMutex(pCache->pGroup); |
| 40783 | +#endif |
| 40683 | 40784 | #ifdef SQLITE_PCACHE_SEPARATE_HEADER |
| 40684 | | - pPg = pcache1Alloc(pCache->szPage); |
| 40685 | | - p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra); |
| 40686 | | - if( !pPg || !p ){ |
| 40687 | | - pcache1Free(pPg); |
| 40688 | | - sqlite3_free(p); |
| 40689 | | - pPg = 0; |
| 40690 | | - } |
| 40785 | + pPg = pcache1Alloc(pCache->szPage); |
| 40786 | + p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra); |
| 40787 | + if( !pPg || !p ){ |
| 40788 | + pcache1Free(pPg); |
| 40789 | + sqlite3_free(p); |
| 40790 | + pPg = 0; |
| 40791 | + } |
| 40691 | 40792 | #else |
| 40692 | | - pPg = pcache1Alloc(ROUND8(sizeof(PgHdr1)) + pCache->szPage + pCache->szExtra); |
| 40693 | | - p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage]; |
| 40793 | + pPg = pcache1Alloc(pCache->szAlloc); |
| 40794 | + p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage]; |
| 40694 | 40795 | #endif |
| 40695 | | - pcache1EnterMutex(pCache->pGroup); |
| 40696 | | - |
| 40697 | | - if( pPg ){ |
| 40796 | +#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT |
| 40797 | + pcache1EnterMutex(pCache->pGroup); |
| 40798 | +#endif |
| 40799 | + if( pPg==0 ) return 0; |
| 40698 | 40800 | p->page.pBuf = pPg; |
| 40699 | 40801 | p->page.pExtra = &p[1]; |
| 40700 | | - if( pCache->bPurgeable ){ |
| 40701 | | - pCache->pGroup->nCurrentPage++; |
| 40702 | | - } |
| 40703 | | - return p; |
| 40802 | + p->isBulkLocal = 0; |
| 40704 | 40803 | } |
| 40705 | | - return 0; |
| 40804 | + if( pCache->bPurgeable ){ |
| 40805 | + pCache->pGroup->nCurrentPage++; |
| 40806 | + } |
| 40807 | + return p; |
| 40706 | 40808 | } |
| 40707 | 40809 | |
| 40708 | 40810 | /* |
| 40709 | 40811 | ** Free a page object allocated by pcache1AllocPage(). |
| 40710 | | -** |
| 40711 | | -** The pointer is allowed to be NULL, which is prudent. But it turns out |
| 40712 | | -** that the current implementation happens to never call this routine |
| 40713 | | -** with a NULL pointer, so we mark the NULL test with ALWAYS(). |
| 40714 | 40812 | */ |
| 40715 | 40813 | static void pcache1FreePage(PgHdr1 *p){ |
| 40716 | | - if( ALWAYS(p) ){ |
| 40717 | | - PCache1 *pCache = p->pCache; |
| 40718 | | - assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) ); |
| 40814 | + PCache1 *pCache; |
| 40815 | + assert( p!=0 ); |
| 40816 | + pCache = p->pCache; |
| 40817 | + assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) ); |
| 40818 | + if( p->isBulkLocal ){ |
| 40819 | + p->pNext = pCache->pFree; |
| 40820 | + pCache->pFree = p; |
| 40821 | + }else{ |
| 40719 | 40822 | pcache1Free(p->page.pBuf); |
| 40720 | 40823 | #ifdef SQLITE_PCACHE_SEPARATE_HEADER |
| 40721 | 40824 | sqlite3_free(p); |
| 40722 | 40825 | #endif |
| 40723 | | - if( pCache->bPurgeable ){ |
| 40724 | | - pCache->pGroup->nCurrentPage--; |
| 40725 | | - } |
| 40826 | + } |
| 40827 | + if( pCache->bPurgeable ){ |
| 40828 | + pCache->pGroup->nCurrentPage--; |
| 40726 | 40829 | } |
| 40727 | 40830 | } |
| 40728 | 40831 | |
| 40729 | 40832 | /* |
| 40730 | 40833 | ** Malloc function used by SQLite to obtain space from the buffer configured |
| | @@ -40920,10 +41023,35 @@ |
| 40920 | 41023 | */ |
| 40921 | 41024 | static int pcache1Init(void *NotUsed){ |
| 40922 | 41025 | UNUSED_PARAMETER(NotUsed); |
| 40923 | 41026 | assert( pcache1.isInit==0 ); |
| 40924 | 41027 | memset(&pcache1, 0, sizeof(pcache1)); |
| 41028 | + |
| 41029 | + |
| 41030 | + /* |
| 41031 | + ** The pcache1.separateCache variable is true if each PCache has its own |
| 41032 | + ** private PGroup (mode-1). pcache1.separateCache is false if the single |
| 41033 | + ** PGroup in pcache1.grp is used for all page caches (mode-2). |
| 41034 | + ** |
| 41035 | + ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT |
| 41036 | + ** |
| 41037 | + ** * Use a unified cache in single-threaded applications that have |
| 41038 | + ** configured a start-time buffer for use as page-cache memory using |
| 41039 | + ** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL |
| 41040 | + ** pBuf argument. |
| 41041 | + ** |
| 41042 | + ** * Otherwise use separate caches (mode-1) |
| 41043 | + */ |
| 41044 | +#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) |
| 41045 | + pcache1.separateCache = 0; |
| 41046 | +#elif SQLITE_THREADSAFE |
| 41047 | + pcache1.separateCache = sqlite3GlobalConfig.pPage==0 |
| 41048 | + || sqlite3GlobalConfig.bCoreMutex>0; |
| 41049 | +#else |
| 41050 | + pcache1.separateCache = sqlite3GlobalConfig.pPage==0; |
| 41051 | +#endif |
| 41052 | + |
| 40925 | 41053 | #if SQLITE_THREADSAFE |
| 40926 | 41054 | if( sqlite3GlobalConfig.bCoreMutex ){ |
| 40927 | 41055 | pcache1.grp.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU); |
| 40928 | 41056 | pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PMEM); |
| 40929 | 41057 | } |
| | @@ -40955,52 +41083,65 @@ |
| 40955 | 41083 | static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ |
| 40956 | 41084 | PCache1 *pCache; /* The newly created page cache */ |
| 40957 | 41085 | PGroup *pGroup; /* The group the new page cache will belong to */ |
| 40958 | 41086 | int sz; /* Bytes of memory required to allocate the new cache */ |
| 40959 | 41087 | |
| 40960 | | - /* |
| 40961 | | - ** The separateCache variable is true if each PCache has its own private |
| 40962 | | - ** PGroup. In other words, separateCache is true for mode (1) where no |
| 40963 | | - ** mutexing is required. |
| 40964 | | - ** |
| 40965 | | - ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT |
| 40966 | | - ** |
| 40967 | | - ** * Always use a unified cache in single-threaded applications |
| 40968 | | - ** |
| 40969 | | - ** * Otherwise (if multi-threaded and ENABLE_MEMORY_MANAGEMENT is off) |
| 40970 | | - ** use separate caches (mode-1) |
| 40971 | | - */ |
| 40972 | | -#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0 |
| 40973 | | - const int separateCache = 0; |
| 40974 | | -#else |
| 40975 | | - int separateCache = sqlite3GlobalConfig.bCoreMutex>0; |
| 40976 | | -#endif |
| 40977 | | - |
| 40978 | 41088 | assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 ); |
| 40979 | 41089 | assert( szExtra < 300 ); |
| 40980 | 41090 | |
| 40981 | | - sz = sizeof(PCache1) + sizeof(PGroup)*separateCache; |
| 41091 | + sz = sizeof(PCache1) + sizeof(PGroup)*pcache1.separateCache; |
| 40982 | 41092 | pCache = (PCache1 *)sqlite3MallocZero(sz); |
| 40983 | 41093 | if( pCache ){ |
| 40984 | | - if( separateCache ){ |
| 41094 | + if( pcache1.separateCache ){ |
| 40985 | 41095 | pGroup = (PGroup*)&pCache[1]; |
| 40986 | 41096 | pGroup->mxPinned = 10; |
| 40987 | 41097 | }else{ |
| 40988 | 41098 | pGroup = &pcache1.grp; |
| 40989 | 41099 | } |
| 40990 | 41100 | pCache->pGroup = pGroup; |
| 40991 | 41101 | pCache->szPage = szPage; |
| 40992 | 41102 | pCache->szExtra = szExtra; |
| 41103 | + pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1)); |
| 40993 | 41104 | pCache->bPurgeable = (bPurgeable ? 1 : 0); |
| 40994 | 41105 | pcache1EnterMutex(pGroup); |
| 40995 | 41106 | pcache1ResizeHash(pCache); |
| 40996 | 41107 | if( bPurgeable ){ |
| 40997 | 41108 | pCache->nMin = 10; |
| 40998 | 41109 | pGroup->nMinPage += pCache->nMin; |
| 40999 | 41110 | pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; |
| 41000 | 41111 | } |
| 41001 | 41112 | pcache1LeaveMutex(pGroup); |
| 41113 | + /* Try to initialize the local bulk pagecache line allocation if using |
| 41114 | + ** separate caches and if nPage!=0 */ |
| 41115 | + if( pcache1.separateCache |
| 41116 | + && sqlite3GlobalConfig.nPage!=0 |
| 41117 | + && sqlite3GlobalConfig.pPage==0 |
| 41118 | + ){ |
| 41119 | + int szBulk; |
| 41120 | + char *zBulk; |
| 41121 | + sqlite3BeginBenignMalloc(); |
| 41122 | + if( sqlite3GlobalConfig.nPage>0 ){ |
| 41123 | + szBulk = pCache->szAlloc * sqlite3GlobalConfig.nPage; |
| 41124 | + }else{ |
| 41125 | + szBulk = -1024*sqlite3GlobalConfig.nPage; |
| 41126 | + } |
| 41127 | + zBulk = pCache->pBulk = sqlite3Malloc( szBulk ); |
| 41128 | + sqlite3EndBenignMalloc(); |
| 41129 | + if( zBulk ){ |
| 41130 | + int nBulk = sqlite3MallocSize(zBulk)/pCache->szAlloc; |
| 41131 | + int i; |
| 41132 | + for(i=0; i<nBulk; i++){ |
| 41133 | + PgHdr1 *pX = (PgHdr1*)&zBulk[szPage]; |
| 41134 | + pX->page.pBuf = zBulk; |
| 41135 | + pX->page.pExtra = &pX[1]; |
| 41136 | + pX->isBulkLocal = 1; |
| 41137 | + pX->pNext = pCache->pFree; |
| 41138 | + pCache->pFree = pX; |
| 41139 | + zBulk += pCache->szAlloc; |
| 41140 | + } |
| 41141 | + } |
| 41142 | + } |
| 41002 | 41143 | if( pCache->nHash==0 ){ |
| 41003 | 41144 | pcache1Destroy((sqlite3_pcache*)pCache); |
| 41004 | 41145 | pCache = 0; |
| 41005 | 41146 | } |
| 41006 | 41147 | } |
| | @@ -41090,30 +41231,21 @@ |
| 41090 | 41231 | |
| 41091 | 41232 | if( pCache->nPage>=pCache->nHash ) pcache1ResizeHash(pCache); |
| 41092 | 41233 | assert( pCache->nHash>0 && pCache->apHash ); |
| 41093 | 41234 | |
| 41094 | 41235 | /* Step 4. Try to recycle a page. */ |
| 41095 | | - if( pCache->bPurgeable && pGroup->pLruTail && ( |
| 41096 | | - (pCache->nPage+1>=pCache->nMax) |
| 41097 | | - || pGroup->nCurrentPage>=pGroup->nMaxPage |
| 41098 | | - || pcache1UnderMemoryPressure(pCache) |
| 41099 | | - )){ |
| 41236 | + if( pCache->bPurgeable |
| 41237 | + && pGroup->pLruTail |
| 41238 | + && ((pCache->nPage+1>=pCache->nMax) || pcache1UnderMemoryPressure(pCache)) |
| 41239 | + ){ |
| 41100 | 41240 | PCache1 *pOther; |
| 41101 | 41241 | pPage = pGroup->pLruTail; |
| 41102 | 41242 | assert( pPage->isPinned==0 ); |
| 41103 | 41243 | pcache1RemoveFromHash(pPage, 0); |
| 41104 | 41244 | pcache1PinPage(pPage); |
| 41105 | 41245 | pOther = pPage->pCache; |
| 41106 | | - |
| 41107 | | - /* We want to verify that szPage and szExtra are the same for pOther |
| 41108 | | - ** and pCache. Assert that we can verify this by comparing sums. */ |
| 41109 | | - assert( (pCache->szPage & (pCache->szPage-1))==0 && pCache->szPage>=512 ); |
| 41110 | | - assert( pCache->szExtra<512 ); |
| 41111 | | - assert( (pOther->szPage & (pOther->szPage-1))==0 && pOther->szPage>=512 ); |
| 41112 | | - assert( pOther->szExtra<512 ); |
| 41113 | | - |
| 41114 | | - if( pOther->szPage+pOther->szExtra != pCache->szPage+pCache->szExtra ){ |
| 41246 | + if( pOther->szAlloc != pCache->szAlloc ){ |
| 41115 | 41247 | pcache1FreePage(pPage); |
| 41116 | 41248 | pPage = 0; |
| 41117 | 41249 | }else{ |
| 41118 | 41250 | pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable); |
| 41119 | 41251 | } |
| | @@ -41385,10 +41517,11 @@ |
| 41385 | 41517 | assert( pGroup->nMinPage >= pCache->nMin ); |
| 41386 | 41518 | pGroup->nMinPage -= pCache->nMin; |
| 41387 | 41519 | pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; |
| 41388 | 41520 | pcache1EnforceMaxPage(pGroup); |
| 41389 | 41521 | pcache1LeaveMutex(pGroup); |
| 41522 | + sqlite3_free(pCache->pBulk); |
| 41390 | 41523 | sqlite3_free(pCache->apHash); |
| 41391 | 41524 | sqlite3_free(pCache); |
| 41392 | 41525 | } |
| 41393 | 41526 | |
| 41394 | 41527 | /* |
| | @@ -41440,11 +41573,11 @@ |
| 41440 | 41573 | */ |
| 41441 | 41574 | SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){ |
| 41442 | 41575 | int nFree = 0; |
| 41443 | 41576 | assert( sqlite3_mutex_notheld(pcache1.grp.mutex) ); |
| 41444 | 41577 | assert( sqlite3_mutex_notheld(pcache1.mutex) ); |
| 41445 | | - if( pcache1.pStart==0 ){ |
| 41578 | + if( sqlite3GlobalConfig.nPage==0 ){ |
| 41446 | 41579 | PgHdr1 *p; |
| 41447 | 41580 | pcache1EnterMutex(&pcache1.grp); |
| 41448 | 41581 | while( (nReq<0 || nFree<nReq) && ((p=pcache1.grp.pLruTail)!=0) ){ |
| 41449 | 41582 | nFree += pcache1MemSize(p->page.pBuf); |
| 41450 | 41583 | #ifdef SQLITE_PCACHE_SEPARATE_HEADER |
| | @@ -62542,10 +62675,11 @@ |
| 62542 | 62675 | u32 *heap = 0; /* Min-heap used for checking cell coverage */ |
| 62543 | 62676 | u32 x, prev = 0; /* Next and previous entry on the min-heap */ |
| 62544 | 62677 | const char *saved_zPfx = pCheck->zPfx; |
| 62545 | 62678 | int saved_v1 = pCheck->v1; |
| 62546 | 62679 | int saved_v2 = pCheck->v2; |
| 62680 | + u8 savedIsInit; |
| 62547 | 62681 | |
| 62548 | 62682 | /* Check that the page exists |
| 62549 | 62683 | */ |
| 62550 | 62684 | pBt = pCheck->pBt; |
| 62551 | 62685 | usableSize = pBt->usableSize; |
| | @@ -62559,10 +62693,11 @@ |
| 62559 | 62693 | goto end_of_check; |
| 62560 | 62694 | } |
| 62561 | 62695 | |
| 62562 | 62696 | /* Clear MemPage.isInit to make sure the corruption detection code in |
| 62563 | 62697 | ** btreeInitPage() is executed. */ |
| 62698 | + savedIsInit = pPage->isInit; |
| 62564 | 62699 | pPage->isInit = 0; |
| 62565 | 62700 | if( (rc = btreeInitPage(pPage))!=0 ){ |
| 62566 | 62701 | assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */ |
| 62567 | 62702 | checkAppendMsg(pCheck, |
| 62568 | 62703 | "btreeInitPage() returns error code %d", rc); |
| | @@ -62700,11 +62835,11 @@ |
| 62700 | 62835 | while( i>0 ){ |
| 62701 | 62836 | int size, j; |
| 62702 | 62837 | assert( (u32)i<=usableSize-4 ); /* Enforced by btreeInitPage() */ |
| 62703 | 62838 | size = get2byte(&data[i+2]); |
| 62704 | 62839 | assert( (u32)(i+size)<=usableSize ); /* Enforced by btreeInitPage() */ |
| 62705 | | - btreeHeapInsert(heap, (i<<16)|(i+size-1)); |
| 62840 | + btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1)); |
| 62706 | 62841 | /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a |
| 62707 | 62842 | ** big-endian integer which is the offset in the b-tree page of the next |
| 62708 | 62843 | ** freeblock in the chain, or zero if the freeblock is the last on the |
| 62709 | 62844 | ** chain. */ |
| 62710 | 62845 | j = get2byte(&data[i]); |
| | @@ -62751,10 +62886,11 @@ |
| 62751 | 62886 | nFrag, data[hdr+7], iPage); |
| 62752 | 62887 | } |
| 62753 | 62888 | } |
| 62754 | 62889 | |
| 62755 | 62890 | end_of_check: |
| 62891 | + if( !doCoverageCheck ) pPage->isInit = savedIsInit; |
| 62756 | 62892 | releasePage(pPage); |
| 62757 | 62893 | pCheck->zPfx = saved_zPfx; |
| 62758 | 62894 | pCheck->v1 = saved_v1; |
| 62759 | 62895 | pCheck->v2 = saved_v2; |
| 62760 | 62896 | return depth+1; |
| | @@ -69020,10 +69156,11 @@ |
| 69020 | 69156 | ** to ignore the compiler warnings and leave this variable uninitialized. |
| 69021 | 69157 | */ |
| 69022 | 69158 | /* mem1.u.i = 0; // not needed, here to silence compiler warning */ |
| 69023 | 69159 | |
| 69024 | 69160 | idx1 = getVarint32(aKey1, szHdr1); |
| 69161 | + if( szHdr1>98307 ) return SQLITE_CORRUPT; |
| 69025 | 69162 | d1 = szHdr1; |
| 69026 | 69163 | assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB ); |
| 69027 | 69164 | assert( pKeyInfo->aSortOrder!=0 ); |
| 69028 | 69165 | assert( pKeyInfo->nField>0 ); |
| 69029 | 69166 | assert( idx1<=szHdr1 || CORRUPT_DB ); |
| | @@ -109379,14 +109516,18 @@ |
| 109379 | 109516 | sqlite3VdbeResolveLabel(v, addrCont); |
| 109380 | 109517 | |
| 109381 | 109518 | /* Execute the recursive SELECT taking the single row in Current as |
| 109382 | 109519 | ** the value for the recursive-table. Store the results in the Queue. |
| 109383 | 109520 | */ |
| 109384 | | - p->pPrior = 0; |
| 109385 | | - sqlite3Select(pParse, p, &destQueue); |
| 109386 | | - assert( p->pPrior==0 ); |
| 109387 | | - p->pPrior = pSetup; |
| 109521 | + if( p->selFlags & SF_Aggregate ){ |
| 109522 | + sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported"); |
| 109523 | + }else{ |
| 109524 | + p->pPrior = 0; |
| 109525 | + sqlite3Select(pParse, p, &destQueue); |
| 109526 | + assert( p->pPrior==0 ); |
| 109527 | + p->pPrior = pSetup; |
| 109528 | + } |
| 109388 | 109529 | |
| 109389 | 109530 | /* Keep running the loop until the Queue is empty */ |
| 109390 | 109531 | sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop); |
| 109391 | 109532 | sqlite3VdbeResolveLabel(v, addrBreak); |
| 109392 | 109533 | |
| 109393 | 109534 | |