Overview
Comment: Reduce the size of the MemPage object by about 32 bytes. Other structure size optimizations.
SHA1: 21695c3476804477cb378b5a64319638
User & Date: drh 2012-02-02 19:37:18.502
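
The roughly 32-byte saving described in the comment comes from splitting MemPage's array of overflow-cell structs (a pointer plus a u16 index per element) into the parallel arrays aiOvfl[] and apOvfl[] that appear in the btreeInt.h hunk further down: the u16 indices then pack against the page's other u16 fields instead of each dragging pointer-alignment padding along with it. The sketch below is illustrative only; the field names other than aiOvfl/apOvfl are stand-ins for the real MemPage members, and the exact numbers assume a typical 64-bit (LP64) ABI.

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned short u16;

    /* Hypothetical stand-in for the old layout: an array of (pointer, u16)
    ** structs.  Each element is padded to 16 bytes on an LP64 ABI. */
    struct OvflCellOld {
      u8 *pCell;                   /* 8 bytes */
      u16 idx;                     /* 2 bytes + 6 bytes of padding */
    };
    struct PageOld {
      u16 maskPage;                /* stand-in for the u16 fields before aOvfl */
      struct OvflCellOld aOvfl[5]; /* starts at offset 8; 5*16 = 80 bytes */
    };                             /* sizeof == 88 */

    /* New layout: the same data split into parallel arrays, as in the
    ** MemPage change in this check-in.  The u16 indices pack against the
    ** neighbouring u16 field and the pointers stay contiguous. */
    struct PageNew {
      u16 maskPage;
      u16 aiOvfl[5];               /* offsets 2..11 */
      u8 *apOvfl[5];               /* starts at offset 16; 40 bytes */
    };                             /* sizeof == 56, i.e. 32 bytes smaller */

    int main(void){
      printf("old %zu  new %zu  saved %zu\n",
             sizeof(struct PageOld), sizeof(struct PageNew),
             sizeof(struct PageOld) - sizeof(struct PageNew));
      return 0;
    }

On a 32-bit build the per-element padding is only 2 bytes, so the saving there is smaller; the figure in the check-in comment matches the common 64-bit layout.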
Context
2012-02-02
21:02  More structure packing for smaller objects and less memory usage. (check-in: f14e7f29ff user: drh tags: trunk)
19:37  Reduce the size of the MemPage object by about 32 bytes. Other structure size optimizations. (check-in: 21695c3476 user: drh tags: trunk)
18:46  When non-aggregate columns occur in an aggregate query with a single min() or max(), then the values of the non-aggregate columns are taken from one of the rows that was the min() or max(). (check-in: fa13edd39c user: drh tags: trunk)
Changes
Changes to src/btree.c.
︙
Lines 866-883 (as checked in):

    ** pages that do contain overflow cells.
    */
    static u8 *findOverflowCell(MemPage *pPage, int iCell){
      int i;
      assert( sqlite3_mutex_held(pPage->pBt->mutex) );
      for(i=pPage->nOverflow-1; i>=0; i--){
        int k;
        k = pPage->aiOvfl[i];
        if( k<=iCell ){
          if( k==iCell ){
            return pPage->apOvfl[i];
          }
          iCell--;
        }
      }
      return findCell(pPage, iCell);
    }

︙
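
In the new scheme, aiOvfl[i] records the cell index at which overflow cell i logically belongs, which is why findOverflowCell() walks the overflow entries from the end and, for each one that sits at or before the requested index, either returns its body on an exact match or shifts the requested index down by one before finally calling findCell(). The following toy program is only a sketch of that indexing convention; ToyPage and its fields are invented for illustration and are not the real MemPage API.

    #include <assert.h>
    #include <stdio.h>

    /* Toy model of the overflow-cell indexing used by findOverflowCell().
    ** "Cells" are just strings here; the real code returns pointers into
    ** the page image or into apOvfl[]. */
    typedef struct ToyPage ToyPage;
    struct ToyPage {
      const char *aCell[8];     /* cells actually stored on the page */
      int nCell;
      unsigned short aiOvfl[5]; /* insert overflow cell i before cell aiOvfl[i] */
      const char *apOvfl[5];    /* bodies of the overflow cells */
      int nOverflow;
    };

    static const char *toyFindCell(ToyPage *p, int i){ return p->aCell[i]; }

    /* Same shape as findOverflowCell(): return the overflow body on an exact
    ** hit, otherwise shift the requested index past each preceding overflow
    ** cell before indexing the cells stored on the page. */
    static const char *toyFindOverflowCell(ToyPage *p, int iCell){
      int i;
      for(i=p->nOverflow-1; i>=0; i--){
        int k = p->aiOvfl[i];
        if( k<=iCell ){
          if( k==iCell ) return p->apOvfl[i];
          iCell--;
        }
      }
      return toyFindCell(p, iCell);
    }

    int main(void){
      /* Page holds A,C plus one overflow cell B that belongs at index 1. */
      ToyPage pg = { {"A","C"}, 2, {1}, {"B"}, 1 };
      assert( toyFindOverflowCell(&pg, 0)[0]=='A' );
      assert( toyFindOverflowCell(&pg, 1)[0]=='B' );  /* the overflow cell */
      assert( toyFindOverflowCell(&pg, 2)[0]=='C' );  /* shifted down to cell 1 */
      printf("logical order: %s %s %s\n",
             toyFindOverflowCell(&pg, 0),
             toyFindOverflowCell(&pg, 1),
             toyFindOverflowCell(&pg, 2));
      return 0;
    }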
Lines 5515-5529 (as checked in):

    /*
    ** Insert a new cell on pPage at cell index "i".  pCell points to the
    ** content of the cell.
    **
    ** If the cell content will fit on the page, then put it there.  If it
    ** will not fit, then make a copy of the cell content into pTemp if
    ** pTemp is not null.  Regardless of pTemp, allocate a new entry
    ** in pPage->apOvfl[] and make it point to the cell content (either
    ** in pTemp or the original pCell) and also record its index.
    ** Allocating a new entry in pPage->aCell[] implies that
    ** pPage->nOverflow is incremented.
    **
    ** If nSkip is non-zero, then do not copy the first nSkip bytes of the
    ** cell. The caller will overwrite them after this function returns. If
    ** nSkip is non-zero, then pCell may not point to an invalid memory location

︙
Lines 5549-5583 (as checked in):

    int nSkip = (iChild ? 4 : 0);

    if( *pRC ) return;

    assert( i>=0 && i<=pPage->nCell+pPage->nOverflow );
    assert( pPage->nCell<=MX_CELL(pPage->pBt) && MX_CELL(pPage->pBt)<=10921 );
    assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) );
    assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) );
    assert( sqlite3_mutex_held(pPage->pBt->mutex) );
    /* The cell should normally be sized correctly.  However, when moving a
    ** malformed cell from a leaf page to an interior page, if the cell size
    ** wanted to be less than 4 but got rounded up to 4 on the leaf, then size
    ** might be less than 8 (leaf-size + pointer) on the interior node.  Hence
    ** the term after the || in the following assert(). */
    assert( sz==cellSizePtr(pPage, pCell) || (sz==8 && iChild>0) );
    if( pPage->nOverflow || sz+2>pPage->nFree ){
      if( pTemp ){
        memcpy(pTemp+nSkip, pCell+nSkip, sz-nSkip);
        pCell = pTemp;
      }
      if( iChild ){
        put4byte(pCell, iChild);
      }
      j = pPage->nOverflow++;
      assert( j<(int)(sizeof(pPage->apOvfl)/sizeof(pPage->apOvfl[0])) );
      pPage->apOvfl[j] = pCell;
      pPage->aiOvfl[j] = (u16)i;
    }else{
      int rc = sqlite3PagerWrite(pPage->pDbPage);
      if( rc!=SQLITE_OK ){
        *pRC = rc;
        return;
      }
      assert( sqlite3PagerIswriteable(pPage->pDbPage) );

︙
Lines 5717-5731 (as checked in):

    ** may be inserted. If both these operations are successful, proceed.
    */
    rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0);

    if( rc==SQLITE_OK ){

      u8 *pOut = &pSpace[4];
      u8 *pCell = pPage->apOvfl[0];
      u16 szCell = cellSizePtr(pPage, pCell);
      u8 *pStop;

      assert( sqlite3PagerIswriteable(pNew->pDbPage) );
      assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) );
      zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF);
      assemblePage(pNew, 1, &pCell, &szCell);

︙
Lines 5827-5841 (as checked in):

    ** on page pFrom to page pTo. If page pFrom was not a leaf page, then
    ** the pointer-map entries for each child page are updated so that the
    ** parent page stored in the pointer map is page pTo. If pFrom contained
    ** any cells with overflow page pointers, then the corresponding pointer
    ** map entries are also updated so that the parent page is page pTo.
    **
    ** If pFrom is currently carrying any overflow cells (entries in the
    ** MemPage.apOvfl[] array), they are not copied to pTo.
    **
    ** Before returning, page pTo is reinitialized using btreeInitPage().
    **
    ** The performance of this function is not critical. It is only used by
    ** the balance_shallower() and balance_deeper() procedures, neither of
    ** which are called often under normal circumstances.
    */

︙
Lines 5964-5978 (as checked in):

    /* At this point pParent may have at most one overflow cell. And if
    ** this overflow cell is present, it must be the cell with
    ** index iParentIdx. This scenario comes about when this function
    ** is called (indirectly) from sqlite3BtreeDelete().
    */
    assert( pParent->nOverflow==0 || pParent->nOverflow==1 );
    assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx );

    if( !aOvflSpace ){
      return SQLITE_NOMEM;
    }

    /* Find the sibling pages to balance. Also locate the cells in pParent
    ** that divide the siblings. An attempt is made to find NN siblings on

︙
Lines 6011-6026 (as checked in):

    if( rc ){
      memset(apOld, 0, (i+1)*sizeof(MemPage*));
      goto balance_cleanup;
    }
    nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow;
    if( (i--)==0 ) break;

    if( i+nxDiv==pParent->aiOvfl[0] && pParent->nOverflow ){
      apDiv[i] = pParent->apOvfl[0];
      pgno = get4byte(apDiv[i]);
      szNew[i] = cellSizePtr(pParent, apDiv[i]);
      pParent->nOverflow = 0;
    }else{
      apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow);
      pgno = get4byte(apDiv[i]);
      szNew[i] = cellSizePtr(pParent, apDiv[i]);

︙
Lines 6453-6488 (as checked in):

    ** setting a pointer map entry is a relatively expensive operation, this
    ** code only sets pointer map entries for child or overflow pages that have
    ** actually moved between pages. */
    MemPage *pNew = apNew[0];
    MemPage *pOld = apCopy[0];
    int nOverflow = pOld->nOverflow;
    int iNextOld = pOld->nCell + nOverflow;
    int iOverflow = (nOverflow ? pOld->aiOvfl[0] : -1);
    j = 0;                             /* Current 'old' sibling page */
    k = 0;                             /* Current 'new' sibling page */
    for(i=0; i<nCell; i++){
      int isDivider = 0;
      while( i==iNextOld ){
        /* Cell i is the cell immediately following the last cell on old
        ** sibling page j. If the siblings are not leaf pages of an
        ** intkey b-tree, then cell i was a divider cell. */
        assert( j+1 < ArraySize(apCopy) );
        pOld = apCopy[++j];
        iNextOld = i + !leafData + pOld->nCell + pOld->nOverflow;
        if( pOld->nOverflow ){
          nOverflow = pOld->nOverflow;
          iOverflow = i + !leafData + pOld->aiOvfl[0];
        }
        isDivider = !leafData;
      }

      assert(nOverflow>0 || iOverflow<i );
      assert(nOverflow<2 || pOld->aiOvfl[0]==pOld->aiOvfl[1]-1);
      assert(nOverflow<3 || pOld->aiOvfl[1]==pOld->aiOvfl[2]-1);
      if( i==iOverflow ){
        isDivider = 1;
        if( (--nOverflow)>0 ){
          iOverflow++;
        }
      }

︙
Lines 6595-6612 (as checked in):

    assert( sqlite3PagerIswriteable(pChild->pDbPage) );
    assert( sqlite3PagerIswriteable(pRoot->pDbPage) );
    assert( pChild->nCell==pRoot->nCell );

    TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno));

    /* Copy the overflow cells from pRoot to pChild */
    memcpy(pChild->aiOvfl, pRoot->aiOvfl,
           pRoot->nOverflow*sizeof(pRoot->aiOvfl[0]));
    memcpy(pChild->apOvfl, pRoot->apOvfl,
           pRoot->nOverflow*sizeof(pRoot->apOvfl[0]));
    pChild->nOverflow = pRoot->nOverflow;

    /* Zero the contents of pRoot. Then install pChild as the right-child. */
    zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF);
    put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild);

    *ppChild = pChild;

︙
Lines 6661-6675 (as checked in):

    int const iIdx = pCur->aiIdx[iPage-1];

    rc = sqlite3PagerWrite(pParent->pDbPage);
    if( rc==SQLITE_OK ){
    #ifndef SQLITE_OMIT_QUICKBALANCE
      if( pPage->hasData
       && pPage->nOverflow==1
       && pPage->aiOvfl[0]==pPage->nCell
       && pParent->pgno!=1
       && pParent->nCell==iIdx
      ){
        /* Call balance_quick() to create a new sibling of pPage on which
        ** to store the overflow cell. balance_quick() inserts a new cell
        ** into pParent, which may cause pParent overflow. If this
        ** happens, the next interation of the do-loop will balance pParent

︙
Changes to src/btreeInt.h.
︙
Lines 280-296 (as checked in):

    u8 max1bytePayload;      /* min(maxLocal,127) */
    u16 maxLocal;            /* Copy of BtShared.maxLocal or BtShared.maxLeaf */
    u16 minLocal;            /* Copy of BtShared.minLocal or BtShared.minLeaf */
    u16 cellOffset;          /* Index in aData of first cell pointer */
    u16 nFree;               /* Number of free bytes on the page */
    u16 nCell;               /* Number of cells on this page, local and ovfl */
    u16 maskPage;            /* Mask for page offset */
    u16 aiOvfl[5];           /* Insert the i-th overflow cell before the aiOvfl-th
                             ** non-overflow cell */
    u8 *apOvfl[5];           /* Pointers to the body of overflow cells */
    BtShared *pBt;           /* Pointer to BtShared that this page is part of */
    u8 *aData;               /* Pointer to disk image of the page data */
    u8 *aDataEnd;            /* One byte past the end of usable data */
    u8 *aCellIdx;            /* The cell index area */
    DbPage *pDbPage;         /* Pager page handle */
    Pgno pgno;               /* Page number for this page */
  };

︙
Lines 490-517 (as checked in):

    ** found at self->pBt->mutex.
    */
    struct BtCursor {
      Btree *pBtree;            /* The Btree to which this cursor belongs */
      BtShared *pBt;            /* The BtShared this cursor points to */
      BtCursor *pNext, *pPrev;  /* Forms a linked list of all cursors */
      struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */
    #ifndef SQLITE_OMIT_INCRBLOB
      Pgno *aOverflow;          /* Cache of overflow page locations */
    #endif
      Pgno pgnoRoot;            /* The root page of this tree */
      sqlite3_int64 cachedRowid; /* Next rowid cache.  0 means not valid */
      CellInfo info;            /* A parse of the cell we are pointing at */
      i64 nKey;                 /* Size of pKey, or last integer key */
      void *pKey;               /* Saved key that was cursor's last known position */
      int skipNext;    /* Prev() is noop if negative. Next() is noop if positive */
      u8 wrFlag;                /* True if writable */
      u8 atLast;                /* Cursor pointing to the last entry */
      u8 validNKey;             /* True if info.nKey is valid */
      u8 eState;                /* One of the CURSOR_XXX constants (see below) */
    #ifndef SQLITE_OMIT_INCRBLOB
      u8 isIncrblobHandle;      /* True if this cursor is an incr. io handle */
    #endif
      i16 iPage;                            /* Index of current page in apPage */
      u16 aiIdx[BTCURSOR_MAX_DEPTH];        /* Current index in apPage[i] */
      MemPage *apPage[BTCURSOR_MAX_DEPTH];  /* Pages from root to current page */
    };

︙
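
The BtCursor edit, together with the IntegrityCk and pcache1.c edits that follow, makes up the "other structure size optimizations" from the check-in comment: members are reordered so that pointer-sized fields sit next to one another rather than between u8/u16 flags, which removes alignment holes. A minimal sketch of the effect, with invented field names and typical LP64 padding assumed:

    #include <stdio.h>

    /* Illustrative only: invented fields, not the real BtCursor/IntegrityCk
    ** members.  Sizes assume a typical LP64 ABI (8-byte pointers). */
    struct Scattered {
      void *p1;            /* offset 0 */
      unsigned char flag;  /* offset 8, then 7 bytes of padding */
      void *p2;            /* offset 16 */
      short idx;           /* offset 24, then 6 bytes of tail padding */
    };                     /* sizeof == 32 */

    struct Grouped {
      void *p1;            /* offset 0 */
      void *p2;            /* offset 8 */
      short idx;           /* offset 16 */
      unsigned char flag;  /* offset 18, then 5 bytes of tail padding */
    };                     /* sizeof == 24 */

    int main(void){
      printf("scattered=%zu grouped=%zu\n",
             sizeof(struct Scattered), sizeof(struct Grouped));
      return 0;
    }

Grouping members by decreasing alignment is the general rule; exactly which holes disappear depends on the compiler's ABI.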
Lines 632-653 (as checked in):

    ** This structure is passed around through all the sanity checking routines
    ** in order to keep track of some global state information.
    */
    typedef struct IntegrityCk IntegrityCk;
    struct IntegrityCk {
      BtShared *pBt;    /* The tree being checked out */
      Pager *pPager;    /* The associated pager.  Also accessible by pBt->pPager */
      int *anRef;       /* Number of times each page is referenced */
      Pgno nPage;       /* Number of pages in the database */
      int mxErr;        /* Stop accumulating errors when this reaches zero */
      int nErr;         /* Number of messages written to zErrMsg so far */
      int mallocFailed; /* A memory allocation error has occurred */
      StrAccum errMsg;  /* Accumulate the error message text here */
    };

    /*
    ** Routines to read or write a two- and four-byte big-endian integer values.
    */
    #define get2byte(x)   ((x)[0]<<8 | (x)[1])
    #define put2byte(p,v) ((p)[0] = (u8)((v)>>8), (p)[1] = (u8)(v))
    #define get4byte sqlite3Get4byte
    #define put4byte sqlite3Put4byte
Changes to src/pcache1.c.
︙
Lines 72-94 (as checked in):

    PGroup *pGroup;                     /* PGroup this cache belongs to */
    int szPage;                         /* Size of allocated pages in bytes */
    int szExtra;                        /* Size of extra space in bytes */
    int bPurgeable;                     /* True if cache is purgeable */
    unsigned int nMin;                  /* Minimum number of pages reserved */
    unsigned int nMax;                  /* Configured "cache_size" value */
    unsigned int n90pct;                /* nMax*9/10 */
    unsigned int iMaxKey;               /* Largest key seen since xTruncate() */

    /* Hash table of all pages. The following variables may only be accessed
    ** when the accessor is holding the PGroup mutex.
    */
    unsigned int nRecyclable;           /* Number of pages in the LRU list */
    unsigned int nPage;                 /* Total number of pages in apHash */
    unsigned int nHash;                 /* Number of slots in apHash[] */
    PgHdr1 **apHash;                    /* Hash table for fast lookup by key */
  };

    /*
    ** Each cache entry is represented by an instance of the following
    ** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of
    ** PgHdr1.pCache->szPage bytes is allocated directly before this structure
    ** in memory.

︙
Lines 124-139 (as checked in):

    int isInit;                    /* True if initialized */
    int szSlot;                    /* Size of each free slot */
    int nSlot;                     /* The number of pcache slots */
    int nReserve;                  /* Try to keep nFreeSlot above this */
    void *pStart, *pEnd;           /* Bounds of pagecache malloc range */
    /* Above requires no mutex.  Use mutex below for variable that follow. */
    sqlite3_mutex *mutex;          /* Mutex for accessing the following: */
    PgFreeslot *pFree;             /* Free page blocks */
    int nFreeSlot;                 /* Number of unused pcache slots */
    /* The following value requires a mutex to change.  We skip the mutex on
    ** reading because (1) most platforms read a 32-bit integer atomically and
    ** (2) even if an incorrect value is read, no great harm is done since this
    ** is really just an optimization. */
    int bUnderPressure;            /* True if low on PAGECACHE memory */
  } pcache1_g;

︙