Overview

Comment:     Working when run against sqlite.fossil.
Downloads:   Tarball | ZIP archive
Timelines:   family | ancestors | descendants | both | scrub-backup
Files:       files | file ages | folders
SHA1:        b0bd9dd6255be161ea289ba6caa3fbd5
User & Date: drh 2016-05-05 23:39:30.803
Context

2016-05-05
23:59  Additional error reporting. Open the source database read/write so that it can delete the WAL file when done. (check-in: d2efd3c176 user: drh tags: scrub-backup)
23:39  Working when run against sqlite.fossil. (check-in: b0bd9dd625 user: drh tags: scrub-backup)
23:09  Finished implementation compiles, but untested. (check-in: aeb88bdf6f user: drh tags: scrub-backup)
Changes
Changes to ext/misc/scrub.c.
︙
scrubBackupOverflow(), lines 338-352 as revised:

static void scrubBackupOverflow(ScrubState *p, int pgno, u32 nByte){
  u8 *a, *aBuf;

  aBuf = scrubBackupAllocPage(p);
  if( aBuf==0 ) return;
  while( nByte>0 && pgno!=0 ){
    a = scrubBackupRead(p, pgno, aBuf);
    if( a==0 ) break;
    if( nByte >= (p->szUsable)-4 ){
      nByte -= (p->szUsable) - 4;
    }else{
      u32 x = (p->szUsable - 4) - nByte;
      u32 i = p->szUsable - x;
      memset(&a[i], 0, x);
      nByte = 0;
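The loop depends on the SQLite overflow-chain layout: each overflow page begins with a 4-byte big-endian pointer to the next page (zero on the last page), followed by up to szUsable-4 bytes of payload, so only the final page of a chain has an unused tail worth scrubbing. A minimal sketch of that split, using hypothetical names (overflowPageSplit, usable, remaining) rather than anything from scrub.c:

#include <stdint.h>

/* Hypothetical helper (not part of scrub.c): given the usable page size and
** the number of payload bytes still unread, report how many bytes of an
** overflow page carry real content and how many trailing bytes the scrubber
** may zero.  Mirrors the arithmetic in scrubBackupOverflow() above. */
static void overflowPageSplit(uint32_t usable, uint32_t remaining,
                              uint32_t *pContent, uint32_t *pZero){
  uint32_t cap = usable - 4;     /* bytes after the 4-byte next-page pointer */
  if( remaining >= cap ){
    *pContent = cap;             /* interior page: completely full of payload */
    *pZero = 0;
  }else{
    *pContent = remaining;       /* final page: only part of it is payload */
    *pZero = cap - remaining;    /* unused tail that gets overwritten */
  }
}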
︙
scrubBackupBtree(), lines 368-382 as revised:

  u32 nCell;
  u32 nPrefix;
  u32 szHdr;
  u32 iChild;
  u8 *aTop;
  u8 *aCell;
  u32 x, y;
  int ln = 0;

  if( p->rcErr ) return;
  if( iDepth>50 ){
    scrubBackupErr(p, "corrupt: b-tree too deep at page %d", pgno);
    return;
  }
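The only change in this hunk is the new int ln = 0 declaration; it supports the error-reporting idiom used throughout the rest of the revision, where each corruption check records __LINE__ before jumping to a shared cleanup label so the single error message can say which check fired. A minimal standalone sketch of the same pattern, with hypothetical names (validatePage and its parameters):

#include <stdio.h>

/* Hypothetical illustration of the ln=__LINE__ idiom added in this check-in:
** every failed sanity check records its source line, then jumps to a single
** error path that reports that line as an "errid". */
static int validatePage(int cellCount, int contentStart, int pageSize){
  int ln = 0;
  if( cellCount < 0 ){ ln = __LINE__; goto corrupt; }
  if( contentStart > pageSize ){ ln = __LINE__; goto corrupt; }
  return 0;   /* page looks sane */
corrupt:
  fprintf(stderr, "corrupt page (errid=%d)\n", ln);
  return 1;
}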
︙
scrubBackupBtree(), lines 391-475 as revised:

  szHdr = 8 + 4*(aTop[0]==0x02 || aTop[0]==0x05);
  aCell = aTop + szHdr;
  nCell = scrubBackupInt16(&aTop[3]);

  /* Zero out the gap between the cell index and the start of the
  ** cell content area */
  x = scrubBackupInt16(&aTop[5]);  /* First byte of cell content area */
  if( x>p->szUsable ){ ln=__LINE__; goto btree_corrupt; }
  y = szHdr + nPrefix + nCell*2;
  if( y>x ){ ln=__LINE__; goto btree_corrupt; }
  if( y<x ) memset(a+y, 0, x-y);  /* Zero the gap */

  /* Zero out all the free blocks */
  pc = scrubBackupInt16(&aTop[1]);
  if( pc>0 && pc<x ){ ln=__LINE__; goto btree_corrupt; }
  while( pc ){
    if( pc>(p->szUsable)-4 ){ ln=__LINE__; goto btree_corrupt; }
    n = scrubBackupInt16(&a[pc+2]);
    if( pc+n>(p->szUsable) ){ ln=__LINE__; goto btree_corrupt; }
    if( n>4 ) memset(&a[pc+4], 0, n-4);
    x = scrubBackupInt16(&a[pc]);
    if( x<pc+4 && x>0 ){ ln=__LINE__; goto btree_corrupt; }
    pc = x;
  }

  /* Write this one page */
  scrubBackupWrite(p, pgno, a);

  /* Walk the tree and process child pages */
  for(i=0; i<nCell; i++){
    u32 X, M, K, nLocal;
    sqlite3_int64 P;
    pc = scrubBackupInt16(&aCell[i*2]);
    if( pc <= szHdr ){ ln=__LINE__; goto btree_corrupt; }
    if( pc > p->szUsable-3 ){ ln=__LINE__; goto btree_corrupt; }
    if( aTop[0]==0x05 || aTop[0]==0x02 ){
      if( pc+4 > p->szUsable ){ ln=__LINE__; goto btree_corrupt; }
      iChild = scrubBackupInt32(&a[pc]);
      pc += 4;
      scrubBackupBtree(p, iChild, iDepth+1);
      if( aTop[0]==0x05 ) continue;
    }
    pc += scrubBackupVarint(&a[pc], &P);
    if( pc >= p->szUsable ){ ln=__LINE__; goto btree_corrupt; }
    if( aTop[0]==0x0d ){
      X = p->szUsable - 35;
    }else{
      X = ((p->szUsable - 12)*64/255) - 23;
    }
    if( P<=X ){
      /* All content is local.  No overflow */
      continue;
    }
    M = ((p->szUsable - 12)*32/255)-23;
    K = M + ((P-M)%(p->szUsable-4));
    if( aTop[0]==0x0d ){
      pc += scrubBackupVarintSize(&a[pc]);
      if( pc > (p->szUsable-4) ){ ln=__LINE__; goto btree_corrupt; }
    }
    nLocal = K<=X ? K : M;
    if( pc+nLocal > p->szUsable-4 ){ ln=__LINE__; goto btree_corrupt; }
    iChild = scrubBackupInt32(&a[pc+nLocal]);
    scrubBackupOverflow(p, iChild, P-nLocal);
  }

  /* Walk the right-most tree */
  if( aTop[0]==0x05 || aTop[0]==0x02 ){
    iChild = scrubBackupInt32(&aTop[8]);
    scrubBackupBtree(p, iChild, iDepth+1);
  }

  /* All done */
  if( pgno>1 ) sqlite3_free(a);
  return;

btree_corrupt:
  scrubBackupErr(p, "corruption on page %d of source database (errid=%d)",
                 pgno, ln);
  if( pgno>1 ) sqlite3_free(a);
}

/*
** Copy all ptrmap pages from source to destination.
** This routine is only called if the source database is in autovacuum
** or incremental vacuum mode.
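The X, M, K, and nLocal computations follow the payload-overflow formulas of the SQLite database file format: with usable page size U and payload size P, a table-leaf cell is fully local when P <= U-35 and an index cell when P <= ((U-12)*64/255)-23; otherwise the local portion is K = M + ((P-M) % (U-4)) if K still fits, else the minimum M = ((U-12)*32/255)-23, and the 4-byte overflow page number follows the local bytes. A small sketch of that computation, with a hypothetical name (localPayloadSize):

#include <stdint.h>

/* Hypothetical sketch of the local-payload computation used above.
** usable = usable page size (U), payload = total cell payload size (P),
** isTableLeaf = nonzero for a table b-tree leaf page (page type 0x0d). */
static uint32_t localPayloadSize(uint32_t usable, int64_t payload,
                                 int isTableLeaf){
  uint32_t X = isTableLeaf ? usable - 35 : ((usable - 12)*64/255) - 23;
  uint32_t M, K;
  if( payload <= X ) return (uint32_t)payload;   /* everything fits locally */
  M = ((usable - 12)*32/255) - 23;               /* minimum local size */
  K = M + (uint32_t)((payload - M) % (usable - 4));
  return K <= X ? K : M;       /* bytes stored on the b-tree page itself */
}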
︙
Lines 522-536 as revised:

  /* Copy ptrmap pages */
  n = scrubBackupInt32(&s.page1[52]);
  if( n ) scrubBackupPtrmap(&s);

  /* Copy all of the btrees */
  scrubBackupBtree(&s, 1, 0);
  pStmt = scrubBackupPrepare(&s, s.dbSrc,
            "SELECT rootpage FROM sqlite_master WHERE coalesce(rootpage,0)>0");
  if( pStmt==0 ) goto scrub_abort;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    i = (u32)sqlite3_column_int(pStmt, 0);
    scrubBackupBtree(&s, i, 0);
  }
  sqlite3_finalize(pStmt);
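The revised SELECT filters on coalesce(rootpage,0)>0 because sqlite_master rows for views, triggers, and virtual tables carry a rootpage of 0 or NULL, and there is no b-tree to copy for those entries. A standalone sketch of the same scan against the public SQLite API, with a hypothetical name (listRootPages) and minimal error handling:

#include <stdio.h>
#include "sqlite3.h"

/* Hypothetical sketch: print the root page of every table and index,
** skipping sqlite_master rows whose rootpage is NULL or zero, the same
** filter the revised scrub query applies. */
static int listRootPages(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT name, rootpage FROM sqlite_master"
      " WHERE coalesce(rootpage,0)>0", -1, &pStmt, 0);
  if( rc!=SQLITE_OK ) return rc;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s -> page %d\n",
           (const char*)sqlite3_column_text(pStmt, 0),
           sqlite3_column_int(pStmt, 1));
  }
  return sqlite3_finalize(pStmt);
}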
︙