Fossil SCM
Merge trunk
Commit
6164dac54c3a135fd76b5d06d2b8b58139dd840b
Parent
ed7905b64da56ae…
23 files changed
+162
-50
+36
-26
+2
-2
+4
-22
+235
-26
+6
-7
+50
-2
+50
-2
+30
-7
+2
-1
+11
-6
+249
-143
-1
+8
-4
+226
-82
+1
-1
+162
-86
+1
-1
+93
+331
-61
+19
+334
+20
~
src/attach.c
~
src/checkin.c
~
src/db.c
~
src/event.c
~
src/export.c
~
src/foci.c
~
src/import.c
~
src/import.c
~
src/info.c
~
src/json_wiki.c
~
src/manifest.c
~
src/merge.c
~
src/path.c
~
src/pivot.c
~
src/sqlite3.c
~
src/sqlite3.h
~
src/wiki.c
~
test/merge6.test
~
test/merge_exe.test
~
test/merge_renames.test
~
test/tester.tcl
~
test/wiki.test
~
www/encryptedrepos.wiki
+162
-50
| --- src/attach.c | ||
| +++ src/attach.c | ||
| @@ -246,10 +246,66 @@ | ||
| 246 | 246 | db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", rid); |
| 247 | 247 | } |
| 248 | 248 | manifest_crosslink(rid, pAttach, MC_NONE); |
| 249 | 249 | } |
| 250 | 250 | |
| 251 | + | |
| 252 | +/* | |
| 253 | +** Commit a new attachment into the repository | |
| 254 | +*/ | |
| 255 | +void attach_commit( | |
| 256 | + const char *zName, /* The filename of the attachment */ | |
| 257 | + const char *zTarget, /* The artifact uuid to attach to */ | |
| 258 | + const char *aContent, /* The content of the attachment */ | |
| 259 | + int szContent, /* The length of the attachment */ | |
| 260 | + int needModerator, /* Moderate the attachment? */ | |
| 261 | + const char *zComment /* The comment for the attachment */ | |
| 262 | +){ | |
| 263 | + Blob content; | |
| 264 | + Blob manifest; | |
| 265 | + Blob cksum; | |
| 266 | + char *zUUID; | |
| 267 | + char *zDate; | |
| 268 | + int rid; | |
| 269 | + int i, n; | |
| 270 | + int addCompress = 0; | |
| 271 | + Manifest *pManifest; | |
| 272 | + | |
| 273 | + db_begin_transaction(); | |
| 274 | + blob_init(&content, aContent, szContent); | |
| 275 | + pManifest = manifest_parse(&content, 0, 0); | |
| 276 | + manifest_destroy(pManifest); | |
| 277 | + blob_init(&content, aContent, szContent); | |
| 278 | + if( pManifest ){ | |
| 279 | + blob_compress(&content, &content); | |
| 280 | + addCompress = 1; | |
| 281 | + } | |
| 282 | + rid = content_put_ex(&content, 0, 0, 0, needModerator); | |
| 283 | + zUUID = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid); | |
| 284 | + blob_zero(&manifest); | |
| 285 | + for(i=n=0; zName[i]; i++){ | |
| 286 | + if( zName[i]=='/' || zName[i]=='\\' ) n = i+1; | |
| 287 | + } | |
| 288 | + zName += n; | |
| 289 | + if( zName[0]==0 ) zName = "unknown"; | |
| 290 | + blob_appendf(&manifest, "A %F%s %F %s\n", | |
| 291 | + zName, addCompress ? ".gz" : "", zTarget, zUUID); | |
| 292 | + while( fossil_isspace(zComment[0]) ) zComment++; | |
| 293 | + n = strlen(zComment); | |
| 294 | + while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; } | |
| 295 | + if( n>0 ){ | |
| 296 | + blob_appendf(&manifest, "C %#F\n", n, zComment); | |
| 297 | + } | |
| 298 | + zDate = date_in_standard_format("now"); | |
| 299 | + blob_appendf(&manifest, "D %s\n", zDate); | |
| 300 | + blob_appendf(&manifest, "U %F\n", login_name()); | |
| 301 | + md5sum_blob(&manifest, &cksum); | |
| 302 | + blob_appendf(&manifest, "Z %b\n", &cksum); | |
| 303 | + attach_put(&manifest, rid, needModerator); | |
| 304 | + assert( blob_is_reset(&manifest) ); | |
| 305 | + db_end_transaction(0); | |
| 306 | +} | |
| 251 | 307 | |
| 252 | 308 | /* |
| 253 | 309 | ** WEBPAGE: attachadd |
| 254 | 310 | ** Add a new attachment. |
| 255 | 311 | ** |
| @@ -300,11 +356,11 @@ | ||
| 300 | 356 | zTechNote = db_text(0, "SELECT substr(tagname,7) FROM tag" |
| 301 | 357 | " WHERE tagname GLOB 'event-%q*'", zTechNote); |
| 302 | 358 | if( zTechNote==0) fossil_redirect_home(); |
| 303 | 359 | } |
| 304 | 360 | zTarget = zTechNote; |
| 305 | - zTargetType = mprintf("Tech Note <a href=\"%R/technote/%h\">%h</a>", | |
| 361 | + zTargetType = mprintf("Tech Note <a href=\"%R/technote/%s\">%S</a>", | |
| 306 | 362 | zTechNote, zTechNote); |
| 307 | 363 | |
| 308 | 364 | }else{ |
| 309 | 365 | if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){ |
| 310 | 366 | login_needed(g.anon.ApndTkt && g.anon.Attach); |
| @@ -322,59 +378,14 @@ | ||
| 322 | 378 | if( zFrom==0 ) zFrom = mprintf("%s/home", g.zTop); |
| 323 | 379 | if( P("cancel") ){ |
| 324 | 380 | cgi_redirect(zFrom); |
| 325 | 381 | } |
| 326 | 382 | if( P("ok") && szContent>0 && (goodCaptcha = captcha_is_correct()) ){ |
| 327 | - Blob content; | |
| 328 | - Blob manifest; | |
| 329 | - Blob cksum; | |
| 330 | - char *zUUID; | |
| 331 | - const char *zComment; | |
| 332 | - char *zDate; | |
| 333 | - int rid; | |
| 334 | - int i, n; | |
| 335 | - int addCompress = 0; | |
| 336 | - Manifest *pManifest; | |
| 337 | - int needModerator; | |
| 338 | - | |
| 339 | - db_begin_transaction(); | |
| 340 | - blob_init(&content, aContent, szContent); | |
| 341 | - pManifest = manifest_parse(&content, 0, 0); | |
| 342 | - manifest_destroy(pManifest); | |
| 343 | - blob_init(&content, aContent, szContent); | |
| 344 | - if( pManifest ){ | |
| 345 | - blob_compress(&content, &content); | |
| 346 | - addCompress = 1; | |
| 347 | - } | |
| 348 | - needModerator = | |
| 349 | - (zTkt!=0 && ticket_need_moderation(0)) || | |
| 350 | - (zPage!=0 && wiki_need_moderation(0)); | |
| 351 | - rid = content_put_ex(&content, 0, 0, 0, needModerator); | |
| 352 | - zUUID = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid); | |
| 353 | - blob_zero(&manifest); | |
| 354 | - for(i=n=0; zName[i]; i++){ | |
| 355 | - if( zName[i]=='/' || zName[i]=='\\' ) n = i; | |
| 356 | - } | |
| 357 | - zName += n; | |
| 358 | - if( zName[0]==0 ) zName = "unknown"; | |
| 359 | - blob_appendf(&manifest, "A %F%s %F %s\n", | |
| 360 | - zName, addCompress ? ".gz" : "", zTarget, zUUID); | |
| 361 | - zComment = PD("comment", ""); | |
| 362 | - while( fossil_isspace(zComment[0]) ) zComment++; | |
| 363 | - n = strlen(zComment); | |
| 364 | - while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; } | |
| 365 | - if( n>0 ){ | |
| 366 | - blob_appendf(&manifest, "C %#F\n", n, zComment); | |
| 367 | - } | |
| 368 | - zDate = date_in_standard_format("now"); | |
| 369 | - blob_appendf(&manifest, "D %s\n", zDate); | |
| 370 | - blob_appendf(&manifest, "U %F\n", login_name()); | |
| 371 | - md5sum_blob(&manifest, &cksum); | |
| 372 | - blob_appendf(&manifest, "Z %b\n", &cksum); | |
| 373 | - attach_put(&manifest, rid, needModerator); | |
| 374 | - assert( blob_is_reset(&manifest) ); | |
| 375 | - db_end_transaction(0); | |
| 383 | + int needModerator = (zTkt!=0 && ticket_need_moderation(0)) || | |
| 384 | + (zPage!=0 && wiki_need_moderation(0)); | |
| 385 | + const char *zComment = PD("comment", ""); | |
| 386 | + attach_commit(zName, zTarget, aContent, szContent, needModerator, zComment); | |
| 376 | 387 | cgi_redirect(zFrom); |
| 377 | 388 | } |
| 378 | 389 | style_header("Add Attachment"); |
| 379 | 390 | if( !goodCaptcha ){ |
| 380 | 391 | @ <p class="generalError">Error: Incorrect security code.</p> |
| @@ -670,5 +681,106 @@ | ||
| 670 | 681 | @ </ul> |
| 671 | 682 | } |
| 672 | 683 | db_finalize(&q); |
| 673 | 684 | |
| 674 | 685 | } |
| 686 | + | |
| 687 | +/* | |
| 688 | +** COMMAND: attachment* | |
| 689 | +** | |
| 690 | +** Usage: %fossil attachment add ?PAGENAME? FILENAME ?OPTIONS? | |
| 691 | +** | |
| 692 | +** Add an attachment to an existing wiki page or tech note. | |
| 693 | +** | |
| 694 | +** Options: | |
| 695 | +** -t|--technote DATETIME Specifies the timestamp of | |
| 696 | +** the technote to which the attachment | |
| 697 | +** is to be made. The attachment will be | |
| 698 | +** to the most recently modified tech note | |
| 699 | +** with the specified timestamp. | |
| 700 | +** -t|--technote TECHNOTE-ID Specifies the technote to be | |
| 701 | +** updated by its technote id. | |
| 702 | +** | |
| 703 | +** One of PAGENAME, DATETIME or TECHNOTE-ID must be specified. | |
| 704 | +*/ | |
| 705 | +void attachment_cmd(void){ | |
| 706 | + int n; | |
| 707 | + db_find_and_open_repository(0, 0); | |
| 708 | + if( g.argc<3 ){ | |
| 709 | + goto attachment_cmd_usage; | |
| 710 | + } | |
| 711 | + n = strlen(g.argv[2]); | |
| 712 | + if( n==0 ){ | |
| 713 | + goto attachment_cmd_usage; | |
| 714 | + } | |
| 715 | + | |
| 716 | + if( strncmp(g.argv[2],"add",n)==0 ){ | |
| 717 | + const char *zPageName; /* Name of the wiki page to attach to */ | |
| 718 | + const char *zFile; /* Name of the file to be attached */ | |
| 719 | + const char *zETime; /* The name of the technote to attach to */ | |
| 720 | + Manifest *pWiki = 0; /* Parsed wiki page content */ | |
| 721 | + char *zBody = 0; /* Wiki page content */ | |
| 722 | + int rid; | |
| 723 | + const char *zTarget; /* Target of the attachment */ | |
| 724 | + Blob content; /* The content of the attachment */ | |
| 725 | + zETime = find_option("technote","t",1); | |
| 726 | + if( !zETime ){ | |
| 727 | + if( g.argc!=5 ){ | |
| 728 | + usage("add PAGENAME FILENAME"); | |
| 729 | + } | |
| 730 | + zPageName = g.argv[3]; | |
| 731 | + rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x" | |
| 732 | + " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'" | |
| 733 | + " ORDER BY x.mtime DESC LIMIT 1", | |
| 734 | + zPageName | |
| 735 | + ); | |
| 736 | + if( (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 ){ | |
| 737 | + zBody = pWiki->zWiki; | |
| 738 | + } | |
| 739 | + if( zBody==0 ){ | |
| 740 | + fossil_fatal("wiki page [%s] not found",zPageName); | |
| 741 | + } | |
| 742 | + zTarget = zPageName; | |
| 743 | + zFile = g.argv[4]; | |
| 744 | + }else{ | |
| 745 | + if( g.argc!=4 ){ | |
| 746 | + usage("add FILENAME --technote DATETIME|TECHNOTE-ID"); | |
| 747 | + } | |
| 748 | + rid = wiki_technote_to_rid(zETime); | |
| 749 | + if( rid<0 ){ | |
| 750 | + fossil_fatal("ambiguous tech note id: %s", zETime); | |
| 751 | + } | |
| 752 | + if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){ | |
| 753 | + zBody = pWiki->zWiki; | |
| 754 | + } | |
| 755 | + if( zBody==0 ){ | |
| 756 | + fossil_fatal("technote [%s] not found",zETime); | |
| 757 | + } | |
| 758 | + zTarget = db_text(0, | |
| 759 | + "SELECT substr(tagname,7) FROM tag WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')", | |
| 760 | + rid | |
| 761 | + ); | |
| 762 | + zFile = g.argv[3]; | |
| 763 | + } | |
| 764 | + blob_read_from_file(&content, zFile); | |
| 765 | + user_select(); | |
| 766 | + attach_commit( | |
| 767 | + zFile, /* The filename of the attachment */ | |
| 768 | + zTarget, /* The artifact uuid to attach to */ | |
| 769 | + blob_buffer(&content), /* The content of the attachment */ | |
| 770 | + blob_size(&content), /* The length of the attachment */ | |
| 771 | + 0, /* No need to moderate the attachment */ | |
| 772 | + "" /* Empty attachment comment */ | |
| 773 | + ); | |
| 774 | + if( !zETime ){ | |
| 775 | + fossil_print("Attached %s to wiki page %s.\n", zFile, zPageName); | |
| 776 | + }else{ | |
| 777 | + fossil_print("Attached %s to tech note %s.\n", zFile, zETime); | |
| 778 | + } | |
| 779 | + }else{ | |
| 780 | + goto attachment_cmd_usage; | |
| 781 | + } | |
| 782 | + return; | |
| 783 | + | |
| 784 | +attachment_cmd_usage: | |
| 785 | + usage("add ?PAGENAME? FILENAME [-t|--technote DATETIME ]"); | |
| 786 | +} | |
| 675 | 787 |
| --- src/attach.c | |
| +++ src/attach.c | |
| @@ -246,10 +246,66 @@ | |
| 246 | db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", rid); |
| 247 | } |
| 248 | manifest_crosslink(rid, pAttach, MC_NONE); |
| 249 | } |
| 250 | |
| 251 | |
| 252 | /* |
| 253 | ** WEBPAGE: attachadd |
| 254 | ** Add a new attachment. |
| 255 | ** |
| @@ -300,11 +356,11 @@ | |
| 300 | zTechNote = db_text(0, "SELECT substr(tagname,7) FROM tag" |
| 301 | " WHERE tagname GLOB 'event-%q*'", zTechNote); |
| 302 | if( zTechNote==0) fossil_redirect_home(); |
| 303 | } |
| 304 | zTarget = zTechNote; |
| 305 | zTargetType = mprintf("Tech Note <a href=\"%R/technote/%h\">%h</a>", |
| 306 | zTechNote, zTechNote); |
| 307 | |
| 308 | }else{ |
| 309 | if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){ |
| 310 | login_needed(g.anon.ApndTkt && g.anon.Attach); |
| @@ -322,59 +378,14 @@ | |
| 322 | if( zFrom==0 ) zFrom = mprintf("%s/home", g.zTop); |
| 323 | if( P("cancel") ){ |
| 324 | cgi_redirect(zFrom); |
| 325 | } |
| 326 | if( P("ok") && szContent>0 && (goodCaptcha = captcha_is_correct()) ){ |
| 327 | Blob content; |
| 328 | Blob manifest; |
| 329 | Blob cksum; |
| 330 | char *zUUID; |
| 331 | const char *zComment; |
| 332 | char *zDate; |
| 333 | int rid; |
| 334 | int i, n; |
| 335 | int addCompress = 0; |
| 336 | Manifest *pManifest; |
| 337 | int needModerator; |
| 338 | |
| 339 | db_begin_transaction(); |
| 340 | blob_init(&content, aContent, szContent); |
| 341 | pManifest = manifest_parse(&content, 0, 0); |
| 342 | manifest_destroy(pManifest); |
| 343 | blob_init(&content, aContent, szContent); |
| 344 | if( pManifest ){ |
| 345 | blob_compress(&content, &content); |
| 346 | addCompress = 1; |
| 347 | } |
| 348 | needModerator = |
| 349 | (zTkt!=0 && ticket_need_moderation(0)) || |
| 350 | (zPage!=0 && wiki_need_moderation(0)); |
| 351 | rid = content_put_ex(&content, 0, 0, 0, needModerator); |
| 352 | zUUID = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid); |
| 353 | blob_zero(&manifest); |
| 354 | for(i=n=0; zName[i]; i++){ |
| 355 | if( zName[i]=='/' || zName[i]=='\\' ) n = i; |
| 356 | } |
| 357 | zName += n; |
| 358 | if( zName[0]==0 ) zName = "unknown"; |
| 359 | blob_appendf(&manifest, "A %F%s %F %s\n", |
| 360 | zName, addCompress ? ".gz" : "", zTarget, zUUID); |
| 361 | zComment = PD("comment", ""); |
| 362 | while( fossil_isspace(zComment[0]) ) zComment++; |
| 363 | n = strlen(zComment); |
| 364 | while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; } |
| 365 | if( n>0 ){ |
| 366 | blob_appendf(&manifest, "C %#F\n", n, zComment); |
| 367 | } |
| 368 | zDate = date_in_standard_format("now"); |
| 369 | blob_appendf(&manifest, "D %s\n", zDate); |
| 370 | blob_appendf(&manifest, "U %F\n", login_name()); |
| 371 | md5sum_blob(&manifest, &cksum); |
| 372 | blob_appendf(&manifest, "Z %b\n", &cksum); |
| 373 | attach_put(&manifest, rid, needModerator); |
| 374 | assert( blob_is_reset(&manifest) ); |
| 375 | db_end_transaction(0); |
| 376 | cgi_redirect(zFrom); |
| 377 | } |
| 378 | style_header("Add Attachment"); |
| 379 | if( !goodCaptcha ){ |
| 380 | @ <p class="generalError">Error: Incorrect security code.</p> |
| @@ -670,5 +681,106 @@ | |
| 670 | @ </ul> |
| 671 | } |
| 672 | db_finalize(&q); |
| 673 | |
| 674 | } |
| 675 |
| --- src/attach.c | |
| +++ src/attach.c | |
| @@ -246,10 +246,66 @@ | |
| 246 | db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", rid); |
| 247 | } |
| 248 | manifest_crosslink(rid, pAttach, MC_NONE); |
| 249 | } |
| 250 | |
| 251 | |
| 252 | /* |
| 253 | ** Commit a new attachment into the repository |
| 254 | */ |
| 255 | void attach_commit( |
| 256 | const char *zName, /* The filename of the attachment */ |
| 257 | const char *zTarget, /* The artifact uuid to attach to */ |
| 258 | const char *aContent, /* The content of the attachment */ |
| 259 | int szContent, /* The length of the attachment */ |
| 260 | int needModerator, /* Moderate the attachment? */ |
| 261 | const char *zComment /* The comment for the attachment */ |
| 262 | ){ |
| 263 | Blob content; |
| 264 | Blob manifest; |
| 265 | Blob cksum; |
| 266 | char *zUUID; |
| 267 | char *zDate; |
| 268 | int rid; |
| 269 | int i, n; |
| 270 | int addCompress = 0; |
| 271 | Manifest *pManifest; |
| 272 | |
| 273 | db_begin_transaction(); |
| 274 | blob_init(&content, aContent, szContent); |
| 275 | pManifest = manifest_parse(&content, 0, 0); |
| 276 | manifest_destroy(pManifest); |
| 277 | blob_init(&content, aContent, szContent); |
| 278 | if( pManifest ){ |
| 279 | blob_compress(&content, &content); |
| 280 | addCompress = 1; |
| 281 | } |
| 282 | rid = content_put_ex(&content, 0, 0, 0, needModerator); |
| 283 | zUUID = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid); |
| 284 | blob_zero(&manifest); |
| 285 | for(i=n=0; zName[i]; i++){ |
| 286 | if( zName[i]=='/' || zName[i]=='\\' ) n = i+1; |
| 287 | } |
| 288 | zName += n; |
| 289 | if( zName[0]==0 ) zName = "unknown"; |
| 290 | blob_appendf(&manifest, "A %F%s %F %s\n", |
| 291 | zName, addCompress ? ".gz" : "", zTarget, zUUID); |
| 292 | while( fossil_isspace(zComment[0]) ) zComment++; |
| 293 | n = strlen(zComment); |
| 294 | while( n>0 && fossil_isspace(zComment[n-1]) ){ n--; } |
| 295 | if( n>0 ){ |
| 296 | blob_appendf(&manifest, "C %#F\n", n, zComment); |
| 297 | } |
| 298 | zDate = date_in_standard_format("now"); |
| 299 | blob_appendf(&manifest, "D %s\n", zDate); |
| 300 | blob_appendf(&manifest, "U %F\n", login_name()); |
| 301 | md5sum_blob(&manifest, &cksum); |
| 302 | blob_appendf(&manifest, "Z %b\n", &cksum); |
| 303 | attach_put(&manifest, rid, needModerator); |
| 304 | assert( blob_is_reset(&manifest) ); |
| 305 | db_end_transaction(0); |
| 306 | } |
| 307 | |
| 308 | /* |
| 309 | ** WEBPAGE: attachadd |
| 310 | ** Add a new attachment. |
| 311 | ** |
| @@ -300,11 +356,11 @@ | |
| 356 | zTechNote = db_text(0, "SELECT substr(tagname,7) FROM tag" |
| 357 | " WHERE tagname GLOB 'event-%q*'", zTechNote); |
| 358 | if( zTechNote==0) fossil_redirect_home(); |
| 359 | } |
| 360 | zTarget = zTechNote; |
| 361 | zTargetType = mprintf("Tech Note <a href=\"%R/technote/%s\">%S</a>", |
| 362 | zTechNote, zTechNote); |
| 363 | |
| 364 | }else{ |
| 365 | if( g.perm.ApndTkt==0 || g.perm.Attach==0 ){ |
| 366 | login_needed(g.anon.ApndTkt && g.anon.Attach); |
| @@ -322,59 +378,14 @@ | |
| 378 | if( zFrom==0 ) zFrom = mprintf("%s/home", g.zTop); |
| 379 | if( P("cancel") ){ |
| 380 | cgi_redirect(zFrom); |
| 381 | } |
| 382 | if( P("ok") && szContent>0 && (goodCaptcha = captcha_is_correct()) ){ |
| 383 | int needModerator = (zTkt!=0 && ticket_need_moderation(0)) || |
| 384 | (zPage!=0 && wiki_need_moderation(0)); |
| 385 | const char *zComment = PD("comment", ""); |
| 386 | attach_commit(zName, zTarget, aContent, szContent, needModerator, zComment); |
| 387 | cgi_redirect(zFrom); |
| 388 | } |
| 389 | style_header("Add Attachment"); |
| 390 | if( !goodCaptcha ){ |
| 391 | @ <p class="generalError">Error: Incorrect security code.</p> |
| @@ -670,5 +681,106 @@ | |
| 681 | @ </ul> |
| 682 | } |
| 683 | db_finalize(&q); |
| 684 | |
| 685 | } |
| 686 | |
| 687 | /* |
| 688 | ** COMMAND: attachment* |
| 689 | ** |
| 690 | ** Usage: %fossil attachment add ?PAGENAME? FILENAME ?OPTIONS? |
| 691 | ** |
| 692 | ** Add an attachment to an existing wiki page or tech note. |
| 693 | ** |
| 694 | ** Options: |
| 695 | ** -t|--technote DATETIME Specifies the timestamp of |
| 696 | ** the technote to which the attachment |
| 697 | ** is to be made. The attachment will be |
| 698 | ** to the most recently modified tech note |
| 699 | ** with the specified timestamp. |
| 700 | ** -t|--technote TECHNOTE-ID Specifies the technote to be |
| 701 | ** updated by its technote id. |
| 702 | ** |
| 703 | ** One of PAGENAME, DATETIME or TECHNOTE-ID must be specified. |
| 704 | */ |
| 705 | void attachment_cmd(void){ |
| 706 | int n; |
| 707 | db_find_and_open_repository(0, 0); |
| 708 | if( g.argc<3 ){ |
| 709 | goto attachment_cmd_usage; |
| 710 | } |
| 711 | n = strlen(g.argv[2]); |
| 712 | if( n==0 ){ |
| 713 | goto attachment_cmd_usage; |
| 714 | } |
| 715 | |
| 716 | if( strncmp(g.argv[2],"add",n)==0 ){ |
| 717 | const char *zPageName; /* Name of the wiki page to attach to */ |
| 718 | const char *zFile; /* Name of the file to be attached */ |
| 719 | const char *zETime; /* The name of the technote to attach to */ |
| 720 | Manifest *pWiki = 0; /* Parsed wiki page content */ |
| 721 | char *zBody = 0; /* Wiki page content */ |
| 722 | int rid; |
| 723 | const char *zTarget; /* Target of the attachment */ |
| 724 | Blob content; /* The content of the attachment */ |
| 725 | zETime = find_option("technote","t",1); |
| 726 | if( !zETime ){ |
| 727 | if( g.argc!=5 ){ |
| 728 | usage("add PAGENAME FILENAME"); |
| 729 | } |
| 730 | zPageName = g.argv[3]; |
| 731 | rid = db_int(0, "SELECT x.rid FROM tag t, tagxref x" |
| 732 | " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'" |
| 733 | " ORDER BY x.mtime DESC LIMIT 1", |
| 734 | zPageName |
| 735 | ); |
| 736 | if( (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 ){ |
| 737 | zBody = pWiki->zWiki; |
| 738 | } |
| 739 | if( zBody==0 ){ |
| 740 | fossil_fatal("wiki page [%s] not found",zPageName); |
| 741 | } |
| 742 | zTarget = zPageName; |
| 743 | zFile = g.argv[4]; |
| 744 | }else{ |
| 745 | if( g.argc!=4 ){ |
| 746 | usage("add FILENAME --technote DATETIME|TECHNOTE-ID"); |
| 747 | } |
| 748 | rid = wiki_technote_to_rid(zETime); |
| 749 | if( rid<0 ){ |
| 750 | fossil_fatal("ambiguous tech note id: %s", zETime); |
| 751 | } |
| 752 | if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){ |
| 753 | zBody = pWiki->zWiki; |
| 754 | } |
| 755 | if( zBody==0 ){ |
| 756 | fossil_fatal("technote [%s] not found",zETime); |
| 757 | } |
| 758 | zTarget = db_text(0, |
| 759 | "SELECT substr(tagname,7) FROM tag WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')", |
| 760 | rid |
| 761 | ); |
| 762 | zFile = g.argv[3]; |
| 763 | } |
| 764 | blob_read_from_file(&content, zFile); |
| 765 | user_select(); |
| 766 | attach_commit( |
| 767 | zFile, /* The filename of the attachment */ |
| 768 | zTarget, /* The artifact uuid to attach to */ |
| 769 | blob_buffer(&content), /* The content of the attachment */ |
| 770 | blob_size(&content), /* The length of the attachment */ |
| 771 | 0, /* No need to moderate the attachment */ |
| 772 | "" /* Empty attachment comment */ |
| 773 | ); |
| 774 | if( !zETime ){ |
| 775 | fossil_print("Attached %s to wiki page %s.\n", zFile, zPageName); |
| 776 | }else{ |
| 777 | fossil_print("Attached %s to tech note %s.\n", zFile, zETime); |
| 778 | } |
| 779 | }else{ |
| 780 | goto attachment_cmd_usage; |
| 781 | } |
| 782 | return; |
| 783 | |
| 784 | attachment_cmd_usage: |
| 785 | usage("add ?PAGENAME? FILENAME [-t|--technote DATETIME ]"); |
| 786 | } |
| 787 |
+36
-26
| --- src/checkin.c | ||
| +++ src/checkin.c | ||
| @@ -549,20 +549,20 @@ | ||
| 549 | 549 | /* |
| 550 | 550 | ** COMMAND: extras |
| 551 | 551 | ** |
| 552 | 552 | ** Usage: %fossil extras ?OPTIONS? ?PATH1 ...? |
| 553 | 553 | ** |
| 554 | -** Print a list of all files in the source tree that are not part of | |
| 555 | -** the current checkout. See also the "clean" command. If paths are | |
| 556 | -** specified, only files in the given directories will be listed. | |
| 554 | +** Print a list of all files in the source tree that are not part of the | |
| 555 | +** current checkout. See also the "clean" command. If paths are specified, | |
| 556 | +** only files in the given directories will be listed. | |
| 557 | 557 | ** |
| 558 | 558 | ** Files and subdirectories whose names begin with "." are normally |
| 559 | 559 | ** ignored but can be included by adding the --dotfiles option. |
| 560 | 560 | ** |
| 561 | -** The GLOBPATTERN is a comma-separated list of GLOB expressions for | |
| 562 | -** files that are ignored. The GLOBPATTERN specified by the "ignore-glob" | |
| 563 | -** is used if the --ignore option is omitted. | |
| 561 | +** Files whose names match any of the glob patterns in the "ignore-glob" | |
| 562 | +** setting are ignored. This setting can be overridden by the --ignore | |
| 563 | +** option, whose CSG argument is a comma-separated list of glob patterns. | |
| 564 | 564 | ** |
| 565 | 565 | ** Pathnames are displayed according to the "relative-paths" setting, |
| 566 | 566 | ** unless overridden by the --abs-paths or --rel-paths options. |
| 567 | 567 | ** |
| 568 | 568 | ** Options: |
| @@ -635,29 +635,38 @@ | ||
| 635 | 635 | /* |
| 636 | 636 | ** COMMAND: clean |
| 637 | 637 | ** |
| 638 | 638 | ** Usage: %fossil clean ?OPTIONS? ?PATH ...? |
| 639 | 639 | ** |
| 640 | -** Delete all "extra" files in the source tree. "Extra" files are | |
| 641 | -** files that are not officially part of the checkout. This operation | |
| 642 | -** cannot be undone. If one or more PATH arguments appear, then only | |
| 643 | -** the files named, or files contained with directories named, will be | |
| 644 | -** removed. | |
| 645 | -** | |
| 646 | -** Prompted are issued to confirm the removal of each file, unless | |
| 647 | -** the --force flag is used or unless the file matches glob pattern | |
| 648 | -** specified by the --clean option. No file that matches glob patterns | |
| 649 | -** specified by --ignore or --keep will ever be deleted. The default | |
| 650 | -** values for --clean, --ignore, and --keep are determined by the | |
| 651 | -** (versionable) clean-glob, ignore-glob, and keep-glob settings. | |
| 652 | -** Files and subdirectories whose names begin with "." are automatically | |
| 653 | -** ignored unless the --dotfiles option is used. | |
| 654 | -** | |
| 655 | -** The --verily option ignores the keep-glob and ignore-glob settings | |
| 656 | -** and turns on --force, --dotfiles, and --emptydirs. Use the --verily | |
| 657 | -** option when you really want to clean up everything. Extreme care | |
| 658 | -** should be exercised when using the --verily option. | |
| 640 | +** Delete all "extra" files in the source tree. "Extra" files are files | |
| 641 | +** that are not officially part of the checkout. If one or more PATH | |
| 642 | +** arguments appear, then only the files named, or files contained with | |
| 643 | +** directories named, will be removed. | |
| 644 | +** | |
| 645 | +** If the --prompt option is used, prompts are issued to confirm the | |
| 646 | +** permanent removal of each file. Otherwise, files are backed up to the | |
| 647 | +** undo buffer prior to removal, and prompts are issued only for files | |
| 648 | +** whose removal cannot be undone due to their large size or due to | |
| 649 | +** --disable-undo being used. | |
| 650 | +** | |
| 651 | +** The --force option treats all prompts as having been answered yes, | |
| 652 | +** whereas --no-prompt treats them as having been answered no. | |
| 653 | +** | |
| 654 | +** Files matching any glob pattern specified by the --clean option are | |
| 655 | +** deleted without prompting, and the removal cannot be undone. | |
| 656 | +** | |
| 657 | +** No file that matches glob patterns specified by --ignore or --keep will | |
| 658 | +** ever be deleted. Files and subdirectories whose names begin with "." | |
| 659 | +** are automatically ignored unless the --dotfiles option is used. | |
| 660 | +** | |
| 661 | +** The default values for --clean, --ignore, and --keep are determined by | |
| 662 | +** the (versionable) clean-glob, ignore-glob, and keep-glob settings. | |
| 663 | +** | |
| 664 | +** The --verily option ignores the keep-glob and ignore-glob settings and | |
| 665 | +** turns on --force, --emptydirs, --dotfiles, and --disable-undo. Use the | |
| 666 | +** --verily option when you really want to clean up everything. Extreme | |
| 667 | +** care should be exercised when using the --verily option. | |
| 659 | 668 | ** |
| 660 | 669 | ** Options: |
| 661 | 670 | ** --allckouts Check for empty directories within any checkouts |
| 662 | 671 | ** that may be nested within the current one. This |
| 663 | 672 | ** option should be used with great care because the |
| @@ -678,11 +687,12 @@ | ||
| 678 | 687 | ** argument. Matching files, if any, are removed |
| 679 | 688 | ** prior to checking for any empty directories; |
| 680 | 689 | ** therefore, directories that contain only files |
| 681 | 690 | ** that were removed will be removed as well. |
| 682 | 691 | ** -f|--force Remove files without prompting. |
| 683 | -** -i|--prompt Prompt before removing each file. | |
| 692 | +** -i|--prompt Prompt before removing each file. This option | |
| 693 | +** implies the --disable-undo option. | |
| 684 | 694 | ** -x|--verily WARNING: Removes everything that is not a managed |
| 685 | 695 | ** file or the repository itself. This option |
| 686 | 696 | ** implies the --force, --emptydirs, --dotfiles, and |
| 687 | 697 | ** --disable-undo options. Furthermore, it completely |
| 688 | 698 | ** disregards the keep-glob and ignore-glob settings. |
| 689 | 699 |
| --- src/checkin.c | |
| +++ src/checkin.c | |
| @@ -549,20 +549,20 @@ | |
| 549 | /* |
| 550 | ** COMMAND: extras |
| 551 | ** |
| 552 | ** Usage: %fossil extras ?OPTIONS? ?PATH1 ...? |
| 553 | ** |
| 554 | ** Print a list of all files in the source tree that are not part of |
| 555 | ** the current checkout. See also the "clean" command. If paths are |
| 556 | ** specified, only files in the given directories will be listed. |
| 557 | ** |
| 558 | ** Files and subdirectories whose names begin with "." are normally |
| 559 | ** ignored but can be included by adding the --dotfiles option. |
| 560 | ** |
| 561 | ** The GLOBPATTERN is a comma-separated list of GLOB expressions for |
| 562 | ** files that are ignored. The GLOBPATTERN specified by the "ignore-glob" |
| 563 | ** is used if the --ignore option is omitted. |
| 564 | ** |
| 565 | ** Pathnames are displayed according to the "relative-paths" setting, |
| 566 | ** unless overridden by the --abs-paths or --rel-paths options. |
| 567 | ** |
| 568 | ** Options: |
| @@ -635,29 +635,38 @@ | |
| 635 | /* |
| 636 | ** COMMAND: clean |
| 637 | ** |
| 638 | ** Usage: %fossil clean ?OPTIONS? ?PATH ...? |
| 639 | ** |
| 640 | ** Delete all "extra" files in the source tree. "Extra" files are |
| 641 | ** files that are not officially part of the checkout. This operation |
| 642 | ** cannot be undone. If one or more PATH arguments appear, then only |
| 643 | ** the files named, or files contained with directories named, will be |
| 644 | ** removed. |
| 645 | ** |
| 646 | ** Prompted are issued to confirm the removal of each file, unless |
| 647 | ** the --force flag is used or unless the file matches glob pattern |
| 648 | ** specified by the --clean option. No file that matches glob patterns |
| 649 | ** specified by --ignore or --keep will ever be deleted. The default |
| 650 | ** values for --clean, --ignore, and --keep are determined by the |
| 651 | ** (versionable) clean-glob, ignore-glob, and keep-glob settings. |
| 652 | ** Files and subdirectories whose names begin with "." are automatically |
| 653 | ** ignored unless the --dotfiles option is used. |
| 654 | ** |
| 655 | ** The --verily option ignores the keep-glob and ignore-glob settings |
| 656 | ** and turns on --force, --dotfiles, and --emptydirs. Use the --verily |
| 657 | ** option when you really want to clean up everything. Extreme care |
| 658 | ** should be exercised when using the --verily option. |
| 659 | ** |
| 660 | ** Options: |
| 661 | ** --allckouts Check for empty directories within any checkouts |
| 662 | ** that may be nested within the current one. This |
| 663 | ** option should be used with great care because the |
| @@ -678,11 +687,12 @@ | |
| 678 | ** argument. Matching files, if any, are removed |
| 679 | ** prior to checking for any empty directories; |
| 680 | ** therefore, directories that contain only files |
| 681 | ** that were removed will be removed as well. |
| 682 | ** -f|--force Remove files without prompting. |
| 683 | ** -i|--prompt Prompt before removing each file. |
| 684 | ** -x|--verily WARNING: Removes everything that is not a managed |
| 685 | ** file or the repository itself. This option |
| 686 | ** implies the --force, --emptydirs, --dotfiles, and |
| 687 | ** --disable-undo options. Furthermore, it completely |
| 688 | ** disregards the keep-glob and ignore-glob settings. |
| 689 |
| --- src/checkin.c | |
| +++ src/checkin.c | |
| @@ -549,20 +549,20 @@ | |
| 549 | /* |
| 550 | ** COMMAND: extras |
| 551 | ** |
| 552 | ** Usage: %fossil extras ?OPTIONS? ?PATH1 ...? |
| 553 | ** |
| 554 | ** Print a list of all files in the source tree that are not part of the |
| 555 | ** current checkout. See also the "clean" command. If paths are specified, |
| 556 | ** only files in the given directories will be listed. |
| 557 | ** |
| 558 | ** Files and subdirectories whose names begin with "." are normally |
| 559 | ** ignored but can be included by adding the --dotfiles option. |
| 560 | ** |
| 561 | ** Files whose names match any of the glob patterns in the "ignore-glob" |
| 562 | ** setting are ignored. This setting can be overridden by the --ignore |
| 563 | ** option, whose CSG argument is a comma-separated list of glob patterns. |
| 564 | ** |
| 565 | ** Pathnames are displayed according to the "relative-paths" setting, |
| 566 | ** unless overridden by the --abs-paths or --rel-paths options. |
| 567 | ** |
| 568 | ** Options: |
| @@ -635,29 +635,38 @@ | |
| 635 | /* |
| 636 | ** COMMAND: clean |
| 637 | ** |
| 638 | ** Usage: %fossil clean ?OPTIONS? ?PATH ...? |
| 639 | ** |
| 640 | ** Delete all "extra" files in the source tree. "Extra" files are files |
| 641 | ** that are not officially part of the checkout. If one or more PATH |
| 642 | ** arguments appear, then only the files named, or files contained with |
| 643 | ** directories named, will be removed. |
| 644 | ** |
| 645 | ** If the --prompt option is used, prompts are issued to confirm the |
| 646 | ** permanent removal of each file. Otherwise, files are backed up to the |
| 647 | ** undo buffer prior to removal, and prompts are issued only for files |
| 648 | ** whose removal cannot be undone due to their large size or due to |
| 649 | ** --disable-undo being used. |
| 650 | ** |
| 651 | ** The --force option treats all prompts as having been answered yes, |
| 652 | ** whereas --no-prompt treats them as having been answered no. |
| 653 | ** |
| 654 | ** Files matching any glob pattern specified by the --clean option are |
| 655 | ** deleted without prompting, and the removal cannot be undone. |
| 656 | ** |
| 657 | ** No file that matches glob patterns specified by --ignore or --keep will |
| 658 | ** ever be deleted. Files and subdirectories whose names begin with "." |
| 659 | ** are automatically ignored unless the --dotfiles option is used. |
| 660 | ** |
| 661 | ** The default values for --clean, --ignore, and --keep are determined by |
| 662 | ** the (versionable) clean-glob, ignore-glob, and keep-glob settings. |
| 663 | ** |
| 664 | ** The --verily option ignores the keep-glob and ignore-glob settings and |
| 665 | ** turns on --force, --emptydirs, --dotfiles, and --disable-undo. Use the |
| 666 | ** --verily option when you really want to clean up everything. Extreme |
| 667 | ** care should be exercised when using the --verily option. |
| 668 | ** |
| 669 | ** Options: |
| 670 | ** --allckouts Check for empty directories within any checkouts |
| 671 | ** that may be nested within the current one. This |
| 672 | ** option should be used with great care because the |
| @@ -678,11 +687,12 @@ | |
| 687 | ** argument. Matching files, if any, are removed |
| 688 | ** prior to checking for any empty directories; |
| 689 | ** therefore, directories that contain only files |
| 690 | ** that were removed will be removed as well. |
| 691 | ** -f|--force Remove files without prompting. |
| 692 | ** -i|--prompt Prompt before removing each file. This option |
| 693 | ** implies the --disable-undo option. |
| 694 | ** -x|--verily WARNING: Removes everything that is not a managed |
| 695 | ** file or the repository itself. This option |
| 696 | ** implies the --force, --emptydirs, --dotfiles, and |
| 697 | ** --disable-undo options. Furthermore, it completely |
| 698 | ** disregards the keep-glob and ignore-glob settings. |
| 699 |
M
src/db.c
+2
-2
| --- src/db.c | ||
| +++ src/db.c | ||
| @@ -2677,12 +2677,12 @@ | ||
| 2677 | 2677 | ** differ only in case are the same file. Defaults to |
| 2678 | 2678 | ** TRUE for unix and FALSE for Cygwin, Mac and Windows. |
| 2679 | 2679 | ** |
| 2680 | 2680 | ** clean-glob The VALUE is a comma or newline-separated list of GLOB |
| 2681 | 2681 | ** (versionable) patterns specifying files that the "clean" command will |
| 2682 | -** delete without prompting even when the -force flag has | |
| 2683 | -** not been used. Example: *.a *.lib *.o | |
| 2682 | +** delete without prompting or allowing undo. | |
| 2683 | +** Example: *.a,*.lib,*.o | |
| 2684 | 2684 | ** |
| 2685 | 2685 | ** clearsign When enabled, fossil will attempt to sign all commits |
| 2686 | 2686 | ** with gpg. When disabled (the default), commits will |
| 2687 | 2687 | ** be unsigned. Default: off |
| 2688 | 2688 | ** |
| 2689 | 2689 |
| --- src/db.c | |
| +++ src/db.c | |
| @@ -2677,12 +2677,12 @@ | |
| 2677 | ** differ only in case are the same file. Defaults to |
| 2678 | ** TRUE for unix and FALSE for Cygwin, Mac and Windows. |
| 2679 | ** |
| 2680 | ** clean-glob The VALUE is a comma or newline-separated list of GLOB |
| 2681 | ** (versionable) patterns specifying files that the "clean" command will |
| 2682 | ** delete without prompting even when the -force flag has |
| 2683 | ** not been used. Example: *.a *.lib *.o |
| 2684 | ** |
| 2685 | ** clearsign When enabled, fossil will attempt to sign all commits |
| 2686 | ** with gpg. When disabled (the default), commits will |
| 2687 | ** be unsigned. Default: off |
| 2688 | ** |
| 2689 |
| --- src/db.c | |
| +++ src/db.c | |
| @@ -2677,12 +2677,12 @@ | |
| 2677 | ** differ only in case are the same file. Defaults to |
| 2678 | ** TRUE for unix and FALSE for Cygwin, Mac and Windows. |
| 2679 | ** |
| 2680 | ** clean-glob The VALUE is a comma or newline-separated list of GLOB |
| 2681 | ** (versionable) patterns specifying files that the "clean" command will |
| 2682 | ** delete without prompting or allowing undo. |
| 2683 | ** Example: *.a,*.lib,*.o |
| 2684 | ** |
| 2685 | ** clearsign When enabled, fossil will attempt to sign all commits |
| 2686 | ** with gpg. When disabled (the default), commits will |
| 2687 | ** be unsigned. Default: off |
| 2688 | ** |
| 2689 |
+4
-22
| --- src/event.c | ||
| +++ src/event.c | ||
| @@ -540,44 +540,26 @@ | ||
| 540 | 540 | style_footer(); |
| 541 | 541 | } |
| 542 | 542 | |
| 543 | 543 | /* |
| 544 | 544 | ** Add a new tech note to the repository. The timestamp is |
| 545 | -** given by the zETime parameter. isNew must be true to create | |
| 545 | +** given by the zETime parameter. rid must be zero to create | |
| 546 | 546 | ** a new page. If no previous page with the name zPageName exists |
| 547 | 547 | ** and isNew is false, then this routine throws an error. |
| 548 | 548 | */ |
| 549 | 549 | void event_cmd_commit( |
| 550 | 550 | char *zETime, /* timestamp */ |
| 551 | - int isNew, /* true to create a new page */ | |
| 551 | + int rid, /* Artifact id of the tech note */ | |
| 552 | 552 | Blob *pContent, /* content of the new page */ |
| 553 | 553 | const char *zMimeType, /* mimetype of the content */ |
| 554 | 554 | const char *zComment, /* comment to go on the timeline */ |
| 555 | 555 | const char *zTags, /* tags */ |
| 556 | 556 | const char *zClr /* background color */ |
| 557 | 557 | ){ |
| 558 | - int rid; /* Artifact id of the tech note */ | |
| 559 | 558 | const char *zId; /* id of the tech note */ |
| 560 | - rid = db_int(0, "SELECT objid FROM event" | |
| 561 | - " WHERE datetime(mtime)=datetime('%q') AND type = 'e'" | |
| 562 | - " LIMIT 1", | |
| 563 | - zETime | |
| 564 | - ); | |
| 565 | - if( rid==0 && !isNew ){ | |
| 566 | -#ifdef FOSSIL_ENABLE_JSON | |
| 567 | - g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; | |
| 568 | -#endif | |
| 569 | - fossil_fatal("no such tech note: %s", zETime); | |
| 570 | - } | |
| 571 | - if( rid!=0 && isNew ){ | |
| 572 | -#ifdef FOSSIL_ENABLE_JSON | |
| 573 | - g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS; | |
| 574 | -#endif | |
| 575 | - fossil_fatal("tech note %s already exists", zETime); | |
| 576 | - } | |
| 577 | - | |
| 578 | - if ( isNew ){ | |
| 559 | + | |
| 560 | + if ( rid==0 ){ | |
| 579 | 561 | zId = db_text(0, "SELECT lower(hex(randomblob(20)))"); |
| 580 | 562 | }else{ |
| 581 | 563 | zId = db_text(0, |
| 582 | 564 | "SELECT substr(tagname,7) FROM tag" |
| 583 | 565 | " WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')", |
| 584 | 566 |
| --- src/event.c | |
| +++ src/event.c | |
| @@ -540,44 +540,26 @@ | |
| 540 | style_footer(); |
| 541 | } |
| 542 | |
| 543 | /* |
| 544 | ** Add a new tech note to the repository. The timestamp is |
| 545 | ** given by the zETime parameter. isNew must be true to create |
| 546 | ** a new page. If no previous page with the name zPageName exists |
| 547 | ** and isNew is false, then this routine throws an error. |
| 548 | */ |
| 549 | void event_cmd_commit( |
| 550 | char *zETime, /* timestamp */ |
| 551 | int isNew, /* true to create a new page */ |
| 552 | Blob *pContent, /* content of the new page */ |
| 553 | const char *zMimeType, /* mimetype of the content */ |
| 554 | const char *zComment, /* comment to go on the timeline */ |
| 555 | const char *zTags, /* tags */ |
| 556 | const char *zClr /* background color */ |
| 557 | ){ |
| 558 | int rid; /* Artifact id of the tech note */ |
| 559 | const char *zId; /* id of the tech note */ |
| 560 | rid = db_int(0, "SELECT objid FROM event" |
| 561 | " WHERE datetime(mtime)=datetime('%q') AND type = 'e'" |
| 562 | " LIMIT 1", |
| 563 | zETime |
| 564 | ); |
| 565 | if( rid==0 && !isNew ){ |
| 566 | #ifdef FOSSIL_ENABLE_JSON |
| 567 | g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; |
| 568 | #endif |
| 569 | fossil_fatal("no such tech note: %s", zETime); |
| 570 | } |
| 571 | if( rid!=0 && isNew ){ |
| 572 | #ifdef FOSSIL_ENABLE_JSON |
| 573 | g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS; |
| 574 | #endif |
| 575 | fossil_fatal("tech note %s already exists", zETime); |
| 576 | } |
| 577 | |
| 578 | if ( isNew ){ |
| 579 | zId = db_text(0, "SELECT lower(hex(randomblob(20)))"); |
| 580 | }else{ |
| 581 | zId = db_text(0, |
| 582 | "SELECT substr(tagname,7) FROM tag" |
| 583 | " WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')", |
| 584 |
| --- src/event.c | |
| +++ src/event.c | |
| @@ -540,44 +540,26 @@ | |
| 540 | style_footer(); |
| 541 | } |
| 542 | |
| 543 | /* |
| 544 | ** Add a new tech note to the repository. The timestamp is |
| 545 | ** given by the zETime parameter. rid must be zero to create |
| 546 | ** a new page. If no previous page with the name zPageName exists |
| 547 | ** and isNew is false, then this routine throws an error. |
| 548 | */ |
| 549 | void event_cmd_commit( |
| 550 | char *zETime, /* timestamp */ |
| 551 | int rid, /* Artifact id of the tech note */ |
| 552 | Blob *pContent, /* content of the new page */ |
| 553 | const char *zMimeType, /* mimetype of the content */ |
| 554 | const char *zComment, /* comment to go on the timeline */ |
| 555 | const char *zTags, /* tags */ |
| 556 | const char *zClr /* background color */ |
| 557 | ){ |
| 558 | const char *zId; /* id of the tech note */ |
| 559 | |
| 560 | if ( rid==0 ){ |
| 561 | zId = db_text(0, "SELECT lower(hex(randomblob(20)))"); |
| 562 | }else{ |
| 563 | zId = db_text(0, |
| 564 | "SELECT substr(tagname,7) FROM tag" |
| 565 | " WHERE tagid=(SELECT tagid FROM event WHERE objid='%d')", |
| 566 |
+235
-26
| --- src/export.c | ||
| +++ src/export.c | ||
| @@ -19,10 +19,28 @@ | ||
| 19 | 19 | */ |
| 20 | 20 | #include "config.h" |
| 21 | 21 | #include "export.h" |
| 22 | 22 | #include <assert.h> |
| 23 | 23 | |
| 24 | +#if INTERFACE | |
| 25 | +/* | |
| 26 | +** struct mark_t | |
| 27 | +** holds information for translating between git commits | |
| 28 | +** and fossil commits. | |
| 29 | +** -git_name: This is the mark name that identifies the commit to git. | |
| 30 | +** It will always begin with a ':'. | |
| 31 | +** -rid: The unique object ID that identifies this commit within the | |
| 32 | +** repository database. | |
| 33 | +** -uuid: The SHA-1 of artifact corresponding to rid. | |
| 34 | +*/ | |
| 35 | +struct mark_t{ | |
| 36 | + char *name; | |
| 37 | + int rid; | |
| 38 | + char uuid[41]; | |
| 39 | +}; | |
| 40 | +#endif | |
| 41 | + | |
| 24 | 42 | /* |
| 25 | 43 | ** Output a "committer" record for the given user. |
| 26 | 44 | */ |
| 27 | 45 | static void print_person(const char *zUser){ |
| 28 | 46 | static Stmt q; |
| @@ -96,10 +114,197 @@ | ||
| 96 | 114 | db_reset(&q); |
| 97 | 115 | } |
| 98 | 116 | |
| 99 | 117 | #define BLOBMARK(rid) ((rid) * 2) |
| 100 | 118 | #define COMMITMARK(rid) ((rid) * 2 + 1) |
| 119 | + | |
| 120 | +/* | |
| 121 | +** insert_commit_xref() | |
| 122 | +** Insert a new (mark,rid,uuid) entry into the 'xmark' table. | |
| 123 | +** zName and zUuid must be non-null and must point to NULL-terminated strings. | |
| 124 | +*/ | |
| 125 | +void insert_commit_xref(int rid, const char *zName, const char *zUuid){ | |
| 126 | + db_multi_exec( | |
| 127 | + "INSERT OR IGNORE INTO xmark(tname, trid, tuuid)" | |
| 128 | + "VALUES(%Q,%d,%Q)", | |
| 129 | + zName, rid, zUuid | |
| 130 | + ); | |
| 131 | +} | |
| 132 | + | |
| 133 | +/* | |
| 134 | +** create_mark() | |
| 135 | +** Create a new (mark,rid,uuid) entry for the given rid in the 'xmark' table, | |
| 136 | +** and return that information as a struct mark_t in *mark. | |
| 137 | +** This function returns -1 in the case where 'rid' does not exist, otherwise | |
| 138 | +** it returns 0. | |
| 139 | +** mark->name is dynamically allocated and is owned by the caller upon return. | |
| 140 | +*/ | |
| 141 | +int create_mark(int rid, struct mark_t *mark){ | |
| 142 | + char sid[13]; | |
| 143 | + char *zUuid = rid_to_uuid(rid); | |
| 144 | + if(!zUuid){ | |
| 145 | + fossil_trace("Undefined rid=%d\n", rid); | |
| 146 | + return -1; | |
| 147 | + } | |
| 148 | + mark->rid = rid; | |
| 149 | + sprintf(sid, ":%d", COMMITMARK(rid)); | |
| 150 | + mark->name = fossil_strdup(sid); | |
| 151 | + strcpy(mark->uuid, zUuid); | |
| 152 | + free(zUuid); | |
| 153 | + insert_commit_xref(mark->rid, mark->name, mark->uuid); | |
| 154 | + return 0; | |
| 155 | +} | |
| 156 | + | |
| 157 | +/* | |
| 158 | +** mark_name_from_rid() | |
| 159 | +** Find the mark associated with the given rid. Mark names always start | |
| 160 | +** with ':', and are pulled from the 'xmark' temporary table. | |
| 161 | +** This function returns NULL if the rid does not exist in the 'xmark' table. | |
| 162 | +** Otherwise, it returns the name of the mark, which is dynamically allocated | |
| 163 | +** and is owned by the caller of this function. | |
| 164 | +*/ | |
| 165 | +char * mark_name_from_rid(int rid){ | |
| 166 | + char *zMark = db_text(0, "SELECT tname FROM xmark WHERE trid=%d", rid); | |
| 167 | + if(zMark==NULL){ | |
| 168 | + struct mark_t mark; | |
| 169 | + if(create_mark(rid, &mark)==0){ | |
| 170 | + zMark = mark.name; | |
| 171 | + }else{ | |
| 172 | + return NULL; | |
| 173 | + } | |
| 174 | + } | |
| 175 | + return zMark; | |
| 176 | +} | |
| 177 | + | |
| 178 | +/* | |
| 179 | +** parse_mark() | |
| 180 | +** Create a new (mark,rid,uuid) entry in the 'xmark' table given a line | |
| 181 | +** from a marks file. Return the cross-ref information as a struct mark_t | |
| 182 | +** in *mark. | |
| 183 | +** This function returns -1 in the case that the line is blank, malformed, or | |
| 184 | +** the rid/uuid named in 'line' does not match what is in the repository | |
| 185 | +** database. Otherwise, 0 is returned. | |
| 186 | +** mark->name is dynamically allocated, and owned by the caller. | |
| 187 | +*/ | |
| 188 | +int parse_mark(char *line, struct mark_t *mark){ | |
| 189 | + char *cur_tok; | |
| 190 | + cur_tok = strtok(line, " \t"); | |
| 191 | + if(!cur_tok||strlen(cur_tok)<2){ | |
| 192 | + return -1; | |
| 193 | + } | |
| 194 | + mark->rid = atoi(&cur_tok[1]); | |
| 195 | + if(cur_tok[0]!='c'){ | |
| 196 | + /* This is probably a blob mark */ | |
| 197 | + mark->name = NULL; | |
| 198 | + return 0; | |
| 199 | + } | |
| 200 | + | |
| 201 | + cur_tok = strtok(NULL, " \t"); | |
| 202 | + if(!cur_tok){ | |
| 203 | + /* This mark was generated by an older version of Fossil and doesn't | |
| 204 | + ** include the mark name and uuid. create_mark() will name the new mark | |
| 205 | + ** exactly as it was when exported to git, so that we should have a | |
| 206 | + ** valid mapping from git sha1<->mark name<->fossil sha1. */ | |
| 207 | + return create_mark(mark->rid, mark); | |
| 208 | + }else{ | |
| 209 | + mark->name = fossil_strdup(cur_tok); | |
| 210 | + } | |
| 211 | + | |
| 212 | + cur_tok = strtok(NULL, "\n"); | |
| 213 | + if(!cur_tok||strlen(cur_tok)!=40){ | |
| 214 | + free(mark->name); | |
| 215 | + fossil_trace("Invalid SHA-1 in marks file: %s\n", cur_tok); | |
| 216 | + return -1; | |
| 217 | + }else{ | |
| 218 | + strcpy(mark->uuid, cur_tok); | |
| 219 | + } | |
| 220 | + | |
| 221 | + /* make sure that rid corresponds to UUID */ | |
| 222 | + if(fast_uuid_to_rid(mark->uuid)!=mark->rid){ | |
| 223 | + free(mark->name); | |
| 224 | + fossil_trace("Non-existent SHA-1 in marks file: %s\n", mark->uuid); | |
| 225 | + return -1; | |
| 226 | + } | |
| 227 | + | |
| 228 | + /* insert a cross-ref into the 'xmark' table */ | |
| 229 | + insert_commit_xref(mark->rid, mark->name, mark->uuid); | |
| 230 | + return 0; | |
| 231 | +} | |
| 232 | + | |
| 233 | +/* | |
| 234 | +** import_marks() | |
| 235 | +** Import the marks specified in file 'f' into the 'xmark' table. | |
| 236 | +** If 'blobs' is non-null, insert all blob marks into it. | |
| 237 | +** If 'vers' is non-null, insert all commit marks into it. | |
| 238 | +** Each line in the file must be at most 100 characters in length. This | |
| 239 | +** seems like a reasonable maximum for a 40-character uuid, and 1-13 | |
| 240 | +** character rid. | |
| 241 | +** The function returns -1 if any of the lines in file 'f' are malformed, | |
| 242 | +** or the rid/uuid information doesn't match what is in the repository | |
| 243 | +** database. Otherwise, 0 is returned. | |
| 244 | +*/ | |
| 245 | +int import_marks(FILE* f, Bag *blobs, Bag *vers){ | |
| 246 | + char line[101]; | |
| 247 | + while(fgets(line, sizeof(line), f)){ | |
| 248 | + struct mark_t mark; | |
| 249 | + if(strlen(line)==100&&line[99]!='\n'){ | |
| 250 | + /* line too long */ | |
| 251 | + return -1; | |
| 252 | + } | |
| 253 | + if( parse_mark(line, &mark)<0 ){ | |
| 254 | + return -1; | |
| 255 | + }else if( line[0]=='b' ){ | |
| 256 | + /* Don't import blob marks into 'xmark' table--git doesn't use them, | |
| 257 | + ** so they need to be left free for git to reuse. */ | |
| 258 | + if(blobs!=NULL){ | |
| 259 | + bag_insert(blobs, mark.rid); | |
| 260 | + } | |
| 261 | + }else if( vers!=NULL ){ | |
| 262 | + bag_insert(vers, mark.rid); | |
| 263 | + } | |
| 264 | + free(mark.name); | |
| 265 | + } | |
| 266 | + return 0; | |
| 267 | +} | |
| 268 | + | |
| 269 | +/* | |
| 270 | +** If 'blobs' is non-null, it must point to a Bag of blob rids to be | |
| 271 | +** written to disk. Blob rids are written as 'b<rid>'. | |
| 272 | +** If 'vers' is non-null, it must point to a Bag of commit rids to be | |
| 273 | +** written to disk. Commit rids are written as 'c<rid> :<mark> <uuid>'. | |
| 274 | +** All commit (mark,rid,uuid) tuples are stored in 'xmark' table. | |
| 275 | +** This function does not fail, but may produce errors if a uuid cannot | |
| 276 | +** be found for an rid in 'vers'. | |
| 277 | +*/ | |
| 278 | +void export_marks(FILE* f, Bag *blobs, Bag *vers){ | |
| 279 | + int rid; | |
| 280 | + if( blobs!=NULL ){ | |
| 281 | + rid = bag_first(blobs); | |
| 282 | + if(rid!=0){ | |
| 283 | + do{ | |
| 284 | + fprintf(f, "b%d\n", rid); | |
| 285 | + }while((rid = bag_next(blobs, rid))!=0); | |
| 286 | + } | |
| 287 | + } | |
| 288 | + if( vers!=NULL ){ | |
| 289 | + rid = bag_first(vers); | |
| 290 | + if( rid!=0 ){ | |
| 291 | + do{ | |
| 292 | + char *zUuid = rid_to_uuid(rid); | |
| 293 | + char *zMark; | |
| 294 | + if(zUuid==NULL){ | |
| 295 | + fossil_trace("No uuid matching rid=%d when exporting marks\n", rid); | |
| 296 | + continue; | |
| 297 | + } | |
| 298 | + zMark = mark_name_from_rid(rid); | |
| 299 | + fprintf(f, "c%d %s %s\n", rid, zMark, zUuid); | |
| 300 | + free(zMark); | |
| 301 | + free(zUuid); | |
| 302 | + }while( (rid = bag_next(vers, rid))!=0 ); | |
| 303 | + } | |
| 304 | + } | |
| 305 | +} | |
| 101 | 306 | |
| 102 | 307 | /* |
| 103 | 308 | ** COMMAND: export |
| 104 | 309 | ** |
| 105 | 310 | ** Usage: %fossil export --git ?OPTIONS? ?REPOSITORY? |
| @@ -147,35 +352,40 @@ | ||
| 147 | 352 | verify_all_options(); |
| 148 | 353 | if( g.argc!=2 && g.argc!=3 ){ usage("--git ?REPOSITORY?"); } |
| 149 | 354 | |
| 150 | 355 | db_multi_exec("CREATE TEMPORARY TABLE oldblob(rid INTEGER PRIMARY KEY)"); |
| 151 | 356 | db_multi_exec("CREATE TEMPORARY TABLE oldcommit(rid INTEGER PRIMARY KEY)"); |
| 357 | + db_multi_exec("CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT)"); | |
| 152 | 358 | if( markfile_in!=0 ){ |
| 153 | 359 | Stmt qb,qc; |
| 154 | - char line[100]; | |
| 155 | 360 | FILE *f; |
| 361 | + int rid; | |
| 156 | 362 | |
| 157 | 363 | f = fossil_fopen(markfile_in, "r"); |
| 158 | 364 | if( f==0 ){ |
| 159 | 365 | fossil_fatal("cannot open %s for reading", markfile_in); |
| 160 | 366 | } |
| 367 | + if(import_marks(f, &blobs, &vers)<0){ | |
| 368 | + fossil_fatal("error importing marks from file: %s\n", markfile_in); | |
| 369 | + } | |
| 161 | 370 | db_prepare(&qb, "INSERT OR IGNORE INTO oldblob VALUES (:rid)"); |
| 162 | 371 | db_prepare(&qc, "INSERT OR IGNORE INTO oldcommit VALUES (:rid)"); |
| 163 | - while( fgets(line, sizeof(line), f)!=0 ){ | |
| 164 | - if( *line == 'b' ){ | |
| 165 | - db_bind_text(&qb, ":rid", line + 1); | |
| 372 | + rid = bag_first(&blobs); | |
| 373 | + if(rid!=0){ | |
| 374 | + do{ | |
| 375 | + db_bind_int(&qb, ":rid", rid); | |
| 166 | 376 | db_step(&qb); |
| 167 | 377 | db_reset(&qb); |
| 168 | - bag_insert(&blobs, atoi(line + 1)); | |
| 169 | - }else if( *line == 'c' ){ | |
| 170 | - db_bind_text(&qc, ":rid", line + 1); | |
| 378 | + }while((rid = bag_next(&blobs, rid))!=0); | |
| 379 | + } | |
| 380 | + rid = bag_first(&vers); | |
| 381 | + if(rid!=0){ | |
| 382 | + do{ | |
| 383 | + db_bind_int(&qc, ":rid", rid); | |
| 171 | 384 | db_step(&qc); |
| 172 | 385 | db_reset(&qc); |
| 173 | - bag_insert(&vers, atoi(line + 1)); | |
| 174 | - }else{ | |
| 175 | - fossil_fatal("bad input from %s: %s", markfile_in, line); | |
| 176 | - } | |
| 386 | + }while((rid = bag_next(&vers, rid))!=0); | |
| 177 | 387 | } |
| 178 | 388 | db_finalize(&qb); |
| 179 | 389 | db_finalize(&qc); |
| 180 | 390 | fclose(f); |
| 181 | 391 | } |
| @@ -249,10 +459,11 @@ | ||
| 249 | 459 | int ckinId = db_column_int(&q, 1); |
| 250 | 460 | const char *zComment = db_column_text(&q, 2); |
| 251 | 461 | const char *zUser = db_column_text(&q, 3); |
| 252 | 462 | const char *zBranch = db_column_text(&q, 4); |
| 253 | 463 | char *zBr; |
| 464 | + char *zMark; | |
| 254 | 465 | |
| 255 | 466 | bag_insert(&vers, ckinId); |
| 256 | 467 | db_bind_int(&q2, ":rid", ckinId); |
| 257 | 468 | db_step(&q2); |
| 258 | 469 | db_reset(&q2); |
| @@ -259,11 +470,13 @@ | ||
| 259 | 470 | if( zBranch==0 ) zBranch = "trunk"; |
| 260 | 471 | zBr = mprintf("%s", zBranch); |
| 261 | 472 | for(i=0; zBr[i]; i++){ |
| 262 | 473 | if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_'; |
| 263 | 474 | } |
| 264 | - printf("commit refs/heads/%s\nmark :%d\n", zBr, COMMITMARK(ckinId)); | |
| 475 | + zMark = mark_name_from_rid(ckinId); | |
| 476 | + printf("commit refs/heads/%s\nmark %s\n", zBr, zMark); | |
| 477 | + free(zMark); | |
| 265 | 478 | free(zBr); |
| 266 | 479 | printf("committer"); |
| 267 | 480 | print_person(zUser); |
| 268 | 481 | printf(" %s +0000\n", zSecondsSince1970); |
| 269 | 482 | if( zComment==0 ) zComment = "null comment"; |
| @@ -273,19 +486,24 @@ | ||
| 273 | 486 | " WHERE cid=%d AND isprim" |
| 274 | 487 | " AND pid IN (SELECT objid FROM event)", |
| 275 | 488 | ckinId |
| 276 | 489 | ); |
| 277 | 490 | if( db_step(&q3) == SQLITE_ROW ){ |
| 278 | - printf("from :%d\n", COMMITMARK(db_column_int(&q3, 0))); | |
| 491 | + int pid = db_column_int(&q3, 0); | |
| 492 | + zMark = mark_name_from_rid(pid); | |
| 493 | + printf("from %s\n", zMark); | |
| 494 | + free(zMark); | |
| 279 | 495 | db_prepare(&q4, |
| 280 | 496 | "SELECT pid FROM plink" |
| 281 | 497 | " WHERE cid=%d AND NOT isprim" |
| 282 | 498 | " AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)" |
| 283 | 499 | " ORDER BY pid", |
| 284 | 500 | ckinId); |
| 285 | 501 | while( db_step(&q4)==SQLITE_ROW ){ |
| 286 | - printf("merge :%d\n", COMMITMARK(db_column_int(&q4,0))); | |
| 502 | + zMark = mark_name_from_rid(db_column_int(&q4, 0)); | |
| 503 | + printf("merge %s\n", zMark); | |
| 504 | + free(zMark); | |
| 287 | 505 | } |
| 288 | 506 | db_finalize(&q4); |
| 289 | 507 | }else{ |
| 290 | 508 | printf("deleteall\n"); |
| 291 | 509 | } |
| @@ -316,11 +534,10 @@ | ||
| 316 | 534 | db_finalize(&q3); |
| 317 | 535 | printf("\n"); |
| 318 | 536 | } |
| 319 | 537 | db_finalize(&q2); |
| 320 | 538 | db_finalize(&q); |
| 321 | - bag_clear(&blobs); | |
| 322 | 539 | manifest_cache_clear(); |
| 323 | 540 | |
| 324 | 541 | |
| 325 | 542 | /* Output tags */ |
| 326 | 543 | db_prepare(&q, |
| @@ -345,28 +562,20 @@ | ||
| 345 | 562 | printf("tagger <tagger> %s +0000\n", zSecSince1970); |
| 346 | 563 | printf("data 0\n"); |
| 347 | 564 | fossil_free(zEncoded); |
| 348 | 565 | } |
| 349 | 566 | db_finalize(&q); |
| 350 | - bag_clear(&vers); | |
| 351 | 567 | |
| 352 | 568 | if( markfile_out!=0 ){ |
| 353 | 569 | FILE *f; |
| 354 | 570 | f = fossil_fopen(markfile_out, "w"); |
| 355 | 571 | if( f == 0 ){ |
| 356 | 572 | fossil_fatal("cannot open %s for writing", markfile_out); |
| 357 | 573 | } |
| 358 | - db_prepare(&q, "SELECT rid FROM oldblob"); | |
| 359 | - while( db_step(&q)==SQLITE_ROW ){ | |
| 360 | - fprintf(f, "b%d\n", db_column_int(&q, 0)); | |
| 361 | - } | |
| 362 | - db_finalize(&q); | |
| 363 | - db_prepare(&q, "SELECT rid FROM oldcommit"); | |
| 364 | - while( db_step(&q)==SQLITE_ROW ){ | |
| 365 | - fprintf(f, "c%d\n", db_column_int(&q, 0)); | |
| 366 | - } | |
| 367 | - db_finalize(&q); | |
| 574 | + export_marks(f, &blobs, &vers); | |
| 368 | 575 | if( ferror(f)!=0 || fclose(f)!=0 ) { |
| 369 | 576 | fossil_fatal("error while writing %s", markfile_out); |
| 370 | 577 | } |
| 371 | 578 | } |
| 579 | + bag_clear(&blobs); | |
| 580 | + bag_clear(&vers); | |
| 372 | 581 | } |
| 373 | 582 |
| --- src/export.c | |
| +++ src/export.c | |
| @@ -19,10 +19,28 @@ | |
| 19 | */ |
| 20 | #include "config.h" |
| 21 | #include "export.h" |
| 22 | #include <assert.h> |
| 23 | |
| 24 | /* |
| 25 | ** Output a "committer" record for the given user. |
| 26 | */ |
| 27 | static void print_person(const char *zUser){ |
| 28 | static Stmt q; |
| @@ -96,10 +114,197 @@ | |
| 96 | db_reset(&q); |
| 97 | } |
| 98 | |
| 99 | #define BLOBMARK(rid) ((rid) * 2) |
| 100 | #define COMMITMARK(rid) ((rid) * 2 + 1) |
| 101 | |
| 102 | /* |
| 103 | ** COMMAND: export |
| 104 | ** |
| 105 | ** Usage: %fossil export --git ?OPTIONS? ?REPOSITORY? |
| @@ -147,35 +352,40 @@ | |
| 147 | verify_all_options(); |
| 148 | if( g.argc!=2 && g.argc!=3 ){ usage("--git ?REPOSITORY?"); } |
| 149 | |
| 150 | db_multi_exec("CREATE TEMPORARY TABLE oldblob(rid INTEGER PRIMARY KEY)"); |
| 151 | db_multi_exec("CREATE TEMPORARY TABLE oldcommit(rid INTEGER PRIMARY KEY)"); |
| 152 | if( markfile_in!=0 ){ |
| 153 | Stmt qb,qc; |
| 154 | char line[100]; |
| 155 | FILE *f; |
| 156 | |
| 157 | f = fossil_fopen(markfile_in, "r"); |
| 158 | if( f==0 ){ |
| 159 | fossil_fatal("cannot open %s for reading", markfile_in); |
| 160 | } |
| 161 | db_prepare(&qb, "INSERT OR IGNORE INTO oldblob VALUES (:rid)"); |
| 162 | db_prepare(&qc, "INSERT OR IGNORE INTO oldcommit VALUES (:rid)"); |
| 163 | while( fgets(line, sizeof(line), f)!=0 ){ |
| 164 | if( *line == 'b' ){ |
| 165 | db_bind_text(&qb, ":rid", line + 1); |
| 166 | db_step(&qb); |
| 167 | db_reset(&qb); |
| 168 | bag_insert(&blobs, atoi(line + 1)); |
| 169 | }else if( *line == 'c' ){ |
| 170 | db_bind_text(&qc, ":rid", line + 1); |
| 171 | db_step(&qc); |
| 172 | db_reset(&qc); |
| 173 | bag_insert(&vers, atoi(line + 1)); |
| 174 | }else{ |
| 175 | fossil_fatal("bad input from %s: %s", markfile_in, line); |
| 176 | } |
| 177 | } |
| 178 | db_finalize(&qb); |
| 179 | db_finalize(&qc); |
| 180 | fclose(f); |
| 181 | } |
| @@ -249,10 +459,11 @@ | |
| 249 | int ckinId = db_column_int(&q, 1); |
| 250 | const char *zComment = db_column_text(&q, 2); |
| 251 | const char *zUser = db_column_text(&q, 3); |
| 252 | const char *zBranch = db_column_text(&q, 4); |
| 253 | char *zBr; |
| 254 | |
| 255 | bag_insert(&vers, ckinId); |
| 256 | db_bind_int(&q2, ":rid", ckinId); |
| 257 | db_step(&q2); |
| 258 | db_reset(&q2); |
| @@ -259,11 +470,13 @@ | |
| 259 | if( zBranch==0 ) zBranch = "trunk"; |
| 260 | zBr = mprintf("%s", zBranch); |
| 261 | for(i=0; zBr[i]; i++){ |
| 262 | if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_'; |
| 263 | } |
| 264 | printf("commit refs/heads/%s\nmark :%d\n", zBr, COMMITMARK(ckinId)); |
| 265 | free(zBr); |
| 266 | printf("committer"); |
| 267 | print_person(zUser); |
| 268 | printf(" %s +0000\n", zSecondsSince1970); |
| 269 | if( zComment==0 ) zComment = "null comment"; |
| @@ -273,19 +486,24 @@ | |
| 273 | " WHERE cid=%d AND isprim" |
| 274 | " AND pid IN (SELECT objid FROM event)", |
| 275 | ckinId |
| 276 | ); |
| 277 | if( db_step(&q3) == SQLITE_ROW ){ |
| 278 | printf("from :%d\n", COMMITMARK(db_column_int(&q3, 0))); |
| 279 | db_prepare(&q4, |
| 280 | "SELECT pid FROM plink" |
| 281 | " WHERE cid=%d AND NOT isprim" |
| 282 | " AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)" |
| 283 | " ORDER BY pid", |
| 284 | ckinId); |
| 285 | while( db_step(&q4)==SQLITE_ROW ){ |
| 286 | printf("merge :%d\n", COMMITMARK(db_column_int(&q4,0))); |
| 287 | } |
| 288 | db_finalize(&q4); |
| 289 | }else{ |
| 290 | printf("deleteall\n"); |
| 291 | } |
| @@ -316,11 +534,10 @@ | |
| 316 | db_finalize(&q3); |
| 317 | printf("\n"); |
| 318 | } |
| 319 | db_finalize(&q2); |
| 320 | db_finalize(&q); |
| 321 | bag_clear(&blobs); |
| 322 | manifest_cache_clear(); |
| 323 | |
| 324 | |
| 325 | /* Output tags */ |
| 326 | db_prepare(&q, |
| @@ -345,28 +562,20 @@ | |
| 345 | printf("tagger <tagger> %s +0000\n", zSecSince1970); |
| 346 | printf("data 0\n"); |
| 347 | fossil_free(zEncoded); |
| 348 | } |
| 349 | db_finalize(&q); |
| 350 | bag_clear(&vers); |
| 351 | |
| 352 | if( markfile_out!=0 ){ |
| 353 | FILE *f; |
| 354 | f = fossil_fopen(markfile_out, "w"); |
| 355 | if( f == 0 ){ |
| 356 | fossil_fatal("cannot open %s for writing", markfile_out); |
| 357 | } |
| 358 | db_prepare(&q, "SELECT rid FROM oldblob"); |
| 359 | while( db_step(&q)==SQLITE_ROW ){ |
| 360 | fprintf(f, "b%d\n", db_column_int(&q, 0)); |
| 361 | } |
| 362 | db_finalize(&q); |
| 363 | db_prepare(&q, "SELECT rid FROM oldcommit"); |
| 364 | while( db_step(&q)==SQLITE_ROW ){ |
| 365 | fprintf(f, "c%d\n", db_column_int(&q, 0)); |
| 366 | } |
| 367 | db_finalize(&q); |
| 368 | if( ferror(f)!=0 || fclose(f)!=0 ) { |
| 369 | fossil_fatal("error while writing %s", markfile_out); |
| 370 | } |
| 371 | } |
| 372 | } |
| 373 |
| --- src/export.c | |
| +++ src/export.c | |
| @@ -19,10 +19,28 @@ | |
| 19 | */ |
| 20 | #include "config.h" |
| 21 | #include "export.h" |
| 22 | #include <assert.h> |
| 23 | |
| 24 | #if INTERFACE |
| 25 | /* |
| 26 | ** struct mark_t |
| 27 | ** holds information for translating between git commits |
| 28 | ** and fossil commits. |
| 29 | ** -git_name: This is the mark name that identifies the commit to git. |
| 30 | ** It will always begin with a ':'. |
| 31 | ** -rid: The unique object ID that identifies this commit within the |
| 32 | ** repository database. |
| 33 | ** -uuid: The SHA-1 of artifact corresponding to rid. |
| 34 | */ |
| 35 | struct mark_t{ |
| 36 | char *name; |
| 37 | int rid; |
| 38 | char uuid[41]; |
| 39 | }; |
| 40 | #endif |
| 41 | |
| 42 | /* |
| 43 | ** Output a "committer" record for the given user. |
| 44 | */ |
| 45 | static void print_person(const char *zUser){ |
| 46 | static Stmt q; |
| @@ -96,10 +114,197 @@ | |
| 114 | db_reset(&q); |
| 115 | } |
| 116 | |
| 117 | #define BLOBMARK(rid) ((rid) * 2) |
| 118 | #define COMMITMARK(rid) ((rid) * 2 + 1) |
| 119 | |
| 120 | /* |
| 121 | ** insert_commit_xref() |
| 122 | ** Insert a new (mark,rid,uuid) entry into the 'xmark' table. |
| 123 | ** zName and zUuid must be non-null and must point to NULL-terminated strings. |
| 124 | */ |
| 125 | void insert_commit_xref(int rid, const char *zName, const char *zUuid){ |
| 126 | db_multi_exec( |
| 127 | "INSERT OR IGNORE INTO xmark(tname, trid, tuuid)" |
| 128 | "VALUES(%Q,%d,%Q)", |
| 129 | zName, rid, zUuid |
| 130 | ); |
| 131 | } |
| 132 | |
| 133 | /* |
| 134 | ** create_mark() |
| 135 | ** Create a new (mark,rid,uuid) entry for the given rid in the 'xmark' table, |
| 136 | ** and return that information as a struct mark_t in *mark. |
| 137 | ** This function returns -1 in the case where 'rid' does not exist, otherwise |
| 138 | ** it returns 0. |
| 139 | ** mark->name is dynamically allocated and is owned by the caller upon return. |
| 140 | */ |
| 141 | int create_mark(int rid, struct mark_t *mark){ |
| 142 | char sid[13]; |
| 143 | char *zUuid = rid_to_uuid(rid); |
| 144 | if(!zUuid){ |
| 145 | fossil_trace("Undefined rid=%d\n", rid); |
| 146 | return -1; |
| 147 | } |
| 148 | mark->rid = rid; |
| 149 | sprintf(sid, ":%d", COMMITMARK(rid)); |
| 150 | mark->name = fossil_strdup(sid); |
| 151 | strcpy(mark->uuid, zUuid); |
| 152 | free(zUuid); |
| 153 | insert_commit_xref(mark->rid, mark->name, mark->uuid); |
| 154 | return 0; |
| 155 | } |
| 156 | |
| 157 | /* |
| 158 | ** mark_name_from_rid() |
| 159 | ** Find the mark associated with the given rid. Mark names always start |
| 160 | ** with ':', and are pulled from the 'xmark' temporary table. |
| 161 | ** This function returns NULL if the rid does not exist in the 'xmark' table. |
| 162 | ** Otherwise, it returns the name of the mark, which is dynamically allocated |
| 163 | ** and is owned by the caller of this function. |
| 164 | */ |
| 165 | char * mark_name_from_rid(int rid){ |
| 166 | char *zMark = db_text(0, "SELECT tname FROM xmark WHERE trid=%d", rid); |
| 167 | if(zMark==NULL){ |
| 168 | struct mark_t mark; |
| 169 | if(create_mark(rid, &mark)==0){ |
| 170 | zMark = mark.name; |
| 171 | }else{ |
| 172 | return NULL; |
| 173 | } |
| 174 | } |
| 175 | return zMark; |
| 176 | } |
| 177 | |
| 178 | /* |
| 179 | ** parse_mark() |
| 180 | ** Create a new (mark,rid,uuid) entry in the 'xmark' table given a line |
| 181 | ** from a marks file. Return the cross-ref information as a struct mark_t |
| 182 | ** in *mark. |
| 183 | ** This function returns -1 in the case that the line is blank, malformed, or |
| 184 | ** the rid/uuid named in 'line' does not match what is in the repository |
| 185 | ** database. Otherwise, 0 is returned. |
| 186 | ** mark->name is dynamically allocated, and owned by the caller. |
| 187 | */ |
| 188 | int parse_mark(char *line, struct mark_t *mark){ |
| 189 | char *cur_tok; |
| 190 | cur_tok = strtok(line, " \t"); |
| 191 | if(!cur_tok||strlen(cur_tok)<2){ |
| 192 | return -1; |
| 193 | } |
| 194 | mark->rid = atoi(&cur_tok[1]); |
| 195 | if(cur_tok[0]!='c'){ |
| 196 | /* This is probably a blob mark */ |
| 197 | mark->name = NULL; |
| 198 | return 0; |
| 199 | } |
| 200 | |
| 201 | cur_tok = strtok(NULL, " \t"); |
| 202 | if(!cur_tok){ |
| 203 | /* This mark was generated by an older version of Fossil and doesn't |
| 204 | ** include the mark name and uuid. create_mark() will name the new mark |
| 205 | ** exactly as it was when exported to git, so that we should have a |
| 206 | ** valid mapping from git sha1<->mark name<->fossil sha1. */ |
| 207 | return create_mark(mark->rid, mark); |
| 208 | }else{ |
| 209 | mark->name = fossil_strdup(cur_tok); |
| 210 | } |
| 211 | |
| 212 | cur_tok = strtok(NULL, "\n"); |
| 213 | if(!cur_tok||strlen(cur_tok)!=40){ |
| 214 | free(mark->name); |
| 215 | fossil_trace("Invalid SHA-1 in marks file: %s\n", cur_tok); |
| 216 | return -1; |
| 217 | }else{ |
| 218 | strcpy(mark->uuid, cur_tok); |
| 219 | } |
| 220 | |
| 221 | /* make sure that rid corresponds to UUID */ |
| 222 | if(fast_uuid_to_rid(mark->uuid)!=mark->rid){ |
| 223 | free(mark->name); |
| 224 | fossil_trace("Non-existent SHA-1 in marks file: %s\n", mark->uuid); |
| 225 | return -1; |
| 226 | } |
| 227 | |
| 228 | /* insert a cross-ref into the 'xmark' table */ |
| 229 | insert_commit_xref(mark->rid, mark->name, mark->uuid); |
| 230 | return 0; |
| 231 | } |
| 232 | |
| 233 | /* |
| 234 | ** import_marks() |
| 235 | ** Import the marks specified in file 'f' into the 'xmark' table. |
| 236 | ** If 'blobs' is non-null, insert all blob marks into it. |
| 237 | ** If 'vers' is non-null, insert all commit marks into it. |
| 238 | ** Each line in the file must be at most 100 characters in length. This |
| 239 | ** seems like a reasonable maximum for a 40-character uuid, and 1-13 |
| 240 | ** character rid. |
| 241 | ** The function returns -1 if any of the lines in file 'f' are malformed, |
| 242 | ** or the rid/uuid information doesn't match what is in the repository |
| 243 | ** database. Otherwise, 0 is returned. |
| 244 | */ |
| 245 | int import_marks(FILE* f, Bag *blobs, Bag *vers){ |
| 246 | char line[101]; |
| 247 | while(fgets(line, sizeof(line), f)){ |
| 248 | struct mark_t mark; |
| 249 | if(strlen(line)==100&&line[99]!='\n'){ |
| 250 | /* line too long */ |
| 251 | return -1; |
| 252 | } |
| 253 | if( parse_mark(line, &mark)<0 ){ |
| 254 | return -1; |
| 255 | }else if( line[0]=='b' ){ |
| 256 | /* Don't import blob marks into 'xmark' table--git doesn't use them, |
| 257 | ** so they need to be left free for git to reuse. */ |
| 258 | if(blobs!=NULL){ |
| 259 | bag_insert(blobs, mark.rid); |
| 260 | } |
| 261 | }else if( vers!=NULL ){ |
| 262 | bag_insert(vers, mark.rid); |
| 263 | } |
| 264 | free(mark.name); |
| 265 | } |
| 266 | return 0; |
| 267 | } |
| 268 | |
| 269 | /* |
| 270 | ** If 'blobs' is non-null, it must point to a Bag of blob rids to be |
| 271 | ** written to disk. Blob rids are written as 'b<rid>'. |
| 272 | ** If 'vers' is non-null, it must point to a Bag of commit rids to be |
| 273 | ** written to disk. Commit rids are written as 'c<rid> :<mark> <uuid>'. |
| 274 | ** All commit (mark,rid,uuid) tuples are stored in 'xmark' table. |
| 275 | ** This function does not fail, but may produce errors if a uuid cannot |
| 276 | ** be found for an rid in 'vers'. |
| 277 | */ |
| 278 | void export_marks(FILE* f, Bag *blobs, Bag *vers){ |
| 279 | int rid; |
| 280 | if( blobs!=NULL ){ |
| 281 | rid = bag_first(blobs); |
| 282 | if(rid!=0){ |
| 283 | do{ |
| 284 | fprintf(f, "b%d\n", rid); |
| 285 | }while((rid = bag_next(blobs, rid))!=0); |
| 286 | } |
| 287 | } |
| 288 | if( vers!=NULL ){ |
| 289 | rid = bag_first(vers); |
| 290 | if( rid!=0 ){ |
| 291 | do{ |
| 292 | char *zUuid = rid_to_uuid(rid); |
| 293 | char *zMark; |
| 294 | if(zUuid==NULL){ |
| 295 | fossil_trace("No uuid matching rid=%d when exporting marks\n", rid); |
| 296 | continue; |
| 297 | } |
| 298 | zMark = mark_name_from_rid(rid); |
| 299 | fprintf(f, "c%d %s %s\n", rid, zMark, zUuid); |
| 300 | free(zMark); |
| 301 | free(zUuid); |
| 302 | }while( (rid = bag_next(vers, rid))!=0 ); |
| 303 | } |
| 304 | } |
| 305 | } |
| 306 | |
| 307 | /* |
| 308 | ** COMMAND: export |
| 309 | ** |
| 310 | ** Usage: %fossil export --git ?OPTIONS? ?REPOSITORY? |
| @@ -147,35 +352,40 @@ | |
| 352 | verify_all_options(); |
| 353 | if( g.argc!=2 && g.argc!=3 ){ usage("--git ?REPOSITORY?"); } |
| 354 | |
| 355 | db_multi_exec("CREATE TEMPORARY TABLE oldblob(rid INTEGER PRIMARY KEY)"); |
| 356 | db_multi_exec("CREATE TEMPORARY TABLE oldcommit(rid INTEGER PRIMARY KEY)"); |
| 357 | db_multi_exec("CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT)"); |
| 358 | if( markfile_in!=0 ){ |
| 359 | Stmt qb,qc; |
| 360 | FILE *f; |
| 361 | int rid; |
| 362 | |
| 363 | f = fossil_fopen(markfile_in, "r"); |
| 364 | if( f==0 ){ |
| 365 | fossil_fatal("cannot open %s for reading", markfile_in); |
| 366 | } |
| 367 | if(import_marks(f, &blobs, &vers)<0){ |
| 368 | fossil_fatal("error importing marks from file: %s\n", markfile_in); |
| 369 | } |
| 370 | db_prepare(&qb, "INSERT OR IGNORE INTO oldblob VALUES (:rid)"); |
| 371 | db_prepare(&qc, "INSERT OR IGNORE INTO oldcommit VALUES (:rid)"); |
| 372 | rid = bag_first(&blobs); |
| 373 | if(rid!=0){ |
| 374 | do{ |
| 375 | db_bind_int(&qb, ":rid", rid); |
| 376 | db_step(&qb); |
| 377 | db_reset(&qb); |
| 378 | }while((rid = bag_next(&blobs, rid))!=0); |
| 379 | } |
| 380 | rid = bag_first(&vers); |
| 381 | if(rid!=0){ |
| 382 | do{ |
| 383 | db_bind_int(&qc, ":rid", rid); |
| 384 | db_step(&qc); |
| 385 | db_reset(&qc); |
| 386 | }while((rid = bag_next(&vers, rid))!=0); |
| 387 | } |
| 388 | db_finalize(&qb); |
| 389 | db_finalize(&qc); |
| 390 | fclose(f); |
| 391 | } |
| @@ -249,10 +459,11 @@ | |
| 459 | int ckinId = db_column_int(&q, 1); |
| 460 | const char *zComment = db_column_text(&q, 2); |
| 461 | const char *zUser = db_column_text(&q, 3); |
| 462 | const char *zBranch = db_column_text(&q, 4); |
| 463 | char *zBr; |
| 464 | char *zMark; |
| 465 | |
| 466 | bag_insert(&vers, ckinId); |
| 467 | db_bind_int(&q2, ":rid", ckinId); |
| 468 | db_step(&q2); |
| 469 | db_reset(&q2); |
| @@ -259,11 +470,13 @@ | |
| 470 | if( zBranch==0 ) zBranch = "trunk"; |
| 471 | zBr = mprintf("%s", zBranch); |
| 472 | for(i=0; zBr[i]; i++){ |
| 473 | if( !fossil_isalnum(zBr[i]) ) zBr[i] = '_'; |
| 474 | } |
| 475 | zMark = mark_name_from_rid(ckinId); |
| 476 | printf("commit refs/heads/%s\nmark %s\n", zBr, zMark); |
| 477 | free(zMark); |
| 478 | free(zBr); |
| 479 | printf("committer"); |
| 480 | print_person(zUser); |
| 481 | printf(" %s +0000\n", zSecondsSince1970); |
| 482 | if( zComment==0 ) zComment = "null comment"; |
| @@ -273,19 +486,24 @@ | |
| 486 | " WHERE cid=%d AND isprim" |
| 487 | " AND pid IN (SELECT objid FROM event)", |
| 488 | ckinId |
| 489 | ); |
| 490 | if( db_step(&q3) == SQLITE_ROW ){ |
| 491 | int pid = db_column_int(&q3, 0); |
| 492 | zMark = mark_name_from_rid(pid); |
| 493 | printf("from %s\n", zMark); |
| 494 | free(zMark); |
| 495 | db_prepare(&q4, |
| 496 | "SELECT pid FROM plink" |
| 497 | " WHERE cid=%d AND NOT isprim" |
| 498 | " AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)" |
| 499 | " ORDER BY pid", |
| 500 | ckinId); |
| 501 | while( db_step(&q4)==SQLITE_ROW ){ |
| 502 | zMark = mark_name_from_rid(db_column_int(&q4, 0)); |
| 503 | printf("merge %s\n", zMark); |
| 504 | free(zMark); |
| 505 | } |
| 506 | db_finalize(&q4); |
| 507 | }else{ |
| 508 | printf("deleteall\n"); |
| 509 | } |
| @@ -316,11 +534,10 @@ | |
| 534 | db_finalize(&q3); |
| 535 | printf("\n"); |
| 536 | } |
| 537 | db_finalize(&q2); |
| 538 | db_finalize(&q); |
| 539 | manifest_cache_clear(); |
| 540 | |
| 541 | |
| 542 | /* Output tags */ |
| 543 | db_prepare(&q, |
| @@ -345,28 +562,20 @@ | |
| 562 | printf("tagger <tagger> %s +0000\n", zSecSince1970); |
| 563 | printf("data 0\n"); |
| 564 | fossil_free(zEncoded); |
| 565 | } |
| 566 | db_finalize(&q); |
| 567 | |
| 568 | if( markfile_out!=0 ){ |
| 569 | FILE *f; |
| 570 | f = fossil_fopen(markfile_out, "w"); |
| 571 | if( f == 0 ){ |
| 572 | fossil_fatal("cannot open %s for writing", markfile_out); |
| 573 | } |
| 574 | export_marks(f, &blobs, &vers); |
| 575 | if( ferror(f)!=0 || fclose(f)!=0 ) { |
| 576 | fossil_fatal("error while writing %s", markfile_out); |
| 577 | } |
| 578 | } |
| 579 | bag_clear(&blobs); |
| 580 | bag_clear(&vers); |
| 581 | } |
| 582 |
+6
-7
| --- src/foci.c | ||
| +++ src/foci.c | ||
| @@ -13,23 +13,22 @@ | ||
| 13 | 13 | ** [email protected] |
| 14 | 14 | ** http://www.hwaci.com/drh/ |
| 15 | 15 | ** |
| 16 | 16 | ******************************************************************************* |
| 17 | 17 | ** |
| 18 | -** This routine implements an SQLite virtual table that gives all of the | |
| 19 | -** files associated with a single check-in. | |
| 18 | +** This routine implements eponymous virtual table for SQLite that gives | |
| 19 | +** all of the files associated with a single check-in. | |
| 20 | 20 | ** |
| 21 | -** The filename "foci" is short for "Files of Check-in". | |
| 21 | +** The source code filename "foci" is short for "Files of Check-in". | |
| 22 | 22 | ** |
| 23 | 23 | ** Usage example: |
| 24 | 24 | ** |
| 25 | -** CREATE VIRTUAL TABLE temp.foci USING files_of_checkin; | |
| 26 | -** -- ^^^^--- important! | |
| 27 | -** SELECT * FROM foci WHERE checkinID=symbolic_name_to_rid('trunk'); | |
| 25 | +** SELECT * FROM files_of_checkin | |
| 26 | +** WHERE checkinID=symbolic_name_to_rid('trunk'); | |
| 28 | 27 | ** |
| 29 | 28 | ** The symbolic_name_to_rid('trunk') function finds the BLOB.RID value |
| 30 | -** corresponding to the 'trunk' tag. Then the files_of_checkin virtual table | |
| 29 | +** corresponding to the 'trunk' tag. Then the foci virtual table | |
| 31 | 30 | ** decodes the manifest defined by that BLOB and returns all files described |
| 32 | 31 | ** by that manifest. The "schema" for the temp.foci table is: |
| 33 | 32 | ** |
| 34 | 33 | ** CREATE TABLE files_of_checkin( |
| 35 | 34 | ** checkinID INTEGER, -- RID for the check-in manifest |
| 36 | 35 |
| --- src/foci.c | |
| +++ src/foci.c | |
| @@ -13,23 +13,22 @@ | |
| 13 | ** [email protected] |
| 14 | ** http://www.hwaci.com/drh/ |
| 15 | ** |
| 16 | ******************************************************************************* |
| 17 | ** |
| 18 | ** This routine implements an SQLite virtual table that gives all of the |
| 19 | ** files associated with a single check-in. |
| 20 | ** |
| 21 | ** The filename "foci" is short for "Files of Check-in". |
| 22 | ** |
| 23 | ** Usage example: |
| 24 | ** |
| 25 | ** CREATE VIRTUAL TABLE temp.foci USING files_of_checkin; |
| 26 | ** -- ^^^^--- important! |
| 27 | ** SELECT * FROM foci WHERE checkinID=symbolic_name_to_rid('trunk'); |
| 28 | ** |
| 29 | ** The symbolic_name_to_rid('trunk') function finds the BLOB.RID value |
| 30 | ** corresponding to the 'trunk' tag. Then the files_of_checkin virtual table |
| 31 | ** decodes the manifest defined by that BLOB and returns all files described |
| 32 | ** by that manifest. The "schema" for the temp.foci table is: |
| 33 | ** |
| 34 | ** CREATE TABLE files_of_checkin( |
| 35 | ** checkinID INTEGER, -- RID for the check-in manifest |
| 36 |
| --- src/foci.c | |
| +++ src/foci.c | |
| @@ -13,23 +13,22 @@ | |
| 13 | ** [email protected] |
| 14 | ** http://www.hwaci.com/drh/ |
| 15 | ** |
| 16 | ******************************************************************************* |
| 17 | ** |
| 18 | ** This routine implements eponymous virtual table for SQLite that gives |
| 19 | ** all of the files associated with a single check-in. |
| 20 | ** |
| 21 | ** The source code filename "foci" is short for "Files of Check-in". |
| 22 | ** |
| 23 | ** Usage example: |
| 24 | ** |
| 25 | ** SELECT * FROM files_of_checkin |
| 26 | ** WHERE checkinID=symbolic_name_to_rid('trunk'); |
| 27 | ** |
| 28 | ** The symbolic_name_to_rid('trunk') function finds the BLOB.RID value |
| 29 | ** corresponding to the 'trunk' tag. Then the foci virtual table |
| 30 | ** decodes the manifest defined by that BLOB and returns all files described |
| 31 | ** by that manifest. The "schema" for the temp.foci table is: |
| 32 | ** |
| 33 | ** CREATE TABLE files_of_checkin( |
| 34 | ** checkinID INTEGER, -- RID for the check-in manifest |
| 35 |
+50
-2
| --- src/import.c | ||
| +++ src/import.c | ||
| @@ -1537,10 +1537,13 @@ | ||
| 1537 | 1537 | ** data is read from standard input. |
| 1538 | 1538 | ** |
| 1539 | 1539 | ** The following formats are currently understood by this command |
| 1540 | 1540 | ** |
| 1541 | 1541 | ** --git Import from the git-fast-export file format (default) |
| 1542 | +** Options: | |
| 1543 | +** --import-marks FILE Restore marks table from FILE | |
| 1544 | +** --export-marks FILE Save marks table to FILE | |
| 1542 | 1545 | ** |
| 1543 | 1546 | ** --svn Import from the svnadmin-dump file format. The default |
| 1544 | 1547 | ** behaviour (unless overridden by --flat) is to treat 3 |
| 1545 | 1548 | ** folders in the SVN root as special, following the |
| 1546 | 1549 | ** common layout of SVN repositories. These are (by |
| @@ -1585,19 +1588,24 @@ | ||
| 1585 | 1588 | char *zPassword; |
| 1586 | 1589 | FILE *pIn; |
| 1587 | 1590 | Stmt q; |
| 1588 | 1591 | int forceFlag = find_option("force", "f", 0)!=0; |
| 1589 | 1592 | int svnFlag = find_option("svn", 0, 0)!=0; |
| 1593 | + int gitFlag = find_option("git", 0, 0)!=0; | |
| 1590 | 1594 | int omitRebuild = find_option("no-rebuild",0,0)!=0; |
| 1591 | 1595 | int omitVacuum = find_option("no-vacuum",0,0)!=0; |
| 1592 | 1596 | |
| 1593 | 1597 | /* Options common to all input formats */ |
| 1594 | 1598 | int incrFlag = find_option("incremental", "i", 0)!=0; |
| 1595 | 1599 | |
| 1596 | 1600 | /* Options for --svn only */ |
| 1597 | 1601 | const char *zBase=""; |
| 1598 | 1602 | int flatFlag=0; |
| 1603 | + | |
| 1604 | + /* Options for --git only */ | |
| 1605 | + const char *markfile_in; | |
| 1606 | + const char *markfile_out; | |
| 1599 | 1607 | |
| 1600 | 1608 | /* Interpret --rename-* options. Use a table to avoid code duplication. */ |
| 1601 | 1609 | const struct { |
| 1602 | 1610 | const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf; |
| 1603 | 1611 | int format; /* 1=git, 2=svn, 3=any */ |
| @@ -1650,12 +1658,13 @@ | ||
| 1650 | 1658 | gsvn.zTrunk = find_option("trunk", 0, 1); |
| 1651 | 1659 | gsvn.zBranches = find_option("branches", 0, 1); |
| 1652 | 1660 | gsvn.zTags = find_option("tags", 0, 1); |
| 1653 | 1661 | gsvn.revFlag = find_option("rev-tags", 0, 0) |
| 1654 | 1662 | || (incrFlag && !find_option("no-rev-tags", 0, 0)); |
| 1655 | - }else{ | |
| 1656 | - find_option("git",0,0); /* Skip the --git option for now */ | |
| 1663 | + }else if( gitFlag ){ | |
| 1664 | + markfile_in = find_option("import-marks", 0, 1); | |
| 1665 | + markfile_out = find_option("export-marks", 0, 1); | |
| 1657 | 1666 | } |
| 1658 | 1667 | verify_all_options(); |
| 1659 | 1668 | |
| 1660 | 1669 | if( g.argc!=3 && g.argc!=4 ){ |
| 1661 | 1670 | usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?"); |
| @@ -1729,10 +1738,13 @@ | ||
| 1729 | 1738 | gsvn.lenTags++; |
| 1730 | 1739 | } |
| 1731 | 1740 | } |
| 1732 | 1741 | svn_dump_import(pIn); |
| 1733 | 1742 | }else{ |
| 1743 | + Bag blobs, vers; | |
| 1744 | + bag_init(&blobs); | |
| 1745 | + bag_init(&vers); | |
| 1734 | 1746 | /* The following temp-tables are used to hold information needed for |
| 1735 | 1747 | ** the import. |
| 1736 | 1748 | ** |
| 1737 | 1749 | ** The XMARK table provides a mapping from fast-import "marks" and symbols |
| 1738 | 1750 | ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). |
| @@ -1753,10 +1765,21 @@ | ||
| 1753 | 1765 | db_multi_exec( |
| 1754 | 1766 | "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" |
| 1755 | 1767 | "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" |
| 1756 | 1768 | "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" |
| 1757 | 1769 | ); |
| 1770 | + | |
| 1771 | + if(markfile_in){ | |
| 1772 | + FILE *f = fossil_fopen(markfile_in, "r"); | |
| 1773 | + if(!f){ | |
| 1774 | + fossil_fatal("cannot open %s for reading\n", markfile_in); | |
| 1775 | + } | |
| 1776 | + if(import_marks(f, &blobs, NULL)<0){ | |
| 1777 | + fossil_fatal("error importing marks from file: %s\n", markfile_in); | |
| 1778 | + } | |
| 1779 | + fclose(f); | |
| 1780 | + } | |
| 1758 | 1781 | |
| 1759 | 1782 | manifest_crosslink_begin(); |
| 1760 | 1783 | git_fast_import(pIn); |
| 1761 | 1784 | db_prepare(&q, "SELECT tcontent FROM xtag"); |
| 1762 | 1785 | while( db_step(&q)==SQLITE_ROW ){ |
| @@ -1764,10 +1787,35 @@ | ||
| 1764 | 1787 | db_ephemeral_blob(&q, 0, &record); |
| 1765 | 1788 | fast_insert_content(&record, 0, 0, 1); |
| 1766 | 1789 | import_reset(0); |
| 1767 | 1790 | } |
| 1768 | 1791 | db_finalize(&q); |
| 1792 | + if(markfile_out){ | |
| 1793 | + int rid; | |
| 1794 | + Stmt q_marks; | |
| 1795 | + FILE *f; | |
| 1796 | + db_prepare(&q_marks, "SELECT DISTINCT trid FROM xmark"); | |
| 1797 | + while( db_step(&q_marks)==SQLITE_ROW){ | |
| 1798 | + rid = db_column_int(&q_marks, 0); | |
| 1799 | + if(db_int(0, "SELECT count(objid) FROM event WHERE objid=%d AND type='ci'", rid)==0){ | |
| 1800 | + if(bag_find(&blobs, rid)==0){ | |
| 1801 | + bag_insert(&blobs, rid); | |
| 1802 | + } | |
| 1803 | + }else{ | |
| 1804 | + bag_insert(&vers, rid); | |
| 1805 | + } | |
| 1806 | + } | |
| 1807 | + db_finalize(&q_marks); | |
| 1808 | + f = fossil_fopen(markfile_out, "w"); | |
| 1809 | + if(!f){ | |
| 1810 | + fossil_fatal("cannot open %s for writing\n", markfile_out); | |
| 1811 | + } | |
| 1812 | + export_marks(f, &blobs, &vers); | |
| 1813 | + fclose(f); | |
| 1814 | + bag_clear(&blobs); | |
| 1815 | + bag_clear(&vers); | |
| 1816 | + } | |
| 1769 | 1817 | manifest_crosslink_end(MC_NONE); |
| 1770 | 1818 | } |
| 1771 | 1819 | |
| 1772 | 1820 | verify_cancel(); |
| 1773 | 1821 | db_end_transaction(0); |
| 1774 | 1822 |
| --- src/import.c | |
| +++ src/import.c | |
| @@ -1537,10 +1537,13 @@ | |
| 1537 | ** data is read from standard input. |
| 1538 | ** |
| 1539 | ** The following formats are currently understood by this command |
| 1540 | ** |
| 1541 | ** --git Import from the git-fast-export file format (default) |
| 1542 | ** |
| 1543 | ** --svn Import from the svnadmin-dump file format. The default |
| 1544 | ** behaviour (unless overridden by --flat) is to treat 3 |
| 1545 | ** folders in the SVN root as special, following the |
| 1546 | ** common layout of SVN repositories. These are (by |
| @@ -1585,19 +1588,24 @@ | |
| 1585 | char *zPassword; |
| 1586 | FILE *pIn; |
| 1587 | Stmt q; |
| 1588 | int forceFlag = find_option("force", "f", 0)!=0; |
| 1589 | int svnFlag = find_option("svn", 0, 0)!=0; |
| 1590 | int omitRebuild = find_option("no-rebuild",0,0)!=0; |
| 1591 | int omitVacuum = find_option("no-vacuum",0,0)!=0; |
| 1592 | |
| 1593 | /* Options common to all input formats */ |
| 1594 | int incrFlag = find_option("incremental", "i", 0)!=0; |
| 1595 | |
| 1596 | /* Options for --svn only */ |
| 1597 | const char *zBase=""; |
| 1598 | int flatFlag=0; |
| 1599 | |
| 1600 | /* Interpret --rename-* options. Use a table to avoid code duplication. */ |
| 1601 | const struct { |
| 1602 | const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf; |
| 1603 | int format; /* 1=git, 2=svn, 3=any */ |
| @@ -1650,12 +1658,13 @@ | |
| 1650 | gsvn.zTrunk = find_option("trunk", 0, 1); |
| 1651 | gsvn.zBranches = find_option("branches", 0, 1); |
| 1652 | gsvn.zTags = find_option("tags", 0, 1); |
| 1653 | gsvn.revFlag = find_option("rev-tags", 0, 0) |
| 1654 | || (incrFlag && !find_option("no-rev-tags", 0, 0)); |
| 1655 | }else{ |
| 1656 | find_option("git",0,0); /* Skip the --git option for now */ |
| 1657 | } |
| 1658 | verify_all_options(); |
| 1659 | |
| 1660 | if( g.argc!=3 && g.argc!=4 ){ |
| 1661 | usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?"); |
| @@ -1729,10 +1738,13 @@ | |
| 1729 | gsvn.lenTags++; |
| 1730 | } |
| 1731 | } |
| 1732 | svn_dump_import(pIn); |
| 1733 | }else{ |
| 1734 | /* The following temp-tables are used to hold information needed for |
| 1735 | ** the import. |
| 1736 | ** |
| 1737 | ** The XMARK table provides a mapping from fast-import "marks" and symbols |
| 1738 | ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). |
| @@ -1753,10 +1765,21 @@ | |
| 1753 | db_multi_exec( |
| 1754 | "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" |
| 1755 | "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" |
| 1756 | "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" |
| 1757 | ); |
| 1758 | |
| 1759 | manifest_crosslink_begin(); |
| 1760 | git_fast_import(pIn); |
| 1761 | db_prepare(&q, "SELECT tcontent FROM xtag"); |
| 1762 | while( db_step(&q)==SQLITE_ROW ){ |
| @@ -1764,10 +1787,35 @@ | |
| 1764 | db_ephemeral_blob(&q, 0, &record); |
| 1765 | fast_insert_content(&record, 0, 0, 1); |
| 1766 | import_reset(0); |
| 1767 | } |
| 1768 | db_finalize(&q); |
| 1769 | manifest_crosslink_end(MC_NONE); |
| 1770 | } |
| 1771 | |
| 1772 | verify_cancel(); |
| 1773 | db_end_transaction(0); |
| 1774 |
| --- src/import.c | |
| +++ src/import.c | |
| @@ -1537,10 +1537,13 @@ | |
| 1537 | ** data is read from standard input. |
| 1538 | ** |
| 1539 | ** The following formats are currently understood by this command |
| 1540 | ** |
| 1541 | ** --git Import from the git-fast-export file format (default) |
| 1542 | ** Options: |
| 1543 | ** --import-marks FILE Restore marks table from FILE |
| 1544 | ** --export-marks FILE Save marks table to FILE |
| 1545 | ** |
| 1546 | ** --svn Import from the svnadmin-dump file format. The default |
| 1547 | ** behaviour (unless overridden by --flat) is to treat 3 |
| 1548 | ** folders in the SVN root as special, following the |
| 1549 | ** common layout of SVN repositories. These are (by |
| @@ -1585,19 +1588,24 @@ | |
| 1588 | char *zPassword; |
| 1589 | FILE *pIn; |
| 1590 | Stmt q; |
| 1591 | int forceFlag = find_option("force", "f", 0)!=0; |
| 1592 | int svnFlag = find_option("svn", 0, 0)!=0; |
| 1593 | int gitFlag = find_option("git", 0, 0)!=0; |
| 1594 | int omitRebuild = find_option("no-rebuild",0,0)!=0; |
| 1595 | int omitVacuum = find_option("no-vacuum",0,0)!=0; |
| 1596 | |
| 1597 | /* Options common to all input formats */ |
| 1598 | int incrFlag = find_option("incremental", "i", 0)!=0; |
| 1599 | |
| 1600 | /* Options for --svn only */ |
| 1601 | const char *zBase=""; |
| 1602 | int flatFlag=0; |
| 1603 | |
| 1604 | /* Options for --git only */ |
| 1605 | const char *markfile_in; |
| 1606 | const char *markfile_out; |
| 1607 | |
| 1608 | /* Interpret --rename-* options. Use a table to avoid code duplication. */ |
| 1609 | const struct { |
| 1610 | const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf; |
| 1611 | int format; /* 1=git, 2=svn, 3=any */ |
| @@ -1650,12 +1658,13 @@ | |
| 1658 | gsvn.zTrunk = find_option("trunk", 0, 1); |
| 1659 | gsvn.zBranches = find_option("branches", 0, 1); |
| 1660 | gsvn.zTags = find_option("tags", 0, 1); |
| 1661 | gsvn.revFlag = find_option("rev-tags", 0, 0) |
| 1662 | || (incrFlag && !find_option("no-rev-tags", 0, 0)); |
| 1663 | }else if( gitFlag ){ |
| 1664 | markfile_in = find_option("import-marks", 0, 1); |
| 1665 | markfile_out = find_option("export-marks", 0, 1); |
| 1666 | } |
| 1667 | verify_all_options(); |
| 1668 | |
| 1669 | if( g.argc!=3 && g.argc!=4 ){ |
| 1670 | usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?"); |
| @@ -1729,10 +1738,13 @@ | |
| 1738 | gsvn.lenTags++; |
| 1739 | } |
| 1740 | } |
| 1741 | svn_dump_import(pIn); |
| 1742 | }else{ |
| 1743 | Bag blobs, vers; |
| 1744 | bag_init(&blobs); |
| 1745 | bag_init(&vers); |
| 1746 | /* The following temp-tables are used to hold information needed for |
| 1747 | ** the import. |
| 1748 | ** |
| 1749 | ** The XMARK table provides a mapping from fast-import "marks" and symbols |
| 1750 | ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). |
| @@ -1753,10 +1765,21 @@ | |
| 1765 | db_multi_exec( |
| 1766 | "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" |
| 1767 | "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" |
| 1768 | "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" |
| 1769 | ); |
| 1770 | |
| 1771 | if(markfile_in){ |
| 1772 | FILE *f = fossil_fopen(markfile_in, "r"); |
| 1773 | if(!f){ |
| 1774 | fossil_fatal("cannot open %s for reading\n", markfile_in); |
| 1775 | } |
| 1776 | if(import_marks(f, &blobs, NULL)<0){ |
| 1777 | fossil_fatal("error importing marks from file: %s\n", markfile_in); |
| 1778 | } |
| 1779 | fclose(f); |
| 1780 | } |
| 1781 | |
| 1782 | manifest_crosslink_begin(); |
| 1783 | git_fast_import(pIn); |
| 1784 | db_prepare(&q, "SELECT tcontent FROM xtag"); |
| 1785 | while( db_step(&q)==SQLITE_ROW ){ |
| @@ -1764,10 +1787,35 @@ | |
| 1787 | db_ephemeral_blob(&q, 0, &record); |
| 1788 | fast_insert_content(&record, 0, 0, 1); |
| 1789 | import_reset(0); |
| 1790 | } |
| 1791 | db_finalize(&q); |
| 1792 | if(markfile_out){ |
| 1793 | int rid; |
| 1794 | Stmt q_marks; |
| 1795 | FILE *f; |
| 1796 | db_prepare(&q_marks, "SELECT DISTINCT trid FROM xmark"); |
| 1797 | while( db_step(&q_marks)==SQLITE_ROW){ |
| 1798 | rid = db_column_int(&q_marks, 0); |
| 1799 | if(db_int(0, "SELECT count(objid) FROM event WHERE objid=%d AND type='ci'", rid)==0){ |
| 1800 | if(bag_find(&blobs, rid)==0){ |
| 1801 | bag_insert(&blobs, rid); |
| 1802 | } |
| 1803 | }else{ |
| 1804 | bag_insert(&vers, rid); |
| 1805 | } |
| 1806 | } |
| 1807 | db_finalize(&q_marks); |
| 1808 | f = fossil_fopen(markfile_out, "w"); |
| 1809 | if(!f){ |
| 1810 | fossil_fatal("cannot open %s for writing\n", markfile_out); |
| 1811 | } |
| 1812 | export_marks(f, &blobs, &vers); |
| 1813 | fclose(f); |
| 1814 | bag_clear(&blobs); |
| 1815 | bag_clear(&vers); |
| 1816 | } |
| 1817 | manifest_crosslink_end(MC_NONE); |
| 1818 | } |
| 1819 | |
| 1820 | verify_cancel(); |
| 1821 | db_end_transaction(0); |
| 1822 |
+50
-2
| --- src/import.c | ||
| +++ src/import.c | ||
| @@ -1537,10 +1537,13 @@ | ||
| 1537 | 1537 | ** data is read from standard input. |
| 1538 | 1538 | ** |
| 1539 | 1539 | ** The following formats are currently understood by this command |
| 1540 | 1540 | ** |
| 1541 | 1541 | ** --git Import from the git-fast-export file format (default) |
| 1542 | +** Options: | |
| 1543 | +** --import-marks FILE Restore marks table from FILE | |
| 1544 | +** --export-marks FILE Save marks table to FILE | |
| 1542 | 1545 | ** |
| 1543 | 1546 | ** --svn Import from the svnadmin-dump file format. The default |
| 1544 | 1547 | ** behaviour (unless overridden by --flat) is to treat 3 |
| 1545 | 1548 | ** folders in the SVN root as special, following the |
| 1546 | 1549 | ** common layout of SVN repositories. These are (by |
| @@ -1585,19 +1588,24 @@ | ||
| 1585 | 1588 | char *zPassword; |
| 1586 | 1589 | FILE *pIn; |
| 1587 | 1590 | Stmt q; |
| 1588 | 1591 | int forceFlag = find_option("force", "f", 0)!=0; |
| 1589 | 1592 | int svnFlag = find_option("svn", 0, 0)!=0; |
| 1593 | + int gitFlag = find_option("git", 0, 0)!=0; | |
| 1590 | 1594 | int omitRebuild = find_option("no-rebuild",0,0)!=0; |
| 1591 | 1595 | int omitVacuum = find_option("no-vacuum",0,0)!=0; |
| 1592 | 1596 | |
| 1593 | 1597 | /* Options common to all input formats */ |
| 1594 | 1598 | int incrFlag = find_option("incremental", "i", 0)!=0; |
| 1595 | 1599 | |
| 1596 | 1600 | /* Options for --svn only */ |
| 1597 | 1601 | const char *zBase=""; |
| 1598 | 1602 | int flatFlag=0; |
| 1603 | + | |
| 1604 | + /* Options for --git only */ | |
| 1605 | + const char *markfile_in; | |
| 1606 | + const char *markfile_out; | |
| 1599 | 1607 | |
| 1600 | 1608 | /* Interpret --rename-* options. Use a table to avoid code duplication. */ |
| 1601 | 1609 | const struct { |
| 1602 | 1610 | const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf; |
| 1603 | 1611 | int format; /* 1=git, 2=svn, 3=any */ |
| @@ -1650,12 +1658,13 @@ | ||
| 1650 | 1658 | gsvn.zTrunk = find_option("trunk", 0, 1); |
| 1651 | 1659 | gsvn.zBranches = find_option("branches", 0, 1); |
| 1652 | 1660 | gsvn.zTags = find_option("tags", 0, 1); |
| 1653 | 1661 | gsvn.revFlag = find_option("rev-tags", 0, 0) |
| 1654 | 1662 | || (incrFlag && !find_option("no-rev-tags", 0, 0)); |
| 1655 | - }else{ | |
| 1656 | - find_option("git",0,0); /* Skip the --git option for now */ | |
| 1663 | + }else if( gitFlag ){ | |
| 1664 | + markfile_in = find_option("import-marks", 0, 1); | |
| 1665 | + markfile_out = find_option("export-marks", 0, 1); | |
| 1657 | 1666 | } |
| 1658 | 1667 | verify_all_options(); |
| 1659 | 1668 | |
| 1660 | 1669 | if( g.argc!=3 && g.argc!=4 ){ |
| 1661 | 1670 | usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?"); |
| @@ -1729,10 +1738,13 @@ | ||
| 1729 | 1738 | gsvn.lenTags++; |
| 1730 | 1739 | } |
| 1731 | 1740 | } |
| 1732 | 1741 | svn_dump_import(pIn); |
| 1733 | 1742 | }else{ |
| 1743 | + Bag blobs, vers; | |
| 1744 | + bag_init(&blobs); | |
| 1745 | + bag_init(&vers); | |
| 1734 | 1746 | /* The following temp-tables are used to hold information needed for |
| 1735 | 1747 | ** the import. |
| 1736 | 1748 | ** |
| 1737 | 1749 | ** The XMARK table provides a mapping from fast-import "marks" and symbols |
| 1738 | 1750 | ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). |
| @@ -1753,10 +1765,21 @@ | ||
| 1753 | 1765 | db_multi_exec( |
| 1754 | 1766 | "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" |
| 1755 | 1767 | "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" |
| 1756 | 1768 | "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" |
| 1757 | 1769 | ); |
| 1770 | + | |
| 1771 | + if(markfile_in){ | |
| 1772 | + FILE *f = fossil_fopen(markfile_in, "r"); | |
| 1773 | + if(!f){ | |
| 1774 | + fossil_fatal("cannot open %s for reading\n", markfile_in); | |
| 1775 | + } | |
| 1776 | + if(import_marks(f, &blobs, NULL)<0){ | |
| 1777 | + fossil_fatal("error importing marks from file: %s\n", markfile_in); | |
| 1778 | + } | |
| 1779 | + fclose(f); | |
| 1780 | + } | |
| 1758 | 1781 | |
| 1759 | 1782 | manifest_crosslink_begin(); |
| 1760 | 1783 | git_fast_import(pIn); |
| 1761 | 1784 | db_prepare(&q, "SELECT tcontent FROM xtag"); |
| 1762 | 1785 | while( db_step(&q)==SQLITE_ROW ){ |
| @@ -1764,10 +1787,35 @@ | ||
| 1764 | 1787 | db_ephemeral_blob(&q, 0, &record); |
| 1765 | 1788 | fast_insert_content(&record, 0, 0, 1); |
| 1766 | 1789 | import_reset(0); |
| 1767 | 1790 | } |
| 1768 | 1791 | db_finalize(&q); |
| 1792 | + if(markfile_out){ | |
| 1793 | + int rid; | |
| 1794 | + Stmt q_marks; | |
| 1795 | + FILE *f; | |
| 1796 | + db_prepare(&q_marks, "SELECT DISTINCT trid FROM xmark"); | |
| 1797 | + while( db_step(&q_marks)==SQLITE_ROW){ | |
| 1798 | + rid = db_column_int(&q_marks, 0); | |
| 1799 | + if(db_int(0, "SELECT count(objid) FROM event WHERE objid=%d AND type='ci'", rid)==0){ | |
| 1800 | + if(bag_find(&blobs, rid)==0){ | |
| 1801 | + bag_insert(&blobs, rid); | |
| 1802 | + } | |
| 1803 | + }else{ | |
| 1804 | + bag_insert(&vers, rid); | |
| 1805 | + } | |
| 1806 | + } | |
| 1807 | + db_finalize(&q_marks); | |
| 1808 | + f = fossil_fopen(markfile_out, "w"); | |
| 1809 | + if(!f){ | |
| 1810 | + fossil_fatal("cannot open %s for writing\n", markfile_out); | |
| 1811 | + } | |
| 1812 | + export_marks(f, &blobs, &vers); | |
| 1813 | + fclose(f); | |
| 1814 | + bag_clear(&blobs); | |
| 1815 | + bag_clear(&vers); | |
| 1816 | + } | |
| 1769 | 1817 | manifest_crosslink_end(MC_NONE); |
| 1770 | 1818 | } |
| 1771 | 1819 | |
| 1772 | 1820 | verify_cancel(); |
| 1773 | 1821 | db_end_transaction(0); |
| 1774 | 1822 |
| --- src/import.c | |
| +++ src/import.c | |
| @@ -1537,10 +1537,13 @@ | |
| 1537 | ** data is read from standard input. |
| 1538 | ** |
| 1539 | ** The following formats are currently understood by this command |
| 1540 | ** |
| 1541 | ** --git Import from the git-fast-export file format (default) |
| 1542 | ** |
| 1543 | ** --svn Import from the svnadmin-dump file format. The default |
| 1544 | ** behaviour (unless overridden by --flat) is to treat 3 |
| 1545 | ** folders in the SVN root as special, following the |
| 1546 | ** common layout of SVN repositories. These are (by |
| @@ -1585,19 +1588,24 @@ | |
| 1585 | char *zPassword; |
| 1586 | FILE *pIn; |
| 1587 | Stmt q; |
| 1588 | int forceFlag = find_option("force", "f", 0)!=0; |
| 1589 | int svnFlag = find_option("svn", 0, 0)!=0; |
| 1590 | int omitRebuild = find_option("no-rebuild",0,0)!=0; |
| 1591 | int omitVacuum = find_option("no-vacuum",0,0)!=0; |
| 1592 | |
| 1593 | /* Options common to all input formats */ |
| 1594 | int incrFlag = find_option("incremental", "i", 0)!=0; |
| 1595 | |
| 1596 | /* Options for --svn only */ |
| 1597 | const char *zBase=""; |
| 1598 | int flatFlag=0; |
| 1599 | |
| 1600 | /* Interpret --rename-* options. Use a table to avoid code duplication. */ |
| 1601 | const struct { |
| 1602 | const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf; |
| 1603 | int format; /* 1=git, 2=svn, 3=any */ |
| @@ -1650,12 +1658,13 @@ | |
| 1650 | gsvn.zTrunk = find_option("trunk", 0, 1); |
| 1651 | gsvn.zBranches = find_option("branches", 0, 1); |
| 1652 | gsvn.zTags = find_option("tags", 0, 1); |
| 1653 | gsvn.revFlag = find_option("rev-tags", 0, 0) |
| 1654 | || (incrFlag && !find_option("no-rev-tags", 0, 0)); |
| 1655 | }else{ |
| 1656 | find_option("git",0,0); /* Skip the --git option for now */ |
| 1657 | } |
| 1658 | verify_all_options(); |
| 1659 | |
| 1660 | if( g.argc!=3 && g.argc!=4 ){ |
| 1661 | usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?"); |
| @@ -1729,10 +1738,13 @@ | |
| 1729 | gsvn.lenTags++; |
| 1730 | } |
| 1731 | } |
| 1732 | svn_dump_import(pIn); |
| 1733 | }else{ |
| 1734 | /* The following temp-tables are used to hold information needed for |
| 1735 | ** the import. |
| 1736 | ** |
| 1737 | ** The XMARK table provides a mapping from fast-import "marks" and symbols |
| 1738 | ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). |
| @@ -1753,10 +1765,21 @@ | |
| 1753 | db_multi_exec( |
| 1754 | "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" |
| 1755 | "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" |
| 1756 | "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" |
| 1757 | ); |
| 1758 | |
| 1759 | manifest_crosslink_begin(); |
| 1760 | git_fast_import(pIn); |
| 1761 | db_prepare(&q, "SELECT tcontent FROM xtag"); |
| 1762 | while( db_step(&q)==SQLITE_ROW ){ |
| @@ -1764,10 +1787,35 @@ | |
| 1764 | db_ephemeral_blob(&q, 0, &record); |
| 1765 | fast_insert_content(&record, 0, 0, 1); |
| 1766 | import_reset(0); |
| 1767 | } |
| 1768 | db_finalize(&q); |
| 1769 | manifest_crosslink_end(MC_NONE); |
| 1770 | } |
| 1771 | |
| 1772 | verify_cancel(); |
| 1773 | db_end_transaction(0); |
| 1774 |
| --- src/import.c | |
| +++ src/import.c | |
| @@ -1537,10 +1537,13 @@ | |
| 1537 | ** data is read from standard input. |
| 1538 | ** |
| 1539 | ** The following formats are currently understood by this command |
| 1540 | ** |
| 1541 | ** --git Import from the git-fast-export file format (default) |
| 1542 | ** Options: |
| 1543 | ** --import-marks FILE Restore marks table from FILE |
| 1544 | ** --export-marks FILE Save marks table to FILE |
| 1545 | ** |
| 1546 | ** --svn Import from the svnadmin-dump file format. The default |
| 1547 | ** behaviour (unless overridden by --flat) is to treat 3 |
| 1548 | ** folders in the SVN root as special, following the |
| 1549 | ** common layout of SVN repositories. These are (by |
| @@ -1585,19 +1588,24 @@ | |
| 1588 | char *zPassword; |
| 1589 | FILE *pIn; |
| 1590 | Stmt q; |
| 1591 | int forceFlag = find_option("force", "f", 0)!=0; |
| 1592 | int svnFlag = find_option("svn", 0, 0)!=0; |
| 1593 | int gitFlag = find_option("git", 0, 0)!=0; |
| 1594 | int omitRebuild = find_option("no-rebuild",0,0)!=0; |
| 1595 | int omitVacuum = find_option("no-vacuum",0,0)!=0; |
| 1596 | |
| 1597 | /* Options common to all input formats */ |
| 1598 | int incrFlag = find_option("incremental", "i", 0)!=0; |
| 1599 | |
| 1600 | /* Options for --svn only */ |
| 1601 | const char *zBase=""; |
| 1602 | int flatFlag=0; |
| 1603 | |
| 1604 | /* Options for --git only */ |
| 1605 | const char *markfile_in; |
| 1606 | const char *markfile_out; |
| 1607 | |
| 1608 | /* Interpret --rename-* options. Use a table to avoid code duplication. */ |
| 1609 | const struct { |
| 1610 | const char *zOpt, **varPre, *zDefaultPre, **varSuf, *zDefaultSuf; |
| 1611 | int format; /* 1=git, 2=svn, 3=any */ |
| @@ -1650,12 +1658,13 @@ | |
| 1658 | gsvn.zTrunk = find_option("trunk", 0, 1); |
| 1659 | gsvn.zBranches = find_option("branches", 0, 1); |
| 1660 | gsvn.zTags = find_option("tags", 0, 1); |
| 1661 | gsvn.revFlag = find_option("rev-tags", 0, 0) |
| 1662 | || (incrFlag && !find_option("no-rev-tags", 0, 0)); |
| 1663 | }else if( gitFlag ){ |
| 1664 | markfile_in = find_option("import-marks", 0, 1); |
| 1665 | markfile_out = find_option("export-marks", 0, 1); |
| 1666 | } |
| 1667 | verify_all_options(); |
| 1668 | |
| 1669 | if( g.argc!=3 && g.argc!=4 ){ |
| 1670 | usage("--git|--svn ?OPTIONS? NEW-REPOSITORY ?INPUT-FILE?"); |
| @@ -1729,10 +1738,13 @@ | |
| 1738 | gsvn.lenTags++; |
| 1739 | } |
| 1740 | } |
| 1741 | svn_dump_import(pIn); |
| 1742 | }else{ |
| 1743 | Bag blobs, vers; |
| 1744 | bag_init(&blobs); |
| 1745 | bag_init(&vers); |
| 1746 | /* The following temp-tables are used to hold information needed for |
| 1747 | ** the import. |
| 1748 | ** |
| 1749 | ** The XMARK table provides a mapping from fast-import "marks" and symbols |
| 1750 | ** into artifact ids (UUIDs - the 40-byte hex SHA1 hash of artifacts). |
| @@ -1753,10 +1765,21 @@ | |
| 1765 | db_multi_exec( |
| 1766 | "CREATE TEMP TABLE xmark(tname TEXT UNIQUE, trid INT, tuuid TEXT);" |
| 1767 | "CREATE TEMP TABLE xbranch(tname TEXT UNIQUE, brnm TEXT);" |
| 1768 | "CREATE TEMP TABLE xtag(tname TEXT UNIQUE, tcontent TEXT);" |
| 1769 | ); |
| 1770 | |
| 1771 | if(markfile_in){ |
| 1772 | FILE *f = fossil_fopen(markfile_in, "r"); |
| 1773 | if(!f){ |
| 1774 | fossil_fatal("cannot open %s for reading\n", markfile_in); |
| 1775 | } |
| 1776 | if(import_marks(f, &blobs, NULL)<0){ |
| 1777 | fossil_fatal("error importing marks from file: %s\n", markfile_in); |
| 1778 | } |
| 1779 | fclose(f); |
| 1780 | } |
| 1781 | |
| 1782 | manifest_crosslink_begin(); |
| 1783 | git_fast_import(pIn); |
| 1784 | db_prepare(&q, "SELECT tcontent FROM xtag"); |
| 1785 | while( db_step(&q)==SQLITE_ROW ){ |
| @@ -1764,10 +1787,35 @@ | |
| 1787 | db_ephemeral_blob(&q, 0, &record); |
| 1788 | fast_insert_content(&record, 0, 0, 1); |
| 1789 | import_reset(0); |
| 1790 | } |
| 1791 | db_finalize(&q); |
| 1792 | if(markfile_out){ |
| 1793 | int rid; |
| 1794 | Stmt q_marks; |
| 1795 | FILE *f; |
| 1796 | db_prepare(&q_marks, "SELECT DISTINCT trid FROM xmark"); |
| 1797 | while( db_step(&q_marks)==SQLITE_ROW){ |
| 1798 | rid = db_column_int(&q_marks, 0); |
| 1799 | if(db_int(0, "SELECT count(objid) FROM event WHERE objid=%d AND type='ci'", rid)==0){ |
| 1800 | if(bag_find(&blobs, rid)==0){ |
| 1801 | bag_insert(&blobs, rid); |
| 1802 | } |
| 1803 | }else{ |
| 1804 | bag_insert(&vers, rid); |
| 1805 | } |
| 1806 | } |
| 1807 | db_finalize(&q_marks); |
| 1808 | f = fossil_fopen(markfile_out, "w"); |
| 1809 | if(!f){ |
| 1810 | fossil_fatal("cannot open %s for writing\n", markfile_out); |
| 1811 | } |
| 1812 | export_marks(f, &blobs, &vers); |
| 1813 | fclose(f); |
| 1814 | bag_clear(&blobs); |
| 1815 | bag_clear(&vers); |
| 1816 | } |
| 1817 | manifest_crosslink_end(MC_NONE); |
| 1818 | } |
| 1819 | |
| 1820 | verify_cancel(); |
| 1821 | db_end_transaction(0); |
| 1822 |
+30
-7
| --- src/info.c | ||
| +++ src/info.c | ||
| @@ -1330,10 +1330,11 @@ | ||
| 1330 | 1330 | const char *zDate = db_column_text(&q, 0); |
| 1331 | 1331 | const char *zUser = db_column_text(&q, 1); |
| 1332 | 1332 | const char *zCom = db_column_text(&q, 2); |
| 1333 | 1333 | const char *zType = db_column_text(&q, 3); |
| 1334 | 1334 | const char *zUuid = db_column_text(&q, 4); |
| 1335 | + int eventTagId = db_column_int(&q, 5); | |
| 1335 | 1336 | if( cnt>0 ){ |
| 1336 | 1337 | @ Also |
| 1337 | 1338 | } |
| 1338 | 1339 | if( zType[0]=='w' ){ |
| 1339 | 1340 | @ Wiki edit |
| @@ -1343,17 +1344,21 @@ | ||
| 1343 | 1344 | objType |= OBJTYPE_TICKET; |
| 1344 | 1345 | }else if( zType[0]=='c' ){ |
| 1345 | 1346 | @ Manifest of check-in |
| 1346 | 1347 | objType |= OBJTYPE_CHECKIN; |
| 1347 | 1348 | }else if( zType[0]=='e' ){ |
| 1348 | - @ Instance of technote | |
| 1349 | - objType |= OBJTYPE_EVENT; | |
| 1350 | - hyperlink_to_event_tagid(db_column_int(&q, 5)); | |
| 1349 | + if( eventTagId != 0) { | |
| 1350 | + @ Instance of technote | |
| 1351 | + objType |= OBJTYPE_EVENT; | |
| 1352 | + hyperlink_to_event_tagid(db_column_int(&q, 5)); | |
| 1353 | + }else{ | |
| 1354 | + @ Attachment to technote | |
| 1355 | + } | |
| 1351 | 1356 | }else{ |
| 1352 | 1357 | @ Tag referencing |
| 1353 | 1358 | } |
| 1354 | - if( zType[0]!='e' ){ | |
| 1359 | + if( zType[0]!='e' || eventTagId == 0){ | |
| 1355 | 1360 | hyperlink_to_uuid(zUuid); |
| 1356 | 1361 | } |
| 1357 | 1362 | @ - %!W(zCom) by |
| 1358 | 1363 | hyperlink_to_user(zUser,zDate," on"); |
| 1359 | 1364 | hyperlink_to_date(zDate, "."); |
| @@ -1383,14 +1388,32 @@ | ||
| 1383 | 1388 | }else{ |
| 1384 | 1389 | @ Attachment "%h(zFilename)" to |
| 1385 | 1390 | } |
| 1386 | 1391 | objType |= OBJTYPE_ATTACHMENT; |
| 1387 | 1392 | if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){ |
| 1388 | - if( g.perm.Hyperlink && g.anon.RdTkt ){ | |
| 1389 | - @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)</a>] | |
| 1393 | + if ( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", | |
| 1394 | + zTarget) | |
| 1395 | + ){ | |
| 1396 | + if( g.perm.Hyperlink && g.anon.RdTkt ){ | |
| 1397 | + @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)</a>] | |
| 1398 | + }else{ | |
| 1399 | + @ ticket [%S(zTarget)] | |
| 1400 | + } | |
| 1401 | + }else if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", | |
| 1402 | + zTarget) | |
| 1403 | + ){ | |
| 1404 | + if( g.perm.Hyperlink && g.anon.RdWiki ){ | |
| 1405 | + @ tech note [%z(href("%R/technote/%h",zTarget))%S(zTarget)</a>] | |
| 1406 | + }else{ | |
| 1407 | + @ tech note [%S(zTarget)] | |
| 1408 | + } | |
| 1390 | 1409 | }else{ |
| 1391 | - @ ticket [%S(zTarget)] | |
| 1410 | + if( g.perm.Hyperlink && g.anon.RdWiki ){ | |
| 1411 | + @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>] | |
| 1412 | + }else{ | |
| 1413 | + @ wiki page [%h(zTarget)] | |
| 1414 | + } | |
| 1392 | 1415 | } |
| 1393 | 1416 | }else{ |
| 1394 | 1417 | if( g.perm.Hyperlink && g.anon.RdWiki ){ |
| 1395 | 1418 | @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>] |
| 1396 | 1419 | }else{ |
| 1397 | 1420 |
| --- src/info.c | |
| +++ src/info.c | |
| @@ -1330,10 +1330,11 @@ | |
| 1330 | const char *zDate = db_column_text(&q, 0); |
| 1331 | const char *zUser = db_column_text(&q, 1); |
| 1332 | const char *zCom = db_column_text(&q, 2); |
| 1333 | const char *zType = db_column_text(&q, 3); |
| 1334 | const char *zUuid = db_column_text(&q, 4); |
| 1335 | if( cnt>0 ){ |
| 1336 | @ Also |
| 1337 | } |
| 1338 | if( zType[0]=='w' ){ |
| 1339 | @ Wiki edit |
| @@ -1343,17 +1344,21 @@ | |
| 1343 | objType |= OBJTYPE_TICKET; |
| 1344 | }else if( zType[0]=='c' ){ |
| 1345 | @ Manifest of check-in |
| 1346 | objType |= OBJTYPE_CHECKIN; |
| 1347 | }else if( zType[0]=='e' ){ |
| 1348 | @ Instance of technote |
| 1349 | objType |= OBJTYPE_EVENT; |
| 1350 | hyperlink_to_event_tagid(db_column_int(&q, 5)); |
| 1351 | }else{ |
| 1352 | @ Tag referencing |
| 1353 | } |
| 1354 | if( zType[0]!='e' ){ |
| 1355 | hyperlink_to_uuid(zUuid); |
| 1356 | } |
| 1357 | @ - %!W(zCom) by |
| 1358 | hyperlink_to_user(zUser,zDate," on"); |
| 1359 | hyperlink_to_date(zDate, "."); |
| @@ -1383,14 +1388,32 @@ | |
| 1383 | }else{ |
| 1384 | @ Attachment "%h(zFilename)" to |
| 1385 | } |
| 1386 | objType |= OBJTYPE_ATTACHMENT; |
| 1387 | if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){ |
| 1388 | if( g.perm.Hyperlink && g.anon.RdTkt ){ |
| 1389 | @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)</a>] |
| 1390 | }else{ |
| 1391 | @ ticket [%S(zTarget)] |
| 1392 | } |
| 1393 | }else{ |
| 1394 | if( g.perm.Hyperlink && g.anon.RdWiki ){ |
| 1395 | @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>] |
| 1396 | }else{ |
| 1397 |
| --- src/info.c | |
| +++ src/info.c | |
| @@ -1330,10 +1330,11 @@ | |
| 1330 | const char *zDate = db_column_text(&q, 0); |
| 1331 | const char *zUser = db_column_text(&q, 1); |
| 1332 | const char *zCom = db_column_text(&q, 2); |
| 1333 | const char *zType = db_column_text(&q, 3); |
| 1334 | const char *zUuid = db_column_text(&q, 4); |
| 1335 | int eventTagId = db_column_int(&q, 5); |
| 1336 | if( cnt>0 ){ |
| 1337 | @ Also |
| 1338 | } |
| 1339 | if( zType[0]=='w' ){ |
| 1340 | @ Wiki edit |
| @@ -1343,17 +1344,21 @@ | |
| 1344 | objType |= OBJTYPE_TICKET; |
| 1345 | }else if( zType[0]=='c' ){ |
| 1346 | @ Manifest of check-in |
| 1347 | objType |= OBJTYPE_CHECKIN; |
| 1348 | }else if( zType[0]=='e' ){ |
| 1349 | if( eventTagId != 0) { |
| 1350 | @ Instance of technote |
| 1351 | objType |= OBJTYPE_EVENT; |
| 1352 | hyperlink_to_event_tagid(db_column_int(&q, 5)); |
| 1353 | }else{ |
| 1354 | @ Attachment to technote |
| 1355 | } |
| 1356 | }else{ |
| 1357 | @ Tag referencing |
| 1358 | } |
| 1359 | if( zType[0]!='e' || eventTagId == 0){ |
| 1360 | hyperlink_to_uuid(zUuid); |
| 1361 | } |
| 1362 | @ - %!W(zCom) by |
| 1363 | hyperlink_to_user(zUser,zDate," on"); |
| 1364 | hyperlink_to_date(zDate, "."); |
| @@ -1383,14 +1388,32 @@ | |
| 1388 | }else{ |
| 1389 | @ Attachment "%h(zFilename)" to |
| 1390 | } |
| 1391 | objType |= OBJTYPE_ATTACHMENT; |
| 1392 | if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){ |
| 1393 | if ( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", |
| 1394 | zTarget) |
| 1395 | ){ |
| 1396 | if( g.perm.Hyperlink && g.anon.RdTkt ){ |
| 1397 | @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)</a>] |
| 1398 | }else{ |
| 1399 | @ ticket [%S(zTarget)] |
| 1400 | } |
| 1401 | }else if( db_exists("SELECT 1 FROM tag WHERE tagname='event-%q'", |
| 1402 | zTarget) |
| 1403 | ){ |
| 1404 | if( g.perm.Hyperlink && g.anon.RdWiki ){ |
| 1405 | @ tech note [%z(href("%R/technote/%h",zTarget))%S(zTarget)</a>] |
| 1406 | }else{ |
| 1407 | @ tech note [%S(zTarget)] |
| 1408 | } |
| 1409 | }else{ |
| 1410 | if( g.perm.Hyperlink && g.anon.RdWiki ){ |
| 1411 | @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>] |
| 1412 | }else{ |
| 1413 | @ wiki page [%h(zTarget)] |
| 1414 | } |
| 1415 | } |
| 1416 | }else{ |
| 1417 | if( g.perm.Hyperlink && g.anon.RdWiki ){ |
| 1418 | @ wiki page [%z(href("%R/wiki?name=%t",zTarget))%h(zTarget)</a>] |
| 1419 | }else{ |
| 1420 |
+2
-1
| --- src/json_wiki.c | ||
| +++ src/json_wiki.c | ||
| @@ -374,12 +374,13 @@ | ||
| 374 | 374 | if(contentLen){ |
| 375 | 375 | blob_append(&content, cson_string_cstr(jstr),contentLen); |
| 376 | 376 | } |
| 377 | 377 | |
| 378 | 378 | zMimeType = json_find_option_cstr("mimetype","mimetype","M"); |
| 379 | + zMimeType = wiki_filter_mimetypes(zMimeType); | |
| 379 | 380 | |
| 380 | - wiki_cmd_commit(zPageName, 0==rid, &content, zMimeType, 0); | |
| 381 | + wiki_cmd_commit(zPageName, rid, &content, zMimeType, 0); | |
| 381 | 382 | blob_reset(&content); |
| 382 | 383 | /* |
| 383 | 384 | Our return value here has a race condition: if this operation |
| 384 | 385 | is called concurrently for the same wiki page via two requests, |
| 385 | 386 | payV could reflect the results of the other save operation. |
| 386 | 387 |
| --- src/json_wiki.c | |
| +++ src/json_wiki.c | |
| @@ -374,12 +374,13 @@ | |
| 374 | if(contentLen){ |
| 375 | blob_append(&content, cson_string_cstr(jstr),contentLen); |
| 376 | } |
| 377 | |
| 378 | zMimeType = json_find_option_cstr("mimetype","mimetype","M"); |
| 379 | |
| 380 | wiki_cmd_commit(zPageName, 0==rid, &content, zMimeType, 0); |
| 381 | blob_reset(&content); |
| 382 | /* |
| 383 | Our return value here has a race condition: if this operation |
| 384 | is called concurrently for the same wiki page via two requests, |
| 385 | payV could reflect the results of the other save operation. |
| 386 |
| --- src/json_wiki.c | |
| +++ src/json_wiki.c | |
| @@ -374,12 +374,13 @@ | |
| 374 | if(contentLen){ |
| 375 | blob_append(&content, cson_string_cstr(jstr),contentLen); |
| 376 | } |
| 377 | |
| 378 | zMimeType = json_find_option_cstr("mimetype","mimetype","M"); |
| 379 | zMimeType = wiki_filter_mimetypes(zMimeType); |
| 380 | |
| 381 | wiki_cmd_commit(zPageName, rid, &content, zMimeType, 0); |
| 382 | blob_reset(&content); |
| 383 | /* |
| 384 | Our return value here has a race condition: if this operation |
| 385 | is called concurrently for the same wiki page via two requests, |
| 386 | payV could reflect the results of the other save operation. |
| 387 |
+11
-6
| --- src/manifest.c | ||
| +++ src/manifest.c | ||
| @@ -2066,15 +2066,17 @@ | ||
| 2066 | 2066 | const char isAdd = (zSrc && zSrc[0]) ? 1 : 0; |
| 2067 | 2067 | char *zComment; |
| 2068 | 2068 | if( isAdd ){ |
| 2069 | 2069 | zComment = mprintf( |
| 2070 | 2070 | "Add attachment [/artifact/%!S|%h] to" |
| 2071 | - " tech note [/technote/%h|%.10h]", | |
| 2071 | + " tech note [/technote/%!S|%S]", | |
| 2072 | 2072 | zSrc, zName, zTarget, zTarget); |
| 2073 | 2073 | }else{ |
| 2074 | - zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]", | |
| 2075 | - zName, zTarget); | |
| 2074 | + zComment = mprintf( | |
| 2075 | + "Delete attachment \"%h\" from" | |
| 2076 | + " tech note [/technote/%!S|%S]", | |
| 2077 | + zName, zTarget, zTarget); | |
| 2076 | 2078 | } |
| 2077 | 2079 | db_multi_exec("UPDATE event SET comment=%Q, type='e'" |
| 2078 | 2080 | " WHERE objid=%Q", |
| 2079 | 2081 | zComment, zAttachId); |
| 2080 | 2082 | fossil_free(zComment); |
| @@ -2162,15 +2164,18 @@ | ||
| 2162 | 2164 | p->zAttachName, p->zAttachTarget); |
| 2163 | 2165 | } |
| 2164 | 2166 | }else if( 'e' == attachToType ){ |
| 2165 | 2167 | if( isAdd ){ |
| 2166 | 2168 | zComment = mprintf( |
| 2167 | - "Add attachment [/artifact/%!S|%h] to tech note [/technote/%h|%.10h]", | |
| 2169 | + "Add attachment [/artifact/%!S|%h] to tech note [/technote/%!S|%S]", | |
| 2168 | 2170 | p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget); |
| 2169 | 2171 | }else{ |
| 2170 | - zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]", | |
| 2171 | - p->zAttachName, p->zAttachTarget); | |
| 2172 | + zComment = mprintf( | |
| 2173 | + "Delete attachment \"/artifact/%!S|%h\" from" | |
| 2174 | + " tech note [/technote/%!S|%S]", | |
| 2175 | + p->zAttachName, p->zAttachName, | |
| 2176 | + p->zAttachTarget,p->zAttachTarget); | |
| 2172 | 2177 | } |
| 2173 | 2178 | }else{ |
| 2174 | 2179 | if( isAdd ){ |
| 2175 | 2180 | zComment = mprintf( |
| 2176 | 2181 | "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]", |
| 2177 | 2182 |
| --- src/manifest.c | |
| +++ src/manifest.c | |
| @@ -2066,15 +2066,17 @@ | |
| 2066 | const char isAdd = (zSrc && zSrc[0]) ? 1 : 0; |
| 2067 | char *zComment; |
| 2068 | if( isAdd ){ |
| 2069 | zComment = mprintf( |
| 2070 | "Add attachment [/artifact/%!S|%h] to" |
| 2071 | " tech note [/technote/%h|%.10h]", |
| 2072 | zSrc, zName, zTarget, zTarget); |
| 2073 | }else{ |
| 2074 | zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]", |
| 2075 | zName, zTarget); |
| 2076 | } |
| 2077 | db_multi_exec("UPDATE event SET comment=%Q, type='e'" |
| 2078 | " WHERE objid=%Q", |
| 2079 | zComment, zAttachId); |
| 2080 | fossil_free(zComment); |
| @@ -2162,15 +2164,18 @@ | |
| 2162 | p->zAttachName, p->zAttachTarget); |
| 2163 | } |
| 2164 | }else if( 'e' == attachToType ){ |
| 2165 | if( isAdd ){ |
| 2166 | zComment = mprintf( |
| 2167 | "Add attachment [/artifact/%!S|%h] to tech note [/technote/%h|%.10h]", |
| 2168 | p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget); |
| 2169 | }else{ |
| 2170 | zComment = mprintf("Delete attachment \"%h\" from tech note [%.10h]", |
| 2171 | p->zAttachName, p->zAttachTarget); |
| 2172 | } |
| 2173 | }else{ |
| 2174 | if( isAdd ){ |
| 2175 | zComment = mprintf( |
| 2176 | "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]", |
| 2177 |
| --- src/manifest.c | |
| +++ src/manifest.c | |
| @@ -2066,15 +2066,17 @@ | |
| 2066 | const char isAdd = (zSrc && zSrc[0]) ? 1 : 0; |
| 2067 | char *zComment; |
| 2068 | if( isAdd ){ |
| 2069 | zComment = mprintf( |
| 2070 | "Add attachment [/artifact/%!S|%h] to" |
| 2071 | " tech note [/technote/%!S|%S]", |
| 2072 | zSrc, zName, zTarget, zTarget); |
| 2073 | }else{ |
| 2074 | zComment = mprintf( |
| 2075 | "Delete attachment \"%h\" from" |
| 2076 | " tech note [/technote/%!S|%S]", |
| 2077 | zName, zTarget, zTarget); |
| 2078 | } |
| 2079 | db_multi_exec("UPDATE event SET comment=%Q, type='e'" |
| 2080 | " WHERE objid=%Q", |
| 2081 | zComment, zAttachId); |
| 2082 | fossil_free(zComment); |
| @@ -2162,15 +2164,18 @@ | |
| 2164 | p->zAttachName, p->zAttachTarget); |
| 2165 | } |
| 2166 | }else if( 'e' == attachToType ){ |
| 2167 | if( isAdd ){ |
| 2168 | zComment = mprintf( |
| 2169 | "Add attachment [/artifact/%!S|%h] to tech note [/technote/%!S|%S]", |
| 2170 | p->zAttachSrc, p->zAttachName, p->zAttachTarget, p->zAttachTarget); |
| 2171 | }else{ |
| 2172 | zComment = mprintf( |
| 2173 | "Delete attachment \"/artifact/%!S|%h\" from" |
| 2174 | " tech note [/technote/%!S|%S]", |
| 2175 | p->zAttachName, p->zAttachName, |
| 2176 | p->zAttachTarget,p->zAttachTarget); |
| 2177 | } |
| 2178 | }else{ |
| 2179 | if( isAdd ){ |
| 2180 | zComment = mprintf( |
| 2181 | "Add attachment [/artifact/%!S|%h] to ticket [%!S|%S]", |
| 2182 |
+249
-143
| --- src/merge.c | ||
| +++ src/merge.c | ||
| @@ -128,10 +128,46 @@ | ||
| 128 | 128 | } |
| 129 | 129 | } |
| 130 | 130 | db_finalize(&q); |
| 131 | 131 | return fForkSeen; |
| 132 | 132 | } |
| 133 | + | |
| 134 | +/* | |
| 135 | +** Add an entry to the FV table for all files renamed between | |
| 136 | +** version N and the version specified by vid. | |
| 137 | +*/ | |
| 138 | +static void add_renames( | |
| 139 | + const char *zFnCol, /* The FV column for the filename in vid */ | |
| 140 | + int vid, /* The desired version's RID */ | |
| 141 | + int nid, /* Version N's RID */ | |
| 142 | + int revOk, /* Ok to move backwards (child->parent) if true */ | |
| 143 | + const char *zDebug /* Generate trace output if not NULL */ | |
| 144 | +){ | |
| 145 | + int nChng; /* Number of file name changes */ | |
| 146 | + int *aChng; /* An array of file name changes */ | |
| 147 | + int i; /* Loop counter */ | |
| 148 | + find_filename_changes(nid, vid, revOk, &nChng, &aChng, zDebug); | |
| 149 | + if( nChng==0 ) return; | |
| 150 | + for(i=0; i<nChng; i++){ | |
| 151 | + char *zN, *zV; | |
| 152 | + zN = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]); | |
| 153 | + zV = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2+1]); | |
| 154 | + db_multi_exec( | |
| 155 | + "INSERT OR IGNORE INTO fv(%s,fnn) VALUES(%Q,%Q)", | |
| 156 | + zFnCol /*safe-for-%s*/, zV, zN | |
| 157 | + ); | |
| 158 | + if( db_changes()==0 ){ | |
| 159 | + db_multi_exec( | |
| 160 | + "UPDATE fv SET %s=%Q WHERE fnn=%Q", | |
| 161 | + zFnCol /*safe-for-%s*/, zV, zN | |
| 162 | + ); | |
| 163 | + } | |
| 164 | + free(zN); | |
| 165 | + free(zV); | |
| 166 | + } | |
| 167 | + free(aChng); | |
| 168 | +} | |
| 133 | 169 | |
| 134 | 170 | /* |
| 135 | 171 | ** COMMAND: merge |
| 136 | 172 | ** |
| 137 | 173 | ** Usage: %fossil merge ?OPTIONS? ?VERSION? |
| @@ -178,10 +214,11 @@ | ||
| 178 | 214 | */ |
| 179 | 215 | void merge_cmd(void){ |
| 180 | 216 | int vid; /* Current version "V" */ |
| 181 | 217 | int mid; /* Version we are merging from "M" */ |
| 182 | 218 | int pid; /* The pivot version - most recent common ancestor P */ |
| 219 | + int nid = 0; /* The name pivot version "N" */ | |
| 183 | 220 | int verboseFlag; /* True if the -v|--verbose option is present */ |
| 184 | 221 | int integrateFlag; /* True if the --integrate option is present */ |
| 185 | 222 | int pickFlag; /* True if the --cherrypick option is present */ |
| 186 | 223 | int backoutFlag; /* True if the --backout option is present */ |
| 187 | 224 | int dryRunFlag; /* True if the --dry-run or -n option is present */ |
| @@ -188,23 +225,22 @@ | ||
| 188 | 225 | int forceFlag; /* True if the --force or -f option is present */ |
| 189 | 226 | int forceMissingFlag; /* True if the --force-missing option is present */ |
| 190 | 227 | const char *zBinGlob; /* The value of --binary */ |
| 191 | 228 | const char *zPivot; /* The value of --baseline */ |
| 192 | 229 | int debugFlag; /* True if --debug is present */ |
| 193 | - int nChng; /* Number of file name changes */ | |
| 194 | - int *aChng; /* An array of file name changes */ | |
| 195 | - int i; /* Loop counter */ | |
| 196 | 230 | int nConflict = 0; /* Number of conflicts seen */ |
| 197 | 231 | int nOverwrite = 0; /* Number of unmanaged files overwritten */ |
| 232 | + char vAncestor = 'p'; /* If P is an ancestor of V then 'p', else 'n' */ | |
| 198 | 233 | Stmt q; |
| 199 | 234 | |
| 200 | 235 | |
| 201 | 236 | /* Notation: |
| 202 | 237 | ** |
| 203 | 238 | ** V The current checkout |
| 204 | 239 | ** M The version being merged in |
| 205 | 240 | ** P The "pivot" - the most recent common ancestor of V and M. |
| 241 | + ** N The "name pivot" - for detecting renames | |
| 206 | 242 | */ |
| 207 | 243 | |
| 208 | 244 | undo_capture_command_line(); |
| 209 | 245 | verboseFlag = find_option("verbose","v",0)!=0; |
| 210 | 246 | forceMissingFlag = find_option("force-missing",0,0)!=0; |
| @@ -291,37 +327,49 @@ | ||
| 291 | 327 | fossil_fatal("not a version: %s", zPivot); |
| 292 | 328 | } |
| 293 | 329 | if( pickFlag ){ |
| 294 | 330 | fossil_fatal("incompatible options: --cherrypick & --baseline"); |
| 295 | 331 | } |
| 296 | - }else if( pickFlag || backoutFlag ){ | |
| 332 | + } | |
| 333 | + if( pickFlag || backoutFlag ){ | |
| 297 | 334 | if( integrateFlag ){ |
| 298 | 335 | fossil_fatal("incompatible options: --integrate & --cherrypick or --backout"); |
| 299 | 336 | } |
| 300 | 337 | pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid); |
| 301 | 338 | if( pid<=0 ){ |
| 302 | 339 | fossil_fatal("cannot find an ancestor for %s", g.argv[2]); |
| 303 | 340 | } |
| 304 | 341 | }else{ |
| 342 | + if( !zPivot ){ | |
| 343 | + pivot_set_primary(mid); | |
| 344 | + pivot_set_secondary(vid); | |
| 345 | + db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0"); | |
| 346 | + while( db_step(&q)==SQLITE_ROW ){ | |
| 347 | + pivot_set_secondary(db_column_int(&q,0)); | |
| 348 | + } | |
| 349 | + db_finalize(&q); | |
| 350 | + pid = pivot_find(0); | |
| 351 | + if( pid<=0 ){ | |
| 352 | + fossil_fatal("cannot find a common ancestor between the current " | |
| 353 | + "checkout and %s", g.argv[2]); | |
| 354 | + } | |
| 355 | + } | |
| 305 | 356 | pivot_set_primary(mid); |
| 306 | 357 | pivot_set_secondary(vid); |
| 307 | - db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0"); | |
| 308 | - while( db_step(&q)==SQLITE_ROW ){ | |
| 309 | - pivot_set_secondary(db_column_int(&q,0)); | |
| 310 | - } | |
| 311 | - db_finalize(&q); | |
| 312 | - pid = pivot_find(); | |
| 313 | - if( pid<=0 ){ | |
| 314 | - fossil_fatal("cannot find a common ancestor between the current " | |
| 315 | - "checkout and %s", g.argv[2]); | |
| 358 | + nid = pivot_find(1); | |
| 359 | + if( nid!=pid ){ | |
| 360 | + pivot_set_primary(nid); | |
| 361 | + pivot_set_secondary(pid); | |
| 362 | + nid = pivot_find(1); | |
| 316 | 363 | } |
| 317 | 364 | } |
| 318 | 365 | if( backoutFlag ){ |
| 319 | 366 | int t = pid; |
| 320 | 367 | pid = mid; |
| 321 | 368 | mid = t; |
| 322 | 369 | } |
| 370 | + if( nid==0 ) nid = pid; | |
| 323 | 371 | if( !is_a_version(pid) ){ |
| 324 | 372 | fossil_fatal("not a version: record #%d", pid); |
| 325 | 373 | } |
| 326 | 374 | if( !forceFlag && mid==pid ){ |
| 327 | 375 | fossil_print("Merge skipped because it is a no-op. " |
| @@ -343,12 +391,25 @@ | ||
| 343 | 391 | fossil_fatal("missing content, unable to merge"); |
| 344 | 392 | } |
| 345 | 393 | if( load_vfile_from_rid(pid) && !forceMissingFlag ){ |
| 346 | 394 | fossil_fatal("missing content, unable to merge"); |
| 347 | 395 | } |
| 396 | + if( zPivot ){ | |
| 397 | + vAncestor = db_exists( | |
| 398 | + "WITH RECURSIVE ancestor(id) AS (" | |
| 399 | + " VALUES(%d)" | |
| 400 | + " UNION ALL" | |
| 401 | + " SELECT pid FROM plink, ancestor" | |
| 402 | + " WHERE cid=ancestor.id AND pid!=%d AND cid!=%d)" | |
| 403 | + "SELECT 1 FROM ancestor WHERE id=%d LIMIT 1", | |
| 404 | + vid, nid, pid, pid | |
| 405 | + ) ? 'p' : 'n'; | |
| 406 | + } | |
| 348 | 407 | if( debugFlag ){ |
| 349 | 408 | char *z; |
| 409 | + z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", nid); | |
| 410 | + fossil_print("N=%d %z\n", nid, z); | |
| 350 | 411 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid); |
| 351 | 412 | fossil_print("P=%d %z\n", pid, z); |
| 352 | 413 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid); |
| 353 | 414 | fossil_print("M=%d %z\n", mid, z); |
| 354 | 415 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid); |
| @@ -361,115 +422,102 @@ | ||
| 361 | 422 | ** in the current checkout, the pivot, and the version being merged. |
| 362 | 423 | */ |
| 363 | 424 | db_multi_exec( |
| 364 | 425 | "DROP TABLE IF EXISTS fv;" |
| 365 | 426 | "CREATE TEMP TABLE fv(" |
| 366 | - " fn TEXT PRIMARY KEY %s," /* The filename */ | |
| 367 | - " idv INTEGER," /* VFILE entry for current version */ | |
| 368 | - " idp INTEGER," /* VFILE entry for the pivot */ | |
| 369 | - " idm INTEGER," /* VFILE entry for version merging in */ | |
| 427 | + " fn TEXT UNIQUE %s," /* The filename */ | |
| 428 | + " idv INTEGER DEFAULT 0," /* VFILE entry for current version */ | |
| 429 | + " idp INTEGER DEFAULT 0," /* VFILE entry for the pivot */ | |
| 430 | + " idm INTEGER DEFAULT 0," /* VFILE entry for version merging in */ | |
| 370 | 431 | " chnged BOOLEAN," /* True if current version has been edited */ |
| 371 | - " ridv INTEGER," /* Record ID for current version */ | |
| 372 | - " ridp INTEGER," /* Record ID for pivot */ | |
| 373 | - " ridm INTEGER," /* Record ID for merge */ | |
| 432 | + " ridv INTEGER DEFAULT 0," /* Record ID for current version */ | |
| 433 | + " ridp INTEGER DEFAULT 0," /* Record ID for pivot */ | |
| 434 | + " ridm INTEGER DEFAULT 0," /* Record ID for merge */ | |
| 374 | 435 | " isexe BOOLEAN," /* Execute permission enabled */ |
| 375 | - " fnp TEXT %s," /* The filename in the pivot */ | |
| 376 | - " fnm TEXT %s," /* the filename in the merged version */ | |
| 436 | + " fnp TEXT UNIQUE %s," /* The filename in the pivot */ | |
| 437 | + " fnm TEXT UNIQUE %s," /* The filename in the merged version */ | |
| 438 | + " fnn TEXT UNIQUE %s," /* The filename in the name pivot */ | |
| 377 | 439 | " islinkv BOOLEAN," /* True if current version is a symlink */ |
| 378 | 440 | " islinkm BOOLEAN" /* True if merged version in is a symlink */ |
| 379 | 441 | ");", |
| 380 | - filename_collation(), filename_collation(), filename_collation() | |
| 381 | - ); | |
| 382 | - | |
| 383 | - /* Add files found in V | |
| 384 | - */ | |
| 385 | - db_multi_exec( | |
| 386 | - "INSERT OR IGNORE" | |
| 387 | - " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" | |
| 388 | - " SELECT pathname, pathname, pathname, id, 0, 0, rid, 0, 0, isexe, chnged " | |
| 389 | - " FROM vfile WHERE vid=%d", | |
| 390 | - vid | |
| 391 | - ); | |
| 392 | - | |
| 393 | - /* | |
| 394 | - ** Compute name changes from P->V | |
| 395 | - */ | |
| 396 | - find_filename_changes(pid, vid, 0, &nChng, &aChng, debugFlag ? "P->V" : 0); | |
| 397 | - if( nChng ){ | |
| 398 | - for(i=0; i<nChng; i++){ | |
| 399 | - char *z; | |
| 400 | - z = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]); | |
| 401 | - db_multi_exec( | |
| 402 | - "UPDATE fv SET fnp=%Q, fnm=%Q" | |
| 403 | - " WHERE fn=(SELECT name FROM filename WHERE fnid=%d)", | |
| 404 | - z, z, aChng[i*2+1] | |
| 405 | - ); | |
| 406 | - free(z); | |
| 407 | - } | |
| 408 | - fossil_free(aChng); | |
| 409 | - db_multi_exec("UPDATE fv SET fnm=fnp WHERE fnp!=fn"); | |
| 410 | - } | |
| 411 | - | |
| 412 | - /* Add files found in P but not in V | |
| 413 | - */ | |
| 414 | - db_multi_exec( | |
| 415 | - "INSERT OR IGNORE" | |
| 416 | - " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" | |
| 417 | - " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 " | |
| 418 | - " FROM vfile" | |
| 419 | - " WHERE vid=%d AND pathname %s NOT IN (SELECT fnp FROM fv)", | |
| 420 | - pid, filename_collation() | |
| 421 | - ); | |
| 422 | - | |
| 423 | - /* | |
| 424 | - ** Compute name changes from P->M | |
| 425 | - */ | |
| 426 | - find_filename_changes(pid, mid, 0, &nChng, &aChng, debugFlag ? "P->M" : 0); | |
| 427 | - if( nChng ){ | |
| 428 | - if( nChng>4 ) db_multi_exec("CREATE INDEX fv_fnp ON fv(fnp)"); | |
| 429 | - for(i=0; i<nChng; i++){ | |
| 430 | - db_multi_exec( | |
| 431 | - "UPDATE fv SET fnm=(SELECT name FROM filename WHERE fnid=%d)" | |
| 432 | - " WHERE fnp=(SELECT name FROM filename WHERE fnid=%d)", | |
| 433 | - aChng[i*2+1], aChng[i*2] | |
| 434 | - ); | |
| 435 | - } | |
| 436 | - fossil_free(aChng); | |
| 437 | - } | |
| 438 | - | |
| 439 | - /* Add files found in M but not in P or V. | |
| 440 | - */ | |
| 441 | - db_multi_exec( | |
| 442 | - "INSERT OR IGNORE" | |
| 443 | - " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" | |
| 444 | - " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 " | |
| 445 | - " FROM vfile" | |
| 446 | - " WHERE vid=%d" | |
| 447 | - " AND pathname %s NOT IN (SELECT fnp FROM fv UNION SELECT fnm FROM fv)", | |
| 448 | - mid, filename_collation() | |
| 449 | - ); | |
| 450 | - | |
| 451 | - /* | |
| 452 | - ** Compute the file version ids for P and M. | |
| 453 | - */ | |
| 442 | + filename_collation(), filename_collation(), filename_collation(), | |
| 443 | + filename_collation() | |
| 444 | + ); | |
| 445 | + | |
| 446 | + /* | |
| 447 | + ** Compute name changes from N to V, P, and M | |
| 448 | + */ | |
| 449 | + add_renames("fn", vid, nid, 0, debugFlag ? "N->V" : 0); | |
| 450 | + add_renames("fnp", pid, nid, 0, debugFlag ? "N->P" : 0); | |
| 451 | + add_renames("fnm", mid, nid, backoutFlag, debugFlag ? "N->M" : 0); | |
| 452 | + | |
| 453 | + /* | |
| 454 | + ** Add files found in V | |
| 455 | + */ | |
| 456 | + db_multi_exec( | |
| 457 | + "UPDATE OR IGNORE fv SET fn=coalesce(fn%c,fnn) WHERE fn IS NULL;" | |
| 458 | + "REPLACE INTO fv(fn,fnp,fnm,fnn,idv,ridv,islinkv,isexe,chnged)" | |
| 459 | + " SELECT pathname, fnp, fnm, fnn, id, rid, islink, vf.isexe, vf.chnged" | |
| 460 | + " FROM vfile vf" | |
| 461 | + " LEFT JOIN fv ON fn=coalesce(origname,pathname)" | |
| 462 | + " AND rid>0 AND vf.chnged NOT IN (3,5)" | |
| 463 | + " WHERE vid=%d;", | |
| 464 | + vAncestor, vid | |
| 465 | + ); | |
| 466 | + | |
| 467 | + /* | |
| 468 | + ** Add files found in P | |
| 469 | + */ | |
| 470 | + db_multi_exec( | |
| 471 | + "UPDATE OR IGNORE fv SET fnp=coalesce(fnn," | |
| 472 | + " (SELECT coalesce(origname,pathname) FROM vfile WHERE id=idv))" | |
| 473 | + " WHERE fnp IS NULL;" | |
| 474 | + "INSERT OR IGNORE INTO fv(fnp)" | |
| 475 | + " SELECT coalesce(origname,pathname) FROM vfile WHERE vid=%d;", | |
| 476 | + pid | |
| 477 | + ); | |
| 478 | + | |
| 479 | + /* | |
| 480 | + ** Add files found in M | |
| 481 | + */ | |
| 482 | + db_multi_exec( | |
| 483 | + "UPDATE OR IGNORE fv SET fnm=fnp WHERE fnm IS NULL;" | |
| 484 | + "INSERT OR IGNORE INTO fv(fnm)" | |
| 485 | + " SELECT pathname FROM vfile WHERE vid=%d;", | |
| 486 | + mid | |
| 487 | + ); | |
| 488 | + | |
| 489 | + /* | |
| 490 | + ** Compute the file version ids for P and M | |
| 491 | + */ | |
| 492 | + if( pid==vid ){ | |
| 493 | + db_multi_exec( | |
| 494 | + "UPDATE fv SET idp=idv, ridp=ridv WHERE ridv>0 AND chnged NOT IN (3,5)" | |
| 495 | + ); | |
| 496 | + }else{ | |
| 497 | + db_multi_exec( | |
| 498 | + "UPDATE fv SET" | |
| 499 | + " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0)," | |
| 500 | + " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0)", | |
| 501 | + pid, pid | |
| 502 | + ); | |
| 503 | + } | |
| 454 | 504 | db_multi_exec( |
| 455 | 505 | "UPDATE fv SET" |
| 456 | - " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0)," | |
| 457 | - " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0)," | |
| 458 | 506 | " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0)," |
| 459 | 507 | " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0)," |
| 460 | - " islinkv=coalesce((SELECT islink FROM vfile" | |
| 508 | + " islinkm=coalesce((SELECT islink FROM vfile" | |
| 461 | 509 | " WHERE vid=%d AND fnm=pathname),0)," |
| 462 | - " islinkm=coalesce((SELECT islink FROM vfile" | |
| 463 | - " WHERE vid=%d AND fnm=pathname),0)", | |
| 464 | - pid, pid, mid, mid, vid, mid | |
| 510 | + " isexe=coalesce((SELECT isexe FROM vfile WHERE vid=%d AND fnm=pathname)," | |
| 511 | + " isexe)", | |
| 512 | + mid, mid, mid, mid | |
| 465 | 513 | ); |
| 466 | 514 | |
| 467 | 515 | if( debugFlag ){ |
| 468 | 516 | db_prepare(&q, |
| 469 | 517 | "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, " |
| 470 | - " isexe, islinkv, islinkm FROM fv" | |
| 518 | + " isexe, islinkv, islinkm, fnn FROM fv" | |
| 471 | 519 | ); |
| 472 | 520 | while( db_step(&q)==SQLITE_ROW ){ |
| 473 | 521 | fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d " |
| 474 | 522 | " islinkv=%d islinkm=%d\n", |
| 475 | 523 | db_column_int(&q, 0), |
| @@ -481,14 +529,36 @@ | ||
| 481 | 529 | db_column_int(&q, 9), |
| 482 | 530 | db_column_int(&q, 10)); |
| 483 | 531 | fossil_print(" fn = [%s]\n", db_column_text(&q, 1)); |
| 484 | 532 | fossil_print(" fnp = [%s]\n", db_column_text(&q, 2)); |
| 485 | 533 | fossil_print(" fnm = [%s]\n", db_column_text(&q, 3)); |
| 534 | + fossil_print(" fnn = [%s]\n", db_column_text(&q, 11)); | |
| 486 | 535 | } |
| 487 | 536 | db_finalize(&q); |
| 488 | 537 | } |
| 489 | 538 | |
| 539 | + /* | |
| 540 | + ** Update the execute bit on files where it's changed from P->M but not P->V | |
| 541 | + */ | |
| 542 | + db_prepare(&q, | |
| 543 | + "SELECT idv, fn, fv.isexe FROM fv, vfile p, vfile v" | |
| 544 | + " WHERE p.id=idp AND v.id=idv AND fv.isexe!=p.isexe AND v.isexe=p.isexe" | |
| 545 | + ); | |
| 546 | + while( db_step(&q)==SQLITE_ROW ){ | |
| 547 | + int idv = db_column_int(&q, 0); | |
| 548 | + const char *zName = db_column_text(&q, 1); | |
| 549 | + int isExe = db_column_int(&q, 2); | |
| 550 | + fossil_print("%s %s\n", isExe ? "EXECUTABLE" : "UNEXEC", zName); | |
| 551 | + if( !dryRunFlag ){ | |
| 552 | + char *zFullPath = mprintf("%s/%s", g.zLocalRoot, zName); | |
| 553 | + file_wd_setexe(zFullPath, isExe); | |
| 554 | + free(zFullPath); | |
| 555 | + db_multi_exec("UPDATE vfile SET isexe=%d WHERE id=%d", isExe, idv); | |
| 556 | + } | |
| 557 | + } | |
| 558 | + db_finalize(&q); | |
| 559 | + | |
| 490 | 560 | /* |
| 491 | 561 | ** Find files in M and V but not in P and report conflicts. |
| 492 | 562 | ** The file in M will be ignored. It will be treated as if it |
| 493 | 563 | ** does not exist. |
| 494 | 564 | */ |
| @@ -500,46 +570,10 @@ | ||
| 500 | 570 | char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm); |
| 501 | 571 | fossil_warning("WARNING: no common ancestor for %s", zName); |
| 502 | 572 | free(zName); |
| 503 | 573 | db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm); |
| 504 | 574 | } |
| 505 | - db_finalize(&q); | |
| 506 | - | |
| 507 | - /* | |
| 508 | - ** Add to V files that are not in V or P but are in M | |
| 509 | - */ | |
| 510 | - db_prepare(&q, | |
| 511 | - "SELECT idm, rowid, fnm FROM fv AS x" | |
| 512 | - " WHERE idp=0 AND idv=0 AND idm>0" | |
| 513 | - ); | |
| 514 | - while( db_step(&q)==SQLITE_ROW ){ | |
| 515 | - int idm = db_column_int(&q, 0); | |
| 516 | - int rowid = db_column_int(&q, 1); | |
| 517 | - int idv; | |
| 518 | - const char *zName; | |
| 519 | - char *zFullName; | |
| 520 | - db_multi_exec( | |
| 521 | - "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)" | |
| 522 | - " SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d", | |
| 523 | - vid, integrateFlag?5:3, idm | |
| 524 | - ); | |
| 525 | - idv = db_last_insert_rowid(); | |
| 526 | - db_multi_exec("UPDATE fv SET idv=%d WHERE rowid=%d", idv, rowid); | |
| 527 | - zName = db_column_text(&q, 2); | |
| 528 | - zFullName = mprintf("%s%s", g.zLocalRoot, zName); | |
| 529 | - if( file_wd_isfile_or_link(zFullName) ){ | |
| 530 | - fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName); | |
| 531 | - nOverwrite++; | |
| 532 | - }else{ | |
| 533 | - fossil_print("ADDED %s\n", zName); | |
| 534 | - } | |
| 535 | - fossil_free(zFullName); | |
| 536 | - if( !dryRunFlag ){ | |
| 537 | - undo_save(zName); | |
| 538 | - vfile_to_disk(0, idm, 0, 0); | |
| 539 | - } | |
| 540 | - } | |
| 541 | 575 | db_finalize(&q); |
| 542 | 576 | |
| 543 | 577 | /* |
| 544 | 578 | ** Find files that have changed from P->M but not P->V. |
| 545 | 579 | ** Copy the M content over into V. |
| @@ -661,45 +695,117 @@ | ||
| 661 | 695 | free(zFullPath); |
| 662 | 696 | } |
| 663 | 697 | } |
| 664 | 698 | db_finalize(&q); |
| 665 | 699 | |
| 700 | + /* For certain sets of renames (e.g. A -> B and B -> A), a file that is | |
| 701 | + ** being renamed must first be moved to a temporary location to avoid | |
| 702 | + ** being overwritten by another rename operation. A row is added to the | |
| 703 | + ** TMPRN table for each of these temporary renames. | |
| 704 | + */ | |
| 705 | + db_multi_exec( | |
| 706 | + "DROP TABLE IF EXISTS tmprn;" | |
| 707 | + "CREATE TEMP TABLE tmprn(fn UNIQUE, tmpfn);" | |
| 708 | + ); | |
| 709 | + | |
| 666 | 710 | /* |
| 667 | 711 | ** Rename files that have taken a rename on P->M but which keep the same |
| 668 | 712 | ** name on P->V. If a file is renamed on P->V only or on both P->V and |
| 669 | 713 | ** P->M then we retain the V name of the file. |
| 670 | 714 | */ |
| 671 | 715 | db_prepare(&q, |
| 672 | - "SELECT idv, fnp, fnm FROM fv" | |
| 716 | + "SELECT idv, fnp, fnm, isexe FROM fv" | |
| 673 | 717 | " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp" |
| 674 | 718 | ); |
| 675 | 719 | while( db_step(&q)==SQLITE_ROW ){ |
| 676 | 720 | int idv = db_column_int(&q, 0); |
| 677 | 721 | const char *zOldName = db_column_text(&q, 1); |
| 678 | 722 | const char *zNewName = db_column_text(&q, 2); |
| 723 | + int isExe = db_column_int(&q, 3); | |
| 679 | 724 | fossil_print("RENAME %s -> %s\n", zOldName, zNewName); |
| 680 | 725 | if( !dryRunFlag ) undo_save(zOldName); |
| 681 | 726 | if( !dryRunFlag ) undo_save(zNewName); |
| 682 | 727 | db_multi_exec( |
| 728 | + "UPDATE vfile SET pathname=NULL, origname=pathname" | |
| 729 | + " WHERE vid=%d AND pathname=%Q;" | |
| 683 | 730 | "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)" |
| 684 | - " WHERE id=%d AND vid=%d", zNewName, idv, vid | |
| 731 | + " WHERE id=%d;", | |
| 732 | + vid, zNewName, zNewName, idv | |
| 685 | 733 | ); |
| 686 | 734 | if( !dryRunFlag ){ |
| 687 | - char *zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName); | |
| 688 | - char *zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName); | |
| 735 | + char *zFullOldPath, *zFullNewPath; | |
| 736 | + zFullOldPath = db_text(0,"SELECT tmpfn FROM tmprn WHERE fn=%Q", zOldName); | |
| 737 | + if( !zFullOldPath ){ | |
| 738 | + zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName); | |
| 739 | + } | |
| 740 | + zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName); | |
| 741 | + if( file_wd_size(zFullNewPath)>=0 ){ | |
| 742 | + char zTmpPath[300]; | |
| 743 | + file_tempname(sizeof(zTmpPath), zTmpPath); | |
| 744 | + db_multi_exec("INSERT INTO tmprn(fn,tmpfn) VALUES(%Q,%Q)", | |
| 745 | + zNewName, zTmpPath); | |
| 746 | + if( file_wd_islink(zFullNewPath) ){ | |
| 747 | + symlink_copy(zFullNewPath, zTmpPath); | |
| 748 | + }else{ | |
| 749 | + file_copy(zFullNewPath, zTmpPath); | |
| 750 | + } | |
| 751 | + } | |
| 689 | 752 | if( file_wd_islink(zFullOldPath) ){ |
| 690 | 753 | symlink_copy(zFullOldPath, zFullNewPath); |
| 691 | 754 | }else{ |
| 692 | 755 | file_copy(zFullOldPath, zFullNewPath); |
| 693 | 756 | } |
| 757 | + file_wd_setexe(zFullNewPath, isExe); | |
| 694 | 758 | file_delete(zFullOldPath); |
| 695 | 759 | free(zFullNewPath); |
| 696 | 760 | free(zFullOldPath); |
| 697 | 761 | } |
| 698 | 762 | } |
| 699 | 763 | db_finalize(&q); |
| 700 | 764 | |
| 765 | + /* A file that has been deleted and replaced by a renamed file will have a | |
| 766 | + ** NULL pathname. Change it to something that makes the output of "status" | |
| 767 | + ** and similar commands make sense for such files and that will (most likely) | |
| 768 | + ** not be an actual existing pathname. | |
| 769 | + */ | |
| 770 | + db_multi_exec( | |
| 771 | + "UPDATE vfile SET pathname=origname || ' (overwritten by rename)'" | |
| 772 | + " WHERE pathname IS NULL" | |
| 773 | + ); | |
| 774 | + | |
| 775 | + /* | |
| 776 | + ** Add to V files that are not in V or P but are in M | |
| 777 | + */ | |
| 778 | + db_prepare(&q, | |
| 779 | + "SELECT idm, fnm FROM fv" | |
| 780 | + " WHERE idp=0 AND idv=0 AND idm>0" | |
| 781 | + ); | |
| 782 | + while( db_step(&q)==SQLITE_ROW ){ | |
| 783 | + int idm = db_column_int(&q, 0); | |
| 784 | + const char *zName; | |
| 785 | + char *zFullName; | |
| 786 | + db_multi_exec( | |
| 787 | + "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)" | |
| 788 | + " SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d", | |
| 789 | + vid, integrateFlag?5:3, idm | |
| 790 | + ); | |
| 791 | + zName = db_column_text(&q, 1); | |
| 792 | + zFullName = mprintf("%s%s", g.zLocalRoot, zName); | |
| 793 | + if( file_wd_isfile_or_link(zFullName) | |
| 794 | + && !db_exists("SELECT 1 FROM fv WHERE fn=%Q", zName) ){ | |
| 795 | + fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName); | |
| 796 | + nOverwrite++; | |
| 797 | + }else{ | |
| 798 | + fossil_print("ADDED %s\n", zName); | |
| 799 | + } | |
| 800 | + fossil_free(zFullName); | |
| 801 | + if( !dryRunFlag ){ | |
| 802 | + undo_save(zName); | |
| 803 | + vfile_to_disk(0, idm, 0, 0); | |
| 804 | + } | |
| 805 | + } | |
| 806 | + db_finalize(&q); | |
| 701 | 807 | |
| 702 | 808 | /* Report on conflicts |
| 703 | 809 | */ |
| 704 | 810 | if( nConflict ){ |
| 705 | 811 | fossil_warning("WARNING: %d merge conflicts", nConflict); |
| 706 | 812 |
| --- src/merge.c | |
| +++ src/merge.c | |
| @@ -128,10 +128,46 @@ | |
| 128 | } |
| 129 | } |
| 130 | db_finalize(&q); |
| 131 | return fForkSeen; |
| 132 | } |
| 133 | |
| 134 | /* |
| 135 | ** COMMAND: merge |
| 136 | ** |
| 137 | ** Usage: %fossil merge ?OPTIONS? ?VERSION? |
| @@ -178,10 +214,11 @@ | |
| 178 | */ |
| 179 | void merge_cmd(void){ |
| 180 | int vid; /* Current version "V" */ |
| 181 | int mid; /* Version we are merging from "M" */ |
| 182 | int pid; /* The pivot version - most recent common ancestor P */ |
| 183 | int verboseFlag; /* True if the -v|--verbose option is present */ |
| 184 | int integrateFlag; /* True if the --integrate option is present */ |
| 185 | int pickFlag; /* True if the --cherrypick option is present */ |
| 186 | int backoutFlag; /* True if the --backout option is present */ |
| 187 | int dryRunFlag; /* True if the --dry-run or -n option is present */ |
| @@ -188,23 +225,22 @@ | |
| 188 | int forceFlag; /* True if the --force or -f option is present */ |
| 189 | int forceMissingFlag; /* True if the --force-missing option is present */ |
| 190 | const char *zBinGlob; /* The value of --binary */ |
| 191 | const char *zPivot; /* The value of --baseline */ |
| 192 | int debugFlag; /* True if --debug is present */ |
| 193 | int nChng; /* Number of file name changes */ |
| 194 | int *aChng; /* An array of file name changes */ |
| 195 | int i; /* Loop counter */ |
| 196 | int nConflict = 0; /* Number of conflicts seen */ |
| 197 | int nOverwrite = 0; /* Number of unmanaged files overwritten */ |
| 198 | Stmt q; |
| 199 | |
| 200 | |
| 201 | /* Notation: |
| 202 | ** |
| 203 | ** V The current checkout |
| 204 | ** M The version being merged in |
| 205 | ** P The "pivot" - the most recent common ancestor of V and M. |
| 206 | */ |
| 207 | |
| 208 | undo_capture_command_line(); |
| 209 | verboseFlag = find_option("verbose","v",0)!=0; |
| 210 | forceMissingFlag = find_option("force-missing",0,0)!=0; |
| @@ -291,37 +327,49 @@ | |
| 291 | fossil_fatal("not a version: %s", zPivot); |
| 292 | } |
| 293 | if( pickFlag ){ |
| 294 | fossil_fatal("incompatible options: --cherrypick & --baseline"); |
| 295 | } |
| 296 | }else if( pickFlag || backoutFlag ){ |
| 297 | if( integrateFlag ){ |
| 298 | fossil_fatal("incompatible options: --integrate & --cherrypick or --backout"); |
| 299 | } |
| 300 | pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid); |
| 301 | if( pid<=0 ){ |
| 302 | fossil_fatal("cannot find an ancestor for %s", g.argv[2]); |
| 303 | } |
| 304 | }else{ |
| 305 | pivot_set_primary(mid); |
| 306 | pivot_set_secondary(vid); |
| 307 | db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0"); |
| 308 | while( db_step(&q)==SQLITE_ROW ){ |
| 309 | pivot_set_secondary(db_column_int(&q,0)); |
| 310 | } |
| 311 | db_finalize(&q); |
| 312 | pid = pivot_find(); |
| 313 | if( pid<=0 ){ |
| 314 | fossil_fatal("cannot find a common ancestor between the current " |
| 315 | "checkout and %s", g.argv[2]); |
| 316 | } |
| 317 | } |
| 318 | if( backoutFlag ){ |
| 319 | int t = pid; |
| 320 | pid = mid; |
| 321 | mid = t; |
| 322 | } |
| 323 | if( !is_a_version(pid) ){ |
| 324 | fossil_fatal("not a version: record #%d", pid); |
| 325 | } |
| 326 | if( !forceFlag && mid==pid ){ |
| 327 | fossil_print("Merge skipped because it is a no-op. " |
| @@ -343,12 +391,25 @@ | |
| 343 | fossil_fatal("missing content, unable to merge"); |
| 344 | } |
| 345 | if( load_vfile_from_rid(pid) && !forceMissingFlag ){ |
| 346 | fossil_fatal("missing content, unable to merge"); |
| 347 | } |
| 348 | if( debugFlag ){ |
| 349 | char *z; |
| 350 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid); |
| 351 | fossil_print("P=%d %z\n", pid, z); |
| 352 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid); |
| 353 | fossil_print("M=%d %z\n", mid, z); |
| 354 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid); |
| @@ -361,115 +422,102 @@ | |
| 361 | ** in the current checkout, the pivot, and the version being merged. |
| 362 | */ |
| 363 | db_multi_exec( |
| 364 | "DROP TABLE IF EXISTS fv;" |
| 365 | "CREATE TEMP TABLE fv(" |
| 366 | " fn TEXT PRIMARY KEY %s," /* The filename */ |
| 367 | " idv INTEGER," /* VFILE entry for current version */ |
| 368 | " idp INTEGER," /* VFILE entry for the pivot */ |
| 369 | " idm INTEGER," /* VFILE entry for version merging in */ |
| 370 | " chnged BOOLEAN," /* True if current version has been edited */ |
| 371 | " ridv INTEGER," /* Record ID for current version */ |
| 372 | " ridp INTEGER," /* Record ID for pivot */ |
| 373 | " ridm INTEGER," /* Record ID for merge */ |
| 374 | " isexe BOOLEAN," /* Execute permission enabled */ |
| 375 | " fnp TEXT %s," /* The filename in the pivot */ |
| 376 | " fnm TEXT %s," /* the filename in the merged version */ |
| 377 | " islinkv BOOLEAN," /* True if current version is a symlink */ |
| 378 | " islinkm BOOLEAN" /* True if merged version in is a symlink */ |
| 379 | ");", |
| 380 | filename_collation(), filename_collation(), filename_collation() |
| 381 | ); |
| 382 | |
| 383 | /* Add files found in V |
| 384 | */ |
| 385 | db_multi_exec( |
| 386 | "INSERT OR IGNORE" |
| 387 | " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" |
| 388 | " SELECT pathname, pathname, pathname, id, 0, 0, rid, 0, 0, isexe, chnged " |
| 389 | " FROM vfile WHERE vid=%d", |
| 390 | vid |
| 391 | ); |
| 392 | |
| 393 | /* |
| 394 | ** Compute name changes from P->V |
| 395 | */ |
| 396 | find_filename_changes(pid, vid, 0, &nChng, &aChng, debugFlag ? "P->V" : 0); |
| 397 | if( nChng ){ |
| 398 | for(i=0; i<nChng; i++){ |
| 399 | char *z; |
| 400 | z = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]); |
| 401 | db_multi_exec( |
| 402 | "UPDATE fv SET fnp=%Q, fnm=%Q" |
| 403 | " WHERE fn=(SELECT name FROM filename WHERE fnid=%d)", |
| 404 | z, z, aChng[i*2+1] |
| 405 | ); |
| 406 | free(z); |
| 407 | } |
| 408 | fossil_free(aChng); |
| 409 | db_multi_exec("UPDATE fv SET fnm=fnp WHERE fnp!=fn"); |
| 410 | } |
| 411 | |
| 412 | /* Add files found in P but not in V |
| 413 | */ |
| 414 | db_multi_exec( |
| 415 | "INSERT OR IGNORE" |
| 416 | " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" |
| 417 | " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 " |
| 418 | " FROM vfile" |
| 419 | " WHERE vid=%d AND pathname %s NOT IN (SELECT fnp FROM fv)", |
| 420 | pid, filename_collation() |
| 421 | ); |
| 422 | |
| 423 | /* |
| 424 | ** Compute name changes from P->M |
| 425 | */ |
| 426 | find_filename_changes(pid, mid, 0, &nChng, &aChng, debugFlag ? "P->M" : 0); |
| 427 | if( nChng ){ |
| 428 | if( nChng>4 ) db_multi_exec("CREATE INDEX fv_fnp ON fv(fnp)"); |
| 429 | for(i=0; i<nChng; i++){ |
| 430 | db_multi_exec( |
| 431 | "UPDATE fv SET fnm=(SELECT name FROM filename WHERE fnid=%d)" |
| 432 | " WHERE fnp=(SELECT name FROM filename WHERE fnid=%d)", |
| 433 | aChng[i*2+1], aChng[i*2] |
| 434 | ); |
| 435 | } |
| 436 | fossil_free(aChng); |
| 437 | } |
| 438 | |
| 439 | /* Add files found in M but not in P or V. |
| 440 | */ |
| 441 | db_multi_exec( |
| 442 | "INSERT OR IGNORE" |
| 443 | " INTO fv(fn,fnp,fnm,idv,idp,idm,ridv,ridp,ridm,isexe,chnged)" |
| 444 | " SELECT pathname, pathname, pathname, 0, 0, 0, 0, 0, 0, isexe, 0 " |
| 445 | " FROM vfile" |
| 446 | " WHERE vid=%d" |
| 447 | " AND pathname %s NOT IN (SELECT fnp FROM fv UNION SELECT fnm FROM fv)", |
| 448 | mid, filename_collation() |
| 449 | ); |
| 450 | |
| 451 | /* |
| 452 | ** Compute the file version ids for P and M. |
| 453 | */ |
| 454 | db_multi_exec( |
| 455 | "UPDATE fv SET" |
| 456 | " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0)," |
| 457 | " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0)," |
| 458 | " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0)," |
| 459 | " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0)," |
| 460 | " islinkv=coalesce((SELECT islink FROM vfile" |
| 461 | " WHERE vid=%d AND fnm=pathname),0)," |
| 462 | " islinkm=coalesce((SELECT islink FROM vfile" |
| 463 | " WHERE vid=%d AND fnm=pathname),0)", |
| 464 | pid, pid, mid, mid, vid, mid |
| 465 | ); |
| 466 | |
| 467 | if( debugFlag ){ |
| 468 | db_prepare(&q, |
| 469 | "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, " |
| 470 | " isexe, islinkv, islinkm FROM fv" |
| 471 | ); |
| 472 | while( db_step(&q)==SQLITE_ROW ){ |
| 473 | fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d " |
| 474 | " islinkv=%d islinkm=%d\n", |
| 475 | db_column_int(&q, 0), |
| @@ -481,14 +529,36 @@ | |
| 481 | db_column_int(&q, 9), |
| 482 | db_column_int(&q, 10)); |
| 483 | fossil_print(" fn = [%s]\n", db_column_text(&q, 1)); |
| 484 | fossil_print(" fnp = [%s]\n", db_column_text(&q, 2)); |
| 485 | fossil_print(" fnm = [%s]\n", db_column_text(&q, 3)); |
| 486 | } |
| 487 | db_finalize(&q); |
| 488 | } |
| 489 | |
| 490 | /* |
| 491 | ** Find files in M and V but not in P and report conflicts. |
| 492 | ** The file in M will be ignored. It will be treated as if it |
| 493 | ** does not exist. |
| 494 | */ |
| @@ -500,46 +570,10 @@ | |
| 500 | char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm); |
| 501 | fossil_warning("WARNING: no common ancestor for %s", zName); |
| 502 | free(zName); |
| 503 | db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm); |
| 504 | } |
| 505 | db_finalize(&q); |
| 506 | |
| 507 | /* |
| 508 | ** Add to V files that are not in V or P but are in M |
| 509 | */ |
| 510 | db_prepare(&q, |
| 511 | "SELECT idm, rowid, fnm FROM fv AS x" |
| 512 | " WHERE idp=0 AND idv=0 AND idm>0" |
| 513 | ); |
| 514 | while( db_step(&q)==SQLITE_ROW ){ |
| 515 | int idm = db_column_int(&q, 0); |
| 516 | int rowid = db_column_int(&q, 1); |
| 517 | int idv; |
| 518 | const char *zName; |
| 519 | char *zFullName; |
| 520 | db_multi_exec( |
| 521 | "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)" |
| 522 | " SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d", |
| 523 | vid, integrateFlag?5:3, idm |
| 524 | ); |
| 525 | idv = db_last_insert_rowid(); |
| 526 | db_multi_exec("UPDATE fv SET idv=%d WHERE rowid=%d", idv, rowid); |
| 527 | zName = db_column_text(&q, 2); |
| 528 | zFullName = mprintf("%s%s", g.zLocalRoot, zName); |
| 529 | if( file_wd_isfile_or_link(zFullName) ){ |
| 530 | fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName); |
| 531 | nOverwrite++; |
| 532 | }else{ |
| 533 | fossil_print("ADDED %s\n", zName); |
| 534 | } |
| 535 | fossil_free(zFullName); |
| 536 | if( !dryRunFlag ){ |
| 537 | undo_save(zName); |
| 538 | vfile_to_disk(0, idm, 0, 0); |
| 539 | } |
| 540 | } |
| 541 | db_finalize(&q); |
| 542 | |
| 543 | /* |
| 544 | ** Find files that have changed from P->M but not P->V. |
| 545 | ** Copy the M content over into V. |
| @@ -661,45 +695,117 @@ | |
| 661 | free(zFullPath); |
| 662 | } |
| 663 | } |
| 664 | db_finalize(&q); |
| 665 | |
| 666 | /* |
| 667 | ** Rename files that have taken a rename on P->M but which keep the same |
| 668 | ** name on P->V. If a file is renamed on P->V only or on both P->V and |
| 669 | ** P->M then we retain the V name of the file. |
| 670 | */ |
| 671 | db_prepare(&q, |
| 672 | "SELECT idv, fnp, fnm FROM fv" |
| 673 | " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp" |
| 674 | ); |
| 675 | while( db_step(&q)==SQLITE_ROW ){ |
| 676 | int idv = db_column_int(&q, 0); |
| 677 | const char *zOldName = db_column_text(&q, 1); |
| 678 | const char *zNewName = db_column_text(&q, 2); |
| 679 | fossil_print("RENAME %s -> %s\n", zOldName, zNewName); |
| 680 | if( !dryRunFlag ) undo_save(zOldName); |
| 681 | if( !dryRunFlag ) undo_save(zNewName); |
| 682 | db_multi_exec( |
| 683 | "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)" |
| 684 | " WHERE id=%d AND vid=%d", zNewName, idv, vid |
| 685 | ); |
| 686 | if( !dryRunFlag ){ |
| 687 | char *zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName); |
| 688 | char *zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName); |
| 689 | if( file_wd_islink(zFullOldPath) ){ |
| 690 | symlink_copy(zFullOldPath, zFullNewPath); |
| 691 | }else{ |
| 692 | file_copy(zFullOldPath, zFullNewPath); |
| 693 | } |
| 694 | file_delete(zFullOldPath); |
| 695 | free(zFullNewPath); |
| 696 | free(zFullOldPath); |
| 697 | } |
| 698 | } |
| 699 | db_finalize(&q); |
| 700 | |
| 701 | |
| 702 | /* Report on conflicts |
| 703 | */ |
| 704 | if( nConflict ){ |
| 705 | fossil_warning("WARNING: %d merge conflicts", nConflict); |
| 706 |
| --- src/merge.c | |
| +++ src/merge.c | |
| @@ -128,10 +128,46 @@ | |
| 128 | } |
| 129 | } |
| 130 | db_finalize(&q); |
| 131 | return fForkSeen; |
| 132 | } |
| 133 | |
| 134 | /* |
| 135 | ** Add an entry to the FV table for all files renamed between |
| 136 | ** version N and the version specified by vid. |
| 137 | */ |
| 138 | static void add_renames( |
| 139 | const char *zFnCol, /* The FV column for the filename in vid */ |
| 140 | int vid, /* The desired version's RID */ |
| 141 | int nid, /* Version N's RID */ |
| 142 | int revOk, /* Ok to move backwards (child->parent) if true */ |
| 143 | const char *zDebug /* Generate trace output if not NULL */ |
| 144 | ){ |
| 145 | int nChng; /* Number of file name changes */ |
| 146 | int *aChng; /* An array of file name changes */ |
| 147 | int i; /* Loop counter */ |
| 148 | find_filename_changes(nid, vid, revOk, &nChng, &aChng, zDebug); |
| 149 | if( nChng==0 ) return; |
| 150 | for(i=0; i<nChng; i++){ |
| 151 | char *zN, *zV; |
| 152 | zN = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2]); |
| 153 | zV = db_text(0, "SELECT name FROM filename WHERE fnid=%d", aChng[i*2+1]); |
| 154 | db_multi_exec( |
| 155 | "INSERT OR IGNORE INTO fv(%s,fnn) VALUES(%Q,%Q)", |
| 156 | zFnCol /*safe-for-%s*/, zV, zN |
| 157 | ); |
| 158 | if( db_changes()==0 ){ |
| 159 | db_multi_exec( |
| 160 | "UPDATE fv SET %s=%Q WHERE fnn=%Q", |
| 161 | zFnCol /*safe-for-%s*/, zV, zN |
| 162 | ); |
| 163 | } |
| 164 | free(zN); |
| 165 | free(zV); |
| 166 | } |
| 167 | free(aChng); |
| 168 | } |
| 169 | |
| 170 | /* |
| 171 | ** COMMAND: merge |
| 172 | ** |
| 173 | ** Usage: %fossil merge ?OPTIONS? ?VERSION? |
| @@ -178,10 +214,11 @@ | |
| 214 | */ |
| 215 | void merge_cmd(void){ |
| 216 | int vid; /* Current version "V" */ |
| 217 | int mid; /* Version we are merging from "M" */ |
| 218 | int pid; /* The pivot version - most recent common ancestor P */ |
| 219 | int nid = 0; /* The name pivot version "N" */ |
| 220 | int verboseFlag; /* True if the -v|--verbose option is present */ |
| 221 | int integrateFlag; /* True if the --integrate option is present */ |
| 222 | int pickFlag; /* True if the --cherrypick option is present */ |
| 223 | int backoutFlag; /* True if the --backout option is present */ |
| 224 | int dryRunFlag; /* True if the --dry-run or -n option is present */ |
| @@ -188,23 +225,22 @@ | |
| 225 | int forceFlag; /* True if the --force or -f option is present */ |
| 226 | int forceMissingFlag; /* True if the --force-missing option is present */ |
| 227 | const char *zBinGlob; /* The value of --binary */ |
| 228 | const char *zPivot; /* The value of --baseline */ |
| 229 | int debugFlag; /* True if --debug is present */ |
| 230 | int nConflict = 0; /* Number of conflicts seen */ |
| 231 | int nOverwrite = 0; /* Number of unmanaged files overwritten */ |
| 232 | char vAncestor = 'p'; /* If P is an ancestor of V then 'p', else 'n' */ |
| 233 | Stmt q; |
| 234 | |
| 235 | |
| 236 | /* Notation: |
| 237 | ** |
| 238 | ** V The current checkout |
| 239 | ** M The version being merged in |
| 240 | ** P The "pivot" - the most recent common ancestor of V and M. |
| 241 | ** N The "name pivot" - for detecting renames |
| 242 | */ |
| 243 | |
| 244 | undo_capture_command_line(); |
| 245 | verboseFlag = find_option("verbose","v",0)!=0; |
| 246 | forceMissingFlag = find_option("force-missing",0,0)!=0; |
| @@ -291,37 +327,49 @@ | |
| 327 | fossil_fatal("not a version: %s", zPivot); |
| 328 | } |
| 329 | if( pickFlag ){ |
| 330 | fossil_fatal("incompatible options: --cherrypick & --baseline"); |
| 331 | } |
| 332 | } |
| 333 | if( pickFlag || backoutFlag ){ |
| 334 | if( integrateFlag ){ |
| 335 | fossil_fatal("incompatible options: --integrate & --cherrypick or --backout"); |
| 336 | } |
| 337 | pid = db_int(0, "SELECT pid FROM plink WHERE cid=%d AND isprim", mid); |
| 338 | if( pid<=0 ){ |
| 339 | fossil_fatal("cannot find an ancestor for %s", g.argv[2]); |
| 340 | } |
| 341 | }else{ |
| 342 | if( !zPivot ){ |
| 343 | pivot_set_primary(mid); |
| 344 | pivot_set_secondary(vid); |
| 345 | db_prepare(&q, "SELECT merge FROM vmerge WHERE id=0"); |
| 346 | while( db_step(&q)==SQLITE_ROW ){ |
| 347 | pivot_set_secondary(db_column_int(&q,0)); |
| 348 | } |
| 349 | db_finalize(&q); |
| 350 | pid = pivot_find(0); |
| 351 | if( pid<=0 ){ |
| 352 | fossil_fatal("cannot find a common ancestor between the current " |
| 353 | "checkout and %s", g.argv[2]); |
| 354 | } |
| 355 | } |
| 356 | pivot_set_primary(mid); |
| 357 | pivot_set_secondary(vid); |
| 358 | nid = pivot_find(1); |
| 359 | if( nid!=pid ){ |
| 360 | pivot_set_primary(nid); |
| 361 | pivot_set_secondary(pid); |
| 362 | nid = pivot_find(1); |
| 363 | } |
| 364 | } |
| 365 | if( backoutFlag ){ |
| 366 | int t = pid; |
| 367 | pid = mid; |
| 368 | mid = t; |
| 369 | } |
| 370 | if( nid==0 ) nid = pid; |
| 371 | if( !is_a_version(pid) ){ |
| 372 | fossil_fatal("not a version: record #%d", pid); |
| 373 | } |
| 374 | if( !forceFlag && mid==pid ){ |
| 375 | fossil_print("Merge skipped because it is a no-op. " |
| @@ -343,12 +391,25 @@ | |
| 391 | fossil_fatal("missing content, unable to merge"); |
| 392 | } |
| 393 | if( load_vfile_from_rid(pid) && !forceMissingFlag ){ |
| 394 | fossil_fatal("missing content, unable to merge"); |
| 395 | } |
| 396 | if( zPivot ){ |
| 397 | vAncestor = db_exists( |
| 398 | "WITH RECURSIVE ancestor(id) AS (" |
| 399 | " VALUES(%d)" |
| 400 | " UNION ALL" |
| 401 | " SELECT pid FROM plink, ancestor" |
| 402 | " WHERE cid=ancestor.id AND pid!=%d AND cid!=%d)" |
| 403 | "SELECT 1 FROM ancestor WHERE id=%d LIMIT 1", |
| 404 | vid, nid, pid, pid |
| 405 | ) ? 'p' : 'n'; |
| 406 | } |
| 407 | if( debugFlag ){ |
| 408 | char *z; |
| 409 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", nid); |
| 410 | fossil_print("N=%d %z\n", nid, z); |
| 411 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", pid); |
| 412 | fossil_print("P=%d %z\n", pid, z); |
| 413 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", mid); |
| 414 | fossil_print("M=%d %z\n", mid, z); |
| 415 | z = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", vid); |
| @@ -361,115 +422,102 @@ | |
| 422 | ** in the current checkout, the pivot, and the version being merged. |
| 423 | */ |
| 424 | db_multi_exec( |
| 425 | "DROP TABLE IF EXISTS fv;" |
| 426 | "CREATE TEMP TABLE fv(" |
| 427 | " fn TEXT UNIQUE %s," /* The filename */ |
| 428 | " idv INTEGER DEFAULT 0," /* VFILE entry for current version */ |
| 429 | " idp INTEGER DEFAULT 0," /* VFILE entry for the pivot */ |
| 430 | " idm INTEGER DEFAULT 0," /* VFILE entry for version merging in */ |
| 431 | " chnged BOOLEAN," /* True if current version has been edited */ |
| 432 | " ridv INTEGER DEFAULT 0," /* Record ID for current version */ |
| 433 | " ridp INTEGER DEFAULT 0," /* Record ID for pivot */ |
| 434 | " ridm INTEGER DEFAULT 0," /* Record ID for merge */ |
| 435 | " isexe BOOLEAN," /* Execute permission enabled */ |
| 436 | " fnp TEXT UNIQUE %s," /* The filename in the pivot */ |
| 437 | " fnm TEXT UNIQUE %s," /* The filename in the merged version */ |
| 438 | " fnn TEXT UNIQUE %s," /* The filename in the name pivot */ |
| 439 | " islinkv BOOLEAN," /* True if current version is a symlink */ |
| 440 | " islinkm BOOLEAN" /* True if merged version in is a symlink */ |
| 441 | ");", |
| 442 | filename_collation(), filename_collation(), filename_collation(), |
| 443 | filename_collation() |
| 444 | ); |
| 445 | |
| 446 | /* |
| 447 | ** Compute name changes from N to V, P, and M |
| 448 | */ |
| 449 | add_renames("fn", vid, nid, 0, debugFlag ? "N->V" : 0); |
| 450 | add_renames("fnp", pid, nid, 0, debugFlag ? "N->P" : 0); |
| 451 | add_renames("fnm", mid, nid, backoutFlag, debugFlag ? "N->M" : 0); |
| 452 | |
| 453 | /* |
| 454 | ** Add files found in V |
| 455 | */ |
| 456 | db_multi_exec( |
| 457 | "UPDATE OR IGNORE fv SET fn=coalesce(fn%c,fnn) WHERE fn IS NULL;" |
| 458 | "REPLACE INTO fv(fn,fnp,fnm,fnn,idv,ridv,islinkv,isexe,chnged)" |
| 459 | " SELECT pathname, fnp, fnm, fnn, id, rid, islink, vf.isexe, vf.chnged" |
| 460 | " FROM vfile vf" |
| 461 | " LEFT JOIN fv ON fn=coalesce(origname,pathname)" |
| 462 | " AND rid>0 AND vf.chnged NOT IN (3,5)" |
| 463 | " WHERE vid=%d;", |
| 464 | vAncestor, vid |
| 465 | ); |
| 466 | |
| 467 | /* |
| 468 | ** Add files found in P |
| 469 | */ |
| 470 | db_multi_exec( |
| 471 | "UPDATE OR IGNORE fv SET fnp=coalesce(fnn," |
| 472 | " (SELECT coalesce(origname,pathname) FROM vfile WHERE id=idv))" |
| 473 | " WHERE fnp IS NULL;" |
| 474 | "INSERT OR IGNORE INTO fv(fnp)" |
| 475 | " SELECT coalesce(origname,pathname) FROM vfile WHERE vid=%d;", |
| 476 | pid |
| 477 | ); |
| 478 | |
| 479 | /* |
| 480 | ** Add files found in M |
| 481 | */ |
| 482 | db_multi_exec( |
| 483 | "UPDATE OR IGNORE fv SET fnm=fnp WHERE fnm IS NULL;" |
| 484 | "INSERT OR IGNORE INTO fv(fnm)" |
| 485 | " SELECT pathname FROM vfile WHERE vid=%d;", |
| 486 | mid |
| 487 | ); |
| 488 | |
| 489 | /* |
| 490 | ** Compute the file version ids for P and M |
| 491 | */ |
| 492 | if( pid==vid ){ |
| 493 | db_multi_exec( |
| 494 | "UPDATE fv SET idp=idv, ridp=ridv WHERE ridv>0 AND chnged NOT IN (3,5)" |
| 495 | ); |
| 496 | }else{ |
| 497 | db_multi_exec( |
| 498 | "UPDATE fv SET" |
| 499 | " idp=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnp=pathname),0)," |
| 500 | " ridp=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnp=pathname),0)", |
| 501 | pid, pid |
| 502 | ); |
| 503 | } |
| 504 | db_multi_exec( |
| 505 | "UPDATE fv SET" |
| 506 | " idm=coalesce((SELECT id FROM vfile WHERE vid=%d AND fnm=pathname),0)," |
| 507 | " ridm=coalesce((SELECT rid FROM vfile WHERE vid=%d AND fnm=pathname),0)," |
| 508 | " islinkm=coalesce((SELECT islink FROM vfile" |
| 509 | " WHERE vid=%d AND fnm=pathname),0)," |
| 510 | " isexe=coalesce((SELECT isexe FROM vfile WHERE vid=%d AND fnm=pathname)," |
| 511 | " isexe)", |
| 512 | mid, mid, mid, mid |
| 513 | ); |
| 514 | |
| 515 | if( debugFlag ){ |
| 516 | db_prepare(&q, |
| 517 | "SELECT rowid, fn, fnp, fnm, chnged, ridv, ridp, ridm, " |
| 518 | " isexe, islinkv, islinkm, fnn FROM fv" |
| 519 | ); |
| 520 | while( db_step(&q)==SQLITE_ROW ){ |
| 521 | fossil_print("%3d: ridv=%-4d ridp=%-4d ridm=%-4d chnged=%d isexe=%d " |
| 522 | " islinkv=%d islinkm=%d\n", |
| 523 | db_column_int(&q, 0), |
| @@ -481,14 +529,36 @@ | |
| 529 | db_column_int(&q, 9), |
| 530 | db_column_int(&q, 10)); |
| 531 | fossil_print(" fn = [%s]\n", db_column_text(&q, 1)); |
| 532 | fossil_print(" fnp = [%s]\n", db_column_text(&q, 2)); |
| 533 | fossil_print(" fnm = [%s]\n", db_column_text(&q, 3)); |
| 534 | fossil_print(" fnn = [%s]\n", db_column_text(&q, 11)); |
| 535 | } |
| 536 | db_finalize(&q); |
| 537 | } |
| 538 | |
| 539 | /* |
| 540 | ** Update the execute bit on files where it's changed from P->M but not P->V |
| 541 | */ |
| 542 | db_prepare(&q, |
| 543 | "SELECT idv, fn, fv.isexe FROM fv, vfile p, vfile v" |
| 544 | " WHERE p.id=idp AND v.id=idv AND fv.isexe!=p.isexe AND v.isexe=p.isexe" |
| 545 | ); |
| 546 | while( db_step(&q)==SQLITE_ROW ){ |
| 547 | int idv = db_column_int(&q, 0); |
| 548 | const char *zName = db_column_text(&q, 1); |
| 549 | int isExe = db_column_int(&q, 2); |
| 550 | fossil_print("%s %s\n", isExe ? "EXECUTABLE" : "UNEXEC", zName); |
| 551 | if( !dryRunFlag ){ |
| 552 | char *zFullPath = mprintf("%s/%s", g.zLocalRoot, zName); |
| 553 | file_wd_setexe(zFullPath, isExe); |
| 554 | free(zFullPath); |
| 555 | db_multi_exec("UPDATE vfile SET isexe=%d WHERE id=%d", isExe, idv); |
| 556 | } |
| 557 | } |
| 558 | db_finalize(&q); |
| 559 | |
| 560 | /* |
| 561 | ** Find files in M and V but not in P and report conflicts. |
| 562 | ** The file in M will be ignored. It will be treated as if it |
| 563 | ** does not exist. |
| 564 | */ |
| @@ -500,46 +570,10 @@ | |
| 570 | char *zName = db_text(0, "SELECT pathname FROM vfile WHERE id=%d", idm); |
| 571 | fossil_warning("WARNING: no common ancestor for %s", zName); |
| 572 | free(zName); |
| 573 | db_multi_exec("UPDATE fv SET idm=0 WHERE idm=%d", idm); |
| 574 | } |
| 575 | db_finalize(&q); |
| 576 | |
| 577 | /* |
| 578 | ** Find files that have changed from P->M but not P->V. |
| 579 | ** Copy the M content over into V. |
| @@ -661,45 +695,117 @@ | |
| 695 | free(zFullPath); |
| 696 | } |
| 697 | } |
| 698 | db_finalize(&q); |
| 699 | |
| 700 | /* For certain sets of renames (e.g. A -> B and B -> A), a file that is |
| 701 | ** being renamed must first be moved to a temporary location to avoid |
| 702 | ** being overwritten by another rename operation. A row is added to the |
| 703 | ** TMPRN table for each of these temporary renames. |
| 704 | */ |
| 705 | db_multi_exec( |
| 706 | "DROP TABLE IF EXISTS tmprn;" |
| 707 | "CREATE TEMP TABLE tmprn(fn UNIQUE, tmpfn);" |
| 708 | ); |
| 709 | |
| 710 | /* |
| 711 | ** Rename files that have taken a rename on P->M but which keep the same |
| 712 | ** name on P->V. If a file is renamed on P->V only or on both P->V and |
| 713 | ** P->M then we retain the V name of the file. |
| 714 | */ |
| 715 | db_prepare(&q, |
| 716 | "SELECT idv, fnp, fnm, isexe FROM fv" |
| 717 | " WHERE idv>0 AND idp>0 AND idm>0 AND fnp=fn AND fnm!=fnp" |
| 718 | ); |
| 719 | while( db_step(&q)==SQLITE_ROW ){ |
| 720 | int idv = db_column_int(&q, 0); |
| 721 | const char *zOldName = db_column_text(&q, 1); |
| 722 | const char *zNewName = db_column_text(&q, 2); |
| 723 | int isExe = db_column_int(&q, 3); |
| 724 | fossil_print("RENAME %s -> %s\n", zOldName, zNewName); |
| 725 | if( !dryRunFlag ) undo_save(zOldName); |
| 726 | if( !dryRunFlag ) undo_save(zNewName); |
| 727 | db_multi_exec( |
| 728 | "UPDATE vfile SET pathname=NULL, origname=pathname" |
| 729 | " WHERE vid=%d AND pathname=%Q;" |
| 730 | "UPDATE vfile SET pathname=%Q, origname=coalesce(origname,pathname)" |
| 731 | " WHERE id=%d;", |
| 732 | vid, zNewName, zNewName, idv |
| 733 | ); |
| 734 | if( !dryRunFlag ){ |
| 735 | char *zFullOldPath, *zFullNewPath; |
| 736 | zFullOldPath = db_text(0,"SELECT tmpfn FROM tmprn WHERE fn=%Q", zOldName); |
| 737 | if( !zFullOldPath ){ |
| 738 | zFullOldPath = mprintf("%s%s", g.zLocalRoot, zOldName); |
| 739 | } |
| 740 | zFullNewPath = mprintf("%s%s", g.zLocalRoot, zNewName); |
| 741 | if( file_wd_size(zFullNewPath)>=0 ){ |
| 742 | char zTmpPath[300]; |
| 743 | file_tempname(sizeof(zTmpPath), zTmpPath); |
| 744 | db_multi_exec("INSERT INTO tmprn(fn,tmpfn) VALUES(%Q,%Q)", |
| 745 | zNewName, zTmpPath); |
| 746 | if( file_wd_islink(zFullNewPath) ){ |
| 747 | symlink_copy(zFullNewPath, zTmpPath); |
| 748 | }else{ |
| 749 | file_copy(zFullNewPath, zTmpPath); |
| 750 | } |
| 751 | } |
| 752 | if( file_wd_islink(zFullOldPath) ){ |
| 753 | symlink_copy(zFullOldPath, zFullNewPath); |
| 754 | }else{ |
| 755 | file_copy(zFullOldPath, zFullNewPath); |
| 756 | } |
| 757 | file_wd_setexe(zFullNewPath, isExe); |
| 758 | file_delete(zFullOldPath); |
| 759 | free(zFullNewPath); |
| 760 | free(zFullOldPath); |
| 761 | } |
| 762 | } |
| 763 | db_finalize(&q); |
| 764 | |
| 765 | /* A file that has been deleted and replaced by a renamed file will have a |
| 766 | ** NULL pathname. Change it to something that makes the output of "status" |
| 767 | ** and similar commands make sense for such files and that will (most likely) |
| 768 | ** not be an actual existing pathname. |
| 769 | */ |
| 770 | db_multi_exec( |
| 771 | "UPDATE vfile SET pathname=origname || ' (overwritten by rename)'" |
| 772 | " WHERE pathname IS NULL" |
| 773 | ); |
| 774 | |
| 775 | /* |
| 776 | ** Add to V files that are not in V or P but are in M |
| 777 | */ |
| 778 | db_prepare(&q, |
| 779 | "SELECT idm, fnm FROM fv" |
| 780 | " WHERE idp=0 AND idv=0 AND idm>0" |
| 781 | ); |
| 782 | while( db_step(&q)==SQLITE_ROW ){ |
| 783 | int idm = db_column_int(&q, 0); |
| 784 | const char *zName; |
| 785 | char *zFullName; |
| 786 | db_multi_exec( |
| 787 | "INSERT INTO vfile(vid,chnged,deleted,rid,mrid,isexe,islink,pathname)" |
| 788 | " SELECT %d,%d,0,rid,mrid,isexe,islink,pathname FROM vfile WHERE id=%d", |
| 789 | vid, integrateFlag?5:3, idm |
| 790 | ); |
| 791 | zName = db_column_text(&q, 1); |
| 792 | zFullName = mprintf("%s%s", g.zLocalRoot, zName); |
| 793 | if( file_wd_isfile_or_link(zFullName) |
| 794 | && !db_exists("SELECT 1 FROM fv WHERE fn=%Q", zName) ){ |
| 795 | fossil_print("ADDED %s (overwrites an unmanaged file)\n", zName); |
| 796 | nOverwrite++; |
| 797 | }else{ |
| 798 | fossil_print("ADDED %s\n", zName); |
| 799 | } |
| 800 | fossil_free(zFullName); |
| 801 | if( !dryRunFlag ){ |
| 802 | undo_save(zName); |
| 803 | vfile_to_disk(0, idm, 0, 0); |
| 804 | } |
| 805 | } |
| 806 | db_finalize(&q); |
| 807 | |
| 808 | /* Report on conflicts |
| 809 | */ |
| 810 | if( nConflict ){ |
| 811 | fossil_warning("WARNING: %d merge conflicts", nConflict); |
| 812 |
-1
| --- src/path.c | ||
| +++ src/path.c | ||
| @@ -452,11 +452,10 @@ | ||
| 452 | 452 | if( nChng ){ |
| 453 | 453 | aChng = *aiChng = fossil_malloc( nChng*2*sizeof(int) ); |
| 454 | 454 | for(pChng=pAll, i=0; pChng; pChng=pChng->pNext){ |
| 455 | 455 | if( pChng->newName==0 ) continue; |
| 456 | 456 | if( pChng->origName==0 ) continue; |
| 457 | - if( pChng->newName==pChng->origName ) continue; | |
| 458 | 457 | aChng[i] = pChng->origName; |
| 459 | 458 | aChng[i+1] = pChng->newName; |
| 460 | 459 | if( zDebug ){ |
| 461 | 460 | fossil_print("%s summary %d[%z] -> %d[%z]\n", |
| 462 | 461 | zDebug, |
| 463 | 462 |
| --- src/path.c | |
| +++ src/path.c | |
| @@ -452,11 +452,10 @@ | |
| 452 | if( nChng ){ |
| 453 | aChng = *aiChng = fossil_malloc( nChng*2*sizeof(int) ); |
| 454 | for(pChng=pAll, i=0; pChng; pChng=pChng->pNext){ |
| 455 | if( pChng->newName==0 ) continue; |
| 456 | if( pChng->origName==0 ) continue; |
| 457 | if( pChng->newName==pChng->origName ) continue; |
| 458 | aChng[i] = pChng->origName; |
| 459 | aChng[i+1] = pChng->newName; |
| 460 | if( zDebug ){ |
| 461 | fossil_print("%s summary %d[%z] -> %d[%z]\n", |
| 462 | zDebug, |
| 463 |
| --- src/path.c | |
| +++ src/path.c | |
| @@ -452,11 +452,10 @@ | |
| 452 | if( nChng ){ |
| 453 | aChng = *aiChng = fossil_malloc( nChng*2*sizeof(int) ); |
| 454 | for(pChng=pAll, i=0; pChng; pChng=pChng->pNext){ |
| 455 | if( pChng->newName==0 ) continue; |
| 456 | if( pChng->origName==0 ) continue; |
| 457 | aChng[i] = pChng->origName; |
| 458 | aChng[i+1] = pChng->newName; |
| 459 | if( zDebug ){ |
| 460 | fossil_print("%s summary %d[%z] -> %d[%z]\n", |
| 461 | zDebug, |
| 462 |
+8
-4
| --- src/pivot.c | ||
| +++ src/pivot.c | ||
| @@ -73,12 +73,14 @@ | ||
| 73 | 73 | |
| 74 | 74 | /* |
| 75 | 75 | ** Find the most recent common ancestor of the primary and one of |
| 76 | 76 | ** the secondaries. Return its rid. Return 0 if no common ancestor |
| 77 | 77 | ** can be found. |
| 78 | +** | |
| 79 | +** If ignoreMerges is true, follow only "primary" parent links. | |
| 78 | 80 | */ |
| 79 | -int pivot_find(void){ | |
| 81 | +int pivot_find(int ignoreMerges){ | |
| 80 | 82 | Stmt q1, q2, u1, i1; |
| 81 | 83 | int rid = 0; |
| 82 | 84 | |
| 83 | 85 | /* aqueue must contain at least one primary and one other. Otherwise |
| 84 | 86 | ** we abort early |
| @@ -102,11 +104,12 @@ | ||
| 102 | 104 | db_prepare(&q2, |
| 103 | 105 | "SELECT 1 FROM aqueue A, plink, aqueue B" |
| 104 | 106 | " WHERE plink.pid=:rid" |
| 105 | 107 | " AND plink.cid=B.rid" |
| 106 | 108 | " AND A.rid=:rid" |
| 107 | - " AND A.src!=B.src" | |
| 109 | + " AND A.src!=B.src %s", | |
| 110 | + ignoreMerges ? "AND plink.isprim" : "" | |
| 108 | 111 | ); |
| 109 | 112 | |
| 110 | 113 | /* Mark the :rid record as having been checked. It is not the | 
| 111 | 114 | ** common ancestor. |
| 112 | 115 | */ |
| @@ -122,11 +125,12 @@ | ||
| 122 | 125 | " coalesce((SELECT mtime FROM plink X WHERE X.cid=plink.pid), 0.0)," |
| 123 | 126 | " 1," |
| 124 | 127 | " aqueue.src " |
| 125 | 128 | " FROM plink, aqueue" |
| 126 | 129 | " WHERE plink.cid=:rid" |
| 127 | - " AND aqueue.rid=:rid" | |
| 130 | + " AND aqueue.rid=:rid %s", | |
| 131 | + ignoreMerges ? "AND plink.isprim" : "" | |
| 128 | 132 | ); |
| 129 | 133 | |
| 130 | 134 | while( db_step(&q1)==SQLITE_ROW ){ |
| 131 | 135 | rid = db_column_int(&q1, 0); |
| 132 | 136 | db_reset(&q1); |
| @@ -161,10 +165,10 @@ | ||
| 161 | 165 | db_must_be_within_tree(); |
| 162 | 166 | pivot_set_primary(name_to_rid(g.argv[2])); |
| 163 | 167 | for(i=3; i<g.argc; i++){ |
| 164 | 168 | pivot_set_secondary(name_to_rid(g.argv[i])); |
| 165 | 169 | } |
| 166 | - rid = pivot_find(); | |
| 170 | + rid = pivot_find(0); | |
| 167 | 171 | printf("pivot=%s\n", |
| 168 | 172 | db_text("?","SELECT uuid FROM blob WHERE rid=%d",rid) |
| 169 | 173 | ); |
| 170 | 174 | } |
| 171 | 175 |
| --- src/pivot.c | |
| +++ src/pivot.c | |
| @@ -73,12 +73,14 @@ | |
| 73 | |
| 74 | /* |
| 75 | ** Find the most recent common ancestor of the primary and one of |
| 76 | ** the secondaries. Return its rid. Return 0 if no common ancestor |
| 77 | ** can be found. |
| 78 | */ |
| 79 | int pivot_find(void){ |
| 80 | Stmt q1, q2, u1, i1; |
| 81 | int rid = 0; |
| 82 | |
| 83 | /* aqueue must contain at least one primary and one other. Otherwise |
| 84 | ** we abort early |
| @@ -102,11 +104,12 @@ | |
| 102 | db_prepare(&q2, |
| 103 | "SELECT 1 FROM aqueue A, plink, aqueue B" |
| 104 | " WHERE plink.pid=:rid" |
| 105 | " AND plink.cid=B.rid" |
| 106 | " AND A.rid=:rid" |
| 107 | " AND A.src!=B.src" |
| 108 | ); |
| 109 | |
| 110 | /* Mark the :rid record as having been checked. It is not the | 
| 111 | ** common ancestor. |
| 112 | */ |
| @@ -122,11 +125,12 @@ | |
| 122 | " coalesce((SELECT mtime FROM plink X WHERE X.cid=plink.pid), 0.0)," |
| 123 | " 1," |
| 124 | " aqueue.src " |
| 125 | " FROM plink, aqueue" |
| 126 | " WHERE plink.cid=:rid" |
| 127 | " AND aqueue.rid=:rid" |
| 128 | ); |
| 129 | |
| 130 | while( db_step(&q1)==SQLITE_ROW ){ |
| 131 | rid = db_column_int(&q1, 0); |
| 132 | db_reset(&q1); |
| @@ -161,10 +165,10 @@ | |
| 161 | db_must_be_within_tree(); |
| 162 | pivot_set_primary(name_to_rid(g.argv[2])); |
| 163 | for(i=3; i<g.argc; i++){ |
| 164 | pivot_set_secondary(name_to_rid(g.argv[i])); |
| 165 | } |
| 166 | rid = pivot_find(); |
| 167 | printf("pivot=%s\n", |
| 168 | db_text("?","SELECT uuid FROM blob WHERE rid=%d",rid) |
| 169 | ); |
| 170 | } |
| 171 |
| --- src/pivot.c | |
| +++ src/pivot.c | |
| @@ -73,12 +73,14 @@ | |
| 73 | |
| 74 | /* |
| 75 | ** Find the most recent common ancestor of the primary and one of |
| 76 | ** the secondaries. Return its rid. Return 0 if no common ancestor |
| 77 | ** can be found. |
| 78 | ** |
| 79 | ** If ignoreMerges is true, follow only "primary" parent links. |
| 80 | */ |
| 81 | int pivot_find(int ignoreMerges){ |
| 82 | Stmt q1, q2, u1, i1; |
| 83 | int rid = 0; |
| 84 | |
| 85 | /* aqueue must contain at least one primary and one other. Otherwise |
| 86 | ** we abort early |
| @@ -102,11 +104,12 @@ | |
| 104 | db_prepare(&q2, |
| 105 | "SELECT 1 FROM aqueue A, plink, aqueue B" |
| 106 | " WHERE plink.pid=:rid" |
| 107 | " AND plink.cid=B.rid" |
| 108 | " AND A.rid=:rid" |
| 109 | " AND A.src!=B.src %s", |
| 110 | ignoreMerges ? "AND plink.isprim" : "" |
| 111 | ); |
| 112 | |
| 113 | /* Mark the :rid record as having been checked. It is not the | 
| 114 | ** common ancestor. |
| 115 | */ |
| @@ -122,11 +125,12 @@ | |
| 125 | " coalesce((SELECT mtime FROM plink X WHERE X.cid=plink.pid), 0.0)," |
| 126 | " 1," |
| 127 | " aqueue.src " |
| 128 | " FROM plink, aqueue" |
| 129 | " WHERE plink.cid=:rid" |
| 130 | " AND aqueue.rid=:rid %s", |
| 131 | ignoreMerges ? "AND plink.isprim" : "" |
| 132 | ); |
| 133 | |
| 134 | while( db_step(&q1)==SQLITE_ROW ){ |
| 135 | rid = db_column_int(&q1, 0); |
| 136 | db_reset(&q1); |
| @@ -161,10 +165,10 @@ | |
| 165 | db_must_be_within_tree(); |
| 166 | pivot_set_primary(name_to_rid(g.argv[2])); |
| 167 | for(i=3; i<g.argc; i++){ |
| 168 | pivot_set_secondary(name_to_rid(g.argv[i])); |
| 169 | } |
| 170 | rid = pivot_find(0); |
| 171 | printf("pivot=%s\n", |
| 172 | db_text("?","SELECT uuid FROM blob WHERE rid=%d",rid) |
| 173 | ); |
| 174 | } |
| 175 |
+226
-82
| --- src/sqlite3.c | ||
| +++ src/sqlite3.c | ||
| @@ -363,11 +363,11 @@ | ||
| 363 | 363 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 364 | 364 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 365 | 365 | */ |
| 366 | 366 | #define SQLITE_VERSION "3.13.0" |
| 367 | 367 | #define SQLITE_VERSION_NUMBER 3013000 |
| 368 | -#define SQLITE_SOURCE_ID "2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea" | |
| 368 | +#define SQLITE_SOURCE_ID "2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2" | |
| 369 | 369 | |
| 370 | 370 | /* |
| 371 | 371 | ** CAPI3REF: Run-Time Library Version Numbers |
| 372 | 372 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 373 | 373 | ** |
| @@ -10917,11 +10917,11 @@ | ||
| 10917 | 10917 | ** |
| 10918 | 10918 | ** When doing coverage testing ALWAYS and NEVER are hard-coded to |
| 10919 | 10919 | ** be true and false so that the unreachable code they specify will |
| 10920 | 10920 | ** not be counted as untested code. |
| 10921 | 10921 | */ |
| 10922 | -#if defined(SQLITE_COVERAGE_TEST) | |
| 10922 | +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) | |
| 10923 | 10923 | # define ALWAYS(X) (1) |
| 10924 | 10924 | # define NEVER(X) (0) |
| 10925 | 10925 | #elif !defined(NDEBUG) |
| 10926 | 10926 | # define ALWAYS(X) ((X)?1:(assert(0),0)) |
| 10927 | 10927 | # define NEVER(X) ((X)?(assert(0),1):0) |
| @@ -12954,11 +12954,11 @@ | ||
| 12954 | 12954 | */ |
| 12955 | 12955 | struct PgHdr { |
| 12956 | 12956 | sqlite3_pcache_page *pPage; /* Pcache object page handle */ |
| 12957 | 12957 | void *pData; /* Page data */ |
| 12958 | 12958 | void *pExtra; /* Extra content */ |
| 12959 | - PgHdr *pDirty; /* Transient list of dirty pages */ | |
| 12959 | + PgHdr *pDirty; /* Transient list of dirty pages sorted by pgno */ | 
| 12960 | 12960 | Pager *pPager; /* The pager this page is part of */ |
| 12961 | 12961 | Pgno pgno; /* Page number for this page */ |
| 12962 | 12962 | #ifdef SQLITE_CHECK_PAGES |
| 12963 | 12963 | u32 pageHash; /* Hash of page content */ |
| 12964 | 12964 | #endif |
| @@ -12979,15 +12979,14 @@ | ||
| 12979 | 12979 | #define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */ |
| 12980 | 12980 | #define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */ |
| 12981 | 12981 | #define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */ |
| 12982 | 12982 | #define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before |
| 12983 | 12983 | ** writing this page to the database */ |
| 12984 | -#define PGHDR_NEED_READ 0x010 /* Content is unread */ | |
| 12985 | -#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */ | |
| 12986 | -#define PGHDR_MMAP 0x040 /* This is an mmap page object */ | |
| 12984 | +#define PGHDR_DONT_WRITE 0x010 /* Do not write content to disk */ | |
| 12985 | +#define PGHDR_MMAP 0x020 /* This is an mmap page object */ | |
| 12987 | 12986 | |
| 12988 | -#define PGHDR_WAL_APPEND 0x080 /* Appended to wal file */ | |
| 12987 | +#define PGHDR_WAL_APPEND 0x040 /* Appended to wal file */ | |
| 12989 | 12988 | |
| 12990 | 12989 | /* Initialize and shutdown the page cache subsystem */ |
| 12991 | 12990 | SQLITE_PRIVATE int sqlite3PcacheInitialize(void); |
| 12992 | 12991 | SQLITE_PRIVATE void sqlite3PcacheShutdown(void); |
| 12993 | 12992 | |
| @@ -13065,10 +13064,15 @@ | ||
| 13065 | 13064 | ** interface is only available if SQLITE_CHECK_PAGES is defined when the |
| 13066 | 13065 | ** library is built. |
| 13067 | 13066 | */ |
| 13068 | 13067 | SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); |
| 13069 | 13068 | #endif |
| 13069 | + | |
| 13070 | +#if defined(SQLITE_DEBUG) | |
| 13071 | +/* Check invariants on a PgHdr object */ | |
| 13072 | +SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*); | |
| 13073 | +#endif | |
| 13070 | 13074 | |
| 13071 | 13075 | /* Set and get the suggested cache-size for the specified pager-cache. |
| 13072 | 13076 | ** |
| 13073 | 13077 | ** If no global maximum is configured, then the system attempts to limit |
| 13074 | 13078 | ** the total number of pages cached by purgeable pager-caches to the sum |
| @@ -43115,11 +43119,33 @@ | ||
| 43115 | 43119 | ** This file implements that page cache. |
| 43116 | 43120 | */ |
| 43117 | 43121 | /* #include "sqliteInt.h" */ |
| 43118 | 43122 | |
| 43119 | 43123 | /* |
| 43120 | -** A complete page cache is an instance of this structure. | |
| 43124 | +** A complete page cache is an instance of this structure. Every | |
| 43125 | +** entry in the cache holds a single page of the database file. The | |
| 43126 | +** btree layer only operates on the cached copy of the database pages. | |
| 43127 | +** | |
| 43128 | +** A page cache entry is "clean" if it exactly matches what is currently | |
| 43129 | +** on disk. A page is "dirty" if it has been modified and needs to be | |
| 43130 | +** persisted to disk. | |
| 43131 | +** | |
| 43132 | +** pDirty, pDirtyTail, pSynced: | |
| 43133 | +** All dirty pages are linked into the doubly linked list using | |
| 43134 | +** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order | |
| 43135 | +** such that p was added to the list more recently than p->pDirtyNext. | |
| 43136 | +** PCache.pDirty points to the first (newest) element in the list and | |
| 43137 | +** pDirtyTail to the last (oldest). | |
| 43138 | +** | |
| 43139 | +** The PCache.pSynced variable is used to optimize searching for a dirty | |
| 43140 | +** page to eject from the cache mid-transaction. It is better to eject | |
| 43141 | +** a page that does not require a journal sync than one that does. | |
| 43143 | +** Therefore, pSynced is maintained so that it *almost* always points | 
| 43143 | +** to either the oldest page in the pDirty/pDirtyTail list that has a | |
| 43144 | +** clear PGHDR_NEED_SYNC flag or to a page that is older than this one | |
| 43145 | +** (so that the right page to eject can be found by following pDirtyPrev | |
| 43146 | +** pointers). | |
| 43121 | 43147 | */ |
| 43122 | 43148 | struct PCache { |
| 43123 | 43149 | PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ |
| 43124 | 43150 | PgHdr *pSynced; /* Last synced page in dirty page list */ |
| 43125 | 43151 | int nRefSum; /* Sum of ref counts over all pages */ |
| @@ -43131,10 +43157,99 @@ | ||
| 43131 | 43157 | u8 eCreate; /* eCreate value for for xFetch() */ |
| 43132 | 43158 | int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */ |
| 43133 | 43159 | void *pStress; /* Argument to xStress */ |
| 43134 | 43160 | sqlite3_pcache *pCache; /* Pluggable cache module */ |
| 43135 | 43161 | }; |
| 43162 | + | |
| 43163 | +/********************************** Test and Debug Logic **********************/ | |
| 43164 | +/* | |
| 43165 | +** Debug tracing macros. Enable by changing the "0" to "1" and | 
| 43166 | +** recompiling. | |
| 43167 | +** | |
| 43168 | +** When sqlite3PcacheTrace is 1, single line trace messages are issued. | |
| 43169 | +** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries | |
| 43170 | +** is displayed for many operations, resulting in a lot of output. | |
| 43171 | +*/ | |
| 43172 | +#if defined(SQLITE_DEBUG) && 0 | |
| 43173 | + int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */ | |
| 43174 | + int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */ | |
| 43175 | +# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;} | |
| 43176 | + void pcacheDump(PCache *pCache){ | |
| 43177 | + int N; | |
| 43178 | + int i, j; | |
| 43179 | + sqlite3_pcache_page *pLower; | |
| 43180 | + PgHdr *pPg; | |
| 43181 | + unsigned char *a; | |
| 43182 | + | |
| 43183 | + if( sqlite3PcacheTrace<2 ) return; | |
| 43184 | + if( pCache->pCache==0 ) return; | |
| 43185 | + N = sqlite3PcachePagecount(pCache); | |
| 43186 | + if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; | |
| 43187 | + for(i=1; i<=N; i++){ | |
| 43188 | + pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); | |
| 43189 | + if( pLower==0 ) continue; | |
| 43190 | + pPg = (PgHdr*)pLower->pExtra; | |
| 43191 | + printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); | |
| 43192 | + a = (unsigned char *)pLower->pBuf; | |
| 43193 | + for(j=0; j<12; j++) printf("%02x", a[j]); | |
| 43194 | + printf("\n"); | |
| 43195 | + if( pPg->pPage==0 ){ | |
| 43196 | + sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); | |
| 43197 | + } | |
| 43198 | + } | |
| 43199 | + } | |
| 43200 | + #else | |
| 43201 | +# define pcacheTrace(X) | |
| 43202 | +# define pcacheDump(X) | |
| 43203 | +#endif | |
| 43204 | + | |
| 43205 | +/* | |
| 43206 | +** Check invariants on a PgHdr entry. Return true if everything is OK. | |
| 43207 | +** Return false if any invariant is violated. | |
| 43208 | +** | |
| 43209 | +** This routine is for use inside of assert() statements only. For | |
| 43210 | +** example: | |
| 43211 | +** | |
| 43212 | +** assert( sqlite3PcachePageSanity(pPg) ); | |
| 43213 | +*/ | |
| 43214 | +#if SQLITE_DEBUG | |
| 43215 | +SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){ | |
| 43216 | + PCache *pCache; | |
| 43217 | + assert( pPg!=0 ); | |
| 43218 | + assert( pPg->pgno>0 ); /* Page number is 1 or more */ | |
| 43219 | + pCache = pPg->pCache; | |
| 43220 | + assert( pCache!=0 ); /* Every page has an associated PCache */ | |
| 43221 | + if( pPg->flags & PGHDR_CLEAN ){ | |
| 43222 | + assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ | |
| 43223 | + assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */ | |
| 43224 | + assert( pCache->pDirtyTail!=pPg ); | |
| 43225 | + } | |
| 43226 | + /* WRITEABLE pages must also be DIRTY */ | |
| 43227 | + if( pPg->flags & PGHDR_WRITEABLE ){ | |
| 43228 | + assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */ | |
| 43229 | + } | |
| 43230 | + /* NEED_SYNC can be set independently of WRITEABLE. This can happen, | |
| 43231 | + ** for example, when using the sqlite3PagerDontWrite() optimization: | |
| 43232 | + ** (1) Page X is journalled, and gets WRITEABLE and NEED_SEEK. | |
| 43233 | + ** (2) Page X moved to freelist, WRITEABLE is cleared | |
| 43234 | + ** (3) Page X reused, WRITEABLE is set again | |
| 43235 | + ** If NEED_SYNC had been cleared in step 2, then it would not be reset | |
| 43236 | + ** in step 3, and page might be written into the database without first | |
| 43237 | + ** syncing the rollback journal, which might cause corruption on a power | |
| 43238 | + ** loss. | |
| 43239 | + ** | |
| 43240 | + ** Another example is when the database page size is smaller than the | |
| 43241 | + ** disk sector size. When any page of a sector is journalled, all pages | |
| 43242 | + ** in that sector are marked NEED_SYNC even if they are still CLEAN, just | |
| 43243 | + ** in case they are later modified, since all pages in the same sector | |
| 43244 | + ** must be journalled and synced before any of those pages can be safely | |
| 43245 | + ** written. | |
| 43246 | + */ | |
| 43247 | + return 1; | |
| 43248 | +} | |
| 43249 | +#endif /* SQLITE_DEBUG */ | |
| 43250 | + | |
| 43136 | 43251 | |
| 43137 | 43252 | /********************************** Linked List Management ********************/ |
| 43138 | 43253 | |
| 43139 | 43254 | /* Allowed values for second argument to pcacheManageDirtyList() */ |
| 43140 | 43255 | #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ |
| @@ -43148,21 +43263,20 @@ | ||
| 43148 | 43263 | ** the dirty list. Doing both moves pPage to the front of the dirty list. |
| 43149 | 43264 | */ |
| 43150 | 43265 | static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ |
| 43151 | 43266 | PCache *p = pPage->pCache; |
| 43152 | 43267 | |
| 43268 | + pcacheTrace(("%p.DIRTYLIST.%s %d\n", p, | |
| 43269 | + addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT", | |
| 43270 | + pPage->pgno)); | |
| 43153 | 43271 | if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ |
| 43154 | 43272 | assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); |
| 43155 | 43273 | assert( pPage->pDirtyPrev || pPage==p->pDirty ); |
| 43156 | 43274 | |
| 43157 | 43275 | /* Update the PCache1.pSynced variable if necessary. */ |
| 43158 | 43276 | if( p->pSynced==pPage ){ |
| 43159 | - PgHdr *pSynced = pPage->pDirtyPrev; | |
| 43160 | - while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ | |
| 43161 | - pSynced = pSynced->pDirtyPrev; | |
| 43162 | - } | |
| 43163 | - p->pSynced = pSynced; | |
| 43277 | + p->pSynced = pPage->pDirtyPrev; | |
| 43164 | 43278 | } |
| 43165 | 43279 | |
| 43166 | 43280 | if( pPage->pDirtyNext ){ |
| 43167 | 43281 | pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; |
| 43168 | 43282 | }else{ |
| @@ -43170,14 +43284,19 @@ | ||
| 43170 | 43284 | p->pDirtyTail = pPage->pDirtyPrev; |
| 43171 | 43285 | } |
| 43172 | 43286 | if( pPage->pDirtyPrev ){ |
| 43173 | 43287 | pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; |
| 43174 | 43288 | }else{ |
| 43289 | + /* If there are now no dirty pages in the cache, set eCreate to 2. | |
| 43290 | + ** This is an optimization that allows sqlite3PcacheFetch() to skip | |
| 43291 | + ** searching for a dirty page to eject from the cache when it might | |
| 43292 | + ** otherwise have to. */ | |
| 43175 | 43293 | assert( pPage==p->pDirty ); |
| 43176 | 43294 | p->pDirty = pPage->pDirtyNext; |
| 43177 | - if( p->pDirty==0 && p->bPurgeable ){ | |
| 43178 | - assert( p->eCreate==1 ); | |
| 43295 | + assert( p->bPurgeable || p->eCreate==2 ); | |
| 43296 | + if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/ | |
| 43297 | + assert( p->bPurgeable==0 || p->eCreate==1 ); | |
| 43179 | 43298 | p->eCreate = 2; |
| 43180 | 43299 | } |
| 43181 | 43300 | } |
| 43182 | 43301 | pPage->pDirtyNext = 0; |
| 43183 | 43302 | pPage->pDirtyPrev = 0; |
| @@ -43195,23 +43314,34 @@ | ||
| 43195 | 43314 | assert( p->eCreate==2 ); |
| 43196 | 43315 | p->eCreate = 1; |
| 43197 | 43316 | } |
| 43198 | 43317 | } |
| 43199 | 43318 | p->pDirty = pPage; |
| 43200 | - if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ | |
| 43319 | + | |
| 43320 | + /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set | |
| 43321 | + ** pSynced to point to it. Checking the NEED_SYNC flag is an | |
| 43322 | + ** optimization, as if pSynced points to a page with the NEED_SYNC | |
| 43323 | + ** flag set sqlite3PcacheFetchStress() searches through all newer | |
| 43324 | + ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */ | |
| 43325 | + if( !p->pSynced | |
| 43326 | + && 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/ | |
| 43327 | + ){ | |
| 43201 | 43328 | p->pSynced = pPage; |
| 43202 | 43329 | } |
| 43203 | 43330 | } |
| 43331 | + pcacheDump(p); | |
| 43204 | 43332 | } |
| 43205 | 43333 | |
| 43206 | 43334 | /* |
| 43207 | 43335 | ** Wrapper around the pluggable caches xUnpin method. If the cache is |
| 43208 | 43336 | ** being used for an in-memory database, this function is a no-op. |
| 43209 | 43337 | */ |
| 43210 | 43338 | static void pcacheUnpin(PgHdr *p){ |
| 43211 | 43339 | if( p->pCache->bPurgeable ){ |
| 43340 | + pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno)); | |
| 43212 | 43341 | sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); |
| 43342 | + pcacheDump(p->pCache); | |
| 43213 | 43343 | } |
| 43214 | 43344 | } |
| 43215 | 43345 | |
| 43216 | 43346 | /* |
| 43217 | 43347 | ** Compute the number of pages of cache requested. p->szCache is the |
| @@ -43277,10 +43407,11 @@ | ||
| 43277 | 43407 | p->eCreate = 2; |
| 43278 | 43408 | p->xStress = xStress; |
| 43279 | 43409 | p->pStress = pStress; |
| 43280 | 43410 | p->szCache = 100; |
| 43281 | 43411 | p->szSpill = 1; |
| 43412 | + pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable)); | |
| 43282 | 43413 | return sqlite3PcacheSetPageSize(p, szPage); |
| 43283 | 43414 | } |
| 43284 | 43415 | |
| 43285 | 43416 | /* |
| 43286 | 43417 | ** Change the page size for PCache object. The caller must ensure that there |
| @@ -43299,10 +43430,11 @@ | ||
| 43299 | 43430 | if( pCache->pCache ){ |
| 43300 | 43431 | sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 43301 | 43432 | } |
| 43302 | 43433 | pCache->pCache = pNew; |
| 43303 | 43434 | pCache->szPage = szPage; |
| 43435 | + pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage)); | |
| 43304 | 43436 | } |
| 43305 | 43437 | return SQLITE_OK; |
| 43306 | 43438 | } |
| 43307 | 43439 | |
| 43308 | 43440 | /* |
| @@ -43333,15 +43465,17 @@ | ||
| 43333 | 43465 | PCache *pCache, /* Obtain the page from this cache */ |
| 43334 | 43466 | Pgno pgno, /* Page number to obtain */ |
| 43335 | 43467 | int createFlag /* If true, create page if it does not exist already */ |
| 43336 | 43468 | ){ |
| 43337 | 43469 | int eCreate; |
| 43470 | + sqlite3_pcache_page *pRes; | |
| 43338 | 43471 | |
| 43339 | 43472 | assert( pCache!=0 ); |
| 43340 | 43473 | assert( pCache->pCache!=0 ); |
| 43341 | 43474 | assert( createFlag==3 || createFlag==0 ); |
| 43342 | 43475 | assert( pgno>0 ); |
| 43476 | + assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) ); | |
| 43343 | 43477 | |
| 43344 | 43478 | /* eCreate defines what to do if the page does not exist. |
| 43345 | 43479 | ** 0 Do not allocate a new page. (createFlag==0) |
| 43346 | 43480 | ** 1 Allocate a new page if doing so is inexpensive. |
| 43347 | 43481 | ** (createFlag==1 AND bPurgeable AND pDirty) |
| @@ -43350,11 +43484,14 @@ | ||
| 43350 | 43484 | */ |
| 43351 | 43485 | eCreate = createFlag & pCache->eCreate; |
| 43352 | 43486 | assert( eCreate==0 || eCreate==1 || eCreate==2 ); |
| 43353 | 43487 | assert( createFlag==0 || pCache->eCreate==eCreate ); |
| 43354 | 43488 | assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); |
| 43355 | - return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); | |
| 43489 | + pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); | |
| 43490 | + pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno, | |
| 43491 | + createFlag?" create":"",pRes)); | |
| 43492 | + return pRes; | |
| 43356 | 43493 | } |
| 43357 | 43494 | |
| 43358 | 43495 | /* |
| 43359 | 43496 | ** If the sqlite3PcacheFetch() routine is unable to allocate a new |
| 43360 | 43497 | ** page because no clean pages are available for reuse and the cache |
| @@ -43377,11 +43514,15 @@ | ||
| 43377 | 43514 | if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){ |
| 43378 | 43515 | /* Find a dirty page to write-out and recycle. First try to find a |
| 43379 | 43516 | ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC |
| 43380 | 43517 | ** cleared), but if that is not possible settle for any other |
| 43381 | 43518 | ** unreferenced dirty page. |
| 43382 | - */ | |
| 43519 | + ** | |
| 43520 | + ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC | |
| 43521 | + ** flag is currently referenced, then the following may leave pSynced | |
| 43522 | + ** set incorrectly (pointing to other than the LRU page with NEED_SYNC | |
| 43523 | + ** cleared). This is Ok, as pSynced is just an optimization. */ | |
| 43383 | 43524 | for(pPg=pCache->pSynced; |
| 43384 | 43525 | pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); |
| 43385 | 43526 | pPg=pPg->pDirtyPrev |
| 43386 | 43527 | ); |
| 43387 | 43528 | pCache->pSynced = pPg; |
| @@ -43395,11 +43536,13 @@ | ||
| 43395 | 43536 | "spill page %d making room for %d - cache used: %d/%d", |
| 43396 | 43537 | pPg->pgno, pgno, |
| 43397 | 43538 | sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), |
| 43398 | 43539 | numberOfCachePages(pCache)); |
| 43399 | 43540 | #endif |
| 43541 | + pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno)); | |
| 43400 | 43542 | rc = pCache->xStress(pCache->pStress, pPg); |
| 43543 | + pcacheDump(pCache); | |
| 43401 | 43544 | if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ |
| 43402 | 43545 | return rc; |
| 43403 | 43546 | } |
| 43404 | 43547 | } |
| 43405 | 43548 | } |
| @@ -43455,10 +43598,11 @@ | ||
| 43455 | 43598 | if( !pPgHdr->pPage ){ |
| 43456 | 43599 | return pcacheFetchFinishWithInit(pCache, pgno, pPage); |
| 43457 | 43600 | } |
| 43458 | 43601 | pCache->nRefSum++; |
| 43459 | 43602 | pPgHdr->nRef++; |
| 43603 | + assert( sqlite3PcachePageSanity(pPgHdr) ); | |
| 43460 | 43604 | return pPgHdr; |
| 43461 | 43605 | } |
| 43462 | 43606 | |
| 43463 | 43607 | /* |
| 43464 | 43608 | ** Decrement the reference count on a page. If the page is clean and the |
| @@ -43468,12 +43612,15 @@ | ||
| 43468 | 43612 | assert( p->nRef>0 ); |
| 43469 | 43613 | p->pCache->nRefSum--; |
| 43470 | 43614 | if( (--p->nRef)==0 ){ |
| 43471 | 43615 | if( p->flags&PGHDR_CLEAN ){ |
| 43472 | 43616 | pcacheUnpin(p); |
| 43473 | - }else if( p->pDirtyPrev!=0 ){ | |
| 43474 | - /* Move the page to the head of the dirty list. */ | |
| 43617 | + }else if( p->pDirtyPrev!=0 ){ /*OPTIMIZATION-IF-FALSE*/ | |
| 43618 | + /* Move the page to the head of the dirty list. If p->pDirtyPrev==0, | |
| 43619 | + ** then page p is already at the head of the dirty list and the | |
| 43620 | + ** following call would be a no-op. Hence the OPTIMIZATION-IF-FALSE | |
| 43621 | + ** tag above. */ | |
| 43475 | 43622 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 43476 | 43623 | } |
| 43477 | 43624 | } |
| 43478 | 43625 | } |
| 43479 | 43626 | |
| @@ -43480,10 +43627,11 @@ | ||
| 43480 | 43627 | /* |
| 43481 | 43628 | ** Increase the reference count of a supplied page by 1. |
| 43482 | 43629 | */ |
| 43483 | 43630 | SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ |
| 43484 | 43631 | assert(p->nRef>0); |
| 43632 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43485 | 43633 | p->nRef++; |
| 43486 | 43634 | p->pCache->nRefSum++; |
| 43487 | 43635 | } |
| 43488 | 43636 | |
| 43489 | 43637 | /* |
| @@ -43491,10 +43639,11 @@ | ||
| 43491 | 43639 | ** page. This function deletes that reference, so after it returns the |
| 43492 | 43640 | ** page pointed to by p is invalid. |
| 43493 | 43641 | */ |
| 43494 | 43642 | SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ |
| 43495 | 43643 | assert( p->nRef==1 ); |
| 43644 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43496 | 43645 | if( p->flags&PGHDR_DIRTY ){ |
| 43497 | 43646 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 43498 | 43647 | } |
| 43499 | 43648 | p->pCache->nRefSum--; |
| 43500 | 43649 | sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1); |
| @@ -43504,30 +43653,36 @@ | ||
| 43504 | 43653 | ** Make sure the page is marked as dirty. If it isn't dirty already, |
| 43505 | 43654 | ** make it so. |
| 43506 | 43655 | */ |
| 43507 | 43656 | SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ |
| 43508 | 43657 | assert( p->nRef>0 ); |
| 43509 | - if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ | |
| 43658 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43659 | + if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/ | |
| 43510 | 43660 | p->flags &= ~PGHDR_DONT_WRITE; |
| 43511 | 43661 | if( p->flags & PGHDR_CLEAN ){ |
| 43512 | 43662 | p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); |
| 43663 | + pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno)); | |
| 43513 | 43664 | assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); |
| 43514 | 43665 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); |
| 43515 | 43666 | } |
| 43667 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43516 | 43668 | } |
| 43517 | 43669 | } |
| 43518 | 43670 | |
| 43519 | 43671 | /* |
| 43520 | 43672 | ** Make sure the page is marked as clean. If it isn't clean already, |
| 43521 | 43673 | ** make it so. |
| 43522 | 43674 | */ |
| 43523 | 43675 | SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ |
| 43524 | - if( (p->flags & PGHDR_DIRTY) ){ | |
| 43676 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43677 | + if( ALWAYS((p->flags & PGHDR_DIRTY)!=0) ){ | |
| 43525 | 43678 | assert( (p->flags & PGHDR_CLEAN)==0 ); |
| 43526 | 43679 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 43527 | 43680 | p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 43528 | 43681 | p->flags |= PGHDR_CLEAN; |
| 43682 | + pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno)); | |
| 43683 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43529 | 43684 | if( p->nRef==0 ){ |
| 43530 | 43685 | pcacheUnpin(p); |
| 43531 | 43686 | } |
| 43532 | 43687 | } |
| 43533 | 43688 | } |
| @@ -43535,10 +43690,11 @@ | ||
| 43535 | 43690 | /* |
| 43536 | 43691 | ** Make every page in the cache clean. |
| 43537 | 43692 | */ |
| 43538 | 43693 | SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){ |
| 43539 | 43694 | PgHdr *p; |
| 43695 | + pcacheTrace(("%p.CLEAN-ALL\n",pCache)); | |
| 43540 | 43696 | while( (p = pCache->pDirty)!=0 ){ |
| 43541 | 43697 | sqlite3PcacheMakeClean(p); |
| 43542 | 43698 | } |
| 43543 | 43699 | } |
| 43544 | 43700 | |
| @@ -43545,10 +43701,11 @@ | ||
| 43545 | 43701 | /* |
| 43546 | 43702 | ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages. |
| 43547 | 43703 | */ |
| 43548 | 43704 | SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){ |
| 43549 | 43705 | PgHdr *p; |
| 43706 | + pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache)); | |
| 43550 | 43707 | for(p=pCache->pDirty; p; p=p->pDirtyNext){ |
| 43551 | 43708 | p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 43552 | 43709 | } |
| 43553 | 43710 | pCache->pSynced = pCache->pDirtyTail; |
| 43554 | 43711 | } |
| @@ -43569,10 +43726,12 @@ | ||
| 43569 | 43726 | */ |
| 43570 | 43727 | SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ |
| 43571 | 43728 | PCache *pCache = p->pCache; |
| 43572 | 43729 | assert( p->nRef>0 ); |
| 43573 | 43730 | assert( newPgno>0 ); |
| 43731 | + assert( sqlite3PcachePageSanity(p) ); | |
| 43732 | + pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno)); | |
| 43574 | 43733 | sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); |
| 43575 | 43734 | p->pgno = newPgno; |
| 43576 | 43735 | if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ |
| 43577 | 43736 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 43578 | 43737 | } |
| @@ -43589,10 +43748,11 @@ | ||
| 43589 | 43748 | */ |
| 43590 | 43749 | SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ |
| 43591 | 43750 | if( pCache->pCache ){ |
| 43592 | 43751 | PgHdr *p; |
| 43593 | 43752 | PgHdr *pNext; |
| 43753 | + pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno)); | |
| 43594 | 43754 | for(p=pCache->pDirty; p; p=pNext){ |
| 43595 | 43755 | pNext = p->pDirtyNext; |
| 43596 | 43756 | /* This routine never gets call with a positive pgno except right |
| 43597 | 43757 | ** after sqlite3PcacheCleanAll(). So if there are dirty pages, |
| 43598 | 43758 | ** it must be that pgno==0. |
| @@ -43619,10 +43779,11 @@ | ||
| 43619 | 43779 | /* |
| 43620 | 43780 | ** Close a cache. |
| 43621 | 43781 | */ |
| 43622 | 43782 | SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){ |
| 43623 | 43783 | assert( pCache->pCache!=0 ); |
| 43784 | + pcacheTrace(("%p.CLOSE\n",pCache)); | |
| 43624 | 43785 | sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 43625 | 43786 | } |
| 43626 | 43787 | |
| 43627 | 43788 | /* |
| 43628 | 43789 | ** Discard the contents of the cache. |
| @@ -47585,24 +47746,28 @@ | ||
| 47585 | 47746 | } |
| 47586 | 47747 | |
| 47587 | 47748 | static int pager_truncate(Pager *pPager, Pgno nPage); |
| 47588 | 47749 | |
| 47589 | 47750 | /* |
| 47590 | -** The write transaction open on the pager passed as the only argument is | |
| 47591 | -** being committed. This function returns true if all dirty pages should | |
| 47592 | -** be flushed to disk, or false otherwise. Pages should be flushed to disk | |
| 47593 | -** unless one of the following is true: | |
| 47594 | -** | |
| 47595 | -** * The db is an in-memory database. | |
| 47596 | -** | |
| 47597 | -** * The db is a temporary database and the db file has not been opened. | |
| 47598 | -** | |
| 47599 | -** * The db is a temporary database and the cache contains less than | |
| 47600 | -** C/4 dirty pages, where C is the configured cache-size. | |
| 47601 | -*/ | |
| 47602 | -static int pagerFlushOnCommit(Pager *pPager){ | |
| 47751 | +** The write transaction open on pPager is being committed (bCommit==1) | |
| 47752 | +** or rolled back (bCommit==0). | |
| 47753 | +** | |
| 47754 | +** Return TRUE if and only if all dirty pages should be flushed to disk. | |
| 47755 | +** | |
| 47756 | +** Rules: | |
| 47757 | +** | |
| 47758 | +** * For non-TEMP databases, always sync to disk. This is necessary | |
| 47759 | +** for transactions to be durable. | |
| 47760 | +** | |
| 47761 | +** * Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing | |
| 47762 | +** file has been created already (via a spill on pagerStress()) and | |
| 47763 | +** when the number of dirty pages in memory exceeds 25% of the total | |
| 47764 | +** cache size. | |
| 47765 | +*/ | |
| 47766 | +static int pagerFlushOnCommit(Pager *pPager, int bCommit){ | |
| 47603 | 47767 | if( pPager->tempFile==0 ) return 1; |
| 47768 | + if( !bCommit ) return 0; | |
| 47604 | 47769 | if( !isOpen(pPager->fd) ) return 0; |
| 47605 | 47770 | return (sqlite3PCachePercentDirty(pPager->pPCache)>=25); |
| 47606 | 47771 | } |
| 47607 | 47772 | |
| 47608 | 47773 | /* |
| @@ -47706,11 +47871,11 @@ | ||
| 47706 | 47871 | } |
| 47707 | 47872 | pPager->journalOff = 0; |
| 47708 | 47873 | }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST |
| 47709 | 47874 | || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) |
| 47710 | 47875 | ){ |
| 47711 | - rc = zeroJournalHdr(pPager, hasMaster); | |
| 47876 | + rc = zeroJournalHdr(pPager, hasMaster||pPager->tempFile); | |
| 47712 | 47877 | pPager->journalOff = 0; |
| 47713 | 47878 | }else{ |
| 47714 | 47879 | /* This branch may be executed with Pager.journalMode==MEMORY if |
| 47715 | 47880 | ** a hot-journal was just rolled back. In this case the journal |
| 47716 | 47881 | ** file should be closed and deleted. If this connection writes to |
| @@ -47741,16 +47906,18 @@ | ||
| 47741 | 47906 | #endif |
| 47742 | 47907 | |
| 47743 | 47908 | sqlite3BitvecDestroy(pPager->pInJournal); |
| 47744 | 47909 | pPager->pInJournal = 0; |
| 47745 | 47910 | pPager->nRec = 0; |
| 47746 | - if( MEMDB || pagerFlushOnCommit(pPager) ){ | |
| 47747 | - sqlite3PcacheCleanAll(pPager->pPCache); | |
| 47748 | - }else{ | |
| 47749 | - sqlite3PcacheClearWritable(pPager->pPCache); | |
| 47911 | + if( rc==SQLITE_OK ){ | |
| 47912 | + if( pagerFlushOnCommit(pPager, bCommit) ){ | |
| 47913 | + sqlite3PcacheCleanAll(pPager->pPCache); | |
| 47914 | + }else{ | |
| 47915 | + sqlite3PcacheClearWritable(pPager->pPCache); | |
| 47916 | + } | |
| 47917 | + sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); | |
| 47750 | 47918 | } |
| 47751 | - sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); | |
| 47752 | 47919 | |
| 47753 | 47920 | if( pagerUseWal(pPager) ){ |
| 47754 | 47921 | /* Drop the WAL write-lock, if any. Also, if the connection was in |
| 47755 | 47922 | ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE |
| 47756 | 47923 | ** lock held on the database file. |
| @@ -48080,11 +48247,10 @@ | ||
| 48080 | 48247 | pPager->doNotSpill |= SPILLFLAG_ROLLBACK; |
| 48081 | 48248 | rc = sqlite3PagerGet(pPager, pgno, &pPg, 1); |
| 48082 | 48249 | assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); |
| 48083 | 48250 | pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; |
| 48084 | 48251 | if( rc!=SQLITE_OK ) return rc; |
| 48085 | - pPg->flags &= ~PGHDR_NEED_READ; | |
| 48086 | 48252 | sqlite3PcacheMakeDirty(pPg); |
| 48087 | 48253 | } |
| 48088 | 48254 | if( pPg ){ |
| 48089 | 48255 | /* No page should ever be explicitly rolled back that is in use, except |
| 48090 | 48256 | ** for page 1 which is held in use in order to keep the lock on the |
| @@ -48094,37 +48260,14 @@ | ||
| 48094 | 48260 | */ |
| 48095 | 48261 | void *pData; |
| 48096 | 48262 | pData = pPg->pData; |
| 48097 | 48263 | memcpy(pData, (u8*)aData, pPager->pageSize); |
| 48098 | 48264 | pPager->xReiniter(pPg); |
| 48099 | - if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){ | |
| 48100 | - /* If the contents of this page were just restored from the main | |
| 48101 | - ** journal file, then its content must be as they were when the | |
| 48102 | - ** transaction was first opened. In this case we can mark the page | |
| 48103 | - ** as clean, since there will be no need to write it out to the | |
| 48104 | - ** database. | |
| 48105 | - ** | |
| 48106 | - ** There is one exception to this rule. If the page is being rolled | |
| 48107 | - ** back as part of a savepoint (or statement) rollback from an | |
| 48108 | - ** unsynced portion of the main journal file, then it is not safe | |
| 48109 | - ** to mark the page as clean. This is because marking the page as | |
| 48110 | - ** clean will clear the PGHDR_NEED_SYNC flag. Since the page is | |
| 48111 | - ** already in the journal file (recorded in Pager.pInJournal) and | |
| 48112 | - ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to | |
| 48113 | - ** again within this transaction, it will be marked as dirty but | |
| 48114 | - ** the PGHDR_NEED_SYNC flag will not be set. It could then potentially | |
| 48115 | - ** be written out into the database file before its journal file | |
| 48116 | - ** segment is synced. If a crash occurs during or following this, | |
| 48117 | - ** database corruption may ensue. | |
| 48118 | - ** | |
| 48119 | - ** Update: Another exception is for temp files that are not | |
| 48120 | - ** in-memory databases. In this case the page may have been dirty | |
| 48121 | - ** at the start of the transaction. | |
| 48122 | - */ | |
| 48123 | - assert( !pagerUseWal(pPager) ); | |
| 48124 | - if( pPager->tempFile==0 ) sqlite3PcacheMakeClean(pPg); | |
| 48125 | - } | |
| 48265 | + /* It used to be that sqlite3PcacheMakeClean(pPg) was called here. But | |
| 48266 | + ** that call was dangerous and had no detectable benefit since the cache | |
| 48267 | + ** is normally cleaned by sqlite3PcacheCleanAll() after rollback and so | |
| 48268 | + ** has been removed. */ | |
| 48126 | 48269 | pager_set_pagehash(pPg); |
| 48127 | 48270 | |
| 48128 | 48271 | /* If this was page 1, then restore the value of Pager.dbFileVers. |
| 48129 | 48272 | ** Do this before any decoding. */ |
| 48130 | 48273 | if( pgno==1 ){ |
| @@ -51731,10 +51874,11 @@ | ||
| 51731 | 51874 | if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ |
| 51732 | 51875 | PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); |
| 51733 | 51876 | IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) |
| 51734 | 51877 | pPg->flags |= PGHDR_DONT_WRITE; |
| 51735 | 51878 | pPg->flags &= ~PGHDR_WRITEABLE; |
| 51879 | + testcase( pPg->flags & PGHDR_NEED_SYNC ); | |
| 51736 | 51880 | pager_set_pagehash(pPg); |
| 51737 | 51881 | } |
| 51738 | 51882 | } |
| 51739 | 51883 | |
| 51740 | 51884 | /* |
| @@ -51926,21 +52070,21 @@ | ||
| 51926 | 52070 | |
| 51927 | 52071 | /* If a prior error occurred, report that error again. */ |
| 51928 | 52072 | if( NEVER(pPager->errCode) ) return pPager->errCode; |
| 51929 | 52073 | |
| 51930 | 52074 | /* Provide the ability to easily simulate an I/O error during testing */ |
| 51931 | - if( (rc = sqlite3FaultSim(400))!=SQLITE_OK ) return rc; | |
| 52075 | + if( sqlite3FaultSim(400) ) return SQLITE_IOERR; | |
| 51932 | 52076 | |
| 51933 | 52077 | PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", |
| 51934 | 52078 | pPager->zFilename, zMaster, pPager->dbSize)); |
| 51935 | 52079 | |
| 51936 | 52080 | /* If no database changes have been made, return early. */ |
| 51937 | 52081 | if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK; |
| 51938 | 52082 | |
| 51939 | 52083 | assert( MEMDB==0 || pPager->tempFile ); |
| 51940 | 52084 | assert( isOpen(pPager->fd) || pPager->tempFile ); |
| 51941 | - if( 0==pagerFlushOnCommit(pPager) ){ | |
| 52085 | + if( 0==pagerFlushOnCommit(pPager, 1) ){ | |
| 51942 | 52086 | /* If this is an in-memory db, or no pages have been written to, or this |
| 51943 | 52087 | ** function has already been called, it is mostly a no-op. However, any |
| 51944 | 52088 | ** backup in progress needs to be restarted. */ |
| 51945 | 52089 | sqlite3BackupRestart(pPager->pBackup); |
| 51946 | 52090 | }else{ |
| @@ -52561,10 +52705,11 @@ | ||
| 52561 | 52705 | assert( assert_pager_state(pPager) ); |
| 52562 | 52706 | |
| 52563 | 52707 | /* In order to be able to rollback, an in-memory database must journal |
| 52564 | 52708 | ** the page we are moving from. |
| 52565 | 52709 | */ |
| 52710 | + assert( pPager->tempFile || !MEMDB ); | |
| 52566 | 52711 | if( pPager->tempFile ){ |
| 52567 | 52712 | rc = sqlite3PagerWrite(pPg); |
| 52568 | 52713 | if( rc ) return rc; |
| 52569 | 52714 | } |
| 52570 | 52715 | |
| @@ -52635,12 +52780,11 @@ | ||
| 52635 | 52780 | |
| 52636 | 52781 | /* For an in-memory database, make sure the original page continues |
| 52637 | 52782 | ** to exist, in case the transaction needs to roll back. Use pPgOld |
| 52638 | 52783 | ** as the original page since it has already been allocated. |
| 52639 | 52784 | */ |
| 52640 | - if( pPager->tempFile ){ | |
| 52641 | - assert( pPgOld ); | |
| 52785 | + if( pPager->tempFile && pPgOld ){ | |
| 52642 | 52786 | sqlite3PcacheMove(pPgOld, origPgno); |
| 52643 | 52787 | sqlite3PagerUnrefNotNull(pPgOld); |
| 52644 | 52788 | } |
| 52645 | 52789 | |
| 52646 | 52790 | if( needSyncPgno ){ |
| @@ -59256,15 +59400,15 @@ | ||
| 59256 | 59400 | flagByte &= ~PTF_LEAF; |
| 59257 | 59401 | pPage->childPtrSize = 4-4*pPage->leaf; |
| 59258 | 59402 | pPage->xCellSize = cellSizePtr; |
| 59259 | 59403 | pBt = pPage->pBt; |
| 59260 | 59404 | if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ |
| 59261 | - /* EVIDENCE-OF: R-03640-13415 A value of 5 means the page is an interior | |
| 59262 | - ** table b-tree page. */ | |
| 59405 | + /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an | |
| 59406 | + ** interior table b-tree page. */ | |
| 59263 | 59407 | assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); |
| 59264 | - /* EVIDENCE-OF: R-20501-61796 A value of 13 means the page is a leaf | |
| 59265 | - ** table b-tree page. */ | |
| 59408 | + /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a | |
| 59409 | + ** leaf table b-tree page. */ | |
| 59266 | 59410 | assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); |
| 59267 | 59411 | pPage->intKey = 1; |
| 59268 | 59412 | if( pPage->leaf ){ |
| 59269 | 59413 | pPage->intKeyLeaf = 1; |
| 59270 | 59414 | pPage->xParseCell = btreeParseCellPtr; |
| @@ -59274,15 +59418,15 @@ | ||
| 59274 | 59418 | pPage->xParseCell = btreeParseCellPtrNoPayload; |
| 59275 | 59419 | } |
| 59276 | 59420 | pPage->maxLocal = pBt->maxLeaf; |
| 59277 | 59421 | pPage->minLocal = pBt->minLeaf; |
| 59278 | 59422 | }else if( flagByte==PTF_ZERODATA ){ |
| 59279 | - /* EVIDENCE-OF: R-27225-53936 A value of 2 means the page is an interior | |
| 59280 | - ** index b-tree page. */ | |
| 59423 | + /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an | |
| 59424 | + ** interior index b-tree page. */ | |
| 59281 | 59425 | assert( (PTF_ZERODATA)==2 ); |
| 59282 | - /* EVIDENCE-OF: R-16571-11615 A value of 10 means the page is a leaf | |
| 59283 | - ** index b-tree page. */ | |
| 59426 | + /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a | |
| 59427 | + ** leaf index b-tree page. */ | |
| 59284 | 59428 | assert( (PTF_ZERODATA|PTF_LEAF)==10 ); |
| 59285 | 59429 | pPage->intKey = 0; |
| 59286 | 59430 | pPage->intKeyLeaf = 0; |
| 59287 | 59431 | pPage->xParseCell = btreeParseCellPtrIndex; |
| 59288 | 59432 | pPage->maxLocal = pBt->maxLocal; |
| @@ -192850,11 +192994,11 @@ | ||
| 192850 | 192994 | int nArg, /* Number of args */ |
| 192851 | 192995 | sqlite3_value **apUnused /* Function arguments */ |
| 192852 | 192996 | ){ |
| 192853 | 192997 | assert( nArg==0 ); |
| 192854 | 192998 | UNUSED_PARAM2(nArg, apUnused); |
| 192855 | - sqlite3_result_text(pCtx, "fts5: 2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea", -1, SQLITE_TRANSIENT); | |
| 192999 | + sqlite3_result_text(pCtx, "fts5: 2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2", -1, SQLITE_TRANSIENT); | |
| 192856 | 193000 | } |
| 192857 | 193001 | |
| 192858 | 193002 | static int fts5Init(sqlite3 *db){ |
| 192859 | 193003 | static const sqlite3_module fts5Mod = { |
| 192860 | 193004 | /* iVersion */ 2, |
| 192861 | 193005 |
| --- src/sqlite3.c | |
| +++ src/sqlite3.c | |
| @@ -363,11 +363,11 @@ | |
| 363 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 364 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 365 | */ |
| 366 | #define SQLITE_VERSION "3.13.0" |
| 367 | #define SQLITE_VERSION_NUMBER 3013000 |
| 368 | #define SQLITE_SOURCE_ID "2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea" |
| 369 | |
| 370 | /* |
| 371 | ** CAPI3REF: Run-Time Library Version Numbers |
| 372 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 373 | ** |
| @@ -10917,11 +10917,11 @@ | |
| 10917 | ** |
| 10918 | ** When doing coverage testing ALWAYS and NEVER are hard-coded to |
| 10919 | ** be true and false so that the unreachable code they specify will |
| 10920 | ** not be counted as untested code. |
| 10921 | */ |
| 10922 | #if defined(SQLITE_COVERAGE_TEST) |
| 10923 | # define ALWAYS(X) (1) |
| 10924 | # define NEVER(X) (0) |
| 10925 | #elif !defined(NDEBUG) |
| 10926 | # define ALWAYS(X) ((X)?1:(assert(0),0)) |
| 10927 | # define NEVER(X) ((X)?(assert(0),1):0) |
| @@ -12954,11 +12954,11 @@ | |
| 12954 | */ |
| 12955 | struct PgHdr { |
| 12956 | sqlite3_pcache_page *pPage; /* Pcache object page handle */ |
| 12957 | void *pData; /* Page data */ |
| 12958 | void *pExtra; /* Extra content */ |
| 12959 | PgHdr *pDirty; /* Transient list of dirty pages */ |
| 12960 | Pager *pPager; /* The pager this page is part of */ |
| 12961 | Pgno pgno; /* Page number for this page */ |
| 12962 | #ifdef SQLITE_CHECK_PAGES |
| 12963 | u32 pageHash; /* Hash of page content */ |
| 12964 | #endif |
| @@ -12979,15 +12979,14 @@ | |
| 12979 | #define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */ |
| 12980 | #define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */ |
| 12981 | #define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */ |
| 12982 | #define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before |
| 12983 | ** writing this page to the database */ |
| 12984 | #define PGHDR_NEED_READ 0x010 /* Content is unread */ |
| 12985 | #define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */ |
| 12986 | #define PGHDR_MMAP 0x040 /* This is an mmap page object */ |
| 12987 | |
| 12988 | #define PGHDR_WAL_APPEND 0x080 /* Appended to wal file */ |
| 12989 | |
| 12990 | /* Initialize and shutdown the page cache subsystem */ |
| 12991 | SQLITE_PRIVATE int sqlite3PcacheInitialize(void); |
| 12992 | SQLITE_PRIVATE void sqlite3PcacheShutdown(void); |
| 12993 | |
| @@ -13065,10 +13064,15 @@ | |
| 13065 | ** interface is only available if SQLITE_CHECK_PAGES is defined when the |
| 13066 | ** library is built. |
| 13067 | */ |
| 13068 | SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); |
| 13069 | #endif |
| 13070 | |
| 13071 | /* Set and get the suggested cache-size for the specified pager-cache. |
| 13072 | ** |
| 13073 | ** If no global maximum is configured, then the system attempts to limit |
| 13074 | ** the total number of pages cached by purgeable pager-caches to the sum |
| @@ -43115,11 +43119,33 @@ | |
| 43115 | ** This file implements that page cache. |
| 43116 | */ |
| 43117 | /* #include "sqliteInt.h" */ |
| 43118 | |
| 43119 | /* |
| 43120 | ** A complete page cache is an instance of this structure. |
| 43121 | */ |
| 43122 | struct PCache { |
| 43123 | PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ |
| 43124 | PgHdr *pSynced; /* Last synced page in dirty page list */ |
| 43125 | int nRefSum; /* Sum of ref counts over all pages */ |
| @@ -43131,10 +43157,99 @@ | |
| 43131 | u8 eCreate; /* eCreate value for for xFetch() */ |
| 43132 | int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */ |
| 43133 | void *pStress; /* Argument to xStress */ |
| 43134 | sqlite3_pcache *pCache; /* Pluggable cache module */ |
| 43135 | }; |
| 43136 | |
| 43137 | /********************************** Linked List Management ********************/ |
| 43138 | |
| 43139 | /* Allowed values for second argument to pcacheManageDirtyList() */ |
| 43140 | #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ |
| @@ -43148,21 +43263,20 @@ | |
| 43148 | ** the dirty list. Doing both moves pPage to the front of the dirty list. |
| 43149 | */ |
| 43150 | static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ |
| 43151 | PCache *p = pPage->pCache; |
| 43152 | |
| 43153 | if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ |
| 43154 | assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); |
| 43155 | assert( pPage->pDirtyPrev || pPage==p->pDirty ); |
| 43156 | |
| 43157 | /* Update the PCache1.pSynced variable if necessary. */ |
| 43158 | if( p->pSynced==pPage ){ |
| 43159 | PgHdr *pSynced = pPage->pDirtyPrev; |
| 43160 | while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ |
| 43161 | pSynced = pSynced->pDirtyPrev; |
| 43162 | } |
| 43163 | p->pSynced = pSynced; |
| 43164 | } |
| 43165 | |
| 43166 | if( pPage->pDirtyNext ){ |
| 43167 | pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; |
| 43168 | }else{ |
| @@ -43170,14 +43284,19 @@ | |
| 43170 | p->pDirtyTail = pPage->pDirtyPrev; |
| 43171 | } |
| 43172 | if( pPage->pDirtyPrev ){ |
| 43173 | pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; |
| 43174 | }else{ |
| 43175 | assert( pPage==p->pDirty ); |
| 43176 | p->pDirty = pPage->pDirtyNext; |
| 43177 | if( p->pDirty==0 && p->bPurgeable ){ |
| 43178 | assert( p->eCreate==1 ); |
| 43179 | p->eCreate = 2; |
| 43180 | } |
| 43181 | } |
| 43182 | pPage->pDirtyNext = 0; |
| 43183 | pPage->pDirtyPrev = 0; |
| @@ -43195,23 +43314,34 @@ | |
| 43195 | assert( p->eCreate==2 ); |
| 43196 | p->eCreate = 1; |
| 43197 | } |
| 43198 | } |
| 43199 | p->pDirty = pPage; |
| 43200 | if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ |
| 43201 | p->pSynced = pPage; |
| 43202 | } |
| 43203 | } |
| 43204 | } |
| 43205 | |
| 43206 | /* |
| 43207 | ** Wrapper around the pluggable caches xUnpin method. If the cache is |
| 43208 | ** being used for an in-memory database, this function is a no-op. |
| 43209 | */ |
| 43210 | static void pcacheUnpin(PgHdr *p){ |
| 43211 | if( p->pCache->bPurgeable ){ |
| 43212 | sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); |
| 43213 | } |
| 43214 | } |
| 43215 | |
| 43216 | /* |
| 43217 | ** Compute the number of pages of cache requested. p->szCache is the |
| @@ -43277,10 +43407,11 @@ | |
| 43277 | p->eCreate = 2; |
| 43278 | p->xStress = xStress; |
| 43279 | p->pStress = pStress; |
| 43280 | p->szCache = 100; |
| 43281 | p->szSpill = 1; |
| 43282 | return sqlite3PcacheSetPageSize(p, szPage); |
| 43283 | } |
| 43284 | |
| 43285 | /* |
| 43286 | ** Change the page size for PCache object. The caller must ensure that there |
| @@ -43299,10 +43430,11 @@ | |
| 43299 | if( pCache->pCache ){ |
| 43300 | sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 43301 | } |
| 43302 | pCache->pCache = pNew; |
| 43303 | pCache->szPage = szPage; |
| 43304 | } |
| 43305 | return SQLITE_OK; |
| 43306 | } |
| 43307 | |
| 43308 | /* |
| @@ -43333,15 +43465,17 @@ | |
| 43333 | PCache *pCache, /* Obtain the page from this cache */ |
| 43334 | Pgno pgno, /* Page number to obtain */ |
| 43335 | int createFlag /* If true, create page if it does not exist already */ |
| 43336 | ){ |
| 43337 | int eCreate; |
| 43338 | |
| 43339 | assert( pCache!=0 ); |
| 43340 | assert( pCache->pCache!=0 ); |
| 43341 | assert( createFlag==3 || createFlag==0 ); |
| 43342 | assert( pgno>0 ); |
| 43343 | |
| 43344 | /* eCreate defines what to do if the page does not exist. |
| 43345 | ** 0 Do not allocate a new page. (createFlag==0) |
| 43346 | ** 1 Allocate a new page if doing so is inexpensive. |
| 43347 | ** (createFlag==1 AND bPurgeable AND pDirty) |
| @@ -43350,11 +43484,14 @@ | |
| 43350 | */ |
| 43351 | eCreate = createFlag & pCache->eCreate; |
| 43352 | assert( eCreate==0 || eCreate==1 || eCreate==2 ); |
| 43353 | assert( createFlag==0 || pCache->eCreate==eCreate ); |
| 43354 | assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); |
| 43355 | return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); |
| 43356 | } |
| 43357 | |
| 43358 | /* |
| 43359 | ** If the sqlite3PcacheFetch() routine is unable to allocate a new |
| 43360 | ** page because no clean pages are available for reuse and the cache |
| @@ -43377,11 +43514,15 @@ | |
| 43377 | if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){ |
| 43378 | /* Find a dirty page to write-out and recycle. First try to find a |
| 43379 | ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC |
| 43380 | ** cleared), but if that is not possible settle for any other |
| 43381 | ** unreferenced dirty page. |
| 43382 | */ |
| 43383 | for(pPg=pCache->pSynced; |
| 43384 | pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); |
| 43385 | pPg=pPg->pDirtyPrev |
| 43386 | ); |
| 43387 | pCache->pSynced = pPg; |
| @@ -43395,11 +43536,13 @@ | |
| 43395 | "spill page %d making room for %d - cache used: %d/%d", |
| 43396 | pPg->pgno, pgno, |
| 43397 | sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), |
| 43398 | numberOfCachePages(pCache)); |
| 43399 | #endif |
| 43400 | rc = pCache->xStress(pCache->pStress, pPg); |
| 43401 | if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ |
| 43402 | return rc; |
| 43403 | } |
| 43404 | } |
| 43405 | } |
| @@ -43455,10 +43598,11 @@ | |
| 43455 | if( !pPgHdr->pPage ){ |
| 43456 | return pcacheFetchFinishWithInit(pCache, pgno, pPage); |
| 43457 | } |
| 43458 | pCache->nRefSum++; |
| 43459 | pPgHdr->nRef++; |
| 43460 | return pPgHdr; |
| 43461 | } |
| 43462 | |
| 43463 | /* |
| 43464 | ** Decrement the reference count on a page. If the page is clean and the |
| @@ -43468,12 +43612,15 @@ | |
| 43468 | assert( p->nRef>0 ); |
| 43469 | p->pCache->nRefSum--; |
| 43470 | if( (--p->nRef)==0 ){ |
| 43471 | if( p->flags&PGHDR_CLEAN ){ |
| 43472 | pcacheUnpin(p); |
| 43473 | }else if( p->pDirtyPrev!=0 ){ |
| 43474 | /* Move the page to the head of the dirty list. */ |
| 43475 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 43476 | } |
| 43477 | } |
| 43478 | } |
| 43479 | |
| @@ -43480,10 +43627,11 @@ | |
| 43480 | /* |
| 43481 | ** Increase the reference count of a supplied page by 1. |
| 43482 | */ |
| 43483 | SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ |
| 43484 | assert(p->nRef>0); |
| 43485 | p->nRef++; |
| 43486 | p->pCache->nRefSum++; |
| 43487 | } |
| 43488 | |
| 43489 | /* |
| @@ -43491,10 +43639,11 @@ | |
| 43491 | ** page. This function deletes that reference, so after it returns the |
| 43492 | ** page pointed to by p is invalid. |
| 43493 | */ |
| 43494 | SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ |
| 43495 | assert( p->nRef==1 ); |
| 43496 | if( p->flags&PGHDR_DIRTY ){ |
| 43497 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 43498 | } |
| 43499 | p->pCache->nRefSum--; |
| 43500 | sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1); |
| @@ -43504,30 +43653,36 @@ | |
| 43504 | ** Make sure the page is marked as dirty. If it isn't dirty already, |
| 43505 | ** make it so. |
| 43506 | */ |
| 43507 | SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ |
| 43508 | assert( p->nRef>0 ); |
| 43509 | if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ |
| 43510 | p->flags &= ~PGHDR_DONT_WRITE; |
| 43511 | if( p->flags & PGHDR_CLEAN ){ |
| 43512 | p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); |
| 43513 | assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); |
| 43514 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); |
| 43515 | } |
| 43516 | } |
| 43517 | } |
| 43518 | |
| 43519 | /* |
| 43520 | ** Make sure the page is marked as clean. If it isn't clean already, |
| 43521 | ** make it so. |
| 43522 | */ |
| 43523 | SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ |
| 43524 | if( (p->flags & PGHDR_DIRTY) ){ |
| 43525 | assert( (p->flags & PGHDR_CLEAN)==0 ); |
| 43526 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 43527 | p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 43528 | p->flags |= PGHDR_CLEAN; |
| 43529 | if( p->nRef==0 ){ |
| 43530 | pcacheUnpin(p); |
| 43531 | } |
| 43532 | } |
| 43533 | } |
| @@ -43535,10 +43690,11 @@ | |
| 43535 | /* |
| 43536 | ** Make every page in the cache clean. |
| 43537 | */ |
| 43538 | SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){ |
| 43539 | PgHdr *p; |
| 43540 | while( (p = pCache->pDirty)!=0 ){ |
| 43541 | sqlite3PcacheMakeClean(p); |
| 43542 | } |
| 43543 | } |
| 43544 | |
| @@ -43545,10 +43701,11 @@ | |
| 43545 | /* |
| 43546 | ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages. |
| 43547 | */ |
| 43548 | SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){ |
| 43549 | PgHdr *p; |
| 43550 | for(p=pCache->pDirty; p; p=p->pDirtyNext){ |
| 43551 | p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 43552 | } |
| 43553 | pCache->pSynced = pCache->pDirtyTail; |
| 43554 | } |
| @@ -43569,10 +43726,12 @@ | |
| 43569 | */ |
| 43570 | SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ |
| 43571 | PCache *pCache = p->pCache; |
| 43572 | assert( p->nRef>0 ); |
| 43573 | assert( newPgno>0 ); |
| 43574 | sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); |
| 43575 | p->pgno = newPgno; |
| 43576 | if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ |
| 43577 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 43578 | } |
| @@ -43589,10 +43748,11 @@ | |
| 43589 | */ |
| 43590 | SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ |
| 43591 | if( pCache->pCache ){ |
| 43592 | PgHdr *p; |
| 43593 | PgHdr *pNext; |
| 43594 | for(p=pCache->pDirty; p; p=pNext){ |
| 43595 | pNext = p->pDirtyNext; |
| 43596 | /* This routine never gets call with a positive pgno except right |
| 43597 | ** after sqlite3PcacheCleanAll(). So if there are dirty pages, |
| 43598 | ** it must be that pgno==0. |
| @@ -43619,10 +43779,11 @@ | |
| 43619 | /* |
| 43620 | ** Close a cache. |
| 43621 | */ |
| 43622 | SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){ |
| 43623 | assert( pCache->pCache!=0 ); |
| 43624 | sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 43625 | } |
| 43626 | |
| 43627 | /* |
| 43628 | ** Discard the contents of the cache. |
| @@ -47585,24 +47746,28 @@ | |
| 47585 | } |
| 47586 | |
| 47587 | static int pager_truncate(Pager *pPager, Pgno nPage); |
| 47588 | |
| 47589 | /* |
| 47590 | ** The write transaction open on the pager passed as the only argument is |
| 47591 | ** being committed. This function returns true if all dirty pages should |
| 47592 | ** be flushed to disk, or false otherwise. Pages should be flushed to disk |
| 47593 | ** unless one of the following is true: |
| 47594 | ** |
| 47595 | ** * The db is an in-memory database. |
| 47596 | ** |
| 47597 | ** * The db is a temporary database and the db file has not been opened. |
| 47598 | ** |
| 47599 | ** * The db is a temporary database and the cache contains less than |
| 47600 | ** C/4 dirty pages, where C is the configured cache-size. |
| 47601 | */ |
| 47602 | static int pagerFlushOnCommit(Pager *pPager){ |
| 47603 | if( pPager->tempFile==0 ) return 1; |
| 47604 | if( !isOpen(pPager->fd) ) return 0; |
| 47605 | return (sqlite3PCachePercentDirty(pPager->pPCache)>=25); |
| 47606 | } |
| 47607 | |
| 47608 | /* |
| @@ -47706,11 +47871,11 @@ | |
| 47706 | } |
| 47707 | pPager->journalOff = 0; |
| 47708 | }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST |
| 47709 | || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) |
| 47710 | ){ |
| 47711 | rc = zeroJournalHdr(pPager, hasMaster); |
| 47712 | pPager->journalOff = 0; |
| 47713 | }else{ |
| 47714 | /* This branch may be executed with Pager.journalMode==MEMORY if |
| 47715 | ** a hot-journal was just rolled back. In this case the journal |
| 47716 | ** file should be closed and deleted. If this connection writes to |
| @@ -47741,16 +47906,18 @@ | |
| 47741 | #endif |
| 47742 | |
| 47743 | sqlite3BitvecDestroy(pPager->pInJournal); |
| 47744 | pPager->pInJournal = 0; |
| 47745 | pPager->nRec = 0; |
| 47746 | if( MEMDB || pagerFlushOnCommit(pPager) ){ |
| 47747 | sqlite3PcacheCleanAll(pPager->pPCache); |
| 47748 | }else{ |
| 47749 | sqlite3PcacheClearWritable(pPager->pPCache); |
| 47750 | } |
| 47751 | sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); |
| 47752 | |
| 47753 | if( pagerUseWal(pPager) ){ |
| 47754 | /* Drop the WAL write-lock, if any. Also, if the connection was in |
| 47755 | ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE |
| 47756 | ** lock held on the database file. |
| @@ -48080,11 +48247,10 @@ | |
| 48080 | pPager->doNotSpill |= SPILLFLAG_ROLLBACK; |
| 48081 | rc = sqlite3PagerGet(pPager, pgno, &pPg, 1); |
| 48082 | assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); |
| 48083 | pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; |
| 48084 | if( rc!=SQLITE_OK ) return rc; |
| 48085 | pPg->flags &= ~PGHDR_NEED_READ; |
| 48086 | sqlite3PcacheMakeDirty(pPg); |
| 48087 | } |
| 48088 | if( pPg ){ |
| 48089 | /* No page should ever be explicitly rolled back that is in use, except |
| 48090 | ** for page 1 which is held in use in order to keep the lock on the |
| @@ -48094,37 +48260,14 @@ | |
| 48094 | */ |
| 48095 | void *pData; |
| 48096 | pData = pPg->pData; |
| 48097 | memcpy(pData, (u8*)aData, pPager->pageSize); |
| 48098 | pPager->xReiniter(pPg); |
| 48099 | if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){ |
| 48100 | /* If the contents of this page were just restored from the main |
| 48101 | ** journal file, then its content must be as they were when the |
| 48102 | ** transaction was first opened. In this case we can mark the page |
| 48103 | ** as clean, since there will be no need to write it out to the |
| 48104 | ** database. |
| 48105 | ** |
| 48106 | ** There is one exception to this rule. If the page is being rolled |
| 48107 | ** back as part of a savepoint (or statement) rollback from an |
| 48108 | ** unsynced portion of the main journal file, then it is not safe |
| 48109 | ** to mark the page as clean. This is because marking the page as |
| 48110 | ** clean will clear the PGHDR_NEED_SYNC flag. Since the page is |
| 48111 | ** already in the journal file (recorded in Pager.pInJournal) and |
| 48112 | ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to |
| 48113 | ** again within this transaction, it will be marked as dirty but |
| 48114 | ** the PGHDR_NEED_SYNC flag will not be set. It could then potentially |
| 48115 | ** be written out into the database file before its journal file |
| 48116 | ** segment is synced. If a crash occurs during or following this, |
| 48117 | ** database corruption may ensue. |
| 48118 | ** |
| 48119 | ** Update: Another exception is for temp files that are not |
| 48120 | ** in-memory databases. In this case the page may have been dirty |
| 48121 | ** at the start of the transaction. |
| 48122 | */ |
| 48123 | assert( !pagerUseWal(pPager) ); |
| 48124 | if( pPager->tempFile==0 ) sqlite3PcacheMakeClean(pPg); |
| 48125 | } |
| 48126 | pager_set_pagehash(pPg); |
| 48127 | |
| 48128 | /* If this was page 1, then restore the value of Pager.dbFileVers. |
| 48129 | ** Do this before any decoding. */ |
| 48130 | if( pgno==1 ){ |
| @@ -51731,10 +51874,11 @@ | |
| 51731 | if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ |
| 51732 | PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); |
| 51733 | IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) |
| 51734 | pPg->flags |= PGHDR_DONT_WRITE; |
| 51735 | pPg->flags &= ~PGHDR_WRITEABLE; |
| 51736 | pager_set_pagehash(pPg); |
| 51737 | } |
| 51738 | } |
| 51739 | |
| 51740 | /* |
| @@ -51926,21 +52070,21 @@ | |
| 51926 | |
| 51927 | /* If a prior error occurred, report that error again. */ |
| 51928 | if( NEVER(pPager->errCode) ) return pPager->errCode; |
| 51929 | |
| 51930 | /* Provide the ability to easily simulate an I/O error during testing */ |
| 51931 | if( (rc = sqlite3FaultSim(400))!=SQLITE_OK ) return rc; |
| 51932 | |
| 51933 | PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", |
| 51934 | pPager->zFilename, zMaster, pPager->dbSize)); |
| 51935 | |
| 51936 | /* If no database changes have been made, return early. */ |
| 51937 | if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK; |
| 51938 | |
| 51939 | assert( MEMDB==0 || pPager->tempFile ); |
| 51940 | assert( isOpen(pPager->fd) || pPager->tempFile ); |
| 51941 | if( 0==pagerFlushOnCommit(pPager) ){ |
| 51942 | /* If this is an in-memory db, or no pages have been written to, or this |
| 51943 | ** function has already been called, it is mostly a no-op. However, any |
| 51944 | ** backup in progress needs to be restarted. */ |
| 51945 | sqlite3BackupRestart(pPager->pBackup); |
| 51946 | }else{ |
| @@ -52561,10 +52705,11 @@ | |
| 52561 | assert( assert_pager_state(pPager) ); |
| 52562 | |
| 52563 | /* In order to be able to rollback, an in-memory database must journal |
| 52564 | ** the page we are moving from. |
| 52565 | */ |
| 52566 | if( pPager->tempFile ){ |
| 52567 | rc = sqlite3PagerWrite(pPg); |
| 52568 | if( rc ) return rc; |
| 52569 | } |
| 52570 | |
| @@ -52635,12 +52780,11 @@ | |
| 52635 | |
| 52636 | /* For an in-memory database, make sure the original page continues |
| 52637 | ** to exist, in case the transaction needs to roll back. Use pPgOld |
| 52638 | ** as the original page since it has already been allocated. |
| 52639 | */ |
| 52640 | if( pPager->tempFile ){ |
| 52641 | assert( pPgOld ); |
| 52642 | sqlite3PcacheMove(pPgOld, origPgno); |
| 52643 | sqlite3PagerUnrefNotNull(pPgOld); |
| 52644 | } |
| 52645 | |
| 52646 | if( needSyncPgno ){ |
| @@ -59256,15 +59400,15 @@ | |
| 59256 | flagByte &= ~PTF_LEAF; |
| 59257 | pPage->childPtrSize = 4-4*pPage->leaf; |
| 59258 | pPage->xCellSize = cellSizePtr; |
| 59259 | pBt = pPage->pBt; |
| 59260 | if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ |
| 59261 | /* EVIDENCE-OF: R-03640-13415 A value of 5 means the page is an interior |
| 59262 | ** table b-tree page. */ |
| 59263 | assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); |
| 59264 | /* EVIDENCE-OF: R-20501-61796 A value of 13 means the page is a leaf |
| 59265 | ** table b-tree page. */ |
| 59266 | assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); |
| 59267 | pPage->intKey = 1; |
| 59268 | if( pPage->leaf ){ |
| 59269 | pPage->intKeyLeaf = 1; |
| 59270 | pPage->xParseCell = btreeParseCellPtr; |
| @@ -59274,15 +59418,15 @@ | |
| 59274 | pPage->xParseCell = btreeParseCellPtrNoPayload; |
| 59275 | } |
| 59276 | pPage->maxLocal = pBt->maxLeaf; |
| 59277 | pPage->minLocal = pBt->minLeaf; |
| 59278 | }else if( flagByte==PTF_ZERODATA ){ |
| 59279 | /* EVIDENCE-OF: R-27225-53936 A value of 2 means the page is an interior |
| 59280 | ** index b-tree page. */ |
| 59281 | assert( (PTF_ZERODATA)==2 ); |
| 59282 | /* EVIDENCE-OF: R-16571-11615 A value of 10 means the page is a leaf |
| 59283 | ** index b-tree page. */ |
| 59284 | assert( (PTF_ZERODATA|PTF_LEAF)==10 ); |
| 59285 | pPage->intKey = 0; |
| 59286 | pPage->intKeyLeaf = 0; |
| 59287 | pPage->xParseCell = btreeParseCellPtrIndex; |
| 59288 | pPage->maxLocal = pBt->maxLocal; |
| @@ -192850,11 +192994,11 @@ | |
| 192850 | int nArg, /* Number of args */ |
| 192851 | sqlite3_value **apUnused /* Function arguments */ |
| 192852 | ){ |
| 192853 | assert( nArg==0 ); |
| 192854 | UNUSED_PARAM2(nArg, apUnused); |
| 192855 | sqlite3_result_text(pCtx, "fts5: 2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea", -1, SQLITE_TRANSIENT); |
| 192856 | } |
| 192857 | |
| 192858 | static int fts5Init(sqlite3 *db){ |
| 192859 | static const sqlite3_module fts5Mod = { |
| 192860 | /* iVersion */ 2, |
| 192861 |
| --- src/sqlite3.c | |
| +++ src/sqlite3.c | |
| @@ -363,11 +363,11 @@ | |
| 363 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 364 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 365 | */ |
| 366 | #define SQLITE_VERSION "3.13.0" |
| 367 | #define SQLITE_VERSION_NUMBER 3013000 |
| 368 | #define SQLITE_SOURCE_ID "2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2" |
| 369 | |
| 370 | /* |
| 371 | ** CAPI3REF: Run-Time Library Version Numbers |
| 372 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 373 | ** |
| @@ -10917,11 +10917,11 @@ | |
| 10917 | ** |
| 10918 | ** When doing coverage testing ALWAYS and NEVER are hard-coded to |
| 10919 | ** be true and false so that the unreachable code they specify will |
| 10920 | ** not be counted as untested code. |
| 10921 | */ |
| 10922 | #if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) |
| 10923 | # define ALWAYS(X) (1) |
| 10924 | # define NEVER(X) (0) |
| 10925 | #elif !defined(NDEBUG) |
| 10926 | # define ALWAYS(X) ((X)?1:(assert(0),0)) |
| 10927 | # define NEVER(X) ((X)?(assert(0),1):0) |
| @@ -12954,11 +12954,11 @@ | |
| 12954 | */ |
| 12955 | struct PgHdr { |
| 12956 | sqlite3_pcache_page *pPage; /* Pcache object page handle */ |
| 12957 | void *pData; /* Page data */ |
| 12958 | void *pExtra; /* Extra content */ |
| 12959 | PgHdr *pDirty; /* Transient list of dirty pages sorted by pgno */ |
| 12960 | Pager *pPager; /* The pager this page is part of */ |
| 12961 | Pgno pgno; /* Page number for this page */ |
| 12962 | #ifdef SQLITE_CHECK_PAGES |
| 12963 | u32 pageHash; /* Hash of page content */ |
| 12964 | #endif |
| @@ -12979,15 +12979,14 @@ | |
| 12979 | #define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */ |
| 12980 | #define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */ |
| 12981 | #define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */ |
| 12982 | #define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before |
| 12983 | ** writing this page to the database */ |
| 12984 | #define PGHDR_DONT_WRITE 0x010 /* Do not write content to disk */ |
| 12985 | #define PGHDR_MMAP 0x020 /* This is an mmap page object */ |
| 12986 | |
| 12987 | #define PGHDR_WAL_APPEND 0x040 /* Appended to wal file */ |
| 12988 | |
| 12989 | /* Initialize and shutdown the page cache subsystem */ |
| 12990 | SQLITE_PRIVATE int sqlite3PcacheInitialize(void); |
| 12991 | SQLITE_PRIVATE void sqlite3PcacheShutdown(void); |
| 12992 | |
| @@ -13065,10 +13064,15 @@ | |
| 13064 | ** interface is only available if SQLITE_CHECK_PAGES is defined when the |
| 13065 | ** library is built. |
| 13066 | */ |
| 13067 | SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); |
| 13068 | #endif |
| 13069 | |
| 13070 | #if defined(SQLITE_DEBUG) |
| 13071 | /* Check invariants on a PgHdr object */ |
| 13072 | SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*); |
| 13073 | #endif |
| 13074 | |
| 13075 | /* Set and get the suggested cache-size for the specified pager-cache. |
| 13076 | ** |
| 13077 | ** If no global maximum is configured, then the system attempts to limit |
| 13078 | ** the total number of pages cached by purgeable pager-caches to the sum |
| @@ -43115,11 +43119,33 @@ | |
| 43119 | ** This file implements that page cache. |
| 43120 | */ |
| 43121 | /* #include "sqliteInt.h" */ |
| 43122 | |
| 43123 | /* |
| 43124 | ** A complete page cache is an instance of this structure. Every |
| 43125 | ** entry in the cache holds a single page of the database file. The |
| 43126 | ** btree layer only operates on the cached copy of the database pages. |
| 43127 | ** |
| 43128 | ** A page cache entry is "clean" if it exactly matches what is currently |
| 43129 | ** on disk. A page is "dirty" if it has been modified and needs to be |
| 43130 | ** persisted to disk. |
| 43131 | ** |
| 43132 | ** pDirty, pDirtyTail, pSynced: |
| 43133 | ** All dirty pages are linked into the doubly linked list using |
| 43134 | ** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order |
| 43135 | ** such that p was added to the list more recently than p->pDirtyNext. |
| 43136 | ** PCache.pDirty points to the first (newest) element in the list and |
| 43137 | ** pDirtyTail to the last (oldest). |
| 43138 | ** |
| 43139 | ** The PCache.pSynced variable is used to optimize searching for a dirty |
| 43140 | ** page to eject from the cache mid-transaction. It is better to eject |
| 43141 | ** a page that does not require a journal sync than one that does. |
| 43142 | ** Therefore, pSynced is maintained so that it *almost* always points |
| 43143 | ** to either the oldest page in the pDirty/pDirtyTail list that has a |
| 43144 | ** clear PGHDR_NEED_SYNC flag or to a page that is older than this one |
| 43145 | ** (so that the right page to eject can be found by following pDirtyPrev |
| 43146 | ** pointers). |
| 43147 | */ |
| 43148 | struct PCache { |
| 43149 | PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ |
| 43150 | PgHdr *pSynced; /* Last synced page in dirty page list */ |
| 43151 | int nRefSum; /* Sum of ref counts over all pages */ |
| @@ -43131,10 +43157,99 @@ | |
| 43157 | u8 eCreate; /* eCreate value for xFetch() */ |
| 43158 | int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */ |
| 43159 | void *pStress; /* Argument to xStress */ |
| 43160 | sqlite3_pcache *pCache; /* Pluggable cache module */ |
| 43161 | }; |
| 43162 | |
| 43163 | /********************************** Test and Debug Logic **********************/ |
| 43164 | /* |
| 43165 | ** Debug tracing macros. Enable by changing the "0" to "1" and |
| 43166 | ** recompiling. |
| 43167 | ** |
| 43168 | ** When sqlite3PcacheTrace is 1, single line trace messages are issued. |
| 43169 | ** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries |
| 43170 | ** is displayed for many operations, resulting in a lot of output. |
| 43171 | */ |
| 43172 | #if defined(SQLITE_DEBUG) && 0 |
| 43173 | int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */ |
| 43174 | int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */ |
| 43175 | # define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;} |
| 43176 | void pcacheDump(PCache *pCache){ |
| 43177 | int N; |
| 43178 | int i, j; |
| 43179 | sqlite3_pcache_page *pLower; |
| 43180 | PgHdr *pPg; |
| 43181 | unsigned char *a; |
| 43182 | |
| 43183 | if( sqlite3PcacheTrace<2 ) return; |
| 43184 | if( pCache->pCache==0 ) return; |
| 43185 | N = sqlite3PcachePagecount(pCache); |
| 43186 | if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; |
| 43187 | for(i=1; i<=N; i++){ |
| 43188 | pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); |
| 43189 | if( pLower==0 ) continue; |
| 43190 | pPg = (PgHdr*)pLower->pExtra; |
| 43191 | printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); |
| 43192 | a = (unsigned char *)pLower->pBuf; |
| 43193 | for(j=0; j<12; j++) printf("%02x", a[j]); |
| 43194 | printf("\n"); |
| 43195 | if( pPg->pPage==0 ){ |
| 43196 | sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); |
| 43197 | } |
| 43198 | } |
| 43199 | } |
| 43200 | #else |
| 43201 | # define pcacheTrace(X) |
| 43202 | # define pcacheDump(X) |
| 43203 | #endif |
| 43204 | |
| 43205 | /* |
| 43206 | ** Check invariants on a PgHdr entry. Return true if everything is OK. |
| 43207 | ** Return false if any invariant is violated. |
| 43208 | ** |
| 43209 | ** This routine is for use inside of assert() statements only. For |
| 43210 | ** example: |
| 43211 | ** |
| 43212 | ** assert( sqlite3PcachePageSanity(pPg) ); |
| 43213 | */ |
| 43214 | #if SQLITE_DEBUG |
| 43215 | SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){ |
| 43216 | PCache *pCache; |
| 43217 | assert( pPg!=0 ); |
| 43218 | assert( pPg->pgno>0 ); /* Page number is 1 or more */ |
| 43219 | pCache = pPg->pCache; |
| 43220 | assert( pCache!=0 ); /* Every page has an associated PCache */ |
| 43221 | if( pPg->flags & PGHDR_CLEAN ){ |
| 43222 | assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ |
| 43223 | assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */ |
| 43224 | assert( pCache->pDirtyTail!=pPg ); |
| 43225 | } |
| 43226 | /* WRITEABLE pages must also be DIRTY */ |
| 43227 | if( pPg->flags & PGHDR_WRITEABLE ){ |
| 43228 | assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */ |
| 43229 | } |
| 43230 | /* NEED_SYNC can be set independently of WRITEABLE. This can happen, |
| 43231 | ** for example, when using the sqlite3PagerDontWrite() optimization: |
| 43232 | ** (1) Page X is journalled, and gets WRITEABLE and NEED_SEEK. |
| 43233 | ** (2) Page X moved to freelist, WRITEABLE is cleared |
| 43234 | ** (3) Page X reused, WRITEABLE is set again |
| 43235 | ** If NEED_SYNC had been cleared in step 2, then it would not be reset |
| 43236 | ** in step 3, and page might be written into the database without first |
| 43237 | ** syncing the rollback journal, which might cause corruption on a power |
| 43238 | ** loss. |
| 43239 | ** |
| 43240 | ** Another example is when the database page size is smaller than the |
| 43241 | ** disk sector size. When any page of a sector is journalled, all pages |
| 43242 | ** in that sector are marked NEED_SYNC even if they are still CLEAN, just |
| 43243 | ** in case they are later modified, since all pages in the same sector |
| 43244 | ** must be journalled and synced before any of those pages can be safely |
| 43245 | ** written. |
| 43246 | */ |
| 43247 | return 1; |
| 43248 | } |
| 43249 | #endif /* SQLITE_DEBUG */ |
| 43250 | |
| 43251 | |
| 43252 | /********************************** Linked List Management ********************/ |
| 43253 | |
| 43254 | /* Allowed values for second argument to pcacheManageDirtyList() */ |
| 43255 | #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ |
| @@ -43148,21 +43263,20 @@ | |
| 43263 | ** the dirty list. Doing both moves pPage to the front of the dirty list. |
| 43264 | */ |
| 43265 | static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ |
| 43266 | PCache *p = pPage->pCache; |
| 43267 | |
| 43268 | pcacheTrace(("%p.DIRTYLIST.%s %d\n", p, |
| 43269 | addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT", |
| 43270 | pPage->pgno)); |
| 43271 | if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ |
| 43272 | assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); |
| 43273 | assert( pPage->pDirtyPrev || pPage==p->pDirty ); |
| 43274 | |
| 43275 | /* Update the PCache1.pSynced variable if necessary. */ |
| 43276 | if( p->pSynced==pPage ){ |
| 43277 | p->pSynced = pPage->pDirtyPrev; |
| 43278 | } |
| 43279 | |
| 43280 | if( pPage->pDirtyNext ){ |
| 43281 | pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; |
| 43282 | }else{ |
| @@ -43170,14 +43284,19 @@ | |
| 43284 | p->pDirtyTail = pPage->pDirtyPrev; |
| 43285 | } |
| 43286 | if( pPage->pDirtyPrev ){ |
| 43287 | pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; |
| 43288 | }else{ |
| 43289 | /* If there are now no dirty pages in the cache, set eCreate to 2. |
| 43290 | ** This is an optimization that allows sqlite3PcacheFetch() to skip |
| 43291 | ** searching for a dirty page to eject from the cache when it might |
| 43292 | ** otherwise have to. */ |
| 43293 | assert( pPage==p->pDirty ); |
| 43294 | p->pDirty = pPage->pDirtyNext; |
| 43295 | assert( p->bPurgeable || p->eCreate==2 ); |
| 43296 | if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/ |
| 43297 | assert( p->bPurgeable==0 || p->eCreate==1 ); |
| 43298 | p->eCreate = 2; |
| 43299 | } |
| 43300 | } |
| 43301 | pPage->pDirtyNext = 0; |
| 43302 | pPage->pDirtyPrev = 0; |
| @@ -43195,23 +43314,34 @@ | |
| 43314 | assert( p->eCreate==2 ); |
| 43315 | p->eCreate = 1; |
| 43316 | } |
| 43317 | } |
| 43318 | p->pDirty = pPage; |
| 43319 | |
| 43320 | /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set |
| 43321 | ** pSynced to point to it. Checking the NEED_SYNC flag is an |
| 43322 | ** optimization, as if pSynced points to a page with the NEED_SYNC |
| 43323 | ** flag set sqlite3PcacheFetchStress() searches through all newer |
| 43324 | ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */ |
| 43325 | if( !p->pSynced |
| 43326 | && 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/ |
| 43327 | ){ |
| 43328 | p->pSynced = pPage; |
| 43329 | } |
| 43330 | } |
| 43331 | pcacheDump(p); |
| 43332 | } |
| 43333 | |
| 43334 | /* |
| 43335 | ** Wrapper around the pluggable caches xUnpin method. If the cache is |
| 43336 | ** being used for an in-memory database, this function is a no-op. |
| 43337 | */ |
| 43338 | static void pcacheUnpin(PgHdr *p){ |
| 43339 | if( p->pCache->bPurgeable ){ |
| 43340 | pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno)); |
| 43341 | sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); |
| 43342 | pcacheDump(p->pCache); |
| 43343 | } |
| 43344 | } |
| 43345 | |
| 43346 | /* |
| 43347 | ** Compute the number of pages of cache requested. p->szCache is the |
| @@ -43277,10 +43407,11 @@ | |
| 43407 | p->eCreate = 2; |
| 43408 | p->xStress = xStress; |
| 43409 | p->pStress = pStress; |
| 43410 | p->szCache = 100; |
| 43411 | p->szSpill = 1; |
| 43412 | pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable)); |
| 43413 | return sqlite3PcacheSetPageSize(p, szPage); |
| 43414 | } |
| 43415 | |
| 43416 | /* |
| 43417 | ** Change the page size for PCache object. The caller must ensure that there |
| @@ -43299,10 +43430,11 @@ | |
| 43430 | if( pCache->pCache ){ |
| 43431 | sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 43432 | } |
| 43433 | pCache->pCache = pNew; |
| 43434 | pCache->szPage = szPage; |
| 43435 | pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage)); |
| 43436 | } |
| 43437 | return SQLITE_OK; |
| 43438 | } |
| 43439 | |
| 43440 | /* |
| @@ -43333,15 +43465,17 @@ | |
| 43465 | PCache *pCache, /* Obtain the page from this cache */ |
| 43466 | Pgno pgno, /* Page number to obtain */ |
| 43467 | int createFlag /* If true, create page if it does not exist already */ |
| 43468 | ){ |
| 43469 | int eCreate; |
| 43470 | sqlite3_pcache_page *pRes; |
| 43471 | |
| 43472 | assert( pCache!=0 ); |
| 43473 | assert( pCache->pCache!=0 ); |
| 43474 | assert( createFlag==3 || createFlag==0 ); |
| 43475 | assert( pgno>0 ); |
| 43476 | assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) ); |
| 43477 | |
| 43478 | /* eCreate defines what to do if the page does not exist. |
| 43479 | ** 0 Do not allocate a new page. (createFlag==0) |
| 43480 | ** 1 Allocate a new page if doing so is inexpensive. |
| 43481 | ** (createFlag==1 AND bPurgeable AND pDirty) |
| @@ -43350,11 +43484,14 @@ | |
| 43484 | */ |
| 43485 | eCreate = createFlag & pCache->eCreate; |
| 43486 | assert( eCreate==0 || eCreate==1 || eCreate==2 ); |
| 43487 | assert( createFlag==0 || pCache->eCreate==eCreate ); |
| 43488 | assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); |
| 43489 | pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); |
| 43490 | pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno, |
| 43491 | createFlag?" create":"",pRes)); |
| 43492 | return pRes; |
| 43493 | } |
| 43494 | |
| 43495 | /* |
| 43496 | ** If the sqlite3PcacheFetch() routine is unable to allocate a new |
| 43497 | ** page because no clean pages are available for reuse and the cache |
| @@ -43377,11 +43514,15 @@ | |
| 43514 | if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){ |
| 43515 | /* Find a dirty page to write-out and recycle. First try to find a |
| 43516 | ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC |
| 43517 | ** cleared), but if that is not possible settle for any other |
| 43518 | ** unreferenced dirty page. |
| 43519 | ** |
| 43520 | ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC |
| 43521 | ** flag is currently referenced, then the following may leave pSynced |
| 43522 | ** set incorrectly (pointing to other than the LRU page with NEED_SYNC |
| 43523 | ** cleared). This is Ok, as pSynced is just an optimization. */ |
| 43524 | for(pPg=pCache->pSynced; |
| 43525 | pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); |
| 43526 | pPg=pPg->pDirtyPrev |
| 43527 | ); |
| 43528 | pCache->pSynced = pPg; |
| @@ -43395,11 +43536,13 @@ | |
| 43536 | "spill page %d making room for %d - cache used: %d/%d", |
| 43537 | pPg->pgno, pgno, |
| 43538 | sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), |
| 43539 | numberOfCachePages(pCache)); |
| 43540 | #endif |
| 43541 | pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno)); |
| 43542 | rc = pCache->xStress(pCache->pStress, pPg); |
| 43543 | pcacheDump(pCache); |
| 43544 | if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ |
| 43545 | return rc; |
| 43546 | } |
| 43547 | } |
| 43548 | } |
| @@ -43455,10 +43598,11 @@ | |
| 43598 | if( !pPgHdr->pPage ){ |
| 43599 | return pcacheFetchFinishWithInit(pCache, pgno, pPage); |
| 43600 | } |
| 43601 | pCache->nRefSum++; |
| 43602 | pPgHdr->nRef++; |
| 43603 | assert( sqlite3PcachePageSanity(pPgHdr) ); |
| 43604 | return pPgHdr; |
| 43605 | } |
| 43606 | |
| 43607 | /* |
| 43608 | ** Decrement the reference count on a page. If the page is clean and the |
| @@ -43468,12 +43612,15 @@ | |
| 43612 | assert( p->nRef>0 ); |
| 43613 | p->pCache->nRefSum--; |
| 43614 | if( (--p->nRef)==0 ){ |
| 43615 | if( p->flags&PGHDR_CLEAN ){ |
| 43616 | pcacheUnpin(p); |
| 43617 | }else if( p->pDirtyPrev!=0 ){ /*OPTIMIZATION-IF-FALSE*/ |
| 43618 | /* Move the page to the head of the dirty list. If p->pDirtyPrev==0, |
| 43619 | ** then page p is already at the head of the dirty list and the |
| 43620 | ** following call would be a no-op. Hence the OPTIMIZATION-IF-FALSE |
| 43621 | ** tag above. */ |
| 43622 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 43623 | } |
| 43624 | } |
| 43625 | } |
| 43626 | |
| @@ -43480,10 +43627,11 @@ | |
| 43627 | /* |
| 43628 | ** Increase the reference count of a supplied page by 1. |
| 43629 | */ |
| 43630 | SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ |
| 43631 | assert(p->nRef>0); |
| 43632 | assert( sqlite3PcachePageSanity(p) ); |
| 43633 | p->nRef++; |
| 43634 | p->pCache->nRefSum++; |
| 43635 | } |
| 43636 | |
| 43637 | /* |
| @@ -43491,10 +43639,11 @@ | |
| 43639 | ** page. This function deletes that reference, so after it returns the |
| 43640 | ** page pointed to by p is invalid. |
| 43641 | */ |
| 43642 | SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ |
| 43643 | assert( p->nRef==1 ); |
| 43644 | assert( sqlite3PcachePageSanity(p) ); |
| 43645 | if( p->flags&PGHDR_DIRTY ){ |
| 43646 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 43647 | } |
| 43648 | p->pCache->nRefSum--; |
| 43649 | sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1); |
| @@ -43504,30 +43653,36 @@ | |
| 43653 | ** Make sure the page is marked as dirty. If it isn't dirty already, |
| 43654 | ** make it so. |
| 43655 | */ |
| 43656 | SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ |
| 43657 | assert( p->nRef>0 ); |
| 43658 | assert( sqlite3PcachePageSanity(p) ); |
| 43659 | if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/ |
| 43660 | p->flags &= ~PGHDR_DONT_WRITE; |
| 43661 | if( p->flags & PGHDR_CLEAN ){ |
| 43662 | p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); |
| 43663 | pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno)); |
| 43664 | assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); |
| 43665 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); |
| 43666 | } |
| 43667 | assert( sqlite3PcachePageSanity(p) ); |
| 43668 | } |
| 43669 | } |
| 43670 | |
| 43671 | /* |
| 43672 | ** Make sure the page is marked as clean. If it isn't clean already, |
| 43673 | ** make it so. |
| 43674 | */ |
| 43675 | SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ |
| 43676 | assert( sqlite3PcachePageSanity(p) ); |
| 43677 | if( ALWAYS((p->flags & PGHDR_DIRTY)!=0) ){ |
| 43678 | assert( (p->flags & PGHDR_CLEAN)==0 ); |
| 43679 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); |
| 43680 | p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 43681 | p->flags |= PGHDR_CLEAN; |
| 43682 | pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno)); |
| 43683 | assert( sqlite3PcachePageSanity(p) ); |
| 43684 | if( p->nRef==0 ){ |
| 43685 | pcacheUnpin(p); |
| 43686 | } |
| 43687 | } |
| 43688 | } |
| @@ -43535,10 +43690,11 @@ | |
| 43690 | /* |
| 43691 | ** Make every page in the cache clean. |
| 43692 | */ |
| 43693 | SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){ |
| 43694 | PgHdr *p; |
| 43695 | pcacheTrace(("%p.CLEAN-ALL\n",pCache)); |
| 43696 | while( (p = pCache->pDirty)!=0 ){ |
| 43697 | sqlite3PcacheMakeClean(p); |
| 43698 | } |
| 43699 | } |
| 43700 | |
| @@ -43545,10 +43701,11 @@ | |
| 43701 | /* |
| 43702 | ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages. |
| 43703 | */ |
| 43704 | SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){ |
| 43705 | PgHdr *p; |
| 43706 | pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache)); |
| 43707 | for(p=pCache->pDirty; p; p=p->pDirtyNext){ |
| 43708 | p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE); |
| 43709 | } |
| 43710 | pCache->pSynced = pCache->pDirtyTail; |
| 43711 | } |
| @@ -43569,10 +43726,12 @@ | |
| 43726 | */ |
| 43727 | SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ |
| 43728 | PCache *pCache = p->pCache; |
| 43729 | assert( p->nRef>0 ); |
| 43730 | assert( newPgno>0 ); |
| 43731 | assert( sqlite3PcachePageSanity(p) ); |
| 43732 | pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno)); |
| 43733 | sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); |
| 43734 | p->pgno = newPgno; |
| 43735 | if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ |
| 43736 | pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); |
| 43737 | } |
| @@ -43589,10 +43748,11 @@ | |
| 43748 | */ |
| 43749 | SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ |
| 43750 | if( pCache->pCache ){ |
| 43751 | PgHdr *p; |
| 43752 | PgHdr *pNext; |
| 43753 | pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno)); |
| 43754 | for(p=pCache->pDirty; p; p=pNext){ |
| 43755 | pNext = p->pDirtyNext; |
| 43756 | /* This routine never gets call with a positive pgno except right |
| 43757 | ** after sqlite3PcacheCleanAll(). So if there are dirty pages, |
| 43758 | ** it must be that pgno==0. |
| @@ -43619,10 +43779,11 @@ | |
| 43779 | /* |
| 43780 | ** Close a cache. |
| 43781 | */ |
| 43782 | SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){ |
| 43783 | assert( pCache->pCache!=0 ); |
| 43784 | pcacheTrace(("%p.CLOSE\n",pCache)); |
| 43785 | sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); |
| 43786 | } |
| 43787 | |
| 43788 | /* |
| 43789 | ** Discard the contents of the cache. |
| @@ -47585,24 +47746,28 @@ | |
| 47746 | } |
| 47747 | |
| 47748 | static int pager_truncate(Pager *pPager, Pgno nPage); |
| 47749 | |
| 47750 | /* |
| 47751 | ** The write transaction open on pPager is being committed (bCommit==1) |
| 47752 | ** or rolled back (bCommit==0). |
| 47753 | ** |
| 47754 | ** Return TRUE if and only if all dirty pages should be flushed to disk. |
| 47755 | ** |
| 47756 | ** Rules: |
| 47757 | ** |
| 47758 | ** * For non-TEMP databases, always sync to disk. This is necessary |
| 47759 | ** for transactions to be durable. |
| 47760 | ** |
| 47761 | ** * Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing |
| 47762 | ** file has been created already (via a spill on pagerStress()) and |
| 47763 | ** when the number of dirty pages in memory exceeds 25% of the total |
| 47764 | ** cache size. |
| 47765 | */ |
| 47766 | static int pagerFlushOnCommit(Pager *pPager, int bCommit){ |
| 47767 | if( pPager->tempFile==0 ) return 1; |
| 47768 | if( !bCommit ) return 0; |
| 47769 | if( !isOpen(pPager->fd) ) return 0; |
| 47770 | return (sqlite3PCachePercentDirty(pPager->pPCache)>=25); |
| 47771 | } |
| 47772 | |
| 47773 | /* |
| @@ -47706,11 +47871,11 @@ | |
| 47871 | } |
| 47872 | pPager->journalOff = 0; |
| 47873 | }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST |
| 47874 | || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) |
| 47875 | ){ |
| 47876 | rc = zeroJournalHdr(pPager, hasMaster||pPager->tempFile); |
| 47877 | pPager->journalOff = 0; |
| 47878 | }else{ |
| 47879 | /* This branch may be executed with Pager.journalMode==MEMORY if |
| 47880 | ** a hot-journal was just rolled back. In this case the journal |
| 47881 | ** file should be closed and deleted. If this connection writes to |
| @@ -47741,16 +47906,18 @@ | |
| 47906 | #endif |
| 47907 | |
| 47908 | sqlite3BitvecDestroy(pPager->pInJournal); |
| 47909 | pPager->pInJournal = 0; |
| 47910 | pPager->nRec = 0; |
| 47911 | if( rc==SQLITE_OK ){ |
| 47912 | if( pagerFlushOnCommit(pPager, bCommit) ){ |
| 47913 | sqlite3PcacheCleanAll(pPager->pPCache); |
| 47914 | }else{ |
| 47915 | sqlite3PcacheClearWritable(pPager->pPCache); |
| 47916 | } |
| 47917 | sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); |
| 47918 | } |
| 47919 | |
| 47920 | if( pagerUseWal(pPager) ){ |
| 47921 | /* Drop the WAL write-lock, if any. Also, if the connection was in |
| 47922 | ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE |
| 47923 | ** lock held on the database file. |
| @@ -48080,11 +48247,10 @@ | |
| 48247 | pPager->doNotSpill |= SPILLFLAG_ROLLBACK; |
| 48248 | rc = sqlite3PagerGet(pPager, pgno, &pPg, 1); |
| 48249 | assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); |
| 48250 | pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; |
| 48251 | if( rc!=SQLITE_OK ) return rc; |
| 48252 | sqlite3PcacheMakeDirty(pPg); |
| 48253 | } |
| 48254 | if( pPg ){ |
| 48255 | /* No page should ever be explicitly rolled back that is in use, except |
| 48256 | ** for page 1 which is held in use in order to keep the lock on the |
| @@ -48094,37 +48260,14 @@ | |
| 48260 | */ |
| 48261 | void *pData; |
| 48262 | pData = pPg->pData; |
| 48263 | memcpy(pData, (u8*)aData, pPager->pageSize); |
| 48264 | pPager->xReiniter(pPg); |
| 48265 | /* It used to be that sqlite3PcacheMakeClean(pPg) was called here. But |
| 48266 | ** that call was dangerous and had no detectable benefit since the cache |
| 48267 | ** is normally cleaned by sqlite3PcacheCleanAll() after rollback and so |
| 48268 | ** has been removed. */ |
| 48269 | pager_set_pagehash(pPg); |
| 48270 | |
| 48271 | /* If this was page 1, then restore the value of Pager.dbFileVers. |
| 48272 | ** Do this before any decoding. */ |
| 48273 | if( pgno==1 ){ |
| @@ -51731,10 +51874,11 @@ | |
| 51874 | if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ |
| 51875 | PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); |
| 51876 | IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) |
| 51877 | pPg->flags |= PGHDR_DONT_WRITE; |
| 51878 | pPg->flags &= ~PGHDR_WRITEABLE; |
| 51879 | testcase( pPg->flags & PGHDR_NEED_SYNC ); |
| 51880 | pager_set_pagehash(pPg); |
| 51881 | } |
| 51882 | } |
| 51883 | |
| 51884 | /* |
| @@ -51926,21 +52070,21 @@ | |
| 52070 | |
| 52071 | /* If a prior error occurred, report that error again. */ |
| 52072 | if( NEVER(pPager->errCode) ) return pPager->errCode; |
| 52073 | |
| 52074 | /* Provide the ability to easily simulate an I/O error during testing */ |
| 52075 | if( sqlite3FaultSim(400) ) return SQLITE_IOERR; |
| 52076 | |
| 52077 | PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", |
| 52078 | pPager->zFilename, zMaster, pPager->dbSize)); |
| 52079 | |
| 52080 | /* If no database changes have been made, return early. */ |
| 52081 | if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK; |
| 52082 | |
| 52083 | assert( MEMDB==0 || pPager->tempFile ); |
| 52084 | assert( isOpen(pPager->fd) || pPager->tempFile ); |
| 52085 | if( 0==pagerFlushOnCommit(pPager, 1) ){ |
| 52086 | /* If this is an in-memory db, or no pages have been written to, or this |
| 52087 | ** function has already been called, it is mostly a no-op. However, any |
| 52088 | ** backup in progress needs to be restarted. */ |
| 52089 | sqlite3BackupRestart(pPager->pBackup); |
| 52090 | }else{ |
| @@ -52561,10 +52705,11 @@ | |
| 52705 | assert( assert_pager_state(pPager) ); |
| 52706 | |
| 52707 | /* In order to be able to rollback, an in-memory database must journal |
| 52708 | ** the page we are moving from. |
| 52709 | */ |
| 52710 | assert( pPager->tempFile || !MEMDB ); |
| 52711 | if( pPager->tempFile ){ |
| 52712 | rc = sqlite3PagerWrite(pPg); |
| 52713 | if( rc ) return rc; |
| 52714 | } |
| 52715 | |
| @@ -52635,12 +52780,11 @@ | |
| 52780 | |
| 52781 | /* For an in-memory database, make sure the original page continues |
| 52782 | ** to exist, in case the transaction needs to roll back. Use pPgOld |
| 52783 | ** as the original page since it has already been allocated. |
| 52784 | */ |
| 52785 | if( pPager->tempFile && pPgOld ){ |
| 52786 | sqlite3PcacheMove(pPgOld, origPgno); |
| 52787 | sqlite3PagerUnrefNotNull(pPgOld); |
| 52788 | } |
| 52789 | |
| 52790 | if( needSyncPgno ){ |
| @@ -59256,15 +59400,15 @@ | |
| 59400 | flagByte &= ~PTF_LEAF; |
| 59401 | pPage->childPtrSize = 4-4*pPage->leaf; |
| 59402 | pPage->xCellSize = cellSizePtr; |
| 59403 | pBt = pPage->pBt; |
| 59404 | if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ |
| 59405 | /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an |
| 59406 | ** interior table b-tree page. */ |
| 59407 | assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); |
| 59408 | /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a |
| 59409 | ** leaf table b-tree page. */ |
| 59410 | assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); |
| 59411 | pPage->intKey = 1; |
| 59412 | if( pPage->leaf ){ |
| 59413 | pPage->intKeyLeaf = 1; |
| 59414 | pPage->xParseCell = btreeParseCellPtr; |
| @@ -59274,15 +59418,15 @@ | |
| 59418 | pPage->xParseCell = btreeParseCellPtrNoPayload; |
| 59419 | } |
| 59420 | pPage->maxLocal = pBt->maxLeaf; |
| 59421 | pPage->minLocal = pBt->minLeaf; |
| 59422 | }else if( flagByte==PTF_ZERODATA ){ |
| 59423 | /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an |
| 59424 | ** interior index b-tree page. */ |
| 59425 | assert( (PTF_ZERODATA)==2 ); |
| 59426 | /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a |
| 59427 | ** leaf index b-tree page. */ |
| 59428 | assert( (PTF_ZERODATA|PTF_LEAF)==10 ); |
| 59429 | pPage->intKey = 0; |
| 59430 | pPage->intKeyLeaf = 0; |
| 59431 | pPage->xParseCell = btreeParseCellPtrIndex; |
| 59432 | pPage->maxLocal = pBt->maxLocal; |
| @@ -192850,11 +192994,11 @@ | |
| 192994 | int nArg, /* Number of args */ |
| 192995 | sqlite3_value **apUnused /* Function arguments */ |
| 192996 | ){ |
| 192997 | assert( nArg==0 ); |
| 192998 | UNUSED_PARAM2(nArg, apUnused); |
| 192999 | sqlite3_result_text(pCtx, "fts5: 2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2", -1, SQLITE_TRANSIENT); |
| 193000 | } |
| 193001 | |
| 193002 | static int fts5Init(sqlite3 *db){ |
| 193003 | static const sqlite3_module fts5Mod = { |
| 193004 | /* iVersion */ 2, |
| 193005 |
+1
-1
| --- src/sqlite3.h | ||
| +++ src/sqlite3.h | ||
| @@ -111,11 +111,11 @@ | ||
| 111 | 111 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 112 | 112 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 113 | 113 | */ |
| 114 | 114 | #define SQLITE_VERSION "3.13.0" |
| 115 | 115 | #define SQLITE_VERSION_NUMBER 3013000 |
| 116 | -#define SQLITE_SOURCE_ID "2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea" | |
| 116 | +#define SQLITE_SOURCE_ID "2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2" | |
| 117 | 117 | |
| 118 | 118 | /* |
| 119 | 119 | ** CAPI3REF: Run-Time Library Version Numbers |
| 120 | 120 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 121 | 121 | ** |
| 122 | 122 |
| --- src/sqlite3.h | |
| +++ src/sqlite3.h | |
| @@ -111,11 +111,11 @@ | |
| 111 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 112 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 113 | */ |
| 114 | #define SQLITE_VERSION "3.13.0" |
| 115 | #define SQLITE_VERSION_NUMBER 3013000 |
| 116 | #define SQLITE_SOURCE_ID "2016-05-09 19:03:42 14e53d0e2f62d82ae1d64a72fd9711548e3bf5ea" |
| 117 | |
| 118 | /* |
| 119 | ** CAPI3REF: Run-Time Library Version Numbers |
| 120 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 121 | ** |
| 122 |
| --- src/sqlite3.h | |
| +++ src/sqlite3.h | |
| @@ -111,11 +111,11 @@ | |
| 111 | ** [sqlite3_libversion_number()], [sqlite3_sourceid()], |
| 112 | ** [sqlite_version()] and [sqlite_source_id()]. |
| 113 | */ |
| 114 | #define SQLITE_VERSION "3.13.0" |
| 115 | #define SQLITE_VERSION_NUMBER 3013000 |
| 116 | #define SQLITE_SOURCE_ID "2016-05-18 10:57:30 fc49f556e48970561d7ab6a2f24fdd7d9eb81ff2" |
| 117 | |
| 118 | /* |
| 119 | ** CAPI3REF: Run-Time Library Version Numbers |
| 120 | ** KEYWORDS: sqlite3_version, sqlite3_sourceid |
| 121 | ** |
| 122 |
+162
-86
| --- src/wiki.c | ||
| +++ src/wiki.c | ||
| @@ -122,20 +122,35 @@ | ||
| 122 | 122 | static int is_sandbox(const char *zPagename){ |
| 123 | 123 | return fossil_stricmp(zPagename,"sandbox")==0 || |
| 124 | 124 | fossil_stricmp(zPagename,"sand box")==0; |
| 125 | 125 | } |
| 126 | 126 | |
| 127 | +/* | |
| 128 | +** Formal, common and short names for the various wiki styles. | |
| 129 | +*/ | |
| 130 | +static const char *const azStyles[] = { | |
| 131 | + "text/x-fossil-wiki", "Fossil Wiki", "wiki", | |
| 132 | + "text/x-markdown", "Markdown", "markdown", | |
| 133 | + "text/plain", "Plain Text", "plain" | |
| 134 | +}; | |
| 135 | + | |
| 127 | 136 | /* |
| 128 | 137 | ** Only allow certain mimetypes through. |
| 129 | 138 | ** All others become "text/x-fossil-wiki" |
| 130 | 139 | */ |
| 131 | 140 | const char *wiki_filter_mimetypes(const char *zMimetype){ |
| 132 | - if( zMimetype!=0 && | |
| 133 | - ( fossil_strcmp(zMimetype, "text/x-markdown")==0 | |
| 134 | - || fossil_strcmp(zMimetype, "text/plain")==0 ) | |
| 135 | - ){ | |
| 136 | - return zMimetype; | |
| 141 | + if( zMimetype!=0 ){ | |
| 142 | + int i; | |
| 143 | + for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=3){ | |
| 144 | + if( fossil_strcmp(zMimetype,azStyles[i+2])==0 ){ | |
| 145 | + return azStyles[i]; | |
| 146 | + } | |
| 147 | + } | |
| 148 | + if( fossil_strcmp(zMimetype, "text/x-markdown")==0 | |
| 149 | + || fossil_strcmp(zMimetype, "text/plain")==0 ){ | |
| 150 | + return zMimetype; | |
| 151 | + } | |
| 137 | 152 | } |
| 138 | 153 | return "text/x-fossil-wiki"; |
| 139 | 154 | } |
| 140 | 155 | |
| 141 | 156 | /* |
| @@ -412,27 +427,18 @@ | ||
| 412 | 427 | db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid); |
| 413 | 428 | db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", nrid); |
| 414 | 429 | manifest_crosslink(nrid, pWiki, MC_NONE); |
| 415 | 430 | } |
| 416 | 431 | |
| 417 | -/* | |
| 418 | -** Formal names and common names for the various wiki styles. | |
| 419 | -*/ | |
| 420 | -static const char *const azStyles[] = { | |
| 421 | - "text/x-fossil-wiki", "Fossil Wiki", | |
| 422 | - "text/x-markdown", "Markdown", | |
| 423 | - "text/plain", "Plain Text" | |
| 424 | -}; | |
| 425 | - | |
| 426 | 432 | /* |
| 427 | 433 | ** Output a selection box from which the user can select the |
| 428 | 434 | ** wiki mimetype. |
| 429 | 435 | */ |
| 430 | 436 | void mimetype_option_menu(const char *zMimetype){ |
| 431 | 437 | unsigned i; |
| 432 | 438 | @ <select name="mimetype" size="1"> |
| 433 | - for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=2){ | |
| 439 | + for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=3){ | |
| 434 | 440 | if( fossil_strcmp(zMimetype,azStyles[i])==0 ){ |
| 435 | 441 | @ <option value="%s(azStyles[i])" selected>%s(azStyles[i+1])</option> |
| 436 | 442 | }else{ |
| 437 | 443 | @ <option value="%s(azStyles[i])">%s(azStyles[i+1])</option> |
| 438 | 444 | } |
| @@ -1068,47 +1074,26 @@ | ||
| 1068 | 1074 | style_footer(); |
| 1069 | 1075 | } |
| 1070 | 1076 | |
| 1071 | 1077 | /* |
| 1072 | 1078 | ** Add a new wiki page to the repository. The page name is |
| 1073 | -** given by the zPageName parameter. isNew must be true to create | |
| 1074 | -** a new page. If no previous page with the name zPageName exists | |
| 1075 | -** and isNew is false, then this routine throws an error. | |
| 1079 | +** given by the zPageName parameter. rid must be zero to create | |
| 1080 | +** a new page otherwise the page identified by rid is updated. | |
| 1076 | 1081 | ** |
| 1077 | 1082 | ** The content of the new page is given by the blob pContent. |
| 1078 | 1083 | ** |
| 1079 | 1084 | ** zMimeType specifies the N-card for the wiki page. If it is 0, |
| 1080 | 1085 | ** empty, or "text/x-fossil-wiki" (the default format) then it is |
| 1081 | 1086 | ** ignored. |
| 1082 | 1087 | */ |
| 1083 | -int wiki_cmd_commit(const char *zPageName, int isNew, Blob *pContent, | |
| 1088 | +int wiki_cmd_commit(const char *zPageName, int rid, Blob *pContent, | |
| 1084 | 1089 | const char *zMimeType, int localUser){ |
| 1085 | 1090 | Blob wiki; /* Wiki page content */ |
| 1086 | 1091 | Blob cksum; /* wiki checksum */ |
| 1087 | - int rid; /* artifact ID of parent page */ | |
| 1088 | 1092 | char *zDate; /* timestamp */ |
| 1089 | 1093 | char *zUuid; /* uuid for rid */ |
| 1090 | 1094 | |
| 1091 | - rid = db_int(0, | |
| 1092 | - "SELECT x.rid FROM tag t, tagxref x" | |
| 1093 | - " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'" | |
| 1094 | - " ORDER BY x.mtime DESC LIMIT 1", | |
| 1095 | - zPageName | |
| 1096 | - ); | |
| 1097 | - if( rid==0 && !isNew ){ | |
| 1098 | -#ifdef FOSSIL_ENABLE_JSON | |
| 1099 | - g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; | |
| 1100 | -#endif | |
| 1101 | - fossil_fatal("no such wiki page: %s", zPageName); | |
| 1102 | - } | |
| 1103 | - if( rid!=0 && isNew ){ | |
| 1104 | -#ifdef FOSSIL_ENABLE_JSON | |
| 1105 | - g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS; | |
| 1106 | -#endif | |
| 1107 | - fossil_fatal("wiki page %s already exists", zPageName); | |
| 1108 | - } | |
| 1109 | - | |
| 1110 | 1095 | blob_zero(&wiki); |
| 1111 | 1096 | zDate = date_in_standard_format("now"); |
| 1112 | 1097 | blob_appendf(&wiki, "D %s\n", zDate); |
| 1113 | 1098 | free(zDate); |
| 1114 | 1099 | blob_appendf(&wiki, "L %F\n", zPageName ); |
| @@ -1133,47 +1118,112 @@ | ||
| 1133 | 1118 | db_begin_transaction(); |
| 1134 | 1119 | wiki_put(&wiki, 0, wiki_need_moderation(localUser)); |
| 1135 | 1120 | db_end_transaction(0); |
| 1136 | 1121 | return 1; |
| 1137 | 1122 | } |
| 1123 | + | |
| 1124 | +/* | |
| 1125 | +** Determine the rid for a tech note given either its id or its | |
| 1126 | +** timestamp. Returns 0 if there is no such item and -1 if the details | |
| 1127 | +** are ambiguous and could refer to multiple items. | |
| 1128 | +*/ | |
| 1129 | +int wiki_technote_to_rid(const char *zETime) { | |
| 1130 | + int rid=0; /* Artifact ID of the tech note */ | |
| 1131 | + int nETime = strlen(zETime); | |
| 1132 | + Stmt q; | |
| 1133 | + if( nETime>=4 && nETime<=UUID_SIZE && validate16(zETime, nETime) ){ | |
| 1134 | + char zUuid[UUID_SIZE+1]; | |
| 1135 | + memcpy(zUuid, zETime, nETime+1); | |
| 1136 | + canonical16(zUuid, nETime); | |
| 1137 | + db_prepare(&q, | |
| 1138 | + "SELECT e.objid" | |
| 1139 | + " FROM event e, tag t" | |
| 1140 | + " WHERE e.type='e' AND e.tagid IS NOT NULL AND t.tagid=e.tagid" | |
| 1141 | + " AND t.tagname GLOB 'event-%q*'", | |
| 1142 | + zUuid | |
| 1143 | + ); | |
| 1144 | + if( db_step(&q)==SQLITE_ROW ){ | |
| 1145 | + rid = db_column_int(&q, 0); | |
| 1146 | + if( db_step(&q)==SQLITE_ROW ) rid = -1; | |
| 1147 | + } | |
| 1148 | + db_finalize(&q); | |
| 1149 | + } | |
| 1150 | + if (!rid) { | |
| 1151 | + if (strlen(zETime)>4) { | |
| 1152 | + rid = db_int(0, "SELECT objid" | |
| 1153 | + " FROM event" | |
| 1154 | + " WHERE datetime(mtime)=datetime('%q')" | |
| 1155 | + " AND type='e'" | |
| 1156 | + " AND tagid IS NOT NULL" | |
| 1157 | + " ORDER BY objid DESC LIMIT 1", | |
| 1158 | + zETime); | |
| 1159 | + } | |
| 1160 | + } | |
| 1161 | + return rid; | |
| 1162 | +} | |
| 1138 | 1163 | |
| 1139 | 1164 | /* |
| 1140 | 1165 | ** COMMAND: wiki* |
| 1141 | 1166 | ** |
| 1142 | 1167 | ** Usage: %fossil wiki (export|create|commit|list) WikiName |
| 1143 | 1168 | ** |
| 1144 | 1169 | ** Run various subcommands to work with wiki entries or tech notes. |
| 1145 | 1170 | ** |
| 1146 | -** %fossil wiki export ?PAGENAME? ?FILE? [-t|--technote DATETIME ] | |
| 1171 | +** %fossil wiki export PAGENAME ?FILE? | |
| 1172 | +** %fossil wiki export ?FILE? -t|--technote DATETIME|TECHNOTE-ID | |
| 1147 | 1173 | ** |
| 1148 | -** Sends the latest version of either the PAGENAME wiki entry | |
| 1149 | -** or the DATETIME tech note to the given file or standard | |
| 1150 | -** output. One of PAGENAME or DATETIME must be specified. | |
| 1174 | +** Sends the latest version of either a wiki page or of a tech note | |
| 1175 | +** to the given file or standard output. | |
| 1176 | +** If PAGENAME is provided, the wiki page will be output. For | |
| 1177 | +** a tech note either DATETIME or TECHNOTE-ID must be specified. If | |
| 1178 | +** DATETIME is used, the most recently modified tech note with that | |
| 1179 | +** DATETIME will be sent. | |
| 1151 | 1180 | ** |
| 1152 | 1181 | ** %fossil wiki (create|commit) PAGENAME ?FILE? ?OPTIONS? |
| 1153 | 1182 | ** |
| 1154 | 1183 | ** Create a new or commit changes to an existing wiki page or |
| 1155 | -** technote from FILE or from standard input. | |
| 1184 | +** technote from FILE or from standard input. PAGENAME is the | |
| 1185 | +** name of the wiki entry or the timeline comment of the | |
| 1186 | +** technote. | |
| 1156 | 1187 | ** |
| 1157 | 1188 | ** Options: |
| 1158 | -** -M|--mimetype TEXT-FORMAT The mimetype of the update defaulting | |
| 1159 | -** to the type used by the previous version | |
| 1160 | -** of the page or text/x-fossil-wiki. | |
| 1161 | -** -t|--technote DATETIME Specifies the timestamp of the technote | |
| 1162 | -** to be created or updated. | |
| 1189 | +** -M|--mimetype TEXT-FORMAT The mime type of the update. | |
| 1190 | +** Defaults to the type used by | |
| 1191 | +** the previous version of the | |
| 1192 | +** page, or text/x-fossil-wiki. | |
| 1193 | +** Valid values are: text/x-fossil-wiki, | |
| 1194 | +** text/markdown and text/plain. fossil, | |
| 1195 | +** markdown or plain can be specified as | |
| 1196 | +** synonyms of these values. | |
| 1197 | +** -t|--technote DATETIME Specifies the timestamp of | |
| 1198 | +** the technote to be created or | |
| 1199 | +** updated. When updating a tech note | |
| 1200 | +** the most recently modified tech note | |
| 1201 | +** with the specified timestamp will be | |
| 1202 | +** updated. | |
| 1203 | +** -t|--technote TECHNOTE-ID Specifies the technote to be | |
| 1204 | +** updated by its technote id. | |
| 1163 | 1205 | ** --technote-tags TAGS The set of tags for a technote. |
| 1164 | -** --technote-bgcolor COLOR The color used for the technote on the | |
| 1165 | -** timeline. | |
| 1206 | +** --technote-bgcolor COLOR The color used for the technote | |
| 1207 | +** on the timeline. | |
| 1166 | 1208 | ** |
| 1167 | -** %fossil wiki list ?--technote? | |
| 1168 | -** %fossil wiki ls ?--technote? | |
| 1209 | +** %fossil wiki list ?OPTIONS? | |
| 1210 | +** %fossil wiki ls ?OPTIONS? | |
| 1169 | 1211 | ** |
| 1170 | 1212 | ** Lists all wiki entries, one per line, ordered |
| 1171 | -** case-insensitively by name. The --technote flag | |
| 1172 | -** specifies that technotes will be listed instead of | |
| 1173 | -** the wiki entries, which will be listed in order | |
| 1174 | -** timestamp. | |
| 1213 | +** case-insensitively by name. | |
| 1214 | +** | |
| 1215 | +** Options: | |
| 1216 | +** -t|--technote Technotes will be listed instead of | |
| 1217 | +** pages. The technotes will be in order | |
| 1218 | +** of timestamp with the most recent | |
| 1219 | +** first. | |
| 1220 | +** -s|--show-technote-ids The id of the tech note will be listed | |
| 1221 | +** along side the timestamp. The tech note | |
| 1222 | +** id will be the first word on each line. | |
| 1223 | +** This option only applies if the | |
| 1224 | +** --technote option is also specified. | |
| 1175 | 1225 | ** |
| 1176 | 1226 | */ |
| 1177 | 1227 | void wiki_cmd(void){ |
| 1178 | 1228 | int n; |
| 1179 | 1229 | db_find_and_open_repository(0, 0); |
| @@ -1213,22 +1263,21 @@ | ||
| 1213 | 1263 | fossil_fatal("wiki page [%s] not found",zPageName); |
| 1214 | 1264 | } |
| 1215 | 1265 | zFile = (g.argc==4) ? "-" : g.argv[4]; |
| 1216 | 1266 | }else{ |
| 1217 | 1267 | if( (g.argc!=3) && (g.argc!=4) ){ |
| 1218 | - usage("export ?FILE? --technote DATETIME"); | |
| 1268 | + usage("export ?FILE? --technote DATETIME|TECHNOTE-ID"); | |
| 1219 | 1269 | } |
| 1220 | - rid = db_int(0, "SELECT objid FROM event" | |
| 1221 | - " WHERE datetime(mtime)=datetime('%q') AND type='e'" | |
| 1222 | - " ORDER BY mtime DESC LIMIT 1", | |
| 1223 | - zETime | |
| 1224 | - ); | |
| 1270 | + rid = wiki_technote_to_rid(zETime); | |
| 1271 | + if (rid == -1) { | |
| 1272 | + fossil_fatal("ambiguous tech note id: %s", zETime); | |
| 1273 | + } | |
| 1225 | 1274 | if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){ |
| 1226 | 1275 | zBody = pWiki->zWiki; |
| 1227 | 1276 | } |
| 1228 | 1277 | if( zBody==0 ){ |
| 1229 | - fossil_fatal("technote not found"); | |
| 1278 | + fossil_fatal("technote [%s] not found",zETime); | |
| 1230 | 1279 | } |
| 1231 | 1280 | zFile = (g.argc==3) ? "-" : g.argv[3]; |
| 1232 | 1281 | } |
| 1233 | 1282 | for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){} |
| 1234 | 1283 | zBody[i] = 0; |
| @@ -1270,43 +1319,58 @@ | ||
| 1270 | 1319 | if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 |
| 1271 | 1320 | && (pWiki->zMimetype && *pWiki->zMimetype)){ |
| 1272 | 1321 | zMimeType = pWiki->zMimetype; |
| 1273 | 1322 | } |
| 1274 | 1323 | }else{ |
| 1275 | - rid = db_int(0, "SELECT objid FROM event" | |
| 1276 | - " WHERE datetime(mtime)=datetime('%q') AND type='e'" | |
| 1277 | - " ORDER BY mtime DESC LIMIT 1", | |
| 1278 | - zPageName | |
| 1279 | - ); | |
| 1324 | + rid = wiki_technote_to_rid(zETime); | |
| 1280 | 1325 | if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 |
| 1281 | 1326 | && (pWiki->zMimetype && *pWiki->zMimetype)){ |
| 1282 | 1327 | zMimeType = pWiki->zMimetype; |
| 1283 | 1328 | } |
| 1284 | 1329 | } |
| 1330 | + }else{ | |
| 1331 | + zMimeType = wiki_filter_mimetypes(zMimeType); | |
| 1332 | + } | |
| 1333 | + if( g.argv[2][1]=='r' && rid>0 ){ | |
| 1334 | + if ( !zETime ){ | |
| 1335 | + fossil_fatal("wiki page %s already exists", zPageName); | |
| 1336 | + }else{ | |
| 1337 | + /* Creating a tech note with same timestamp is permitted | |
| 1338 | + and should create a new tech note */ | |
| 1339 | + rid = 0; | |
| 1340 | + } | |
| 1341 | + }else if( g.argv[2][1]=='o' && rid == 0 ){ | |
| 1342 | + if ( !zETime ){ | |
| 1343 | + fossil_fatal("no such wiki page: %s", zPageName); | |
| 1344 | + }else{ | |
| 1345 | + fossil_fatal("no such tech note: %s", zETime); | |
| 1346 | + } | |
| 1285 | 1347 | } |
| 1348 | + | |
| 1286 | 1349 | if( !zETime ){ |
| 1350 | + wiki_cmd_commit(zPageName, rid, &content, zMimeType, 1); | |
| 1287 | 1351 | if( g.argv[2][1]=='r' ){ |
| 1288 | - wiki_cmd_commit(zPageName, 1, &content, zMimeType, 1); | |
| 1289 | 1352 | fossil_print("Created new wiki page %s.\n", zPageName); |
| 1290 | 1353 | }else{ |
| 1291 | - wiki_cmd_commit(zPageName, 0, &content, zMimeType, 1); | |
| 1292 | 1354 | fossil_print("Updated wiki page %s.\n", zPageName); |
| 1293 | 1355 | } |
| 1294 | 1356 | }else{ |
| 1295 | - char *zMETime; /* Normalized, mutable version of zETime */ | |
| 1296 | - zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))", | |
| 1297 | - zETime); | |
| 1298 | - if( g.argv[2][1]=='r' ){ | |
| 1299 | - event_cmd_commit(zMETime, 1, &content, zMimeType, zPageName, | |
| 1300 | - zTags, zClr); | |
| 1301 | - fossil_print("Created new tech note %s.\n", zMETime); | |
| 1302 | - }else{ | |
| 1303 | - event_cmd_commit(zMETime, 0, &content, zMimeType, zPageName, | |
| 1304 | - zTags, zClr); | |
| 1305 | - fossil_print("Updated tech note %s.\n", zMETime); | |
| 1306 | - } | |
| 1307 | - free(zMETime); | |
| 1357 | + if( rid != -1 ){ | |
| 1358 | + char *zMETime; /* Normalized, mutable version of zETime */ | |
| 1359 | + zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))", | |
| 1360 | + zETime); | |
| 1361 | + event_cmd_commit(zMETime, rid, &content, zMimeType, zPageName, | |
| 1362 | + zTags, zClr); | |
| 1363 | + if( g.argv[2][1]=='r' ){ | |
| 1364 | + fossil_print("Created new tech note %s.\n", zMETime); | |
| 1365 | + }else{ | |
| 1366 | + fossil_print("Updated tech note %s.\n", zMETime); | |
| 1367 | + } | |
| 1368 | + free(zMETime); | |
| 1369 | + }else{ | |
| 1370 | + fossil_fatal("ambiguous tech note id: %s", zETime); | |
| 1371 | + } | |
| 1308 | 1372 | } |
| 1309 | 1373 | manifest_destroy(pWiki); |
| 1310 | 1374 | blob_reset(&content); |
| 1311 | 1375 | }else if( strncmp(g.argv[2],"delete",n)==0 ){ |
| 1312 | 1376 | if( g.argc!=5 ){ |
| @@ -1314,23 +1378,35 @@ | ||
| 1314 | 1378 | } |
| 1315 | 1379 | fossil_fatal("delete not yet implemented."); |
| 1316 | 1380 | }else if(( strncmp(g.argv[2],"list",n)==0 ) |
| 1317 | 1381 | || ( strncmp(g.argv[2],"ls",n)==0 )){ |
| 1318 | 1382 | Stmt q; |
| 1383 | + int showIds = 0; | |
| 1384 | + | |
| 1319 | 1385 | if ( !find_option("technote","t",0) ){ |
| 1320 | 1386 | db_prepare(&q, |
| 1321 | 1387 | "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'" |
| 1322 | 1388 | " ORDER BY lower(tagname) /*sort*/" |
| 1323 | 1389 | ); |
| 1324 | 1390 | }else{ |
| 1391 | + showIds = find_option("show-technote-ids","s",0)!=0; | |
| 1325 | 1392 | db_prepare(&q, |
| 1326 | - "SELECT datetime(mtime) FROM event WHERE type='e'" | |
| 1327 | - " ORDER BY mtime /*sort*/" | |
| 1393 | + "SELECT datetime(e.mtime), substr(t.tagname,7)" | |
| 1394 | + " FROM event e, tag t" | |
| 1395 | + " WHERE e.type='e'" | |
| 1396 | + " AND e.tagid IS NOT NULL" | |
| 1397 | + " AND t.tagid=e.tagid" | |
| 1398 | + " ORDER BY e.mtime DESC /*sort*/" | |
| 1328 | 1399 | ); |
| 1329 | 1400 | } |
| 1401 | + | |
| 1330 | 1402 | while( db_step(&q)==SQLITE_ROW ){ |
| 1331 | 1403 | const char *zName = db_column_text(&q, 0); |
| 1404 | + if (showIds) { | |
| 1405 | + const char *zUuid = db_column_text(&q, 1); | |
| 1406 | + fossil_print("%s ",zUuid); | |
| 1407 | + } | |
| 1332 | 1408 | fossil_print( "%s\n",zName); |
| 1333 | 1409 | } |
| 1334 | 1410 | db_finalize(&q); |
| 1335 | 1411 | }else{ |
| 1336 | 1412 | goto wiki_cmd_usage; |
| 1337 | 1413 |
| --- src/wiki.c | |
| +++ src/wiki.c | |
| @@ -122,20 +122,35 @@ | |
| 122 | static int is_sandbox(const char *zPagename){ |
| 123 | return fossil_stricmp(zPagename,"sandbox")==0 || |
| 124 | fossil_stricmp(zPagename,"sand box")==0; |
| 125 | } |
| 126 | |
| 127 | /* |
| 128 | ** Only allow certain mimetypes through. |
| 129 | ** All others become "text/x-fossil-wiki" |
| 130 | */ |
| 131 | const char *wiki_filter_mimetypes(const char *zMimetype){ |
| 132 | if( zMimetype!=0 && |
| 133 | ( fossil_strcmp(zMimetype, "text/x-markdown")==0 |
| 134 | || fossil_strcmp(zMimetype, "text/plain")==0 ) |
| 135 | ){ |
| 136 | return zMimetype; |
| 137 | } |
| 138 | return "text/x-fossil-wiki"; |
| 139 | } |
| 140 | |
| 141 | /* |
| @@ -412,27 +427,18 @@ | |
| 412 | db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid); |
| 413 | db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", nrid); |
| 414 | manifest_crosslink(nrid, pWiki, MC_NONE); |
| 415 | } |
| 416 | |
| 417 | /* |
| 418 | ** Formal names and common names for the various wiki styles. |
| 419 | */ |
| 420 | static const char *const azStyles[] = { |
| 421 | "text/x-fossil-wiki", "Fossil Wiki", |
| 422 | "text/x-markdown", "Markdown", |
| 423 | "text/plain", "Plain Text" |
| 424 | }; |
| 425 | |
| 426 | /* |
| 427 | ** Output a selection box from which the user can select the |
| 428 | ** wiki mimetype. |
| 429 | */ |
| 430 | void mimetype_option_menu(const char *zMimetype){ |
| 431 | unsigned i; |
| 432 | @ <select name="mimetype" size="1"> |
| 433 | for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=2){ |
| 434 | if( fossil_strcmp(zMimetype,azStyles[i])==0 ){ |
| 435 | @ <option value="%s(azStyles[i])" selected>%s(azStyles[i+1])</option> |
| 436 | }else{ |
| 437 | @ <option value="%s(azStyles[i])">%s(azStyles[i+1])</option> |
| 438 | } |
| @@ -1068,47 +1074,26 @@ | |
| 1068 | style_footer(); |
| 1069 | } |
| 1070 | |
| 1071 | /* |
| 1072 | ** Add a new wiki page to the repository. The page name is |
| 1073 | ** given by the zPageName parameter. isNew must be true to create |
| 1074 | ** a new page. If no previous page with the name zPageName exists |
| 1075 | ** and isNew is false, then this routine throws an error. |
| 1076 | ** |
| 1077 | ** The content of the new page is given by the blob pContent. |
| 1078 | ** |
| 1079 | ** zMimeType specifies the N-card for the wiki page. If it is 0, |
| 1080 | ** empty, or "text/x-fossil-wiki" (the default format) then it is |
| 1081 | ** ignored. |
| 1082 | */ |
| 1083 | int wiki_cmd_commit(const char *zPageName, int isNew, Blob *pContent, |
| 1084 | const char *zMimeType, int localUser){ |
| 1085 | Blob wiki; /* Wiki page content */ |
| 1086 | Blob cksum; /* wiki checksum */ |
| 1087 | int rid; /* artifact ID of parent page */ |
| 1088 | char *zDate; /* timestamp */ |
| 1089 | char *zUuid; /* uuid for rid */ |
| 1090 | |
| 1091 | rid = db_int(0, |
| 1092 | "SELECT x.rid FROM tag t, tagxref x" |
| 1093 | " WHERE x.tagid=t.tagid AND t.tagname='wiki-%q'" |
| 1094 | " ORDER BY x.mtime DESC LIMIT 1", |
| 1095 | zPageName |
| 1096 | ); |
| 1097 | if( rid==0 && !isNew ){ |
| 1098 | #ifdef FOSSIL_ENABLE_JSON |
| 1099 | g.json.resultCode = FSL_JSON_E_RESOURCE_NOT_FOUND; |
| 1100 | #endif |
| 1101 | fossil_fatal("no such wiki page: %s", zPageName); |
| 1102 | } |
| 1103 | if( rid!=0 && isNew ){ |
| 1104 | #ifdef FOSSIL_ENABLE_JSON |
| 1105 | g.json.resultCode = FSL_JSON_E_RESOURCE_ALREADY_EXISTS; |
| 1106 | #endif |
| 1107 | fossil_fatal("wiki page %s already exists", zPageName); |
| 1108 | } |
| 1109 | |
| 1110 | blob_zero(&wiki); |
| 1111 | zDate = date_in_standard_format("now"); |
| 1112 | blob_appendf(&wiki, "D %s\n", zDate); |
| 1113 | free(zDate); |
| 1114 | blob_appendf(&wiki, "L %F\n", zPageName ); |
| @@ -1133,47 +1118,112 @@ | |
| 1133 | db_begin_transaction(); |
| 1134 | wiki_put(&wiki, 0, wiki_need_moderation(localUser)); |
| 1135 | db_end_transaction(0); |
| 1136 | return 1; |
| 1137 | } |
| 1138 | |
| 1139 | /* |
| 1140 | ** COMMAND: wiki* |
| 1141 | ** |
| 1142 | ** Usage: %fossil wiki (export|create|commit|list) WikiName |
| 1143 | ** |
| 1144 | ** Run various subcommands to work with wiki entries or tech notes. |
| 1145 | ** |
| 1146 | ** %fossil wiki export ?PAGENAME? ?FILE? [-t|--technote DATETIME ] |
| 1147 | ** |
| 1148 | ** Sends the latest version of either the PAGENAME wiki entry |
| 1149 | ** or the DATETIME tech note to the given file or standard |
| 1150 | ** output. One of PAGENAME or DATETIME must be specified. |
| 1151 | ** |
| 1152 | ** %fossil wiki (create|commit) PAGENAME ?FILE? ?OPTIONS? |
| 1153 | ** |
| 1154 | ** Create a new or commit changes to an existing wiki page or |
| 1155 | ** technote from FILE or from standard input. |
| 1156 | ** |
| 1157 | ** Options: |
| 1158 | ** -M|--mimetype TEXT-FORMAT The mimetype of the update defaulting |
| 1159 | ** to the type used by the previous version |
| 1160 | ** of the page or text/x-fossil-wiki. |
| 1161 | ** -t|--technote DATETIME Specifies the timestamp of the technote |
| 1162 | ** to be created or updated. |
| 1163 | ** --technote-tags TAGS The set of tags for a technote. |
| 1164 | ** --technote-bgcolor COLOR The color used for the technote on the |
| 1165 | ** timeline. |
| 1166 | ** |
| 1167 | ** %fossil wiki list ?--technote? |
| 1168 | ** %fossil wiki ls ?--technote? |
| 1169 | ** |
| 1170 | ** Lists all wiki entries, one per line, ordered |
| 1171 | ** case-insensitively by name. The --technote flag |
| 1172 | ** specifies that technotes will be listed instead of |
| 1173 | ** the wiki entries, which will be listed in order |
| 1174 | ** timestamp. |
| 1175 | ** |
| 1176 | */ |
| 1177 | void wiki_cmd(void){ |
| 1178 | int n; |
| 1179 | db_find_and_open_repository(0, 0); |
| @@ -1213,22 +1263,21 @@ | |
| 1213 | fossil_fatal("wiki page [%s] not found",zPageName); |
| 1214 | } |
| 1215 | zFile = (g.argc==4) ? "-" : g.argv[4]; |
| 1216 | }else{ |
| 1217 | if( (g.argc!=3) && (g.argc!=4) ){ |
| 1218 | usage("export ?FILE? --technote DATETIME"); |
| 1219 | } |
| 1220 | rid = db_int(0, "SELECT objid FROM event" |
| 1221 | " WHERE datetime(mtime)=datetime('%q') AND type='e'" |
| 1222 | " ORDER BY mtime DESC LIMIT 1", |
| 1223 | zETime |
| 1224 | ); |
| 1225 | if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){ |
| 1226 | zBody = pWiki->zWiki; |
| 1227 | } |
| 1228 | if( zBody==0 ){ |
| 1229 | fossil_fatal("technote not found"); |
| 1230 | } |
| 1231 | zFile = (g.argc==3) ? "-" : g.argv[3]; |
| 1232 | } |
| 1233 | for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){} |
| 1234 | zBody[i] = 0; |
| @@ -1270,43 +1319,58 @@ | |
| 1270 | if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 |
| 1271 | && (pWiki->zMimetype && *pWiki->zMimetype)){ |
| 1272 | zMimeType = pWiki->zMimetype; |
| 1273 | } |
| 1274 | }else{ |
| 1275 | rid = db_int(0, "SELECT objid FROM event" |
| 1276 | " WHERE datetime(mtime)=datetime('%q') AND type='e'" |
| 1277 | " ORDER BY mtime DESC LIMIT 1", |
| 1278 | zPageName |
| 1279 | ); |
| 1280 | if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 |
| 1281 | && (pWiki->zMimetype && *pWiki->zMimetype)){ |
| 1282 | zMimeType = pWiki->zMimetype; |
| 1283 | } |
| 1284 | } |
| 1285 | } |
| 1286 | if( !zETime ){ |
| 1287 | if( g.argv[2][1]=='r' ){ |
| 1288 | wiki_cmd_commit(zPageName, 1, &content, zMimeType, 1); |
| 1289 | fossil_print("Created new wiki page %s.\n", zPageName); |
| 1290 | }else{ |
| 1291 | wiki_cmd_commit(zPageName, 0, &content, zMimeType, 1); |
| 1292 | fossil_print("Updated wiki page %s.\n", zPageName); |
| 1293 | } |
| 1294 | }else{ |
| 1295 | char *zMETime; /* Normalized, mutable version of zETime */ |
| 1296 | zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))", |
| 1297 | zETime); |
| 1298 | if( g.argv[2][1]=='r' ){ |
| 1299 | event_cmd_commit(zMETime, 1, &content, zMimeType, zPageName, |
| 1300 | zTags, zClr); |
| 1301 | fossil_print("Created new tech note %s.\n", zMETime); |
| 1302 | }else{ |
| 1303 | event_cmd_commit(zMETime, 0, &content, zMimeType, zPageName, |
| 1304 | zTags, zClr); |
| 1305 | fossil_print("Updated tech note %s.\n", zMETime); |
| 1306 | } |
| 1307 | free(zMETime); |
| 1308 | } |
| 1309 | manifest_destroy(pWiki); |
| 1310 | blob_reset(&content); |
| 1311 | }else if( strncmp(g.argv[2],"delete",n)==0 ){ |
| 1312 | if( g.argc!=5 ){ |
| @@ -1314,23 +1378,35 @@ | |
| 1314 | } |
| 1315 | fossil_fatal("delete not yet implemented."); |
| 1316 | }else if(( strncmp(g.argv[2],"list",n)==0 ) |
| 1317 | || ( strncmp(g.argv[2],"ls",n)==0 )){ |
| 1318 | Stmt q; |
| 1319 | if ( !find_option("technote","t",0) ){ |
| 1320 | db_prepare(&q, |
| 1321 | "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'" |
| 1322 | " ORDER BY lower(tagname) /*sort*/" |
| 1323 | ); |
| 1324 | }else{ |
| 1325 | db_prepare(&q, |
| 1326 | "SELECT datetime(mtime) FROM event WHERE type='e'" |
| 1327 | " ORDER BY mtime /*sort*/" |
| 1328 | ); |
| 1329 | } |
| 1330 | while( db_step(&q)==SQLITE_ROW ){ |
| 1331 | const char *zName = db_column_text(&q, 0); |
| 1332 | fossil_print( "%s\n",zName); |
| 1333 | } |
| 1334 | db_finalize(&q); |
| 1335 | }else{ |
| 1336 | goto wiki_cmd_usage; |
| 1337 |
| --- src/wiki.c | |
| +++ src/wiki.c | |
| @@ -122,20 +122,35 @@ | |
| 122 | static int is_sandbox(const char *zPagename){ |
| 123 | return fossil_stricmp(zPagename,"sandbox")==0 || |
| 124 | fossil_stricmp(zPagename,"sand box")==0; |
| 125 | } |
| 126 | |
| 127 | /* |
| 128 | ** Formal, common and short names for the various wiki styles. |
| 129 | */ |
| 130 | static const char *const azStyles[] = { |
| 131 | "text/x-fossil-wiki", "Fossil Wiki", "wiki", |
| 132 | "text/x-markdown", "Markdown", "markdown", |
| 133 | "text/plain", "Plain Text", "plain" |
| 134 | }; |
| 135 | |
| 136 | /* |
| 137 | ** Only allow certain mimetypes through. |
| 138 | ** All others become "text/x-fossil-wiki" |
| 139 | */ |
| 140 | const char *wiki_filter_mimetypes(const char *zMimetype){ |
| 141 | if( zMimetype!=0 ){ |
| 142 | int i; |
| 143 | for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=3){ |
| 144 | if( fossil_strcmp(zMimetype,azStyles[i+2])==0 ){ |
| 145 | return azStyles[i]; |
| 146 | } |
| 147 | } |
| 148 | if( fossil_strcmp(zMimetype, "text/x-markdown")==0 |
| 149 | || fossil_strcmp(zMimetype, "text/plain")==0 ){ |
| 150 | return zMimetype; |
| 151 | } |
| 152 | } |
| 153 | return "text/x-fossil-wiki"; |
| 154 | } |
| 155 | |
| 156 | /* |
| @@ -412,27 +427,18 @@ | |
| 427 | db_multi_exec("INSERT OR IGNORE INTO unsent VALUES(%d)", nrid); |
| 428 | db_multi_exec("INSERT OR IGNORE INTO unclustered VALUES(%d);", nrid); |
| 429 | manifest_crosslink(nrid, pWiki, MC_NONE); |
| 430 | } |
| 431 | |
| 432 | /* |
| 433 | ** Output a selection box from which the user can select the |
| 434 | ** wiki mimetype. |
| 435 | */ |
| 436 | void mimetype_option_menu(const char *zMimetype){ |
| 437 | unsigned i; |
| 438 | @ <select name="mimetype" size="1"> |
| 439 | for(i=0; i<sizeof(azStyles)/sizeof(azStyles[0]); i+=3){ |
| 440 | if( fossil_strcmp(zMimetype,azStyles[i])==0 ){ |
| 441 | @ <option value="%s(azStyles[i])" selected>%s(azStyles[i+1])</option> |
| 442 | }else{ |
| 443 | @ <option value="%s(azStyles[i])">%s(azStyles[i+1])</option> |
| 444 | } |
| @@ -1068,47 +1074,26 @@ | |
| 1074 | style_footer(); |
| 1075 | } |
| 1076 | |
| 1077 | /* |
| 1078 | ** Add a new wiki page to the repository. The page name is |
| 1079 | ** given by the zPageName parameter. rid must be zero to create |
| 1080 | ** a new page otherwise the page identified by rid is updated. |
| 1081 | ** |
| 1082 | ** The content of the new page is given by the blob pContent. |
| 1083 | ** |
| 1084 | ** zMimeType specifies the N-card for the wiki page. If it is 0, |
| 1085 | ** empty, or "text/x-fossil-wiki" (the default format) then it is |
| 1086 | ** ignored. |
| 1087 | */ |
| 1088 | int wiki_cmd_commit(const char *zPageName, int rid, Blob *pContent, |
| 1089 | const char *zMimeType, int localUser){ |
| 1090 | Blob wiki; /* Wiki page content */ |
| 1091 | Blob cksum; /* wiki checksum */ |
| 1092 | char *zDate; /* timestamp */ |
| 1093 | char *zUuid; /* uuid for rid */ |
| 1094 | |
| 1095 | blob_zero(&wiki); |
| 1096 | zDate = date_in_standard_format("now"); |
| 1097 | blob_appendf(&wiki, "D %s\n", zDate); |
| 1098 | free(zDate); |
| 1099 | blob_appendf(&wiki, "L %F\n", zPageName ); |
| @@ -1133,47 +1118,112 @@ | |
| 1118 | db_begin_transaction(); |
| 1119 | wiki_put(&wiki, 0, wiki_need_moderation(localUser)); |
| 1120 | db_end_transaction(0); |
| 1121 | return 1; |
| 1122 | } |
| 1123 | |
| 1124 | /* |
| 1125 | ** Determine the rid for a tech note given either its id or its |
| 1126 | ** timestamp. Returns 0 if there is no such item and -1 if the details |
| 1127 | ** are ambiguous and could refer to multiple items. |
| 1128 | */ |
| 1129 | int wiki_technote_to_rid(const char *zETime) { |
| 1130 | int rid=0; /* Artifact ID of the tech note */ |
| 1131 | int nETime = strlen(zETime); |
| 1132 | Stmt q; |
| 1133 | if( nETime>=4 && nETime<=UUID_SIZE && validate16(zETime, nETime) ){ |
| 1134 | char zUuid[UUID_SIZE+1]; |
| 1135 | memcpy(zUuid, zETime, nETime+1); |
| 1136 | canonical16(zUuid, nETime); |
| 1137 | db_prepare(&q, |
| 1138 | "SELECT e.objid" |
| 1139 | " FROM event e, tag t" |
| 1140 | " WHERE e.type='e' AND e.tagid IS NOT NULL AND t.tagid=e.tagid" |
| 1141 | " AND t.tagname GLOB 'event-%q*'", |
| 1142 | zUuid |
| 1143 | ); |
| 1144 | if( db_step(&q)==SQLITE_ROW ){ |
| 1145 | rid = db_column_int(&q, 0); |
| 1146 | if( db_step(&q)==SQLITE_ROW ) rid = -1; |
| 1147 | } |
| 1148 | db_finalize(&q); |
| 1149 | } |
| 1150 | if (!rid) { |
| 1151 | if (strlen(zETime)>4) { |
| 1152 | rid = db_int(0, "SELECT objid" |
| 1153 | " FROM event" |
| 1154 | " WHERE datetime(mtime)=datetime('%q')" |
| 1155 | " AND type='e'" |
| 1156 | " AND tagid IS NOT NULL" |
| 1157 | " ORDER BY objid DESC LIMIT 1", |
| 1158 | zETime); |
| 1159 | } |
| 1160 | } |
| 1161 | return rid; |
| 1162 | } |
| 1163 | |
| 1164 | /* |
| 1165 | ** COMMAND: wiki* |
| 1166 | ** |
| 1167 | ** Usage: %fossil wiki (export|create|commit|list) WikiName |
| 1168 | ** |
| 1169 | ** Run various subcommands to work with wiki entries or tech notes. |
| 1170 | ** |
| 1171 | ** %fossil wiki export PAGENAME ?FILE? |
| 1172 | ** %fossil wiki export ?FILE? -t|--technote DATETIME|TECHNOTE-ID |
| 1173 | ** |
| 1174 | ** Sends the latest version of either a wiki page or of a tech note |
| 1175 | ** to the given file or standard output. |
| 1176 | ** If PAGENAME is provided, the wiki page will be output. For |
| 1177 | ** a tech note either DATETIME or TECHNOTE-ID must be specified. If |
| 1178 | ** DATETIME is used, the most recently modified tech note with that |
| 1179 | ** DATETIME will be sent. |
| 1180 | ** |
| 1181 | ** %fossil wiki (create|commit) PAGENAME ?FILE? ?OPTIONS? |
| 1182 | ** |
| 1183 | ** Create a new or commit changes to an existing wiki page or |
| 1184 | ** technote from FILE or from standard input. PAGENAME is the |
| 1185 | ** name of the wiki entry or the timeline comment of the |
| 1186 | ** technote. |
| 1187 | ** |
| 1188 | ** Options: |
| 1189 | ** -M|--mimetype TEXT-FORMAT The mime type of the update. |
| 1190 | ** Defaults to the type used by |
| 1191 | ** the previous version of the |
| 1192 | ** page, or text/x-fossil-wiki. |
| 1193 | ** Valid values are: text/x-fossil-wiki, |
| 1194 | ** text/markdown and text/plain. fossil, |
| 1195 | ** markdown or plain can be specified as |
| 1196 | ** synonyms of these values. |
| 1197 | ** -t|--technote DATETIME Specifies the timestamp of |
| 1198 | ** the technote to be created or |
| 1199 | ** updated. When updating a tech note |
| 1200 | ** the most recently modified tech note |
| 1201 | ** with the specified timestamp will be |
| 1202 | ** updated. |
| 1203 | ** -t|--technote TECHNOTE-ID Specifies the technote to be |
| 1204 | ** updated by its technote id. |
| 1205 | ** --technote-tags TAGS The set of tags for a technote. |
| 1206 | ** --technote-bgcolor COLOR The color used for the technote |
| 1207 | ** on the timeline. |
| 1208 | ** |
| 1209 | ** %fossil wiki list ?OPTIONS? |
| 1210 | ** %fossil wiki ls ?OPTIONS? |
| 1211 | ** |
| 1212 | ** Lists all wiki entries, one per line, ordered |
| 1213 | ** case-insensitively by name. |
| 1214 | ** |
| 1215 | ** Options: |
| 1216 | ** -t|--technote Technotes will be listed instead of |
| 1217 | ** pages. The technotes will be in order |
| 1218 | ** of timestamp with the most recent |
| 1219 | ** first. |
| 1220 | ** -s|--show-technote-ids The id of the tech note will be listed |
| 1221 | ** along side the timestamp. The tech note |
| 1222 | ** id will be the first word on each line. |
| 1223 | ** This option only applies if the |
| 1224 | ** --technote option is also specified. |
| 1225 | ** |
| 1226 | */ |
| 1227 | void wiki_cmd(void){ |
| 1228 | int n; |
| 1229 | db_find_and_open_repository(0, 0); |
| @@ -1213,22 +1263,21 @@ | |
| 1263 | fossil_fatal("wiki page [%s] not found",zPageName); |
| 1264 | } |
| 1265 | zFile = (g.argc==4) ? "-" : g.argv[4]; |
| 1266 | }else{ |
| 1267 | if( (g.argc!=3) && (g.argc!=4) ){ |
| 1268 | usage("export ?FILE? --technote DATETIME|TECHNOTE-ID"); |
| 1269 | } |
| 1270 | rid = wiki_technote_to_rid(zETime); |
| 1271 | if (rid == -1) { |
| 1272 | fossil_fatal("ambiguous tech note id: %s", zETime); |
| 1273 | } |
| 1274 | if( (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 ){ |
| 1275 | zBody = pWiki->zWiki; |
| 1276 | } |
| 1277 | if( zBody==0 ){ |
| 1278 | fossil_fatal("technote [%s] not found",zETime); |
| 1279 | } |
| 1280 | zFile = (g.argc==3) ? "-" : g.argv[3]; |
| 1281 | } |
| 1282 | for(i=strlen(zBody); i>0 && fossil_isspace(zBody[i-1]); i--){} |
| 1283 | zBody[i] = 0; |
| @@ -1270,43 +1319,58 @@ | |
| 1319 | if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_WIKI, 0))!=0 |
| 1320 | && (pWiki->zMimetype && *pWiki->zMimetype)){ |
| 1321 | zMimeType = pWiki->zMimetype; |
| 1322 | } |
| 1323 | }else{ |
| 1324 | rid = wiki_technote_to_rid(zETime); |
| 1325 | if(rid>0 && (pWiki = manifest_get(rid, CFTYPE_EVENT, 0))!=0 |
| 1326 | && (pWiki->zMimetype && *pWiki->zMimetype)){ |
| 1327 | zMimeType = pWiki->zMimetype; |
| 1328 | } |
| 1329 | } |
| 1330 | }else{ |
| 1331 | zMimeType = wiki_filter_mimetypes(zMimeType); |
| 1332 | } |
| 1333 | if( g.argv[2][1]=='r' && rid>0 ){ |
| 1334 | if ( !zETime ){ |
| 1335 | fossil_fatal("wiki page %s already exists", zPageName); |
| 1336 | }else{ |
| 1337 | /* Creating a tech note with same timestamp is permitted |
| 1338 | and should create a new tech note */ |
| 1339 | rid = 0; |
| 1340 | } |
| 1341 | }else if( g.argv[2][1]=='o' && rid == 0 ){ |
| 1342 | if ( !zETime ){ |
| 1343 | fossil_fatal("no such wiki page: %s", zPageName); |
| 1344 | }else{ |
| 1345 | fossil_fatal("no such tech note: %s", zETime); |
| 1346 | } |
| 1347 | } |
| 1348 | |
| 1349 | if( !zETime ){ |
| 1350 | wiki_cmd_commit(zPageName, rid, &content, zMimeType, 1); |
| 1351 | if( g.argv[2][1]=='r' ){ |
| 1352 | fossil_print("Created new wiki page %s.\n", zPageName); |
| 1353 | }else{ |
| 1354 | fossil_print("Updated wiki page %s.\n", zPageName); |
| 1355 | } |
| 1356 | }else{ |
| 1357 | if( rid != -1 ){ |
| 1358 | char *zMETime; /* Normalized, mutable version of zETime */ |
| 1359 | zMETime = db_text(0, "SELECT coalesce(datetime(%Q),datetime('now'))", |
| 1360 | zETime); |
| 1361 | event_cmd_commit(zMETime, rid, &content, zMimeType, zPageName, |
| 1362 | zTags, zClr); |
| 1363 | if( g.argv[2][1]=='r' ){ |
| 1364 | fossil_print("Created new tech note %s.\n", zMETime); |
| 1365 | }else{ |
| 1366 | fossil_print("Updated tech note %s.\n", zMETime); |
| 1367 | } |
| 1368 | free(zMETime); |
| 1369 | }else{ |
| 1370 | fossil_fatal("ambiguous tech note id: %s", zETime); |
| 1371 | } |
| 1372 | } |
| 1373 | manifest_destroy(pWiki); |
| 1374 | blob_reset(&content); |
| 1375 | }else if( strncmp(g.argv[2],"delete",n)==0 ){ |
| 1376 | if( g.argc!=5 ){ |
| @@ -1314,23 +1378,35 @@ | |
| 1378 | } |
| 1379 | fossil_fatal("delete not yet implemented."); |
| 1380 | }else if(( strncmp(g.argv[2],"list",n)==0 ) |
| 1381 | || ( strncmp(g.argv[2],"ls",n)==0 )){ |
| 1382 | Stmt q; |
| 1383 | int showIds = 0; |
| 1384 | |
| 1385 | if ( !find_option("technote","t",0) ){ |
| 1386 | db_prepare(&q, |
| 1387 | "SELECT substr(tagname, 6) FROM tag WHERE tagname GLOB 'wiki-*'" |
| 1388 | " ORDER BY lower(tagname) /*sort*/" |
| 1389 | ); |
| 1390 | }else{ |
| 1391 | showIds = find_option("show-technote-ids","s",0)!=0; |
| 1392 | db_prepare(&q, |
| 1393 | "SELECT datetime(e.mtime), substr(t.tagname,7)" |
| 1394 | " FROM event e, tag t" |
| 1395 | " WHERE e.type='e'" |
| 1396 | " AND e.tagid IS NOT NULL" |
| 1397 | " AND t.tagid=e.tagid" |
| 1398 | " ORDER BY e.mtime DESC /*sort*/" |
| 1399 | ); |
| 1400 | } |
| 1401 | |
| 1402 | while( db_step(&q)==SQLITE_ROW ){ |
| 1403 | const char *zName = db_column_text(&q, 0); |
| 1404 | if (showIds) { |
| 1405 | const char *zUuid = db_column_text(&q, 1); |
| 1406 | fossil_print("%s ",zUuid); |
| 1407 | } |
| 1408 | fossil_print( "%s\n",zName); |
| 1409 | } |
| 1410 | db_finalize(&q); |
| 1411 | }else{ |
| 1412 | goto wiki_cmd_usage; |
| 1413 |
+1
-1
| --- test/merge6.test | ||
| +++ test/merge6.test | ||
| @@ -62,10 +62,10 @@ | ||
| 62 | 62 | fossil ls |
| 63 | 63 | |
| 64 | 64 | test merge_multi-4 {[normalize_result] eq {f1 |
| 65 | 65 | f2 |
| 66 | 66 | f3 |
| 67 | -f4}} knownBug | |
| 67 | +f4}} | |
| 68 | 68 | |
| 69 | 69 | ############################################################################### |
| 70 | 70 | |
| 71 | 71 | test_cleanup |
| 72 | 72 | |
| 73 | 73 | ADDED test/merge_exe.test |
| --- test/merge6.test | |
| +++ test/merge6.test | |
| @@ -62,10 +62,10 @@ | |
| 62 | fossil ls |
| 63 | |
| 64 | test merge_multi-4 {[normalize_result] eq {f1 |
| 65 | f2 |
| 66 | f3 |
| 67 | f4}} knownBug |
| 68 | |
| 69 | ############################################################################### |
| 70 | |
| 71 | test_cleanup |
| 72 | |
| 73 | ADDED test/merge_exe.test
| --- test/merge6.test | |
| +++ test/merge6.test | |
| @@ -62,10 +62,10 @@ | |
| 62 | fossil ls |
| 63 | |
| 64 | test merge_multi-4 {[normalize_result] eq {f1 |
| 65 | f2 |
| 66 | f3 |
| 67 | f4}} |
| 68 | |
| 69 | ############################################################################### |
| 70 | |
| 71 | test_cleanup |
| 72 | |
| 73 | ADDED test/merge_exe.test
+93
| --- a/test/merge_exe.test | ||
| +++ b/test/merge_exe.test | ||
| @@ -0,0 +1,93 @@ | ||
| 1 | +# | |
| 2 | +# Copyright (c) 2016 D. Richard Hipp | |
| 3 | +# | |
| 4 | +# This program is free software; you can redistribute it and/or | |
| 5 | +# modify it under the terms of the Simplified BSD License (also | |
| 6 | +# known as the "2-Clause License" or "FreeBSD License".) | |
| 7 | +# | |
| 8 | +# This program is distributed in the hope that it will be useful, | |
| 9 | +# but without any warranty; without even the implied warranty of | |
| 10 | +# merchantability or fitness for a particular purpose. | |
| 11 | +# | |
| 12 | +# Author contact information: | |
| 13 | +# [email protected] | |
| 14 | +# http://www.hwaci.com/drh/ | |
| 15 | +# | |
| 16 | +############################################################################ | |
| 17 | +# | |
| 18 | +# Testing changes to a file's execute bit caused by a merge | |
| 19 | +# | |
| 20 | + | |
| 21 | +if {$tcl_platform(platform) eq "unix"} { | |
| 22 | + proc setx {fn isexe} { | |
| 23 | + file attributes $fn -permissions [expr {$isexe ? "+" : "-"}]x | |
| 24 | + } | |
| 25 | + | |
| 26 | + proc test_exe {fn expected} { | |
| 27 | + test merge_exe-$fn {[file executable $fn]==$expected} | |
| 28 | + } | |
| 29 | +} else { | |
| 30 | + # WARNING: This is a hack for setting and testing a file's execute bit | |
| 31 | + # on Windows. Never operate directly on Fossil database files like this | |
| 32 | + # unless you really need to and really know what you're doing. | |
| 33 | + | |
| 34 | + proc query {sql} { | |
| 35 | + return [exec $::fossilexe sqlite3 --no-repository _FOSSIL_ $sql] | |
| 36 | + } | |
| 37 | + | |
| 38 | + proc setx {fn isexe} { | |
| 39 | + set isexe [expr {bool($isexe)}] | |
| 40 | + query "UPDATE vfile SET isexe=$isexe WHERE pathname='$fn'" | |
| 41 | + } | |
| 42 | + | |
| 43 | + proc test_exe {fn expected} { | |
| 44 | + set result [query "SELECT isexe FROM vfile WHERE pathname='$fn'"] | |
| 45 | + test merge_exe-$fn {$result==$expected} | |
| 46 | + } | |
| 47 | +} | |
| 48 | + | |
| 49 | +test_setup | |
| 50 | + | |
| 51 | +write_file f1 "line" | |
| 52 | +write_file f2 "line" | |
| 53 | +write_file f3 "line" | |
| 54 | +write_file f4 "line" | |
| 55 | +fossil addremove | |
| 56 | +setx f3 1 | |
| 57 | +setx f4 1 | |
| 58 | +fossil commit -m "add files" | |
| 59 | + | |
| 60 | +write_file f0 "f0" | |
| 61 | +fossil add f0 | |
| 62 | +setx f0 1 | |
| 63 | +fossil mv --hard f1 f1n | |
| 64 | +setx f1n 1 | |
| 65 | +write_file f2 "line\nline2" | |
| 66 | +setx f2 1 | |
| 67 | +write_file f3 "line\nline2" | |
| 68 | +setx f3 0 | |
| 69 | +setx f4 0 | |
| 70 | +fossil commit -b b -m "changes" | |
| 71 | + | |
| 72 | +fossil update trunk | |
| 73 | +write_file f3 "line3\nline" | |
| 74 | +fossil commit -m "edit f3" | |
| 75 | + | |
| 76 | +fossil merge b | |
| 77 | +test_status_list merge_exe-mrg $RESULT { | |
| 78 | + EXECUTABLE f1 | |
| 79 | + EXECUTABLE f2 | |
| 80 | + UNEXEC f3 | |
| 81 | + UNEXEC f4 | |
| 82 | + UPDATE f2 | |
| 83 | + MERGE f3 | |
| 84 | + RENAME f1 -> f1n | |
| 85 | + ADDED f0 | |
| 86 | +} | |
| 87 | +foreach {fn isexe} {f0 1 f1n 1 f2 1 f3 0 f4 0} { | |
| 88 | + test_exe $fn $isexe | |
| 89 | +} | |
| 90 | + | |
| 91 | +############################################################################### | |
| 92 | + | |
| 93 | +test_cleanup |
| --- a/test/merge_exe.test | |
| +++ b/test/merge_exe.test | |
| @@ -0,0 +1,93 @@ | |
| --- a/test/merge_exe.test | |
| +++ b/test/merge_exe.test | |
| @@ -0,0 +1,93 @@ | |
| 1 | # |
| 2 | # Copyright (c) 2016 D. Richard Hipp |
| 3 | # |
| 4 | # This program is free software; you can redistribute it and/or |
| 5 | # modify it under the terms of the Simplified BSD License (also |
| 6 | # known as the "2-Clause License" or "FreeBSD License".) |
| 7 | # |
| 8 | # This program is distributed in the hope that it will be useful, |
| 9 | # but without any warranty; without even the implied warranty of |
| 10 | # merchantability or fitness for a particular purpose. |
| 11 | # |
| 12 | # Author contact information: |
| 13 | # [email protected] |
| 14 | # http://www.hwaci.com/drh/ |
| 15 | # |
| 16 | ############################################################################ |
| 17 | # |
| 18 | # Testing changes to a file's execute bit caused by a merge |
| 19 | # |
| 20 | |
| 21 | if {$tcl_platform(platform) eq "unix"} { |
| 22 | proc setx {fn isexe} { |
| 23 | file attributes $fn -permissions [expr {$isexe ? "+" : "-"}]x |
| 24 | } |
| 25 | |
| 26 | proc test_exe {fn expected} { |
| 27 | test merge_exe-$fn {[file executable $fn]==$expected} |
| 28 | } |
| 29 | } else { |
| 30 | # WARNING: This is a hack for setting and testing a file's execute bit |
| 31 | # on Windows. Never operate directly on Fossil database files like this |
| 32 | # unless you really need to and really know what you're doing. |
| 33 | |
| 34 | proc query {sql} { |
| 35 | return [exec $::fossilexe sqlite3 --no-repository _FOSSIL_ $sql] |
| 36 | } |
| 37 | |
| 38 | proc setx {fn isexe} { |
| 39 | set isexe [expr {bool($isexe)}] |
| 40 | query "UPDATE vfile SET isexe=$isexe WHERE pathname='$fn'" |
| 41 | } |
| 42 | |
| 43 | proc test_exe {fn expected} { |
| 44 | set result [query "SELECT isexe FROM vfile WHERE pathname='$fn'"] |
| 45 | test merge_exe-$fn {$result==$expected} |
| 46 | } |
| 47 | } |
| 48 | |
| 49 | test_setup |
| 50 | |
| 51 | write_file f1 "line" |
| 52 | write_file f2 "line" |
| 53 | write_file f3 "line" |
| 54 | write_file f4 "line" |
| 55 | fossil addremove |
| 56 | setx f3 1 |
| 57 | setx f4 1 |
| 58 | fossil commit -m "add files" |
| 59 | |
| 60 | write_file f0 "f0" |
| 61 | fossil add f0 |
| 62 | setx f0 1 |
| 63 | fossil mv --hard f1 f1n |
| 64 | setx f1n 1 |
| 65 | write_file f2 "line\nline2" |
| 66 | setx f2 1 |
| 67 | write_file f3 "line\nline2" |
| 68 | setx f3 0 |
| 69 | setx f4 0 |
| 70 | fossil commit -b b -m "changes" |
| 71 | |
| 72 | fossil update trunk |
| 73 | write_file f3 "line3\nline" |
| 74 | fossil commit -m "edit f3" |
| 75 | |
| 76 | fossil merge b |
| 77 | test_status_list merge_exe-mrg $RESULT { |
| 78 | EXECUTABLE f1 |
| 79 | EXECUTABLE f2 |
| 80 | UNEXEC f3 |
| 81 | UNEXEC f4 |
| 82 | UPDATE f2 |
| 83 | MERGE f3 |
| 84 | RENAME f1 -> f1n |
| 85 | ADDED f0 |
| 86 | } |
| 87 | foreach {fn isexe} {f0 1 f1n 1 f2 1 f3 0 f4 0} { |
| 88 | test_exe $fn $isexe |
| 89 | } |
| 90 | |
| 91 | ############################################################################### |
| 92 | |
| 93 | test_cleanup |
+331
-61
| --- test/merge_renames.test | ||
| +++ test/merge_renames.test | ||
| @@ -1,9 +1,14 @@ | ||
| 1 | 1 | # |
| 2 | 2 | # Tests for merging with renames |
| 3 | 3 | # |
| 4 | 4 | # |
| 5 | + | |
| 6 | +proc commit_id {version} { | |
| 7 | + regexp -line {^artifact:\s+(\S+)} [fossil whatis $version] - id | |
| 8 | + return $id | |
| 9 | +} | |
| 5 | 10 | |
| 6 | 11 | require_no_open_checkout |
| 7 | 12 | |
| 8 | 13 | ###################################### |
| 9 | 14 | # Test 1 # |
| @@ -31,12 +36,11 @@ | ||
| 31 | 36 | |
| 32 | 37 | write_file f1 "line6" |
| 33 | 38 | fossil commit -m "c4" |
| 34 | 39 | |
| 35 | 40 | fossil update pivot |
| 36 | -fossil mv f1 f2 | |
| 37 | -file rename -force f1 f2 | |
| 41 | +fossil mv --hard f1 f2 | |
| 38 | 42 | fossil commit -b rename -m "c5" |
| 39 | 43 | |
| 40 | 44 | fossil merge trunk |
| 41 | 45 | fossil commit -m "trunk merged" |
| 42 | 46 | |
| @@ -44,26 +48,11 @@ | ||
| 44 | 48 | write_file f3 "someline" |
| 45 | 49 | fossil add f3 |
| 46 | 50 | fossil commit -b branch2 -m "newbranch" |
| 47 | 51 | |
| 48 | 52 | fossil merge trunk |
| 49 | -puts $RESULT | |
| 50 | - | |
| 51 | -set deletes 0 | |
| 52 | -foreach {status filename} $RESULT { | |
| 53 | - if {$status=="DELETE"} { | |
| 54 | - set deletes [expr $deletes + 1] | |
| 55 | - } | |
| 56 | -} | |
| 57 | - | |
| 58 | -if {$deletes!=0} { | |
| 59 | - # failed | |
| 60 | - protOut "Error, the merge should not delete any file" | |
| 61 | - test merge_renames-1 0 | |
| 62 | -} else { | |
| 63 | - test merge_renames-1 1 | |
| 64 | -} | |
| 53 | +test_status_list merge_renames-1 $RESULT {UPDATE f1} | |
| 65 | 54 | |
| 66 | 55 | ###################################### |
| 67 | 56 | # Test 2 # |
| 68 | 57 | # Reported: Ticket [74413366fe5067] # |
| 69 | 58 | ###################################### |
| @@ -77,12 +66,11 @@ | ||
| 77 | 66 | |
| 78 | 67 | write_file f2 "line2" |
| 79 | 68 | fossil add f2 |
| 80 | 69 | fossil commit -m "newfile" |
| 81 | 70 | |
| 82 | -fossil mv f2 f2new | |
| 83 | -file rename -force f2 f2new | |
| 71 | +fossil mv --hard f2 f2new | |
| 84 | 72 | fossil commit -m "rename" |
| 85 | 73 | |
| 86 | 74 | fossil update pivot |
| 87 | 75 | write_file f1 "line3" |
| 88 | 76 | fossil commit -b branch -m "change" |
| @@ -91,27 +79,11 @@ | ||
| 91 | 79 | fossil commit -m "trunk merged" |
| 92 | 80 | |
| 93 | 81 | fossil update trunk |
| 94 | 82 | |
| 95 | 83 | fossil merge branch |
| 96 | -puts $RESULT | |
| 97 | - | |
| 98 | -# Not a nice way to check, but I don't know more tcl now | |
| 99 | -set deletes 0 | |
| 100 | -foreach {status filename} $RESULT { | |
| 101 | - if {$status=="DELETE"} { | |
| 102 | - set deletes [expr $deletes + 1] | |
| 103 | - } | |
| 104 | -} | |
| 105 | - | |
| 106 | -if {$deletes!=0} { | |
| 107 | - # failed | |
| 108 | - protOut "Error, the merge should not delete any file" | |
| 109 | - test merge_renames-2 0 | |
| 110 | -} else { | |
| 111 | - test merge_renames-2 1 | |
| 112 | -} | |
| 84 | +test_status_list merge_renames-2 $RESULT {UPDATE f1} | |
| 113 | 85 | |
| 114 | 86 | ###################################### |
| 115 | 87 | # Test 3 # |
| 116 | 88 | # Reported: Ticket [30b28cf351] # |
| 117 | 89 | ###################################### |
| @@ -125,12 +97,11 @@ | ||
| 125 | 97 | |
| 126 | 98 | write_file f2 "line2" |
| 127 | 99 | fossil add f2 |
| 128 | 100 | fossil commit -m "newfile" |
| 129 | 101 | |
| 130 | -fossil mv f2 f2new | |
| 131 | -file rename -force f2 f2new | |
| 102 | +fossil mv --hard f2 f2new | |
| 132 | 103 | fossil commit -m "rename" |
| 133 | 104 | |
| 134 | 105 | fossil update pivot |
| 135 | 106 | write_file f1 "line3" |
| 136 | 107 | fossil commit -b branch -m "change" |
| @@ -139,34 +110,33 @@ | ||
| 139 | 110 | fossil commit -m "trunk merged" |
| 140 | 111 | |
| 141 | 112 | fossil update trunk |
| 142 | 113 | |
| 143 | 114 | fossil merge branch |
| 144 | -puts $RESULT | |
| 145 | - | |
| 146 | -# Not a nice way to check, but I don't know more tcl now | |
| 147 | -set deletes 0 | |
| 148 | -foreach {status filename} $RESULT { | |
| 149 | - if {$status=="DELETE"} { | |
| 150 | - set deletes [expr $deletes + 1] | |
| 151 | - } | |
| 152 | -} | |
| 153 | - | |
| 154 | -if {$deletes!=0} { | |
| 155 | - # failed | |
| 156 | - protOut "Error, the merge should not delete any file" | |
| 157 | - test merge_renames-3 0 | |
| 158 | -} else { | |
| 159 | - test merge_renames-3 1 | |
| 160 | -} | |
| 115 | +test_status_list merge_renames-3 $RESULT {UPDATE f1} | |
| 161 | 116 | |
| 162 | 117 | ###################################### |
| 163 | 118 | # Test 4 # |
| 164 | 119 | # Reported: Ticket [67176c3aa4] # |
| 165 | 120 | ###################################### |
| 166 | 121 | |
| 167 | -# TO BE WRITTEN. | |
| 122 | +test_setup | |
| 123 | + | |
| 124 | +write_file f1 "f1" | |
| 125 | +fossil add f1 | |
| 126 | +fossil commit -m "add f1" | |
| 127 | + | |
| 128 | +write_file f1 "f1.1" | |
| 129 | +fossil commit --branch b -m "change f1" | |
| 130 | + | |
| 131 | +fossil update trunk | |
| 132 | +fossil mv --hard f1 f2 | |
| 133 | +fossil commit -m "f1 -> f2" | |
| 134 | + | |
| 135 | +fossil merge b | |
| 136 | +test_status_list merge_renames-4-1 $RESULT {UPDATE f2} | |
| 137 | +test_file_contents merge_renames-4-2 f2 "f1.1" | |
| 168 | 138 | |
| 169 | 139 | ###################################### |
| 170 | 140 | # Test 5 # |
| 171 | 141 | # Handle Rename/Add via Merge # |
| 172 | 142 | ###################################### |
| @@ -180,25 +150,325 @@ | ||
| 180 | 150 | write_file f3 "f3 line" |
| 181 | 151 | fossil add f3 |
| 182 | 152 | fossil commit -m "branch file" -b branch_for_f3 |
| 183 | 153 | |
| 184 | 154 | fossil update trunk |
| 185 | -fossil mv f1 f2 | |
| 186 | -file rename -force f1 f2 | |
| 155 | +fossil mv --hard f1 f2 | |
| 187 | 156 | write_file f1 "new f1 line" |
| 188 | 157 | fossil add f1 |
| 189 | 158 | fossil commit -m "rename and add file with old name" |
| 190 | 159 | |
| 191 | 160 | fossil update branch_for_f3 |
| 192 | 161 | fossil merge trunk |
| 162 | +test_status_list merge_renames-5-1 $RESULT { | |
| 163 | + RENAME f1 -> f2 | |
| 164 | + ADDED f1 | |
| 165 | +} | |
| 166 | + | |
| 193 | 167 | fossil commit -m "trunk merged, should have 3 files" |
| 194 | 168 | |
| 195 | 169 | fossil ls |
| 196 | 170 | |
| 197 | -test merge_renames-5 {[normalize_result] eq {f1 | |
| 171 | +test merge_renames-5-2 {[normalize_result] eq {f1 | |
| 198 | 172 | f2 |
| 199 | -f3}} knownBug | |
| 173 | +f3}} | |
| 174 | + | |
| 175 | +##################################### | |
| 176 | +# Test 6 # | |
| 177 | +# Merging a branch multiple times # | |
| 178 | +##################################### | |
| 179 | + | |
| 180 | +test_setup | |
| 181 | + | |
| 182 | +write_file f1 "f1" | |
| 183 | +fossil add f1 | |
| 184 | +fossil commit -m "add f1" | |
| 185 | + | |
| 186 | +fossil mv --hard f1 f2 | |
| 187 | +fossil commit -b b -m "f1 -> f2" | |
| 188 | + | |
| 189 | +fossil update trunk | |
| 190 | +write_file f3 "f3" | |
| 191 | +write_file f4 "f4" | |
| 192 | +fossil add f3 f4 | |
| 193 | +fossil ci -m "add f3, f4" | |
| 194 | + | |
| 195 | +fossil mv --hard f3 f3-old | |
| 196 | +fossil mv --hard f4 f3 | |
| 197 | +fossil mv --hard f3-old f4 | |
| 198 | +fossil ci -m "swap f3 and f4" | |
| 199 | + | |
| 200 | +write_file f1 "f1.1" | |
| 201 | +fossil commit -m "edit f1" | |
| 202 | + | |
| 203 | +fossil update b | |
| 204 | +fossil merge trunk | |
| 205 | +fossil commit -m "merge trunk" | |
| 206 | + | |
| 207 | +fossil update trunk | |
| 208 | +write_file f1 "f1.2" | |
| 209 | +write_file f3 "f3.1" | |
| 210 | +write_file f4 "f4.1" | |
| 211 | +fossil commit -m "edit f1, f4" | |
| 212 | + | |
| 213 | +fossil update b | |
| 214 | +fossil merge trunk | |
| 215 | +test_status_list merge_renames-6-1 $RESULT { | |
| 216 | + UPDATE f2 | |
| 217 | + UPDATE f3 | |
| 218 | + UPDATE f4 | |
| 219 | +} | |
| 220 | +test_file_contents merge_renames-6-2 f2 "f1.2" | |
| 221 | +test_file_contents merge_renames-6-3 f3 "f3.1" | |
| 222 | +test_file_contents merge_renames-6-4 f4 "f4.1" | |
| 223 | + | |
| 224 | +######################################################################## | |
| 225 | +# Test 7 # | |
| 226 | +# Merging with an uncommitted rename of a file that has been renamed # | |
| 227 | +# in the merged branch and adding a new file with the original name # | |
| 228 | +######################################################################## | |
| 229 | + | |
| 230 | +test_setup | |
| 231 | + | |
| 232 | +write_file f1 "f1" | |
| 233 | +fossil add f1 | |
| 234 | +fossil commit -m "add f1" | |
| 235 | + | |
| 236 | +fossil mv --hard f1 f2 | |
| 237 | +write_file f2 "f2" | |
| 238 | +fossil commit -b b -m "f1 -> f2, edit f2" | |
| 239 | + | |
| 240 | +fossil update trunk | |
| 241 | +fossil mv --hard f1 f3 | |
| 242 | +write_file f1 "f1.1" | |
| 243 | +fossil add f1 | |
| 244 | +fossil merge b | |
| 245 | +test_status_list merge_renames-7-1 $RESULT {UPDATE f3} | |
| 246 | +test_file_contents merge_renames-7-2 f1 "f1.1" | |
| 247 | +test_file_contents merge_renames-7-3 f3 "f2" | |
| 248 | + | |
| 249 | +###################################################### | |
| 250 | +# Test 8 # | |
| 251 | +# Merging two branches that both add the same file # | |
| 252 | +###################################################### | |
| 253 | + | |
| 254 | +test_setup | |
| 255 | + | |
| 256 | +write_file f1 "f1.1" | |
| 257 | +fossil add f1 | |
| 258 | +fossil commit -b b1 -m "add f1" | |
| 259 | + | |
| 260 | +fossil update trunk | |
| 261 | +write_file f1 "f1.2" | |
| 262 | +fossil add f1 | |
| 263 | +fossil commit -b b2 -m "add f1" | |
| 264 | + | |
| 265 | +fossil update trunk | |
| 266 | +fossil merge b1 | |
| 267 | +fossil merge b2 | |
| 268 | +test_status_list merge_renames-8-1 $RESULT { | |
| 269 | + WARNING: no common ancestor for f1 | |
| 270 | +} | |
| 271 | + | |
| 272 | +fossil revert | |
| 273 | +fossil merge --integrate b1 | |
| 274 | +fossil merge b2 | |
| 275 | +test_status_list merge_renames-8-2 $RESULT { | |
| 276 | + WARNING: no common ancestor for f1 | |
| 277 | +} | |
| 278 | + | |
| 279 | +############################################# | |
| 280 | +# Test 9 # | |
| 281 | +# Merging a delete/rename/add combination # | |
| 282 | +############################################# | |
| 283 | + | |
| 284 | +test_setup | |
| 285 | + | |
| 286 | +write_file f1 "f1" | |
| 287 | +write_file f2 "f2" | |
| 288 | +fossil add f1 f2 | |
| 289 | +fossil commit -m "add files" | |
| 290 | + | |
| 291 | +fossil rm --hard f2 | |
| 292 | +fossil commit -b b -m "delete f2" | |
| 293 | + | |
| 294 | +fossil mv --hard f1 f2 | |
| 295 | +fossil commit -m "f1 -> f2" | |
| 296 | + | |
| 297 | +write_file f1 "f1.1" | |
| 298 | +fossil add f1 | |
| 299 | +fossil commit -m "add new f1" | |
| 300 | + | |
| 301 | +fossil update trunk | |
| 302 | +fossil merge b | |
| 303 | +set expectedMerge { | |
| 304 | + DELETE f2 | |
| 305 | + RENAME f1 -> f2 | |
| 306 | + ADDED f1 | |
| 307 | +} | |
| 308 | +test_status_list merge_renames-9-1 $RESULT $expectedMerge | |
| 309 | +fossil changes | |
| 310 | +test_status_list merge_renames-9-2 $RESULT " | |
| 311 | + MERGED_WITH [commit_id b] | |
| 312 | + ADDED_BY_MERGE f1 | |
| 313 | + RENAMED f2 | |
| 314 | + DELETED f2 (overwritten by rename) | |
| 315 | +" | |
| 316 | +test_file_contents merge_renames-9-3 f1 "f1.1" | |
| 317 | +test_file_contents merge_renames-9-4 f2 "f1" | |
| 318 | + | |
| 319 | +# Undo and ensure a dry run merge results in no changes | |
| 320 | +fossil undo | |
| 321 | +test_status_list merge_renames-9-5 $RESULT { | |
| 322 | + UNDO f1 | |
| 323 | + UNDO f2 | |
| 324 | +} | |
| 325 | +fossil merge -n b | |
| 326 | +test_status_list merge_renames-9-6 $RESULT " | |
| 327 | + $expectedMerge | |
| 328 | + REMINDER: this was a dry run - no files were actually changed. | |
| 329 | +" | |
| 330 | +test merge_renames-9-7 {[fossil changes] eq ""} | |
| 331 | + | |
| 332 | +################################################################### | |
| 333 | +# Test 10 # | |
| 334 | +# Merge swapped filenames, backout the swap, then merge changes # | |
| 335 | +################################################################### | |
| 336 | + | |
| 337 | +test_setup | |
| 338 | + | |
| 339 | +write_file f1 "f1" | |
| 340 | +write_file f2 "f2" | |
| 341 | +fossil add f1 f2 | |
| 342 | +fossil commit -m "add files" ;# N | |
| 343 | + | |
| 344 | +fossil mv --hard f1 f1-tmp | |
| 345 | +fossil mv --hard f2 f1 | |
| 346 | +fossil mv --hard f1-tmp f2 | |
| 347 | +fossil commit -b b -m "swap f1, f2" ;# P | |
| 348 | + | |
| 349 | +fossil update trunk | |
| 350 | +fossil merge b | |
| 351 | +test_status_list merge_renames-10-1 $RESULT { | |
| 352 | + RENAME f1 -> f2 | |
| 353 | + RENAME f2 -> f1 | |
| 354 | +} | |
| 355 | +test_file_contents merge_renames-10-2 f1 "f2" | |
| 356 | +test_file_contents merge_renames-10-3 f2 "f1" | |
| 357 | +fossil commit -m "merge b" | |
| 358 | + | |
| 359 | +fossil update b | |
| 360 | +write_file f1 f1.1 | |
| 361 | +write_file f2 f2.1 | |
| 362 | +fossil commit -m "edit" ;# M | |
| 363 | + | |
| 364 | +fossil update trunk | |
| 365 | +fossil merge --backout trunk | |
| 366 | +test_status_list merge_renames-10-4 $RESULT { | |
| 367 | + RENAME f1 -> f2 | |
| 368 | + RENAME f2 -> f1 | |
| 369 | +} | |
| 370 | +test_file_contents merge_renames-10-5 f1 "f1" | |
| 371 | +test_file_contents merge_renames-10-6 f2 "f2" | |
| 372 | +test_status_list merge_renames-10-7 [fossil changes] " | |
| 373 | + RENAMED f1 | |
| 374 | + RENAMED f2 | |
| 375 | + BACKOUT [commit_id trunk] | |
| 376 | +" | |
| 377 | +fossil commit -m "swap back" ;# V | |
| 378 | + | |
| 379 | +fossil merge b | |
| 380 | +test_status_list merge_renames-10-8 $RESULT { | |
| 381 | + UPDATE f1 | |
| 382 | + UPDATE f2 | |
| 383 | +} | |
| 384 | + | |
| 385 | +test_file_contents merge_renames-10-9 f1 "f2.1" | |
| 386 | +test_file_contents merge_renames-10-10 f2 "f1.1" | |
| 387 | + | |
| 388 | +############################################ | |
| 389 | +# Test 11 # | |
| 390 | +# Specifying a baseline # | |
| 391 | +############################################ | |
| 392 | + | |
| 393 | +test_setup | |
| 394 | + | |
| 395 | +write_file f1 "line" | |
| 396 | +fossil add f1 | |
| 397 | +fossil commit -m "add f1" | |
| 398 | + | |
| 399 | +write_file f1 "line\nline2" | |
| 400 | +fossil commit -b b -m "edit f2" --tag p1 | |
| 401 | + | |
| 402 | +fossil mv --hard f1 f2 | |
| 403 | +fossil commit -m "f1 -> f2" | |
| 404 | + | |
| 405 | +write_file f2 "line\nline2\nline3" | |
| 406 | +fossil commit -m "edit f2" --tag p2 | |
| 407 | + | |
| 408 | +write_file f2 "line\nline2\nline3\nline4" | |
| 409 | +fossil commit -m "edit f2" | |
| 410 | + | |
| 411 | +fossil update trunk | |
| 412 | +fossil merge --baseline p1 b | |
| 413 | +test_status_list merge_renames-11-1 $RESULT { | |
| 414 | + MERGE f1 | |
| 415 | + RENAME f1 -> f2 | |
| 416 | +} | |
| 417 | +test_file_contents merge_renames-11-2 f2 "line\nline3\nline4" | |
| 418 | +fossil revert | |
| 419 | +fossil merge --baseline p2 b | |
| 420 | +test_status_list merge_renames-11-3 $RESULT {MERGE f1} | |
| 421 | +test_file_contents merge_renames-11-4 f1 "line\nline4" | |
| 422 | + | |
| 423 | +################################################################# | |
| 424 | +# Test 12 # | |
| 425 | +# Merge involving a pivot that isn't a first-parent ancestor # | |
| 426 | +# of either the checked-out commit or the commit being merged # | |
| 427 | +################################################################# | |
| 428 | + | |
| 429 | +test_setup | |
| 430 | + | |
| 431 | +write_file f1 "f1\n" | |
| 432 | +fossil add f1 | |
| 433 | +fossil commit -m "add f1" --tag n | |
| 434 | + | |
| 435 | +fossil mv --hard f1 f1n | |
| 436 | +fossil commit -m "f1 -> f1n" | |
| 437 | + | |
| 438 | +fossil mv --hard f1n f1v | |
| 439 | +write_file f1v "f1v\n" | |
| 440 | +fossil commit -b v -m "f1n -> f1v, edit f1v" | |
| 441 | + | |
| 442 | +fossil update trunk | |
| 443 | +fossil mv --hard f1n f1m | |
| 444 | +fossil commit -b m -m "f1n -> f1m" | |
| 445 | + | |
| 446 | +fossil update n | |
| 447 | +fossil mv --hard f1 f1p | |
| 448 | +write_file f1p "f1\np" | |
| 449 | +fossil commit -b p -m "f1 -> f1p, edit f1p" | |
| 450 | + | |
| 451 | +fossil update m | |
| 452 | +fossil merge p | |
| 453 | +test_status_list merge_renames-12-1 $RESULT {UPDATE f1m} | |
| 454 | +test_file_contents merge_renames-12-2 f1m "f1\np" | |
| 455 | +fossil commit -m "merge p" | |
| 456 | + | |
| 457 | +write_file f1m "f1\nm" | |
| 458 | +fossil commit -m "edit f1m" | |
| 459 | + | |
| 460 | +fossil update v | |
| 461 | +fossil merge p | |
| 462 | +test_status_list merge_renames-12-3 $RESULT {MERGE f1v} | |
| 463 | +test_file_contents merge_renames-12-4 f1v "f1v\np" | |
| 464 | +fossil commit -m "merge p" | |
| 465 | + | |
| 466 | +fossil merge m | |
| 467 | +test_status_list merge_renames-12-5 $RESULT {MERGE f1v} | |
| 468 | +test_file_contents merge_renames-12-6 f1v "f1v\nm" | |
| 469 | +fossil commit -m "merge m" | |
| 200 | 470 | |
| 201 | 471 | ###################################### |
| 202 | 472 | # |
| 203 | 473 | # Tests for troubles not specifically linked with renames but that I'd like to |
| 204 | 474 | # write: |
| 205 | 475 |
| --- test/merge_renames.test | |
| +++ test/merge_renames.test | |
| @@ -1,9 +1,14 @@ | |
| 1 | # |
| 2 | # Tests for merging with renames |
| 3 | # |
| 4 | # |
| 5 | |
| 6 | require_no_open_checkout |
| 7 | |
| 8 | ###################################### |
| 9 | # Test 1 # |
| @@ -31,12 +36,11 @@ | |
| 31 | |
| 32 | write_file f1 "line6" |
| 33 | fossil commit -m "c4" |
| 34 | |
| 35 | fossil update pivot |
| 36 | fossil mv f1 f2 |
| 37 | file rename -force f1 f2 |
| 38 | fossil commit -b rename -m "c5" |
| 39 | |
| 40 | fossil merge trunk |
| 41 | fossil commit -m "trunk merged" |
| 42 | |
| @@ -44,26 +48,11 @@ | |
| 44 | write_file f3 "someline" |
| 45 | fossil add f3 |
| 46 | fossil commit -b branch2 -m "newbranch" |
| 47 | |
| 48 | fossil merge trunk |
| 49 | puts $RESULT |
| 50 | |
| 51 | set deletes 0 |
| 52 | foreach {status filename} $RESULT { |
| 53 | if {$status=="DELETE"} { |
| 54 | set deletes [expr $deletes + 1] |
| 55 | } |
| 56 | } |
| 57 | |
| 58 | if {$deletes!=0} { |
| 59 | # failed |
| 60 | protOut "Error, the merge should not delete any file" |
| 61 | test merge_renames-1 0 |
| 62 | } else { |
| 63 | test merge_renames-1 1 |
| 64 | } |
| 65 | |
| 66 | ###################################### |
| 67 | # Test 2 # |
| 68 | # Reported: Ticket [74413366fe5067] # |
| 69 | ###################################### |
| @@ -77,12 +66,11 @@ | |
| 77 | |
| 78 | write_file f2 "line2" |
| 79 | fossil add f2 |
| 80 | fossil commit -m "newfile" |
| 81 | |
| 82 | fossil mv f2 f2new |
| 83 | file rename -force f2 f2new |
| 84 | fossil commit -m "rename" |
| 85 | |
| 86 | fossil update pivot |
| 87 | write_file f1 "line3" |
| 88 | fossil commit -b branch -m "change" |
| @@ -91,27 +79,11 @@ | |
| 91 | fossil commit -m "trunk merged" |
| 92 | |
| 93 | fossil update trunk |
| 94 | |
| 95 | fossil merge branch |
| 96 | puts $RESULT |
| 97 | |
| 98 | # Not a nice way to check, but I don't know more tcl now |
| 99 | set deletes 0 |
| 100 | foreach {status filename} $RESULT { |
| 101 | if {$status=="DELETE"} { |
| 102 | set deletes [expr $deletes + 1] |
| 103 | } |
| 104 | } |
| 105 | |
| 106 | if {$deletes!=0} { |
| 107 | # failed |
| 108 | protOut "Error, the merge should not delete any file" |
| 109 | test merge_renames-2 0 |
| 110 | } else { |
| 111 | test merge_renames-2 1 |
| 112 | } |
| 113 | |
| 114 | ###################################### |
| 115 | # Test 3 # |
| 116 | # Reported: Ticket [30b28cf351] # |
| 117 | ###################################### |
| @@ -125,12 +97,11 @@ | |
| 125 | |
| 126 | write_file f2 "line2" |
| 127 | fossil add f2 |
| 128 | fossil commit -m "newfile" |
| 129 | |
| 130 | fossil mv f2 f2new |
| 131 | file rename -force f2 f2new |
| 132 | fossil commit -m "rename" |
| 133 | |
| 134 | fossil update pivot |
| 135 | write_file f1 "line3" |
| 136 | fossil commit -b branch -m "change" |
| @@ -139,34 +110,33 @@ | |
| 139 | fossil commit -m "trunk merged" |
| 140 | |
| 141 | fossil update trunk |
| 142 | |
| 143 | fossil merge branch |
| 144 | puts $RESULT |
| 145 | |
| 146 | # Not a nice way to check, but I don't know more tcl now |
| 147 | set deletes 0 |
| 148 | foreach {status filename} $RESULT { |
| 149 | if {$status=="DELETE"} { |
| 150 | set deletes [expr $deletes + 1] |
| 151 | } |
| 152 | } |
| 153 | |
| 154 | if {$deletes!=0} { |
| 155 | # failed |
| 156 | protOut "Error, the merge should not delete any file" |
| 157 | test merge_renames-3 0 |
| 158 | } else { |
| 159 | test merge_renames-3 1 |
| 160 | } |
| 161 | |
| 162 | ###################################### |
| 163 | # Test 4 # |
| 164 | # Reported: Ticket [67176c3aa4] # |
| 165 | ###################################### |
| 166 | |
| 167 | # TO BE WRITTEN. |
| 168 | |
| 169 | ###################################### |
| 170 | # Test 5 # |
| 171 | # Handle Rename/Add via Merge # |
| 172 | ###################################### |
| @@ -180,25 +150,325 @@ | |
| 180 | write_file f3 "f3 line" |
| 181 | fossil add f3 |
| 182 | fossil commit -m "branch file" -b branch_for_f3 |
| 183 | |
| 184 | fossil update trunk |
| 185 | fossil mv f1 f2 |
| 186 | file rename -force f1 f2 |
| 187 | write_file f1 "new f1 line" |
| 188 | fossil add f1 |
| 189 | fossil commit -m "rename and add file with old name" |
| 190 | |
| 191 | fossil update branch_for_f3 |
| 192 | fossil merge trunk |
| 193 | fossil commit -m "trunk merged, should have 3 files" |
| 194 | |
| 195 | fossil ls |
| 196 | |
| 197 | test merge_renames-5 {[normalize_result] eq {f1 |
| 198 | f2 |
| 199 | f3}} knownBug |
| 200 | |
| 201 | ###################################### |
| 202 | # |
| 203 | # Tests for troubles not specifically linked with renames but that I'd like to |
| 204 | # write: |
| 205 |
| --- test/merge_renames.test | |
| +++ test/merge_renames.test | |
| @@ -1,9 +1,14 @@ | |
| 1 | # |
| 2 | # Tests for merging with renames |
| 3 | # |
| 4 | # |
| 5 | |
| 6 | proc commit_id {version} { |
| 7 | regexp -line {^artifact:\s+(\S+)} [fossil whatis $version] - id |
| 8 | return $id |
| 9 | } |
| 10 | |
| 11 | require_no_open_checkout |
| 12 | |
| 13 | ###################################### |
| 14 | # Test 1 # |
| @@ -31,12 +36,11 @@ | |
| 36 | |
| 37 | write_file f1 "line6" |
| 38 | fossil commit -m "c4" |
| 39 | |
| 40 | fossil update pivot |
| 41 | fossil mv --hard f1 f2 |
| 42 | fossil commit -b rename -m "c5" |
| 43 | |
| 44 | fossil merge trunk |
| 45 | fossil commit -m "trunk merged" |
| 46 | |
| @@ -44,26 +48,11 @@ | |
| 48 | write_file f3 "someline" |
| 49 | fossil add f3 |
| 50 | fossil commit -b branch2 -m "newbranch" |
| 51 | |
| 52 | fossil merge trunk |
| 53 | test_status_list merge_renames-1 $RESULT {UPDATE f1} |
| 54 | |
| 55 | ###################################### |
| 56 | # Test 2 # |
| 57 | # Reported: Ticket [74413366fe5067] # |
| 58 | ###################################### |
| @@ -77,12 +66,11 @@ | |
| 66 | |
| 67 | write_file f2 "line2" |
| 68 | fossil add f2 |
| 69 | fossil commit -m "newfile" |
| 70 | |
| 71 | fossil mv --hard f2 f2new |
| 72 | fossil commit -m "rename" |
| 73 | |
| 74 | fossil update pivot |
| 75 | write_file f1 "line3" |
| 76 | fossil commit -b branch -m "change" |
| @@ -91,27 +79,11 @@ | |
| 79 | fossil commit -m "trunk merged" |
| 80 | |
| 81 | fossil update trunk |
| 82 | |
| 83 | fossil merge branch |
| 84 | test_status_list merge_renames-2 $RESULT {UPDATE f1} |
| 85 | |
| 86 | ###################################### |
| 87 | # Test 3 # |
| 88 | # Reported: Ticket [30b28cf351] # |
| 89 | ###################################### |
| @@ -125,12 +97,11 @@ | |
| 97 | |
| 98 | write_file f2 "line2" |
| 99 | fossil add f2 |
| 100 | fossil commit -m "newfile" |
| 101 | |
| 102 | fossil mv --hard f2 f2new |
| 103 | fossil commit -m "rename" |
| 104 | |
| 105 | fossil update pivot |
| 106 | write_file f1 "line3" |
| 107 | fossil commit -b branch -m "change" |
| @@ -139,34 +110,33 @@ | |
| 110 | fossil commit -m "trunk merged" |
| 111 | |
| 112 | fossil update trunk |
| 113 | |
| 114 | fossil merge branch |
| 115 | test_status_list merge_renames-3 $RESULT {UPDATE f1} |
| 116 | |
| 117 | ###################################### |
| 118 | # Test 4 # |
| 119 | # Reported: Ticket [67176c3aa4] # |
| 120 | ###################################### |
| 121 | |
| 122 | test_setup |
| 123 | |
| 124 | write_file f1 "f1" |
| 125 | fossil add f1 |
| 126 | fossil commit -m "add f1" |
| 127 | |
| 128 | write_file f1 "f1.1" |
| 129 | fossil commit --branch b -m "change f1" |
| 130 | |
| 131 | fossil update trunk |
| 132 | fossil mv --hard f1 f2 |
| 133 | fossil commit -m "f1 -> f2" |
| 134 | |
| 135 | fossil merge b |
| 136 | test_status_list merge_renames-4-1 $RESULT {UPDATE f2} |
| 137 | test_file_contents merge_renames-4-2 f2 "f1.1" |
| 138 | |
| 139 | ###################################### |
| 140 | # Test 5 # |
| 141 | # Handle Rename/Add via Merge # |
| 142 | ###################################### |
| @@ -180,25 +150,325 @@ | |
| 150 | write_file f3 "f3 line" |
| 151 | fossil add f3 |
| 152 | fossil commit -m "branch file" -b branch_for_f3 |
| 153 | |
| 154 | fossil update trunk |
| 155 | fossil mv --hard f1 f2 |
| 156 | write_file f1 "new f1 line" |
| 157 | fossil add f1 |
| 158 | fossil commit -m "rename and add file with old name" |
| 159 | |
| 160 | fossil update branch_for_f3 |
| 161 | fossil merge trunk |
| 162 | test_status_list merge_renames-5-1 $RESULT { |
| 163 | RENAME f1 -> f2 |
| 164 | ADDED f1 |
| 165 | } |
| 166 | |
| 167 | fossil commit -m "trunk merged, should have 3 files" |
| 168 | |
| 169 | fossil ls |
| 170 | |
| 171 | test merge_renames-5-2 {[normalize_result] eq {f1 |
| 172 | f2 |
| 173 | f3}} |
| 174 | |
| 175 | ##################################### |
| 176 | # Test 6 # |
| 177 | # Merging a branch multiple times # |
| 178 | ##################################### |
| 179 | |
| 180 | test_setup |
| 181 | |
| 182 | write_file f1 "f1" |
| 183 | fossil add f1 |
| 184 | fossil commit -m "add f1" |
| 185 | |
| 186 | fossil mv --hard f1 f2 |
| 187 | fossil commit -b b -m "f1 -> f2" |
| 188 | |
| 189 | fossil update trunk |
| 190 | write_file f3 "f3" |
| 191 | write_file f4 "f4" |
| 192 | fossil add f3 f4 |
| 193 | fossil ci -m "add f3, f4" |
| 194 | |
| 195 | fossil mv --hard f3 f3-old |
| 196 | fossil mv --hard f4 f3 |
| 197 | fossil mv --hard f3-old f4 |
| 198 | fossil ci -m "swap f3 and f4" |
| 199 | |
| 200 | write_file f1 "f1.1" |
| 201 | fossil commit -m "edit f1" |
| 202 | |
| 203 | fossil update b |
| 204 | fossil merge trunk |
| 205 | fossil commit -m "merge trunk" |
| 206 | |
| 207 | fossil update trunk |
| 208 | write_file f1 "f1.2" |
| 209 | write_file f3 "f3.1" |
| 210 | write_file f4 "f4.1" |
| 211 | fossil commit -m "edit f1, f4" |
| 212 | |
| 213 | fossil update b |
| 214 | fossil merge trunk |
| 215 | test_status_list merge_renames-6-1 $RESULT { |
| 216 | UPDATE f2 |
| 217 | UPDATE f3 |
| 218 | UPDATE f4 |
| 219 | } |
| 220 | test_file_contents merge_renames-6-2 f2 "f1.2" |
| 221 | test_file_contents merge_renames-6-3 f3 "f3.1" |
| 222 | test_file_contents merge_renames-6-4 f4 "f4.1" |
| 223 | |
| 224 | ######################################################################## |
| 225 | # Test 7 # |
| 226 | # Merging with an uncommitted rename of a file that has been renamed # |
| 227 | # in the merged branch and adding a new file with the original name # |
| 228 | ######################################################################## |
| 229 | |
| 230 | test_setup |
| 231 | |
| 232 | write_file f1 "f1" |
| 233 | fossil add f1 |
| 234 | fossil commit -m "add f1" |
| 235 | |
| 236 | fossil mv --hard f1 f2 |
| 237 | write_file f2 "f2" |
| 238 | fossil commit -b b -m "f1 -> f2, edit f2" |
| 239 | |
| 240 | fossil update trunk |
| 241 | fossil mv --hard f1 f3 |
| 242 | write_file f1 "f1.1" |
| 243 | fossil add f1 |
| 244 | fossil merge b |
| 245 | test_status_list merge_renames-7-1 $RESULT {UPDATE f3} |
| 246 | test_file_contents merge_renames-7-2 f1 "f1.1" |
| 247 | test_file_contents merge_renames-7-3 f3 "f2" |
| 248 | |
| 249 | ###################################################### |
| 250 | # Test 8 # |
| 251 | # Merging two branches that both add the same file # |
| 252 | ###################################################### |
| 253 | |
| 254 | test_setup |
| 255 | |
| 256 | write_file f1 "f1.1" |
| 257 | fossil add f1 |
| 258 | fossil commit -b b1 -m "add f1" |
| 259 | |
| 260 | fossil update trunk |
| 261 | write_file f1 "f1.2" |
| 262 | fossil add f1 |
| 263 | fossil commit -b b2 -m "add f1" |
| 264 | |
| 265 | fossil update trunk |
| 266 | fossil merge b1 |
| 267 | fossil merge b2 |
| 268 | test_status_list merge_renames-8-1 $RESULT { |
| 269 | WARNING: no common ancestor for f1 |
| 270 | } |
| 271 | |
| 272 | fossil revert |
| 273 | fossil merge --integrate b1 |
| 274 | fossil merge b2 |
| 275 | test_status_list merge_renames-8-2 $RESULT { |
| 276 | WARNING: no common ancestor for f1 |
| 277 | } |
| 278 | |
| 279 | ############################################# |
| 280 | # Test 9 # |
| 281 | # Merging a delete/rename/add combination # |
| 282 | ############################################# |
| 283 | |
| 284 | test_setup |
| 285 | |
| 286 | write_file f1 "f1" |
| 287 | write_file f2 "f2" |
| 288 | fossil add f1 f2 |
| 289 | fossil commit -m "add files" |
| 290 | |
| 291 | fossil rm --hard f2 |
| 292 | fossil commit -b b -m "delete f2" |
| 293 | |
| 294 | fossil mv --hard f1 f2 |
| 295 | fossil commit -m "f1 -> f2" |
| 296 | |
| 297 | write_file f1 "f1.1" |
| 298 | fossil add f1 |
| 299 | fossil commit -m "add new f1" |
| 300 | |
| 301 | fossil update trunk |
| 302 | fossil merge b |
| 303 | set expectedMerge { |
| 304 | DELETE f2 |
| 305 | RENAME f1 -> f2 |
| 306 | ADDED f1 |
| 307 | } |
| 308 | test_status_list merge_renames-9-1 $RESULT $expectedMerge |
| 309 | fossil changes |
| 310 | test_status_list merge_renames-9-2 $RESULT " |
| 311 | MERGED_WITH [commit_id b] |
| 312 | ADDED_BY_MERGE f1 |
| 313 | RENAMED f2 |
| 314 | DELETED f2 (overwritten by rename) |
| 315 | " |
| 316 | test_file_contents merge_renames-9-3 f1 "f1.1" |
| 317 | test_file_contents merge_renames-9-4 f2 "f1" |
| 318 | |
| 319 | # Undo and ensure a dry run merge results in no changes |
| 320 | fossil undo |
| 321 | test_status_list merge_renames-9-5 $RESULT { |
| 322 | UNDO f1 |
| 323 | UNDO f2 |
| 324 | } |
| 325 | fossil merge -n b |
| 326 | test_status_list merge_renames-9-6 $RESULT " |
| 327 | $expectedMerge |
| 328 | REMINDER: this was a dry run - no files were actually changed. |
| 329 | " |
| 330 | test merge_renames-9-7 {[fossil changes] eq ""} |
| 331 | |
| 332 | ################################################################### |
| 333 | # Test 10 # |
| 334 | # Merge swapped filenames, backout the swap, then merge changes # |
| 335 | ################################################################### |
| 336 | |
| 337 | test_setup |
| 338 | |
| 339 | write_file f1 "f1" |
| 340 | write_file f2 "f2" |
| 341 | fossil add f1 f2 |
| 342 | fossil commit -m "add files" ;# N |
| 343 | |
| 344 | fossil mv --hard f1 f1-tmp |
| 345 | fossil mv --hard f2 f1 |
| 346 | fossil mv --hard f1-tmp f2 |
| 347 | fossil commit -b b -m "swap f1, f2" ;# P |
| 348 | |
| 349 | fossil update trunk |
| 350 | fossil merge b |
| 351 | test_status_list merge_renames-10-1 $RESULT { |
| 352 | RENAME f1 -> f2 |
| 353 | RENAME f2 -> f1 |
| 354 | } |
| 355 | test_file_contents merge_renames-10-2 f1 "f2" |
| 356 | test_file_contents merge_renames-10-3 f2 "f1" |
| 357 | fossil commit -m "merge b" |
| 358 | |
| 359 | fossil update b |
| 360 | write_file f1 f1.1 |
| 361 | write_file f2 f2.1 |
| 362 | fossil commit -m "edit" ;# M |
| 363 | |
| 364 | fossil update trunk |
| 365 | fossil merge --backout trunk |
| 366 | test_status_list merge_renames-10-4 $RESULT { |
| 367 | RENAME f1 -> f2 |
| 368 | RENAME f2 -> f1 |
| 369 | } |
| 370 | test_file_contents merge_renames-10-5 f1 "f1" |
| 371 | test_file_contents merge_renames-10-6 f2 "f2" |
| 372 | test_status_list merge_renames-10-7 [fossil changes] " |
| 373 | RENAMED f1 |
| 374 | RENAMED f2 |
| 375 | BACKOUT [commit_id trunk] |
| 376 | " |
| 377 | fossil commit -m "swap back" ;# V |
| 378 | |
| 379 | fossil merge b |
| 380 | test_status_list merge_renames-10-8 $RESULT { |
| 381 | UPDATE f1 |
| 382 | UPDATE f2 |
| 383 | } |
| 384 | |
| 385 | test_file_contents merge_renames-10-9 f1 "f2.1" |
| 386 | test_file_contents merge_renames-10-10 f2 "f1.1" |
| 387 | |
| 388 | ############################################ |
| 389 | # Test 11 # |
| 390 | # Specifying a baseline # |
| 391 | ############################################ |
| 392 | |
| 393 | test_setup |
| 394 | |
| 395 | write_file f1 "line" |
| 396 | fossil add f1 |
| 397 | fossil commit -m "add f1" |
| 398 | |
| 399 | write_file f1 "line\nline2" |
| 400 | fossil commit -b b -m "edit f2" --tag p1 |
| 401 | |
| 402 | fossil mv --hard f1 f2 |
| 403 | fossil commit -m "f1 -> f2" |
| 404 | |
| 405 | write_file f2 "line\nline2\nline3" |
| 406 | fossil commit -m "edit f2" --tag p2 |
| 407 | |
| 408 | write_file f2 "line\nline2\nline3\nline4" |
| 409 | fossil commit -m "edit f2" |
| 410 | |
| 411 | fossil update trunk |
| 412 | fossil merge --baseline p1 b |
| 413 | test_status_list merge_renames-11-1 $RESULT { |
| 414 | MERGE f1 |
| 415 | RENAME f1 -> f2 |
| 416 | } |
| 417 | test_file_contents merge_renames-11-2 f2 "line\nline3\nline4" |
| 418 | fossil revert |
| 419 | fossil merge --baseline p2 b |
| 420 | test_status_list merge_renames-11-3 $RESULT {MERGE f1} |
| 421 | test_file_contents merge_renames-11-4 f1 "line\nline4" |
| 422 | |
| 423 | ################################################################# |
| 424 | # Test 12 # |
| 425 | # Merge involving a pivot that isn't a first-parent ancestor # |
| 426 | # of either the checked-out commit or the commit being merged # |
| 427 | ################################################################# |
| 428 | |
| 429 | test_setup |
| 430 | |
| 431 | write_file f1 "f1\n" |
| 432 | fossil add f1 |
| 433 | fossil commit -m "add f1" --tag n |
| 434 | |
| 435 | fossil mv --hard f1 f1n |
| 436 | fossil commit -m "f1 -> f1n" |
| 437 | |
| 438 | fossil mv --hard f1n f1v |
| 439 | write_file f1v "f1v\n" |
| 440 | fossil commit -b v -m "f1n -> f1v, edit f1v" |
| 441 | |
| 442 | fossil update trunk |
| 443 | fossil mv --hard f1n f1m |
| 444 | fossil commit -b m -m "f1n -> f1m" |
| 445 | |
| 446 | fossil update n |
| 447 | fossil mv --hard f1 f1p |
| 448 | write_file f1p "f1\np" |
| 449 | fossil commit -b p -m "f1 -> f1p, edit f1p" |
| 450 | |
| 451 | fossil update m |
| 452 | fossil merge p |
| 453 | test_status_list merge_renames-12-1 $RESULT {UPDATE f1m} |
| 454 | test_file_contents merge_renames-12-2 f1m "f1\np" |
| 455 | fossil commit -m "merge p" |
| 456 | |
| 457 | write_file f1m "f1\nm" |
| 458 | fossil commit -m "edit f1m" |
| 459 | |
| 460 | fossil update v |
| 461 | fossil merge p |
| 462 | test_status_list merge_renames-12-3 $RESULT {MERGE f1v} |
| 463 | test_file_contents merge_renames-12-4 f1v "f1v\np" |
| 464 | fossil commit -m "merge p" |
| 465 | |
| 466 | fossil merge m |
| 467 | test_status_list merge_renames-12-5 $RESULT {MERGE f1v} |
| 468 | test_file_contents merge_renames-12-6 f1v "f1v\nm" |
| 469 | fossil commit -m "merge m" |
| 470 | |
| 471 | ###################################### |
| 472 | # |
| 473 | # Tests for troubles not specifically linked with renames but that I'd like to |
| 474 | # write: |
| 475 |
+19
| --- test/tester.tcl | ||
| +++ test/tester.tcl | ||
| @@ -390,10 +390,29 @@ | ||
| 390 | 390 | protOut " Expected:\n [join $expected "\n "]" 1 |
| 391 | 391 | protOut " Got:\n [join $result "\n "]" 1 |
| 392 | 392 | test $name 0 $constraints |
| 393 | 393 | } |
| 394 | 394 | } |
| 395 | + | |
| 396 | +# Perform a test on the contents of a file | |
| 397 | +# | |
| 398 | +proc test_file_contents {name path expected {constraints ""}} { | |
| 399 | + if {[file exists $path]} { | |
| 400 | + set result [read_file $path] | |
| 401 | + set passed [expr {$result eq $expected}] | |
| 402 | + if {!$passed} { | |
| 403 | + set expectedLines [split $expected "\n"] | |
| 404 | + set resultLines [split $result "\n"] | |
| 405 | + protOut " Expected:\n [join $expectedLines "\n "]" 1 | |
| 406 | + protOut " Got:\n [join $resultLines "\n "]" 1 | |
| 407 | + } | |
| 408 | + } else { | |
| 409 | + set passed 0 | |
| 410 | + protOut " File does not exist: $path" 1 | |
| 411 | + } | |
| 412 | + test $name $passed $constraints | |
| 413 | +} | |
| 395 | 414 | |
| 396 | 415 | # Append all arguments into a single value and then returns it. |
| 397 | 416 | # |
| 398 | 417 | proc appendArgs {args} { |
| 399 | 418 | eval append result $args |
| 400 | 419 | |
| 401 | 420 | ADDED test/wiki.test |
| 402 | 421 | ADDED www/encryptedrepos.wiki |
| --- test/tester.tcl | |
| +++ test/tester.tcl | |
| @@ -390,10 +390,29 @@ | |
| 390 | protOut " Expected:\n [join $expected "\n "]" 1 |
| 391 | protOut " Got:\n [join $result "\n "]" 1 |
| 392 | test $name 0 $constraints |
| 393 | } |
| 394 | } |
| 395 | |
| 396 | # Append all arguments into a single value and then returns it. |
| 397 | # |
| 398 | proc appendArgs {args} { |
| 399 | eval append result $args |
| 400 | |
| 401 | DDED test/wiki.test |
| 402 | DDED www/encryptedrepos.wiki |
| --- test/tester.tcl | |
| +++ test/tester.tcl | |
| @@ -390,10 +390,29 @@ | |
| 390 | protOut " Expected:\n [join $expected "\n "]" 1 |
| 391 | protOut " Got:\n [join $result "\n "]" 1 |
| 392 | test $name 0 $constraints |
| 393 | } |
| 394 | } |
| 395 | |
| 396 | # Perform a test on the contents of a file |
| 397 | # |
| 398 | proc test_file_contents {name path expected {constraints ""}} { |
| 399 | if {[file exists $path]} { |
| 400 | set result [read_file $path] |
| 401 | set passed [expr {$result eq $expected}] |
| 402 | if {!$passed} { |
| 403 | set expectedLines [split $expected "\n"] |
| 404 | set resultLines [split $result "\n"] |
| 405 | protOut " Expected:\n [join $expectedLines "\n "]" 1 |
| 406 | protOut " Got:\n [join $resultLines "\n "]" 1 |
| 407 | } |
| 408 | } else { |
| 409 | set passed 0 |
| 410 | protOut " File does not exist: $path" 1 |
| 411 | } |
| 412 | test $name $passed $constraints |
| 413 | } |
| 414 | |
| 415 | # Append all arguments into a single value and then returns it. |
| 416 | # |
| 417 | proc appendArgs {args} { |
| 418 | eval append result $args |
| 419 | |
| 420 | DDED test/wiki.test |
| 421 | DDED www/encryptedrepos.wiki |
+334
| --- a/test/wiki.test | ||
| +++ b/test/wiki.test | ||
| @@ -0,0 +1,334 @@ | ||
| 1 | +# | |
| 2 | +# Copyright (c) 2016 D. Richard Hipp | |
| 3 | +# | |
| 4 | +# This program is free software; you can redistribute it and/or | |
| 5 | +# modify it under the terms of the Simplified BSD License (also | |
| 6 | +# known as the "2-Clause License" or "FreeBSD License".) | |
| 7 | +# | |
| 8 | +# This program is distributed in the hope that it will be useful, | |
| 9 | +# but without any warranty; without even the implied warranty of | |
| 10 | +# merchantability or fitness for a particular purpose. | |
| 11 | +# | |
| 12 | +# Author contact information: | |
| 13 | +# [email protected] | |
| 14 | +# http://www.hwaci.com/drh/ | |
| 15 | +# | |
| 16 | +############################################################################ | |
| 17 | +# | |
| 18 | +# Test wiki and attachment comman (c) 2016 D. Richard Hipp | |
| 19 | +# | |
| 20 | +# This program is free software; you can redistribute it and/or | |
| 21 | +# modify it under the terms of the Simplified BSD License (also | |
| 22 | +# known as the "2buted in the hoperegsub -all { +\n} $x \n x | |
| 23 | + (c) 2016 D. Richard Hipp | |
| 24 | +# | |
| 25 | +# # | |
| 26 | +# Copyright (c) 201 that it will be usefuy \n y | |
| 27 | + (c) 2016 D. Richard # | |
| 28 | +# Coi.com | |
| 29 | +# http://www.hwaci.com/drh/ | |
| 30 | +# | |
| 31 | +############################################################################ | |
| 32 | +# | |
| 33 | +# Test wiki and attachment command Support | |
| 34 | +# | |
| 35 | + | |
| 36 | +test_setup | |
| 37 | + | |
| 38 | +# Disable backoffice for this test, otherwise its process lingers for some | |
| 39 | +# time after the test has completed. | |
| 40 | +# Perhaps, this should be done in test_setup and enabled explicitly only | |
| 41 | +# when needed. | |
| 42 | +fossil set backoffice-disable 1 | |
| 43 | + | |
| 44 | +# Return true if two files are similar (i.e not only compress trailing spaces | |
| 45 | +# from a lset CODE [regex regsub -all { +\n} $x \n x | |
| 46 | + No info link found [read_file $b] | |
| 47 | + regsub -all { +http << "GET /artifact/$info D. Richard Hip# | |
| 48 | +# Copyright {a b} { | |
| 49 | + set x "" | |
| 50 | + if {[file exists $a]} { | |
| 51 | + set x [read_file $a] | |
| 52 | + regsub -all { +\n} $x \n x | |
| 53 | + regsub -all {\n$} $x {} x | |
| 54 | + } | |
| 55 | + set y "" | |
| 56 | + if {[file exists $b]} { | |
| 57 | + set y [read_file $b] | |
| 58 | + regsub -all { +\n} $y \n y | |
| 59 | + regsub -all {\n$} $y {} y | |
| 60 | + } | |
| 61 | + return [expr {$x==$y}] | |
| 62 | +} | |
| 63 | + | |
| 64 | +# Return the mime type in the manifest for a given wiki page | |
| 65 | +# Defaults to "error: some text" if the manifest can't be located and | |
| 66 | +# "text/x-fossil-wiki" (the default mimetype for rendering) | |
| 67 | +# if the N card is omitted in the manifest. | |
| 68 | +# Note: Makes fossil calls, so $CODE and $RESULT will be corrupted | |
| 69 | +proc get_mime_type {name} { | |
| 70 | + global CODE RESULT | |
| 71 | + fossil http << "GET /wiki?name=$name" | |
| 72 | + if {$CODE != 0} { | |
| 73 | + return "error: /wiki?name=$name $CODE $RESULT" | |
| 74 | + } | |
| 75 | + fossil whatis --type w $name | |
| 76 | + if {$CODE != 0} { | |
| 77 | + return "error: fossil whatis --type w $name $CODE $RESULT" | |
| 78 | + } | |
| 79 | + set CODE [regexp -line {^artifact:\s*([0-9a-f]+)$} $RESULT match info] | |
| 80 | + if {$CODE == 0} { | |
| 81 | + return "error: whatis returned no info for wiki page $name" | |
| 82 | + } | |
| 83 | + fossil artifact $info | |
| 84 | + if {$CODE != 0} { | |
| 85 | + return "error: fossil artifact $info $CODE $RESULT" | |
| 86 | + } | |
| 87 | + set CODE [regexp -line {^N (.*)$} $RESULT match mimetype] | |
| 88 | + if {$CODE == 0} { | |
| 89 | + return "text/x-fossil-wiki" | |
| 90 | + } | |
| 91 | + return $mimetype | |
| 92 | +} | |
| 93 | + | |
| 94 | + | |
| 95 | +############################################################################### | |
| 96 | +# Initially there should be no wiki entries | |
| 97 | +fossil wiki list | |
| 98 | +test wiki-0 {[normalize_result] eq {}} | |
| 99 | + | |
| 100 | +############################################################################### | |
| 101 | +# Adding an entry should add it to the wiki list | |
| 102 | +write_file f1 "first wiki note" | |
| 103 | +fossil wiki create tcltest f1 | |
| 104 | +test wiki-1 {$CODE == 0} | |
| 105 | +fossil wiki list | |
| 106 | +test wiki-2 {[normalize_result] eq {tcltest}} | |
| 107 | + | |
| 108 | +############################################################################### | |
| 109 | +# Trying to add the same entry should fail | |
| 110 | +fossil wiki create tcltest f1 -expectError | |
| 111 | +test wiki-3 {$CODE != 0} | |
| 112 | + | |
| 113 | +############################################################################### | |
| 114 | +# exporting the wiki page should give back similar text | |
| 115 | +fossil wiki export tcltest a1 | |
| 116 | +test wiki-4 {[similar_file f1 a1]} | |
| 117 | + | |
| 118 | +############################################################################### | |
| 119 | +# commiting a change to an existing page should replace the page on export | |
| 120 | +write_file f2 "second version of the page" | |
| 121 | +fossil wiki commit tcltest f2 | |
| 122 | +test wiki-5 {$CODE == 0} | |
| 123 | +fossil wiki export tcltest a2 | |
| 124 | +test wiki-6 {[similar_file f2 a2]} | |
| 125 | + | |
| 126 | +############################################################################### | |
| 127 | +# But we shouldn't be able to update non-existant pages | |
| 128 | +fossil wiki commit doesntexist f1 -expectError | |
| 129 | +test wiki-7 {$CODE != 0} | |
| 130 | + | |
| 131 | +############################################################################### | |
| 132 | +# There shouldn't be any tech notes at this point | |
| 133 | +fossil wiki list --technote | |
| 134 | +test wiki-8 {[normalize_result] eq {}} | |
| 135 | + | |
| 136 | +############################################################################### | |
| 137 | +# Creating a tech note with a specified timestamp should add a technote | |
| 138 | +write_file f3 "A technote" | |
| 139 | +f ossil wiki create technote f3 --technote {2016-01-01 12:34} | |
| 140 | +test wiki-9 {$CODE == 0} | |
| 141 | +fossil wiki list --technote | |
| 142 | +test wiki-10 {[normalize_result] eq {2016-01-01 12:34:00}} | |
| 143 | +fossil wiki list --technote --show-technote-ids | |
| 144 | +set technotelist [split $RESULT "\n"] | |
| 145 | +set veryfirsttechnoteid [lindex [split [lindex $technotelist 0]] 0] | |
| 146 | + | |
| 147 | +############################################################################### | |
| 148 | +# exporting that technote should give back similar text | |
| 149 | +fossil wiki export a3 --technote {2016-01-01 12:34:00} | |
| 150 | +test wiki-11 {[similar_file f3 a3]} | |
| 151 | + | |
| 152 | +############################################################################### | |
| 153 | +# Trying to add a technote with the same timestamp should succeed and create a | |
| 154 | +# second tech note | |
| 155 | +fossil wiki create 2ndnote f3 -technote {2016-01-01 12:34} | |
| 156 | +test wiki-13 {$CODE == 0} | |
| 157 | +fossil wiki list --technote | |
| 158 | +set technotelist [split $RESULT "\n"] | |
| 159 | +test wiki-13.1 {[llength $technotelist] == 2} | |
| 160 | + | |
| 161 | +############################################################################### | |
| 162 | +# commiting a change to an existing technote should replace the page on export | |
| 163 | +# (this should update th rt | |
| 164 | +# (this should update the tech note from wiki-13 as that the most recently | |
| 165 | +# updated one, that should also be the one exported by the export command) | |
| 166 | +write_file f4 "technote 2nd variant" | |
| 167 | +fossil wiki commit technote f4 --technote {2016-01-01 12:34} | |
| 168 | +test wiki-14 {$CODE == 0} | |
| 169 | +fossil wiki export a4 --technote {2016-01-01 12:34} | |
| 170 | +test wiki-15 {[similar_file f4 a4]} | |
| 171 | +# Also check that the tech note with the same timestamp, but modified less | |
| 172 | +# recently still has its original text | |
| 173 | +fossil wiki export a4.1 --technote $veryfirsttechnoteid | |
| 174 | +test wiki-15.1 {[similar_file f3 a4.1]} | |
| 175 | + | |
| 176 | +############################################################################### | |
| 177 | +# But we shouldn't be able to update non-existant pages | |
| 178 | +fossil wiki commit doesntexist f1 -expectError | |
| 179 | +test wiki-16 {$CODE != 0} | |
| 180 | + | |
| 181 | +############################################################################### | |
| 182 | +# Check specifying tags for a technote is OK | |
| 183 | +write_file f5 "technote with tags" | |
| 184 | +fossil wiki create {tagged technote} f5 --technote {2016-01-02 12:34} --technote-tags {A B} | |
| 185 | +test wiki-17 {$CODE == 0} | |
| 186 | +write_file f5.1 "editted and tagged technote" | |
| 187 | +fossil wiki commit {tagged technote} f5 --technote {2016-01-02 12:34} --t note {2016-01 -03 12:34} --technote-bgcolor blue | |
| 188 | +test wiki-28 {$CODE == 0} | |
| 189 | + | |
| 190 | +############################################################################### | |
| 191 | +# _file f7 "Different timestamps" | |
| 192 | +fossil wiki create technotenow f7 --technote {2016-01-04 12:34:56+00:00} | |
| 193 | +test wiki-29 {$CODE == 0} | |
| 194 | + | |
| 195 | +############################################################################### | |
| 196 | +# Check a technote appears on the timeline | |
| 197 | +write_file f8 "Contents of a 'unique' tech note" | |
| 198 | +fossil wiki create {Unique technote} f8 --technote {2016-01-05 01:02:03} | |
| 199 | +fossil timeline | |
| 200 | +test wiki-30 {[string match *Unique*technote* $RESULT]} | |
| 201 | + | |
| 202 | +############################################################################### | |
| 203 | +# Check for a collision between an attachment and a note, this was a | |
| 204 | +# bug that resulted from some code treating the attachment entry as if it | |
| 205 | +# were a technote when it isn't really. | |
| 206 | +# | |
| 207 | +# First, wait for the top of the next second so the attachment | |
| 208 | +# happens at a known time, then add an attachment to an existing note | |
| 209 | +# and a new note immediately after. | |
| 210 | + | |
| 211 | +set t0 [clock seconds] | |
| 212 | +while {$t0 == [clock seconds]} { | |
| 213 | + after 100 | |
| 214 | +} | |
| 215 | +set t1 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"] | |
| 216 | +write_fil -%m-%d %H:%M:%S"] | |
| 217 | +write_file f9 "Timestamp: $t1" | |
| 218 | +fossil attachment add f9 --technote {2016-01-05 01:02:03} | |
| 219 | +test wiki-31 {$CODE == 0} | |
| 220 | +fossil wiki create {A CODE == 0} | |
| 221 | +# | |
| 222 | +# Now waste time until the next second so that the remaining tests | |
| 223 | +# don't have to worry about a potential collision | |
| 224 | +set t0 [clock seconds] | |
| 225 | +while {$t0 == [clock seconds]} { | |
| 226 | + after 100 | |
| 227 | +} | |
| 228 | + | |
| 229 | +############################################################################### | |
| 230 | +# Check a technote with no timestamp cannot be created, but that | |
| 231 | +# "now " is a valid stamp. | |
| 232 | +set t2 [clock format [clock seconds] -gmt 1 -format # Copyright (c) 2016 D. Richard Hiiotelist [llength $technotelified timest ki create technotenow f7 --technote {2016-01-04 12:34:56+00:00} | |
| 233 | +test wiki-29 {$CODE == 0} | |
| 234 | + | |
| 235 | +############################################################################### | |
| 236 | +# Check a technote appears on the timeline | |
| 237 | +write_file f8 "Contents of a 'unique' tech note" | |
| 238 | +fossil wiki create {Unique technote} f8 --technote {2016-01-05 01:02:03} | |
| 239 | +fossil timeline | |
| 240 | +test wiki-30 {[string match *Unique*technote* $RESULT]} | |
| 241 | + | |
| 242 | +############################################################################### | |
| 243 | +# Check for a collision between an attachment and a note, this was a | |
| 244 | +# bug that resulted from some code treating the attachment entry as if it | |
| 245 | +# were a technote when it isn't really. | |
| 246 | +# | |
| 247 | +# First, wait for the top of the next second so the attachment | |
| 248 | +# happens at a known time, then add an attachment to an existing note | |
| 249 | +# and a new note immediately after. | |
| 250 | + | |
| 251 | +set t0 [clock seconds] | |
| 252 | +while {$t0 == [clock seconds]} { | |
| 253 | + after 100 | |
| 254 | +} | |
| 255 | +set t1 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"] | |
| 256 | +write_file f9 "Timestamp: $t1" | |
| 257 | +fossil attachment add f9 --technote {2016-01-05 01:02:03} | |
| 258 | +test wiki-31 {$CODE == 0} | |
| 259 | +fossil wiki create {Attachment collision} f9 --technote now | |
| 260 | +test wiki-32 {$CODE == 0} | |
| 261 | +# | |
| 262 | +# Now waste time until the next second so that the remaining tests | |
| 263 | +# don't have to worry about a potential collision | |
| 264 | +set t0 [clock seconds] | |
| 265 | +while {$t0 == [clock seconds]} { | |
| 266 | + after 100 | |
| 267 | +} | |
| 268 | + | |
| 269 | +############################################################################### | |
| 270 | +# Check a technote with no timestamp cannot be created, but that | |
| 271 | +# "now" is a valid stamp. | |
| 272 | +s et t2 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"] | |
| 273 | +write_file f10 "Even unstampted notes are delivered.\nStamped $t2" | |
| 274 | +fossil wiki create "Unstamped Note" f10 --technote -expectError | |
| 275 | +test wiki-33 {$CODE != 0} | |
| 276 | +fossil wiki create "Unstamped Note" f10 --technote now | |
| 277 | +test wiki-34 {$CODE == 0} | |
| 278 | +fossil wiki list -t | |
| 279 | +test wiki-35 {[string match "*$t2*" $RESULT]} | |
| 280 | + | |
| 281 | +############################################################################### | |
| 282 | +# Check an attachment to it in the same second works. | |
| 283 | +write_file f11 "Time Stamp was $t2" | |
| 284 | +fossil attachment add f11 --technote $t2 | |
| 285 | +test wiki-36 {$CODE == 0} | |
| 286 | +fossil timeline | |
| 287 | +test wiki-36-1 {$CODE == 0} | |
| 288 | +fossil wiki list -t | |
| 289 | +test wiki-36-2 {$CODE == 0} | |
| 290 | + | |
| 291 | +########################################################################### #technotelist [split $RESULT "\n"] | |
| 292 | +for {set i 0} {$i < [llength $technotelist]} {incr i} { | |
| 293 | + set fullid [lindex $technotelist $i] | |
| 294 | + set id [string range $fullid 0 3] | |
| 295 | + dict incr idcounts $id | |
| 296 | + if {[dict get $idcounts $id] > $maxcount} { | |
| 297 | + set maxid $id | |
| 298 | + incr maxcount | |
| 299 | + } | |
| 300 | +} | |
| 301 | +# get i so that, as a julian date, it is in the 1800s, i.e., older than | |
| 302 | +# any other tech note, but after 1 AD | |
| 303 | +set i 2400000 | |
| 304 | +while {$maxcount < 2} { | |
| 305 | + # keep getting older | |
| 306 | + incr i -1 | |
| 307 | + write_file f13 "A tech note with timestamp of jday=$i" | |
| 308 | + fossil wiki create "timestamp of $i" f13 --technote "$i" | |
| 309 | + fossil wiki list --technote --show-technote-ids | |
| 310 | + set technotelist [split $RESULT "\n"] | |
| 311 | + set oldesttechnoteid [lindex [split [lindex $technotelist [llength $technotelist]-1]] 0] | |
| 312 | + set id [string range $oldesttechnoteid 0 3] | |
| 313 | + dict incr idcounts $id | |
| 314 | + if {[dict get $idcounts $id] > $maxcount} { | |
| 315 | + set maxid $id | |
| 316 | + incr maxcount | |
| 317 | + } | |
| 318 | +} | |
| 319 | +# Save the duplicate id for this and later tests | |
| 320 | +set duplicateid $maxid | |
| 321 | +fossil wiki export a13 --technote $duplicateid -expectError | |
| 322 | +test wiki-42 {$CODE != 0} | |
| 323 | + | |
| 324 | +############################################################################### | |
| 325 | +# Check we can update technote by its id | |
| 326 | +write_file f14 "Updated text for the really old tech note" | |
| 327 | +fossil wiki commit {Old tech note} f14 --technote $anoldtechnoteid | |
| 328 | +fossil wiki export a14 --technote $anoldtechnoteid | |
| 329 | +test wiki-43 {[similar_file f14 a14]} | |
| 330 | + | |
| 331 | +############################################################################### | |
| 332 | +# Check we can add attachments to a technote by its id | |
| 333 | +fossil attachment add fa --technote $anoldtechnoteid | |
| 334 | +test |
| --- a/test/wiki.test | |
| +++ b/test/wiki.test | |
| @@ -0,0 +1,334 @@ | |
| --- a/test/wiki.test | |
| +++ b/test/wiki.test | |
| @@ -0,0 +1,334 @@ | |
| 1 | # |
| 2 | # Copyright (c) 2016 D. Richard Hipp |
| 3 | # |
| 4 | # This program is free software; you can redistribute it and/or |
| 5 | # modify it under the terms of the Simplified BSD License (also |
| 6 | # known as the "2-Clause License" or "FreeBSD License".) |
| 7 | # |
| 8 | # This program is distributed in the hope that it will be useful, |
| 9 | # but without any warranty; without even the implied warranty of |
| 10 | # merchantability or fitness for a particular purpose. |
| 11 | # |
| 12 | # Author contact information: |
| 13 | # [email protected] |
| 14 | # http://www.hwaci.com/drh/ |
| 15 | # |
| 16 | ############################################################################ |
| 17 | # |
| 18 | # Test wiki and attachment comman (c) 2016 D. Richard Hipp |
| 19 | # |
| 20 | # This program is free software; you can redistribute it and/or |
| 21 | # modify it under the terms of the Simplified BSD License (also |
| 22 | # known as the "2buted in the hoperegsub -all { +\n} $x \n x |
| 23 | (c) 2016 D. Richard Hipp |
| 24 | # |
| 25 | # # |
| 26 | # Copyright (c) 201 that it will be usefuy \n y |
| 27 | (c) 2016 D. Richard # |
| 28 | # Coi.com |
| 29 | # http://www.hwaci.com/drh/ |
| 30 | # |
| 31 | ############################################################################ |
| 32 | # |
| 33 | # Test wiki and attachment command Support |
| 34 | # |
| 35 | |
| 36 | test_setup |
| 37 | |
| 38 | # Disable backoffice for this test, otherwise its process lingers for some |
| 39 | # time after the test has completed. |
| 40 | # Perhaps, this should be done in test_setup and enabled explicitly only |
| 41 | # when needed. |
| 42 | fossil set backoffice-disable 1 |
| 43 | |
| 44 | # Return true if two files are similar (i.e not only compress trailing spaces |
| 45 | # from a lset CODE [regex regsub -all { +\n} $x \n x |
| 46 | No info link found [read_file $b] |
| 47 | regsub -all { +http << "GET /artifact/$info D. Richard Hip# |
| 48 | # Copyright {a b} { |
| 49 | set x "" |
| 50 | if {[file exists $a]} { |
| 51 | set x [read_file $a] |
| 52 | regsub -all { +\n} $x \n x |
| 53 | regsub -all {\n$} $x {} x |
| 54 | } |
| 55 | set y "" |
| 56 | if {[file exists $b]} { |
| 57 | set y [read_file $b] |
| 58 | regsub -all { +\n} $y \n y |
| 59 | regsub -all {\n$} $y {} y |
| 60 | } |
| 61 | return [expr {$x==$y}] |
| 62 | } |
| 63 | |
| 64 | # Return the mime type in the manifest for a given wiki page |
| 65 | # Defaults to "error: some text" if the manifest can't be located and |
| 66 | # "text/x-fossil-wiki" (the default mimetype for rendering) |
| 67 | # if the N card is omitted in the manifest. |
| 68 | # Note: Makes fossil calls, so $CODE and $RESULT will be corrupted |
| 69 | proc get_mime_type {name} { |
| 70 | global CODE RESULT |
| 71 | fossil http << "GET /wiki?name=$name" |
| 72 | if {$CODE != 0} { |
| 73 | return "error: /wiki?name=$name $CODE $RESULT" |
| 74 | } |
| 75 | fossil whatis --type w $name |
| 76 | if {$CODE != 0} { |
| 77 | return "error: fossil whatis --type w $name $CODE $RESULT" |
| 78 | } |
| 79 | set CODE [regexp -line {^artifact:\s*([0-9a-f]+)$} $RESULT match info] |
| 80 | if {$CODE == 0} { |
| 81 | return "error: whatis returned no info for wiki page $name" |
| 82 | } |
| 83 | fossil artifact $info |
| 84 | if {$CODE != 0} { |
| 85 | return "error: fossil artifact $info $CODE $RESULT" |
| 86 | } |
| 87 | set CODE [regexp -line {^N (.*)$} $RESULT match mimetype] |
| 88 | if {$CODE == 0} { |
| 89 | return "text/x-fossil-wiki" |
| 90 | } |
| 91 | return $mimetype |
| 92 | } |
| 93 | |
| 94 | |
| 95 | ############################################################################### |
| 96 | # Initially there should be no wiki entries |
| 97 | fossil wiki list |
| 98 | test wiki-0 {[normalize_result] eq {}} |
| 99 | |
| 100 | ############################################################################### |
| 101 | # Adding an entry should add it to the wiki list |
| 102 | write_file f1 "first wiki note" |
| 103 | fossil wiki create tcltest f1 |
| 104 | test wiki-1 {$CODE == 0} |
| 105 | fossil wiki list |
| 106 | test wiki-2 {[normalize_result] eq {tcltest}} |
| 107 | |
| 108 | ############################################################################### |
| 109 | # Trying to add the same entry should fail |
| 110 | fossil wiki create tcltest f1 -expectError |
| 111 | test wiki-3 {$CODE != 0} |
| 112 | |
| 113 | ############################################################################### |
| 114 | # exporting the wiki page should give back similar text |
| 115 | fossil wiki export tcltest a1 |
| 116 | test wiki-4 {[similar_file f1 a1]} |
| 117 | |
| 118 | ############################################################################### |
| 119 | # commiting a change to an existing page should replace the page on export |
| 120 | write_file f2 "second version of the page" |
| 121 | fossil wiki commit tcltest f2 |
| 122 | test wiki-5 {$CODE == 0} |
| 123 | fossil wiki export tcltest a2 |
| 124 | test wiki-6 {[similar_file f2 a2]} |
| 125 | |
| 126 | ############################################################################### |
| 127 | # But we shouldn't be able to update non-existant pages |
| 128 | fossil wiki commit doesntexist f1 -expectError |
| 129 | test wiki-7 {$CODE != 0} |
| 130 | |
| 131 | ############################################################################### |
| 132 | # There shouldn't be any tech notes at this point |
| 133 | fossil wiki list --technote |
| 134 | test wiki-8 {[normalize_result] eq {}} |
| 135 | |
| 136 | ############################################################################### |
| 137 | # Creating a tech note with a specified timestamp should add a technote |
| 138 | write_file f3 "A technote" |
| 139 | f ossil wiki create technote f3 --technote {2016-01-01 12:34} |
| 140 | test wiki-9 {$CODE == 0} |
| 141 | fossil wiki list --technote |
| 142 | test wiki-10 {[normalize_result] eq {2016-01-01 12:34:00}} |
| 143 | fossil wiki list --technote --show-technote-ids |
| 144 | set technotelist [split $RESULT "\n"] |
| 145 | set veryfirsttechnoteid [lindex [split [lindex $technotelist 0]] 0] |
| 146 | |
| 147 | ############################################################################### |
| 148 | # exporting that technote should give back similar text |
| 149 | fossil wiki export a3 --technote {2016-01-01 12:34:00} |
| 150 | test wiki-11 {[similar_file f3 a3]} |
| 151 | |
| 152 | ############################################################################### |
| 153 | # Trying to add a technote with the same timestamp should succeed and create a |
| 154 | # second tech note |
| 155 | fossil wiki create 2ndnote f3 -technote {2016-01-01 12:34} |
| 156 | test wiki-13 {$CODE == 0} |
| 157 | fossil wiki list --technote |
| 158 | set technotelist [split $RESULT "\n"] |
| 159 | test wiki-13.1 {[llength $technotelist] == 2} |
| 160 | |
| 161 | ############################################################################### |
| 162 | # commiting a change to an existing technote should replace the page on export |
| 163 | # (this should update th rt |
| 164 | # (this should update the tech note from wiki-13 as that the most recently |
| 165 | # updated one, that should also be the one exported by the export command) |
| 166 | write_file f4 "technote 2nd variant" |
| 167 | fossil wiki commit technote f4 --technote {2016-01-01 12:34} |
| 168 | test wiki-14 {$CODE == 0} |
| 169 | fossil wiki export a4 --technote {2016-01-01 12:34} |
| 170 | test wiki-15 {[similar_file f4 a4]} |
| 171 | # Also check that the tech note with the same timestamp, but modified less |
| 172 | # recently still has its original text |
| 173 | fossil wiki export a4.1 --technote $veryfirsttechnoteid |
| 174 | test wiki-15.1 {[similar_file f3 a4.1]} |
| 175 | |
| 176 | ############################################################################### |
| 177 | # But we shouldn't be able to update non-existant pages |
| 178 | fossil wiki commit doesntexist f1 -expectError |
| 179 | test wiki-16 {$CODE != 0} |
| 180 | |
| 181 | ############################################################################### |
| 182 | # Check specifying tags for a technote is OK |
| 183 | write_file f5 "technote with tags" |
| 184 | fossil wiki create {tagged technote} f5 --technote {2016-01-02 12:34} --technote-tags {A B} |
| 185 | test wiki-17 {$CODE == 0} |
| 186 | write_file f5.1 "editted and tagged technote" |
| 187 | fossil wiki commit {tagged technote} f5 --technote {2016-01-02 12:34} --t note {2016-01 -03 12:34} --technote-bgcolor blue |
| 188 | test wiki-28 {$CODE == 0} |
| 189 | |
| 190 | ############################################################################### |
| 191 | # _file f7 "Different timestamps" |
| 192 | fossil wiki create technotenow f7 --technote {2016-01-04 12:34:56+00:00} |
| 193 | test wiki-29 {$CODE == 0} |
| 194 | |
| 195 | ############################################################################### |
| 196 | # Check a technote appears on the timeline |
| 197 | write_file f8 "Contents of a 'unique' tech note" |
| 198 | fossil wiki create {Unique technote} f8 --technote {2016-01-05 01:02:03} |
| 199 | fossil timeline |
| 200 | test wiki-30 {[string match *Unique*technote* $RESULT]} |
| 201 | |
| 202 | ############################################################################### |
| 203 | # Check for a collision between an attachment and a note, this was a |
| 204 | # bug that resulted from some code treating the attachment entry as if it |
| 205 | # were a technote when it isn't really. |
| 206 | # |
| 207 | # First, wait for the top of the next second so the attachment |
| 208 | # happens at a known time, then add an attachment to an existing note |
| 209 | # and a new note immediately after. |
| 210 | |
| 211 | set t0 [clock seconds] |
| 212 | while {$t0 == [clock seconds]} { |
| 213 | after 100 |
| 214 | } |
| 215 | set t1 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"] |
| 216 | write_fil -%m-%d %H:%M:%S"] |
| 217 | write_file f9 "Timestamp: $t1" |
| 218 | fossil attachment add f9 --technote {2016-01-05 01:02:03} |
| 219 | test wiki-31 {$CODE == 0} |
| 220 | fossil wiki create {A CODE == 0} |
| 221 | # |
| 222 | # Now waste time until the next second so that the remaining tests |
| 223 | # don't have to worry about a potential collision |
| 224 | set t0 [clock seconds] |
| 225 | while {$t0 == [clock seconds]} { |
| 226 | after 100 |
| 227 | } |
| 228 | |
| 229 | ############################################################################### |
| 230 | # Check a technote with no timestamp cannot be created, but that |
| 231 | # "now " is a valid stamp. |
| 232 | set t2 [clock format [clock seconds] -gmt 1 -format # Copyright (c) 2016 D. Richard Hiiotelist [llength $technotelified timest ki create technotenow f7 --technote {2016-01-04 12:34:56+00:00} |
| 233 | test wiki-29 {$CODE == 0} |
| 234 | |
| 235 | ############################################################################### |
| 236 | # Check a technote appears on the timeline |
| 237 | write_file f8 "Contents of a 'unique' tech note" |
| 238 | fossil wiki create {Unique technote} f8 --technote {2016-01-05 01:02:03} |
| 239 | fossil timeline |
| 240 | test wiki-30 {[string match *Unique*technote* $RESULT]} |
| 241 | |
| 242 | ############################################################################### |
| 243 | # Check for a collision between an attachment and a note, this was a |
| 244 | # bug that resulted from some code treating the attachment entry as if it |
| 245 | # were a technote when it isn't really. |
| 246 | # |
| 247 | # First, wait for the top of the next second so the attachment |
| 248 | # happens at a known time, then add an attachment to an existing note |
| 249 | # and a new note immediately after. |
| 250 | |
| 251 | set t0 [clock seconds] |
| 252 | while {$t0 == [clock seconds]} { |
| 253 | after 100 |
| 254 | } |
| 255 | set t1 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"] |
| 256 | write_file f9 "Timestamp: $t1" |
| 257 | fossil attachment add f9 --technote {2016-01-05 01:02:03} |
| 258 | test wiki-31 {$CODE == 0} |
| 259 | fossil wiki create {Attachment collision} f9 --technote now |
| 260 | test wiki-32 {$CODE == 0} |
| 261 | # |
| 262 | # Now waste time until the next second so that the remaining tests |
| 263 | # don't have to worry about a potential collision |
| 264 | set t0 [clock seconds] |
| 265 | while {$t0 == [clock seconds]} { |
| 266 | after 100 |
| 267 | } |
| 268 | |
| 269 | ############################################################################### |
| 270 | # Check a technote with no timestamp cannot be created, but that |
| 271 | # "now" is a valid stamp. |
| 272 | s et t2 [clock format [clock seconds] -gmt 1 -format "%Y-%m-%d %H:%M:%S"] |
| 273 | write_file f10 "Even unstampted notes are delivered.\nStamped $t2" |
| 274 | fossil wiki create "Unstamped Note" f10 --technote -expectError |
| 275 | test wiki-33 {$CODE != 0} |
| 276 | fossil wiki create "Unstamped Note" f10 --technote now |
| 277 | test wiki-34 {$CODE == 0} |
| 278 | fossil wiki list -t |
| 279 | test wiki-35 {[string match "*$t2*" $RESULT]} |
| 280 | |
| 281 | ############################################################################### |
| 282 | # Check an attachment to it in the same second works. |
| 283 | write_file f11 "Time Stamp was $t2" |
| 284 | fossil attachment add f11 --technote $t2 |
| 285 | test wiki-36 {$CODE == 0} |
| 286 | fossil timeline |
| 287 | test wiki-36-1 {$CODE == 0} |
| 288 | fossil wiki list -t |
| 289 | test wiki-36-2 {$CODE == 0} |
| 290 | |
| 291 | ########################################################################### #technotelist [split $RESULT "\n"] |
| 292 | for {set i 0} {$i < [llength $technotelist]} {incr i} { |
| 293 | set fullid [lindex $technotelist $i] |
| 294 | set id [string range $fullid 0 3] |
| 295 | dict incr idcounts $id |
| 296 | if {[dict get $idcounts $id] > $maxcount} { |
| 297 | set maxid $id |
| 298 | incr maxcount |
| 299 | } |
| 300 | } |
| 301 | # get i so that, as a julian date, it is in the 1800s, i.e., older than |
| 302 | # any other tech note, but after 1 AD |
| 303 | set i 2400000 |
| 304 | while {$maxcount < 2} { |
| 305 | # keep getting older |
| 306 | incr i -1 |
| 307 | write_file f13 "A tech note with timestamp of jday=$i" |
| 308 | fossil wiki create "timestamp of $i" f13 --technote "$i" |
| 309 | fossil wiki list --technote --show-technote-ids |
| 310 | set technotelist [split $RESULT "\n"] |
| 311 | set oldesttechnoteid [lindex [split [lindex $technotelist [llength $technotelist]-1]] 0] |
| 312 | set id [string range $oldesttechnoteid 0 3] |
| 313 | dict incr idcounts $id |
| 314 | if {[dict get $idcounts $id] > $maxcount} { |
| 315 | set maxid $id |
| 316 | incr maxcount |
| 317 | } |
| 318 | } |
| 319 | # Save the duplicate id for this and later tests |
| 320 | set duplicateid $maxid |
| 321 | fossil wiki export a13 --technote $duplicateid -expectError |
| 322 | test wiki-42 {$CODE != 0} |
| 323 | |
| 324 | ############################################################################### |
| 325 | # Check we can update technote by its id |
| 326 | write_file f14 "Updated text for the really old tech note" |
| 327 | fossil wiki commit {Old tech note} f14 --technote $anoldtechnoteid |
| 328 | fossil wiki export a14 --technote $anoldtechnoteid |
| 329 | test wiki-43 {[similar_file f14 a14]} |
| 330 | |
| 331 | ############################################################################### |
| 332 | # Check we can add attachments to a technote by its id |
| 333 | fossil attachment add fa --technote $anoldtechnoteid |
| 334 | test |
+20
| --- a/www/encryptedrepos.wiki | ||
| +++ b/www/encryptedrepos.wiki | ||
| @@ -0,0 +1,20 @@ | ||
| 1 | +<title>How To Use Encr<h2>Int Settingcrypted repository.<title>How To Use Encr<h2>Introduction</h2><blockquote> | |
| 2 | +Fossil can be compiled so that it works with encrypted repositories using | |
| 3 | +the [https://www.sqlite.org/see/doc/trunk/www/readme.wiki|SQLite Encryption Extension]. | |
| 4 | +This technical note explains the process. | |
| 5 | +</blockquote> | |
| 6 | +<h2>Building An Enc<blockquote> | |
| 7 | +The SQLite Encryption ExtensionSQLite Encryption Extension (SEE) is proprietary software and requires | |
| 8 | +[http://www.hwaci.com/cgi-bin/see-step1 | |
| 9 | +Assuming you have an SEE license, the first step of compiling Fossil to | |
| 10 | +use SEE is to create an SEE-enabled version of the SQLite database source code. | |
| 11 | +This alternative SQLite database source file should be called "sqlite3-see.c" | |
| 12 | +and should be placed in the src/ subfolder of the Fossil sourrces, right beside | |
| 13 | +the public-domain "sqlite3.c" source fitps://www.sqlite.org/see/doc/trunk/www/readme.wiki|SQLite Encryption Extension]. | |
| 14 | +This technical note explains the process. | |
| 15 | +</blockquote> | |
| 16 | +<h2>Building An Enc<blockquote> | |
| 17 | +The SQLite Encryption ExtensionSQLite Encryption Extension (SEE) is proprietary software and requires | |
| 18 | +[http://www.hwaci.com/cgi-bin/see-step1 | |
| 19 | +Assuming you have an SEE license, the first step of compiling Fossil to | |
| 20 | +use SEE is to create an SEE-enabled version of the SQLite database source couse of requires retyping and "fossil ui |
| --- a/www/encryptedrepos.wiki | |
| +++ b/www/encryptedrepos.wiki | |
| @@ -0,0 +1,20 @@ | |
| --- a/www/encryptedrepos.wiki | |
| +++ b/www/encryptedrepos.wiki | |
| @@ -0,0 +1,20 @@ | |
| 1 | <title>How To Use Encr<h2>Int Settingcrypted repository.<title>How To Use Encr<h2>Introduction</h2><blockquote> |
| 2 | Fossil can be compiled so that it works with encrypted repositories using |
| 3 | the [https://www.sqlite.org/see/doc/trunk/www/readme.wiki|SQLite Encryption Extension]. |
| 4 | This technical note explains the process. |
| 5 | </blockquote> |
| 6 | <h2>Building An Enc<blockquote> |
| 7 | The SQLite Encryption ExtensionSQLite Encryption Extension (SEE) is proprietary software and requires |
| 8 | [http://www.hwaci.com/cgi-bin/see-step1 |
| 9 | Assuming you have an SEE license, the first step of compiling Fossil to |
| 10 | use SEE is to create an SEE-enabled version of the SQLite database source code. |
| 11 | This alternative SQLite database source file should be called "sqlite3-see.c" |
| 12 | and should be placed in the src/ subfolder of the Fossil sourrces, right beside |
| 13 | the public-domain "sqlite3.c" source fitps://www.sqlite.org/see/doc/trunk/www/readme.wiki|SQLite Encryption Extension]. |
| 14 | This technical note explains the process. |
| 15 | </blockquote> |
| 16 | <h2>Building An Enc<blockquote> |
| 17 | The SQLite Encryption ExtensionSQLite Encryption Extension (SEE) is proprietary software and requires |
| 18 | [http://www.hwaci.com/cgi-bin/see-step1 |
| 19 | Assuming you have an SEE license, the first step of compiling Fossil to |
| 20 | use SEE is to create an SEE-enabled version of the SQLite database source couse of requires retyping and "fossil ui |