Fossil SCM

Merge version-2.13

george 2020-11-17 17:55 wiki-history merge
Commit 98b447109e9ef3e0d4b5c494d3fd4f9628f52e0946381b474d96d74f5e9f7817
+20 -5
--- src/alerts.c
+++ src/alerts.c
@@ -1561,24 +1561,39 @@
15611561
** Either shutdown or completely delete a subscription entry given
15621562
** by the hex value zName. Then paint a webpage that explains that
15631563
** the entry has been removed.
15641564
*/
15651565
static void alert_unsubscribe(int sid){
1566
- char *zEmail;
1567
- zEmail = db_text(0, "SELECT semail FROM subscriber"
1568
- " WHERE subscriberId=%d", sid);
1566
+ const char *zEmail = 0;
1567
+ const char *zLogin = 0;
1568
+ int uid = 0;
1569
+ Stmt q;
1570
+ db_prepare(&q, "SELECT semail, suname FROM subscriber"
1571
+ " WHERE subscriberId=%d", sid);
1572
+ if( db_step(&q)==SQLITE_ROW ){
1573
+ zEmail = db_column_text(&q, 0);
1574
+ zLogin = db_column_text(&q, 1);
1575
+ uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", zLogin);
1576
+ }
15691577
if( zEmail==0 ){
15701578
style_header("Unsubscribe Fail");
15711579
@ <p>Unable to locate a subscriber with the requested key</p>
15721580
}else{
1581
+
15731582
db_multi_exec(
15741583
"DELETE FROM subscriber WHERE subscriberId=%d", sid
15751584
);
15761585
style_header("Unsubscribed");
1577
- @ <p>The "%h(zEmail)" email address has been delisted.
1578
- @ All traces of that email address have been removed</p>
1586
+ @ <p>The "%h(zEmail)" email address has been unsubscribed and the
1587
+ @ corresponding row in the subscriber table has been deleted.<p>
1588
+ if( uid && g.perm.Admin ){
1589
+ @ <p>You may also want to
1590
+ @ <a href="%R/setup_uedit?id=%d(uid)">edit or delete
1591
+ @ the corresponding user "%h(zLogin)"</a></p>
1592
+ }
15791593
}
1594
+ db_finalize(&q);
15801595
style_footer();
15811596
return;
15821597
}
15831598
15841599
/*
15851600
--- src/alerts.c
+++ src/alerts.c
@@ -1561,24 +1561,39 @@
1561 ** Either shutdown or completely delete a subscription entry given
1562 ** by the hex value zName. Then paint a webpage that explains that
1563 ** the entry has been removed.
1564 */
1565 static void alert_unsubscribe(int sid){
1566 char *zEmail;
1567 zEmail = db_text(0, "SELECT semail FROM subscriber"
1568 " WHERE subscriberId=%d", sid);
 
 
 
 
 
 
 
 
1569 if( zEmail==0 ){
1570 style_header("Unsubscribe Fail");
1571 @ <p>Unable to locate a subscriber with the requested key</p>
1572 }else{
 
1573 db_multi_exec(
1574 "DELETE FROM subscriber WHERE subscriberId=%d", sid
1575 );
1576 style_header("Unsubscribed");
1577 @ <p>The "%h(zEmail)" email address has been delisted.
1578 @ All traces of that email address have been removed</p>
 
 
 
 
 
1579 }
 
1580 style_footer();
1581 return;
1582 }
1583
1584 /*
1585
--- src/alerts.c
+++ src/alerts.c
@@ -1561,24 +1561,39 @@
1561 ** Either shutdown or completely delete a subscription entry given
1562 ** by the hex value zName. Then paint a webpage that explains that
1563 ** the entry has been removed.
1564 */
1565 static void alert_unsubscribe(int sid){
1566 const char *zEmail = 0;
1567 const char *zLogin = 0;
1568 int uid = 0;
1569 Stmt q;
1570 db_prepare(&q, "SELECT semail, suname FROM subscriber"
1571 " WHERE subscriberId=%d", sid);
1572 if( db_step(&q)==SQLITE_ROW ){
1573 zEmail = db_column_text(&q, 0);
1574 zLogin = db_column_text(&q, 1);
1575 uid = db_int(0, "SELECT uid FROM user WHERE login=%Q", zLogin);
1576 }
1577 if( zEmail==0 ){
1578 style_header("Unsubscribe Fail");
1579 @ <p>Unable to locate a subscriber with the requested key</p>
1580 }else{
1581
1582 db_multi_exec(
1583 "DELETE FROM subscriber WHERE subscriberId=%d", sid
1584 );
1585 style_header("Unsubscribed");
1586 @ <p>The "%h(zEmail)" email address has been unsubscribed and the
1587 @ corresponding row in the subscriber table has been deleted.<p>
1588 if( uid && g.perm.Admin ){
1589 @ <p>You may also want to
1590 @ <a href="%R/setup_uedit?id=%d(uid)">edit or delete
1591 @ the corresponding user "%h(zLogin)"</a></p>
1592 }
1593 }
1594 db_finalize(&q);
1595 style_footer();
1596 return;
1597 }
1598
1599 /*
1600
+12 -6
--- src/allrepo.c
+++ src/allrepo.c
@@ -137,13 +137,13 @@
137137
** when one of the following commands are run against the repository:
138138
** clone, info, pull, push, or sync. Even previously ignored repositories
139139
** are added back to the list of repositories by these commands.
140140
**
141141
** Options:
142
-** --showfile Show the repository or checkout being operated upon.
143
-** --dontstop Continue with other repositories even after an error.
144
-** --dry-run If given, display instead of run actions.
142
+** --dry-run If given, display instead of run actions.
143
+** --showfile Show the repository or checkout being operated upon.
144
+** --stop-on-error Halt immediately if any subprocess fails.
145145
*/
146146
void all_cmd(void){
147147
int n;
148148
Stmt q;
149149
const char *zCmd;
@@ -151,14 +151,16 @@
151151
Blob extra;
152152
int useCheckouts = 0;
153153
int quiet = 0;
154154
int dryRunFlag = 0;
155155
int showFile = find_option("showfile",0,0)!=0;
156
- int stopOnError = find_option("dontstop",0,0)==0;
156
+ int stopOnError;
157157
int nToDel = 0;
158158
int showLabel = 0;
159159
160
+ (void)find_option("dontstop",0,0); /* Legacy. Now the default */
161
+ stopOnError = find_option("stop-on-error",0,0)!=0;
160162
dryRunFlag = find_option("dry-run","n",0)!=0;
161163
if( !dryRunFlag ){
162164
dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
163165
}
164166
@@ -416,12 +418,16 @@
416418
fossil_print("%s\n", zSyscmd);
417419
fflush(stdout);
418420
}
419421
rc = dryRunFlag ? 0 : fossil_system(zSyscmd);
420422
free(zSyscmd);
421
- if( stopOnError && rc ){
422
- break;
423
+ if( rc ){
424
+ if( stopOnError ) break;
425
+ /* If there is an error, pause briefly, but do not stop. The brief
426
+ ** pause is so that if the prior command failed with Ctrl-C then there
427
+ ** will be time to stop the whole thing with a second Ctrl-C. */
428
+ sqlite3_sleep(330);
423429
}
424430
}
425431
db_finalize(&q);
426432
427433
blob_reset(&extra);
428434
--- src/allrepo.c
+++ src/allrepo.c
@@ -137,13 +137,13 @@
137 ** when one of the following commands are run against the repository:
138 ** clone, info, pull, push, or sync. Even previously ignored repositories
139 ** are added back to the list of repositories by these commands.
140 **
141 ** Options:
142 ** --showfile Show the repository or checkout being operated upon.
143 ** --dontstop Continue with other repositories even after an error.
144 ** --dry-run If given, display instead of run actions.
145 */
146 void all_cmd(void){
147 int n;
148 Stmt q;
149 const char *zCmd;
@@ -151,14 +151,16 @@
151 Blob extra;
152 int useCheckouts = 0;
153 int quiet = 0;
154 int dryRunFlag = 0;
155 int showFile = find_option("showfile",0,0)!=0;
156 int stopOnError = find_option("dontstop",0,0)==0;
157 int nToDel = 0;
158 int showLabel = 0;
159
 
 
160 dryRunFlag = find_option("dry-run","n",0)!=0;
161 if( !dryRunFlag ){
162 dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
163 }
164
@@ -416,12 +418,16 @@
416 fossil_print("%s\n", zSyscmd);
417 fflush(stdout);
418 }
419 rc = dryRunFlag ? 0 : fossil_system(zSyscmd);
420 free(zSyscmd);
421 if( stopOnError && rc ){
422 break;
 
 
 
 
423 }
424 }
425 db_finalize(&q);
426
427 blob_reset(&extra);
428
--- src/allrepo.c
+++ src/allrepo.c
@@ -137,13 +137,13 @@
137 ** when one of the following commands are run against the repository:
138 ** clone, info, pull, push, or sync. Even previously ignored repositories
139 ** are added back to the list of repositories by these commands.
140 **
141 ** Options:
142 ** --dry-run If given, display instead of run actions.
143 ** --showfile Show the repository or checkout being operated upon.
144 ** --stop-on-error Halt immediately if any subprocess fails.
145 */
146 void all_cmd(void){
147 int n;
148 Stmt q;
149 const char *zCmd;
@@ -151,14 +151,16 @@
151 Blob extra;
152 int useCheckouts = 0;
153 int quiet = 0;
154 int dryRunFlag = 0;
155 int showFile = find_option("showfile",0,0)!=0;
156 int stopOnError;
157 int nToDel = 0;
158 int showLabel = 0;
159
160 (void)find_option("dontstop",0,0); /* Legacy. Now the default */
161 stopOnError = find_option("stop-on-error",0,0)!=0;
162 dryRunFlag = find_option("dry-run","n",0)!=0;
163 if( !dryRunFlag ){
164 dryRunFlag = find_option("test",0,0)!=0; /* deprecated */
165 }
166
@@ -416,12 +418,16 @@
418 fossil_print("%s\n", zSyscmd);
419 fflush(stdout);
420 }
421 rc = dryRunFlag ? 0 : fossil_system(zSyscmd);
422 free(zSyscmd);
423 if( rc ){
424 if( stopOnError ) break;
425 /* If there is an error, pause briefly, but do not stop. The brief
426 ** pause is so that if the prior command failed with Ctrl-C then there
427 ** will be time to stop the whole thing with a second Ctrl-C. */
428 sqlite3_sleep(330);
429 }
430 }
431 db_finalize(&q);
432
433 blob_reset(&extra);
434
+5 -5
--- src/diff.c
+++ src/diff.c
@@ -2483,13 +2483,13 @@
24832483
ann.aVers[i].zBgColor = mprintf("#%06x", clr);
24842484
}
24852485
24862486
@ <div id="annotation_log" style='display:%s(showLog?"block":"none");'>
24872487
if( zOrigin ){
2488
- zLink = href("%R/finfo?name=%t&ci=%!S&orig=%!S",zFilename,zCI,zOrigin);
2488
+ zLink = href("%R/finfo?name=%t&from=%!S&to=%!S",zFilename,zCI,zOrigin);
24892489
}else{
2490
- zLink = href("%R/finfo?name=%t&ci=%!S",zFilename,zCI);
2490
+ zLink = href("%R/finfo?name=%t&from=%!S",zFilename,zCI);
24912491
}
24922492
@ <h2>Versions of %z(zLink)%h(zFilename)</a> analyzed:</h2>
24932493
@ <ol>
24942494
for(p=ann.aVers, i=0; i<ann.nVers; i++, p++){
24952495
@ <li><span style='background-color:%s(p->zBgColor);'>%s(p->zDate)
@@ -2502,21 +2502,21 @@
25022502
@ </div>
25032503
25042504
if( !ann.bMoreToDo ){
25052505
assert( ann.origId==0 ); /* bMoreToDo always set for a point-to-point */
25062506
@ <h2>Origin for each line in
2507
- @ %z(href("%R/finfo?name=%h&ci=%!S", zFilename, zCI))%h(zFilename)</a>
2507
+ @ %z(href("%R/finfo?name=%h&from=%!S", zFilename, zCI))%h(zFilename)</a>
25082508
@ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>:</h2>
25092509
}else if( ann.origId>0 ){
25102510
@ <h2>Lines of
2511
- @ %z(href("%R/finfo?name=%h&ci=%!S", zFilename, zCI))%h(zFilename)</a>
2511
+ @ %z(href("%R/finfo?name=%h&from=%!S", zFilename, zCI))%h(zFilename)</a>
25122512
@ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>
25132513
@ that are changed by the sequence of edits moving toward
25142514
@ check-in %z(href("%R/info/%!S",zOrigin))%S(zOrigin)</a>:</h2>
25152515
}else{
25162516
@ <h2>Lines added by the %d(ann.nVers) most recent ancestors of
2517
- @ %z(href("%R/finfo?name=%h&ci=%!S", zFilename, zCI))%h(zFilename)</a>
2517
+ @ %z(href("%R/finfo?name=%h&from=%!S", zFilename, zCI))%h(zFilename)</a>
25182518
@ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>:</h2>
25192519
}
25202520
@ <pre>
25212521
szHash = 10;
25222522
for(i=0; i<ann.nOrig; i++){
25232523
--- src/diff.c
+++ src/diff.c
@@ -2483,13 +2483,13 @@
2483 ann.aVers[i].zBgColor = mprintf("#%06x", clr);
2484 }
2485
2486 @ <div id="annotation_log" style='display:%s(showLog?"block":"none");'>
2487 if( zOrigin ){
2488 zLink = href("%R/finfo?name=%t&ci=%!S&orig=%!S",zFilename,zCI,zOrigin);
2489 }else{
2490 zLink = href("%R/finfo?name=%t&ci=%!S",zFilename,zCI);
2491 }
2492 @ <h2>Versions of %z(zLink)%h(zFilename)</a> analyzed:</h2>
2493 @ <ol>
2494 for(p=ann.aVers, i=0; i<ann.nVers; i++, p++){
2495 @ <li><span style='background-color:%s(p->zBgColor);'>%s(p->zDate)
@@ -2502,21 +2502,21 @@
2502 @ </div>
2503
2504 if( !ann.bMoreToDo ){
2505 assert( ann.origId==0 ); /* bMoreToDo always set for a point-to-point */
2506 @ <h2>Origin for each line in
2507 @ %z(href("%R/finfo?name=%h&ci=%!S", zFilename, zCI))%h(zFilename)</a>
2508 @ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>:</h2>
2509 }else if( ann.origId>0 ){
2510 @ <h2>Lines of
2511 @ %z(href("%R/finfo?name=%h&ci=%!S", zFilename, zCI))%h(zFilename)</a>
2512 @ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>
2513 @ that are changed by the sequence of edits moving toward
2514 @ check-in %z(href("%R/info/%!S",zOrigin))%S(zOrigin)</a>:</h2>
2515 }else{
2516 @ <h2>Lines added by the %d(ann.nVers) most recent ancestors of
2517 @ %z(href("%R/finfo?name=%h&ci=%!S", zFilename, zCI))%h(zFilename)</a>
2518 @ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>:</h2>
2519 }
2520 @ <pre>
2521 szHash = 10;
2522 for(i=0; i<ann.nOrig; i++){
2523
--- src/diff.c
+++ src/diff.c
@@ -2483,13 +2483,13 @@
2483 ann.aVers[i].zBgColor = mprintf("#%06x", clr);
2484 }
2485
2486 @ <div id="annotation_log" style='display:%s(showLog?"block":"none");'>
2487 if( zOrigin ){
2488 zLink = href("%R/finfo?name=%t&from=%!S&to=%!S",zFilename,zCI,zOrigin);
2489 }else{
2490 zLink = href("%R/finfo?name=%t&from=%!S",zFilename,zCI);
2491 }
2492 @ <h2>Versions of %z(zLink)%h(zFilename)</a> analyzed:</h2>
2493 @ <ol>
2494 for(p=ann.aVers, i=0; i<ann.nVers; i++, p++){
2495 @ <li><span style='background-color:%s(p->zBgColor);'>%s(p->zDate)
@@ -2502,21 +2502,21 @@
2502 @ </div>
2503
2504 if( !ann.bMoreToDo ){
2505 assert( ann.origId==0 ); /* bMoreToDo always set for a point-to-point */
2506 @ <h2>Origin for each line in
2507 @ %z(href("%R/finfo?name=%h&from=%!S", zFilename, zCI))%h(zFilename)</a>
2508 @ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>:</h2>
2509 }else if( ann.origId>0 ){
2510 @ <h2>Lines of
2511 @ %z(href("%R/finfo?name=%h&from=%!S", zFilename, zCI))%h(zFilename)</a>
2512 @ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>
2513 @ that are changed by the sequence of edits moving toward
2514 @ check-in %z(href("%R/info/%!S",zOrigin))%S(zOrigin)</a>:</h2>
2515 }else{
2516 @ <h2>Lines added by the %d(ann.nVers) most recent ancestors of
2517 @ %z(href("%R/finfo?name=%h&from=%!S", zFilename, zCI))%h(zFilename)</a>
2518 @ from check-in %z(href("%R/info/%!S",zCI))%S(zCI)</a>:</h2>
2519 }
2520 @ <pre>
2521 szHash = 10;
2522 for(i=0; i<ann.nOrig; i++){
2523
+6
--- src/doc.c
+++ src/doc.c
@@ -190,10 +190,11 @@
190190
{ "pdb", 3, "chemical/x-pdb" },
191191
{ "pdf", 3, "application/pdf" },
192192
{ "pgm", 3, "image/x-portable-graymap" },
193193
{ "pgn", 3, "application/x-chess-pgn" },
194194
{ "pgp", 3, "application/pgp" },
195
+ { "pikchr", 6, "text/x-pikchr" },
195196
{ "pl", 2, "application/x-perl" },
196197
{ "pm", 2, "application/x-perl" },
197198
{ "png", 3, "image/png" },
198199
{ "pnm", 3, "image/x-portable-anymap" },
199200
{ "pot", 3, "application/mspowerpoint" },
@@ -794,10 +795,15 @@
794795
&& doc_is_embedded_html(pBody, &title) ){
795796
if( blob_size(&title)==0 ) blob_append(&title,zFilename,-1);
796797
style_header("%s", blob_str(&title));
797798
convert_href_and_output(pBody);
798799
document_emit_js();
800
+ style_footer();
801
+ }else if( fossil_strcmp(zMime, "text/x-pikchr")==0 ){
802
+ style_adunit_config(ADUNIT_RIGHT_OK);
803
+ style_header("%s", zDefaultTitle);
804
+ wiki_render_by_mimetype(pBody, zMime);
799805
style_footer();
800806
#ifdef FOSSIL_ENABLE_TH1_DOCS
801807
}else if( Th_AreDocsEnabled() &&
802808
fossil_strcmp(zMime, "application/x-th1")==0 ){
803809
int raw = P("raw")!=0;
804810
--- src/doc.c
+++ src/doc.c
@@ -190,10 +190,11 @@
190 { "pdb", 3, "chemical/x-pdb" },
191 { "pdf", 3, "application/pdf" },
192 { "pgm", 3, "image/x-portable-graymap" },
193 { "pgn", 3, "application/x-chess-pgn" },
194 { "pgp", 3, "application/pgp" },
 
195 { "pl", 2, "application/x-perl" },
196 { "pm", 2, "application/x-perl" },
197 { "png", 3, "image/png" },
198 { "pnm", 3, "image/x-portable-anymap" },
199 { "pot", 3, "application/mspowerpoint" },
@@ -794,10 +795,15 @@
794 && doc_is_embedded_html(pBody, &title) ){
795 if( blob_size(&title)==0 ) blob_append(&title,zFilename,-1);
796 style_header("%s", blob_str(&title));
797 convert_href_and_output(pBody);
798 document_emit_js();
 
 
 
 
 
799 style_footer();
800 #ifdef FOSSIL_ENABLE_TH1_DOCS
801 }else if( Th_AreDocsEnabled() &&
802 fossil_strcmp(zMime, "application/x-th1")==0 ){
803 int raw = P("raw")!=0;
804
--- src/doc.c
+++ src/doc.c
@@ -190,10 +190,11 @@
190 { "pdb", 3, "chemical/x-pdb" },
191 { "pdf", 3, "application/pdf" },
192 { "pgm", 3, "image/x-portable-graymap" },
193 { "pgn", 3, "application/x-chess-pgn" },
194 { "pgp", 3, "application/pgp" },
195 { "pikchr", 6, "text/x-pikchr" },
196 { "pl", 2, "application/x-perl" },
197 { "pm", 2, "application/x-perl" },
198 { "png", 3, "image/png" },
199 { "pnm", 3, "image/x-portable-anymap" },
200 { "pot", 3, "application/mspowerpoint" },
@@ -794,10 +795,15 @@
795 && doc_is_embedded_html(pBody, &title) ){
796 if( blob_size(&title)==0 ) blob_append(&title,zFilename,-1);
797 style_header("%s", blob_str(&title));
798 convert_href_and_output(pBody);
799 document_emit_js();
800 style_footer();
801 }else if( fossil_strcmp(zMime, "text/x-pikchr")==0 ){
802 style_adunit_config(ADUNIT_RIGHT_OK);
803 style_header("%s", zDefaultTitle);
804 wiki_render_by_mimetype(pBody, zMime);
805 style_footer();
806 #ifdef FOSSIL_ENABLE_TH1_DOCS
807 }else if( Th_AreDocsEnabled() &&
808 fossil_strcmp(zMime, "application/x-th1")==0 ){
809 int raw = P("raw")!=0;
810
+2 -1
--- src/file.c
+++ src/file.c
@@ -1308,17 +1308,18 @@
13081308
int reset
13091309
){
13101310
char zBuf[200];
13111311
char *z;
13121312
Blob x;
1313
+ char *zFull;
13131314
int rc;
13141315
sqlite3_int64 iMtime;
13151316
struct fossilStat testFileStat;
13161317
memset(zBuf, 0, sizeof(zBuf));
13171318
blob_zero(&x);
13181319
file_canonical_name(zPath, &x, slash);
1319
- char *zFull = blob_str(&x);
1320
+ zFull = blob_str(&x);
13201321
fossil_print("[%s] -> [%s]\n", zPath, zFull);
13211322
memset(&testFileStat, 0, sizeof(struct fossilStat));
13221323
rc = fossil_stat(zPath, &testFileStat, 0);
13231324
fossil_print(" stat_rc = %d\n", rc);
13241325
sqlite3_snprintf(sizeof(zBuf), zBuf, "%lld", testFileStat.st_size);
13251326
--- src/file.c
+++ src/file.c
@@ -1308,17 +1308,18 @@
1308 int reset
1309 ){
1310 char zBuf[200];
1311 char *z;
1312 Blob x;
 
1313 int rc;
1314 sqlite3_int64 iMtime;
1315 struct fossilStat testFileStat;
1316 memset(zBuf, 0, sizeof(zBuf));
1317 blob_zero(&x);
1318 file_canonical_name(zPath, &x, slash);
1319 char *zFull = blob_str(&x);
1320 fossil_print("[%s] -> [%s]\n", zPath, zFull);
1321 memset(&testFileStat, 0, sizeof(struct fossilStat));
1322 rc = fossil_stat(zPath, &testFileStat, 0);
1323 fossil_print(" stat_rc = %d\n", rc);
1324 sqlite3_snprintf(sizeof(zBuf), zBuf, "%lld", testFileStat.st_size);
1325
--- src/file.c
+++ src/file.c
@@ -1308,17 +1308,18 @@
1308 int reset
1309 ){
1310 char zBuf[200];
1311 char *z;
1312 Blob x;
1313 char *zFull;
1314 int rc;
1315 sqlite3_int64 iMtime;
1316 struct fossilStat testFileStat;
1317 memset(zBuf, 0, sizeof(zBuf));
1318 blob_zero(&x);
1319 file_canonical_name(zPath, &x, slash);
1320 zFull = blob_str(&x);
1321 fossil_print("[%s] -> [%s]\n", zPath, zFull);
1322 memset(&testFileStat, 0, sizeof(struct fossilStat));
1323 rc = fossil_stat(zPath, &testFileStat, 0);
1324 fossil_print(" stat_rc = %d\n", rc);
1325 sqlite3_snprintf(sizeof(zBuf), zBuf, "%lld", testFileStat.st_size);
1326
+195 -106
--- src/finfo.c
+++ src/finfo.c
@@ -271,41 +271,57 @@
271271
/* Values for the debug= query parameter to finfo */
272272
#define FINFO_DEBUG_MLINK 0x01
273273
274274
/*
275275
** WEBPAGE: finfo
276
-** URL: /finfo?name=FILENAME
276
+** Usage:
277
+** * /finfo?name=FILENAME
278
+** * /finfo?name=FILENAME&ci=HASH
277279
**
278
-** Show the change history for a single file.
280
+** Show the change history for a single file. The name=FILENAME query
281
+** parameter gives the filename and is a required parameter. If the
282
+** ci=HASH parameter is also supplied, then the FILENAME,HASH combination
283
+** identifies a particular version of a file, and in that case all changes
284
+** to that one file version are tracked across both edits and renames.
285
+** If only the name=FILENAME parameter is supplied (if ci=HASH is omitted)
286
+** then the graph shows all changes to any file while it happened
287
+** to be called FILENAME and changes are not tracked across renames.
279288
**
280289
** Additional query parameters:
281290
**
282
-** a=DATETIME Only show changes after DATETIME
283
-** b=DATETIME Only show changes before DATETIME
284
-** m=HASH Mark this particular file version
285
-** n=NUM Show the first NUM changes only
286
-** brbg Background color by branch name
287
-** ubg Background color by user name
288
-** ci=HASH Ancestors of a particular check-in
289
-** orig=HASH If both ci and orig are supplied, only show those
290
-** changes on a direct path from orig to ci.
291
-** showid Show RID values for debugging
291
+** a=DATETIME Only show changes after DATETIME
292
+** b=DATETIME Only show changes before DATETIME
293
+** ci=HASH identify a particular version of a file and then
294
+** track changes to that file across renames
295
+** m=HASH Mark this particular file version.
296
+** n=NUM Show the first NUM changes only
297
+** name=FILENAME (Required) name of file whose history to show
298
+** brbg Background color by branch name
299
+** ubg Background color by user name
300
+** from=HASH Ancestors only (not descendants) of the version of
301
+** the file in this particular check-in.
302
+** to=HASH If both from= and to= are supplied, only show those
303
+** changes on the direct path between the two given
304
+** checkins.
305
+** showid Show RID values for debugging
306
+** showsql Show the SQL query used to gather the data for
307
+** the graph
292308
**
293
-** DATETIME may be "now" or "YYYY-MM-DDTHH:MM:SS.SSS". If in
294
-** year-month-day form, it may be truncated, and it may also name a
295
-** timezone offset from UTC as "-HH:MM" (westward) or "+HH:MM"
296
-** (eastward). Either no timezone suffix or "Z" means UTC.
309
+** DATETIME may be in any of usual formats, including "now",
310
+** "YYYY-MM-DDTHH:MM:SS.SSS", "YYYYMMDDHHMM", and others.
297311
*/
298312
void finfo_page(void){
299313
Stmt q;
300314
const char *zFilename = PD("name","");
301315
char zPrevDate[20];
302316
const char *zA;
303317
const char *zB;
304318
int n;
305
- int baseCheckin;
306
- int origCheckin = 0;
319
+ int ridFrom;
320
+ int ridTo = 0;
321
+ int ridCi = 0;
322
+ const char *zCI = P("ci");
307323
int fnid;
308324
Blob title;
309325
Blob sql;
310326
HQuery url;
311327
GraphContext *pGraph;
@@ -317,18 +333,22 @@
317333
int iTableId = timeline_tableid();
318334
int tmFlags = 0; /* Viewing mode */
319335
const char *zStyle; /* Viewing mode name */
320336
const char *zMark; /* Mark this version of the file */
321337
int selRid = 0; /* RID of the marked file version */
338
+ int mxfnid; /* Maximum filename.fnid value */
322339
323340
login_check_credentials();
324341
if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
325342
fnid = db_int(0, "SELECT fnid FROM filename WHERE name=%Q", zFilename);
343
+ ridCi = zCI ? name_to_rid_www("ci") : 0;
326344
if( fnid==0 ){
327345
style_header("No such file");
346
+ }else if( ridCi==0 ){
347
+ style_header("All files named \"%s\"", zFilename);
328348
}else{
329
- style_header("History for %s", zFilename);
349
+ style_header("History of %s of %s",zFilename, zCI);
330350
}
331351
login_anonymous_available();
332352
tmFlags = timeline_ss_submenu();
333353
if( tmFlags & TIMELINE_COLUMNAR ){
334354
zStyle = "Columnar";
@@ -342,11 +362,11 @@
342362
zStyle = "Modern";
343363
}
344364
url_initialize(&url, "finfo");
345365
if( brBg ) url_add_parameter(&url, "brbg", 0);
346366
if( uBg ) url_add_parameter(&url, "ubg", 0);
347
- baseCheckin = name_to_rid_www("ci");
367
+ ridFrom = name_to_rid_www("from");
348368
zPrevDate[0] = 0;
349369
cookie_render();
350370
if( fnid==0 ){
351371
@ No such file: %h(zFilename)
352372
style_footer();
@@ -353,54 +373,105 @@
353373
return;
354374
}
355375
if( g.perm.Admin ){
356376
style_submenu_element("MLink Table", "%R/mlink?name=%t", zFilename);
357377
}
358
- if( baseCheckin ){
359
- if( P("orig")!=0 ){
360
- origCheckin = name_to_typed_rid(P("orig"),"ci");
361
- path_shortest_stored_in_ancestor_table(origCheckin, baseCheckin);
378
+ if( ridFrom ){
379
+ if( P("to")!=0 ){
380
+ ridTo = name_to_typed_rid(P("to"),"ci");
381
+ path_shortest_stored_in_ancestor_table(ridFrom,ridTo);
362382
}else{
363
- compute_direct_ancestors(baseCheckin);
383
+ compute_direct_ancestors(ridFrom);
364384
}
365385
}
366386
url_add_parameter(&url, "name", zFilename);
367387
blob_zero(&sql);
388
+ if( ridCi ){
389
+ /* If we will be tracking changes across renames, some extra temp
390
+ ** tables (implemented as CTEs) are required */
391
+ blob_append_sql(&sql,
392
+ /* The clade(fid,fnid) table is the set of all (fid,fnid) pairs
393
+ ** that should participate in the output. Clade is computed by
394
+ ** walking the graph of mlink edges.
395
+ */
396
+ "WITH RECURSIVE clade(fid,fnid) AS (\n"
397
+ " SELECT blob.rid, %d FROM blob\n" /* %d is fnid */
398
+ " WHERE blob.uuid=(SELECT uuid FROM files_of_checkin(%Q)"
399
+ " WHERE filename=%Q)\n" /* %Q is the filename */
400
+ " UNION\n"
401
+ " SELECT mlink.fid, mlink.fnid\n"
402
+ " FROM clade, mlink\n"
403
+ " WHERE clade.fid=mlink.pid\n"
404
+ " AND ((mlink.pfnid=0 AND mlink.fnid=clade.fnid)\n"
405
+ " OR mlink.pfnid=clade.fnid)\n"
406
+ " AND (mlink.fid>0 OR NOT EXISTS(SELECT 1 FROM mlink AS mx"
407
+ " WHERE mx.mid=mlink.mid AND mx.pid=mlink.pid"
408
+ " AND mx.fid>0 AND mx.pfnid=mlink.fnid))\n"
409
+ " UNION\n"
410
+ " SELECT mlink.pid,"
411
+ " CASE WHEN mlink.pfnid>0 THEN mlink.pfnid ELSE mlink.fnid END\n"
412
+ " FROM clade, mlink\n"
413
+ " WHERE mlink.pid>0\n"
414
+ " AND mlink.fid=clade.fid\n"
415
+ " AND mlink.fnid=clade.fnid\n"
416
+ ")\n",
417
+ fnid, zCI, zFilename
418
+ );
419
+ }else{
420
+ /* This is the case for all files with a given name. We will still
421
+ ** create a "clade(fid,fnid)" table that identifies all participants
422
+ ** in the output graph, so that subsequent queries can all be the same,
423
+ ** but in this case the clade table is much simpler, being just a
424
+ ** single direct query against the mlink table.
425
+ */
426
+ blob_append_sql(&sql,
427
+ "WITH clade(fid,fnid) AS (\n"
428
+ " SELECT DISTINCT fid, %d\n"
429
+ " FROM mlink\n"
430
+ " WHERE fnid=%d)",
431
+ fnid, fnid
432
+ );
433
+ }
368434
blob_append_sql(&sql,
369
- "SELECT"
370
- " datetime(min(event.mtime),toLocal())," /* Date of change */
371
- " coalesce(event.ecomment, event.comment)," /* Check-in comment */
372
- " coalesce(event.euser, event.user)," /* User who made chng */
373
- " mlink.pid," /* Parent file rid */
374
- " mlink.fid," /* File rid */
375
- " (SELECT uuid FROM blob WHERE rid=mlink.pid)," /* Parent file hash */
376
- " blob.uuid," /* Current file hash */
377
- " (SELECT uuid FROM blob WHERE rid=mlink.mid)," /* Check-in hash */
378
- " event.bgcolor," /* Background color */
379
- " (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
380
- " AND tagxref.rid=mlink.mid)," /* Branchname */
381
- " mlink.mid," /* check-in ID */
382
- " mlink.pfnid," /* Previous filename */
383
- " blob.size" /* File size */
384
- " FROM mlink, event, blob"
385
- " WHERE mlink.fnid=%d"
386
- " AND event.objid=mlink.mid"
387
- " AND mlink.fid=blob.rid",
388
- TAG_BRANCH, fnid
435
+ "SELECT\n"
436
+ " datetime(min(event.mtime),toLocal()),\n" /* Date of change */
437
+ " coalesce(event.ecomment, event.comment),\n" /* Check-in comment */
438
+ " coalesce(event.euser, event.user),\n" /* User who made chng */
439
+ " mlink.pid,\n" /* Parent file rid */
440
+ " mlink.fid,\n" /* File rid */
441
+ " (SELECT uuid FROM blob WHERE rid=mlink.pid),\n" /* Parent file hash */
442
+ " blob.uuid,\n" /* Current file hash */
443
+ " (SELECT uuid FROM blob WHERE rid=mlink.mid),\n" /* Check-in hash */
444
+ " event.bgcolor,\n" /* Background color */
445
+ " (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
446
+ " AND tagxref.rid=mlink.mid),\n" /* Branchname */
447
+ " mlink.mid,\n" /* check-in ID */
448
+ " mlink.pfnid,\n" /* Previous filename */
449
+ " blob.size,\n" /* File size */
450
+ " mlink.fnid,\n" /* Current filename ID */
451
+ " filename.name\n" /* Current filename */
452
+ "FROM clade CROSS JOIN mlink, event"
453
+ " LEFT JOIN blob ON blob.rid=clade.fid"
454
+ " LEFT JOIN filename ON filename.fnid=clade.fnid\n"
455
+ "WHERE mlink.fnid=clade.fnid AND mlink.fid=clade.fid\n"
456
+ " AND event.objid=mlink.mid\n",
457
+ TAG_BRANCH
389458
);
390459
if( (zA = P("a"))!=0 ){
391
- blob_append_sql(&sql, " AND event.mtime>=julianday('%q')", zA);
460
+ blob_append_sql(&sql, " AND event.mtime>=%.16g\n",
461
+ symbolic_name_to_mtime(zA,0));
392462
url_add_parameter(&url, "a", zA);
393463
}
394464
if( (zB = P("b"))!=0 ){
395
- blob_append_sql(&sql, " AND event.mtime<=julianday('%q')", zB);
465
+ blob_append_sql(&sql, " AND event.mtime<=%.16g\n",
466
+ symbolic_name_to_mtime(zB,0));
396467
url_add_parameter(&url, "b", zB);
397468
}
398
- if( baseCheckin ){
469
+ if( ridFrom ){
399470
blob_append_sql(&sql,
400
- " AND mlink.mid IN (SELECT rid FROM ancestor)"
401
- " GROUP BY mlink.fid"
471
+ " AND mlink.mid IN (SELECT rid FROM ancestor)\n"
472
+ "GROUP BY mlink.fid\n"
402473
);
403474
}else{
404475
/* We only want each version of a file to appear on the graph once,
405476
** at its earliest appearance. All the other times that it gets merged
406477
** into this or that branch can be ignored. An exception is for when
@@ -408,33 +479,35 @@
408479
** is deleted in multiple places, we want to show each deletion, so
409480
** use a "fake fid" which is derived from the parent-fid for grouping.
410481
** The same fake-fid must be used on the graph.
411482
*/
412483
blob_append_sql(&sql,
413
- " GROUP BY"
414
- " CASE WHEN mlink.fid>0 THEN mlink.fid ELSE mlink.pid+1000000000 END"
484
+ "GROUP BY"
485
+ " CASE WHEN mlink.fid>0 THEN mlink.fid ELSE mlink.pid+1000000000 END,"
486
+ " mlink.fnid\n"
415487
);
416488
}
417
- blob_append_sql(&sql, " ORDER BY event.mtime DESC /*sort*/");
489
+ blob_append_sql(&sql, "ORDER BY event.mtime DESC");
418490
if( (n = atoi(PD("n","0")))>0 ){
419491
blob_append_sql(&sql, " LIMIT %d", n);
420492
url_add_parameter(&url, "n", P("n"));
421493
}
494
+ blob_append_sql(&sql, " /*sort*/\n");
422495
db_prepare(&q, "%s", blob_sql_text(&sql));
423496
if( P("showsql")!=0 ){
424
- @ <p>SQL: %h(blob_str(&sql))</p>
497
+ @ <p>SQL: <blockquote><pre>%h(blob_str(&sql))</blockquote></pre>
425498
}
426499
zMark = P("m");
427500
if( zMark ){
428501
selRid = symbolic_name_to_rid(zMark, "*");
429502
}
430503
blob_reset(&sql);
431504
blob_zero(&title);
432
- if( baseCheckin ){
433
- char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", baseCheckin);
505
+ if( ridFrom ){
506
+ char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", ridFrom);
434507
char *zLink = href("%R/info/%!S", zUuid);
435
- if( origCheckin ){
508
+ if( ridTo ){
436509
blob_appendf(&title, "Changes to file ");
437510
}else if( n>0 ){
438511
blob_appendf(&title, "First %d ancestors of file ", n);
439512
}else{
440513
blob_appendf(&title, "Ancestors of file ");
@@ -441,20 +514,26 @@
441514
}
442515
blob_appendf(&title,"%z%h</a>",
443516
href("%R/file?name=%T&ci=%!S", zFilename, zUuid),
444517
zFilename);
445518
if( fShowId ) blob_appendf(&title, " (%d)", fnid);
446
- blob_append(&title, origCheckin ? " between " : " from ", -1);
519
+ blob_append(&title, ridTo ? " between " : " from ", -1);
447520
blob_appendf(&title, "check-in %z%S</a>", zLink, zUuid);
448
- if( fShowId ) blob_appendf(&title, " (%d)", baseCheckin);
521
+ if( fShowId ) blob_appendf(&title, " (%d)", ridFrom);
449522
fossil_free(zUuid);
450
- if( origCheckin ){
451
- zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", origCheckin);
523
+ if( ridTo ){
524
+ zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", ridTo);
452525
zLink = href("%R/info/%!S", zUuid);
453526
blob_appendf(&title, " and check-in %z%S</a>", zLink, zUuid);
454527
fossil_free(zUuid);
455528
}
529
+ }else if( ridCi ){
530
+ blob_appendf(&title, "History of the file that is called ");
531
+ hyperlinked_path(zFilename, &title, 0, "tree", "", LINKPATH_FILE);
532
+ if( fShowId ) blob_appendf(&title, " (%d)", fnid);
533
+ blob_appendf(&title, " at checkin %z%h</a>",
534
+ href("%R/info?name=%t",zCI), zCI);
456535
}else{
457536
blob_appendf(&title, "History for ");
458537
hyperlinked_path(zFilename, &title, 0, "tree", "", LINKPATH_FILE);
459538
if( fShowId ) blob_appendf(&title, " (%d)", fnid);
460539
}
@@ -463,22 +542,25 @@
463542
}
464543
@ <h2>%b(&title)</h2>
465544
blob_reset(&title);
466545
pGraph = graph_init();
467546
@ <table id="timelineTable%d(iTableId)" class="timelineTable">
468
- if( baseCheckin ){
547
+ mxfnid = db_int(0, "SELECT max(fnid) FROM filename");
548
+ if( ridFrom ){
469549
db_prepare(&qparent,
470
- "SELECT DISTINCT pid FROM mlink"
550
+ "SELECT DISTINCT pid*%d+CASE WHEN pfnid>0 THEN pfnid ELSE fnid END"
551
+ " FROM mlink"
471552
" WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
472553
" AND pmid IN (SELECT rid FROM ancestor)"
473
- " ORDER BY isaux /*sort*/"
554
+ " ORDER BY isaux /*sort*/", mxfnid+1
474555
);
475556
}else{
476557
db_prepare(&qparent,
477
- "SELECT DISTINCT pid FROM mlink"
558
+ "SELECT DISTINCT pid*%d+CASE WHEN pfnid>0 THEN pfnid ELSE fnid END"
559
+ " FROM mlink"
478560
" WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
479
- " ORDER BY isaux /*sort*/"
561
+ " ORDER BY isaux /*sort*/", mxfnid+1
480562
);
481563
}
482564
while( db_step(&q)==SQLITE_ROW ){
483565
const char *zDate = db_column_text(&q, 0);
484566
const char *zCom = db_column_text(&q, 1);
@@ -491,32 +573,35 @@
491573
const char *zBgClr = db_column_text(&q, 8);
492574
const char *zBr = db_column_text(&q, 9);
493575
int fmid = db_column_int(&q, 10);
494576
int pfnid = db_column_int(&q, 11);
495577
int szFile = db_column_int(&q, 12);
578
+ int fnid = db_column_int(&q, 13);
579
+ const char *zFName = db_column_text(&q,14);
496580
int gidx;
497581
char zTime[10];
498582
int nParent = 0;
499
- int aParent[GR_MAX_RAIL];
583
+ GraphRowId aParent[GR_MAX_RAIL];
500584
501585
db_bind_int(&qparent, ":fid", frid);
502586
db_bind_int(&qparent, ":mid", fmid);
503587
db_bind_int(&qparent, ":fnid", fnid);
504588
while( db_step(&qparent)==SQLITE_ROW && nParent<count(aParent) ){
505
- aParent[nParent] = db_column_int(&qparent, 0);
589
+ aParent[nParent] = db_column_int64(&qparent, 0);
506590
nParent++;
507591
}
508592
db_reset(&qparent);
509593
if( zBr==0 ) zBr = "trunk";
510594
if( uBg ){
511595
zBgClr = hash_color(zUser);
512596
}else if( brBg || zBgClr==0 || zBgClr[0]==0 ){
513597
zBgClr = strcmp(zBr,"trunk")==0 ? "" : hash_color(zBr);
514598
}
515
- gidx = graph_add_row(pGraph, frid>0 ? frid : fpid+1000000000,
516
- nParent, 0, aParent, zBr, zBgClr,
517
- zUuid, 0);
599
+ gidx = graph_add_row(pGraph,
600
+ frid>0 ? (GraphRowId)frid*(mxfnid+1)+fnid : fpid+1000000000,
601
+ nParent, 0, aParent, zBr, zBgClr,
602
+ zUuid, 0);
518603
if( strncmp(zDate, zPrevDate, 10) ){
519604
sqlite3_snprintf(sizeof(zPrevDate), zPrevDate, "%.10s", zDate);
520605
@ <tr><td>
521606
@ <div class="divider timelineDate">%s(zPrevDate)</div>
522607
@ </td><td></td><td></td></tr>
@@ -527,11 +612,11 @@
527612
@ <tr class='timelineSelected'>
528613
}else{
529614
@ <tr>
530615
}
531616
@ <td class="timelineTime">\
532
- @ %z(href("%R/file?name=%T&ci=%!S",zFilename,zCkin))%s(zTime)</a></td>
617
+ @ %z(href("%R/file?name=%T&ci=%!S",zFName,zCkin))%s(zTime)</a></td>
533618
@ <td class="timelineGraph"><div id="m%d(gidx)" class="tl-nodemark"></div>
534619
@ </td>
535620
if( zBgClr && zBgClr[0] ){
536621
@ <td class="timeline%s(zStyle)Cell" id='mc%d(gidx)'>
537622
}else{
@@ -539,10 +624,35 @@
539624
}
540625
if( tmFlags & TIMELINE_COMPACT ){
541626
@ <span class='timelineCompactComment' data-id='%d(frid)'>
542627
}else{
543628
@ <span class='timeline%s(zStyle)Comment'>
629
+ if( pfnid ){
630
+ char *zPrevName = db_text(0,"SELECT name FROM filename WHERE fnid=%d",
631
+ pfnid);
632
+ @ <b>Renamed</b> %h(zPrevName) &rarr; %h(zFName).
633
+ fossil_free(zPrevName);
634
+ }
635
+ if( zUuid && ridTo==0 && nParent==0 ){
636
+ @ <b>Added:</b>
637
+ }
638
+ if( zUuid==0 ){
639
+ char *zNewName;
640
+ zNewName = db_text(0,
641
+ "SELECT name FROM filename WHERE fnid = "
642
+ " (SELECT fnid FROM mlink"
643
+ " WHERE mid=%d"
644
+ " AND pfnid IN (SELECT fnid FROM filename WHERE name=%Q))",
645
+ fmid, zFName);
646
+ if( zNewName ){
647
+ @ <b>Renamed</b> to
648
+ @ %z(href("%R/finfo?name=%t",zNewName))%h(zNewName)</a>.
649
+ fossil_free(zNewName);
650
+ }else{
651
+ @ <b>Deleted:</b>
652
+ }
653
+ }
544654
if( (tmFlags & TIMELINE_VERBOSE)!=0 && zUuid ){
545655
hyperlink_to_version(zUuid);
546656
@ part of check-in \
547657
hyperlink_to_version(zCkin);
548658
}
@@ -564,11 +674,12 @@
564674
cgi_printf("<span class='clutter' id='detail-%d'>",frid);
565675
}
566676
cgi_printf("<span class='timeline%sDetail'>", zStyle);
567677
if( tmFlags & (TIMELINE_COMPACT|TIMELINE_VERBOSE) ) cgi_printf("(");
568678
if( zUuid && (tmFlags & TIMELINE_VERBOSE)==0 ){
569
- @ file:&nbsp;%z(href("%R/file?name=%T&ci=%!S",zFilename,zCkin))[%S(zUuid)]</a>
679
+ @ file:&nbsp;%z(href("%R/file?name=%T&ci=%!S",zFName,zCkin))\
680
+ @ [%S(zUuid)]</a>
570681
if( fShowId ){
571682
int srcId = delta_source_rid(frid);
572683
if( srcId>0 ){
573684
@ id:&nbsp;%d(frid)&larr;%d(srcId)
574685
}else{
@@ -587,63 +698,41 @@
587698
if( tmFlags & (TIMELINE_COMPACT|TIMELINE_VERBOSE) ){
588699
@ size:&nbsp;%d(szFile))
589700
}else{
590701
@ size:&nbsp;%d(szFile)
591702
}
592
- if( zUuid && origCheckin==0 ){
593
- if( nParent==0 ){
594
- @ <b>Added</b>
595
- }else if( pfnid ){
596
- char *zPrevName = db_text(0,"SELECT name FROM filename WHERE fnid=%d",
597
- pfnid);
598
- @ <b>Renamed</b> from
599
- @ %z(href("%R/finfo?name=%t", zPrevName))%h(zPrevName)</a>
600
- }
601
- }
602
- if( zUuid==0 ){
603
- char *zNewName;
604
- zNewName = db_text(0,
605
- "SELECT name FROM filename WHERE fnid = "
606
- " (SELECT fnid FROM mlink"
607
- " WHERE mid=%d"
608
- " AND pfnid IN (SELECT fnid FROM filename WHERE name=%Q))",
609
- fmid, zFilename);
610
- if( zNewName ){
611
- @ <b>Renamed</b> to
612
- @ %z(href("%R/finfo?name=%t",zNewName))%h(zNewName)</a>
613
- fossil_free(zNewName);
614
- }else{
615
- @ <b>Deleted</b>
616
- }
617
- }
618703
if( g.perm.Hyperlink && zUuid ){
619
- const char *z = zFilename;
704
+ const char *z = zFName;
620705
@ <span id='links-%d(frid)'><span class='timelineExtraLinks'>
621706
@ %z(href("%R/annotate?filename=%h&checkin=%s",z,zCkin))
622707
@ [annotate]</a>
623708
@ %z(href("%R/blame?filename=%h&checkin=%s",z,zCkin))
624709
@ [blame]</a>
625710
@ %z(href("%R/timeline?n=all&uf=%!S",zUuid))[check-ins&nbsp;using]</a>
626711
if( fpid>0 ){
627712
@ %z(href("%R/fdiff?v1=%!S&v2=%!S",zPUuid,zUuid))[diff]</a>
628713
}
629
- if( fileedit_is_editable(zFilename) ){
630
- @ %z(href("%R/fileedit?filename=%T&checkin=%!S",zFilename,zCkin))[edit]</a>
714
+ if( fileedit_is_editable(zFName) ){
715
+ @ %z(href("%R/fileedit?filename=%T&checkin=%!S",zFName,zCkin))\
716
+ @ [edit]</a>
631717
}
632718
@ </span></span>
633719
}
634720
if( fDebug & FINFO_DEBUG_MLINK ){
635721
int ii;
636722
char *zAncLink;
637
- @ <br />fid=%d(frid) pid=%d(fpid) mid=%d(fmid)
723
+ @ <br />fid=%d(frid) \
724
+ @ graph-id=%lld(frid>0?(GraphRowId)frid*(mxfnid+1)+fnid:fpid+1000000000) \
725
+ @ pid=%d(fpid) mid=%d(fmid) fnid=%d(fnid) \
726
+ @ pfnid=%d(pfnid) mxfnid=%d(mxfnid)
638727
if( nParent>0 ){
639
- @ parents=%d(aParent[0])
728
+ @ parents=%lld(aParent[0])
640729
for(ii=1; ii<nParent; ii++){
641
- @ %d(aParent[ii])
730
+ @ %lld(aParent[ii])
642731
}
643732
}
644
- zAncLink = href("%R/finfo?name=%T&ci=%!S&debug=1",zFilename,zCkin);
733
+ zAncLink = href("%R/finfo?name=%T&from=%!S&debug=1",zFName,zCkin);
645734
@ %z(zAncLink)[ancestry]</a>
646735
}
647736
tag_private_status(frid);
648737
/* End timelineDetail */
649738
if( tmFlags & TIMELINE_COMPACT ){
650739
--- src/finfo.c
+++ src/finfo.c
@@ -271,41 +271,57 @@
271 /* Values for the debug= query parameter to finfo */
272 #define FINFO_DEBUG_MLINK 0x01
273
274 /*
275 ** WEBPAGE: finfo
276 ** URL: /finfo?name=FILENAME
 
 
277 **
278 ** Show the change history for a single file.
 
 
 
 
 
 
 
279 **
280 ** Additional query parameters:
281 **
282 ** a=DATETIME Only show changes after DATETIME
283 ** b=DATETIME Only show changes before DATETIME
284 ** m=HASH Mark this particular file version
285 ** n=NUM Show the first NUM changes only
286 ** brbg Background color by branch name
287 ** ubg Background color by user name
288 ** ci=HASH Ancestors of a particular check-in
289 ** orig=HASH If both ci and orig are supplied, only show those
290 ** changes on a direct path from orig to ci.
291 ** showid Show RID values for debugging
 
 
 
 
 
 
 
292 **
293 ** DATETIME may be "now" or "YYYY-MM-DDTHH:MM:SS.SSS". If in
294 ** year-month-day form, it may be truncated, and it may also name a
295 ** timezone offset from UTC as "-HH:MM" (westward) or "+HH:MM"
296 ** (eastward). Either no timezone suffix or "Z" means UTC.
297 */
298 void finfo_page(void){
299 Stmt q;
300 const char *zFilename = PD("name","");
301 char zPrevDate[20];
302 const char *zA;
303 const char *zB;
304 int n;
305 int baseCheckin;
306 int origCheckin = 0;
 
 
307 int fnid;
308 Blob title;
309 Blob sql;
310 HQuery url;
311 GraphContext *pGraph;
@@ -317,18 +333,22 @@
317 int iTableId = timeline_tableid();
318 int tmFlags = 0; /* Viewing mode */
319 const char *zStyle; /* Viewing mode name */
320 const char *zMark; /* Mark this version of the file */
321 int selRid = 0; /* RID of the marked file version */
 
322
323 login_check_credentials();
324 if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
325 fnid = db_int(0, "SELECT fnid FROM filename WHERE name=%Q", zFilename);
 
326 if( fnid==0 ){
327 style_header("No such file");
 
 
328 }else{
329 style_header("History for %s", zFilename);
330 }
331 login_anonymous_available();
332 tmFlags = timeline_ss_submenu();
333 if( tmFlags & TIMELINE_COLUMNAR ){
334 zStyle = "Columnar";
@@ -342,11 +362,11 @@
342 zStyle = "Modern";
343 }
344 url_initialize(&url, "finfo");
345 if( brBg ) url_add_parameter(&url, "brbg", 0);
346 if( uBg ) url_add_parameter(&url, "ubg", 0);
347 baseCheckin = name_to_rid_www("ci");
348 zPrevDate[0] = 0;
349 cookie_render();
350 if( fnid==0 ){
351 @ No such file: %h(zFilename)
352 style_footer();
@@ -353,54 +373,105 @@
353 return;
354 }
355 if( g.perm.Admin ){
356 style_submenu_element("MLink Table", "%R/mlink?name=%t", zFilename);
357 }
358 if( baseCheckin ){
359 if( P("orig")!=0 ){
360 origCheckin = name_to_typed_rid(P("orig"),"ci");
361 path_shortest_stored_in_ancestor_table(origCheckin, baseCheckin);
362 }else{
363 compute_direct_ancestors(baseCheckin);
364 }
365 }
366 url_add_parameter(&url, "name", zFilename);
367 blob_zero(&sql);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
368 blob_append_sql(&sql,
369 "SELECT"
370 " datetime(min(event.mtime),toLocal())," /* Date of change */
371 " coalesce(event.ecomment, event.comment)," /* Check-in comment */
372 " coalesce(event.euser, event.user)," /* User who made chng */
373 " mlink.pid," /* Parent file rid */
374 " mlink.fid," /* File rid */
375 " (SELECT uuid FROM blob WHERE rid=mlink.pid)," /* Parent file hash */
376 " blob.uuid," /* Current file hash */
377 " (SELECT uuid FROM blob WHERE rid=mlink.mid)," /* Check-in hash */
378 " event.bgcolor," /* Background color */
379 " (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
380 " AND tagxref.rid=mlink.mid)," /* Branchname */
381 " mlink.mid," /* check-in ID */
382 " mlink.pfnid," /* Previous filename */
383 " blob.size" /* File size */
384 " FROM mlink, event, blob"
385 " WHERE mlink.fnid=%d"
386 " AND event.objid=mlink.mid"
387 " AND mlink.fid=blob.rid",
388 TAG_BRANCH, fnid
 
 
 
389 );
390 if( (zA = P("a"))!=0 ){
391 blob_append_sql(&sql, " AND event.mtime>=julianday('%q')", zA);
 
392 url_add_parameter(&url, "a", zA);
393 }
394 if( (zB = P("b"))!=0 ){
395 blob_append_sql(&sql, " AND event.mtime<=julianday('%q')", zB);
 
396 url_add_parameter(&url, "b", zB);
397 }
398 if( baseCheckin ){
399 blob_append_sql(&sql,
400 " AND mlink.mid IN (SELECT rid FROM ancestor)"
401 " GROUP BY mlink.fid"
402 );
403 }else{
404 /* We only want each version of a file to appear on the graph once,
405 ** at its earliest appearance. All the other times that it gets merged
406 ** into this or that branch can be ignored. An exception is for when
@@ -408,33 +479,35 @@
408 ** is deleted in multiple places, we want to show each deletion, so
409 ** use a "fake fid" which is derived from the parent-fid for grouping.
410 ** The same fake-fid must be used on the graph.
411 */
412 blob_append_sql(&sql,
413 " GROUP BY"
414 " CASE WHEN mlink.fid>0 THEN mlink.fid ELSE mlink.pid+1000000000 END"
 
415 );
416 }
417 blob_append_sql(&sql, " ORDER BY event.mtime DESC /*sort*/");
418 if( (n = atoi(PD("n","0")))>0 ){
419 blob_append_sql(&sql, " LIMIT %d", n);
420 url_add_parameter(&url, "n", P("n"));
421 }
 
422 db_prepare(&q, "%s", blob_sql_text(&sql));
423 if( P("showsql")!=0 ){
424 @ <p>SQL: %h(blob_str(&sql))</p>
425 }
426 zMark = P("m");
427 if( zMark ){
428 selRid = symbolic_name_to_rid(zMark, "*");
429 }
430 blob_reset(&sql);
431 blob_zero(&title);
432 if( baseCheckin ){
433 char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", baseCheckin);
434 char *zLink = href("%R/info/%!S", zUuid);
435 if( origCheckin ){
436 blob_appendf(&title, "Changes to file ");
437 }else if( n>0 ){
438 blob_appendf(&title, "First %d ancestors of file ", n);
439 }else{
440 blob_appendf(&title, "Ancestors of file ");
@@ -441,20 +514,26 @@
441 }
442 blob_appendf(&title,"%z%h</a>",
443 href("%R/file?name=%T&ci=%!S", zFilename, zUuid),
444 zFilename);
445 if( fShowId ) blob_appendf(&title, " (%d)", fnid);
446 blob_append(&title, origCheckin ? " between " : " from ", -1);
447 blob_appendf(&title, "check-in %z%S</a>", zLink, zUuid);
448 if( fShowId ) blob_appendf(&title, " (%d)", baseCheckin);
449 fossil_free(zUuid);
450 if( origCheckin ){
451 zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", origCheckin);
452 zLink = href("%R/info/%!S", zUuid);
453 blob_appendf(&title, " and check-in %z%S</a>", zLink, zUuid);
454 fossil_free(zUuid);
455 }
 
 
 
 
 
 
456 }else{
457 blob_appendf(&title, "History for ");
458 hyperlinked_path(zFilename, &title, 0, "tree", "", LINKPATH_FILE);
459 if( fShowId ) blob_appendf(&title, " (%d)", fnid);
460 }
@@ -463,22 +542,25 @@
463 }
464 @ <h2>%b(&title)</h2>
465 blob_reset(&title);
466 pGraph = graph_init();
467 @ <table id="timelineTable%d(iTableId)" class="timelineTable">
468 if( baseCheckin ){
 
469 db_prepare(&qparent,
470 "SELECT DISTINCT pid FROM mlink"
 
471 " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
472 " AND pmid IN (SELECT rid FROM ancestor)"
473 " ORDER BY isaux /*sort*/"
474 );
475 }else{
476 db_prepare(&qparent,
477 "SELECT DISTINCT pid FROM mlink"
 
478 " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
479 " ORDER BY isaux /*sort*/"
480 );
481 }
482 while( db_step(&q)==SQLITE_ROW ){
483 const char *zDate = db_column_text(&q, 0);
484 const char *zCom = db_column_text(&q, 1);
@@ -491,32 +573,35 @@
491 const char *zBgClr = db_column_text(&q, 8);
492 const char *zBr = db_column_text(&q, 9);
493 int fmid = db_column_int(&q, 10);
494 int pfnid = db_column_int(&q, 11);
495 int szFile = db_column_int(&q, 12);
 
 
496 int gidx;
497 char zTime[10];
498 int nParent = 0;
499 int aParent[GR_MAX_RAIL];
500
501 db_bind_int(&qparent, ":fid", frid);
502 db_bind_int(&qparent, ":mid", fmid);
503 db_bind_int(&qparent, ":fnid", fnid);
504 while( db_step(&qparent)==SQLITE_ROW && nParent<count(aParent) ){
505 aParent[nParent] = db_column_int(&qparent, 0);
506 nParent++;
507 }
508 db_reset(&qparent);
509 if( zBr==0 ) zBr = "trunk";
510 if( uBg ){
511 zBgClr = hash_color(zUser);
512 }else if( brBg || zBgClr==0 || zBgClr[0]==0 ){
513 zBgClr = strcmp(zBr,"trunk")==0 ? "" : hash_color(zBr);
514 }
515 gidx = graph_add_row(pGraph, frid>0 ? frid : fpid+1000000000,
516 nParent, 0, aParent, zBr, zBgClr,
517 zUuid, 0);
 
518 if( strncmp(zDate, zPrevDate, 10) ){
519 sqlite3_snprintf(sizeof(zPrevDate), zPrevDate, "%.10s", zDate);
520 @ <tr><td>
521 @ <div class="divider timelineDate">%s(zPrevDate)</div>
522 @ </td><td></td><td></td></tr>
@@ -527,11 +612,11 @@
527 @ <tr class='timelineSelected'>
528 }else{
529 @ <tr>
530 }
531 @ <td class="timelineTime">\
532 @ %z(href("%R/file?name=%T&ci=%!S",zFilename,zCkin))%s(zTime)</a></td>
533 @ <td class="timelineGraph"><div id="m%d(gidx)" class="tl-nodemark"></div>
534 @ </td>
535 if( zBgClr && zBgClr[0] ){
536 @ <td class="timeline%s(zStyle)Cell" id='mc%d(gidx)'>
537 }else{
@@ -539,10 +624,35 @@
539 }
540 if( tmFlags & TIMELINE_COMPACT ){
541 @ <span class='timelineCompactComment' data-id='%d(frid)'>
542 }else{
543 @ <span class='timeline%s(zStyle)Comment'>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
544 if( (tmFlags & TIMELINE_VERBOSE)!=0 && zUuid ){
545 hyperlink_to_version(zUuid);
546 @ part of check-in \
547 hyperlink_to_version(zCkin);
548 }
@@ -564,11 +674,12 @@
564 cgi_printf("<span class='clutter' id='detail-%d'>",frid);
565 }
566 cgi_printf("<span class='timeline%sDetail'>", zStyle);
567 if( tmFlags & (TIMELINE_COMPACT|TIMELINE_VERBOSE) ) cgi_printf("(");
568 if( zUuid && (tmFlags & TIMELINE_VERBOSE)==0 ){
569 @ file:&nbsp;%z(href("%R/file?name=%T&ci=%!S",zFilename,zCkin))[%S(zUuid)]</a>
 
570 if( fShowId ){
571 int srcId = delta_source_rid(frid);
572 if( srcId>0 ){
573 @ id:&nbsp;%d(frid)&larr;%d(srcId)
574 }else{
@@ -587,63 +698,41 @@
587 if( tmFlags & (TIMELINE_COMPACT|TIMELINE_VERBOSE) ){
588 @ size:&nbsp;%d(szFile))
589 }else{
590 @ size:&nbsp;%d(szFile)
591 }
592 if( zUuid && origCheckin==0 ){
593 if( nParent==0 ){
594 @ <b>Added</b>
595 }else if( pfnid ){
596 char *zPrevName = db_text(0,"SELECT name FROM filename WHERE fnid=%d",
597 pfnid);
598 @ <b>Renamed</b> from
599 @ %z(href("%R/finfo?name=%t", zPrevName))%h(zPrevName)</a>
600 }
601 }
602 if( zUuid==0 ){
603 char *zNewName;
604 zNewName = db_text(0,
605 "SELECT name FROM filename WHERE fnid = "
606 " (SELECT fnid FROM mlink"
607 " WHERE mid=%d"
608 " AND pfnid IN (SELECT fnid FROM filename WHERE name=%Q))",
609 fmid, zFilename);
610 if( zNewName ){
611 @ <b>Renamed</b> to
612 @ %z(href("%R/finfo?name=%t",zNewName))%h(zNewName)</a>
613 fossil_free(zNewName);
614 }else{
615 @ <b>Deleted</b>
616 }
617 }
618 if( g.perm.Hyperlink && zUuid ){
619 const char *z = zFilename;
620 @ <span id='links-%d(frid)'><span class='timelineExtraLinks'>
621 @ %z(href("%R/annotate?filename=%h&checkin=%s",z,zCkin))
622 @ [annotate]</a>
623 @ %z(href("%R/blame?filename=%h&checkin=%s",z,zCkin))
624 @ [blame]</a>
625 @ %z(href("%R/timeline?n=all&uf=%!S",zUuid))[check-ins&nbsp;using]</a>
626 if( fpid>0 ){
627 @ %z(href("%R/fdiff?v1=%!S&v2=%!S",zPUuid,zUuid))[diff]</a>
628 }
629 if( fileedit_is_editable(zFilename) ){
630 @ %z(href("%R/fileedit?filename=%T&checkin=%!S",zFilename,zCkin))[edit]</a>
 
631 }
632 @ </span></span>
633 }
634 if( fDebug & FINFO_DEBUG_MLINK ){
635 int ii;
636 char *zAncLink;
637 @ <br />fid=%d(frid) pid=%d(fpid) mid=%d(fmid)
 
 
 
638 if( nParent>0 ){
639 @ parents=%d(aParent[0])
640 for(ii=1; ii<nParent; ii++){
641 @ %d(aParent[ii])
642 }
643 }
644 zAncLink = href("%R/finfo?name=%T&ci=%!S&debug=1",zFilename,zCkin);
645 @ %z(zAncLink)[ancestry]</a>
646 }
647 tag_private_status(frid);
648 /* End timelineDetail */
649 if( tmFlags & TIMELINE_COMPACT ){
650
--- src/finfo.c
+++ src/finfo.c
@@ -271,41 +271,57 @@
271 /* Values for the debug= query parameter to finfo */
272 #define FINFO_DEBUG_MLINK 0x01
273
274 /*
275 ** WEBPAGE: finfo
276 ** Usage:
277 ** * /finfo?name=FILENAME
278 ** * /finfo?name=FILENAME&ci=HASH
279 **
280 ** Show the change history for a single file. The name=FILENAME query
281 ** parameter gives the filename and is a required parameter. If the
282 ** ci=HASH parameter is also supplied, then the FILENAME,HASH combination
283 ** identifies a particular version of a file, and in that case all changes
284 ** to that one file version are tracked across both edits and renames.
285 ** If only the name=FILENAME parameter is supplied (if ci=HASH is omitted)
286 ** then the graph shows all changes to any file while it happened
287 ** to be called FILENAME and changes are not tracked across renames.
288 **
289 ** Additional query parameters:
290 **
291 ** a=DATETIME Only show changes after DATETIME
292 ** b=DATETIME Only show changes before DATETIME
293 ** ci=HASH identify a particular version of a file and then
294 ** track changes to that file across renames
295 ** m=HASH Mark this particular file version.
296 ** n=NUM Show the first NUM changes only
297 ** name=FILENAME (Required) name of file whose history to show
298 ** brbg Background color by branch name
299 ** ubg Background color by user name
300 ** from=HASH Ancestors only (not descendents) of the version of
301 ** the file in this particular check-in.
302 ** to=HASH If both from= and to= are supplied, only show those
303 ** changes on the direct path between the two given
304 ** checkins.
305 ** showid Show RID values for debugging
306 ** showsql Show the SQL query used to gather the data for
307 ** the graph
308 **
309 ** DATETIME may be in any of usual formats, including "now",
310 ** "YYYY-MM-DDTHH:MM:SS.SSS", "YYYYMMDDHHMM", and others.
 
 
311 */
312 void finfo_page(void){
313 Stmt q;
314 const char *zFilename = PD("name","");
315 char zPrevDate[20];
316 const char *zA;
317 const char *zB;
318 int n;
319 int ridFrom;
320 int ridTo = 0;
321 int ridCi = 0;
322 const char *zCI = P("ci");
323 int fnid;
324 Blob title;
325 Blob sql;
326 HQuery url;
327 GraphContext *pGraph;
@@ -317,18 +333,22 @@
333 int iTableId = timeline_tableid();
334 int tmFlags = 0; /* Viewing mode */
335 const char *zStyle; /* Viewing mode name */
336 const char *zMark; /* Mark this version of the file */
337 int selRid = 0; /* RID of the marked file version */
338 int mxfnid; /* Maximum filename.fnid value */
339
340 login_check_credentials();
341 if( !g.perm.Read ){ login_needed(g.anon.Read); return; }
342 fnid = db_int(0, "SELECT fnid FROM filename WHERE name=%Q", zFilename);
343 ridCi = zCI ? name_to_rid_www("ci") : 0;
344 if( fnid==0 ){
345 style_header("No such file");
346 }else if( ridCi==0 ){
347 style_header("All files named \"%s\"", zFilename);
348 }else{
349 style_header("History of %s of %s",zFilename, zCI);
350 }
351 login_anonymous_available();
352 tmFlags = timeline_ss_submenu();
353 if( tmFlags & TIMELINE_COLUMNAR ){
354 zStyle = "Columnar";
@@ -342,11 +362,11 @@
362 zStyle = "Modern";
363 }
364 url_initialize(&url, "finfo");
365 if( brBg ) url_add_parameter(&url, "brbg", 0);
366 if( uBg ) url_add_parameter(&url, "ubg", 0);
367 ridFrom = name_to_rid_www("from");
368 zPrevDate[0] = 0;
369 cookie_render();
370 if( fnid==0 ){
371 @ No such file: %h(zFilename)
372 style_footer();
@@ -353,54 +373,105 @@
373 return;
374 }
375 if( g.perm.Admin ){
376 style_submenu_element("MLink Table", "%R/mlink?name=%t", zFilename);
377 }
378 if( ridFrom ){
379 if( P("to")!=0 ){
380 ridTo = name_to_typed_rid(P("to"),"ci");
381 path_shortest_stored_in_ancestor_table(ridFrom,ridTo);
382 }else{
383 compute_direct_ancestors(ridFrom);
384 }
385 }
386 url_add_parameter(&url, "name", zFilename);
387 blob_zero(&sql);
388 if( ridCi ){
389 /* If we will be tracking changes across renames, some extra temp
390 ** tables (implemented as CTEs) are required */
391 blob_append_sql(&sql,
392 /* The clade(fid,fnid) table is the set of all (fid,fnid) pairs
393 ** that should participate in the output. Clade is computed by
394 ** walking the graph of mlink edges.
395 */
396 "WITH RECURSIVE clade(fid,fnid) AS (\n"
397 " SELECT blob.rid, %d FROM blob\n" /* %d is fnid */
398 " WHERE blob.uuid=(SELECT uuid FROM files_of_checkin(%Q)"
399 " WHERE filename=%Q)\n" /* %Q is the filename */
400 " UNION\n"
401 " SELECT mlink.fid, mlink.fnid\n"
402 " FROM clade, mlink\n"
403 " WHERE clade.fid=mlink.pid\n"
404 " AND ((mlink.pfnid=0 AND mlink.fnid=clade.fnid)\n"
405 " OR mlink.pfnid=clade.fnid)\n"
406 " AND (mlink.fid>0 OR NOT EXISTS(SELECT 1 FROM mlink AS mx"
407 " WHERE mx.mid=mlink.mid AND mx.pid=mlink.pid"
408 " AND mx.fid>0 AND mx.pfnid=mlink.fnid))\n"
409 " UNION\n"
410 " SELECT mlink.pid,"
411 " CASE WHEN mlink.pfnid>0 THEN mlink.pfnid ELSE mlink.fnid END\n"
412 " FROM clade, mlink\n"
413 " WHERE mlink.pid>0\n"
414 " AND mlink.fid=clade.fid\n"
415 " AND mlink.fnid=clade.fnid\n"
416 ")\n",
417 fnid, zCI, zFilename
418 );
419 }else{
420 /* This is the case for all files with a given name. We will still
421 ** create a "clade(fid,fnid)" table that identifies all participates
422 ** in the output graph, so that subsequent queries can all be the same,
423 ** but in the case the clade table is much simplier, being just a
424 ** single direct query against the mlink table.
425 */
426 blob_append_sql(&sql,
427 "WITH clade(fid,fnid) AS (\n"
428 " SELECT DISTINCT fid, %d\n"
429 " FROM mlink\n"
430 " WHERE fnid=%d)",
431 fnid, fnid
432 );
433 }
434 blob_append_sql(&sql,
435 "SELECT\n"
436 " datetime(min(event.mtime),toLocal()),\n" /* Date of change */
437 " coalesce(event.ecomment, event.comment),\n" /* Check-in comment */
438 " coalesce(event.euser, event.user),\n" /* User who made chng */
439 " mlink.pid,\n" /* Parent file rid */
440 " mlink.fid,\n" /* File rid */
441 " (SELECT uuid FROM blob WHERE rid=mlink.pid),\n" /* Parent file hash */
442 " blob.uuid,\n" /* Current file hash */
443 " (SELECT uuid FROM blob WHERE rid=mlink.mid),\n" /* Check-in hash */
444 " event.bgcolor,\n" /* Background color */
445 " (SELECT value FROM tagxref WHERE tagid=%d AND tagtype>0"
446 " AND tagxref.rid=mlink.mid),\n" /* Branchname */
447 " mlink.mid,\n" /* check-in ID */
448 " mlink.pfnid,\n" /* Previous filename */
449 " blob.size,\n" /* File size */
450 " mlink.fnid,\n" /* Current filename */
451 " filename.name\n" /* Current filename */
452 "FROM clade CROSS JOIN mlink, event"
453 " LEFT JOIN blob ON blob.rid=clade.fid"
454 " LEFT JOIN filename ON filename.fnid=clade.fnid\n"
455 "WHERE mlink.fnid=clade.fnid AND mlink.fid=clade.fid\n"
456 " AND event.objid=mlink.mid\n",
457 TAG_BRANCH
458 );
459 if( (zA = P("a"))!=0 ){
460 blob_append_sql(&sql, " AND event.mtime>=%.16g\n",
461 symbolic_name_to_mtime(zA,0));
462 url_add_parameter(&url, "a", zA);
463 }
464 if( (zB = P("b"))!=0 ){
465 blob_append_sql(&sql, " AND event.mtime<=%.16g\n",
466 symbolic_name_to_mtime(zB,0));
467 url_add_parameter(&url, "b", zB);
468 }
469 if( ridFrom ){
470 blob_append_sql(&sql,
471 " AND mlink.mid IN (SELECT rid FROM ancestor)\n"
472 "GROUP BY mlink.fid\n"
473 );
474 }else{
475 /* We only want each version of a file to appear on the graph once,
476 ** at its earliest appearance. All the other times that it gets merged
477 ** into this or that branch can be ignored. An exception is for when
@@ -408,33 +479,35 @@
479 ** is deleted in multiple places, we want to show each deletion, so
480 ** use a "fake fid" which is derived from the parent-fid for grouping.
481 ** The same fake-fid must be used on the graph.
482 */
483 blob_append_sql(&sql,
484 "GROUP BY"
485 " CASE WHEN mlink.fid>0 THEN mlink.fid ELSE mlink.pid+1000000000 END,"
486 " mlink.fnid\n"
487 );
488 }
489 blob_append_sql(&sql, "ORDER BY event.mtime DESC");
490 if( (n = atoi(PD("n","0")))>0 ){
491 blob_append_sql(&sql, " LIMIT %d", n);
492 url_add_parameter(&url, "n", P("n"));
493 }
494 blob_append_sql(&sql, " /*sort*/\n");
495 db_prepare(&q, "%s", blob_sql_text(&sql));
496 if( P("showsql")!=0 ){
497 @ <p>SQL: <blockquote><pre>%h(blob_str(&sql))</blockquote></pre>
498 }
499 zMark = P("m");
500 if( zMark ){
501 selRid = symbolic_name_to_rid(zMark, "*");
502 }
503 blob_reset(&sql);
504 blob_zero(&title);
505 if( ridFrom ){
506 char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", ridFrom);
507 char *zLink = href("%R/info/%!S", zUuid);
508 if( ridTo ){
509 blob_appendf(&title, "Changes to file ");
510 }else if( n>0 ){
511 blob_appendf(&title, "First %d ancestors of file ", n);
512 }else{
513 blob_appendf(&title, "Ancestors of file ");
@@ -441,20 +514,26 @@
514 }
515 blob_appendf(&title,"%z%h</a>",
516 href("%R/file?name=%T&ci=%!S", zFilename, zUuid),
517 zFilename);
518 if( fShowId ) blob_appendf(&title, " (%d)", fnid);
519 blob_append(&title, ridTo ? " between " : " from ", -1);
520 blob_appendf(&title, "check-in %z%S</a>", zLink, zUuid);
521 if( fShowId ) blob_appendf(&title, " (%d)", ridFrom);
522 fossil_free(zUuid);
523 if( ridTo ){
524 zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", ridTo);
525 zLink = href("%R/info/%!S", zUuid);
526 blob_appendf(&title, " and check-in %z%S</a>", zLink, zUuid);
527 fossil_free(zUuid);
528 }
529 }else if( ridCi ){
530 blob_appendf(&title, "History of the file that is called ");
531 hyperlinked_path(zFilename, &title, 0, "tree", "", LINKPATH_FILE);
532 if( fShowId ) blob_appendf(&title, " (%d)", fnid);
533 blob_appendf(&title, " at checkin %z%h</a>",
534 href("%R/info?name=%t",zCI), zCI);
535 }else{
536 blob_appendf(&title, "History for ");
537 hyperlinked_path(zFilename, &title, 0, "tree", "", LINKPATH_FILE);
538 if( fShowId ) blob_appendf(&title, " (%d)", fnid);
539 }
@@ -463,22 +542,25 @@
542 }
543 @ <h2>%b(&title)</h2>
544 blob_reset(&title);
545 pGraph = graph_init();
546 @ <table id="timelineTable%d(iTableId)" class="timelineTable">
547 mxfnid = db_int(0, "SELECT max(fnid) FROM filename");
548 if( ridFrom ){
549 db_prepare(&qparent,
550 "SELECT DISTINCT pid*%d+CASE WHEN pfnid>0 THEN pfnid ELSE fnid END"
551 " FROM mlink"
552 " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
553 " AND pmid IN (SELECT rid FROM ancestor)"
554 " ORDER BY isaux /*sort*/", mxfnid+1
555 );
556 }else{
557 db_prepare(&qparent,
558 "SELECT DISTINCT pid*%d+CASE WHEN pfnid>0 THEN pfnid ELSE fnid END"
559 " FROM mlink"
560 " WHERE fid=:fid AND mid=:mid AND pid>0 AND fnid=:fnid"
561 " ORDER BY isaux /*sort*/", mxfnid+1
562 );
563 }
564 while( db_step(&q)==SQLITE_ROW ){
565 const char *zDate = db_column_text(&q, 0);
566 const char *zCom = db_column_text(&q, 1);
@@ -491,32 +573,35 @@
573 const char *zBgClr = db_column_text(&q, 8);
574 const char *zBr = db_column_text(&q, 9);
575 int fmid = db_column_int(&q, 10);
576 int pfnid = db_column_int(&q, 11);
577 int szFile = db_column_int(&q, 12);
578 int fnid = db_column_int(&q, 13);
579 const char *zFName = db_column_text(&q,14);
580 int gidx;
581 char zTime[10];
582 int nParent = 0;
583 GraphRowId aParent[GR_MAX_RAIL];
584
585 db_bind_int(&qparent, ":fid", frid);
586 db_bind_int(&qparent, ":mid", fmid);
587 db_bind_int(&qparent, ":fnid", fnid);
588 while( db_step(&qparent)==SQLITE_ROW && nParent<count(aParent) ){
589 aParent[nParent] = db_column_int64(&qparent, 0);
590 nParent++;
591 }
592 db_reset(&qparent);
593 if( zBr==0 ) zBr = "trunk";
594 if( uBg ){
595 zBgClr = hash_color(zUser);
596 }else if( brBg || zBgClr==0 || zBgClr[0]==0 ){
597 zBgClr = strcmp(zBr,"trunk")==0 ? "" : hash_color(zBr);
598 }
599 gidx = graph_add_row(pGraph,
600 frid>0 ? (GraphRowId)frid*(mxfnid+1)+fnid : fpid+1000000000,
601 nParent, 0, aParent, zBr, zBgClr,
602 zUuid, 0);
603 if( strncmp(zDate, zPrevDate, 10) ){
604 sqlite3_snprintf(sizeof(zPrevDate), zPrevDate, "%.10s", zDate);
605 @ <tr><td>
606 @ <div class="divider timelineDate">%s(zPrevDate)</div>
607 @ </td><td></td><td></td></tr>
@@ -527,11 +612,11 @@
612 @ <tr class='timelineSelected'>
613 }else{
614 @ <tr>
615 }
616 @ <td class="timelineTime">\
617 @ %z(href("%R/file?name=%T&ci=%!S",zFName,zCkin))%s(zTime)</a></td>
618 @ <td class="timelineGraph"><div id="m%d(gidx)" class="tl-nodemark"></div>
619 @ </td>
620 if( zBgClr && zBgClr[0] ){
621 @ <td class="timeline%s(zStyle)Cell" id='mc%d(gidx)'>
622 }else{
@@ -539,10 +624,35 @@
624 }
625 if( tmFlags & TIMELINE_COMPACT ){
626 @ <span class='timelineCompactComment' data-id='%d(frid)'>
627 }else{
628 @ <span class='timeline%s(zStyle)Comment'>
629 if( pfnid ){
630 char *zPrevName = db_text(0,"SELECT name FROM filename WHERE fnid=%d",
631 pfnid);
632 @ <b>Renamed</b> %h(zPrevName) &rarr; %h(zFName).
633 fossil_free(zPrevName);
634 }
635 if( zUuid && ridTo==0 && nParent==0 ){
636 @ <b>Added:</b>
637 }
638 if( zUuid==0 ){
639 char *zNewName;
640 zNewName = db_text(0,
641 "SELECT name FROM filename WHERE fnid = "
642 " (SELECT fnid FROM mlink"
643 " WHERE mid=%d"
644 " AND pfnid IN (SELECT fnid FROM filename WHERE name=%Q))",
645 fmid, zFName);
646 if( zNewName ){
647 @ <b>Renamed</b> to
648 @ %z(href("%R/finfo?name=%t",zNewName))%h(zNewName)</a>.
649 fossil_free(zNewName);
650 }else{
651 @ <b>Deleted:</b>
652 }
653 }
654 if( (tmFlags & TIMELINE_VERBOSE)!=0 && zUuid ){
655 hyperlink_to_version(zUuid);
656 @ part of check-in \
657 hyperlink_to_version(zCkin);
658 }
@@ -564,11 +674,12 @@
674 cgi_printf("<span class='clutter' id='detail-%d'>",frid);
675 }
676 cgi_printf("<span class='timeline%sDetail'>", zStyle);
677 if( tmFlags & (TIMELINE_COMPACT|TIMELINE_VERBOSE) ) cgi_printf("(");
678 if( zUuid && (tmFlags & TIMELINE_VERBOSE)==0 ){
679 @ file:&nbsp;%z(href("%R/file?name=%T&ci=%!S",zFName,zCkin))\
680 @ [%S(zUuid)]</a>
681 if( fShowId ){
682 int srcId = delta_source_rid(frid);
683 if( srcId>0 ){
684 @ id:&nbsp;%d(frid)&larr;%d(srcId)
685 }else{
@@ -587,63 +698,41 @@
698 if( tmFlags & (TIMELINE_COMPACT|TIMELINE_VERBOSE) ){
699 @ size:&nbsp;%d(szFile))
700 }else{
701 @ size:&nbsp;%d(szFile)
702 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
703 if( g.perm.Hyperlink && zUuid ){
704 const char *z = zFName;
705 @ <span id='links-%d(frid)'><span class='timelineExtraLinks'>
706 @ %z(href("%R/annotate?filename=%h&checkin=%s",z,zCkin))
707 @ [annotate]</a>
708 @ %z(href("%R/blame?filename=%h&checkin=%s",z,zCkin))
709 @ [blame]</a>
710 @ %z(href("%R/timeline?n=all&uf=%!S",zUuid))[check-ins&nbsp;using]</a>
711 if( fpid>0 ){
712 @ %z(href("%R/fdiff?v1=%!S&v2=%!S",zPUuid,zUuid))[diff]</a>
713 }
714 if( fileedit_is_editable(zFName) ){
715 @ %z(href("%R/fileedit?filename=%T&checkin=%!S",zFName,zCkin))\
716 @ [edit]</a>
717 }
718 @ </span></span>
719 }
720 if( fDebug & FINFO_DEBUG_MLINK ){
721 int ii;
722 char *zAncLink;
723 @ <br />fid=%d(frid) \
724 @ graph-id=%lld(frid>0?(GraphRowId)frid*(mxfnid+1)+fnid:fpid+1000000000) \
725 @ pid=%d(fpid) mid=%d(fmid) fnid=%d(fnid) \
726 @ pfnid=%d(pfnid) mxfnid=%d(mxfnid)
727 if( nParent>0 ){
728 @ parents=%lld(aParent[0])
729 for(ii=1; ii<nParent; ii++){
730 @ %lld(aParent[ii])
731 }
732 }
733 zAncLink = href("%R/finfo?name=%T&from=%!S&debug=1",zFName,zCkin);
734 @ %z(zAncLink)[ancestry]</a>
735 }
736 tag_private_status(frid);
737 /* End timelineDetail */
738 if( tmFlags & TIMELINE_COMPACT ){
739
+24 -12
--- src/graph.c
+++ src/graph.c
@@ -41,10 +41,22 @@
4141
** the rails used for merge arrows.
4242
*/
4343
4444
#if INTERFACE
4545
46
+/*
47
+** The type of integer identifiers for rows of the graph.
48
+**
49
+** For a normal /timeline graph, the identifiers are never that big
50
+** and an ordinary 32-bit int will work fine. But for the /finfo page,
51
+** the identifier is a combination of the BLOB.RID and the FILENAME.FNID
52
+** values, and so it can become quite large for repos that have both many
53
+** check-ins and many files. For this reason, we make the identifier
54
+** a 64-bit integer, to dramatically reduce the risk of an overflow.
55
+*/
56
+typedef sqlite3_int64 GraphRowId;
57
+
4658
#define GR_MAX_RAIL 40 /* Max number of "rails" to display */
4759
4860
/* The graph appears vertically beside a timeline. Each row in the
4961
** timeline corresponds to a row in the graph. GraphRow.idx is 0 for
5062
** the top-most row and increases moving down. Hence (in the absence of
@@ -52,16 +64,16 @@
5264
**
5365
** The nParent field is -1 for entries that do not participate in the graph
5466
** but which are included just so that we can capture their background color.
5567
*/
5668
struct GraphRow {
57
- int rid; /* The rid for the check-in */
69
+ GraphRowId rid; /* The rid for the check-in */
5870
i8 nParent; /* Number of parents. */
5971
i8 nCherrypick; /* Subset of aParent that are cherrypicks */
6072
i8 nNonCherrypick; /* Number of non-cherrypick parents */
6173
u8 nMergeChild; /* Number of merge children */
62
- int *aParent; /* Array of parents. 0 element is primary .*/
74
+ GraphRowId *aParent; /* Array of parents. 0 element is primary .*/
6375
char *zBranch; /* Branch name */
6476
char *zBgClr; /* Background Color */
6577
char zUuid[HNAME_MAX+1]; /* Check-in for file ID */
6678
6779
GraphRow *pNext; /* Next row down in the list of all rows */
@@ -176,11 +188,11 @@
176188
}
177189
178190
/*
179191
** Look up the row with rid.
180192
*/
181
-static GraphRow *hashFind(GraphContext *p, int rid){
193
+static GraphRow *hashFind(GraphContext *p, GraphRowId rid){
182194
int h = rid % p->nHash;
183195
while( p->apHash[h] && p->apHash[h]->rid!=rid ){
184196
h++;
185197
if( h>=p->nHash ) h = 0;
186198
}
@@ -212,14 +224,14 @@
212224
/*
213225
** Add a new row to the graph context. Rows are added from top to bottom.
214226
*/
215227
int graph_add_row(
216228
GraphContext *p, /* The context to which the row is added */
217
- int rid, /* RID for the check-in */
229
+ GraphRowId rid, /* RID for the check-in */
218230
int nParent, /* Number of parents */
219231
int nCherrypick, /* How many of aParent[] are actually cherrypicks */
220
- int *aParent, /* Array of parents */
232
+ GraphRowId *aParent, /* Array of parents */
221233
const char *zBranch, /* Branch for this check-in */
222234
const char *zBgClr, /* Background color. NULL or "" for white. */
223235
const char *zUuid, /* hash name of the object being graphed */
224236
int isLeaf /* True if this row is a leaf */
225237
){
@@ -229,11 +241,11 @@
229241
230242
if( p->nErr ) return 0;
231243
nByte = sizeof(GraphRow);
232244
if( nParent>0 ) nByte += sizeof(pRow->aParent[0])*nParent;
233245
pRow = (GraphRow*)safeMalloc( nByte );
234
- pRow->aParent = nParent>0 ? (int*)&pRow[1] : 0;
246
+ pRow->aParent = nParent>0 ? (GraphRowId*)&pRow[1] : 0;
235247
pRow->rid = rid;
236248
if( nCherrypick>=nParent ){
237249
nCherrypick = nParent-1; /* Safety. Should never happen. */
238250
}
239251
pRow->nParent = nParent;
@@ -440,11 +452,11 @@
440452
441453
/* If mergeRiserFrom[X]==Y that means rail X holds a merge riser
442454
** coming up from the bottom of the graph from off-screen check-in Y
443455
** where Y is the RID. There is no riser on rail X if mergeRiserFrom[X]==0.
444456
*/
445
- int mergeRiserFrom[GR_MAX_RAIL];
457
+ GraphRowId mergeRiserFrom[GR_MAX_RAIL];
446458
447459
if( p==0 || p->pFirst==0 || p->nErr ) return;
448460
p->nErr = 1; /* Assume an error until proven otherwise */
449461
450462
/* Initialize all rows */
@@ -514,11 +526,11 @@
514526
iBest = i;
515527
}
516528
}
517529
i = pRow->nNonCherrypick;
518530
if( iBest>i ){
519
- int x = pRow->aParent[i];
531
+ GraphRowId x = pRow->aParent[i];
520532
pRow->aParent[i] = pRow->aParent[iBest];
521533
pRow->aParent[iBest] = x;
522534
}
523535
}
524536
if( pRow->nNonCherrypick>2 ){
@@ -534,11 +546,11 @@
534546
iDeepest = pParent->idx;
535547
iBest = i;
536548
}
537549
}
538550
if( iBest>1 ){
539
- int x = pRow->aParent[1];
551
+ GraphRowId x = pRow->aParent[1];
540552
pRow->aParent[1] = pRow->aParent[iBest];
541553
pRow->aParent[iBest] = x;
542554
}
543555
}
544556
}
@@ -554,11 +566,11 @@
554566
if( pParent==0 ) continue; /* Parent off-screen */
555567
if( pParent->zBranch==pRow->zBranch ) continue; /* Same branch */
556568
for(i=1; i<pRow->nNonCherrypick; i++){
557569
pParent = hashFind(p, pRow->aParent[i]);
558570
if( pParent && pParent->zBranch==pRow->zBranch ){
559
- int t = pRow->aParent[0];
571
+ GraphRowId t = pRow->aParent[0];
560572
pRow->aParent[0] = pRow->aParent[i];
561573
pRow->aParent[i] = t;
562574
break;
563575
}
564576
}
@@ -652,11 +664,11 @@
652664
}
653665
654666
/* Assign rails to all rows that are still unassigned.
655667
*/
656668
for(pRow=p->pLast; pRow; pRow=pRow->pPrev){
657
- int parentRid;
669
+ GraphRowId parentRid;
658670
659671
if( pRow->iRail>=0 ){
660672
if( pRow->pChild==0 && !pRow->timeWarp ){
661673
if( !omitDescenders && count_nonbranch_children(pRow->rid)!=0 ){
662674
riser_to_top(pRow);
@@ -726,11 +738,11 @@
726738
for(pRow=p->pFirst; pRow; pRow=pRow->pNext){
727739
int iReuseIdx = -1;
728740
int iReuseRail = -1;
729741
int isCherrypick = 0;
730742
for(i=1; i<pRow->nParent; i++){
731
- int parentRid = pRow->aParent[i];
743
+ GraphRowId parentRid = pRow->aParent[i];
732744
if( i==pRow->nNonCherrypick ){
733745
/* Because full merges are laid out before cherrypicks,
734746
** it is ok to use a full-merge raise for a cherrypick.
735747
** See the graph on check-in 8ac66ef33b464d28 for example
736748
** iReuseIdx = -1;
737749
--- src/graph.c
+++ src/graph.c
@@ -41,10 +41,22 @@
41 ** the rails used for merge arrows.
42 */
43
44 #if INTERFACE
45
 
 
 
 
 
 
 
 
 
 
 
 
46 #define GR_MAX_RAIL 40 /* Max number of "rails" to display */
47
48 /* The graph appears vertically beside a timeline. Each row in the
49 ** timeline corresponds to a row in the graph. GraphRow.idx is 0 for
50 ** the top-most row and increases moving down. Hence (in the absence of
@@ -52,16 +64,16 @@
52 **
53 ** The nParent field is -1 for entries that do not participate in the graph
54 ** but which are included just so that we can capture their background color.
55 */
56 struct GraphRow {
57 int rid; /* The rid for the check-in */
58 i8 nParent; /* Number of parents. */
59 i8 nCherrypick; /* Subset of aParent that are cherrypicks */
60 i8 nNonCherrypick; /* Number of non-cherrypick parents */
61 u8 nMergeChild; /* Number of merge children */
62 int *aParent; /* Array of parents. 0 element is primary .*/
63 char *zBranch; /* Branch name */
64 char *zBgClr; /* Background Color */
65 char zUuid[HNAME_MAX+1]; /* Check-in for file ID */
66
67 GraphRow *pNext; /* Next row down in the list of all rows */
@@ -176,11 +188,11 @@
176 }
177
178 /*
179 ** Look up the row with rid.
180 */
181 static GraphRow *hashFind(GraphContext *p, int rid){
182 int h = rid % p->nHash;
183 while( p->apHash[h] && p->apHash[h]->rid!=rid ){
184 h++;
185 if( h>=p->nHash ) h = 0;
186 }
@@ -212,14 +224,14 @@
212 /*
213 ** Add a new row to the graph context. Rows are added from top to bottom.
214 */
215 int graph_add_row(
216 GraphContext *p, /* The context to which the row is added */
217 int rid, /* RID for the check-in */
218 int nParent, /* Number of parents */
219 int nCherrypick, /* How many of aParent[] are actually cherrypicks */
220 int *aParent, /* Array of parents */
221 const char *zBranch, /* Branch for this check-in */
222 const char *zBgClr, /* Background color. NULL or "" for white. */
223 const char *zUuid, /* hash name of the object being graphed */
224 int isLeaf /* True if this row is a leaf */
225 ){
@@ -229,11 +241,11 @@
229
230 if( p->nErr ) return 0;
231 nByte = sizeof(GraphRow);
232 if( nParent>0 ) nByte += sizeof(pRow->aParent[0])*nParent;
233 pRow = (GraphRow*)safeMalloc( nByte );
234 pRow->aParent = nParent>0 ? (int*)&pRow[1] : 0;
235 pRow->rid = rid;
236 if( nCherrypick>=nParent ){
237 nCherrypick = nParent-1; /* Safety. Should never happen. */
238 }
239 pRow->nParent = nParent;
@@ -440,11 +452,11 @@
440
441 /* If mergeRiserFrom[X]==Y that means rail X holds a merge riser
442 ** coming up from the bottom of the graph from off-screen check-in Y
443 ** where Y is the RID. There is no riser on rail X if mergeRiserFrom[X]==0.
444 */
445 int mergeRiserFrom[GR_MAX_RAIL];
446
447 if( p==0 || p->pFirst==0 || p->nErr ) return;
448 p->nErr = 1; /* Assume an error until proven otherwise */
449
450 /* Initialize all rows */
@@ -514,11 +526,11 @@
514 iBest = i;
515 }
516 }
517 i = pRow->nNonCherrypick;
518 if( iBest>i ){
519 int x = pRow->aParent[i];
520 pRow->aParent[i] = pRow->aParent[iBest];
521 pRow->aParent[iBest] = x;
522 }
523 }
524 if( pRow->nNonCherrypick>2 ){
@@ -534,11 +546,11 @@
534 iDeepest = pParent->idx;
535 iBest = i;
536 }
537 }
538 if( iBest>1 ){
539 int x = pRow->aParent[1];
540 pRow->aParent[1] = pRow->aParent[iBest];
541 pRow->aParent[iBest] = x;
542 }
543 }
544 }
@@ -554,11 +566,11 @@
554 if( pParent==0 ) continue; /* Parent off-screen */
555 if( pParent->zBranch==pRow->zBranch ) continue; /* Same branch */
556 for(i=1; i<pRow->nNonCherrypick; i++){
557 pParent = hashFind(p, pRow->aParent[i]);
558 if( pParent && pParent->zBranch==pRow->zBranch ){
559 int t = pRow->aParent[0];
560 pRow->aParent[0] = pRow->aParent[i];
561 pRow->aParent[i] = t;
562 break;
563 }
564 }
@@ -652,11 +664,11 @@
652 }
653
654 /* Assign rails to all rows that are still unassigned.
655 */
656 for(pRow=p->pLast; pRow; pRow=pRow->pPrev){
657 int parentRid;
658
659 if( pRow->iRail>=0 ){
660 if( pRow->pChild==0 && !pRow->timeWarp ){
661 if( !omitDescenders && count_nonbranch_children(pRow->rid)!=0 ){
662 riser_to_top(pRow);
@@ -726,11 +738,11 @@
726 for(pRow=p->pFirst; pRow; pRow=pRow->pNext){
727 int iReuseIdx = -1;
728 int iReuseRail = -1;
729 int isCherrypick = 0;
730 for(i=1; i<pRow->nParent; i++){
731 int parentRid = pRow->aParent[i];
732 if( i==pRow->nNonCherrypick ){
733 /* Because full merges are laid out before cherrypicks,
734 ** it is ok to use a full-merge raise for a cherrypick.
735 ** See the graph on check-in 8ac66ef33b464d28 for example
736 ** iReuseIdx = -1;
737
--- src/graph.c
+++ src/graph.c
@@ -41,10 +41,22 @@
41 ** the rails used for merge arrows.
42 */
43
44 #if INTERFACE
45
46 /*
47 ** The type of integer identifiers for rows of the graph.
48 **
49 ** For a normal /timeline graph, the identifiers are never that big
50 ** and an ordinary 32-bit int will work fine. But for the /finfo page,
51 ** the identifier is a combination of the BLOB.RID and the FILENAME.FNID
52 ** values, and so it can become quite large for repos that have both many
53 ** check-ins and many files. For this reason, we make the identifier
54 ** a 64-bit integer, to dramatically reduce the risk of an overflow.
55 */
56 typedef sqlite3_int64 GraphRowId;
57
58 #define GR_MAX_RAIL 40 /* Max number of "rails" to display */
59
60 /* The graph appears vertically beside a timeline. Each row in the
61 ** timeline corresponds to a row in the graph. GraphRow.idx is 0 for
62 ** the top-most row and increases moving down. Hence (in the absence of
@@ -52,16 +64,16 @@
64 **
65 ** The nParent field is -1 for entries that do not participate in the graph
66 ** but which are included just so that we can capture their background color.
67 */
68 struct GraphRow {
69 GraphRowId rid; /* The rid for the check-in */
70 i8 nParent; /* Number of parents. */
71 i8 nCherrypick; /* Subset of aParent that are cherrypicks */
72 i8 nNonCherrypick; /* Number of non-cherrypick parents */
73 u8 nMergeChild; /* Number of merge children */
74 GraphRowId *aParent; /* Array of parents. 0 element is primary .*/
75 char *zBranch; /* Branch name */
76 char *zBgClr; /* Background Color */
77 char zUuid[HNAME_MAX+1]; /* Check-in for file ID */
78
79 GraphRow *pNext; /* Next row down in the list of all rows */
@@ -176,11 +188,11 @@
188 }
189
190 /*
191 ** Look up the row with rid.
192 */
193 static GraphRow *hashFind(GraphContext *p, GraphRowId rid){
194 int h = rid % p->nHash;
195 while( p->apHash[h] && p->apHash[h]->rid!=rid ){
196 h++;
197 if( h>=p->nHash ) h = 0;
198 }
@@ -212,14 +224,14 @@
224 /*
225 ** Add a new row to the graph context. Rows are added from top to bottom.
226 */
227 int graph_add_row(
228 GraphContext *p, /* The context to which the row is added */
229 GraphRowId rid, /* RID for the check-in */
230 int nParent, /* Number of parents */
231 int nCherrypick, /* How many of aParent[] are actually cherrypicks */
232 GraphRowId *aParent, /* Array of parents */
233 const char *zBranch, /* Branch for this check-in */
234 const char *zBgClr, /* Background color. NULL or "" for white. */
235 const char *zUuid, /* hash name of the object being graphed */
236 int isLeaf /* True if this row is a leaf */
237 ){
@@ -229,11 +241,11 @@
241
242 if( p->nErr ) return 0;
243 nByte = sizeof(GraphRow);
244 if( nParent>0 ) nByte += sizeof(pRow->aParent[0])*nParent;
245 pRow = (GraphRow*)safeMalloc( nByte );
246 pRow->aParent = nParent>0 ? (GraphRowId*)&pRow[1] : 0;
247 pRow->rid = rid;
248 if( nCherrypick>=nParent ){
249 nCherrypick = nParent-1; /* Safety. Should never happen. */
250 }
251 pRow->nParent = nParent;
@@ -440,11 +452,11 @@
452
453 /* If mergeRiserFrom[X]==Y that means rail X holds a merge riser
454 ** coming up from the bottom of the graph from off-screen check-in Y
455 ** where Y is the RID. There is no riser on rail X if mergeRiserFrom[X]==0.
456 */
457 GraphRowId mergeRiserFrom[GR_MAX_RAIL];
458
459 if( p==0 || p->pFirst==0 || p->nErr ) return;
460 p->nErr = 1; /* Assume an error until proven otherwise */
461
462 /* Initialize all rows */
@@ -514,11 +526,11 @@
526 iBest = i;
527 }
528 }
529 i = pRow->nNonCherrypick;
530 if( iBest>i ){
531 GraphRowId x = pRow->aParent[i];
532 pRow->aParent[i] = pRow->aParent[iBest];
533 pRow->aParent[iBest] = x;
534 }
535 }
536 if( pRow->nNonCherrypick>2 ){
@@ -534,11 +546,11 @@
546 iDeepest = pParent->idx;
547 iBest = i;
548 }
549 }
550 if( iBest>1 ){
551 GraphRowId x = pRow->aParent[1];
552 pRow->aParent[1] = pRow->aParent[iBest];
553 pRow->aParent[iBest] = x;
554 }
555 }
556 }
@@ -554,11 +566,11 @@
566 if( pParent==0 ) continue; /* Parent off-screen */
567 if( pParent->zBranch==pRow->zBranch ) continue; /* Same branch */
568 for(i=1; i<pRow->nNonCherrypick; i++){
569 pParent = hashFind(p, pRow->aParent[i]);
570 if( pParent && pParent->zBranch==pRow->zBranch ){
571 GraphRowId t = pRow->aParent[0];
572 pRow->aParent[0] = pRow->aParent[i];
573 pRow->aParent[i] = t;
574 break;
575 }
576 }
@@ -652,11 +664,11 @@
664 }
665
666 /* Assign rails to all rows that are still unassigned.
667 */
668 for(pRow=p->pLast; pRow; pRow=pRow->pPrev){
669 GraphRowId parentRid;
670
671 if( pRow->iRail>=0 ){
672 if( pRow->pChild==0 && !pRow->timeWarp ){
673 if( !omitDescenders && count_nonbranch_children(pRow->rid)!=0 ){
674 riser_to_top(pRow);
@@ -726,11 +738,11 @@
738 for(pRow=p->pFirst; pRow; pRow=pRow->pNext){
739 int iReuseIdx = -1;
740 int iReuseRail = -1;
741 int isCherrypick = 0;
742 for(i=1; i<pRow->nParent; i++){
743 GraphRowId parentRid = pRow->aParent[i];
744 if( i==pRow->nNonCherrypick ){
745 /* Because full merges are laid out before cherrypicks,
746 ** it is ok to use a full-merge raise for a cherrypick.
747 ** See the graph on check-in 8ac66ef33b464d28 for example
748 ** iReuseIdx = -1;
749
+51 -20
--- src/info.c
+++ src/info.c
@@ -368,10 +368,11 @@
368368
/*
369369
** Write a line of web-page output that shows changes that have occurred
370370
** to a file between two check-ins.
371371
*/
372372
static void append_file_change_line(
373
+ const char *zCkin, /* The checkin on which the change occurs */
373374
const char *zName, /* Name of the file that has changed */
374375
const char *zOld, /* blob.uuid before change. NULL for added files */
375376
const char *zNew, /* blob.uuid after change. NULL for deletes */
376377
const char *zOldName, /* Prior name. NULL if no name change. */
377378
u64 diffFlags, /* Flags for text_diff(). Zero to omit diffs */
@@ -401,19 +402,23 @@
401402
append_diff(zOld, zNew, diffFlags, pRe);
402403
}
403404
}else{
404405
if( zOld && zNew ){
405406
if( fossil_strcmp(zOld, zNew)!=0 ){
406
- @ Modified %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a>
407
+ @ Modified %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
408
+ @ %h(zName)</a>
407409
@ from %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>
408410
@ to %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
409411
}else if( zOldName!=0 && fossil_strcmp(zName,zOldName)!=0 ){
410412
@ Name change
411
- @ from %z(href("%R/finfo?name=%T&m=%!S",zOldName,zOld))%h(zOldName)</a>
412
- @ to %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a>.
413
+ @ from %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zOldName,zOld,zCkin))\
414
+ @ %h(zOldName)</a>
415
+ @ to %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
416
+ @ %h(zName)</a>.
413417
}else{
414
- @ %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a> became
418
+ @ %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
419
+ @ %h(zName)</a> became
415420
if( mperm==PERM_EXE ){
416421
@ executable with contents
417422
}else if( mperm==PERM_LNK ){
418423
@ a symlink with target
419424
}else{
@@ -420,15 +425,15 @@
420425
@ a regular file with contents
421426
}
422427
@ %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
423428
}
424429
}else if( zOld ){
425
- @ Deleted %z(href("%R/finfo?name=%T&m=%!S",zName,zOld))%h(zName)</a>
426
- @ version %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>.
430
+ @ Deleted %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zOld,zCkin))\
431
+ @ %h(zName)</a> version %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>.
427432
}else{
428
- @ Added %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a>
429
- @ version %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
433
+ @ Added %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
434
+ @ %h(zName)</a> version %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
430435
}
431436
if( diffFlags ){
432437
append_diff(zOld, zNew, diffFlags, pRe);
433438
}else if( zOld && zNew && fossil_strcmp(zOld,zNew)!=0 ){
434439
@ &nbsp;&nbsp;
@@ -929,11 +934,12 @@
929934
const char *zName = db_column_text(&q3,0);
930935
int mperm = db_column_int(&q3, 1);
931936
const char *zOld = db_column_text(&q3,2);
932937
const char *zNew = db_column_text(&q3,3);
933938
const char *zOldName = db_column_text(&q3, 4);
934
- append_file_change_line(zName, zOld, zNew, zOldName, diffFlags,pRe,mperm);
939
+ append_file_change_line(zUuid, zName, zOld, zNew, zOldName,
940
+ diffFlags,pRe,mperm);
935941
}
936942
db_finalize(&q3);
937943
append_diff_javascript(diffType==2);
938944
cookie_render();
939945
style_footer();
@@ -1292,17 +1298,17 @@
12921298
}else{
12931299
cmp = fossil_strcmp(pFileFrom->zName, pFileTo->zName);
12941300
}
12951301
if( cmp<0 ){
12961302
if( !zGlob || sqlite3_strglob(zGlob, pFileFrom->zName)==0 ){
1297
- append_file_change_line(pFileFrom->zName,
1303
+ append_file_change_line(zFrom, pFileFrom->zName,
12981304
pFileFrom->zUuid, 0, 0, diffFlags, pRe, 0);
12991305
}
13001306
pFileFrom = manifest_file_next(pFrom, 0);
13011307
}else if( cmp>0 ){
13021308
if( !zGlob || sqlite3_strglob(zGlob, pFileTo->zName)==0 ){
1303
- append_file_change_line(pFileTo->zName,
1309
+ append_file_change_line(zTo, pFileTo->zName,
13041310
0, pFileTo->zUuid, 0, diffFlags, pRe,
13051311
manifest_file_mperm(pFileTo));
13061312
}
13071313
pFileTo = manifest_file_next(pTo, 0);
13081314
}else if( fossil_strcmp(pFileFrom->zUuid, pFileTo->zUuid)==0 ){
@@ -1309,11 +1315,11 @@
13091315
pFileFrom = manifest_file_next(pFrom, 0);
13101316
pFileTo = manifest_file_next(pTo, 0);
13111317
}else{
13121318
if(!zGlob || (sqlite3_strglob(zGlob, pFileFrom->zName)==0
13131319
|| sqlite3_strglob(zGlob, pFileTo->zName)==0) ){
1314
- append_file_change_line(pFileFrom->zName,
1320
+ append_file_change_line(zFrom, pFileFrom->zName,
13151321
pFileFrom->zUuid,
13161322
pFileTo->zUuid, 0, diffFlags, pRe,
13171323
manifest_file_mperm(pFileTo));
13181324
}
13191325
pFileFrom = manifest_file_next(pFrom, 0);
@@ -1419,11 +1425,12 @@
14191425
bNeedBase = 0;
14201426
style_set_current_page("doc/%S/%s",zVers,zName);
14211427
}
14221428
}
14231429
objType |= OBJTYPE_CONTENT;
1424
- @ %z(href("%R/finfo?name=%T&m=%!S",zName,zUuid))%h(zName)</a>
1430
+ @ %z(href("%R/finfo?name=%T&ci=%!S&m=%!S",zName,zVers,zUuid))\
1431
+ @ %h(zName)</a>
14251432
tag_private_status(rid);
14261433
if( showDetail ){
14271434
@ <ul>
14281435
}
14291436
prevName = fossil_strdup(zName);
@@ -2277,10 +2284,11 @@
22772284
rid = artifact_from_ci_and_filename(0);
22782285
}
22792286
22802287
if( rid==0 ){ /* Artifact not found */
22812288
if( isFile ){
2289
+ Stmt q;
22822290
/* For /file, also check to see if name= refers to a directory,
22832291
** and if so, do a listing for that directory */
22842292
int nName = (int)strlen(zName);
22852293
if( nName && zName[nName-1]=='/' ) nName--;
22862294
if( db_exists(
@@ -2290,18 +2298,39 @@
22902298
) ){
22912299
if( P("ci")==0 ) cgi_set_query_parameter("ci","tip");
22922300
page_tree();
22932301
return;
22942302
}
2295
- style_header("No such file");
2296
- @ File '%h(zName)' does not exist in this repository.
2303
+ /* No directory found, look for an historic version of the file
2304
+ ** that was subsequently deleted. */
2305
+ db_prepare(&q,
2306
+ "SELECT fid, uuid FROM mlink, filename, event, blob"
2307
+ " WHERE filename.name=%Q"
2308
+ " AND mlink.fnid=filename.fnid AND mlink.fid>0"
2309
+ " AND event.objid=mlink.mid"
2310
+ " AND blob.rid=mlink.mid"
2311
+ " ORDER BY event.mtime DESC",
2312
+ zName
2313
+ );
2314
+ if( db_step(&q)==SQLITE_ROW ){
2315
+ rid = db_column_int(&q, 0);
2316
+ zCI = zCIUuid = fossil_strdup(db_column_text(&q, 1));
2317
+ url_add_parameter(&url, "ci", zCI);
2318
+ }
2319
+ db_finalize(&q);
2320
+ if( rid==0 ){
2321
+ style_header("No such file");
2322
+ @ File '%h(zName)' does not exist in this repository.
2323
+ }
22972324
}else{
22982325
style_header("No such artifact");
22992326
@ Artifact '%h(zName)' does not exist in this repository.
23002327
}
2301
- style_footer();
2302
- return;
2328
+ if( rid==0 ){
2329
+ style_footer();
2330
+ return;
2331
+ }
23032332
}
23042333
23052334
if( descOnly || P("verbose")!=0 ){
23062335
url_add_parameter(&url, "verbose", "1");
23072336
objdescFlags |= OBJDESC_DETAIL;
@@ -2311,11 +2340,11 @@
23112340
23122341
asText = P("txt")!=0;
23132342
if( isFile ){
23142343
if( zCI==0 || fossil_strcmp(zCI,"tip")==0 ){
23152344
zCI = "tip";
2316
- @ <h2>File %z(href("%R/finfo?name=%T&m=tip",zName))%h(zName)</a>
2345
+ @ <h2>File %z(href("%R/finfo?name=%T&m&ci=tip",zName))%h(zName)</a>
23172346
@ from the %z(href("%R/info/tip"))latest check-in</a></h2>
23182347
}else{
23192348
const char *zPath;
23202349
Blob path;
23212350
blob_zero(&path);
@@ -2412,13 +2441,15 @@
24122441
}else{
24132442
renderAsHtml = 1;
24142443
style_submenu_element("Text", "%s", url_render(&url, "txt", "1", 0, 0));
24152444
}
24162445
}else if( fossil_strcmp(zMime, "text/x-fossil-wiki")==0
2417
- || fossil_strcmp(zMime, "text/x-markdown")==0 ){
2446
+ || fossil_strcmp(zMime, "text/x-markdown")==0
2447
+ || fossil_strcmp(zMime, "text/x-pikchr")==0 ){
24182448
if( asText ){
2419
- style_submenu_element("Wiki", "%s", url_render(&url, "txt", 0, 0, 0));
2449
+ style_submenu_element(zMime[7]=='p' ? "Pikchr" : "Wiki",
2450
+ "%s", url_render(&url, "txt", 0, 0, 0));
24202451
}else{
24212452
renderAsWiki = 1;
24222453
style_submenu_element("Text", "%s", url_render(&url, "txt", "1", 0, 0));
24232454
}
24242455
}else if( fossil_strcmp(zMime, "image/svg+xml")==0 ){
24252456
--- src/info.c
+++ src/info.c
@@ -368,10 +368,11 @@
368 /*
369 ** Write a line of web-page output that shows changes that have occurred
370 ** to a file between two check-ins.
371 */
372 static void append_file_change_line(
 
373 const char *zName, /* Name of the file that has changed */
374 const char *zOld, /* blob.uuid before change. NULL for added files */
375 const char *zNew, /* blob.uuid after change. NULL for deletes */
376 const char *zOldName, /* Prior name. NULL if no name change. */
377 u64 diffFlags, /* Flags for text_diff(). Zero to omit diffs */
@@ -401,19 +402,23 @@
401 append_diff(zOld, zNew, diffFlags, pRe);
402 }
403 }else{
404 if( zOld && zNew ){
405 if( fossil_strcmp(zOld, zNew)!=0 ){
406 @ Modified %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a>
 
407 @ from %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>
408 @ to %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
409 }else if( zOldName!=0 && fossil_strcmp(zName,zOldName)!=0 ){
410 @ Name change
411 @ from %z(href("%R/finfo?name=%T&m=%!S",zOldName,zOld))%h(zOldName)</a>
412 @ to %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a>.
 
 
413 }else{
414 @ %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a> became
 
415 if( mperm==PERM_EXE ){
416 @ executable with contents
417 }else if( mperm==PERM_LNK ){
418 @ a symlink with target
419 }else{
@@ -420,15 +425,15 @@
420 @ a regular file with contents
421 }
422 @ %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
423 }
424 }else if( zOld ){
425 @ Deleted %z(href("%R/finfo?name=%T&m=%!S",zName,zOld))%h(zName)</a>
426 @ version %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>.
427 }else{
428 @ Added %z(href("%R/finfo?name=%T&m=%!S",zName,zNew))%h(zName)</a>
429 @ version %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
430 }
431 if( diffFlags ){
432 append_diff(zOld, zNew, diffFlags, pRe);
433 }else if( zOld && zNew && fossil_strcmp(zOld,zNew)!=0 ){
434 @ &nbsp;&nbsp;
@@ -929,11 +934,12 @@
929 const char *zName = db_column_text(&q3,0);
930 int mperm = db_column_int(&q3, 1);
931 const char *zOld = db_column_text(&q3,2);
932 const char *zNew = db_column_text(&q3,3);
933 const char *zOldName = db_column_text(&q3, 4);
934 append_file_change_line(zName, zOld, zNew, zOldName, diffFlags,pRe,mperm);
 
935 }
936 db_finalize(&q3);
937 append_diff_javascript(diffType==2);
938 cookie_render();
939 style_footer();
@@ -1292,17 +1298,17 @@
1292 }else{
1293 cmp = fossil_strcmp(pFileFrom->zName, pFileTo->zName);
1294 }
1295 if( cmp<0 ){
1296 if( !zGlob || sqlite3_strglob(zGlob, pFileFrom->zName)==0 ){
1297 append_file_change_line(pFileFrom->zName,
1298 pFileFrom->zUuid, 0, 0, diffFlags, pRe, 0);
1299 }
1300 pFileFrom = manifest_file_next(pFrom, 0);
1301 }else if( cmp>0 ){
1302 if( !zGlob || sqlite3_strglob(zGlob, pFileTo->zName)==0 ){
1303 append_file_change_line(pFileTo->zName,
1304 0, pFileTo->zUuid, 0, diffFlags, pRe,
1305 manifest_file_mperm(pFileTo));
1306 }
1307 pFileTo = manifest_file_next(pTo, 0);
1308 }else if( fossil_strcmp(pFileFrom->zUuid, pFileTo->zUuid)==0 ){
@@ -1309,11 +1315,11 @@
1309 pFileFrom = manifest_file_next(pFrom, 0);
1310 pFileTo = manifest_file_next(pTo, 0);
1311 }else{
1312 if(!zGlob || (sqlite3_strglob(zGlob, pFileFrom->zName)==0
1313 || sqlite3_strglob(zGlob, pFileTo->zName)==0) ){
1314 append_file_change_line(pFileFrom->zName,
1315 pFileFrom->zUuid,
1316 pFileTo->zUuid, 0, diffFlags, pRe,
1317 manifest_file_mperm(pFileTo));
1318 }
1319 pFileFrom = manifest_file_next(pFrom, 0);
@@ -1419,11 +1425,12 @@
1419 bNeedBase = 0;
1420 style_set_current_page("doc/%S/%s",zVers,zName);
1421 }
1422 }
1423 objType |= OBJTYPE_CONTENT;
1424 @ %z(href("%R/finfo?name=%T&m=%!S",zName,zUuid))%h(zName)</a>
 
1425 tag_private_status(rid);
1426 if( showDetail ){
1427 @ <ul>
1428 }
1429 prevName = fossil_strdup(zName);
@@ -2277,10 +2284,11 @@
2277 rid = artifact_from_ci_and_filename(0);
2278 }
2279
2280 if( rid==0 ){ /* Artifact not found */
2281 if( isFile ){
 
2282 /* For /file, also check to see if name= refers to a directory,
2283 ** and if so, do a listing for that directory */
2284 int nName = (int)strlen(zName);
2285 if( nName && zName[nName-1]=='/' ) nName--;
2286 if( db_exists(
@@ -2290,18 +2298,39 @@
2290 ) ){
2291 if( P("ci")==0 ) cgi_set_query_parameter("ci","tip");
2292 page_tree();
2293 return;
2294 }
2295 style_header("No such file");
2296 @ File '%h(zName)' does not exist in this repository.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2297 }else{
2298 style_header("No such artifact");
2299 @ Artifact '%h(zName)' does not exist in this repository.
2300 }
2301 style_footer();
2302 return;
 
 
2303 }
2304
2305 if( descOnly || P("verbose")!=0 ){
2306 url_add_parameter(&url, "verbose", "1");
2307 objdescFlags |= OBJDESC_DETAIL;
@@ -2311,11 +2340,11 @@
2311
2312 asText = P("txt")!=0;
2313 if( isFile ){
2314 if( zCI==0 || fossil_strcmp(zCI,"tip")==0 ){
2315 zCI = "tip";
2316 @ <h2>File %z(href("%R/finfo?name=%T&m=tip",zName))%h(zName)</a>
2317 @ from the %z(href("%R/info/tip"))latest check-in</a></h2>
2318 }else{
2319 const char *zPath;
2320 Blob path;
2321 blob_zero(&path);
@@ -2412,13 +2441,15 @@
2412 }else{
2413 renderAsHtml = 1;
2414 style_submenu_element("Text", "%s", url_render(&url, "txt", "1", 0, 0));
2415 }
2416 }else if( fossil_strcmp(zMime, "text/x-fossil-wiki")==0
2417 || fossil_strcmp(zMime, "text/x-markdown")==0 ){
 
2418 if( asText ){
2419 style_submenu_element("Wiki", "%s", url_render(&url, "txt", 0, 0, 0));
 
2420 }else{
2421 renderAsWiki = 1;
2422 style_submenu_element("Text", "%s", url_render(&url, "txt", "1", 0, 0));
2423 }
2424 }else if( fossil_strcmp(zMime, "image/svg+xml")==0 ){
2425
--- src/info.c
+++ src/info.c
@@ -368,10 +368,11 @@
368 /*
369 ** Write a line of web-page output that shows changes that have occurred
370 ** to a file between two check-ins.
371 */
372 static void append_file_change_line(
373 const char *zCkin, /* The checkin on which the change occurs */
374 const char *zName, /* Name of the file that has changed */
375 const char *zOld, /* blob.uuid before change. NULL for added files */
376 const char *zNew, /* blob.uuid after change. NULL for deletes */
377 const char *zOldName, /* Prior name. NULL if no name change. */
378 u64 diffFlags, /* Flags for text_diff(). Zero to omit diffs */
@@ -401,19 +402,23 @@
402 append_diff(zOld, zNew, diffFlags, pRe);
403 }
404 }else{
405 if( zOld && zNew ){
406 if( fossil_strcmp(zOld, zNew)!=0 ){
407 @ Modified %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
408 @ %h(zName)</a>
409 @ from %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>
410 @ to %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
411 }else if( zOldName!=0 && fossil_strcmp(zName,zOldName)!=0 ){
412 @ Name change
413 @ from %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zOldName,zOld,zCkin))\
414 @ %h(zOldName)</a>
415 @ to %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
416 @ %h(zName)</a>.
417 }else{
418 @ %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
419 @ %h(zName)</a> became
420 if( mperm==PERM_EXE ){
421 @ executable with contents
422 }else if( mperm==PERM_LNK ){
423 @ a symlink with target
424 }else{
@@ -420,15 +425,15 @@
425 @ a regular file with contents
426 }
427 @ %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
428 }
429 }else if( zOld ){
430 @ Deleted %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zOld,zCkin))\
431 @ %h(zName)</a> version %z(href("%R/artifact/%!S",zOld))[%S(zOld)]</a>.
432 }else{
433 @ Added %z(href("%R/finfo?name=%T&m=%!S&ci=%!S",zName,zNew,zCkin))\
434 @ %h(zName)</a> version %z(href("%R/artifact/%!S",zNew))[%S(zNew)]</a>.
435 }
436 if( diffFlags ){
437 append_diff(zOld, zNew, diffFlags, pRe);
438 }else if( zOld && zNew && fossil_strcmp(zOld,zNew)!=0 ){
439 @ &nbsp;&nbsp;
@@ -929,11 +934,12 @@
934 const char *zName = db_column_text(&q3,0);
935 int mperm = db_column_int(&q3, 1);
936 const char *zOld = db_column_text(&q3,2);
937 const char *zNew = db_column_text(&q3,3);
938 const char *zOldName = db_column_text(&q3, 4);
939 append_file_change_line(zUuid, zName, zOld, zNew, zOldName,
940 diffFlags,pRe,mperm);
941 }
942 db_finalize(&q3);
943 append_diff_javascript(diffType==2);
944 cookie_render();
945 style_footer();
@@ -1292,17 +1298,17 @@
1298 }else{
1299 cmp = fossil_strcmp(pFileFrom->zName, pFileTo->zName);
1300 }
1301 if( cmp<0 ){
1302 if( !zGlob || sqlite3_strglob(zGlob, pFileFrom->zName)==0 ){
1303 append_file_change_line(zFrom, pFileFrom->zName,
1304 pFileFrom->zUuid, 0, 0, diffFlags, pRe, 0);
1305 }
1306 pFileFrom = manifest_file_next(pFrom, 0);
1307 }else if( cmp>0 ){
1308 if( !zGlob || sqlite3_strglob(zGlob, pFileTo->zName)==0 ){
1309 append_file_change_line(zTo, pFileTo->zName,
1310 0, pFileTo->zUuid, 0, diffFlags, pRe,
1311 manifest_file_mperm(pFileTo));
1312 }
1313 pFileTo = manifest_file_next(pTo, 0);
1314 }else if( fossil_strcmp(pFileFrom->zUuid, pFileTo->zUuid)==0 ){
@@ -1309,11 +1315,11 @@
1315 pFileFrom = manifest_file_next(pFrom, 0);
1316 pFileTo = manifest_file_next(pTo, 0);
1317 }else{
1318 if(!zGlob || (sqlite3_strglob(zGlob, pFileFrom->zName)==0
1319 || sqlite3_strglob(zGlob, pFileTo->zName)==0) ){
1320 append_file_change_line(zFrom, pFileFrom->zName,
1321 pFileFrom->zUuid,
1322 pFileTo->zUuid, 0, diffFlags, pRe,
1323 manifest_file_mperm(pFileTo));
1324 }
1325 pFileFrom = manifest_file_next(pFrom, 0);
@@ -1419,11 +1425,12 @@
1425 bNeedBase = 0;
1426 style_set_current_page("doc/%S/%s",zVers,zName);
1427 }
1428 }
1429 objType |= OBJTYPE_CONTENT;
1430 @ %z(href("%R/finfo?name=%T&ci=%!S&m=%!S",zName,zVers,zUuid))\
1431 @ %h(zName)</a>
1432 tag_private_status(rid);
1433 if( showDetail ){
1434 @ <ul>
1435 }
1436 prevName = fossil_strdup(zName);
@@ -2277,10 +2284,11 @@
2284 rid = artifact_from_ci_and_filename(0);
2285 }
2286
2287 if( rid==0 ){ /* Artifact not found */
2288 if( isFile ){
2289 Stmt q;
2290 /* For /file, also check to see if name= refers to a directory,
2291 ** and if so, do a listing for that directory */
2292 int nName = (int)strlen(zName);
2293 if( nName && zName[nName-1]=='/' ) nName--;
2294 if( db_exists(
@@ -2290,18 +2298,39 @@
2298 ) ){
2299 if( P("ci")==0 ) cgi_set_query_parameter("ci","tip");
2300 page_tree();
2301 return;
2302 }
2303 /* No directory found, look for an historic version of the file
2304 ** that was subsequently deleted. */
2305 db_prepare(&q,
2306 "SELECT fid, uuid FROM mlink, filename, event, blob"
2307 " WHERE filename.name=%Q"
2308 " AND mlink.fnid=filename.fnid AND mlink.fid>0"
2309 " AND event.objid=mlink.mid"
2310 " AND blob.rid=mlink.mid"
2311 " ORDER BY event.mtime DESC",
2312 zName
2313 );
2314 if( db_step(&q)==SQLITE_ROW ){
2315 rid = db_column_int(&q, 0);
2316 zCI = zCIUuid = fossil_strdup(db_column_text(&q, 1));
2317 url_add_parameter(&url, "ci", zCI);
2318 }
2319 db_finalize(&q);
2320 if( rid==0 ){
2321 style_header("No such file");
2322 @ File '%h(zName)' does not exist in this repository.
2323 }
2324 }else{
2325 style_header("No such artifact");
2326 @ Artifact '%h(zName)' does not exist in this repository.
2327 }
2328 if( rid==0 ){
2329 style_footer();
2330 return;
2331 }
2332 }
2333
2334 if( descOnly || P("verbose")!=0 ){
2335 url_add_parameter(&url, "verbose", "1");
2336 objdescFlags |= OBJDESC_DETAIL;
@@ -2311,11 +2340,11 @@
2340
2341 asText = P("txt")!=0;
2342 if( isFile ){
2343 if( zCI==0 || fossil_strcmp(zCI,"tip")==0 ){
2344 zCI = "tip";
2345 @ <h2>File %z(href("%R/finfo?name=%T&m&ci=tip",zName))%h(zName)</a>
2346 @ from the %z(href("%R/info/tip"))latest check-in</a></h2>
2347 }else{
2348 const char *zPath;
2349 Blob path;
2350 blob_zero(&path);
@@ -2412,13 +2441,15 @@
2441 }else{
2442 renderAsHtml = 1;
2443 style_submenu_element("Text", "%s", url_render(&url, "txt", "1", 0, 0));
2444 }
2445 }else if( fossil_strcmp(zMime, "text/x-fossil-wiki")==0
2446 || fossil_strcmp(zMime, "text/x-markdown")==0
2447 || fossil_strcmp(zMime, "text/x-pikchr")==0 ){
2448 if( asText ){
2449 style_submenu_element(zMime[7]=='p' ? "Pikchr" : "Wiki",
2450 "%s", url_render(&url, "txt", 0, 0, 0));
2451 }else{
2452 renderAsWiki = 1;
2453 style_submenu_element("Text", "%s", url_render(&url, "txt", "1", 0, 0));
2454 }
2455 }else if( fossil_strcmp(zMime, "image/svg+xml")==0 ){
2456
+7 -2
--- src/main.c
+++ src/main.c
@@ -674,12 +674,12 @@
674674
}
675675
}
676676
#endif
677677
678678
fossil_limit_memory(1);
679
- if( sqlite3_libversion_number()<3033000 ){
680
- fossil_panic("Unsuitable SQLite version %s, must be at least 3.33.0",
679
+ if( sqlite3_libversion_number()<3034000 ){
680
+ fossil_panic("Unsuitable SQLite version %s, must be at least 3.34.0",
681681
sqlite3_libversion());
682682
}
683683
sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
684684
sqlite3_config(SQLITE_CONFIG_LOG, fossil_sqlite_log, 0);
685685
memset(&g, 0, sizeof(g));
@@ -1217,10 +1217,11 @@
12171217
blob_append(pOut, "FOSSIL_ENABLE_TCL_PRIVATE_STUBS\n", -1);
12181218
#endif
12191219
#if defined(FOSSIL_ENABLE_JSON)
12201220
blob_appendf(pOut, "JSON (API %s)\n", FOSSIL_JSON_API_VERSION);
12211221
#endif
1222
+ blob_append(pOut, "MARKDOWN\n", -1);
12221223
#if defined(BROKEN_MINGW_CMDLINE)
12231224
blob_append(pOut, "MBCS_COMMAND_LINE\n", -1);
12241225
#else
12251226
blob_append(pOut, "UNICODE_COMMAND_LINE\n", -1);
12261227
#endif
@@ -1331,10 +1332,13 @@
13311332
13321333
if( g.zBaseURL!=0 ) return;
13331334
if( zAltBase ){
13341335
int i, n, c;
13351336
g.zTop = g.zBaseURL = mprintf("%s", zAltBase);
1337
+ i = (int)strlen(g.zBaseURL);
1338
+ while( i>3 && g.zBaseURL[i-1]=='/' ){ i--; }
1339
+ g.zBaseURL[i] = 0;
13361340
if( strncmp(g.zTop, "http://", 7)==0 ){
13371341
/* it is HTTP, replace prefix with HTTPS. */
13381342
g.zHttpsURL = mprintf("https://%s", &g.zTop[7]);
13391343
}else if( strncmp(g.zTop, "https://", 8)==0 ){
13401344
/* it is already HTTPS, use it. */
@@ -1350,10 +1354,11 @@
13501354
g.zTop += i;
13511355
break;
13521356
}
13531357
}
13541358
}
1359
+ if( n==2 ) g.zTop = "";
13551360
if( g.zTop==g.zBaseURL ){
13561361
fossil_fatal("argument to --baseurl should be 'http://host/path'"
13571362
" or 'https://host/path'");
13581363
}
13591364
if( g.zTop[1]==0 ) g.zTop++;
13601365
--- src/main.c
+++ src/main.c
@@ -674,12 +674,12 @@
674 }
675 }
676 #endif
677
678 fossil_limit_memory(1);
679 if( sqlite3_libversion_number()<3033000 ){
680 fossil_panic("Unsuitable SQLite version %s, must be at least 3.33.0",
681 sqlite3_libversion());
682 }
683 sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
684 sqlite3_config(SQLITE_CONFIG_LOG, fossil_sqlite_log, 0);
685 memset(&g, 0, sizeof(g));
@@ -1217,10 +1217,11 @@
1217 blob_append(pOut, "FOSSIL_ENABLE_TCL_PRIVATE_STUBS\n", -1);
1218 #endif
1219 #if defined(FOSSIL_ENABLE_JSON)
1220 blob_appendf(pOut, "JSON (API %s)\n", FOSSIL_JSON_API_VERSION);
1221 #endif
 
1222 #if defined(BROKEN_MINGW_CMDLINE)
1223 blob_append(pOut, "MBCS_COMMAND_LINE\n", -1);
1224 #else
1225 blob_append(pOut, "UNICODE_COMMAND_LINE\n", -1);
1226 #endif
@@ -1331,10 +1332,13 @@
1331
1332 if( g.zBaseURL!=0 ) return;
1333 if( zAltBase ){
1334 int i, n, c;
1335 g.zTop = g.zBaseURL = mprintf("%s", zAltBase);
 
 
 
1336 if( strncmp(g.zTop, "http://", 7)==0 ){
1337 /* it is HTTP, replace prefix with HTTPS. */
1338 g.zHttpsURL = mprintf("https://%s", &g.zTop[7]);
1339 }else if( strncmp(g.zTop, "https://", 8)==0 ){
1340 /* it is already HTTPS, use it. */
@@ -1350,10 +1354,11 @@
1350 g.zTop += i;
1351 break;
1352 }
1353 }
1354 }
 
1355 if( g.zTop==g.zBaseURL ){
1356 fossil_fatal("argument to --baseurl should be 'http://host/path'"
1357 " or 'https://host/path'");
1358 }
1359 if( g.zTop[1]==0 ) g.zTop++;
1360
--- src/main.c
+++ src/main.c
@@ -674,12 +674,12 @@
674 }
675 }
676 #endif
677
678 fossil_limit_memory(1);
679 if( sqlite3_libversion_number()<3034000 ){
680 fossil_panic("Unsuitable SQLite version %s, must be at least 3.34.0",
681 sqlite3_libversion());
682 }
683 sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
684 sqlite3_config(SQLITE_CONFIG_LOG, fossil_sqlite_log, 0);
685 memset(&g, 0, sizeof(g));
@@ -1217,10 +1217,11 @@
1217 blob_append(pOut, "FOSSIL_ENABLE_TCL_PRIVATE_STUBS\n", -1);
1218 #endif
1219 #if defined(FOSSIL_ENABLE_JSON)
1220 blob_appendf(pOut, "JSON (API %s)\n", FOSSIL_JSON_API_VERSION);
1221 #endif
1222 blob_append(pOut, "MARKDOWN\n", -1);
1223 #if defined(BROKEN_MINGW_CMDLINE)
1224 blob_append(pOut, "MBCS_COMMAND_LINE\n", -1);
1225 #else
1226 blob_append(pOut, "UNICODE_COMMAND_LINE\n", -1);
1227 #endif
@@ -1331,10 +1332,13 @@
1332
1333 if( g.zBaseURL!=0 ) return;
1334 if( zAltBase ){
1335 int i, n, c;
1336 g.zTop = g.zBaseURL = mprintf("%s", zAltBase);
1337 i = (int)strlen(g.zBaseURL);
1338 while( i>3 && g.zBaseURL[i-1]=='/' ){ i--; }
1339 g.zBaseURL[i] = 0;
1340 if( strncmp(g.zTop, "http://", 7)==0 ){
1341 /* it is HTTP, replace prefix with HTTPS. */
1342 g.zHttpsURL = mprintf("https://%s", &g.zTop[7]);
1343 }else if( strncmp(g.zTop, "https://", 8)==0 ){
1344 /* it is already HTTPS, use it. */
@@ -1350,10 +1354,11 @@
1354 g.zTop += i;
1355 break;
1356 }
1357 }
1358 }
1359 if( n==2 ) g.zTop = "";
1360 if( g.zTop==g.zBaseURL ){
1361 fossil_fatal("argument to --baseurl should be 'http://host/path'"
1362 " or 'https://host/path'");
1363 }
1364 if( g.zTop[1]==0 ) g.zTop++;
1365
+14 -2
--- src/manifest.c
+++ src/manifest.c
@@ -1804,10 +1804,17 @@
18041804
for(i=1; i<pChild->nParent; i++){
18051805
pmid = uuid_to_rid(pChild->azParent[i], 0);
18061806
if( pmid<=0 ) continue;
18071807
add_mlink(pmid, 0, mid, pChild, 0);
18081808
}
1809
+ for(i=0; i<pChild->nCherrypick; i++){
1810
+ if( pChild->aCherrypick[i].zCPTarget[0]=='+'
1811
+ && (pmid = uuid_to_rid(pChild->aCherrypick[i].zCPTarget+1, 0))>0
1812
+ ){
1813
+ add_mlink(pmid, 0, mid, pChild, 0);
1814
+ }
1815
+ }
18091816
}
18101817
}
18111818
18121819
/*
18131820
** For a check-in with RID "rid" that has nParent parent check-ins given
@@ -1826,10 +1833,11 @@
18261833
){
18271834
int i;
18281835
int parentid = 0;
18291836
char zBaseId[30]; /* Baseline manifest RID for deltas. "NULL" otherwise */
18301837
Stmt q;
1838
+ int nLink;
18311839
18321840
if( p->zBaseline ){
18331841
sqlite3_snprintf(sizeof(zBaseId), zBaseId, "%d",
18341842
uuid_to_rid(p->zBaseline,1));
18351843
}else{
@@ -1842,20 +1850,24 @@
18421850
"VALUES(%d, %d, %d, %.17g, %s)",
18431851
pid, rid, i==0, p->rDate, zBaseId/*safe-for-%s*/);
18441852
if( i==0 ) parentid = pid;
18451853
}
18461854
add_mlink(parentid, 0, rid, p, 1);
1847
- if( nParent>1 ){
1855
+ nLink = nParent;
1856
+ for(i=0; i<p->nCherrypick; i++){
1857
+ if( p->aCherrypick[i].zCPTarget[0]=='+' ) nLink++;
1858
+ }
1859
+ if( nLink>1 ){
18481860
/* Change MLINK.PID from 0 to -1 for files that are added by merge. */
18491861
db_multi_exec(
18501862
"UPDATE mlink SET pid=-1"
18511863
" WHERE mid=%d"
18521864
" AND pid=0"
18531865
" AND fnid IN "
18541866
" (SELECT fnid FROM mlink WHERE mid=%d GROUP BY fnid"
18551867
" HAVING count(*)<%d)",
1856
- rid, rid, nParent
1868
+ rid, rid, nLink
18571869
);
18581870
}
18591871
db_prepare(&q, "SELECT cid, isprim FROM plink WHERE pid=%d", rid);
18601872
while( db_step(&q)==SQLITE_ROW ){
18611873
int cid = db_column_int(&q, 0);
18621874
--- src/manifest.c
+++ src/manifest.c
@@ -1804,10 +1804,17 @@
1804 for(i=1; i<pChild->nParent; i++){
1805 pmid = uuid_to_rid(pChild->azParent[i], 0);
1806 if( pmid<=0 ) continue;
1807 add_mlink(pmid, 0, mid, pChild, 0);
1808 }
 
 
 
 
 
 
 
1809 }
1810 }
1811
1812 /*
1813 ** For a check-in with RID "rid" that has nParent parent check-ins given
@@ -1826,10 +1833,11 @@
1826 ){
1827 int i;
1828 int parentid = 0;
1829 char zBaseId[30]; /* Baseline manifest RID for deltas. "NULL" otherwise */
1830 Stmt q;
 
1831
1832 if( p->zBaseline ){
1833 sqlite3_snprintf(sizeof(zBaseId), zBaseId, "%d",
1834 uuid_to_rid(p->zBaseline,1));
1835 }else{
@@ -1842,20 +1850,24 @@
1842 "VALUES(%d, %d, %d, %.17g, %s)",
1843 pid, rid, i==0, p->rDate, zBaseId/*safe-for-%s*/);
1844 if( i==0 ) parentid = pid;
1845 }
1846 add_mlink(parentid, 0, rid, p, 1);
1847 if( nParent>1 ){
 
 
 
 
1848 /* Change MLINK.PID from 0 to -1 for files that are added by merge. */
1849 db_multi_exec(
1850 "UPDATE mlink SET pid=-1"
1851 " WHERE mid=%d"
1852 " AND pid=0"
1853 " AND fnid IN "
1854 " (SELECT fnid FROM mlink WHERE mid=%d GROUP BY fnid"
1855 " HAVING count(*)<%d)",
1856 rid, rid, nParent
1857 );
1858 }
1859 db_prepare(&q, "SELECT cid, isprim FROM plink WHERE pid=%d", rid);
1860 while( db_step(&q)==SQLITE_ROW ){
1861 int cid = db_column_int(&q, 0);
1862
--- src/manifest.c
+++ src/manifest.c
@@ -1804,10 +1804,17 @@
1804 for(i=1; i<pChild->nParent; i++){
1805 pmid = uuid_to_rid(pChild->azParent[i], 0);
1806 if( pmid<=0 ) continue;
1807 add_mlink(pmid, 0, mid, pChild, 0);
1808 }
1809 for(i=0; i<pChild->nCherrypick; i++){
1810 if( pChild->aCherrypick[i].zCPTarget[0]=='+'
1811 && (pmid = uuid_to_rid(pChild->aCherrypick[i].zCPTarget+1, 0))>0
1812 ){
1813 add_mlink(pmid, 0, mid, pChild, 0);
1814 }
1815 }
1816 }
1817 }
1818
1819 /*
1820 ** For a check-in with RID "rid" that has nParent parent check-ins given
@@ -1826,10 +1833,11 @@
1833 ){
1834 int i;
1835 int parentid = 0;
1836 char zBaseId[30]; /* Baseline manifest RID for deltas. "NULL" otherwise */
1837 Stmt q;
1838 int nLink;
1839
1840 if( p->zBaseline ){
1841 sqlite3_snprintf(sizeof(zBaseId), zBaseId, "%d",
1842 uuid_to_rid(p->zBaseline,1));
1843 }else{
@@ -1842,20 +1850,24 @@
1850 "VALUES(%d, %d, %d, %.17g, %s)",
1851 pid, rid, i==0, p->rDate, zBaseId/*safe-for-%s*/);
1852 if( i==0 ) parentid = pid;
1853 }
1854 add_mlink(parentid, 0, rid, p, 1);
1855 nLink = nParent;
1856 for(i=0; i<p->nCherrypick; i++){
1857 if( p->aCherrypick[i].zCPTarget[0]=='+' ) nLink++;
1858 }
1859 if( nLink>1 ){
1860 /* Change MLINK.PID from 0 to -1 for files that are added by merge. */
1861 db_multi_exec(
1862 "UPDATE mlink SET pid=-1"
1863 " WHERE mid=%d"
1864 " AND pid=0"
1865 " AND fnid IN "
1866 " (SELECT fnid FROM mlink WHERE mid=%d GROUP BY fnid"
1867 " HAVING count(*)<%d)",
1868 rid, rid, nLink
1869 );
1870 }
1871 db_prepare(&q, "SELECT cid, isprim FROM plink WHERE pid=%d", rid);
1872 while( db_step(&q)==SQLITE_ROW ){
1873 int cid = db_column_int(&q, 0);
1874
+1 -1
--- src/merge3.c
+++ src/merge3.c
@@ -195,10 +195,11 @@
195195
int *aC2; /* Changes from pPivot to pV2 */
196196
int i1, i2; /* Index into aC1[] and aC2[] */
197197
int nCpy, nDel, nIns; /* Number of lines to copy, delete, or insert */
198198
int limit1, limit2; /* Sizes of aC1[] and aC2[] */
199199
int nConflict = 0; /* Number of merge conflicts seen so far */
200
+ int useCrLf = 0;
200201
201202
blob_zero(pOut); /* Merge results stored in pOut */
202203
203204
/* If both pV1 and pV2 start with a UTF-8 byte-order-mark (BOM),
204205
** keep it in the output. This should be secure enough not to cause
@@ -211,11 +212,10 @@
211212
212213
/* Check once to see if both pV1 and pV2 contains CR/LF endings.
213214
** If true, CR/LF pair will be used later to append the
214215
** boundary markers for merge conflicts.
215216
*/
216
- int useCrLf = 0;
217217
if( contains_crlf(pV1) && contains_crlf(pV2) ){
218218
useCrLf = 1;
219219
}
220220
221221
/* Compute the edits that occur from pPivot => pV1 (into aC1)
222222
--- src/merge3.c
+++ src/merge3.c
@@ -195,10 +195,11 @@
195 int *aC2; /* Changes from pPivot to pV2 */
196 int i1, i2; /* Index into aC1[] and aC2[] */
197 int nCpy, nDel, nIns; /* Number of lines to copy, delete, or insert */
198 int limit1, limit2; /* Sizes of aC1[] and aC2[] */
199 int nConflict = 0; /* Number of merge conflicts seen so far */
 
200
201 blob_zero(pOut); /* Merge results stored in pOut */
202
203 /* If both pV1 and pV2 start with a UTF-8 byte-order-mark (BOM),
204 ** keep it in the output. This should be secure enough not to cause
@@ -211,11 +212,10 @@
211
212 /* Check once to see if both pV1 and pV2 contains CR/LF endings.
213 ** If true, CR/LF pair will be used later to append the
214 ** boundary markers for merge conflicts.
215 */
216 int useCrLf = 0;
217 if( contains_crlf(pV1) && contains_crlf(pV2) ){
218 useCrLf = 1;
219 }
220
221 /* Compute the edits that occur from pPivot => pV1 (into aC1)
222
--- src/merge3.c
+++ src/merge3.c
@@ -195,10 +195,11 @@
195 int *aC2; /* Changes from pPivot to pV2 */
196 int i1, i2; /* Index into aC1[] and aC2[] */
197 int nCpy, nDel, nIns; /* Number of lines to copy, delete, or insert */
198 int limit1, limit2; /* Sizes of aC1[] and aC2[] */
199 int nConflict = 0; /* Number of merge conflicts seen so far */
200 int useCrLf = 0;
201
202 blob_zero(pOut); /* Merge results stored in pOut */
203
204 /* If both pV1 and pV2 start with a UTF-8 byte-order-mark (BOM),
205 ** keep it in the output. This should be secure enough not to cause
@@ -211,11 +212,10 @@
212
213 /* Check once to see if both pV1 and pV2 contains CR/LF endings.
214 ** If true, CR/LF pair will be used later to append the
215 ** boundary markers for merge conflicts.
216 */
 
217 if( contains_crlf(pV1) && contains_crlf(pV2) ){
218 useCrLf = 1;
219 }
220
221 /* Compute the edits that occur from pPivot => pV1 (into aC1)
222
+22 -4
--- src/name.c
+++ src/name.c
@@ -776,10 +776,11 @@
776776
}
777777
fossil_print("type: %s by %s on %s\n", zType, db_column_text(&q,2),
778778
db_column_text(&q, 1));
779779
fossil_print("comment: ");
780780
comment_print(db_column_text(&q,3), 0, 12, -1, get_comment_format());
781
+ cnt++;
781782
}
782783
db_finalize(&q);
783784
784785
/* Check to see if this object is used as a file in a check-in */
785786
db_prepare(&q,
@@ -798,10 +799,11 @@
798799
db_column_text(&q, 1),
799800
db_column_text(&q, 3),
800801
db_column_text(&q, 2));
801802
fossil_print(" ");
802803
comment_print(db_column_text(&q,4), 0, 12, -1, get_comment_format());
804
+ cnt++;
803805
}
804806
db_finalize(&q);
805807
806808
/* Check to see if this object is used as an attachment */
807809
db_prepare(&q,
@@ -833,12 +835,26 @@
833835
}
834836
fossil_print(" by user %s on %s\n",
835837
db_column_text(&q,2), db_column_text(&q,3));
836838
fossil_print(" ");
837839
comment_print(db_column_text(&q,1), 0, 12, -1, get_comment_format());
840
+ cnt++;
838841
}
839842
db_finalize(&q);
843
+
844
+ /* If other information available, try to describe the object */
845
+ if( cnt==0 ){
846
+ char *zWhere = mprintf("=%d", rid);
847
+ char *zDesc;
848
+ describe_artifacts(zWhere);
849
+ free(zWhere);
850
+ zDesc = db_text(0,
851
+ "SELECT printf('%%-12s%%s %%s',type||':',summary,substr(ref,1,16))"
852
+ " FROM description WHERE rid=%d", rid);
853
+ fossil_print("%s\n", zDesc);
854
+ fossil_free(zDesc);
855
+ }
840856
}
841857
842858
/*
843859
** COMMAND: whatis*
844860
**
@@ -1041,11 +1057,12 @@
10411057
" 'referenced by cluster', blob.uuid\n"
10421058
" FROM description, tagxref, blob\n"
10431059
" WHERE description.summary='unknown'\n"
10441060
" AND tagxref.tagid=(SELECT tagid FROM tag WHERE tagname='cluster')\n"
10451061
" AND blob.rid=tagxref.rid\n"
1046
- " AND content(blob.uuid) GLOB ('*M '||blob.uuid||'*');"
1062
+ " AND CAST(content(blob.uuid) AS text)"
1063
+ " GLOB ('*M '||description.uuid||'*');"
10471064
);
10481065
}
10491066
10501067
/*
10511068
** Create the description table if it does not already exists.
@@ -1218,21 +1235,22 @@
12181235
int describe_artifacts_to_stdout(const char *zWhere, const char *zLabel){
12191236
Stmt q;
12201237
int cnt = 0;
12211238
if( zWhere!=0 ) describe_artifacts(zWhere);
12221239
db_prepare(&q,
1223
- "SELECT uuid, summary, isPrivate\n"
1240
+ "SELECT uuid, summary, coalesce(ref,''), isPrivate\n"
12241241
" FROM description\n"
12251242
" ORDER BY ctime, type;"
12261243
);
12271244
while( db_step(&q)==SQLITE_ROW ){
12281245
if( zLabel ){
12291246
fossil_print("%s\n", zLabel);
12301247
zLabel = 0;
12311248
}
1232
- fossil_print(" %.16s %s", db_column_text(&q,0), db_column_text(&q,1));
1233
- if( db_column_int(&q,2) ) fossil_print(" (private)");
1249
+ fossil_print(" %.16s %s %s", db_column_text(&q,0),
1250
+ db_column_text(&q,1), db_column_text(&q,2));
1251
+ if( db_column_int(&q,3) ) fossil_print(" (private)");
12341252
fossil_print("\n");
12351253
cnt++;
12361254
}
12371255
db_finalize(&q);
12381256
if( zWhere!=0 ) db_multi_exec("DELETE FROM description;");
12391257
--- src/name.c
+++ src/name.c
@@ -776,10 +776,11 @@
776 }
777 fossil_print("type: %s by %s on %s\n", zType, db_column_text(&q,2),
778 db_column_text(&q, 1));
779 fossil_print("comment: ");
780 comment_print(db_column_text(&q,3), 0, 12, -1, get_comment_format());
 
781 }
782 db_finalize(&q);
783
784 /* Check to see if this object is used as a file in a check-in */
785 db_prepare(&q,
@@ -798,10 +799,11 @@
798 db_column_text(&q, 1),
799 db_column_text(&q, 3),
800 db_column_text(&q, 2));
801 fossil_print(" ");
802 comment_print(db_column_text(&q,4), 0, 12, -1, get_comment_format());
 
803 }
804 db_finalize(&q);
805
806 /* Check to see if this object is used as an attachment */
807 db_prepare(&q,
@@ -833,12 +835,26 @@
833 }
834 fossil_print(" by user %s on %s\n",
835 db_column_text(&q,2), db_column_text(&q,3));
836 fossil_print(" ");
837 comment_print(db_column_text(&q,1), 0, 12, -1, get_comment_format());
 
838 }
839 db_finalize(&q);
 
 
 
 
 
 
 
 
 
 
 
 
 
840 }
841
842 /*
843 ** COMMAND: whatis*
844 **
@@ -1041,11 +1057,12 @@
1041 " 'referenced by cluster', blob.uuid\n"
1042 " FROM description, tagxref, blob\n"
1043 " WHERE description.summary='unknown'\n"
1044 " AND tagxref.tagid=(SELECT tagid FROM tag WHERE tagname='cluster')\n"
1045 " AND blob.rid=tagxref.rid\n"
1046 " AND content(blob.uuid) GLOB ('*M '||blob.uuid||'*');"
 
1047 );
1048 }
1049
1050 /*
1051 ** Create the description table if it does not already exists.
@@ -1218,21 +1235,22 @@
1218 int describe_artifacts_to_stdout(const char *zWhere, const char *zLabel){
1219 Stmt q;
1220 int cnt = 0;
1221 if( zWhere!=0 ) describe_artifacts(zWhere);
1222 db_prepare(&q,
1223 "SELECT uuid, summary, isPrivate\n"
1224 " FROM description\n"
1225 " ORDER BY ctime, type;"
1226 );
1227 while( db_step(&q)==SQLITE_ROW ){
1228 if( zLabel ){
1229 fossil_print("%s\n", zLabel);
1230 zLabel = 0;
1231 }
1232 fossil_print(" %.16s %s", db_column_text(&q,0), db_column_text(&q,1));
1233 if( db_column_int(&q,2) ) fossil_print(" (private)");
 
1234 fossil_print("\n");
1235 cnt++;
1236 }
1237 db_finalize(&q);
1238 if( zWhere!=0 ) db_multi_exec("DELETE FROM description;");
1239
--- src/name.c
+++ src/name.c
@@ -776,10 +776,11 @@
776 }
777 fossil_print("type: %s by %s on %s\n", zType, db_column_text(&q,2),
778 db_column_text(&q, 1));
779 fossil_print("comment: ");
780 comment_print(db_column_text(&q,3), 0, 12, -1, get_comment_format());
781 cnt++;
782 }
783 db_finalize(&q);
784
785 /* Check to see if this object is used as a file in a check-in */
786 db_prepare(&q,
@@ -798,10 +799,11 @@
799 db_column_text(&q, 1),
800 db_column_text(&q, 3),
801 db_column_text(&q, 2));
802 fossil_print(" ");
803 comment_print(db_column_text(&q,4), 0, 12, -1, get_comment_format());
804 cnt++;
805 }
806 db_finalize(&q);
807
808 /* Check to see if this object is used as an attachment */
809 db_prepare(&q,
@@ -833,12 +835,26 @@
835 }
836 fossil_print(" by user %s on %s\n",
837 db_column_text(&q,2), db_column_text(&q,3));
838 fossil_print(" ");
839 comment_print(db_column_text(&q,1), 0, 12, -1, get_comment_format());
840 cnt++;
841 }
842 db_finalize(&q);
843
844 /* If other information available, try to describe the object */
845 if( cnt==0 ){
846 char *zWhere = mprintf("=%d", rid);
847 char *zDesc;
848 describe_artifacts(zWhere);
849 free(zWhere);
850 zDesc = db_text(0,
851 "SELECT printf('%%-12s%%s %%s',type||':',summary,substr(ref,1,16))"
852 " FROM description WHERE rid=%d", rid);
853 fossil_print("%s\n", zDesc);
854 fossil_free(zDesc);
855 }
856 }
857
858 /*
859 ** COMMAND: whatis*
860 **
@@ -1041,11 +1057,12 @@
1057 " 'referenced by cluster', blob.uuid\n"
1058 " FROM description, tagxref, blob\n"
1059 " WHERE description.summary='unknown'\n"
1060 " AND tagxref.tagid=(SELECT tagid FROM tag WHERE tagname='cluster')\n"
1061 " AND blob.rid=tagxref.rid\n"
1062 " AND CAST(content(blob.uuid) AS text)"
1063 " GLOB ('*M '||description.uuid||'*');"
1064 );
1065 }
1066
1067 /*
1068 ** Create the description table if it does not already exists.
@@ -1218,21 +1235,22 @@
1235 int describe_artifacts_to_stdout(const char *zWhere, const char *zLabel){
1236 Stmt q;
1237 int cnt = 0;
1238 if( zWhere!=0 ) describe_artifacts(zWhere);
1239 db_prepare(&q,
1240 "SELECT uuid, summary, coalesce(ref,''), isPrivate\n"
1241 " FROM description\n"
1242 " ORDER BY ctime, type;"
1243 );
1244 while( db_step(&q)==SQLITE_ROW ){
1245 if( zLabel ){
1246 fossil_print("%s\n", zLabel);
1247 zLabel = 0;
1248 }
1249 fossil_print(" %.16s %s %s", db_column_text(&q,0),
1250 db_column_text(&q,1), db_column_text(&q,2));
1251 if( db_column_int(&q,3) ) fossil_print(" (private)");
1252 fossil_print("\n");
1253 cnt++;
1254 }
1255 db_finalize(&q);
1256 if( zWhere!=0 ) db_multi_exec("DELETE FROM description;");
1257
+96 -39
--- src/pikchr.c
+++ src/pikchr.c
@@ -3612,11 +3612,10 @@
36123612
/* Methods for the "arrow" class */
36133613
static void arrowInit(Pik *p, PObj *pObj){
36143614
pObj->w = pik_value(p, "linewid",7,0);
36153615
pObj->h = pik_value(p, "lineht",6,0);
36163616
pObj->rad = pik_value(p, "linerad",7,0);
3617
- pObj->fill = -1.0;
36183617
pObj->rarrow = 1;
36193618
}
36203619
36213620
/* Methods for the "box" class */
36223621
static void boxInit(Pik *p, PObj *pObj){
@@ -4033,11 +4032,10 @@
40334032
/* Methods for the "line" class */
40344033
static void lineInit(Pik *p, PObj *pObj){
40354034
pObj->w = pik_value(p, "linewid",7,0);
40364035
pObj->h = pik_value(p, "lineht",6,0);
40374036
pObj->rad = pik_value(p, "linerad",7,0);
4038
- pObj->fill = -1.0;
40394037
}
40404038
static PPoint lineOffset(Pik *p, PObj *pObj, int cp){
40414039
#if 0
40424040
/* In legacy PIC, the .center of an unclosed line is half way between
40434041
** its .start and .end. */
@@ -4117,11 +4115,10 @@
41174115
/* Methods for the "spline" class */
41184116
static void splineInit(Pik *p, PObj *pObj){
41194117
pObj->w = pik_value(p, "linewid",7,0);
41204118
pObj->h = pik_value(p, "lineht",6,0);
41214119
pObj->rad = 1000;
4122
- pObj->fill = -1.0; /* Disable fill by default */
41234120
}
41244121
/* Return a point along the path from "f" to "t" that is r units
41254122
** prior to reaching "t", except if the path is less than 2*r total,
41264123
** return the midpoint.
41274124
*/
@@ -4146,27 +4143,35 @@
41464143
static void radiusPath(Pik *p, PObj *pObj, PNum r){
41474144
int i;
41484145
int n = pObj->nPath;
41494146
const PPoint *a = pObj->aPath;
41504147
PPoint m;
4148
+ PPoint an = a[n-1];
41514149
int isMid = 0;
4150
+ int iLast = pObj->bClose ? n : n-1;
41524151
41534152
pik_append_xy(p,"<path d=\"M", a[0].x, a[0].y);
41544153
m = radiusMidpoint(a[0], a[1], r, &isMid);
41554154
pik_append_xy(p," L ",m.x,m.y);
4156
- for(i=1; i<n-1; i++){
4157
- m = radiusMidpoint(a[i+1],a[i],r, &isMid);
4155
+ for(i=1; i<iLast; i++){
4156
+ an = i<n-1 ? a[i+1] : a[0];
4157
+ m = radiusMidpoint(an,a[i],r, &isMid);
41584158
pik_append_xy(p," Q ",a[i].x,a[i].y);
41594159
pik_append_xy(p," ",m.x,m.y);
41604160
if( !isMid ){
4161
- m = radiusMidpoint(a[i],a[i+1],r, &isMid);
4161
+ m = radiusMidpoint(a[i],an,r, &isMid);
41624162
pik_append_xy(p," L ",m.x,m.y);
41634163
}
41644164
}
4165
- pik_append_xy(p," L ",a[i].x,a[i].y);
4165
+ pik_append_xy(p," L ",an.x,an.y);
4166
+ if( pObj->bClose ){
4167
+ pik_append(p,"Z",1);
4168
+ }else{
4169
+ pObj->fill = -1.0;
4170
+ }
41664171
pik_append(p,"\" ",-1);
4167
- pik_append_style(p,pObj,0);
4172
+ pik_append_style(p,pObj,pObj->bClose);
41684173
pik_append(p,"\" />\n", -1);
41694174
}
41704175
static void splineRender(Pik *p, PObj *pObj){
41714176
if( pObj->sw>0.0 ){
41724177
int n = pObj->nPath;
@@ -4713,67 +4718,108 @@
47134718
aTxt[i].eCode |= aFree[iSlot++];
47144719
}
47154720
}
47164721
}
47174722
}
4723
+
4724
+/* Return the font scaling factor associated with the input text attribute.
4725
+*/
4726
+static PNum pik_font_scale(PToken *t){
4727
+ PNum scale = 1.0;
4728
+ if( t->eCode & TP_BIG ) scale *= 1.25;
4729
+ if( t->eCode & TP_SMALL ) scale *= 0.8;
4730
+ if( t->eCode & TP_XTRA ) scale *= scale;
4731
+ return scale;
4732
+}
47184733
47194734
/* Append multiple <text> SVG elements for the text fields of the PObj.
47204735
** Parameters:
47214736
**
47224737
** p The Pik object into which we are rendering
47234738
**
4724
-** pObj Object containing the text to be rendered
4739
+** pObj Object containing the text to be rendered
47254740
**
47264741
** pBox If not NULL, do no rendering at all. Instead
47274742
** expand the box object so that it will include all
47284743
** of the text.
47294744
*/
47304745
static void pik_append_txt(Pik *p, PObj *pObj, PBox *pBox){
4731
- PNum dy; /* Half the height of a single line of text */
4732
- PNum dy2; /* Extra vertical space around the center */
47334746
PNum jw; /* Justification margin relative to center */
4747
+ PNum ha2 = 0.0; /* Height of the top row of text */
4748
+ PNum ha1 = 0.0; /* Height of the second "above" row */
4749
+ PNum hc = 0.0; /* Height of the center row */
4750
+ PNum hb1 = 0.0; /* Height of the first "below" row of text */
4751
+ PNum hb2 = 0.0; /* Height of the second "below" row */
47344752
int n, i, nz;
4735
- PNum x, y, orig_y;
4753
+ PNum x, y, orig_y, s;
47364754
const char *z;
47374755
PToken *aTxt;
4738
- int hasCenter = 0;
4756
+ unsigned allMask = 0;
47394757
47404758
if( p->nErr ) return;
47414759
if( pObj->nTxt==0 ) return;
47424760
aTxt = pObj->aTxt;
4743
- dy = 0.5*p->charHeight;
47444761
n = pObj->nTxt;
47454762
pik_txt_vertical_layout(pObj);
47464763
x = pObj->ptAt.x;
4747
- for(i=0; i<n; i++){
4748
- if( (pObj->aTxt[i].eCode & TP_CENTER)!=0 ) hasCenter = 1;
4749
- }
4750
- if( hasCenter ){
4751
- dy2 = dy;
4752
- }else if( pObj->type->isLine ){
4753
- dy2 = pObj->sw;
4754
- }else{
4755
- dy2 = 0.0;
4764
+ for(i=0; i<n; i++) allMask |= pObj->aTxt[i].eCode;
4765
+ if( pObj->type->isLine ) hc = pObj->sw*1.5;
4766
+ if( allMask & TP_CENTER ){
4767
+ for(i=0; i<n; i++){
4768
+ if( pObj->aTxt[i].eCode & TP_CENTER ){
4769
+ s = pik_font_scale(pObj->aTxt+i);
4770
+ if( hc<s*p->charHeight ) hc = s*p->charHeight;
4771
+ }
4772
+ }
4773
+ }
4774
+ if( allMask & TP_ABOVE ){
4775
+ for(i=0; i<n; i++){
4776
+ if( pObj->aTxt[i].eCode & TP_ABOVE ){
4777
+ s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4778
+ if( ha1<s ) ha1 = s;
4779
+ }
4780
+ }
4781
+ if( allMask & TP_ABOVE2 ){
4782
+ for(i=0; i<n; i++){
4783
+ if( pObj->aTxt[i].eCode & TP_ABOVE2 ){
4784
+ s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4785
+ if( ha2<s ) ha2 = s;
4786
+ }
4787
+ }
4788
+ }
4789
+ }
4790
+ if( allMask & TP_BELOW ){
4791
+ for(i=0; i<n; i++){
4792
+ if( pObj->aTxt[i].eCode & TP_BELOW ){
4793
+ s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4794
+ if( hb1<s ) hb1 = s;
4795
+ }
4796
+ }
4797
+ if( allMask & TP_BELOW2 ){
4798
+ for(i=0; i<n; i++){
4799
+ if( pObj->aTxt[i].eCode & TP_BELOW2 ){
4800
+ s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4801
+ if( hb2<s ) hb2 = s;
4802
+ }
4803
+ }
4804
+ }
47564805
}
47574806
if( pObj->type->eJust==1 ){
47584807
jw = 0.5*(pObj->w - 0.5*(p->charWidth + pObj->sw));
47594808
}else{
47604809
jw = 0.0;
47614810
}
47624811
for(i=0; i<n; i++){
47634812
PToken *t = &aTxt[i];
4764
- PNum xtraFontScale = 1.0;
4765
- orig_y = pObj->ptAt.y;
4813
+ PNum xtraFontScale = pik_font_scale(t);
47664814
PNum nx = 0;
4815
+ orig_y = pObj->ptAt.y;
47674816
y = 0;
4768
- if( t->eCode & TP_ABOVE2 ) y += dy2 + 3*dy;
4769
- if( t->eCode & TP_ABOVE ) y += dy2 + dy;
4770
- if( t->eCode & TP_BELOW ) y -= dy2 + dy;
4771
- if( t->eCode & TP_BELOW2 ) y -= dy2 + 3*dy;
4772
- if( t->eCode & TP_BIG ) xtraFontScale *= 1.25;
4773
- if( t->eCode & TP_SMALL ) xtraFontScale *= 0.8;
4774
- if( t->eCode & TP_XTRA ) xtraFontScale *= xtraFontScale;
4817
+ if( t->eCode & TP_ABOVE2 ) y += 0.5*hc + ha1 + 0.5*ha2;
4818
+ if( t->eCode & TP_ABOVE ) y += 0.5*hc + 0.5*ha1;
4819
+ if( t->eCode & TP_BELOW ) y -= 0.5*hc + 0.5*hb1;
4820
+ if( t->eCode & TP_BELOW2 ) y -= 0.5*hc + hb1 + 0.5*hb2;
47754821
if( t->eCode & TP_LJUST ) nx -= jw;
47764822
if( t->eCode & TP_RJUST ) nx += jw;
47774823
47784824
if( pBox!=0 ){
47794825
/* If pBox is not NULL, do not draw any <text>. Instead, just expand
@@ -5244,12 +5290,13 @@
52445290
pNew->type->xInit(p, pNew);
52455291
pik_add_txt(p, pStr, pStr->eCode);
52465292
return pNew;
52475293
}
52485294
if( pId ){
5295
+ const PClass *pClass;
52495296
pNew->errTok = *pId;
5250
- const PClass *pClass = pik_find_class(pId);
5297
+ pClass = pik_find_class(pId);
52515298
if( pClass ){
52525299
pNew->type = pClass;
52535300
pNew->sw = pik_value(p, "thickness",9,0);
52545301
pNew->fill = pik_value(p, "fill",4,0);
52555302
pNew->color = pik_value(p, "color",5,0);
@@ -5998,11 +6045,18 @@
59986045
if( pObj->type->xFit==0 ) return;
59996046
pik_bbox_init(&bbox);
60006047
pik_compute_layout_settings(p);
60016048
pik_append_txt(p, pObj, &bbox);
60026049
w = (eWhich & 1)!=0 ? (bbox.ne.x - bbox.sw.x) + p->charWidth : 0;
6003
- h = (eWhich & 2)!=0 ? (bbox.ne.y - bbox.sw.y) + 0.5*p->charHeight : 0;
6050
+ if( eWhich & 2 ){
6051
+ PNum h1, h2;
6052
+ h1 = (bbox.ne.y - pObj->ptAt.y);
6053
+ h2 = (pObj->ptAt.y - bbox.sw.y);
6054
+ h = 2.0*( h1<h2 ? h2 : h1 ) + 0.5*p->charHeight;
6055
+ }else{
6056
+ h = 0;
6057
+ }
60046058
pObj->type->xFit(p, pObj, w, h);
60056059
pObj->mProp |= A_FIT;
60066060
}
60076061
60086062
/* Set a local variable name to "val".
@@ -6673,18 +6727,18 @@
66736727
bMoreToDo = 0;
66746728
iThisLayer = iNextLayer;
66756729
iNextLayer = 0x7fffffff;
66766730
for(i=0; i<pList->n; i++){
66776731
PObj *pObj = pList->a[i];
6732
+ void (*xRender)(Pik*,PObj*);
66786733
if( pObj->iLayer>iThisLayer ){
66796734
if( pObj->iLayer<iNextLayer ) iNextLayer = pObj->iLayer;
66806735
bMoreToDo = 1;
66816736
continue; /* Defer until another round */
66826737
}else if( pObj->iLayer<iThisLayer ){
66836738
continue;
66846739
}
6685
- void (*xRender)(Pik*,PObj*);
66866740
if( mDebug & 1 ) pik_elem_render(p, pObj);
66876741
xRender = pObj->type->xRender;
66886742
if( xRender ){
66896743
xRender(p, pObj);
66906744
}
@@ -7404,11 +7458,14 @@
74047458
}else{
74057459
p->aCtx[p->nCtx++] = token;
74067460
pik_tokenize(p, &aParam[token.eCode], pParser, 0);
74077461
p->nCtx--;
74087462
}
7409
- }else if( token.eType==T_ID && (pMac = pik_find_macro(p,&token))!=0 ){
7463
+ }else if( token.eType==T_ID
7464
+ && (token.n = (unsigned short)(sz & 0xffff),
7465
+ (pMac = pik_find_macro(p,&token))!=0)
7466
+ ){
74107467
PToken args[9];
74117468
unsigned int j = i+sz;
74127469
if( pMac->inUse ){
74137470
pik_error(p, &pMac->macroName, "recursive macro definition");
74147471
break;
@@ -7633,10 +7690,11 @@
76337690
}
76347691
sz = fread(zIn, 1, sz, in);
76357692
fclose(in);
76367693
zIn[sz] = 0;
76377694
zOut = pikchr(zIn, "pikchr", mFlags, &w, &h);
7695
+ if( w<0 ) exitCode = 1;
76387696
if( zOut==0 ){
76397697
fprintf(stderr, "pikchr() returns NULL. Out of memory?\n");
76407698
if( !bDontStop ) exit(1);
76417699
}else if( bSvgOnly ){
76427700
printf("%s\n", zOut);
@@ -7646,11 +7704,10 @@
76467704
zHtmlHdr = 0;
76477705
}
76487706
printf("<h1>File %s</h1>\n", argv[i]);
76497707
if( w<0 ){
76507708
printf("<p>ERROR</p>\n%s\n", zOut);
7651
- exitCode = 1;
76527709
}else{
76537710
printf("<div id=\"svg-%d\" onclick=\"toggleHidden('svg-%d')\">\n",i,i);
76547711
printf("<div style='border:3px solid lightgray;max-width:%dpx;'>\n",w);
76557712
printf("%s</div>\n", zOut);
76567713
printf("<pre class='hidden'>");
@@ -7662,11 +7719,11 @@
76627719
free(zIn);
76637720
}
76647721
if( !bSvgOnly ){
76657722
printf("</body></html>\n");
76667723
}
7667
- return exitCode;
7724
+ return exitCode ? EXIT_FAILURE : EXIT_SUCCESS;
76687725
}
76697726
#endif /* PIKCHR_SHELL */
76707727
76717728
#ifdef PIKCHR_TCL
76727729
#include <tcl.h>
@@ -7726,6 +7783,6 @@
77267783
77277784
77287785
#endif /* PIKCHR_TCL */
77297786
77307787
7731
-#line 7756 "pikchr.c"
7788
+#line 7813 "pikchr.c"
77327789
--- src/pikchr.c
+++ src/pikchr.c
@@ -3612,11 +3612,10 @@
3612 /* Methods for the "arrow" class */
3613 static void arrowInit(Pik *p, PObj *pObj){
3614 pObj->w = pik_value(p, "linewid",7,0);
3615 pObj->h = pik_value(p, "lineht",6,0);
3616 pObj->rad = pik_value(p, "linerad",7,0);
3617 pObj->fill = -1.0;
3618 pObj->rarrow = 1;
3619 }
3620
3621 /* Methods for the "box" class */
3622 static void boxInit(Pik *p, PObj *pObj){
@@ -4033,11 +4032,10 @@
4033 /* Methods for the "line" class */
4034 static void lineInit(Pik *p, PObj *pObj){
4035 pObj->w = pik_value(p, "linewid",7,0);
4036 pObj->h = pik_value(p, "lineht",6,0);
4037 pObj->rad = pik_value(p, "linerad",7,0);
4038 pObj->fill = -1.0;
4039 }
4040 static PPoint lineOffset(Pik *p, PObj *pObj, int cp){
4041 #if 0
4042 /* In legacy PIC, the .center of an unclosed line is half way between
4043 ** its .start and .end. */
@@ -4117,11 +4115,10 @@
4117 /* Methods for the "spline" class */
4118 static void splineInit(Pik *p, PObj *pObj){
4119 pObj->w = pik_value(p, "linewid",7,0);
4120 pObj->h = pik_value(p, "lineht",6,0);
4121 pObj->rad = 1000;
4122 pObj->fill = -1.0; /* Disable fill by default */
4123 }
4124 /* Return a point along the path from "f" to "t" that is r units
4125 ** prior to reaching "t", except if the path is less than 2*r total,
4126 ** return the midpoint.
4127 */
@@ -4146,27 +4143,35 @@
4146 static void radiusPath(Pik *p, PObj *pObj, PNum r){
4147 int i;
4148 int n = pObj->nPath;
4149 const PPoint *a = pObj->aPath;
4150 PPoint m;
 
4151 int isMid = 0;
 
4152
4153 pik_append_xy(p,"<path d=\"M", a[0].x, a[0].y);
4154 m = radiusMidpoint(a[0], a[1], r, &isMid);
4155 pik_append_xy(p," L ",m.x,m.y);
4156 for(i=1; i<n-1; i++){
4157 m = radiusMidpoint(a[i+1],a[i],r, &isMid);
 
4158 pik_append_xy(p," Q ",a[i].x,a[i].y);
4159 pik_append_xy(p," ",m.x,m.y);
4160 if( !isMid ){
4161 m = radiusMidpoint(a[i],a[i+1],r, &isMid);
4162 pik_append_xy(p," L ",m.x,m.y);
4163 }
4164 }
4165 pik_append_xy(p," L ",a[i].x,a[i].y);
 
 
 
 
 
4166 pik_append(p,"\" ",-1);
4167 pik_append_style(p,pObj,0);
4168 pik_append(p,"\" />\n", -1);
4169 }
4170 static void splineRender(Pik *p, PObj *pObj){
4171 if( pObj->sw>0.0 ){
4172 int n = pObj->nPath;
@@ -4713,67 +4718,108 @@
4713 aTxt[i].eCode |= aFree[iSlot++];
4714 }
4715 }
4716 }
4717 }
 
 
 
 
 
 
 
 
 
 
4718
4719 /* Append multiple <text> SVG elements for the text fields of the PObj.
4720 ** Parameters:
4721 **
4722 ** p The Pik object into which we are rendering
4723 **
4724 ** pObj Object containing the text to be rendered
4725 **
4726 ** pBox If not NULL, do no rendering at all. Instead
4727 ** expand the box object so that it will include all
4728 ** of the text.
4729 */
4730 static void pik_append_txt(Pik *p, PObj *pObj, PBox *pBox){
4731 PNum dy; /* Half the height of a single line of text */
4732 PNum dy2; /* Extra vertical space around the center */
4733 PNum jw; /* Justification margin relative to center */
 
 
 
 
 
4734 int n, i, nz;
4735 PNum x, y, orig_y;
4736 const char *z;
4737 PToken *aTxt;
4738 int hasCenter = 0;
4739
4740 if( p->nErr ) return;
4741 if( pObj->nTxt==0 ) return;
4742 aTxt = pObj->aTxt;
4743 dy = 0.5*p->charHeight;
4744 n = pObj->nTxt;
4745 pik_txt_vertical_layout(pObj);
4746 x = pObj->ptAt.x;
4747 for(i=0; i<n; i++){
4748 if( (pObj->aTxt[i].eCode & TP_CENTER)!=0 ) hasCenter = 1;
4749 }
4750 if( hasCenter ){
4751 dy2 = dy;
4752 }else if( pObj->type->isLine ){
4753 dy2 = pObj->sw;
4754 }else{
4755 dy2 = 0.0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4756 }
4757 if( pObj->type->eJust==1 ){
4758 jw = 0.5*(pObj->w - 0.5*(p->charWidth + pObj->sw));
4759 }else{
4760 jw = 0.0;
4761 }
4762 for(i=0; i<n; i++){
4763 PToken *t = &aTxt[i];
4764 PNum xtraFontScale = 1.0;
4765 orig_y = pObj->ptAt.y;
4766 PNum nx = 0;
 
4767 y = 0;
4768 if( t->eCode & TP_ABOVE2 ) y += dy2 + 3*dy;
4769 if( t->eCode & TP_ABOVE ) y += dy2 + dy;
4770 if( t->eCode & TP_BELOW ) y -= dy2 + dy;
4771 if( t->eCode & TP_BELOW2 ) y -= dy2 + 3*dy;
4772 if( t->eCode & TP_BIG ) xtraFontScale *= 1.25;
4773 if( t->eCode & TP_SMALL ) xtraFontScale *= 0.8;
4774 if( t->eCode & TP_XTRA ) xtraFontScale *= xtraFontScale;
4775 if( t->eCode & TP_LJUST ) nx -= jw;
4776 if( t->eCode & TP_RJUST ) nx += jw;
4777
4778 if( pBox!=0 ){
4779 /* If pBox is not NULL, do not draw any <text>. Instead, just expand
@@ -5244,12 +5290,13 @@
5244 pNew->type->xInit(p, pNew);
5245 pik_add_txt(p, pStr, pStr->eCode);
5246 return pNew;
5247 }
5248 if( pId ){
 
5249 pNew->errTok = *pId;
5250 const PClass *pClass = pik_find_class(pId);
5251 if( pClass ){
5252 pNew->type = pClass;
5253 pNew->sw = pik_value(p, "thickness",9,0);
5254 pNew->fill = pik_value(p, "fill",4,0);
5255 pNew->color = pik_value(p, "color",5,0);
@@ -5998,11 +6045,18 @@
5998 if( pObj->type->xFit==0 ) return;
5999 pik_bbox_init(&bbox);
6000 pik_compute_layout_settings(p);
6001 pik_append_txt(p, pObj, &bbox);
6002 w = (eWhich & 1)!=0 ? (bbox.ne.x - bbox.sw.x) + p->charWidth : 0;
6003 h = (eWhich & 2)!=0 ? (bbox.ne.y - bbox.sw.y) + 0.5*p->charHeight : 0;
 
 
 
 
 
 
 
6004 pObj->type->xFit(p, pObj, w, h);
6005 pObj->mProp |= A_FIT;
6006 }
6007
6008 /* Set a local variable name to "val".
@@ -6673,18 +6727,18 @@
6673 bMoreToDo = 0;
6674 iThisLayer = iNextLayer;
6675 iNextLayer = 0x7fffffff;
6676 for(i=0; i<pList->n; i++){
6677 PObj *pObj = pList->a[i];
 
6678 if( pObj->iLayer>iThisLayer ){
6679 if( pObj->iLayer<iNextLayer ) iNextLayer = pObj->iLayer;
6680 bMoreToDo = 1;
6681 continue; /* Defer until another round */
6682 }else if( pObj->iLayer<iThisLayer ){
6683 continue;
6684 }
6685 void (*xRender)(Pik*,PObj*);
6686 if( mDebug & 1 ) pik_elem_render(p, pObj);
6687 xRender = pObj->type->xRender;
6688 if( xRender ){
6689 xRender(p, pObj);
6690 }
@@ -7404,11 +7458,14 @@
7404 }else{
7405 p->aCtx[p->nCtx++] = token;
7406 pik_tokenize(p, &aParam[token.eCode], pParser, 0);
7407 p->nCtx--;
7408 }
7409 }else if( token.eType==T_ID && (pMac = pik_find_macro(p,&token))!=0 ){
 
 
 
7410 PToken args[9];
7411 unsigned int j = i+sz;
7412 if( pMac->inUse ){
7413 pik_error(p, &pMac->macroName, "recursive macro definition");
7414 break;
@@ -7633,10 +7690,11 @@
7633 }
7634 sz = fread(zIn, 1, sz, in);
7635 fclose(in);
7636 zIn[sz] = 0;
7637 zOut = pikchr(zIn, "pikchr", mFlags, &w, &h);
 
7638 if( zOut==0 ){
7639 fprintf(stderr, "pikchr() returns NULL. Out of memory?\n");
7640 if( !bDontStop ) exit(1);
7641 }else if( bSvgOnly ){
7642 printf("%s\n", zOut);
@@ -7646,11 +7704,10 @@
7646 zHtmlHdr = 0;
7647 }
7648 printf("<h1>File %s</h1>\n", argv[i]);
7649 if( w<0 ){
7650 printf("<p>ERROR</p>\n%s\n", zOut);
7651 exitCode = 1;
7652 }else{
7653 printf("<div id=\"svg-%d\" onclick=\"toggleHidden('svg-%d')\">\n",i,i);
7654 printf("<div style='border:3px solid lightgray;max-width:%dpx;'>\n",w);
7655 printf("%s</div>\n", zOut);
7656 printf("<pre class='hidden'>");
@@ -7662,11 +7719,11 @@
7662 free(zIn);
7663 }
7664 if( !bSvgOnly ){
7665 printf("</body></html>\n");
7666 }
7667 return exitCode;
7668 }
7669 #endif /* PIKCHR_SHELL */
7670
7671 #ifdef PIKCHR_TCL
7672 #include <tcl.h>
@@ -7726,6 +7783,6 @@
7726
7727
7728 #endif /* PIKCHR_TCL */
7729
7730
7731 #line 7756 "pikchr.c"
7732
--- src/pikchr.c
+++ src/pikchr.c
@@ -3612,11 +3612,10 @@
3612 /* Methods for the "arrow" class */
3613 static void arrowInit(Pik *p, PObj *pObj){
3614 pObj->w = pik_value(p, "linewid",7,0);
3615 pObj->h = pik_value(p, "lineht",6,0);
3616 pObj->rad = pik_value(p, "linerad",7,0);
 
3617 pObj->rarrow = 1;
3618 }
3619
3620 /* Methods for the "box" class */
3621 static void boxInit(Pik *p, PObj *pObj){
@@ -4033,11 +4032,10 @@
4032 /* Methods for the "line" class */
4033 static void lineInit(Pik *p, PObj *pObj){
4034 pObj->w = pik_value(p, "linewid",7,0);
4035 pObj->h = pik_value(p, "lineht",6,0);
4036 pObj->rad = pik_value(p, "linerad",7,0);
 
4037 }
4038 static PPoint lineOffset(Pik *p, PObj *pObj, int cp){
4039 #if 0
4040 /* In legacy PIC, the .center of an unclosed line is half way between
4041 ** its .start and .end. */
@@ -4117,11 +4115,10 @@
4115 /* Methods for the "spline" class */
4116 static void splineInit(Pik *p, PObj *pObj){
4117 pObj->w = pik_value(p, "linewid",7,0);
4118 pObj->h = pik_value(p, "lineht",6,0);
4119 pObj->rad = 1000;
 
4120 }
4121 /* Return a point along the path from "f" to "t" that is r units
4122 ** prior to reaching "t", except if the path is less than 2*r total,
4123 ** return the midpoint.
4124 */
@@ -4146,27 +4143,35 @@
4143 static void radiusPath(Pik *p, PObj *pObj, PNum r){
4144 int i;
4145 int n = pObj->nPath;
4146 const PPoint *a = pObj->aPath;
4147 PPoint m;
4148 PPoint an = a[n-1];
4149 int isMid = 0;
4150 int iLast = pObj->bClose ? n : n-1;
4151
4152 pik_append_xy(p,"<path d=\"M", a[0].x, a[0].y);
4153 m = radiusMidpoint(a[0], a[1], r, &isMid);
4154 pik_append_xy(p," L ",m.x,m.y);
4155 for(i=1; i<iLast; i++){
4156 an = i<n-1 ? a[i+1] : a[0];
4157 m = radiusMidpoint(an,a[i],r, &isMid);
4158 pik_append_xy(p," Q ",a[i].x,a[i].y);
4159 pik_append_xy(p," ",m.x,m.y);
4160 if( !isMid ){
4161 m = radiusMidpoint(a[i],an,r, &isMid);
4162 pik_append_xy(p," L ",m.x,m.y);
4163 }
4164 }
4165 pik_append_xy(p," L ",an.x,an.y);
4166 if( pObj->bClose ){
4167 pik_append(p,"Z",1);
4168 }else{
4169 pObj->fill = -1.0;
4170 }
4171 pik_append(p,"\" ",-1);
4172 pik_append_style(p,pObj,pObj->bClose);
4173 pik_append(p,"\" />\n", -1);
4174 }
4175 static void splineRender(Pik *p, PObj *pObj){
4176 if( pObj->sw>0.0 ){
4177 int n = pObj->nPath;
@@ -4713,67 +4718,108 @@
4718 aTxt[i].eCode |= aFree[iSlot++];
4719 }
4720 }
4721 }
4722 }
4723
4724 /* Return the font scaling factor associated with the input text attribute.
4725 */
4726 static PNum pik_font_scale(PToken *t){
4727 PNum scale = 1.0;
4728 if( t->eCode & TP_BIG ) scale *= 1.25;
4729 if( t->eCode & TP_SMALL ) scale *= 0.8;
4730 if( t->eCode & TP_XTRA ) scale *= scale;
4731 return scale;
4732 }
4733
4734 /* Append multiple <text> SVG elements for the text fields of the PObj.
4735 ** Parameters:
4736 **
4737 ** p The Pik object into which we are rendering
4738 **
4739 ** pObj Object containing the text to be rendered
4740 **
4741 ** pBox If not NULL, do no rendering at all. Instead
4742 ** expand the box object so that it will include all
4743 ** of the text.
4744 */
4745 static void pik_append_txt(Pik *p, PObj *pObj, PBox *pBox){
 
 
4746 PNum jw; /* Justification margin relative to center */
4747 PNum ha2 = 0.0; /* Height of the top row of text */
4748 PNum ha1 = 0.0; /* Height of the second "above" row */
4749 PNum hc = 0.0; /* Height of the center row */
4750 PNum hb1 = 0.0; /* Height of the first "below" row of text */
4751 PNum hb2 = 0.0; /* Height of the second "below" row */
4752 int n, i, nz;
4753 PNum x, y, orig_y, s;
4754 const char *z;
4755 PToken *aTxt;
4756 unsigned allMask = 0;
4757
4758 if( p->nErr ) return;
4759 if( pObj->nTxt==0 ) return;
4760 aTxt = pObj->aTxt;
 
4761 n = pObj->nTxt;
4762 pik_txt_vertical_layout(pObj);
4763 x = pObj->ptAt.x;
4764 for(i=0; i<n; i++) allMask |= pObj->aTxt[i].eCode;
4765 if( pObj->type->isLine ) hc = pObj->sw*1.5;
4766 if( allMask & TP_CENTER ){
4767 for(i=0; i<n; i++){
4768 if( pObj->aTxt[i].eCode & TP_CENTER ){
4769 s = pik_font_scale(pObj->aTxt+i);
4770 if( hc<s*p->charHeight ) hc = s*p->charHeight;
4771 }
4772 }
4773 }
4774 if( allMask & TP_ABOVE ){
4775 for(i=0; i<n; i++){
4776 if( pObj->aTxt[i].eCode & TP_ABOVE ){
4777 s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4778 if( ha1<s ) ha1 = s;
4779 }
4780 }
4781 if( allMask & TP_ABOVE2 ){
4782 for(i=0; i<n; i++){
4783 if( pObj->aTxt[i].eCode & TP_ABOVE2 ){
4784 s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4785 if( ha2<s ) ha2 = s;
4786 }
4787 }
4788 }
4789 }
4790 if( allMask & TP_BELOW ){
4791 for(i=0; i<n; i++){
4792 if( pObj->aTxt[i].eCode & TP_BELOW ){
4793 s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4794 if( hb1<s ) hb1 = s;
4795 }
4796 }
4797 if( allMask & TP_BELOW2 ){
4798 for(i=0; i<n; i++){
4799 if( pObj->aTxt[i].eCode & TP_BELOW2 ){
4800 s = pik_font_scale(pObj->aTxt+i)*p->charHeight;
4801 if( hb2<s ) hb2 = s;
4802 }
4803 }
4804 }
4805 }
4806 if( pObj->type->eJust==1 ){
4807 jw = 0.5*(pObj->w - 0.5*(p->charWidth + pObj->sw));
4808 }else{
4809 jw = 0.0;
4810 }
4811 for(i=0; i<n; i++){
4812 PToken *t = &aTxt[i];
4813 PNum xtraFontScale = pik_font_scale(t);
 
4814 PNum nx = 0;
4815 orig_y = pObj->ptAt.y;
4816 y = 0;
4817 if( t->eCode & TP_ABOVE2 ) y += 0.5*hc + ha1 + 0.5*ha2;
4818 if( t->eCode & TP_ABOVE ) y += 0.5*hc + 0.5*ha1;
4819 if( t->eCode & TP_BELOW ) y -= 0.5*hc + 0.5*hb1;
4820 if( t->eCode & TP_BELOW2 ) y -= 0.5*hc + hb1 + 0.5*hb2;
 
 
 
4821 if( t->eCode & TP_LJUST ) nx -= jw;
4822 if( t->eCode & TP_RJUST ) nx += jw;
4823
4824 if( pBox!=0 ){
4825 /* If pBox is not NULL, do not draw any <text>. Instead, just expand
@@ -5244,12 +5290,13 @@
5290 pNew->type->xInit(p, pNew);
5291 pik_add_txt(p, pStr, pStr->eCode);
5292 return pNew;
5293 }
5294 if( pId ){
5295 const PClass *pClass;
5296 pNew->errTok = *pId;
5297 pClass = pik_find_class(pId);
5298 if( pClass ){
5299 pNew->type = pClass;
5300 pNew->sw = pik_value(p, "thickness",9,0);
5301 pNew->fill = pik_value(p, "fill",4,0);
5302 pNew->color = pik_value(p, "color",5,0);
@@ -5998,11 +6045,18 @@
6045 if( pObj->type->xFit==0 ) return;
6046 pik_bbox_init(&bbox);
6047 pik_compute_layout_settings(p);
6048 pik_append_txt(p, pObj, &bbox);
6049 w = (eWhich & 1)!=0 ? (bbox.ne.x - bbox.sw.x) + p->charWidth : 0;
6050 if( eWhich & 2 ){
6051 PNum h1, h2;
6052 h1 = (bbox.ne.y - pObj->ptAt.y);
6053 h2 = (pObj->ptAt.y - bbox.sw.y);
6054 h = 2.0*( h1<h2 ? h2 : h1 ) + 0.5*p->charHeight;
6055 }else{
6056 h = 0;
6057 }
6058 pObj->type->xFit(p, pObj, w, h);
6059 pObj->mProp |= A_FIT;
6060 }
6061
6062 /* Set a local variable name to "val".
@@ -6673,18 +6727,18 @@
6727 bMoreToDo = 0;
6728 iThisLayer = iNextLayer;
6729 iNextLayer = 0x7fffffff;
6730 for(i=0; i<pList->n; i++){
6731 PObj *pObj = pList->a[i];
6732 void (*xRender)(Pik*,PObj*);
6733 if( pObj->iLayer>iThisLayer ){
6734 if( pObj->iLayer<iNextLayer ) iNextLayer = pObj->iLayer;
6735 bMoreToDo = 1;
6736 continue; /* Defer until another round */
6737 }else if( pObj->iLayer<iThisLayer ){
6738 continue;
6739 }
 
6740 if( mDebug & 1 ) pik_elem_render(p, pObj);
6741 xRender = pObj->type->xRender;
6742 if( xRender ){
6743 xRender(p, pObj);
6744 }
@@ -7404,11 +7458,14 @@
7458 }else{
7459 p->aCtx[p->nCtx++] = token;
7460 pik_tokenize(p, &aParam[token.eCode], pParser, 0);
7461 p->nCtx--;
7462 }
7463 }else if( token.eType==T_ID
7464 && (token.n = (unsigned short)(sz & 0xffff),
7465 (pMac = pik_find_macro(p,&token))!=0)
7466 ){
7467 PToken args[9];
7468 unsigned int j = i+sz;
7469 if( pMac->inUse ){
7470 pik_error(p, &pMac->macroName, "recursive macro definition");
7471 break;
@@ -7633,10 +7690,11 @@
7690 }
7691 sz = fread(zIn, 1, sz, in);
7692 fclose(in);
7693 zIn[sz] = 0;
7694 zOut = pikchr(zIn, "pikchr", mFlags, &w, &h);
7695 if( w<0 ) exitCode = 1;
7696 if( zOut==0 ){
7697 fprintf(stderr, "pikchr() returns NULL. Out of memory?\n");
7698 if( !bDontStop ) exit(1);
7699 }else if( bSvgOnly ){
7700 printf("%s\n", zOut);
@@ -7646,11 +7704,10 @@
7704 zHtmlHdr = 0;
7705 }
7706 printf("<h1>File %s</h1>\n", argv[i]);
7707 if( w<0 ){
7708 printf("<p>ERROR</p>\n%s\n", zOut);
 
7709 }else{
7710 printf("<div id=\"svg-%d\" onclick=\"toggleHidden('svg-%d')\">\n",i,i);
7711 printf("<div style='border:3px solid lightgray;max-width:%dpx;'>\n",w);
7712 printf("%s</div>\n", zOut);
7713 printf("<pre class='hidden'>");
@@ -7662,11 +7719,11 @@
7719 free(zIn);
7720 }
7721 if( !bSvgOnly ){
7722 printf("</body></html>\n");
7723 }
7724 return exitCode ? EXIT_FAILURE : EXIT_SUCCESS;
7725 }
7726 #endif /* PIKCHR_SHELL */
7727
7728 #ifdef PIKCHR_TCL
7729 #include <tcl.h>
@@ -7726,6 +7783,6 @@
7783
7784
7785 #endif /* PIKCHR_TCL */
7786
7787
7788 #line 7813 "pikchr.c"
7789
+1 -1
--- src/printf.c
+++ src/printf.c
@@ -1149,15 +1149,15 @@
11491149
abort();
11501150
exit(rc);
11511151
}
11521152
NORETURN void fossil_fatal(const char *zFormat, ...){
11531153
static int once = 0;
1154
+ va_list ap;
11541155
char *z;
11551156
int rc = 1;
11561157
if( once ) exit(1);
11571158
once = 1;
1158
- va_list ap;
11591159
mainInFatalError = 1;
11601160
va_start(ap, zFormat);
11611161
z = vmprintf(zFormat, ap);
11621162
va_end(ap);
11631163
rc = fossil_print_error(rc, z);
11641164
--- src/printf.c
+++ src/printf.c
@@ -1149,15 +1149,15 @@
1149 abort();
1150 exit(rc);
1151 }
1152 NORETURN void fossil_fatal(const char *zFormat, ...){
1153 static int once = 0;
 
1154 char *z;
1155 int rc = 1;
1156 if( once ) exit(1);
1157 once = 1;
1158 va_list ap;
1159 mainInFatalError = 1;
1160 va_start(ap, zFormat);
1161 z = vmprintf(zFormat, ap);
1162 va_end(ap);
1163 rc = fossil_print_error(rc, z);
1164
--- src/printf.c
+++ src/printf.c
@@ -1149,15 +1149,15 @@
1149 abort();
1150 exit(rc);
1151 }
1152 NORETURN void fossil_fatal(const char *zFormat, ...){
1153 static int once = 0;
1154 va_list ap;
1155 char *z;
1156 int rc = 1;
1157 if( once ) exit(1);
1158 once = 1;
 
1159 mainInFatalError = 1;
1160 va_start(ap, zFormat);
1161 z = vmprintf(zFormat, ap);
1162 va_end(ap);
1163 rc = fossil_print_error(rc, z);
1164
--- src/setupuser.c
+++ src/setupuser.c
@@ -316,10 +316,15 @@
316316
if( P("delete") && cgi_csrf_safe(1) ){
317317
int n;
318318
if( P("verifydelete") ){
319319
/* Verified delete user request */
320320
db_unprotect(PROTECT_USER);
321
+ if( db_table_exists("repository","subscriber") ){
322
+ /* Also delete any subscriptions associated with this user */
323
+ db_multi_exec("DELETE FROM subscriber WHERE suname="
324
+ "(SELECT login FROM user WHERE uid=%d)", uid);
325
+ }
321326
db_multi_exec("DELETE FROM user WHERE uid=%d", uid);
322327
db_protect_pop();
323328
moderation_disapprove_for_missing_users();
324329
admin_log("Deleted user [%s] (uid %d).",
325330
PD("login","???")/*safe-for-%s*/, uid);
326331
--- src/setupuser.c
+++ src/setupuser.c
@@ -316,10 +316,15 @@
316 if( P("delete") && cgi_csrf_safe(1) ){
317 int n;
318 if( P("verifydelete") ){
319 /* Verified delete user request */
320 db_unprotect(PROTECT_USER);
 
 
 
 
 
321 db_multi_exec("DELETE FROM user WHERE uid=%d", uid);
322 db_protect_pop();
323 moderation_disapprove_for_missing_users();
324 admin_log("Deleted user [%s] (uid %d).",
325 PD("login","???")/*safe-for-%s*/, uid);
326
--- src/setupuser.c
+++ src/setupuser.c
@@ -316,10 +316,15 @@
316 if( P("delete") && cgi_csrf_safe(1) ){
317 int n;
318 if( P("verifydelete") ){
319 /* Verified delete user request */
320 db_unprotect(PROTECT_USER);
321 if( db_table_exists("repository","subscriber") ){
322 /* Also delete any subscriptions associated with this user */
323 db_multi_exec("DELETE FROM subscriber WHERE suname="
324 "(SELECT login FROM user WHERE uid=%d)", uid);
325 }
326 db_multi_exec("DELETE FROM user WHERE uid=%d", uid);
327 db_protect_pop();
328 moderation_disapprove_for_missing_users();
329 admin_log("Deleted user [%s] (uid %d).",
330 PD("login","???")/*safe-for-%s*/, uid);
331
+62 -27
--- src/shell.c
+++ src/shell.c
@@ -11198,10 +11198,12 @@
1119811198
#define SHFLG_PreserveRowid 0x00000008 /* .dump preserves rowid values */
1119911199
#define SHFLG_Newlines 0x00000010 /* .dump --newline flag */
1120011200
#define SHFLG_CountChanges 0x00000020 /* .changes setting */
1120111201
#define SHFLG_Echo 0x00000040 /* .echo or --echo setting */
1120211202
#define SHFLG_HeaderSet 0x00000080 /* .header has been used */
11203
+#define SHFLG_DumpDataOnly 0x00000100 /* .dump show data only */
11204
+#define SHFLG_DumpNoSys 0x00000200 /* .dump omits system tables */
1120311205
1120411206
/*
1120511207
** Macros for testing and setting shellFlgs
1120611208
*/
1120711209
#define ShellHasFlag(P,X) (((P)->shellFlgs & (X))!=0)
@@ -13718,23 +13720,29 @@
1371813720
int rc;
1371913721
const char *zTable;
1372013722
const char *zType;
1372113723
const char *zSql;
1372213724
ShellState *p = (ShellState *)pArg;
13725
+ int dataOnly;
13726
+ int noSys;
1372313727
1372413728
UNUSED_PARAMETER(azNotUsed);
1372513729
if( nArg!=3 || azArg==0 ) return 0;
1372613730
zTable = azArg[0];
1372713731
zType = azArg[1];
1372813732
zSql = azArg[2];
13733
+ dataOnly = (p->shellFlgs & SHFLG_DumpDataOnly)!=0;
13734
+ noSys = (p->shellFlgs & SHFLG_DumpNoSys)!=0;
1372913735
13730
- if( strcmp(zTable, "sqlite_sequence")==0 ){
13731
- raw_printf(p->out, "DELETE FROM sqlite_sequence;\n");
13732
- }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 ){
13733
- raw_printf(p->out, "ANALYZE sqlite_schema;\n");
13736
+ if( strcmp(zTable, "sqlite_sequence")==0 && !noSys ){
13737
+ if( !dataOnly ) raw_printf(p->out, "DELETE FROM sqlite_sequence;\n");
13738
+ }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 && !noSys ){
13739
+ if( !dataOnly ) raw_printf(p->out, "ANALYZE sqlite_schema;\n");
1373413740
}else if( strncmp(zTable, "sqlite_", 7)==0 ){
1373513741
return 0;
13742
+ }else if( dataOnly ){
13743
+ /* no-op */
1373613744
}else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){
1373713745
char *zIns;
1373813746
if( !p->writableSchema ){
1373913747
raw_printf(p->out, "PRAGMA writable_schema=ON;\n");
1374013748
p->writableSchema = 1;
@@ -13903,12 +13911,14 @@
1390313911
".databases List names and files of attached databases",
1390413912
".dbconfig ?op? ?val? List or change sqlite3_db_config() options",
1390513913
".dbinfo ?DB? Show status information about the database",
1390613914
".dump ?TABLE? Render database content as SQL",
1390713915
" Options:",
13908
- " --preserve-rowids Include ROWID values in the output",
13916
+ " --data-only Output only INSERT statements",
1390913917
" --newlines Allow unescaped newline characters in output",
13918
+ " --nosys Omit system tables (ex: \"sqlite_stat1\")",
13919
+ " --preserve-rowids Include ROWID values in the output",
1391013920
" TABLE is a LIKE pattern for the tables to dump",
1391113921
" Additional LIKE patterns can be given in subsequent arguments",
1391213922
".echo on|off Turn command echo on or off",
1391313923
".eqp on|off|full|... Enable or disable automatic EXPLAIN QUERY PLAN",
1391413924
" Other Modes:",
@@ -14029,12 +14039,13 @@
1402914039
#endif
1403014040
".restore ?DB? FILE Restore content of DB (default \"main\") from FILE",
1403114041
".save FILE Write in-memory database into FILE",
1403214042
".scanstats on|off Turn sqlite3_stmt_scanstatus() metrics on or off",
1403314043
".schema ?PATTERN? Show the CREATE statements matching PATTERN",
14034
- " Options:",
14035
- " --indent Try to pretty-print the schema",
14044
+ " Options:",
14045
+ " --indent Try to pretty-print the schema",
14046
+ " --nosys Omit objects whose names start with \"sqlite_\"",
1403614047
".selftest ?OPTIONS? Run tests defined in the SELFTEST table",
1403714048
" Options:",
1403814049
" --init Create a new SELFTEST table",
1403914050
" -v Verbose output",
1404014051
".separator COL ?ROW? Change the column and row separators",
@@ -17705,11 +17716,13 @@
1770517716
char *zLike = 0;
1770617717
char *zSql;
1770717718
int i;
1770817719
int savedShowHeader = p->showHeader;
1770917720
int savedShellFlags = p->shellFlgs;
17710
- ShellClearFlag(p, SHFLG_PreserveRowid|SHFLG_Newlines|SHFLG_Echo);
17721
+ ShellClearFlag(p,
17722
+ SHFLG_PreserveRowid|SHFLG_Newlines|SHFLG_Echo
17723
+ |SHFLG_DumpDataOnly|SHFLG_DumpNoSys);
1771117724
for(i=1; i<nArg; i++){
1771217725
if( azArg[i][0]=='-' ){
1771317726
const char *z = azArg[i]+1;
1771417727
if( z[0]=='-' ) z++;
1771517728
if( strcmp(z,"preserve-rowids")==0 ){
@@ -17724,10 +17737,16 @@
1772417737
#endif
1772517738
}else
1772617739
if( strcmp(z,"newlines")==0 ){
1772717740
ShellSetFlag(p, SHFLG_Newlines);
1772817741
}else
17742
+ if( strcmp(z,"data-only")==0 ){
17743
+ ShellSetFlag(p, SHFLG_DumpDataOnly);
17744
+ }else
17745
+ if( strcmp(z,"nosys")==0 ){
17746
+ ShellSetFlag(p, SHFLG_DumpNoSys);
17747
+ }else
1772917748
{
1773017749
raw_printf(stderr, "Unknown option \"%s\" on \".dump\"\n", azArg[i]);
1773117750
rc = 1;
1773217751
sqlite3_free(zLike);
1773317752
goto meta_command_exit;
@@ -17740,15 +17759,17 @@
1774017759
}
1774117760
}
1774217761
1774317762
open_db(p, 0);
1774417763
17745
- /* When playing back a "dump", the content might appear in an order
17746
- ** which causes immediate foreign key constraints to be violated.
17747
- ** So disable foreign-key constraint enforcement to prevent problems. */
17748
- raw_printf(p->out, "PRAGMA foreign_keys=OFF;\n");
17749
- raw_printf(p->out, "BEGIN TRANSACTION;\n");
17764
+ if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
17765
+ /* When playing back a "dump", the content might appear in an order
17766
+ ** which causes immediate foreign key constraints to be violated.
17767
+ ** So disable foreign-key constraint enforcement to prevent problems. */
17768
+ raw_printf(p->out, "PRAGMA foreign_keys=OFF;\n");
17769
+ raw_printf(p->out, "BEGIN TRANSACTION;\n");
17770
+ }
1775017771
p->writableSchema = 0;
1775117772
p->showHeader = 0;
1775217773
/* Set writable_schema=ON since doing so forces SQLite to initialize
1775317774
** as much of the schema as it can even if the sqlite_schema table is
1775417775
** corrupt. */
@@ -17762,26 +17783,30 @@
1776217783
" ORDER BY tbl_name='sqlite_sequence', rowid",
1776317784
zLike
1776417785
);
1776517786
run_schema_dump_query(p,zSql);
1776617787
sqlite3_free(zSql);
17767
- zSql = sqlite3_mprintf(
17768
- "SELECT sql FROM sqlite_schema "
17769
- "WHERE (%s) AND sql NOT NULL"
17770
- " AND type IN ('index','trigger','view')",
17771
- zLike
17772
- );
17773
- run_table_dump_query(p, zSql);
17774
- sqlite3_free(zSql);
17788
+ if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
17789
+ zSql = sqlite3_mprintf(
17790
+ "SELECT sql FROM sqlite_schema "
17791
+ "WHERE (%s) AND sql NOT NULL"
17792
+ " AND type IN ('index','trigger','view')",
17793
+ zLike
17794
+ );
17795
+ run_table_dump_query(p, zSql);
17796
+ sqlite3_free(zSql);
17797
+ }
1777517798
sqlite3_free(zLike);
1777617799
if( p->writableSchema ){
1777717800
raw_printf(p->out, "PRAGMA writable_schema=OFF;\n");
1777817801
p->writableSchema = 0;
1777917802
}
1778017803
sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0);
1778117804
sqlite3_exec(p->db, "RELEASE dump;", 0, 0, 0);
17782
- raw_printf(p->out, p->nErr?"ROLLBACK; -- due to errors\n":"COMMIT;\n");
17805
+ if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
17806
+ raw_printf(p->out, p->nErr?"ROLLBACK; -- due to errors\n":"COMMIT;\n");
17807
+ }
1778317808
p->showHeader = savedShowHeader;
1778417809
p->shellFlgs = savedShellFlags;
1778517810
}else
1778617811
1778717812
if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){
@@ -18216,20 +18241,20 @@
1821618241
utf8_printf(p->out, "\n");
1821718242
}
1821818243
while( (nSkip--)>0 ){
1821918244
while( xRead(&sCtx) && sCtx.cTerm==sCtx.cColSep ){}
1822018245
}
18221
- zSql = sqlite3_mprintf("SELECT * FROM %s", zTable);
18246
+ zSql = sqlite3_mprintf("SELECT * FROM \"%w\"", zTable);
1822218247
if( zSql==0 ){
1822318248
import_cleanup(&sCtx);
1822418249
shell_out_of_memory();
1822518250
}
1822618251
nByte = strlen30(zSql);
1822718252
rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
1822818253
import_append_char(&sCtx, 0); /* To ensure sCtx.z is allocated */
1822918254
if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(p->db))==0 ){
18230
- char *zCreate = sqlite3_mprintf("CREATE TABLE %s", zTable);
18255
+ char *zCreate = sqlite3_mprintf("CREATE TABLE \"%w\"", zTable);
1823118256
char cSep = '(';
1823218257
while( xRead(&sCtx) ){
1823318258
zCreate = sqlite3_mprintf("%z%c\n \"%w\" TEXT", zCreate, cSep, sCtx.z);
1823418259
cSep = ',';
1823518260
if( sCtx.cTerm!=sCtx.cColSep ) break;
@@ -18246,11 +18271,11 @@
1824618271
utf8_printf(p->out, "%s\n", zCreate);
1824718272
}
1824818273
rc = sqlite3_exec(p->db, zCreate, 0, 0, 0);
1824918274
sqlite3_free(zCreate);
1825018275
if( rc ){
18251
- utf8_printf(stderr, "CREATE TABLE %s(...) failed: %s\n", zTable,
18276
+ utf8_printf(stderr, "CREATE TABLE \"%s\"(...) failed: %s\n", zTable,
1825218277
sqlite3_errmsg(p->db));
1825318278
import_cleanup(&sCtx);
1825418279
rc = 1;
1825518280
goto meta_command_exit;
1825618281
}
@@ -19102,10 +19127,11 @@
1910219127
char *zErrMsg = 0;
1910319128
const char *zDiv = "(";
1910419129
const char *zName = 0;
1910519130
int iSchema = 0;
1910619131
int bDebug = 0;
19132
+ int bNoSystemTabs = 0;
1910719133
int ii;
1910819134
1910919135
open_db(p, 0);
1911019136
memcpy(&data, p, sizeof(data));
1911119137
data.showHeader = 0;
@@ -19114,14 +19140,20 @@
1911419140
for(ii=1; ii<nArg; ii++){
1911519141
if( optionMatch(azArg[ii],"indent") ){
1911619142
data.cMode = data.mode = MODE_Pretty;
1911719143
}else if( optionMatch(azArg[ii],"debug") ){
1911819144
bDebug = 1;
19145
+ }else if( optionMatch(azArg[ii],"nosys") ){
19146
+ bNoSystemTabs = 1;
19147
+ }else if( azArg[ii][0]=='-' ){
19148
+ utf8_printf(stderr, "Unknown option: \"%s\"\n", azArg[ii]);
19149
+ rc = 1;
19150
+ goto meta_command_exit;
1911919151
}else if( zName==0 ){
1912019152
zName = azArg[ii];
1912119153
}else{
19122
- raw_printf(stderr, "Usage: .schema ?--indent? ?LIKE-PATTERN?\n");
19154
+ raw_printf(stderr, "Usage: .schema ?--indent? ?--nosys? ?LIKE-PATTERN?\n");
1912319155
rc = 1;
1912419156
goto meta_command_exit;
1912519157
}
1912619158
}
1912719159
if( zName!=0 ){
@@ -19203,11 +19235,14 @@
1920319235
appendText(&sSelect, " ESCAPE '\\' ", 0);
1920419236
}
1920519237
appendText(&sSelect, " AND ", 0);
1920619238
sqlite3_free(zQarg);
1920719239
}
19208
- appendText(&sSelect, "type!='meta' AND sql IS NOT NULL"
19240
+ if( bNoSystemTabs ){
19241
+ appendText(&sSelect, "name NOT LIKE 'sqlite_%%' AND ", 0);
19242
+ }
19243
+ appendText(&sSelect, "sql IS NOT NULL"
1920919244
" ORDER BY snum, rowid", 0);
1921019245
if( bDebug ){
1921119246
utf8_printf(p->out, "SQL: %s;\n", sSelect.z);
1921219247
}else{
1921319248
rc = sqlite3_exec(p->db, sSelect.z, callback, &data, &zErrMsg);
1921419249
--- src/shell.c
+++ src/shell.c
@@ -11198,10 +11198,12 @@
11198 #define SHFLG_PreserveRowid 0x00000008 /* .dump preserves rowid values */
11199 #define SHFLG_Newlines 0x00000010 /* .dump --newline flag */
11200 #define SHFLG_CountChanges 0x00000020 /* .changes setting */
11201 #define SHFLG_Echo 0x00000040 /* .echo or --echo setting */
11202 #define SHFLG_HeaderSet 0x00000080 /* .header has been used */
 
 
11203
11204 /*
11205 ** Macros for testing and setting shellFlgs
11206 */
11207 #define ShellHasFlag(P,X) (((P)->shellFlgs & (X))!=0)
@@ -13718,23 +13720,29 @@
13718 int rc;
13719 const char *zTable;
13720 const char *zType;
13721 const char *zSql;
13722 ShellState *p = (ShellState *)pArg;
 
 
13723
13724 UNUSED_PARAMETER(azNotUsed);
13725 if( nArg!=3 || azArg==0 ) return 0;
13726 zTable = azArg[0];
13727 zType = azArg[1];
13728 zSql = azArg[2];
 
 
13729
13730 if( strcmp(zTable, "sqlite_sequence")==0 ){
13731 raw_printf(p->out, "DELETE FROM sqlite_sequence;\n");
13732 }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 ){
13733 raw_printf(p->out, "ANALYZE sqlite_schema;\n");
13734 }else if( strncmp(zTable, "sqlite_", 7)==0 ){
13735 return 0;
 
 
13736 }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){
13737 char *zIns;
13738 if( !p->writableSchema ){
13739 raw_printf(p->out, "PRAGMA writable_schema=ON;\n");
13740 p->writableSchema = 1;
@@ -13903,12 +13911,14 @@
13903 ".databases List names and files of attached databases",
13904 ".dbconfig ?op? ?val? List or change sqlite3_db_config() options",
13905 ".dbinfo ?DB? Show status information about the database",
13906 ".dump ?TABLE? Render database content as SQL",
13907 " Options:",
13908 " --preserve-rowids Include ROWID values in the output",
13909 " --newlines Allow unescaped newline characters in output",
 
 
13910 " TABLE is a LIKE pattern for the tables to dump",
13911 " Additional LIKE patterns can be given in subsequent arguments",
13912 ".echo on|off Turn command echo on or off",
13913 ".eqp on|off|full|... Enable or disable automatic EXPLAIN QUERY PLAN",
13914 " Other Modes:",
@@ -14029,12 +14039,13 @@
14029 #endif
14030 ".restore ?DB? FILE Restore content of DB (default \"main\") from FILE",
14031 ".save FILE Write in-memory database into FILE",
14032 ".scanstats on|off Turn sqlite3_stmt_scanstatus() metrics on or off",
14033 ".schema ?PATTERN? Show the CREATE statements matching PATTERN",
14034 " Options:",
14035 " --indent Try to pretty-print the schema",
 
14036 ".selftest ?OPTIONS? Run tests defined in the SELFTEST table",
14037 " Options:",
14038 " --init Create a new SELFTEST table",
14039 " -v Verbose output",
14040 ".separator COL ?ROW? Change the column and row separators",
@@ -17705,11 +17716,13 @@
17705 char *zLike = 0;
17706 char *zSql;
17707 int i;
17708 int savedShowHeader = p->showHeader;
17709 int savedShellFlags = p->shellFlgs;
17710 ShellClearFlag(p, SHFLG_PreserveRowid|SHFLG_Newlines|SHFLG_Echo);
 
 
17711 for(i=1; i<nArg; i++){
17712 if( azArg[i][0]=='-' ){
17713 const char *z = azArg[i]+1;
17714 if( z[0]=='-' ) z++;
17715 if( strcmp(z,"preserve-rowids")==0 ){
@@ -17724,10 +17737,16 @@
17724 #endif
17725 }else
17726 if( strcmp(z,"newlines")==0 ){
17727 ShellSetFlag(p, SHFLG_Newlines);
17728 }else
 
 
 
 
 
 
17729 {
17730 raw_printf(stderr, "Unknown option \"%s\" on \".dump\"\n", azArg[i]);
17731 rc = 1;
17732 sqlite3_free(zLike);
17733 goto meta_command_exit;
@@ -17740,15 +17759,17 @@
17740 }
17741 }
17742
17743 open_db(p, 0);
17744
17745 /* When playing back a "dump", the content might appear in an order
17746 ** which causes immediate foreign key constraints to be violated.
17747 ** So disable foreign-key constraint enforcement to prevent problems. */
17748 raw_printf(p->out, "PRAGMA foreign_keys=OFF;\n");
17749 raw_printf(p->out, "BEGIN TRANSACTION;\n");
 
 
17750 p->writableSchema = 0;
17751 p->showHeader = 0;
17752 /* Set writable_schema=ON since doing so forces SQLite to initialize
17753 ** as much of the schema as it can even if the sqlite_schema table is
17754 ** corrupt. */
@@ -17762,26 +17783,30 @@
17762 " ORDER BY tbl_name='sqlite_sequence', rowid",
17763 zLike
17764 );
17765 run_schema_dump_query(p,zSql);
17766 sqlite3_free(zSql);
17767 zSql = sqlite3_mprintf(
17768 "SELECT sql FROM sqlite_schema "
17769 "WHERE (%s) AND sql NOT NULL"
17770 " AND type IN ('index','trigger','view')",
17771 zLike
17772 );
17773 run_table_dump_query(p, zSql);
17774 sqlite3_free(zSql);
 
 
17775 sqlite3_free(zLike);
17776 if( p->writableSchema ){
17777 raw_printf(p->out, "PRAGMA writable_schema=OFF;\n");
17778 p->writableSchema = 0;
17779 }
17780 sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0);
17781 sqlite3_exec(p->db, "RELEASE dump;", 0, 0, 0);
17782 raw_printf(p->out, p->nErr?"ROLLBACK; -- due to errors\n":"COMMIT;\n");
 
 
17783 p->showHeader = savedShowHeader;
17784 p->shellFlgs = savedShellFlags;
17785 }else
17786
17787 if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){
@@ -18216,20 +18241,20 @@
18216 utf8_printf(p->out, "\n");
18217 }
18218 while( (nSkip--)>0 ){
18219 while( xRead(&sCtx) && sCtx.cTerm==sCtx.cColSep ){}
18220 }
18221 zSql = sqlite3_mprintf("SELECT * FROM %s", zTable);
18222 if( zSql==0 ){
18223 import_cleanup(&sCtx);
18224 shell_out_of_memory();
18225 }
18226 nByte = strlen30(zSql);
18227 rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
18228 import_append_char(&sCtx, 0); /* To ensure sCtx.z is allocated */
18229 if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(p->db))==0 ){
18230 char *zCreate = sqlite3_mprintf("CREATE TABLE %s", zTable);
18231 char cSep = '(';
18232 while( xRead(&sCtx) ){
18233 zCreate = sqlite3_mprintf("%z%c\n \"%w\" TEXT", zCreate, cSep, sCtx.z);
18234 cSep = ',';
18235 if( sCtx.cTerm!=sCtx.cColSep ) break;
@@ -18246,11 +18271,11 @@
18246 utf8_printf(p->out, "%s\n", zCreate);
18247 }
18248 rc = sqlite3_exec(p->db, zCreate, 0, 0, 0);
18249 sqlite3_free(zCreate);
18250 if( rc ){
18251 utf8_printf(stderr, "CREATE TABLE %s(...) failed: %s\n", zTable,
18252 sqlite3_errmsg(p->db));
18253 import_cleanup(&sCtx);
18254 rc = 1;
18255 goto meta_command_exit;
18256 }
@@ -19102,10 +19127,11 @@
19102 char *zErrMsg = 0;
19103 const char *zDiv = "(";
19104 const char *zName = 0;
19105 int iSchema = 0;
19106 int bDebug = 0;
 
19107 int ii;
19108
19109 open_db(p, 0);
19110 memcpy(&data, p, sizeof(data));
19111 data.showHeader = 0;
@@ -19114,14 +19140,20 @@
19114 for(ii=1; ii<nArg; ii++){
19115 if( optionMatch(azArg[ii],"indent") ){
19116 data.cMode = data.mode = MODE_Pretty;
19117 }else if( optionMatch(azArg[ii],"debug") ){
19118 bDebug = 1;
 
 
 
 
 
 
19119 }else if( zName==0 ){
19120 zName = azArg[ii];
19121 }else{
19122 raw_printf(stderr, "Usage: .schema ?--indent? ?LIKE-PATTERN?\n");
19123 rc = 1;
19124 goto meta_command_exit;
19125 }
19126 }
19127 if( zName!=0 ){
@@ -19203,11 +19235,14 @@
19203 appendText(&sSelect, " ESCAPE '\\' ", 0);
19204 }
19205 appendText(&sSelect, " AND ", 0);
19206 sqlite3_free(zQarg);
19207 }
19208 appendText(&sSelect, "type!='meta' AND sql IS NOT NULL"
 
 
 
19209 " ORDER BY snum, rowid", 0);
19210 if( bDebug ){
19211 utf8_printf(p->out, "SQL: %s;\n", sSelect.z);
19212 }else{
19213 rc = sqlite3_exec(p->db, sSelect.z, callback, &data, &zErrMsg);
19214
--- src/shell.c
+++ src/shell.c
@@ -11198,10 +11198,12 @@
11198 #define SHFLG_PreserveRowid 0x00000008 /* .dump preserves rowid values */
11199 #define SHFLG_Newlines 0x00000010 /* .dump --newline flag */
11200 #define SHFLG_CountChanges 0x00000020 /* .changes setting */
11201 #define SHFLG_Echo 0x00000040 /* .echo or --echo setting */
11202 #define SHFLG_HeaderSet 0x00000080 /* .header has been used */
11203 #define SHFLG_DumpDataOnly 0x00000100 /* .dump show data only */
11204 #define SHFLG_DumpNoSys 0x00000200 /* .dump omits system tables */
11205
11206 /*
11207 ** Macros for testing and setting shellFlgs
11208 */
11209 #define ShellHasFlag(P,X) (((P)->shellFlgs & (X))!=0)
@@ -13718,23 +13720,29 @@
13720 int rc;
13721 const char *zTable;
13722 const char *zType;
13723 const char *zSql;
13724 ShellState *p = (ShellState *)pArg;
13725 int dataOnly;
13726 int noSys;
13727
13728 UNUSED_PARAMETER(azNotUsed);
13729 if( nArg!=3 || azArg==0 ) return 0;
13730 zTable = azArg[0];
13731 zType = azArg[1];
13732 zSql = azArg[2];
13733 dataOnly = (p->shellFlgs & SHFLG_DumpDataOnly)!=0;
13734 noSys = (p->shellFlgs & SHFLG_DumpNoSys)!=0;
13735
13736 if( strcmp(zTable, "sqlite_sequence")==0 && !noSys ){
13737 if( !dataOnly ) raw_printf(p->out, "DELETE FROM sqlite_sequence;\n");
13738 }else if( sqlite3_strglob("sqlite_stat?", zTable)==0 && !noSys ){
13739 if( !dataOnly ) raw_printf(p->out, "ANALYZE sqlite_schema;\n");
13740 }else if( strncmp(zTable, "sqlite_", 7)==0 ){
13741 return 0;
13742 }else if( dataOnly ){
13743 /* no-op */
13744 }else if( strncmp(zSql, "CREATE VIRTUAL TABLE", 20)==0 ){
13745 char *zIns;
13746 if( !p->writableSchema ){
13747 raw_printf(p->out, "PRAGMA writable_schema=ON;\n");
13748 p->writableSchema = 1;
@@ -13903,12 +13911,14 @@
13911 ".databases List names and files of attached databases",
13912 ".dbconfig ?op? ?val? List or change sqlite3_db_config() options",
13913 ".dbinfo ?DB? Show status information about the database",
13914 ".dump ?TABLE? Render database content as SQL",
13915 " Options:",
13916 " --data-only Output only INSERT statements",
13917 " --newlines Allow unescaped newline characters in output",
13918 " --nosys Omit system tables (ex: \"sqlite_stat1\")",
13919 " --preserve-rowids Include ROWID values in the output",
13920 " TABLE is a LIKE pattern for the tables to dump",
13921 " Additional LIKE patterns can be given in subsequent arguments",
13922 ".echo on|off Turn command echo on or off",
13923 ".eqp on|off|full|... Enable or disable automatic EXPLAIN QUERY PLAN",
13924 " Other Modes:",
@@ -14029,12 +14039,13 @@
14039 #endif
14040 ".restore ?DB? FILE Restore content of DB (default \"main\") from FILE",
14041 ".save FILE Write in-memory database into FILE",
14042 ".scanstats on|off Turn sqlite3_stmt_scanstatus() metrics on or off",
14043 ".schema ?PATTERN? Show the CREATE statements matching PATTERN",
14044 " Options:",
14045 " --indent Try to pretty-print the schema",
14046 " --nosys Omit objects whose names start with \"sqlite_\"",
14047 ".selftest ?OPTIONS? Run tests defined in the SELFTEST table",
14048 " Options:",
14049 " --init Create a new SELFTEST table",
14050 " -v Verbose output",
14051 ".separator COL ?ROW? Change the column and row separators",
@@ -17705,11 +17716,13 @@
17716 char *zLike = 0;
17717 char *zSql;
17718 int i;
17719 int savedShowHeader = p->showHeader;
17720 int savedShellFlags = p->shellFlgs;
17721 ShellClearFlag(p,
17722 SHFLG_PreserveRowid|SHFLG_Newlines|SHFLG_Echo
17723 |SHFLG_DumpDataOnly|SHFLG_DumpNoSys);
17724 for(i=1; i<nArg; i++){
17725 if( azArg[i][0]=='-' ){
17726 const char *z = azArg[i]+1;
17727 if( z[0]=='-' ) z++;
17728 if( strcmp(z,"preserve-rowids")==0 ){
@@ -17724,10 +17737,16 @@
17737 #endif
17738 }else
17739 if( strcmp(z,"newlines")==0 ){
17740 ShellSetFlag(p, SHFLG_Newlines);
17741 }else
17742 if( strcmp(z,"data-only")==0 ){
17743 ShellSetFlag(p, SHFLG_DumpDataOnly);
17744 }else
17745 if( strcmp(z,"nosys")==0 ){
17746 ShellSetFlag(p, SHFLG_DumpNoSys);
17747 }else
17748 {
17749 raw_printf(stderr, "Unknown option \"%s\" on \".dump\"\n", azArg[i]);
17750 rc = 1;
17751 sqlite3_free(zLike);
17752 goto meta_command_exit;
@@ -17740,15 +17759,17 @@
17759 }
17760 }
17761
17762 open_db(p, 0);
17763
17764 if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
17765 /* When playing back a "dump", the content might appear in an order
17766 ** which causes immediate foreign key constraints to be violated.
17767 ** So disable foreign-key constraint enforcement to prevent problems. */
17768 raw_printf(p->out, "PRAGMA foreign_keys=OFF;\n");
17769 raw_printf(p->out, "BEGIN TRANSACTION;\n");
17770 }
17771 p->writableSchema = 0;
17772 p->showHeader = 0;
17773 /* Set writable_schema=ON since doing so forces SQLite to initialize
17774 ** as much of the schema as it can even if the sqlite_schema table is
17775 ** corrupt. */
@@ -17762,26 +17783,30 @@
17783 " ORDER BY tbl_name='sqlite_sequence', rowid",
17784 zLike
17785 );
17786 run_schema_dump_query(p,zSql);
17787 sqlite3_free(zSql);
17788 if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
17789 zSql = sqlite3_mprintf(
17790 "SELECT sql FROM sqlite_schema "
17791 "WHERE (%s) AND sql NOT NULL"
17792 " AND type IN ('index','trigger','view')",
17793 zLike
17794 );
17795 run_table_dump_query(p, zSql);
17796 sqlite3_free(zSql);
17797 }
17798 sqlite3_free(zLike);
17799 if( p->writableSchema ){
17800 raw_printf(p->out, "PRAGMA writable_schema=OFF;\n");
17801 p->writableSchema = 0;
17802 }
17803 sqlite3_exec(p->db, "PRAGMA writable_schema=OFF;", 0, 0, 0);
17804 sqlite3_exec(p->db, "RELEASE dump;", 0, 0, 0);
17805 if( (p->shellFlgs & SHFLG_DumpDataOnly)==0 ){
17806 raw_printf(p->out, p->nErr?"ROLLBACK; -- due to errors\n":"COMMIT;\n");
17807 }
17808 p->showHeader = savedShowHeader;
17809 p->shellFlgs = savedShellFlags;
17810 }else
17811
17812 if( c=='e' && strncmp(azArg[0], "echo", n)==0 ){
@@ -18216,20 +18241,20 @@
18241 utf8_printf(p->out, "\n");
18242 }
18243 while( (nSkip--)>0 ){
18244 while( xRead(&sCtx) && sCtx.cTerm==sCtx.cColSep ){}
18245 }
18246 zSql = sqlite3_mprintf("SELECT * FROM \"%w\"", zTable);
18247 if( zSql==0 ){
18248 import_cleanup(&sCtx);
18249 shell_out_of_memory();
18250 }
18251 nByte = strlen30(zSql);
18252 rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0);
18253 import_append_char(&sCtx, 0); /* To ensure sCtx.z is allocated */
18254 if( rc && sqlite3_strglob("no such table: *", sqlite3_errmsg(p->db))==0 ){
18255 char *zCreate = sqlite3_mprintf("CREATE TABLE \"%w\"", zTable);
18256 char cSep = '(';
18257 while( xRead(&sCtx) ){
18258 zCreate = sqlite3_mprintf("%z%c\n \"%w\" TEXT", zCreate, cSep, sCtx.z);
18259 cSep = ',';
18260 if( sCtx.cTerm!=sCtx.cColSep ) break;
@@ -18246,11 +18271,11 @@
18271 utf8_printf(p->out, "%s\n", zCreate);
18272 }
18273 rc = sqlite3_exec(p->db, zCreate, 0, 0, 0);
18274 sqlite3_free(zCreate);
18275 if( rc ){
18276 utf8_printf(stderr, "CREATE TABLE \"%s\"(...) failed: %s\n", zTable,
18277 sqlite3_errmsg(p->db));
18278 import_cleanup(&sCtx);
18279 rc = 1;
18280 goto meta_command_exit;
18281 }
@@ -19102,10 +19127,11 @@
19127 char *zErrMsg = 0;
19128 const char *zDiv = "(";
19129 const char *zName = 0;
19130 int iSchema = 0;
19131 int bDebug = 0;
19132 int bNoSystemTabs = 0;
19133 int ii;
19134
19135 open_db(p, 0);
19136 memcpy(&data, p, sizeof(data));
19137 data.showHeader = 0;
@@ -19114,14 +19140,20 @@
19140 for(ii=1; ii<nArg; ii++){
19141 if( optionMatch(azArg[ii],"indent") ){
19142 data.cMode = data.mode = MODE_Pretty;
19143 }else if( optionMatch(azArg[ii],"debug") ){
19144 bDebug = 1;
19145 }else if( optionMatch(azArg[ii],"nosys") ){
19146 bNoSystemTabs = 1;
19147 }else if( azArg[ii][0]=='-' ){
19148 utf8_printf(stderr, "Unknown option: \"%s\"\n", azArg[ii]);
19149 rc = 1;
19150 goto meta_command_exit;
19151 }else if( zName==0 ){
19152 zName = azArg[ii];
19153 }else{
19154 raw_printf(stderr, "Usage: .schema ?--indent? ?--nosys? ?LIKE-PATTERN?\n");
19155 rc = 1;
19156 goto meta_command_exit;
19157 }
19158 }
19159 if( zName!=0 ){
@@ -19203,11 +19235,14 @@
19235 appendText(&sSelect, " ESCAPE '\\' ", 0);
19236 }
19237 appendText(&sSelect, " AND ", 0);
19238 sqlite3_free(zQarg);
19239 }
19240 if( bNoSystemTabs ){
19241 appendText(&sSelect, "name NOT LIKE 'sqlite_%%' AND ", 0);
19242 }
19243 appendText(&sSelect, "sql IS NOT NULL"
19244 " ORDER BY snum, rowid", 0);
19245 if( bDebug ){
19246 utf8_printf(p->out, "SQL: %s;\n", sSelect.z);
19247 }else{
19248 rc = sqlite3_exec(p->db, sSelect.z, callback, &data, &zErrMsg);
19249
+612 -192
--- src/sqlite3.c
+++ src/sqlite3.c
@@ -1171,11 +1171,11 @@
11711171
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
11721172
** [sqlite_version()] and [sqlite_source_id()].
11731173
*/
11741174
#define SQLITE_VERSION "3.34.0"
11751175
#define SQLITE_VERSION_NUMBER 3034000
1176
-#define SQLITE_SOURCE_ID "2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c402e5"
1176
+#define SQLITE_SOURCE_ID "2020-10-19 20:49:54 75a0288871ccb2a69a636cbb328fe19045a0d0ef96a193ecd118b9a196784d2d"
11771177
11781178
/*
11791179
** CAPI3REF: Run-Time Library Version Numbers
11801180
** KEYWORDS: sqlite3_version sqlite3_sourceid
11811181
**
@@ -10290,22 +10290,29 @@
1029010290
1029110291
/*
1029210292
** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
1029310293
**
1029410294
** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
10295
-** method of a [virtual table], then it returns true if and only if the
10295
+** method of a [virtual table], then it might return true if the
1029610296
** column is being fetched as part of an UPDATE operation during which the
10297
-** column value will not change. Applications might use this to substitute
10298
-** a return value that is less expensive to compute and that the corresponding
10297
+** column value will not change. The virtual table implementation can use
10298
+** this hint as permission to substitute a return value that is less
10299
+** expensive to compute and that the corresponding
1029910300
** [xUpdate] method understands as a "no-change" value.
1030010301
**
1030110302
** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
1030210303
** the column is not changed by the UPDATE statement, then the xColumn
1030310304
** method can optionally return without setting a result, without calling
1030410305
** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
1030510306
** In that case, [sqlite3_value_nochange(X)] will return true for the
1030610307
** same column in the [xUpdate] method.
10308
+**
10309
+** The sqlite3_vtab_nochange() routine is an optimization. Virtual table
10310
+** implementations should continue to give a correct answer even if the
10311
+** sqlite3_vtab_nochange() interface were to always return false. In the
10312
+** current implementation, the sqlite3_vtab_nochange() interface does always
10313
+** returns false for the enhanced [UPDATE FROM] statement.
1030710314
*/
1030810315
SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
1030910316
1031010317
/*
1031110318
** CAPI3REF: Determine The Collation For a Virtual Table Constraint
@@ -46915,11 +46922,15 @@
4691546922
}else{
4691646923
/* Opens a file, only if it exists. */
4691746924
dwCreationDisposition = OPEN_EXISTING;
4691846925
}
4691946926
46920
- dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
46927
+ if( 0==sqlite3_uri_boolean(zName, "exclusive", 0) ){
46928
+ dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
46929
+ }else{
46930
+ dwShareMode = 0;
46931
+ }
4692146932
4692246933
if( isDelete ){
4692346934
#if SQLITE_OS_WINCE
4692446935
dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN;
4692546936
isTemp = 1;
@@ -48068,15 +48079,18 @@
4806848079
4806948080
/*
4807048081
** Close an memdb-file.
4807148082
**
4807248083
** The pData pointer is owned by the application, so there is nothing
48073
-** to free.
48084
+** to free. Unless the SQLITE_DESERIALIZE_FREEONCLOSE flag is set,
48085
+** in which case we own the pData pointer and need to free it.
4807448086
*/
4807548087
static int memdbClose(sqlite3_file *pFile){
4807648088
MemFile *p = (MemFile *)pFile;
48077
- if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ) sqlite3_free(p->aData);
48089
+ if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ){
48090
+ sqlite3_free(p->aData);
48091
+ }
4807848092
return SQLITE_OK;
4807948093
}
4808048094
4808148095
/*
4808248096
** Read data from an memdb-file.
@@ -48531,10 +48545,11 @@
4853148545
p = memdbFromDbSchema(db, zSchema);
4853248546
if( p==0 ){
4853348547
rc = SQLITE_ERROR;
4853448548
}else{
4853548549
p->aData = pData;
48550
+ pData = 0;
4853648551
p->sz = szDb;
4853748552
p->szAlloc = szBuf;
4853848553
p->szMax = szBuf;
4853948554
if( p->szMax<sqlite3GlobalConfig.mxMemdbSize ){
4854048555
p->szMax = sqlite3GlobalConfig.mxMemdbSize;
@@ -48543,10 +48558,13 @@
4854348558
rc = SQLITE_OK;
4854448559
}
4854548560
4854648561
end_deserialize:
4854748562
sqlite3_finalize(pStmt);
48563
+ if( pData && (mFlags & SQLITE_DESERIALIZE_FREEONCLOSE)!=0 ){
48564
+ sqlite3_free(pData);
48565
+ }
4854848566
sqlite3_mutex_leave(db->mutex);
4854948567
return rc;
4855048568
}
4855148569
4855248570
/*
@@ -88945,11 +88963,12 @@
8894588963
**
8894688964
** Begin a transaction on database P1 if a transaction is not already
8894788965
** active.
8894888966
** If P2 is non-zero, then a write-transaction is started, or if a
8894988967
** read-transaction is already active, it is upgraded to a write-transaction.
88950
-** If P2 is zero, then a read-transaction is started.
88968
+** If P2 is zero, then a read-transaction is started. If P2 is 2 or more
88969
+** then an exclusive transaction is started.
8895188970
**
8895288971
** P1 is the index of the database file on which the transaction is
8895388972
** started. Index 0 is the main database file and index 1 is the
8895488973
** file used for temporary tables. Indices of 2 or more are used for
8895588974
** attached databases.
@@ -88979,10 +88998,11 @@
8897988998
Btree *pBt;
8898088999
int iMeta = 0;
8898189000
8898289001
assert( p->bIsReader );
8898389002
assert( p->readOnly==0 || pOp->p2==0 );
89003
+ assert( pOp->p2>=0 && pOp->p2<=2 );
8898489004
assert( pOp->p1>=0 && pOp->p1<db->nDb );
8898589005
assert( DbMaskTest(p->btreeMask, pOp->p1) );
8898689006
if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){
8898789007
rc = SQLITE_READONLY;
8898889008
goto abort_due_to_error;
@@ -89842,22 +89862,22 @@
8984289862
}
8984389863
break;
8984489864
}
8984589865
8984689866
89847
-/* Opcode: SeekScan P1 * * * *
89867
+/* Opcode: SeekScan P1 P2 * * *
8984889868
** Synopsis: Scan-ahead up to P1 rows
8984989869
**
8985089870
** This opcode is a prefix opcode to OP_SeekGE. In other words, this
89851
-** opcode must be immediately followed by OP_SeekGE. Furthermore, the
89852
-** OP_SeekGE must be followed by OP_IdxGT. These constraints are
89871
+** opcode must be immediately followed by OP_SeekGE. This constraint is
8985389872
** checked by assert() statements.
8985489873
**
8985589874
** This opcode uses the P1 through P4 operands of the subsequent
8985689875
** OP_SeekGE. In the text that follows, the operands of the subsequent
8985789876
** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only
89858
-** the P1 operand of this opcode is used, and it is denoted as This.P1.
89877
+** the P1 and P2 operands of this opcode are also used, and are called
89878
+** This.P1 and This.P2.
8985989879
**
8986089880
** This opcode helps to optimize IN operators on a multi-column index
8986189881
** where the IN operator is on the later terms of the index by avoiding
8986289882
** unnecessary seeks on the btree, substituting steps to the next row
8986389883
** of the b-tree instead. A correct answer is obtained if this opcode
@@ -89871,39 +89891,43 @@
8987189891
** then this opcode is a no-op and control passes through into the OP_SeekGE.
8987289892
**
8987389893
** If the SeekGE.P1 cursor is pointing to a valid row, then that row
8987489894
** might be the target row, or it might be near and slightly before the
8987589895
** target row. This opcode attempts to position the cursor on the target
89876
-** row by, perhaps stepping by invoking sqlite3BtreeStep() on the cursor
89896
+** row by, perhaps by invoking sqlite3BtreeStep() on the cursor
8987789897
** between 0 and This.P1 times.
8987889898
**
8987989899
** There are three possible outcomes from this opcode:<ol>
8988089900
**
8988189901
** <li> If after This.P1 steps, the cursor is still point to a place that
8988289902
** is earlier in the btree than the target row,
8988389903
** then fall through into the subsquence OP_SeekGE opcode.
8988489904
**
8988589905
** <li> If the cursor is successfully moved to the target row by 0 or more
89886
-** sqlite3BtreeNext() calls, then jump to the first instruction after the
89887
-** OP_IdxGT opcode - or in other words, skip the next two opcodes.
89906
+** sqlite3BtreeNext() calls, then jump to This.P2, which will land just
89907
+** past the OP_IdxGT opcode that follows the OP_SeekGE.
8988889908
**
8988989909
** <li> If the cursor ends up past the target row (indicating the the target
8989089910
** row does not exist in the btree) then jump to SeekOP.P2.
8989189911
** </ol>
8989289912
*/
8989389913
case OP_SeekScan: {
8989489914
VdbeCursor *pC;
8989589915
int res;
89896
- int n;
89916
+ int nStep;
8989789917
UnpackedRecord r;
8989889918
8989989919
assert( pOp[1].opcode==OP_SeekGE );
89900
- assert( pOp[2].opcode==OP_IdxGT );
89901
- assert( pOp[1].p1==pOp[2].p1 );
89902
- assert( pOp[1].p2==pOp[2].p2 );
89903
- assert( pOp[1].p3==pOp[2].p3 );
89904
- assert( pOp[1].p4.i==pOp[2].p4.i );
89920
+
89921
+ /* pOp->p2 points to the first instruction past the OP_IdxGT that
89922
+ ** follows the OP_SeekGE. */
89923
+ assert( pOp->p2>=(int)(pOp-aOp)+2 );
89924
+ assert( aOp[pOp->p2-1].opcode==OP_IdxGT );
89925
+ assert( pOp[1].p1==aOp[pOp->p2-1].p1 );
89926
+ assert( pOp[1].p2==aOp[pOp->p2-1].p2 );
89927
+ assert( pOp[1].p3==aOp[pOp->p2-1].p3 );
89928
+
8990589929
assert( pOp->p1>0 );
8990689930
pC = p->apCsr[pOp[1].p1];
8990789931
assert( pC!=0 );
8990889932
assert( pC->eCurType==CURTYPE_BTREE );
8990989933
assert( !pC->isTable );
@@ -89913,12 +89937,12 @@
8991389937
printf("... cursor not valid - fall through\n");
8991489938
}
8991589939
#endif
8991689940
break;
8991789941
}
89918
- n = pOp->p1;
89919
- assert( n>=1 );
89942
+ nStep = pOp->p1;
89943
+ assert( nStep>=1 );
8992089944
r.pKeyInfo = pC->pKeyInfo;
8992189945
r.nField = (u16)pOp[1].p4.i;
8992289946
r.default_rc = 0;
8992389947
r.aMem = &aMem[pOp[1].p3];
8992489948
#ifdef SQLITE_DEBUG
@@ -89936,37 +89960,37 @@
8993689960
if( rc ) goto abort_due_to_error;
8993789961
if( res>0 ){
8993889962
seekscan_search_fail:
8993989963
#ifdef SQLITE_DEBUG
8994089964
if( db->flags&SQLITE_VdbeTrace ){
89941
- printf("... %d steps and then skip\n", pOp->p1 - n);
89965
+ printf("... %d steps and then skip\n", pOp->p1 - nStep);
8994289966
}
8994389967
#endif
8994489968
VdbeBranchTaken(1,3);
8994589969
pOp++;
8994689970
goto jump_to_p2;
8994789971
}
8994889972
if( res==0 ){
8994989973
#ifdef SQLITE_DEBUG
8995089974
if( db->flags&SQLITE_VdbeTrace ){
89951
- printf("... %d steps and then success\n", pOp->p1 - n);
89975
+ printf("... %d steps and then success\n", pOp->p1 - nStep);
8995289976
}
8995389977
#endif
8995489978
VdbeBranchTaken(2,3);
89955
- pOp += 2;
89979
+ goto jump_to_p2;
8995689980
break;
8995789981
}
89958
- if( n<=0 ){
89982
+ if( nStep<=0 ){
8995989983
#ifdef SQLITE_DEBUG
8996089984
if( db->flags&SQLITE_VdbeTrace ){
8996189985
printf("... fall through after %d steps\n", pOp->p1);
8996289986
}
8996389987
#endif
8996489988
VdbeBranchTaken(0,3);
8996589989
break;
8996689990
}
89967
- n--;
89991
+ nStep--;
8996889992
rc = sqlite3BtreeNext(pC->uc.pCursor, 0);
8996989993
if( rc ){
8997089994
if( rc==SQLITE_DONE ){
8997189995
rc = SQLITE_OK;
8997289996
goto seekscan_search_fail;
@@ -100066,11 +100090,13 @@
100066100090
** SELECT * FROM t1 WHERE (select a from t1);
100067100091
*/
100068100092
SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
100069100093
int op;
100070100094
while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){
100071
- assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW );
100095
+ assert( pExpr->op==TK_COLLATE
100096
+ || pExpr->op==TK_IF_NULL_ROW
100097
+ || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) );
100072100098
pExpr = pExpr->pLeft;
100073100099
assert( pExpr!=0 );
100074100100
}
100075100101
op = pExpr->op;
100076100102
if( op==TK_SELECT ){
@@ -112360,11 +112386,11 @@
112360112386
Table *p;
112361112387
int i;
112362112388
char *zColl; /* Dequoted name of collation sequence */
112363112389
sqlite3 *db;
112364112390
112365
- if( (p = pParse->pNewTable)==0 ) return;
112391
+ if( (p = pParse->pNewTable)==0 || IN_RENAME_OBJECT ) return;
112366112392
i = p->nCol-1;
112367112393
db = pParse->db;
112368112394
zColl = sqlite3NameFromToken(db, pToken);
112369112395
if( !zColl ) return;
112370112396
@@ -115164,11 +115190,11 @@
115164115190
int i;
115165115191
struct SrcList_item *pItem;
115166115192
assert(pList || pParse->db->mallocFailed );
115167115193
if( pList ){
115168115194
for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
115169
- if( pItem->iCursor>=0 ) break;
115195
+ if( pItem->iCursor>=0 ) continue;
115170115196
pItem->iCursor = pParse->nTab++;
115171115197
if( pItem->pSelect ){
115172115198
sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
115173115199
}
115174115200
}
@@ -115361,11 +115387,20 @@
115361115387
}
115362115388
v = sqlite3GetVdbe(pParse);
115363115389
if( !v ) return;
115364115390
if( type!=TK_DEFERRED ){
115365115391
for(i=0; i<db->nDb; i++){
115366
- sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1);
115392
+ int eTxnType;
115393
+ Btree *pBt = db->aDb[i].pBt;
115394
+ if( pBt && sqlite3BtreeIsReadonly(pBt) ){
115395
+ eTxnType = 0; /* Read txn */
115396
+ }else if( type==TK_EXCLUSIVE ){
115397
+ eTxnType = 2; /* Exclusive txn */
115398
+ }else{
115399
+ eTxnType = 1; /* Write txn */
115400
+ }
115401
+ sqlite3VdbeAddOp2(v, OP_Transaction, i, eTxnType);
115367115402
sqlite3VdbeUsesBtree(v, i);
115368115403
}
115369115404
}
115370115405
sqlite3VdbeAddOp0(v, OP_AutoCommit);
115371115406
}
@@ -117348,14 +117383,10 @@
117348117383
** opcode if it is present */
117349117384
sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity);
117350117385
}
117351117386
if( regOut ){
117352117387
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
117353
- if( pIdx->pTable->pSelect ){
117354
- const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx);
117355
- sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT);
117356
- }
117357117388
}
117358117389
sqlite3ReleaseTempRange(pParse, regBase, nCol);
117359117390
return regBase;
117360117391
}
117361117392
@@ -131965,10 +131996,11 @@
131965131996
){
131966131997
SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */
131967131998
int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */
131968131999
Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
131969132000
Select *pSetup = p->pPrior; /* The setup query */
132001
+ Select *pFirstRec; /* Left-most recursive term */
131970132002
int addrTop; /* Top of the loop */
131971132003
int addrCont, addrBreak; /* CONTINUE and BREAK addresses */
131972132004
int iCurrent = 0; /* The Current table */
131973132005
int regCurrent; /* Register holding Current table */
131974132006
int iQueue; /* The Queue table */
@@ -132039,12 +132071,30 @@
132039132071
p->selFlags |= SF_UsesEphemeral;
132040132072
}
132041132073
132042132074
/* Detach the ORDER BY clause from the compound SELECT */
132043132075
p->pOrderBy = 0;
132076
+
132077
+ /* Figure out how many elements of the compound SELECT are part of the
132078
+ ** recursive query. Make sure no recursive elements use aggregate
132079
+ ** functions. Mark the recursive elements as UNION ALL even if they
132080
+ ** are really UNION because the distinctness will be enforced by the
132081
+ ** iDistinct table. pFirstRec is left pointing to the left-most
132082
+ ** recursive term of the CTE.
132083
+ */
132084
+ pFirstRec = p;
132085
+ for(pFirstRec=p; ALWAYS(pFirstRec!=0); pFirstRec=pFirstRec->pPrior){
132086
+ if( pFirstRec->selFlags & SF_Aggregate ){
132087
+ sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
132088
+ goto end_of_recursive_query;
132089
+ }
132090
+ pFirstRec->op = TK_ALL;
132091
+ if( (pFirstRec->pPrior->selFlags & SF_Recursive)==0 ) break;
132092
+ }
132044132093
132045132094
/* Store the results of the setup-query in Queue. */
132095
+ pSetup = pFirstRec->pPrior;
132046132096
pSetup->pNext = 0;
132047132097
ExplainQueryPlan((pParse, 1, "SETUP"));
132048132098
rc = sqlite3Select(pParse, pSetup, &destQueue);
132049132099
pSetup->pNext = p;
132050132100
if( rc ) goto end_of_recursive_query;
@@ -132073,19 +132123,15 @@
132073132123
sqlite3VdbeResolveLabel(v, addrCont);
132074132124
132075132125
/* Execute the recursive SELECT taking the single row in Current as
132076132126
** the value for the recursive-table. Store the results in the Queue.
132077132127
*/
132078
- if( p->selFlags & SF_Aggregate ){
132079
- sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
132080
- }else{
132081
- p->pPrior = 0;
132082
- ExplainQueryPlan((pParse, 1, "RECURSIVE STEP"));
132083
- sqlite3Select(pParse, p, &destQueue);
132084
- assert( p->pPrior==0 );
132085
- p->pPrior = pSetup;
132086
- }
132128
+ pFirstRec->pPrior = 0;
132129
+ ExplainQueryPlan((pParse, 1, "RECURSIVE STEP"));
132130
+ sqlite3Select(pParse, p, &destQueue);
132131
+ assert( pFirstRec->pPrior==0 );
132132
+ pFirstRec->pPrior = pSetup;
132087132133
132088132134
/* Keep running the loop until the Queue is empty */
132089132135
sqlite3VdbeGoto(v, addrTop);
132090132136
sqlite3VdbeResolveLabel(v, addrBreak);
132091132137
@@ -132149,10 +132195,20 @@
132149132195
p->nSelectRow = nRow;
132150132196
p = p->pNext;
132151132197
}
132152132198
return rc;
132153132199
}
132200
+
132201
+/*
132202
+** Return true if the SELECT statement which is known to be the recursive
132203
+** part of a recursive CTE still has its anchor terms attached. If the
132204
+** anchor terms have already been removed, then return false.
132205
+*/
132206
+static int hasAnchor(Select *p){
132207
+ while( p && (p->selFlags & SF_Recursive)!=0 ){ p = p->pPrior; }
132208
+ return p!=0;
132209
+}
132154132210
132155132211
/*
132156132212
** This routine is called to process a compound query form from
132157132213
** two or more separate queries using UNION, UNION ALL, EXCEPT, or
132158132214
** INTERSECT
@@ -132235,11 +132291,11 @@
132235132291
*/
132236132292
assert( p->pEList && pPrior->pEList );
132237132293
assert( p->pEList->nExpr==pPrior->pEList->nExpr );
132238132294
132239132295
#ifndef SQLITE_OMIT_CTE
132240
- if( p->selFlags & SF_Recursive ){
132296
+ if( (p->selFlags & SF_Recursive)!=0 && hasAnchor(p) ){
132241132297
generateWithRecursiveQuery(pParse, p, &dest);
132242132298
}else
132243132299
#endif
132244132300
132245132301
/* Compound SELECTs that have an ORDER BY clause are handled separately.
@@ -132326,10 +132382,11 @@
132326132382
assert( p->addrOpenEphm[0] == -1 );
132327132383
p->addrOpenEphm[0] = addr;
132328132384
findRightmost(p)->selFlags |= SF_UsesEphemeral;
132329132385
assert( p->pEList );
132330132386
}
132387
+
132331132388
132332132389
/* Code the SELECT statements to our left
132333132390
*/
132334132391
assert( !pPrior->pOrderBy );
132335132392
sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
@@ -134419,12 +134476,14 @@
134419134476
if( pCte ){
134420134477
Table *pTab;
134421134478
ExprList *pEList;
134422134479
Select *pSel;
134423134480
Select *pLeft; /* Left-most SELECT statement */
134481
+ Select *pRecTerm; /* Left-most recursive term */
134424134482
int bMayRecursive; /* True if compound joined by UNION [ALL] */
134425134483
With *pSavedWith; /* Initial value of pParse->pWith */
134484
+ int iRecTab = -1; /* Cursor for recursive table */
134426134485
134427134486
/* If pCte->zCteErr is non-NULL at this point, then this is an illegal
134428134487
** recursive reference to CTE pCte. Leave an error in pParse and return
134429134488
** early. If pCte->zCteErr is NULL, then this is not a recursive reference.
134430134489
** In this case, proceed. */
@@ -134445,48 +134504,52 @@
134445134504
pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0);
134446134505
if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
134447134506
assert( pFrom->pSelect );
134448134507
134449134508
/* Check if this is a recursive CTE. */
134450
- pSel = pFrom->pSelect;
134509
+ pRecTerm = pSel = pFrom->pSelect;
134451134510
bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION );
134452
- if( bMayRecursive ){
134511
+ while( bMayRecursive && pRecTerm->op==pSel->op ){
134453134512
int i;
134454
- SrcList *pSrc = pFrom->pSelect->pSrc;
134513
+ SrcList *pSrc = pRecTerm->pSrc;
134514
+ assert( pRecTerm->pPrior!=0 );
134455134515
for(i=0; i<pSrc->nSrc; i++){
134456134516
struct SrcList_item *pItem = &pSrc->a[i];
134457134517
if( pItem->zDatabase==0
134458134518
&& pItem->zName!=0
134459134519
&& 0==sqlite3StrICmp(pItem->zName, pCte->zName)
134460
- ){
134520
+ ){
134461134521
pItem->pTab = pTab;
134462
- pItem->fg.isRecursive = 1;
134463134522
pTab->nTabRef++;
134464
- pSel->selFlags |= SF_Recursive;
134523
+ pItem->fg.isRecursive = 1;
134524
+ if( pRecTerm->selFlags & SF_Recursive ){
134525
+ sqlite3ErrorMsg(pParse,
134526
+ "multiple references to recursive table: %s", pCte->zName
134527
+ );
134528
+ return SQLITE_ERROR;
134529
+ }
134530
+ pRecTerm->selFlags |= SF_Recursive;
134531
+ if( iRecTab<0 ) iRecTab = pParse->nTab++;
134532
+ pItem->iCursor = iRecTab;
134465134533
}
134466134534
}
134467
- }
134468
-
134469
- /* Only one recursive reference is permitted. */
134470
- if( pTab->nTabRef>2 ){
134471
- sqlite3ErrorMsg(
134472
- pParse, "multiple references to recursive table: %s", pCte->zName
134473
- );
134474
- return SQLITE_ERROR;
134475
- }
134476
- assert( pTab->nTabRef==1 ||
134477
- ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 ));
134535
+ if( (pRecTerm->selFlags & SF_Recursive)==0 ) break;
134536
+ pRecTerm = pRecTerm->pPrior;
134537
+ }
134478134538
134479134539
pCte->zCteErr = "circular reference: %s";
134480134540
pSavedWith = pParse->pWith;
134481134541
pParse->pWith = pWith;
134482
- if( bMayRecursive ){
134483
- Select *pPrior = pSel->pPrior;
134484
- assert( pPrior->pWith==0 );
134485
- pPrior->pWith = pSel->pWith;
134486
- sqlite3WalkSelect(pWalker, pPrior);
134487
- pPrior->pWith = 0;
134542
+ if( pSel->selFlags & SF_Recursive ){
134543
+ assert( pRecTerm!=0 );
134544
+ assert( (pRecTerm->selFlags & SF_Recursive)==0 );
134545
+ assert( pRecTerm->pNext!=0 );
134546
+ assert( (pRecTerm->pNext->selFlags & SF_Recursive)!=0 );
134547
+ assert( pRecTerm->pWith==0 );
134548
+ pRecTerm->pWith = pSel->pWith;
134549
+ sqlite3WalkSelect(pWalker, pRecTerm);
134550
+ pRecTerm->pWith = 0;
134488134551
}else{
134489134552
sqlite3WalkSelect(pWalker, pSel);
134490134553
}
134491134554
pParse->pWith = pWith;
134492134555
@@ -138158,11 +138221,11 @@
138158138221
pGrp = sqlite3ExprListAppend(pParse, pGrp, sqlite3ExprDup(db, pNew, 0));
138159138222
}
138160138223
#endif
138161138224
pList = sqlite3ExprListAppend(pParse, pList, pNew);
138162138225
}
138163
- eDest = SRT_Upfrom;
138226
+ eDest = IsVirtual(pTab) ? SRT_Table : SRT_Upfrom;
138164138227
}else if( pTab->pSelect ){
138165138228
for(i=0; i<pTab->nCol; i++){
138166138229
pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
138167138230
}
138168138231
eDest = SRT_Table;
@@ -139111,16 +139174,30 @@
139111139174
ephemTab = pParse->nTab++;
139112139175
addr= sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, nArg);
139113139176
regArg = pParse->nMem + 1;
139114139177
pParse->nMem += nArg;
139115139178
if( pSrc->nSrc>1 ){
139179
+ Index *pPk = 0;
139116139180
Expr *pRow;
139117139181
ExprList *pList;
139118
- if( pRowid ){
139119
- pRow = sqlite3ExprDup(db, pRowid, 0);
139182
+ if( HasRowid(pTab) ){
139183
+ if( pRowid ){
139184
+ pRow = sqlite3ExprDup(db, pRowid, 0);
139185
+ }else{
139186
+ pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0);
139187
+ }
139120139188
}else{
139121
- pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0);
139189
+ i16 iPk; /* PRIMARY KEY column */
139190
+ pPk = sqlite3PrimaryKeyIndex(pTab);
139191
+ assert( pPk!=0 );
139192
+ assert( pPk->nKeyCol==1 );
139193
+ iPk = pPk->aiColumn[0];
139194
+ if( aXRef[iPk]>=0 ){
139195
+ pRow = sqlite3ExprDup(db, pChanges->a[aXRef[iPk]].pExpr, 0);
139196
+ }else{
139197
+ pRow = exprRowColumn(pParse, iPk);
139198
+ }
139122139199
}
139123139200
pList = sqlite3ExprListAppend(pParse, 0, pRow);
139124139201
139125139202
for(i=0; i<pTab->nCol; i++){
139126139203
if( aXRef[i]>=0 ){
@@ -139130,11 +139207,11 @@
139130139207
}else{
139131139208
pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
139132139209
}
139133139210
}
139134139211
139135
- updateFromSelect(pParse, ephemTab, 0, pList, pSrc, pWhere, 0, 0);
139212
+ updateFromSelect(pParse, ephemTab, pPk, pList, pSrc, pWhere, 0, 0);
139136139213
sqlite3ExprListDelete(db, pList);
139137139214
eOnePass = ONEPASS_OFF;
139138139215
}else{
139139139216
regRec = ++pParse->nMem;
139140139217
regRowid = ++pParse->nMem;
@@ -142457,11 +142534,16 @@
142457142534
pIn->eEndLoopOp = OP_Noop;
142458142535
}
142459142536
pIn++;
142460142537
}
142461142538
}
142462
- if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){
142539
+ testcase( iEq>0
142540
+ && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0
142541
+ && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 );
142542
+ if( iEq>0
142543
+ && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0
142544
+ ){
142463142545
sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq);
142464142546
}
142465142547
}else{
142466142548
pLevel->u.in.nIn = 0;
142467142549
}
@@ -143507,10 +143589,11 @@
143507143589
char *zEndAff = 0; /* Affinity for end of range constraint */
143508143590
u8 bSeekPastNull = 0; /* True to seek past initial nulls */
143509143591
u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
143510143592
int omitTable; /* True if we use the index only */
143511143593
int regBignull = 0; /* big-null flag register */
143594
+ int addrSeekScan = 0; /* Opcode of the OP_SeekScan, if any */
143512143595
143513143596
pIdx = pLoop->u.btree.pIndex;
143514143597
iIdxCur = pLevel->iIdxCur;
143515143598
assert( nEq>=pLoop->nSkip );
143516143599
@@ -143652,22 +143735,22 @@
143652143735
VdbeComment((v, "NULL-scan pass ctr"));
143653143736
}
143654143737
143655143738
op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
143656143739
assert( op!=0 );
143657
- if( (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 ){
143658
- assert( op==OP_SeekGE );
143740
+ if( (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 && op==OP_SeekGE ){
143659143741
assert( regBignull==0 );
143660143742
/* TUNING: The OP_SeekScan opcode seeks to reduce the number
143661143743
** of expensive seek operations by replacing a single seek with
143662143744
** 1 or more step operations. The question is, how many steps
143663143745
** should we try before giving up and going with a seek. The cost
143664143746
** of a seek is proportional to the logarithm of the of the number
143665143747
** of entries in the tree, so basing the number of steps to try
143666143748
** on the estimated number of rows in the btree seems like a good
143667143749
** guess. */
143668
- sqlite3VdbeAddOp1(v, OP_SeekScan, (pIdx->aiRowLogEst[0]+9)/10);
143750
+ addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan,
143751
+ (pIdx->aiRowLogEst[0]+9)/10);
143669143752
VdbeCoverage(v);
143670143753
}
143671143754
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
143672143755
VdbeCoverage(v);
143673143756
VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
@@ -143748,10 +143831,11 @@
143748143831
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
143749143832
testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
143750143833
testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
143751143834
testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
143752143835
testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
143836
+ if( addrSeekScan ) sqlite3VdbeJumpHere(v, addrSeekScan);
143753143837
}
143754143838
if( regBignull ){
143755143839
/* During a NULL-scan, check to see if we have reached the end of
143756143840
** the NULLs */
143757143841
assert( bSeekPastNull==!bStopAtNull );
@@ -147706,11 +147790,11 @@
147706147790
if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C';
147707147791
if( pTerm->eOperator & WO_SINGLE ){
147708147792
sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}",
147709147793
pTerm->leftCursor, pTerm->u.x.leftColumn);
147710147794
}else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){
147711
- sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%lld",
147795
+ sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%llx",
147712147796
pTerm->u.pOrInfo->indexable);
147713147797
}else{
147714147798
sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor);
147715147799
}
147716147800
sqlite3DebugPrintf(
@@ -148484,11 +148568,11 @@
148484148568
}
148485148569
}else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){
148486148570
/* "x IN (value, value, ...)" */
148487148571
nIn = sqlite3LogEst(pExpr->x.pList->nExpr);
148488148572
}
148489
- if( pProbe->hasStat1 ){
148573
+ if( pProbe->hasStat1 && rLogSize>=10 ){
148490148574
LogEst M, logK, safetyMargin;
148491148575
/* Let:
148492148576
** N = the total number of rows in the table
148493148577
** K = the number of entries on the RHS of the IN operator
148494148578
** M = the number of rows in the table that match terms to the
@@ -148503,11 +148587,12 @@
148503148587
**
148504148588
** Our estimates for M, K, and N might be inaccurate, so we build in
148505148589
** a safety margin of 2 (LogEst: 10) that favors using the IN operator
148506148590
** with the index, as using an index has better worst-case behavior.
148507148591
** If we do not have real sqlite_stat1 data, always prefer to use
148508
- ** the index.
148592
+ ** the index. Do not bother with this optimization on very small
148593
+ ** tables (less than 2 rows) as it is pointless in that case.
148509148594
*/
148510148595
M = pProbe->aiRowLogEst[saved_nEq];
148511148596
logK = estLog(nIn);
148512148597
safetyMargin = 10; /* TUNING: extra weight for indexed IN */
148513148598
if( M + logK + safetyMargin < nIn + rLogSize ){
@@ -209277,10 +209362,11 @@
209277209362
int eDetail; /* FTS5_DETAIL_XXX value */
209278209363
char *zContentExprlist;
209279209364
Fts5Tokenizer *pTok;
209280209365
fts5_tokenizer *pTokApi;
209281209366
int bLock; /* True when table is preparing statement */
209367
+ int ePattern; /* FTS_PATTERN_XXX constant */
209282209368
209283209369
/* Values loaded from the %_config table */
209284209370
int iCookie; /* Incremented when %_config is modified */
209285209371
int pgsz; /* Approximate page size used in %_data */
209286209372
int nAutomerge; /* 'automerge' setting */
@@ -209297,21 +209383,23 @@
209297209383
int bPrefixIndex; /* True to use prefix-indexes */
209298209384
#endif
209299209385
};
209300209386
209301209387
/* Current expected value of %_config table 'version' field */
209302
-#define FTS5_CURRENT_VERSION 4
209388
+#define FTS5_CURRENT_VERSION 4
209303209389
209304209390
#define FTS5_CONTENT_NORMAL 0
209305209391
#define FTS5_CONTENT_NONE 1
209306209392
#define FTS5_CONTENT_EXTERNAL 2
209307209393
209308
-#define FTS5_DETAIL_FULL 0
209309
-#define FTS5_DETAIL_NONE 1
209310
-#define FTS5_DETAIL_COLUMNS 2
209394
+#define FTS5_DETAIL_FULL 0
209395
+#define FTS5_DETAIL_NONE 1
209396
+#define FTS5_DETAIL_COLUMNS 2
209311209397
209312
-
209398
+#define FTS5_PATTERN_NONE 0
209399
+#define FTS5_PATTERN_LIKE 65 /* matches SQLITE_INDEX_CONSTRAINT_LIKE */
209400
+#define FTS5_PATTERN_GLOB 66 /* matches SQLITE_INDEX_CONSTRAINT_GLOB */
209313209401
209314209402
static int sqlite3Fts5ConfigParse(
209315209403
Fts5Global*, sqlite3*, int, const char **, Fts5Config**, char**
209316209404
);
209317209405
static void sqlite3Fts5ConfigFree(Fts5Config*);
@@ -209647,12 +209735,11 @@
209647209735
209648209736
static int sqlite3Fts5GetTokenizer(
209649209737
Fts5Global*,
209650209738
const char **azArg,
209651209739
int nArg,
209652
- Fts5Tokenizer**,
209653
- fts5_tokenizer**,
209740
+ Fts5Config*,
209654209741
char **pzErr
209655209742
);
209656209743
209657209744
static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64);
209658209745
@@ -209777,14 +209864,22 @@
209777209864
};
209778209865
209779209866
/* Parse a MATCH expression. */
209780209867
static int sqlite3Fts5ExprNew(
209781209868
Fts5Config *pConfig,
209869
+ int bPhraseToAnd,
209782209870
int iCol, /* Column on LHS of MATCH operator */
209783209871
const char *zExpr,
209784209872
Fts5Expr **ppNew,
209785209873
char **pzErr
209874
+);
209875
+static int sqlite3Fts5ExprPattern(
209876
+ Fts5Config *pConfig,
209877
+ int bGlob,
209878
+ int iCol,
209879
+ const char *zText,
209880
+ Fts5Expr **pp
209786209881
);
209787209882
209788209883
/*
209789209884
** for(rc = sqlite3Fts5ExprFirst(pExpr, pIdx, bDesc);
209790209885
** rc==SQLITE_OK && 0==sqlite3Fts5ExprEof(pExpr);
@@ -209890,10 +209985,14 @@
209890209985
/**************************************************************************
209891209986
** Interface to code in fts5_tokenizer.c.
209892209987
*/
209893209988
209894209989
static int sqlite3Fts5TokenizerInit(fts5_api*);
209990
+static int sqlite3Fts5TokenizerPattern(
209991
+ int (*xCreate)(void*, const char**, int, Fts5Tokenizer**),
209992
+ Fts5Tokenizer *pTok
209993
+);
209895209994
/*
209896209995
** End of interface to code in fts5_tokenizer.c.
209897209996
**************************************************************************/
209898209997
209899209998
/**************************************************************************
@@ -212869,11 +212968,11 @@
212869212968
if( p==0 ){
212870212969
*pzErr = sqlite3_mprintf("parse error in tokenize directive");
212871212970
rc = SQLITE_ERROR;
212872212971
}else{
212873212972
rc = sqlite3Fts5GetTokenizer(pGlobal,
212874
- (const char**)azArg, (int)nArg, &pConfig->pTok, &pConfig->pTokApi,
212973
+ (const char**)azArg, (int)nArg, pConfig,
212875212974
pzErr
212876212975
);
212877212976
}
212878212977
}
212879212978
}
@@ -212941,13 +213040,11 @@
212941213040
** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error
212942213041
** code if an error occurs.
212943213042
*/
212944213043
static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){
212945213044
assert( pConfig->pTok==0 && pConfig->pTokApi==0 );
212946
- return sqlite3Fts5GetTokenizer(
212947
- pGlobal, 0, 0, &pConfig->pTok, &pConfig->pTokApi, 0
212948
- );
213045
+ return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0);
212949213046
}
212950213047
212951213048
/*
212952213049
** Gobble up the first bareword or quoted word from the input buffer zIn.
212953213050
** Return a pointer to the character immediately following the last in
@@ -213635,10 +213732,11 @@
213635213732
char *zErr;
213636213733
int rc;
213637213734
int nPhrase; /* Size of apPhrase array */
213638213735
Fts5ExprPhrase **apPhrase; /* Array of all phrases */
213639213736
Fts5ExprNode *pExpr; /* Result of a successful parse */
213737
+ int bPhraseToAnd; /* Convert "a+b" to "a AND b" */
213640213738
};
213641213739
213642213740
static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){
213643213741
va_list ap;
213644213742
va_start(ap, zFmt);
@@ -213723,10 +213821,11 @@
213723213821
static void *fts5ParseAlloc(u64 t){ return sqlite3_malloc64((sqlite3_int64)t);}
213724213822
static void fts5ParseFree(void *p){ sqlite3_free(p); }
213725213823
213726213824
static int sqlite3Fts5ExprNew(
213727213825
Fts5Config *pConfig, /* FTS5 Configuration */
213826
+ int bPhraseToAnd,
213728213827
int iCol,
213729213828
const char *zExpr, /* Expression text */
213730213829
Fts5Expr **ppNew,
213731213830
char **pzErr
213732213831
){
@@ -213738,10 +213837,11 @@
213738213837
Fts5Expr *pNew;
213739213838
213740213839
*ppNew = 0;
213741213840
*pzErr = 0;
213742213841
memset(&sParse, 0, sizeof(sParse));
213842
+ sParse.bPhraseToAnd = bPhraseToAnd;
213743213843
pEngine = sqlite3Fts5ParserAlloc(fts5ParseAlloc);
213744213844
if( pEngine==0 ){ return SQLITE_NOMEM; }
213745213845
sParse.pConfig = pConfig;
213746213846
213747213847
do {
@@ -213780,10 +213880,11 @@
213780213880
}
213781213881
pNew->pIndex = 0;
213782213882
pNew->pConfig = pConfig;
213783213883
pNew->apExprPhrase = sParse.apPhrase;
213784213884
pNew->nPhrase = sParse.nPhrase;
213885
+ pNew->bDesc = 0;
213785213886
sParse.apPhrase = 0;
213786213887
}
213787213888
}else{
213788213889
sqlite3Fts5ParseNodeFree(sParse.pExpr);
213789213890
}
@@ -213790,10 +213891,85 @@
213790213891
213791213892
sqlite3_free(sParse.apPhrase);
213792213893
*pzErr = sParse.zErr;
213793213894
return sParse.rc;
213794213895
}
213896
+
213897
+/*
213898
+** This function is only called when using the special 'trigram' tokenizer.
213899
+** Argument zText contains the text of a LIKE or GLOB pattern matched
213900
+** against column iCol. This function creates and compiles an FTS5 MATCH
213901
+** expression that will match a superset of the rows matched by the LIKE or
213902
+** GLOB. If successful, SQLITE_OK is returned. Otherwise, an SQLite error
213903
+** code.
213904
+*/
213905
+static int sqlite3Fts5ExprPattern(
213906
+ Fts5Config *pConfig, int bGlob, int iCol, const char *zText, Fts5Expr **pp
213907
+){
213908
+ i64 nText = strlen(zText);
213909
+ char *zExpr = (char*)sqlite3_malloc64(nText*4 + 1);
213910
+ int rc = SQLITE_OK;
213911
+
213912
+ if( zExpr==0 ){
213913
+ rc = SQLITE_NOMEM;
213914
+ }else{
213915
+ char aSpec[3];
213916
+ int iOut = 0;
213917
+ int i = 0;
213918
+ int iFirst = 0;
213919
+
213920
+ if( bGlob==0 ){
213921
+ aSpec[0] = '_';
213922
+ aSpec[1] = '%';
213923
+ aSpec[2] = 0;
213924
+ }else{
213925
+ aSpec[0] = '*';
213926
+ aSpec[1] = '?';
213927
+ aSpec[2] = '[';
213928
+ }
213929
+
213930
+ while( i<=nText ){
213931
+ if( i==nText
213932
+ || zText[i]==aSpec[0] || zText[i]==aSpec[1] || zText[i]==aSpec[2]
213933
+ ){
213934
+ if( i-iFirst>=3 ){
213935
+ int jj;
213936
+ zExpr[iOut++] = '"';
213937
+ for(jj=iFirst; jj<i; jj++){
213938
+ zExpr[iOut++] = zText[jj];
213939
+ if( zText[jj]=='"' ) zExpr[iOut++] = '"';
213940
+ }
213941
+ zExpr[iOut++] = '"';
213942
+ zExpr[iOut++] = ' ';
213943
+ }
213944
+ if( zText[i]==aSpec[2] ){
213945
+ i += 2;
213946
+ if( zText[i-1]=='^' ) i++;
213947
+ while( i<nText && zText[i]!=']' ) i++;
213948
+ }
213949
+ iFirst = i+1;
213950
+ }
213951
+ i++;
213952
+ }
213953
+ if( iOut>0 ){
213954
+ int bAnd = 0;
213955
+ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
213956
+ bAnd = 1;
213957
+ if( pConfig->eDetail==FTS5_DETAIL_NONE ){
213958
+ iCol = pConfig->nCol;
213959
+ }
213960
+ }
213961
+ zExpr[iOut] = '\0';
213962
+ rc = sqlite3Fts5ExprNew(pConfig, bAnd, iCol, zExpr, pp,pConfig->pzErrmsg);
213963
+ }else{
213964
+ *pp = 0;
213965
+ }
213966
+ sqlite3_free(zExpr);
213967
+ }
213968
+
213969
+ return rc;
213970
+}
213795213971
213796213972
/*
213797213973
** Free the expression node object passed as the only argument.
213798213974
*/
213799213975
static void sqlite3Fts5ParseNodeFree(Fts5ExprNode *p){
@@ -215167,10 +215343,24 @@
215167215343
215168215344
static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p){
215169215345
assert( pParse->pExpr==0 );
215170215346
pParse->pExpr = p;
215171215347
}
215348
+
215349
+static int parseGrowPhraseArray(Fts5Parse *pParse){
215350
+ if( (pParse->nPhrase % 8)==0 ){
215351
+ sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
215352
+ Fts5ExprPhrase **apNew;
215353
+ apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte);
215354
+ if( apNew==0 ){
215355
+ pParse->rc = SQLITE_NOMEM;
215356
+ return SQLITE_NOMEM;
215357
+ }
215358
+ pParse->apPhrase = apNew;
215359
+ }
215360
+ return SQLITE_OK;
215361
+}
215172215362
215173215363
/*
215174215364
** This function is called by the parser to process a string token. The
215175215365
** string may or may not be quoted. In any case it is tokenized and a
215176215366
** phrase object consisting of all tokens returned.
@@ -215203,20 +215393,13 @@
215203215393
fts5ExprPhraseFree(sCtx.pPhrase);
215204215394
sCtx.pPhrase = 0;
215205215395
}else{
215206215396
215207215397
if( pAppend==0 ){
215208
- if( (pParse->nPhrase % 8)==0 ){
215209
- sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
215210
- Fts5ExprPhrase **apNew;
215211
- apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte);
215212
- if( apNew==0 ){
215213
- pParse->rc = SQLITE_NOMEM;
215214
- fts5ExprPhraseFree(sCtx.pPhrase);
215215
- return 0;
215216
- }
215217
- pParse->apPhrase = apNew;
215398
+ if( parseGrowPhraseArray(pParse) ){
215399
+ fts5ExprPhraseFree(sCtx.pPhrase);
215400
+ return 0;
215218215401
}
215219215402
pParse->nPhrase++;
215220215403
}
215221215404
215222215405
if( sCtx.pPhrase==0 ){
@@ -215618,10 +215801,71 @@
215618215801
sqlite3_free(pSub);
215619215802
}else{
215620215803
p->apChild[p->nChild++] = pSub;
215621215804
}
215622215805
}
215806
+
215807
+/*
215808
+** This function is used when parsing LIKE or GLOB patterns against
215809
+** trigram indexes that specify either detail=column or detail=none.
215810
+** It converts a phrase:
215811
+**
215812
+** abc + def + ghi
215813
+**
215814
+** into an AND tree:
215815
+**
215816
+** abc AND def AND ghi
215817
+*/
215818
+static Fts5ExprNode *fts5ParsePhraseToAnd(
215819
+ Fts5Parse *pParse,
215820
+ Fts5ExprNearset *pNear
215821
+){
215822
+ int nTerm = pNear->apPhrase[0]->nTerm;
215823
+ int ii;
215824
+ int nByte;
215825
+ Fts5ExprNode *pRet;
215826
+
215827
+ assert( pNear->nPhrase==1 );
215828
+ assert( pParse->bPhraseToAnd );
215829
+
215830
+ nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*);
215831
+ pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
215832
+ if( pRet ){
215833
+ pRet->eType = FTS5_AND;
215834
+ pRet->nChild = nTerm;
215835
+ fts5ExprAssignXNext(pRet);
215836
+ pParse->nPhrase--;
215837
+ for(ii=0; ii<nTerm; ii++){
215838
+ Fts5ExprPhrase *pPhrase = (Fts5ExprPhrase*)sqlite3Fts5MallocZero(
215839
+ &pParse->rc, sizeof(Fts5ExprPhrase)
215840
+ );
215841
+ if( pPhrase ){
215842
+ if( parseGrowPhraseArray(pParse) ){
215843
+ fts5ExprPhraseFree(pPhrase);
215844
+ }else{
215845
+ pParse->apPhrase[pParse->nPhrase++] = pPhrase;
215846
+ pPhrase->nTerm = 1;
215847
+ pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup(
215848
+ &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1
215849
+ );
215850
+ pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING,
215851
+ 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase)
215852
+ );
215853
+ }
215854
+ }
215855
+ }
215856
+
215857
+ if( pParse->rc ){
215858
+ sqlite3Fts5ParseNodeFree(pRet);
215859
+ pRet = 0;
215860
+ }else{
215861
+ sqlite3Fts5ParseNearsetFree(pNear);
215862
+ }
215863
+ }
215864
+
215865
+ return pRet;
215866
+}
215623215867
215624215868
/*
215625215869
** Allocate and return a new expression object. If anything goes wrong (i.e.
215626215870
** OOM error), leave an error code in pParse and return NULL.
215627215871
*/
@@ -215643,55 +215887,62 @@
215643215887
);
215644215888
if( eType==FTS5_STRING && pNear==0 ) return 0;
215645215889
if( eType!=FTS5_STRING && pLeft==0 ) return pRight;
215646215890
if( eType!=FTS5_STRING && pRight==0 ) return pLeft;
215647215891
215648
- if( eType==FTS5_NOT ){
215649
- nChild = 2;
215650
- }else if( eType==FTS5_AND || eType==FTS5_OR ){
215651
- nChild = 2;
215652
- if( pLeft->eType==eType ) nChild += pLeft->nChild-1;
215653
- if( pRight->eType==eType ) nChild += pRight->nChild-1;
215654
- }
215655
-
215656
- nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1);
215657
- pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
215658
-
215659
- if( pRet ){
215660
- pRet->eType = eType;
215661
- pRet->pNear = pNear;
215662
- fts5ExprAssignXNext(pRet);
215663
- if( eType==FTS5_STRING ){
215664
- int iPhrase;
215665
- for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
215666
- pNear->apPhrase[iPhrase]->pNode = pRet;
215667
- if( pNear->apPhrase[iPhrase]->nTerm==0 ){
215668
- pRet->xNext = 0;
215669
- pRet->eType = FTS5_EOF;
215670
- }
215671
- }
215672
-
215673
- if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){
215674
- Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
215675
- if( pNear->nPhrase!=1
215676
- || pPhrase->nTerm>1
215677
- || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst)
215678
- ){
215679
- assert( pParse->rc==SQLITE_OK );
215680
- pParse->rc = SQLITE_ERROR;
215681
- assert( pParse->zErr==0 );
215682
- pParse->zErr = sqlite3_mprintf(
215683
- "fts5: %s queries are not supported (detail!=full)",
215684
- pNear->nPhrase==1 ? "phrase": "NEAR"
215685
- );
215686
- sqlite3_free(pRet);
215687
- pRet = 0;
215688
- }
215689
- }
215690
- }else{
215691
- fts5ExprAddChildren(pRet, pLeft);
215692
- fts5ExprAddChildren(pRet, pRight);
215892
+ if( eType==FTS5_STRING
215893
+ && pParse->bPhraseToAnd
215894
+ && pNear->apPhrase[0]->nTerm>1
215895
+ ){
215896
+ pRet = fts5ParsePhraseToAnd(pParse, pNear);
215897
+ }else{
215898
+ if( eType==FTS5_NOT ){
215899
+ nChild = 2;
215900
+ }else if( eType==FTS5_AND || eType==FTS5_OR ){
215901
+ nChild = 2;
215902
+ if( pLeft->eType==eType ) nChild += pLeft->nChild-1;
215903
+ if( pRight->eType==eType ) nChild += pRight->nChild-1;
215904
+ }
215905
+
215906
+ nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1);
215907
+ pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
215908
+
215909
+ if( pRet ){
215910
+ pRet->eType = eType;
215911
+ pRet->pNear = pNear;
215912
+ fts5ExprAssignXNext(pRet);
215913
+ if( eType==FTS5_STRING ){
215914
+ int iPhrase;
215915
+ for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
215916
+ pNear->apPhrase[iPhrase]->pNode = pRet;
215917
+ if( pNear->apPhrase[iPhrase]->nTerm==0 ){
215918
+ pRet->xNext = 0;
215919
+ pRet->eType = FTS5_EOF;
215920
+ }
215921
+ }
215922
+
215923
+ if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){
215924
+ Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
215925
+ if( pNear->nPhrase!=1
215926
+ || pPhrase->nTerm>1
215927
+ || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst)
215928
+ ){
215929
+ assert( pParse->rc==SQLITE_OK );
215930
+ pParse->rc = SQLITE_ERROR;
215931
+ assert( pParse->zErr==0 );
215932
+ pParse->zErr = sqlite3_mprintf(
215933
+ "fts5: %s queries are not supported (detail!=full)",
215934
+ pNear->nPhrase==1 ? "phrase": "NEAR"
215935
+ );
215936
+ sqlite3_free(pRet);
215937
+ pRet = 0;
215938
+ }
215939
+ }
215940
+ }else{
215941
+ fts5ExprAddChildren(pRet, pLeft);
215942
+ fts5ExprAddChildren(pRet, pRight);
215943
+ }
215693215944
}
215694215945
}
215695215946
}
215696215947
215697215948
if( pRet==0 ){
@@ -216041,11 +216292,11 @@
216041216292
zExpr = (const char*)sqlite3_value_text(apVal[0]);
216042216293
if( zExpr==0 ) zExpr = "";
216043216294
216044216295
rc = sqlite3Fts5ConfigParse(pGlobal, db, nConfig, azConfig, &pConfig, &zErr);
216045216296
if( rc==SQLITE_OK ){
216046
- rc = sqlite3Fts5ExprNew(pConfig, pConfig->nCol, zExpr, &pExpr, &zErr);
216297
+ rc = sqlite3Fts5ExprNew(pConfig, 0, pConfig->nCol, zExpr, &pExpr, &zErr);
216047216298
}
216048216299
if( rc==SQLITE_OK ){
216049216300
char *zText;
216050216301
if( pExpr->pRoot->xNext==0 ){
216051216302
zText = sqlite3_mprintf("");
@@ -216746,12 +216997,13 @@
216746216997
pPtr = (u8*)p;
216747216998
216748216999
/* If this is a new rowid, append the 4-byte size field for the previous
216749217000
** entry, and the new rowid for this entry. */
216750217001
if( iRowid!=p->iRowid ){
217002
+ u64 iDiff = (u64)iRowid - (u64)p->iRowid;
216751217003
fts5HashAddPoslistSize(pHash, p, 0);
216752
- p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iRowid - p->iRowid);
217004
+ p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iDiff);
216753217005
p->iRowid = iRowid;
216754217006
bNew = 1;
216755217007
p->iSzPoslist = p->nData;
216756217008
if( pHash->eDetail!=FTS5_DETAIL_NONE ){
216757217009
p->nData += 1;
@@ -218722,11 +218974,11 @@
218722218974
n = pIter->iEndofDoclist;
218723218975
}
218724218976
218725218977
ASSERT_SZLEAF_OK(pIter->pLeaf);
218726218978
while( 1 ){
218727
- i64 iDelta = 0;
218979
+ u64 iDelta = 0;
218728218980
218729218981
if( eDetail==FTS5_DETAIL_NONE ){
218730218982
/* todo */
218731218983
if( i<n && a[i]==0 ){
218732218984
i++;
@@ -218737,11 +218989,11 @@
218737218989
int bDummy;
218738218990
i += fts5GetPoslistSize(&a[i], &nPos, &bDummy);
218739218991
i += nPos;
218740218992
}
218741218993
if( i>=n ) break;
218742
- i += fts5GetVarint(&a[i], (u64*)&iDelta);
218994
+ i += fts5GetVarint(&a[i], &iDelta);
218743218995
pIter->iRowid += iDelta;
218744218996
218745218997
/* If necessary, grow the pIter->aRowidOffset[] array. */
218746218998
if( iRowidOffset>=pIter->nRowidOffset ){
218747218999
int nNew = pIter->nRowidOffset + 8;
@@ -218836,20 +219088,20 @@
218836219088
UNUSED_PARAM(pbUnused);
218837219089
218838219090
if( pIter->iRowidOffset>0 ){
218839219091
u8 *a = pIter->pLeaf->p;
218840219092
int iOff;
218841
- i64 iDelta;
219093
+ u64 iDelta;
218842219094
218843219095
pIter->iRowidOffset--;
218844219096
pIter->iLeafOffset = pIter->aRowidOffset[pIter->iRowidOffset];
218845219097
fts5SegIterLoadNPos(p, pIter);
218846219098
iOff = pIter->iLeafOffset;
218847219099
if( p->pConfig->eDetail!=FTS5_DETAIL_NONE ){
218848219100
iOff += pIter->nPos;
218849219101
}
218850
- fts5GetVarint(&a[iOff], (u64*)&iDelta);
219102
+ fts5GetVarint(&a[iOff], &iDelta);
218851219103
pIter->iRowid -= iDelta;
218852219104
}else{
218853219105
fts5SegIterReverseNewPage(p, pIter);
218854219106
}
218855219107
}
@@ -224064,10 +224316,27 @@
224064224316
{
224065224317
pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
224066224318
}
224067224319
#endif
224068224320
}
224321
+
224322
+static int fts5UsePatternMatch(
224323
+ Fts5Config *pConfig,
224324
+ struct sqlite3_index_constraint *p
224325
+){
224326
+ assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB );
224327
+ assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE );
224328
+ if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){
224329
+ return 1;
224330
+ }
224331
+ if( pConfig->ePattern==FTS5_PATTERN_LIKE
224332
+ && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB)
224333
+ ){
224334
+ return 1;
224335
+ }
224336
+ return 0;
224337
+}
224069224338
224070224339
/*
224071224340
** Implementation of the xBestIndex method for FTS5 tables. Within the
224072224341
** WHERE constraint, it searches for the following:
224073224342
**
@@ -224094,11 +224363,13 @@
224094224363
** idxStr is used to encode data from the WHERE clause. For each argument
224095224364
** passed to the xFilter method, the following is appended to idxStr:
224096224365
**
224097224366
** Match against table column: "m"
224098224367
** Match against rank column: "r"
224099
-** Match against other column: "<column-number>"
224368
+** Match against other column: "M<column-number>"
224369
+** LIKE against other column: "L<column-number>"
224370
+** GLOB against other column: "G<column-number>"
224100224371
** Equality constraint against the rowid: "="
224101224372
** A < or <= against the rowid: "<"
224102224373
** A > or >= against the rowid: ">"
224103224374
**
224104224375
** This function ensures that there is at most one "r" or "=". And that if
@@ -224155,11 +224426,11 @@
224155224426
"recursively defined fts5 content table"
224156224427
);
224157224428
return SQLITE_ERROR;
224158224429
}
224159224430
224160
- idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 6 + 1);
224431
+ idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 8 + 1);
224161224432
if( idxStr==0 ) return SQLITE_NOMEM;
224162224433
pInfo->idxStr = idxStr;
224163224434
pInfo->needToFreeIdxStr = 1;
224164224435
224165224436
for(i=0; i<pInfo->nConstraint; i++){
@@ -224179,29 +224450,33 @@
224179224450
}else{
224180224451
if( iCol==nCol+1 ){
224181224452
if( bSeenRank ) continue;
224182224453
idxStr[iIdxStr++] = 'r';
224183224454
bSeenRank = 1;
224184
- }else{
224455
+ }else if( iCol>=0 ){
224185224456
bSeenMatch = 1;
224186
- idxStr[iIdxStr++] = 'm';
224187
- if( iCol<nCol ){
224188
- sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
224189
- idxStr += strlen(&idxStr[iIdxStr]);
224190
- assert( idxStr[iIdxStr]=='\0' );
224191
- }
224457
+ idxStr[iIdxStr++] = 'M';
224458
+ sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
224459
+ idxStr += strlen(&idxStr[iIdxStr]);
224460
+ assert( idxStr[iIdxStr]=='\0' );
224192224461
}
224193224462
pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224194224463
pInfo->aConstraintUsage[i].omit = 1;
224195224464
}
224196
- }
224197
- else if( p->usable && bSeenEq==0
224198
- && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0
224199
- ){
224200
- idxStr[iIdxStr++] = '=';
224201
- bSeenEq = 1;
224202
- pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224465
+ }else if( p->usable ){
224466
+ if( iCol>=0 && iCol<nCol && fts5UsePatternMatch(pConfig, p) ){
224467
+ assert( p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB );
224468
+ idxStr[iIdxStr++] = p->op==FTS5_PATTERN_LIKE ? 'L' : 'G';
224469
+ sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
224470
+ idxStr += strlen(&idxStr[iIdxStr]);
224471
+ pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224472
+ assert( idxStr[iIdxStr]=='\0' );
224473
+ }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){
224474
+ idxStr[iIdxStr++] = '=';
224475
+ bSeenEq = 1;
224476
+ pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224477
+ }
224203224478
}
224204224479
}
224205224480
224206224481
if( bSeenEq==0 ){
224207224482
for(i=0; i<pInfo->nConstraint; i++){
@@ -224830,41 +225105,55 @@
224830225105
for(i=0; i<nVal; i++){
224831225106
switch( idxStr[iIdxStr++] ){
224832225107
case 'r':
224833225108
pRank = apVal[i];
224834225109
break;
224835
- case 'm': {
225110
+ case 'M': {
224836225111
const char *zText = (const char*)sqlite3_value_text(apVal[i]);
224837225112
if( zText==0 ) zText = "";
224838
-
224839
- if( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ){
224840
- iCol = 0;
224841
- do{
224842
- iCol = iCol*10 + (idxStr[iIdxStr]-'0');
224843
- iIdxStr++;
224844
- }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
224845
- }else{
224846
- iCol = pConfig->nCol;
224847
- }
225113
+ iCol = 0;
225114
+ do{
225115
+ iCol = iCol*10 + (idxStr[iIdxStr]-'0');
225116
+ iIdxStr++;
225117
+ }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
224848225118
224849225119
if( zText[0]=='*' ){
224850225120
/* The user has issued a query of the form "MATCH '*...'". This
224851225121
** indicates that the MATCH expression is not a full text query,
224852225122
** but a request for an internal parameter. */
224853225123
rc = fts5SpecialMatch(pTab, pCsr, &zText[1]);
224854225124
goto filter_out;
224855225125
}else{
224856225126
char **pzErr = &pTab->p.base.zErrMsg;
224857
- rc = sqlite3Fts5ExprNew(pConfig, iCol, zText, &pExpr, pzErr);
225127
+ rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr);
224858225128
if( rc==SQLITE_OK ){
224859225129
rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);
224860225130
pExpr = 0;
224861225131
}
224862225132
if( rc!=SQLITE_OK ) goto filter_out;
224863225133
}
224864225134
224865225135
break;
225136
+ }
225137
+ case 'L':
225138
+ case 'G': {
225139
+ int bGlob = (idxStr[iIdxStr-1]=='G');
225140
+ const char *zText = (const char*)sqlite3_value_text(apVal[i]);
225141
+ iCol = 0;
225142
+ do{
225143
+ iCol = iCol*10 + (idxStr[iIdxStr]-'0');
225144
+ iIdxStr++;
225145
+ }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
225146
+ if( zText ){
225147
+ rc = sqlite3Fts5ExprPattern(pConfig, bGlob, iCol, zText, &pExpr);
225148
+ }
225149
+ if( rc==SQLITE_OK ){
225150
+ rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);
225151
+ pExpr = 0;
225152
+ }
225153
+ if( rc!=SQLITE_OK ) goto filter_out;
225154
+ break;
224866225155
}
224867225156
case '=':
224868225157
pRowidEq = apVal[i];
224869225158
break;
224870225159
case '<':
@@ -226273,12 +226562,11 @@
226273226562
226274226563
static int sqlite3Fts5GetTokenizer(
226275226564
Fts5Global *pGlobal,
226276226565
const char **azArg,
226277226566
int nArg,
226278
- Fts5Tokenizer **ppTok,
226279
- fts5_tokenizer **ppTokApi,
226567
+ Fts5Config *pConfig,
226280226568
char **pzErr
226281226569
){
226282226570
Fts5TokenizerModule *pMod;
226283226571
int rc = SQLITE_OK;
226284226572
@@ -226286,20 +226574,26 @@
226286226574
if( pMod==0 ){
226287226575
assert( nArg>0 );
226288226576
rc = SQLITE_ERROR;
226289226577
*pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
226290226578
}else{
226291
- rc = pMod->x.xCreate(pMod->pUserData, &azArg[1], (nArg?nArg-1:0), ppTok);
226292
- *ppTokApi = &pMod->x;
226293
- if( rc!=SQLITE_OK && pzErr ){
226294
- *pzErr = sqlite3_mprintf("error in tokenizer constructor");
226579
+ rc = pMod->x.xCreate(
226580
+ pMod->pUserData, &azArg[1], (nArg?nArg-1:0), &pConfig->pTok
226581
+ );
226582
+ pConfig->pTokApi = &pMod->x;
226583
+ if( rc!=SQLITE_OK ){
226584
+ if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor");
226585
+ }else{
226586
+ pConfig->ePattern = sqlite3Fts5TokenizerPattern(
226587
+ pMod->x.xCreate, pConfig->pTok
226588
+ );
226295226589
}
226296226590
}
226297226591
226298226592
if( rc!=SQLITE_OK ){
226299
- *ppTokApi = 0;
226300
- *ppTok = 0;
226593
+ pConfig->pTokApi = 0;
226594
+ pConfig->pTok = 0;
226301226595
}
226302226596
226303226597
return rc;
226304226598
}
226305226599
@@ -226344,11 +226638,11 @@
226344226638
int nArg, /* Number of args */
226345226639
sqlite3_value **apUnused /* Function arguments */
226346226640
){
226347226641
assert( nArg==0 );
226348226642
UNUSED_PARAM2(nArg, apUnused);
226349
- sqlite3_result_text(pCtx, "fts5: 2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c402e5", -1, SQLITE_TRANSIENT);
226643
+ sqlite3_result_text(pCtx, "fts5: 2020-10-12 18:09:16 7e17c2f4b7dc9b563d0b4da949bb134dc7c4fc9c86ce03891432a884ca6409d5", -1, SQLITE_TRANSIENT);
226350226644
}
226351226645
226352226646
/*
226353226647
** Return true if zName is the extension on one of the shadow tables used
226354226648
** by this module.
@@ -228897,10 +229191,135 @@
228897229191
sCtx.aBuf = p->aBuf;
228898229192
return p->tokenizer.xTokenize(
228899229193
p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb
228900229194
);
228901229195
}
229196
+
229197
+/**************************************************************************
229198
+** Start of trigram implementation.
229199
+*/
229200
+typedef struct TrigramTokenizer TrigramTokenizer;
229201
+struct TrigramTokenizer {
229202
+ int bFold; /* True to fold to lower-case */
229203
+};
229204
+
229205
+/*
229206
+** Free a trigram tokenizer.
229207
+*/
229208
+static void fts5TriDelete(Fts5Tokenizer *p){
229209
+ sqlite3_free(p);
229210
+}
229211
+
229212
+/*
229213
+** Allocate a trigram tokenizer.
229214
+*/
229215
+static int fts5TriCreate(
229216
+ void *pCtx,
229217
+ const char **azArg,
229218
+ int nArg,
229219
+ Fts5Tokenizer **ppOut
229220
+){
229221
+ int rc = SQLITE_OK;
229222
+ TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew));
229223
+ if( pNew==0 ){
229224
+ rc = SQLITE_NOMEM;
229225
+ }else{
229226
+ int i;
229227
+ pNew->bFold = 1;
229228
+ for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
229229
+ const char *zArg = azArg[i+1];
229230
+ if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
229231
+ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){
229232
+ rc = SQLITE_ERROR;
229233
+ }else{
229234
+ pNew->bFold = (zArg[0]=='0');
229235
+ }
229236
+ }else{
229237
+ rc = SQLITE_ERROR;
229238
+ }
229239
+ }
229240
+ if( rc!=SQLITE_OK ){
229241
+ fts5TriDelete((Fts5Tokenizer*)pNew);
229242
+ pNew = 0;
229243
+ }
229244
+ }
229245
+ *ppOut = (Fts5Tokenizer*)pNew;
229246
+ return rc;
229247
+}
229248
+
229249
+/*
229250
+** Trigram tokenizer tokenize routine.
229251
+*/
229252
+static int fts5TriTokenize(
229253
+ Fts5Tokenizer *pTok,
229254
+ void *pCtx,
229255
+ int flags,
229256
+ const char *pText, int nText,
229257
+ int (*xToken)(void*, int, const char*, int, int, int)
229258
+){
229259
+ TrigramTokenizer *p = (TrigramTokenizer*)pTok;
229260
+ int rc = SQLITE_OK;
229261
+ char aBuf[32];
229262
+ const unsigned char *zIn = (const unsigned char*)pText;
229263
+ const unsigned char *zEof = &zIn[nText];
229264
+ u32 iCode;
229265
+
229266
+ while( 1 ){
229267
+ char *zOut = aBuf;
229268
+ int iStart = zIn - (const unsigned char*)pText;
229269
+ const unsigned char *zNext;
229270
+
229271
+ READ_UTF8(zIn, zEof, iCode);
229272
+ if( iCode==0 ) break;
229273
+ zNext = zIn;
229274
+ if( zIn<zEof ){
229275
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
229276
+ WRITE_UTF8(zOut, iCode);
229277
+ READ_UTF8(zIn, zEof, iCode);
229278
+ if( iCode==0 ) break;
229279
+ }else{
229280
+ break;
229281
+ }
229282
+ if( zIn<zEof ){
229283
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
229284
+ WRITE_UTF8(zOut, iCode);
229285
+ READ_UTF8(zIn, zEof, iCode);
229286
+ if( iCode==0 ) break;
229287
+ if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
229288
+ WRITE_UTF8(zOut, iCode);
229289
+ }else{
229290
+ break;
229291
+ }
229292
+ rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf);
229293
+ if( rc!=SQLITE_OK ) break;
229294
+ zIn = zNext;
229295
+ }
229296
+
229297
+ return rc;
229298
+}
229299
+
229300
+/*
229301
+** Argument xCreate is a pointer to a constructor function for a tokenizer.
229302
+** pTok is a tokenizer previously created using the same method. This function
229303
+** returns one of FTS5_PATTERN_NONE, FTS5_PATTERN_LIKE or FTS5_PATTERN_GLOB
229304
+** indicating the style of pattern matching that the tokenizer can support.
229305
+** In practice, this is:
229306
+**
229307
+** "trigram" tokenizer, case_sensitive=1 - FTS5_PATTERN_GLOB
229308
+** "trigram" tokenizer, case_sensitive=0 (the default) - FTS5_PATTERN_LIKE
229309
+** all other tokenizers - FTS5_PATTERN_NONE
229310
+*/
229311
+static int sqlite3Fts5TokenizerPattern(
229312
+ int (*xCreate)(void*, const char**, int, Fts5Tokenizer**),
229313
+ Fts5Tokenizer *pTok
229314
+){
229315
+ if( xCreate==fts5TriCreate ){
229316
+ TrigramTokenizer *p = (TrigramTokenizer*)pTok;
229317
+ return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
229318
+ }
229319
+ return FTS5_PATTERN_NONE;
229320
+}
228902229321
228903229322
/*
228904229323
** Register all built-in tokenizers with FTS5.
228905229324
*/
228906229325
static int sqlite3Fts5TokenizerInit(fts5_api *pApi){
@@ -228909,10 +229328,11 @@
228909229328
fts5_tokenizer x;
228910229329
} aBuiltin[] = {
228911229330
{ "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}},
228912229331
{ "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }},
228913229332
{ "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }},
229333
+ { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}},
228914229334
};
228915229335
228916229336
int rc = SQLITE_OK; /* Return code */
228917229337
int i; /* To iterate through builtin functions */
228918229338
@@ -231140,12 +231560,12 @@
231140231560
}
231141231561
#endif /* SQLITE_CORE */
231142231562
#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */
231143231563
231144231564
/************** End of stmt.c ************************************************/
231145
-#if __LINE__!=231145
231565
+#if __LINE__!=231565
231146231566
#undef SQLITE_SOURCE_ID
231147
-#define SQLITE_SOURCE_ID "2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c4alt2"
231567
+#define SQLITE_SOURCE_ID "2020-10-19 20:49:54 75a0288871ccb2a69a636cbb328fe19045a0d0ef96a193ecd118b9a19678alt2"
231148231568
#endif
231149231569
/* Return the source-id for this library */
231150231570
SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
231151231571
/************************** End of sqlite3.c ******************************/
231152231572
--- src/sqlite3.c
+++ src/sqlite3.c
@@ -1171,11 +1171,11 @@
1171 ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
1172 ** [sqlite_version()] and [sqlite_source_id()].
1173 */
1174 #define SQLITE_VERSION "3.34.0"
1175 #define SQLITE_VERSION_NUMBER 3034000
1176 #define SQLITE_SOURCE_ID "2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c402e5"
1177
1178 /*
1179 ** CAPI3REF: Run-Time Library Version Numbers
1180 ** KEYWORDS: sqlite3_version sqlite3_sourceid
1181 **
@@ -10290,22 +10290,29 @@
10290
10291 /*
10292 ** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
10293 **
10294 ** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
10295 ** method of a [virtual table], then it returns true if and only if the
10296 ** column is being fetched as part of an UPDATE operation during which the
10297 ** column value will not change. Applications might use this to substitute
10298 ** a return value that is less expensive to compute and that the corresponding
 
10299 ** [xUpdate] method understands as a "no-change" value.
10300 **
10301 ** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
10302 ** the column is not changed by the UPDATE statement, then the xColumn
10303 ** method can optionally return without setting a result, without calling
10304 ** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
10305 ** In that case, [sqlite3_value_nochange(X)] will return true for the
10306 ** same column in the [xUpdate] method.
 
 
 
 
 
 
10307 */
10308 SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
10309
10310 /*
10311 ** CAPI3REF: Determine The Collation For a Virtual Table Constraint
@@ -46915,11 +46922,15 @@
46915 }else{
46916 /* Opens a file, only if it exists. */
46917 dwCreationDisposition = OPEN_EXISTING;
46918 }
46919
46920 dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
 
 
 
 
46921
46922 if( isDelete ){
46923 #if SQLITE_OS_WINCE
46924 dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN;
46925 isTemp = 1;
@@ -48068,15 +48079,18 @@
48068
48069 /*
48070 ** Close an memdb-file.
48071 **
48072 ** The pData pointer is owned by the application, so there is nothing
48073 ** to free.
 
48074 */
48075 static int memdbClose(sqlite3_file *pFile){
48076 MemFile *p = (MemFile *)pFile;
48077 if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ) sqlite3_free(p->aData);
 
 
48078 return SQLITE_OK;
48079 }
48080
48081 /*
48082 ** Read data from an memdb-file.
@@ -48531,10 +48545,11 @@
48531 p = memdbFromDbSchema(db, zSchema);
48532 if( p==0 ){
48533 rc = SQLITE_ERROR;
48534 }else{
48535 p->aData = pData;
 
48536 p->sz = szDb;
48537 p->szAlloc = szBuf;
48538 p->szMax = szBuf;
48539 if( p->szMax<sqlite3GlobalConfig.mxMemdbSize ){
48540 p->szMax = sqlite3GlobalConfig.mxMemdbSize;
@@ -48543,10 +48558,13 @@
48543 rc = SQLITE_OK;
48544 }
48545
48546 end_deserialize:
48547 sqlite3_finalize(pStmt);
 
 
 
48548 sqlite3_mutex_leave(db->mutex);
48549 return rc;
48550 }
48551
48552 /*
@@ -88945,11 +88963,12 @@
88945 **
88946 ** Begin a transaction on database P1 if a transaction is not already
88947 ** active.
88948 ** If P2 is non-zero, then a write-transaction is started, or if a
88949 ** read-transaction is already active, it is upgraded to a write-transaction.
88950 ** If P2 is zero, then a read-transaction is started.
 
88951 **
88952 ** P1 is the index of the database file on which the transaction is
88953 ** started. Index 0 is the main database file and index 1 is the
88954 ** file used for temporary tables. Indices of 2 or more are used for
88955 ** attached databases.
@@ -88979,10 +88998,11 @@
88979 Btree *pBt;
88980 int iMeta = 0;
88981
88982 assert( p->bIsReader );
88983 assert( p->readOnly==0 || pOp->p2==0 );
 
88984 assert( pOp->p1>=0 && pOp->p1<db->nDb );
88985 assert( DbMaskTest(p->btreeMask, pOp->p1) );
88986 if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){
88987 rc = SQLITE_READONLY;
88988 goto abort_due_to_error;
@@ -89842,22 +89862,22 @@
89842 }
89843 break;
89844 }
89845
89846
89847 /* Opcode: SeekScan P1 * * * *
89848 ** Synopsis: Scan-ahead up to P1 rows
89849 **
89850 ** This opcode is a prefix opcode to OP_SeekGE. In other words, this
89851 ** opcode must be immediately followed by OP_SeekGE. Furthermore, the
89852 ** OP_SeekGE must be followed by OP_IdxGT. These constraints are
89853 ** checked by assert() statements.
89854 **
89855 ** This opcode uses the P1 through P4 operands of the subsequent
89856 ** OP_SeekGE. In the text that follows, the operands of the subsequent
89857 ** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only
89858 ** the P1 operand of this opcode is used, and it is denoted as This.P1.
 
89859 **
89860 ** This opcode helps to optimize IN operators on a multi-column index
89861 ** where the IN operator is on the later terms of the index by avoiding
89862 ** unnecessary seeks on the btree, substituting steps to the next row
89863 ** of the b-tree instead. A correct answer is obtained if this opcode
@@ -89871,39 +89891,43 @@
89871 ** then this opcode is a no-op and control passes through into the OP_SeekGE.
89872 **
89873 ** If the SeekGE.P1 cursor is pointing to a valid row, then that row
89874 ** might be the target row, or it might be near and slightly before the
89875 ** target row. This opcode attempts to position the cursor on the target
89876 ** row by, perhaps stepping by invoking sqlite3BtreeStep() on the cursor
89877 ** between 0 and This.P1 times.
89878 **
89879 ** There are three possible outcomes from this opcode:<ol>
89880 **
89881 ** <li> If after This.P1 steps, the cursor is still point to a place that
89882 ** is earlier in the btree than the target row,
89883 ** then fall through into the subsquence OP_SeekGE opcode.
89884 **
89885 ** <li> If the cursor is successfully moved to the target row by 0 or more
89886 ** sqlite3BtreeNext() calls, then jump to the first instruction after the
89887 ** OP_IdxGT opcode - or in other words, skip the next two opcodes.
89888 **
89889 ** <li> If the cursor ends up past the target row (indicating the the target
89890 ** row does not exist in the btree) then jump to SeekOP.P2.
89891 ** </ol>
89892 */
89893 case OP_SeekScan: {
89894 VdbeCursor *pC;
89895 int res;
89896 int n;
89897 UnpackedRecord r;
89898
89899 assert( pOp[1].opcode==OP_SeekGE );
89900 assert( pOp[2].opcode==OP_IdxGT );
89901 assert( pOp[1].p1==pOp[2].p1 );
89902 assert( pOp[1].p2==pOp[2].p2 );
89903 assert( pOp[1].p3==pOp[2].p3 );
89904 assert( pOp[1].p4.i==pOp[2].p4.i );
 
 
 
 
89905 assert( pOp->p1>0 );
89906 pC = p->apCsr[pOp[1].p1];
89907 assert( pC!=0 );
89908 assert( pC->eCurType==CURTYPE_BTREE );
89909 assert( !pC->isTable );
@@ -89913,12 +89937,12 @@
89913 printf("... cursor not valid - fall through\n");
89914 }
89915 #endif
89916 break;
89917 }
89918 n = pOp->p1;
89919 assert( n>=1 );
89920 r.pKeyInfo = pC->pKeyInfo;
89921 r.nField = (u16)pOp[1].p4.i;
89922 r.default_rc = 0;
89923 r.aMem = &aMem[pOp[1].p3];
89924 #ifdef SQLITE_DEBUG
@@ -89936,37 +89960,37 @@
89936 if( rc ) goto abort_due_to_error;
89937 if( res>0 ){
89938 seekscan_search_fail:
89939 #ifdef SQLITE_DEBUG
89940 if( db->flags&SQLITE_VdbeTrace ){
89941 printf("... %d steps and then skip\n", pOp->p1 - n);
89942 }
89943 #endif
89944 VdbeBranchTaken(1,3);
89945 pOp++;
89946 goto jump_to_p2;
89947 }
89948 if( res==0 ){
89949 #ifdef SQLITE_DEBUG
89950 if( db->flags&SQLITE_VdbeTrace ){
89951 printf("... %d steps and then success\n", pOp->p1 - n);
89952 }
89953 #endif
89954 VdbeBranchTaken(2,3);
89955 pOp += 2;
89956 break;
89957 }
89958 if( n<=0 ){
89959 #ifdef SQLITE_DEBUG
89960 if( db->flags&SQLITE_VdbeTrace ){
89961 printf("... fall through after %d steps\n", pOp->p1);
89962 }
89963 #endif
89964 VdbeBranchTaken(0,3);
89965 break;
89966 }
89967 n--;
89968 rc = sqlite3BtreeNext(pC->uc.pCursor, 0);
89969 if( rc ){
89970 if( rc==SQLITE_DONE ){
89971 rc = SQLITE_OK;
89972 goto seekscan_search_fail;
@@ -100066,11 +100090,13 @@
100066 ** SELECT * FROM t1 WHERE (select a from t1);
100067 */
100068 SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
100069 int op;
100070 while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){
100071 assert( pExpr->op==TK_COLLATE || pExpr->op==TK_IF_NULL_ROW );
 
 
100072 pExpr = pExpr->pLeft;
100073 assert( pExpr!=0 );
100074 }
100075 op = pExpr->op;
100076 if( op==TK_SELECT ){
@@ -112360,11 +112386,11 @@
112360 Table *p;
112361 int i;
112362 char *zColl; /* Dequoted name of collation sequence */
112363 sqlite3 *db;
112364
112365 if( (p = pParse->pNewTable)==0 ) return;
112366 i = p->nCol-1;
112367 db = pParse->db;
112368 zColl = sqlite3NameFromToken(db, pToken);
112369 if( !zColl ) return;
112370
@@ -115164,11 +115190,11 @@
115164 int i;
115165 struct SrcList_item *pItem;
115166 assert(pList || pParse->db->mallocFailed );
115167 if( pList ){
115168 for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
115169 if( pItem->iCursor>=0 ) break;
115170 pItem->iCursor = pParse->nTab++;
115171 if( pItem->pSelect ){
115172 sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
115173 }
115174 }
@@ -115361,11 +115387,20 @@
115361 }
115362 v = sqlite3GetVdbe(pParse);
115363 if( !v ) return;
115364 if( type!=TK_DEFERRED ){
115365 for(i=0; i<db->nDb; i++){
115366 sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1);
 
 
 
 
 
 
 
 
 
115367 sqlite3VdbeUsesBtree(v, i);
115368 }
115369 }
115370 sqlite3VdbeAddOp0(v, OP_AutoCommit);
115371 }
@@ -117348,14 +117383,10 @@
117348 ** opcode if it is present */
117349 sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity);
117350 }
117351 if( regOut ){
117352 sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
117353 if( pIdx->pTable->pSelect ){
117354 const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx);
117355 sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT);
117356 }
117357 }
117358 sqlite3ReleaseTempRange(pParse, regBase, nCol);
117359 return regBase;
117360 }
117361
@@ -131965,10 +131996,11 @@
131965 ){
131966 SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */
131967 int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */
131968 Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
131969 Select *pSetup = p->pPrior; /* The setup query */
 
131970 int addrTop; /* Top of the loop */
131971 int addrCont, addrBreak; /* CONTINUE and BREAK addresses */
131972 int iCurrent = 0; /* The Current table */
131973 int regCurrent; /* Register holding Current table */
131974 int iQueue; /* The Queue table */
@@ -132039,12 +132071,30 @@
132039 p->selFlags |= SF_UsesEphemeral;
132040 }
132041
132042 /* Detach the ORDER BY clause from the compound SELECT */
132043 p->pOrderBy = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132044
132045 /* Store the results of the setup-query in Queue. */
 
132046 pSetup->pNext = 0;
132047 ExplainQueryPlan((pParse, 1, "SETUP"));
132048 rc = sqlite3Select(pParse, pSetup, &destQueue);
132049 pSetup->pNext = p;
132050 if( rc ) goto end_of_recursive_query;
@@ -132073,19 +132123,15 @@
132073 sqlite3VdbeResolveLabel(v, addrCont);
132074
132075 /* Execute the recursive SELECT taking the single row in Current as
132076 ** the value for the recursive-table. Store the results in the Queue.
132077 */
132078 if( p->selFlags & SF_Aggregate ){
132079 sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
132080 }else{
132081 p->pPrior = 0;
132082 ExplainQueryPlan((pParse, 1, "RECURSIVE STEP"));
132083 sqlite3Select(pParse, p, &destQueue);
132084 assert( p->pPrior==0 );
132085 p->pPrior = pSetup;
132086 }
132087
132088 /* Keep running the loop until the Queue is empty */
132089 sqlite3VdbeGoto(v, addrTop);
132090 sqlite3VdbeResolveLabel(v, addrBreak);
132091
@@ -132149,10 +132195,20 @@
132149 p->nSelectRow = nRow;
132150 p = p->pNext;
132151 }
132152 return rc;
132153 }
 
 
 
 
 
 
 
 
 
 
132154
132155 /*
132156 ** This routine is called to process a compound query form from
132157 ** two or more separate queries using UNION, UNION ALL, EXCEPT, or
132158 ** INTERSECT
@@ -132235,11 +132291,11 @@
132235 */
132236 assert( p->pEList && pPrior->pEList );
132237 assert( p->pEList->nExpr==pPrior->pEList->nExpr );
132238
132239 #ifndef SQLITE_OMIT_CTE
132240 if( p->selFlags & SF_Recursive ){
132241 generateWithRecursiveQuery(pParse, p, &dest);
132242 }else
132243 #endif
132244
132245 /* Compound SELECTs that have an ORDER BY clause are handled separately.
@@ -132326,10 +132382,11 @@
132326 assert( p->addrOpenEphm[0] == -1 );
132327 p->addrOpenEphm[0] = addr;
132328 findRightmost(p)->selFlags |= SF_UsesEphemeral;
132329 assert( p->pEList );
132330 }
 
132331
132332 /* Code the SELECT statements to our left
132333 */
132334 assert( !pPrior->pOrderBy );
132335 sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
@@ -134419,12 +134476,14 @@
134419 if( pCte ){
134420 Table *pTab;
134421 ExprList *pEList;
134422 Select *pSel;
134423 Select *pLeft; /* Left-most SELECT statement */
 
134424 int bMayRecursive; /* True if compound joined by UNION [ALL] */
134425 With *pSavedWith; /* Initial value of pParse->pWith */
 
134426
134427 /* If pCte->zCteErr is non-NULL at this point, then this is an illegal
134428 ** recursive reference to CTE pCte. Leave an error in pParse and return
134429 ** early. If pCte->zCteErr is NULL, then this is not a recursive reference.
134430 ** In this case, proceed. */
@@ -134445,48 +134504,52 @@
134445 pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0);
134446 if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
134447 assert( pFrom->pSelect );
134448
134449 /* Check if this is a recursive CTE. */
134450 pSel = pFrom->pSelect;
134451 bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION );
134452 if( bMayRecursive ){
134453 int i;
134454 SrcList *pSrc = pFrom->pSelect->pSrc;
 
134455 for(i=0; i<pSrc->nSrc; i++){
134456 struct SrcList_item *pItem = &pSrc->a[i];
134457 if( pItem->zDatabase==0
134458 && pItem->zName!=0
134459 && 0==sqlite3StrICmp(pItem->zName, pCte->zName)
134460 ){
134461 pItem->pTab = pTab;
134462 pItem->fg.isRecursive = 1;
134463 pTab->nTabRef++;
134464 pSel->selFlags |= SF_Recursive;
 
 
 
 
 
 
 
 
 
134465 }
134466 }
134467 }
134468
134469 /* Only one recursive reference is permitted. */
134470 if( pTab->nTabRef>2 ){
134471 sqlite3ErrorMsg(
134472 pParse, "multiple references to recursive table: %s", pCte->zName
134473 );
134474 return SQLITE_ERROR;
134475 }
134476 assert( pTab->nTabRef==1 ||
134477 ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 ));
134478
134479 pCte->zCteErr = "circular reference: %s";
134480 pSavedWith = pParse->pWith;
134481 pParse->pWith = pWith;
134482 if( bMayRecursive ){
134483 Select *pPrior = pSel->pPrior;
134484 assert( pPrior->pWith==0 );
134485 pPrior->pWith = pSel->pWith;
134486 sqlite3WalkSelect(pWalker, pPrior);
134487 pPrior->pWith = 0;
 
 
 
134488 }else{
134489 sqlite3WalkSelect(pWalker, pSel);
134490 }
134491 pParse->pWith = pWith;
134492
@@ -138158,11 +138221,11 @@
138158 pGrp = sqlite3ExprListAppend(pParse, pGrp, sqlite3ExprDup(db, pNew, 0));
138159 }
138160 #endif
138161 pList = sqlite3ExprListAppend(pParse, pList, pNew);
138162 }
138163 eDest = SRT_Upfrom;
138164 }else if( pTab->pSelect ){
138165 for(i=0; i<pTab->nCol; i++){
138166 pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
138167 }
138168 eDest = SRT_Table;
@@ -139111,16 +139174,30 @@
139111 ephemTab = pParse->nTab++;
139112 addr= sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, nArg);
139113 regArg = pParse->nMem + 1;
139114 pParse->nMem += nArg;
139115 if( pSrc->nSrc>1 ){
 
139116 Expr *pRow;
139117 ExprList *pList;
139118 if( pRowid ){
139119 pRow = sqlite3ExprDup(db, pRowid, 0);
 
 
 
 
139120 }else{
139121 pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0);
 
 
 
 
 
 
 
 
 
139122 }
139123 pList = sqlite3ExprListAppend(pParse, 0, pRow);
139124
139125 for(i=0; i<pTab->nCol; i++){
139126 if( aXRef[i]>=0 ){
@@ -139130,11 +139207,11 @@
139130 }else{
139131 pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
139132 }
139133 }
139134
139135 updateFromSelect(pParse, ephemTab, 0, pList, pSrc, pWhere, 0, 0);
139136 sqlite3ExprListDelete(db, pList);
139137 eOnePass = ONEPASS_OFF;
139138 }else{
139139 regRec = ++pParse->nMem;
139140 regRowid = ++pParse->nMem;
@@ -142457,11 +142534,16 @@
142457 pIn->eEndLoopOp = OP_Noop;
142458 }
142459 pIn++;
142460 }
142461 }
142462 if( iEq>0 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0 ){
 
 
 
 
 
142463 sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq);
142464 }
142465 }else{
142466 pLevel->u.in.nIn = 0;
142467 }
@@ -143507,10 +143589,11 @@
143507 char *zEndAff = 0; /* Affinity for end of range constraint */
143508 u8 bSeekPastNull = 0; /* True to seek past initial nulls */
143509 u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
143510 int omitTable; /* True if we use the index only */
143511 int regBignull = 0; /* big-null flag register */
 
143512
143513 pIdx = pLoop->u.btree.pIndex;
143514 iIdxCur = pLevel->iIdxCur;
143515 assert( nEq>=pLoop->nSkip );
143516
@@ -143652,22 +143735,22 @@
143652 VdbeComment((v, "NULL-scan pass ctr"));
143653 }
143654
143655 op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
143656 assert( op!=0 );
143657 if( (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 ){
143658 assert( op==OP_SeekGE );
143659 assert( regBignull==0 );
143660 /* TUNING: The OP_SeekScan opcode seeks to reduce the number
143661 ** of expensive seek operations by replacing a single seek with
143662 ** 1 or more step operations. The question is, how many steps
143663 ** should we try before giving up and going with a seek. The cost
143664 ** of a seek is proportional to the logarithm of the number
143665 ** of entries in the tree, so basing the number of steps to try
143666 ** on the estimated number of rows in the btree seems like a good
143667 ** guess. */
143668 sqlite3VdbeAddOp1(v, OP_SeekScan, (pIdx->aiRowLogEst[0]+9)/10);
 
143669 VdbeCoverage(v);
143670 }
143671 sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
143672 VdbeCoverage(v);
143673 VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
@@ -143748,10 +143831,11 @@
143748 sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
143749 testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
143750 testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
143751 testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
143752 testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
 
143753 }
143754 if( regBignull ){
143755 /* During a NULL-scan, check to see if we have reached the end of
143756 ** the NULLs */
143757 assert( bSeekPastNull==!bStopAtNull );
@@ -147706,11 +147790,11 @@
147706 if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C';
147707 if( pTerm->eOperator & WO_SINGLE ){
147708 sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}",
147709 pTerm->leftCursor, pTerm->u.x.leftColumn);
147710 }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){
147711 sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%lld",
147712 pTerm->u.pOrInfo->indexable);
147713 }else{
147714 sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor);
147715 }
147716 sqlite3DebugPrintf(
@@ -148484,11 +148568,11 @@
148484 }
148485 }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){
148486 /* "x IN (value, value, ...)" */
148487 nIn = sqlite3LogEst(pExpr->x.pList->nExpr);
148488 }
148489 if( pProbe->hasStat1 ){
148490 LogEst M, logK, safetyMargin;
148491 /* Let:
148492 ** N = the total number of rows in the table
148493 ** K = the number of entries on the RHS of the IN operator
148494 ** M = the number of rows in the table that match terms to the
@@ -148503,11 +148587,12 @@
148503 **
148504 ** Our estimates for M, K, and N might be inaccurate, so we build in
148505 ** a safety margin of 2 (LogEst: 10) that favors using the IN operator
148506 ** with the index, as using an index has better worst-case behavior.
148507 ** If we do not have real sqlite_stat1 data, always prefer to use
148508 ** the index.
 
148509 */
148510 M = pProbe->aiRowLogEst[saved_nEq];
148511 logK = estLog(nIn);
148512 safetyMargin = 10; /* TUNING: extra weight for indexed IN */
148513 if( M + logK + safetyMargin < nIn + rLogSize ){
@@ -209277,10 +209362,11 @@
209277 int eDetail; /* FTS5_DETAIL_XXX value */
209278 char *zContentExprlist;
209279 Fts5Tokenizer *pTok;
209280 fts5_tokenizer *pTokApi;
209281 int bLock; /* True when table is preparing statement */
 
209282
209283 /* Values loaded from the %_config table */
209284 int iCookie; /* Incremented when %_config is modified */
209285 int pgsz; /* Approximate page size used in %_data */
209286 int nAutomerge; /* 'automerge' setting */
@@ -209297,21 +209383,23 @@
209297 int bPrefixIndex; /* True to use prefix-indexes */
209298 #endif
209299 };
209300
209301 /* Current expected value of %_config table 'version' field */
209302 #define FTS5_CURRENT_VERSION 4
209303
209304 #define FTS5_CONTENT_NORMAL 0
209305 #define FTS5_CONTENT_NONE 1
209306 #define FTS5_CONTENT_EXTERNAL 2
209307
209308 #define FTS5_DETAIL_FULL 0
209309 #define FTS5_DETAIL_NONE 1
209310 #define FTS5_DETAIL_COLUMNS 2
209311
209312
 
 
209313
209314 static int sqlite3Fts5ConfigParse(
209315 Fts5Global*, sqlite3*, int, const char **, Fts5Config**, char**
209316 );
209317 static void sqlite3Fts5ConfigFree(Fts5Config*);
@@ -209647,12 +209735,11 @@
209647
209648 static int sqlite3Fts5GetTokenizer(
209649 Fts5Global*,
209650 const char **azArg,
209651 int nArg,
209652 Fts5Tokenizer**,
209653 fts5_tokenizer**,
209654 char **pzErr
209655 );
209656
209657 static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64);
209658
@@ -209777,14 +209864,22 @@
209777 };
209778
209779 /* Parse a MATCH expression. */
209780 static int sqlite3Fts5ExprNew(
209781 Fts5Config *pConfig,
 
209782 int iCol, /* Column on LHS of MATCH operator */
209783 const char *zExpr,
209784 Fts5Expr **ppNew,
209785 char **pzErr
 
 
 
 
 
 
 
209786 );
209787
209788 /*
209789 ** for(rc = sqlite3Fts5ExprFirst(pExpr, pIdx, bDesc);
209790 ** rc==SQLITE_OK && 0==sqlite3Fts5ExprEof(pExpr);
@@ -209890,10 +209985,14 @@
209890 /**************************************************************************
209891 ** Interface to code in fts5_tokenizer.c.
209892 */
209893
209894 static int sqlite3Fts5TokenizerInit(fts5_api*);
 
 
 
 
209895 /*
209896 ** End of interface to code in fts5_tokenizer.c.
209897 **************************************************************************/
209898
209899 /**************************************************************************
@@ -212869,11 +212968,11 @@
212869 if( p==0 ){
212870 *pzErr = sqlite3_mprintf("parse error in tokenize directive");
212871 rc = SQLITE_ERROR;
212872 }else{
212873 rc = sqlite3Fts5GetTokenizer(pGlobal,
212874 (const char**)azArg, (int)nArg, &pConfig->pTok, &pConfig->pTokApi,
212875 pzErr
212876 );
212877 }
212878 }
212879 }
@@ -212941,13 +213040,11 @@
212941 ** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error
212942 ** code if an error occurs.
212943 */
212944 static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){
212945 assert( pConfig->pTok==0 && pConfig->pTokApi==0 );
212946 return sqlite3Fts5GetTokenizer(
212947 pGlobal, 0, 0, &pConfig->pTok, &pConfig->pTokApi, 0
212948 );
212949 }
212950
212951 /*
212952 ** Gobble up the first bareword or quoted word from the input buffer zIn.
212953 ** Return a pointer to the character immediately following the last in
@@ -213635,10 +213732,11 @@
213635 char *zErr;
213636 int rc;
213637 int nPhrase; /* Size of apPhrase array */
213638 Fts5ExprPhrase **apPhrase; /* Array of all phrases */
213639 Fts5ExprNode *pExpr; /* Result of a successful parse */
 
213640 };
213641
213642 static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){
213643 va_list ap;
213644 va_start(ap, zFmt);
@@ -213723,10 +213821,11 @@
213723 static void *fts5ParseAlloc(u64 t){ return sqlite3_malloc64((sqlite3_int64)t);}
213724 static void fts5ParseFree(void *p){ sqlite3_free(p); }
213725
213726 static int sqlite3Fts5ExprNew(
213727 Fts5Config *pConfig, /* FTS5 Configuration */
 
213728 int iCol,
213729 const char *zExpr, /* Expression text */
213730 Fts5Expr **ppNew,
213731 char **pzErr
213732 ){
@@ -213738,10 +213837,11 @@
213738 Fts5Expr *pNew;
213739
213740 *ppNew = 0;
213741 *pzErr = 0;
213742 memset(&sParse, 0, sizeof(sParse));
 
213743 pEngine = sqlite3Fts5ParserAlloc(fts5ParseAlloc);
213744 if( pEngine==0 ){ return SQLITE_NOMEM; }
213745 sParse.pConfig = pConfig;
213746
213747 do {
@@ -213780,10 +213880,11 @@
213780 }
213781 pNew->pIndex = 0;
213782 pNew->pConfig = pConfig;
213783 pNew->apExprPhrase = sParse.apPhrase;
213784 pNew->nPhrase = sParse.nPhrase;
 
213785 sParse.apPhrase = 0;
213786 }
213787 }else{
213788 sqlite3Fts5ParseNodeFree(sParse.pExpr);
213789 }
@@ -213790,10 +213891,85 @@
213790
213791 sqlite3_free(sParse.apPhrase);
213792 *pzErr = sParse.zErr;
213793 return sParse.rc;
213794 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213795
213796 /*
213797 ** Free the expression node object passed as the only argument.
213798 */
213799 static void sqlite3Fts5ParseNodeFree(Fts5ExprNode *p){
@@ -215167,10 +215343,24 @@
215167
215168 static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p){
215169 assert( pParse->pExpr==0 );
215170 pParse->pExpr = p;
215171 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215172
215173 /*
215174 ** This function is called by the parser to process a string token. The
215175 ** string may or may not be quoted. In any case it is tokenized and a
215176 ** phrase object consisting of all tokens returned.
@@ -215203,20 +215393,13 @@
215203 fts5ExprPhraseFree(sCtx.pPhrase);
215204 sCtx.pPhrase = 0;
215205 }else{
215206
215207 if( pAppend==0 ){
215208 if( (pParse->nPhrase % 8)==0 ){
215209 sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
215210 Fts5ExprPhrase **apNew;
215211 apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte);
215212 if( apNew==0 ){
215213 pParse->rc = SQLITE_NOMEM;
215214 fts5ExprPhraseFree(sCtx.pPhrase);
215215 return 0;
215216 }
215217 pParse->apPhrase = apNew;
215218 }
215219 pParse->nPhrase++;
215220 }
215221
215222 if( sCtx.pPhrase==0 ){
@@ -215618,10 +215801,71 @@
215618 sqlite3_free(pSub);
215619 }else{
215620 p->apChild[p->nChild++] = pSub;
215621 }
215622 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
215623
215624 /*
215625 ** Allocate and return a new expression object. If anything goes wrong (i.e.
215626 ** OOM error), leave an error code in pParse and return NULL.
215627 */
@@ -215643,55 +215887,62 @@
215643 );
215644 if( eType==FTS5_STRING && pNear==0 ) return 0;
215645 if( eType!=FTS5_STRING && pLeft==0 ) return pRight;
215646 if( eType!=FTS5_STRING && pRight==0 ) return pLeft;
215647
215648 if( eType==FTS5_NOT ){
215649 nChild = 2;
215650 }else if( eType==FTS5_AND || eType==FTS5_OR ){
215651 nChild = 2;
215652 if( pLeft->eType==eType ) nChild += pLeft->nChild-1;
215653 if( pRight->eType==eType ) nChild += pRight->nChild-1;
215654 }
215655
215656 nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1);
215657 pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
215658
215659 if( pRet ){
215660 pRet->eType = eType;
215661 pRet->pNear = pNear;
215662 fts5ExprAssignXNext(pRet);
215663 if( eType==FTS5_STRING ){
215664 int iPhrase;
215665 for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
215666 pNear->apPhrase[iPhrase]->pNode = pRet;
215667 if( pNear->apPhrase[iPhrase]->nTerm==0 ){
215668 pRet->xNext = 0;
215669 pRet->eType = FTS5_EOF;
215670 }
215671 }
215672
215673 if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){
215674 Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
215675 if( pNear->nPhrase!=1
215676 || pPhrase->nTerm>1
215677 || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst)
215678 ){
215679 assert( pParse->rc==SQLITE_OK );
215680 pParse->rc = SQLITE_ERROR;
215681 assert( pParse->zErr==0 );
215682 pParse->zErr = sqlite3_mprintf(
215683 "fts5: %s queries are not supported (detail!=full)",
215684 pNear->nPhrase==1 ? "phrase": "NEAR"
215685 );
215686 sqlite3_free(pRet);
215687 pRet = 0;
215688 }
215689 }
215690 }else{
215691 fts5ExprAddChildren(pRet, pLeft);
215692 fts5ExprAddChildren(pRet, pRight);
 
 
 
 
 
 
 
215693 }
215694 }
215695 }
215696
215697 if( pRet==0 ){
@@ -216041,11 +216292,11 @@
216041 zExpr = (const char*)sqlite3_value_text(apVal[0]);
216042 if( zExpr==0 ) zExpr = "";
216043
216044 rc = sqlite3Fts5ConfigParse(pGlobal, db, nConfig, azConfig, &pConfig, &zErr);
216045 if( rc==SQLITE_OK ){
216046 rc = sqlite3Fts5ExprNew(pConfig, pConfig->nCol, zExpr, &pExpr, &zErr);
216047 }
216048 if( rc==SQLITE_OK ){
216049 char *zText;
216050 if( pExpr->pRoot->xNext==0 ){
216051 zText = sqlite3_mprintf("");
@@ -216746,12 +216997,13 @@
216746 pPtr = (u8*)p;
216747
216748 /* If this is a new rowid, append the 4-byte size field for the previous
216749 ** entry, and the new rowid for this entry. */
216750 if( iRowid!=p->iRowid ){
 
216751 fts5HashAddPoslistSize(pHash, p, 0);
216752 p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iRowid - p->iRowid);
216753 p->iRowid = iRowid;
216754 bNew = 1;
216755 p->iSzPoslist = p->nData;
216756 if( pHash->eDetail!=FTS5_DETAIL_NONE ){
216757 p->nData += 1;
@@ -218722,11 +218974,11 @@
218722 n = pIter->iEndofDoclist;
218723 }
218724
218725 ASSERT_SZLEAF_OK(pIter->pLeaf);
218726 while( 1 ){
218727 i64 iDelta = 0;
218728
218729 if( eDetail==FTS5_DETAIL_NONE ){
218730 /* todo */
218731 if( i<n && a[i]==0 ){
218732 i++;
@@ -218737,11 +218989,11 @@
218737 int bDummy;
218738 i += fts5GetPoslistSize(&a[i], &nPos, &bDummy);
218739 i += nPos;
218740 }
218741 if( i>=n ) break;
218742 i += fts5GetVarint(&a[i], (u64*)&iDelta);
218743 pIter->iRowid += iDelta;
218744
218745 /* If necessary, grow the pIter->aRowidOffset[] array. */
218746 if( iRowidOffset>=pIter->nRowidOffset ){
218747 int nNew = pIter->nRowidOffset + 8;
@@ -218836,20 +219088,20 @@
218836 UNUSED_PARAM(pbUnused);
218837
218838 if( pIter->iRowidOffset>0 ){
218839 u8 *a = pIter->pLeaf->p;
218840 int iOff;
218841 i64 iDelta;
218842
218843 pIter->iRowidOffset--;
218844 pIter->iLeafOffset = pIter->aRowidOffset[pIter->iRowidOffset];
218845 fts5SegIterLoadNPos(p, pIter);
218846 iOff = pIter->iLeafOffset;
218847 if( p->pConfig->eDetail!=FTS5_DETAIL_NONE ){
218848 iOff += pIter->nPos;
218849 }
218850 fts5GetVarint(&a[iOff], (u64*)&iDelta);
218851 pIter->iRowid -= iDelta;
218852 }else{
218853 fts5SegIterReverseNewPage(p, pIter);
218854 }
218855 }
@@ -224064,10 +224316,27 @@
224064 {
224065 pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
224066 }
224067 #endif
224068 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224069
224070 /*
224071 ** Implementation of the xBestIndex method for FTS5 tables. Within the
224072 ** WHERE constraint, it searches for the following:
224073 **
@@ -224094,11 +224363,13 @@
224094 ** idxStr is used to encode data from the WHERE clause. For each argument
224095 ** passed to the xFilter method, the following is appended to idxStr:
224096 **
224097 ** Match against table column: "m"
224098 ** Match against rank column: "r"
224099 ** Match against other column: "<column-number>"
 
 
224100 ** Equality constraint against the rowid: "="
224101 ** A < or <= against the rowid: "<"
224102 ** A > or >= against the rowid: ">"
224103 **
224104 ** This function ensures that there is at most one "r" or "=". And that if
@@ -224155,11 +224426,11 @@
224155 "recursively defined fts5 content table"
224156 );
224157 return SQLITE_ERROR;
224158 }
224159
224160 idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 6 + 1);
224161 if( idxStr==0 ) return SQLITE_NOMEM;
224162 pInfo->idxStr = idxStr;
224163 pInfo->needToFreeIdxStr = 1;
224164
224165 for(i=0; i<pInfo->nConstraint; i++){
@@ -224179,29 +224450,33 @@
224179 }else{
224180 if( iCol==nCol+1 ){
224181 if( bSeenRank ) continue;
224182 idxStr[iIdxStr++] = 'r';
224183 bSeenRank = 1;
224184 }else{
224185 bSeenMatch = 1;
224186 idxStr[iIdxStr++] = 'm';
224187 if( iCol<nCol ){
224188 sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
224189 idxStr += strlen(&idxStr[iIdxStr]);
224190 assert( idxStr[iIdxStr]=='\0' );
224191 }
224192 }
224193 pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224194 pInfo->aConstraintUsage[i].omit = 1;
224195 }
224196 }
224197 else if( p->usable && bSeenEq==0
224198 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0
224199 ){
224200 idxStr[iIdxStr++] = '=';
224201 bSeenEq = 1;
224202 pInfo->aConstraintUsage[i].argvIndex = ++iCons;
 
 
 
 
 
 
224203 }
224204 }
224205
224206 if( bSeenEq==0 ){
224207 for(i=0; i<pInfo->nConstraint; i++){
@@ -224830,41 +225105,55 @@
224830 for(i=0; i<nVal; i++){
224831 switch( idxStr[iIdxStr++] ){
224832 case 'r':
224833 pRank = apVal[i];
224834 break;
224835 case 'm': {
224836 const char *zText = (const char*)sqlite3_value_text(apVal[i]);
224837 if( zText==0 ) zText = "";
224838
224839 if( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' ){
224840 iCol = 0;
224841 do{
224842 iCol = iCol*10 + (idxStr[iIdxStr]-'0');
224843 iIdxStr++;
224844 }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
224845 }else{
224846 iCol = pConfig->nCol;
224847 }
224848
224849 if( zText[0]=='*' ){
224850 /* The user has issued a query of the form "MATCH '*...'". This
224851 ** indicates that the MATCH expression is not a full text query,
224852 ** but a request for an internal parameter. */
224853 rc = fts5SpecialMatch(pTab, pCsr, &zText[1]);
224854 goto filter_out;
224855 }else{
224856 char **pzErr = &pTab->p.base.zErrMsg;
224857 rc = sqlite3Fts5ExprNew(pConfig, iCol, zText, &pExpr, pzErr);
224858 if( rc==SQLITE_OK ){
224859 rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);
224860 pExpr = 0;
224861 }
224862 if( rc!=SQLITE_OK ) goto filter_out;
224863 }
224864
224865 break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224866 }
224867 case '=':
224868 pRowidEq = apVal[i];
224869 break;
224870 case '<':
@@ -226273,12 +226562,11 @@
226273
226274 static int sqlite3Fts5GetTokenizer(
226275 Fts5Global *pGlobal,
226276 const char **azArg,
226277 int nArg,
226278 Fts5Tokenizer **ppTok,
226279 fts5_tokenizer **ppTokApi,
226280 char **pzErr
226281 ){
226282 Fts5TokenizerModule *pMod;
226283 int rc = SQLITE_OK;
226284
@@ -226286,20 +226574,26 @@
226286 if( pMod==0 ){
226287 assert( nArg>0 );
226288 rc = SQLITE_ERROR;
226289 *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
226290 }else{
226291 rc = pMod->x.xCreate(pMod->pUserData, &azArg[1], (nArg?nArg-1:0), ppTok);
226292 *ppTokApi = &pMod->x;
226293 if( rc!=SQLITE_OK && pzErr ){
226294 *pzErr = sqlite3_mprintf("error in tokenizer constructor");
 
 
 
 
 
 
226295 }
226296 }
226297
226298 if( rc!=SQLITE_OK ){
226299 *ppTokApi = 0;
226300 *ppTok = 0;
226301 }
226302
226303 return rc;
226304 }
226305
@@ -226344,11 +226638,11 @@
226344 int nArg, /* Number of args */
226345 sqlite3_value **apUnused /* Function arguments */
226346 ){
226347 assert( nArg==0 );
226348 UNUSED_PARAM2(nArg, apUnused);
226349 sqlite3_result_text(pCtx, "fts5: 2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c402e5", -1, SQLITE_TRANSIENT);
226350 }
226351
226352 /*
226353 ** Return true if zName is the extension on one of the shadow tables used
226354 ** by this module.
@@ -228897,10 +229191,135 @@
228897 sCtx.aBuf = p->aBuf;
228898 return p->tokenizer.xTokenize(
228899 p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb
228900 );
228901 }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228902
228903 /*
228904 ** Register all built-in tokenizers with FTS5.
228905 */
228906 static int sqlite3Fts5TokenizerInit(fts5_api *pApi){
@@ -228909,10 +229328,11 @@
228909 fts5_tokenizer x;
228910 } aBuiltin[] = {
228911 { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}},
228912 { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }},
228913 { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }},
 
228914 };
228915
228916 int rc = SQLITE_OK; /* Return code */
228917 int i; /* To iterate through builtin functions */
228918
@@ -231140,12 +231560,12 @@
231140 }
231141 #endif /* SQLITE_CORE */
231142 #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */
231143
231144 /************** End of stmt.c ************************************************/
231145 #if __LINE__!=231145
231146 #undef SQLITE_SOURCE_ID
231147 #define SQLITE_SOURCE_ID "2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c4alt2"
231148 #endif
231149 /* Return the source-id for this library */
231150 SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
231151 /************************** End of sqlite3.c ******************************/
231152
--- src/sqlite3.c
+++ src/sqlite3.c
@@ -1171,11 +1171,11 @@
1171 ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
1172 ** [sqlite_version()] and [sqlite_source_id()].
1173 */
1174 #define SQLITE_VERSION "3.34.0"
1175 #define SQLITE_VERSION_NUMBER 3034000
1176 #define SQLITE_SOURCE_ID "2020-10-19 20:49:54 75a0288871ccb2a69a636cbb328fe19045a0d0ef96a193ecd118b9a196784d2d"
1177
1178 /*
1179 ** CAPI3REF: Run-Time Library Version Numbers
1180 ** KEYWORDS: sqlite3_version sqlite3_sourceid
1181 **
@@ -10290,22 +10290,29 @@
10290
10291 /*
10292 ** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
10293 **
10294 ** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
10295 ** method of a [virtual table], then it might return true if the
10296 ** column is being fetched as part of an UPDATE operation during which the
10297 ** column value will not change. The virtual table implementation can use
10298 ** this hint as permission to substitute a return value that is less
10299 ** expensive to compute and that the corresponding
10300 ** [xUpdate] method understands as a "no-change" value.
10301 **
10302 ** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
10303 ** the column is not changed by the UPDATE statement, then the xColumn
10304 ** method can optionally return without setting a result, without calling
10305 ** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
10306 ** In that case, [sqlite3_value_nochange(X)] will return true for the
10307 ** same column in the [xUpdate] method.
10308 **
10309 ** The sqlite3_vtab_nochange() routine is an optimization. Virtual table
10310 ** implementations should continue to give a correct answer even if the
10311 ** sqlite3_vtab_nochange() interface were to always return false. In the
10312 ** current implementation, the sqlite3_vtab_nochange() interface does always
10313 ** returns false for the enhanced [UPDATE FROM] statement.
10314 */
10315 SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
10316
10317 /*
10318 ** CAPI3REF: Determine The Collation For a Virtual Table Constraint
@@ -46915,11 +46922,15 @@
46922 }else{
46923 /* Opens a file, only if it exists. */
46924 dwCreationDisposition = OPEN_EXISTING;
46925 }
46926
46927 if( 0==sqlite3_uri_boolean(zName, "exclusive", 0) ){
46928 dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE;
46929 }else{
46930 dwShareMode = 0;
46931 }
46932
46933 if( isDelete ){
46934 #if SQLITE_OS_WINCE
46935 dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN;
46936 isTemp = 1;
@@ -48068,15 +48079,18 @@
48079
48080 /*
48081 ** Close an memdb-file.
48082 **
48083 ** The pData pointer is owned by the application, so there is nothing
48084 ** to free. Unless the SQLITE_DESERIALIZE_FREEONCLOSE flag is set,
48085 ** in which case we own the pData pointer and need to free it.
48086 */
48087 static int memdbClose(sqlite3_file *pFile){
48088 MemFile *p = (MemFile *)pFile;
48089 if( p->mFlags & SQLITE_DESERIALIZE_FREEONCLOSE ){
48090 sqlite3_free(p->aData);
48091 }
48092 return SQLITE_OK;
48093 }
48094
48095 /*
48096 ** Read data from an memdb-file.
@@ -48531,10 +48545,11 @@
48545 p = memdbFromDbSchema(db, zSchema);
48546 if( p==0 ){
48547 rc = SQLITE_ERROR;
48548 }else{
48549 p->aData = pData;
48550 pData = 0;
48551 p->sz = szDb;
48552 p->szAlloc = szBuf;
48553 p->szMax = szBuf;
48554 if( p->szMax<sqlite3GlobalConfig.mxMemdbSize ){
48555 p->szMax = sqlite3GlobalConfig.mxMemdbSize;
@@ -48543,10 +48558,13 @@
48558 rc = SQLITE_OK;
48559 }
48560
48561 end_deserialize:
48562 sqlite3_finalize(pStmt);
48563 if( pData && (mFlags & SQLITE_DESERIALIZE_FREEONCLOSE)!=0 ){
48564 sqlite3_free(pData);
48565 }
48566 sqlite3_mutex_leave(db->mutex);
48567 return rc;
48568 }
48569
48570 /*
@@ -88945,11 +88963,12 @@
88963 **
88964 ** Begin a transaction on database P1 if a transaction is not already
88965 ** active.
88966 ** If P2 is non-zero, then a write-transaction is started, or if a
88967 ** read-transaction is already active, it is upgraded to a write-transaction.
88968 ** If P2 is zero, then a read-transaction is started. If P2 is 2 or more
88969 ** then an exclusive transaction is started.
88970 **
88971 ** P1 is the index of the database file on which the transaction is
88972 ** started. Index 0 is the main database file and index 1 is the
88973 ** file used for temporary tables. Indices of 2 or more are used for
88974 ** attached databases.
@@ -88979,10 +88998,11 @@
88998 Btree *pBt;
88999 int iMeta = 0;
89000
89001 assert( p->bIsReader );
89002 assert( p->readOnly==0 || pOp->p2==0 );
89003 assert( pOp->p2>=0 && pOp->p2<=2 );
89004 assert( pOp->p1>=0 && pOp->p1<db->nDb );
89005 assert( DbMaskTest(p->btreeMask, pOp->p1) );
89006 if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){
89007 rc = SQLITE_READONLY;
89008 goto abort_due_to_error;
@@ -89842,22 +89862,22 @@
89862 }
89863 break;
89864 }
89865
89866
89867 /* Opcode: SeekScan P1 P2 * * *
89868 ** Synopsis: Scan-ahead up to P1 rows
89869 **
89870 ** This opcode is a prefix opcode to OP_SeekGE. In other words, this
89871 ** opcode must be immediately followed by OP_SeekGE. This constraint is
 
89872 ** checked by assert() statements.
89873 **
89874 ** This opcode uses the P1 through P4 operands of the subsequent
89875 ** OP_SeekGE. In the text that follows, the operands of the subsequent
89876 ** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only
89877 ** the P1 and P2 operands of this opcode are also used, and are called
89878 ** This.P1 and This.P2.
89879 **
89880 ** This opcode helps to optimize IN operators on a multi-column index
89881 ** where the IN operator is on the later terms of the index by avoiding
89882 ** unnecessary seeks on the btree, substituting steps to the next row
89883 ** of the b-tree instead. A correct answer is obtained if this opcode
@@ -89871,39 +89891,43 @@
89891 ** then this opcode is a no-op and control passes through into the OP_SeekGE.
89892 **
89893 ** If the SeekGE.P1 cursor is pointing to a valid row, then that row
89894 ** might be the target row, or it might be near and slightly before the
89895 ** target row. This opcode attempts to position the cursor on the target
89896 ** row by, perhaps by invoking sqlite3BtreeStep() on the cursor
89897 ** between 0 and This.P1 times.
89898 **
89899 ** There are three possible outcomes from this opcode:<ol>
89900 **
89901 ** <li> If after This.P1 steps, the cursor is still point to a place that
89902 ** is earlier in the btree than the target row,
89903 ** then fall through into the subsquence OP_SeekGE opcode.
89904 **
89905 ** <li> If the cursor is successfully moved to the target row by 0 or more
89906 ** sqlite3BtreeNext() calls, then jump to This.P2, which will land just
89907 ** past the OP_IdxGT opcode that follows the OP_SeekGE.
89908 **
89909 ** <li> If the cursor ends up past the target row (indicating the the target
89910 ** row does not exist in the btree) then jump to SeekOP.P2.
89911 ** </ol>
89912 */
89913 case OP_SeekScan: {
89914 VdbeCursor *pC;
89915 int res;
89916 int nStep;
89917 UnpackedRecord r;
89918
89919 assert( pOp[1].opcode==OP_SeekGE );
89920
89921 /* pOp->p2 points to the first instruction past the OP_IdxGT that
89922 ** follows the OP_SeekGE. */
89923 assert( pOp->p2>=(int)(pOp-aOp)+2 );
89924 assert( aOp[pOp->p2-1].opcode==OP_IdxGT );
89925 assert( pOp[1].p1==aOp[pOp->p2-1].p1 );
89926 assert( pOp[1].p2==aOp[pOp->p2-1].p2 );
89927 assert( pOp[1].p3==aOp[pOp->p2-1].p3 );
89928
89929 assert( pOp->p1>0 );
89930 pC = p->apCsr[pOp[1].p1];
89931 assert( pC!=0 );
89932 assert( pC->eCurType==CURTYPE_BTREE );
89933 assert( !pC->isTable );
@@ -89913,12 +89937,12 @@
89937 printf("... cursor not valid - fall through\n");
89938 }
89939 #endif
89940 break;
89941 }
89942 nStep = pOp->p1;
89943 assert( nStep>=1 );
89944 r.pKeyInfo = pC->pKeyInfo;
89945 r.nField = (u16)pOp[1].p4.i;
89946 r.default_rc = 0;
89947 r.aMem = &aMem[pOp[1].p3];
89948 #ifdef SQLITE_DEBUG
@@ -89936,37 +89960,37 @@
89960 if( rc ) goto abort_due_to_error;
89961 if( res>0 ){
89962 seekscan_search_fail:
89963 #ifdef SQLITE_DEBUG
89964 if( db->flags&SQLITE_VdbeTrace ){
89965 printf("... %d steps and then skip\n", pOp->p1 - nStep);
89966 }
89967 #endif
89968 VdbeBranchTaken(1,3);
89969 pOp++;
89970 goto jump_to_p2;
89971 }
89972 if( res==0 ){
89973 #ifdef SQLITE_DEBUG
89974 if( db->flags&SQLITE_VdbeTrace ){
89975 printf("... %d steps and then success\n", pOp->p1 - nStep);
89976 }
89977 #endif
89978 VdbeBranchTaken(2,3);
89979 goto jump_to_p2;
89980 break;
89981 }
89982 if( nStep<=0 ){
89983 #ifdef SQLITE_DEBUG
89984 if( db->flags&SQLITE_VdbeTrace ){
89985 printf("... fall through after %d steps\n", pOp->p1);
89986 }
89987 #endif
89988 VdbeBranchTaken(0,3);
89989 break;
89990 }
89991 nStep--;
89992 rc = sqlite3BtreeNext(pC->uc.pCursor, 0);
89993 if( rc ){
89994 if( rc==SQLITE_DONE ){
89995 rc = SQLITE_OK;
89996 goto seekscan_search_fail;
@@ -100066,11 +100090,13 @@
100090 ** SELECT * FROM t1 WHERE (select a from t1);
100091 */
100092 SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
100093 int op;
100094 while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){
100095 assert( pExpr->op==TK_COLLATE
100096 || pExpr->op==TK_IF_NULL_ROW
100097 || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) );
100098 pExpr = pExpr->pLeft;
100099 assert( pExpr!=0 );
100100 }
100101 op = pExpr->op;
100102 if( op==TK_SELECT ){
@@ -112360,11 +112386,11 @@
112386 Table *p;
112387 int i;
112388 char *zColl; /* Dequoted name of collation sequence */
112389 sqlite3 *db;
112390
112391 if( (p = pParse->pNewTable)==0 || IN_RENAME_OBJECT ) return;
112392 i = p->nCol-1;
112393 db = pParse->db;
112394 zColl = sqlite3NameFromToken(db, pToken);
112395 if( !zColl ) return;
112396
@@ -115164,11 +115190,11 @@
115190 int i;
115191 struct SrcList_item *pItem;
115192 assert(pList || pParse->db->mallocFailed );
115193 if( pList ){
115194 for(i=0, pItem=pList->a; i<pList->nSrc; i++, pItem++){
115195 if( pItem->iCursor>=0 ) continue;
115196 pItem->iCursor = pParse->nTab++;
115197 if( pItem->pSelect ){
115198 sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc);
115199 }
115200 }
@@ -115361,11 +115387,20 @@
115387 }
115388 v = sqlite3GetVdbe(pParse);
115389 if( !v ) return;
115390 if( type!=TK_DEFERRED ){
115391 for(i=0; i<db->nDb; i++){
115392 int eTxnType;
115393 Btree *pBt = db->aDb[i].pBt;
115394 if( pBt && sqlite3BtreeIsReadonly(pBt) ){
115395 eTxnType = 0; /* Read txn */
115396 }else if( type==TK_EXCLUSIVE ){
115397 eTxnType = 2; /* Exclusive txn */
115398 }else{
115399 eTxnType = 1; /* Write txn */
115400 }
115401 sqlite3VdbeAddOp2(v, OP_Transaction, i, eTxnType);
115402 sqlite3VdbeUsesBtree(v, i);
115403 }
115404 }
115405 sqlite3VdbeAddOp0(v, OP_AutoCommit);
115406 }
@@ -117348,14 +117383,10 @@
117383 ** opcode if it is present */
117384 sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity);
117385 }
117386 if( regOut ){
117387 sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
 
 
 
 
117388 }
117389 sqlite3ReleaseTempRange(pParse, regBase, nCol);
117390 return regBase;
117391 }
117392
@@ -131965,10 +131996,11 @@
131996 ){
131997 SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */
131998 int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */
131999 Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */
132000 Select *pSetup = p->pPrior; /* The setup query */
132001 Select *pFirstRec; /* Left-most recursive term */
132002 int addrTop; /* Top of the loop */
132003 int addrCont, addrBreak; /* CONTINUE and BREAK addresses */
132004 int iCurrent = 0; /* The Current table */
132005 int regCurrent; /* Register holding Current table */
132006 int iQueue; /* The Queue table */
@@ -132039,12 +132071,30 @@
132071 p->selFlags |= SF_UsesEphemeral;
132072 }
132073
132074 /* Detach the ORDER BY clause from the compound SELECT */
132075 p->pOrderBy = 0;
132076
132077 /* Figure out how many elements of the compound SELECT are part of the
132078 ** recursive query. Make sure no recursive elements use aggregate
132079 ** functions. Mark the recursive elements as UNION ALL even if they
132080 ** are really UNION because the distinctness will be enforced by the
132081 ** iDistinct table. pFirstRec is left pointing to the left-most
132082 ** recursive term of the CTE.
132083 */
132084 pFirstRec = p;
132085 for(pFirstRec=p; ALWAYS(pFirstRec!=0); pFirstRec=pFirstRec->pPrior){
132086 if( pFirstRec->selFlags & SF_Aggregate ){
132087 sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported");
132088 goto end_of_recursive_query;
132089 }
132090 pFirstRec->op = TK_ALL;
132091 if( (pFirstRec->pPrior->selFlags & SF_Recursive)==0 ) break;
132092 }
132093
132094 /* Store the results of the setup-query in Queue. */
132095 pSetup = pFirstRec->pPrior;
132096 pSetup->pNext = 0;
132097 ExplainQueryPlan((pParse, 1, "SETUP"));
132098 rc = sqlite3Select(pParse, pSetup, &destQueue);
132099 pSetup->pNext = p;
132100 if( rc ) goto end_of_recursive_query;
@@ -132073,19 +132123,15 @@
132123 sqlite3VdbeResolveLabel(v, addrCont);
132124
132125 /* Execute the recursive SELECT taking the single row in Current as
132126 ** the value for the recursive-table. Store the results in the Queue.
132127 */
132128 pFirstRec->pPrior = 0;
132129 ExplainQueryPlan((pParse, 1, "RECURSIVE STEP"));
132130 sqlite3Select(pParse, p, &destQueue);
132131 assert( pFirstRec->pPrior==0 );
132132 pFirstRec->pPrior = pSetup;
 
 
 
 
132133
132134 /* Keep running the loop until the Queue is empty */
132135 sqlite3VdbeGoto(v, addrTop);
132136 sqlite3VdbeResolveLabel(v, addrBreak);
132137
@@ -132149,10 +132195,20 @@
132195 p->nSelectRow = nRow;
132196 p = p->pNext;
132197 }
132198 return rc;
132199 }
132200
132201 /*
132202 ** Return true if the SELECT statement which is known to be the recursive
132203 ** part of a recursive CTE still has its anchor terms attached. If the
132204 ** anchor terms have already been removed, then return false.
132205 */
132206 static int hasAnchor(Select *p){
132207 while( p && (p->selFlags & SF_Recursive)!=0 ){ p = p->pPrior; }
132208 return p!=0;
132209 }
132210
132211 /*
132212 ** This routine is called to process a compound query form from
132213 ** two or more separate queries using UNION, UNION ALL, EXCEPT, or
132214 ** INTERSECT
@@ -132235,11 +132291,11 @@
132291 */
132292 assert( p->pEList && pPrior->pEList );
132293 assert( p->pEList->nExpr==pPrior->pEList->nExpr );
132294
132295 #ifndef SQLITE_OMIT_CTE
132296 if( (p->selFlags & SF_Recursive)!=0 && hasAnchor(p) ){
132297 generateWithRecursiveQuery(pParse, p, &dest);
132298 }else
132299 #endif
132300
132301 /* Compound SELECTs that have an ORDER BY clause are handled separately.
@@ -132326,10 +132382,11 @@
132382 assert( p->addrOpenEphm[0] == -1 );
132383 p->addrOpenEphm[0] = addr;
132384 findRightmost(p)->selFlags |= SF_UsesEphemeral;
132385 assert( p->pEList );
132386 }
132387
132388
132389 /* Code the SELECT statements to our left
132390 */
132391 assert( !pPrior->pOrderBy );
132392 sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
@@ -134419,12 +134476,14 @@
134476 if( pCte ){
134477 Table *pTab;
134478 ExprList *pEList;
134479 Select *pSel;
134480 Select *pLeft; /* Left-most SELECT statement */
134481 Select *pRecTerm; /* Left-most recursive term */
134482 int bMayRecursive; /* True if compound joined by UNION [ALL] */
134483 With *pSavedWith; /* Initial value of pParse->pWith */
134484 int iRecTab = -1; /* Cursor for recursive table */
134485
134486 /* If pCte->zCteErr is non-NULL at this point, then this is an illegal
134487 ** recursive reference to CTE pCte. Leave an error in pParse and return
134488 ** early. If pCte->zCteErr is NULL, then this is not a recursive reference.
134489 ** In this case, proceed. */
@@ -134445,48 +134504,52 @@
134504 pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0);
134505 if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
134506 assert( pFrom->pSelect );
134507
134508 /* Check if this is a recursive CTE. */
134509 pRecTerm = pSel = pFrom->pSelect;
134510 bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION );
134511 while( bMayRecursive && pRecTerm->op==pSel->op ){
134512 int i;
134513 SrcList *pSrc = pRecTerm->pSrc;
134514 assert( pRecTerm->pPrior!=0 );
134515 for(i=0; i<pSrc->nSrc; i++){
134516 struct SrcList_item *pItem = &pSrc->a[i];
134517 if( pItem->zDatabase==0
134518 && pItem->zName!=0
134519 && 0==sqlite3StrICmp(pItem->zName, pCte->zName)
134520 ){
134521 pItem->pTab = pTab;
 
134522 pTab->nTabRef++;
134523 pItem->fg.isRecursive = 1;
134524 if( pRecTerm->selFlags & SF_Recursive ){
134525 sqlite3ErrorMsg(pParse,
134526 "multiple references to recursive table: %s", pCte->zName
134527 );
134528 return SQLITE_ERROR;
134529 }
134530 pRecTerm->selFlags |= SF_Recursive;
134531 if( iRecTab<0 ) iRecTab = pParse->nTab++;
134532 pItem->iCursor = iRecTab;
134533 }
134534 }
134535 if( (pRecTerm->selFlags & SF_Recursive)==0 ) break;
134536 pRecTerm = pRecTerm->pPrior;
134537 }
 
 
 
 
 
 
 
 
134538
134539 pCte->zCteErr = "circular reference: %s";
134540 pSavedWith = pParse->pWith;
134541 pParse->pWith = pWith;
134542 if( pSel->selFlags & SF_Recursive ){
134543 assert( pRecTerm!=0 );
134544 assert( (pRecTerm->selFlags & SF_Recursive)==0 );
134545 assert( pRecTerm->pNext!=0 );
134546 assert( (pRecTerm->pNext->selFlags & SF_Recursive)!=0 );
134547 assert( pRecTerm->pWith==0 );
134548 pRecTerm->pWith = pSel->pWith;
134549 sqlite3WalkSelect(pWalker, pRecTerm);
134550 pRecTerm->pWith = 0;
134551 }else{
134552 sqlite3WalkSelect(pWalker, pSel);
134553 }
134554 pParse->pWith = pWith;
134555
@@ -138158,11 +138221,11 @@
138221 pGrp = sqlite3ExprListAppend(pParse, pGrp, sqlite3ExprDup(db, pNew, 0));
138222 }
138223 #endif
138224 pList = sqlite3ExprListAppend(pParse, pList, pNew);
138225 }
138226 eDest = IsVirtual(pTab) ? SRT_Table : SRT_Upfrom;
138227 }else if( pTab->pSelect ){
138228 for(i=0; i<pTab->nCol; i++){
138229 pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
138230 }
138231 eDest = SRT_Table;
@@ -139111,16 +139174,30 @@
139174 ephemTab = pParse->nTab++;
139175 addr= sqlite3VdbeAddOp2(v, OP_OpenEphemeral, ephemTab, nArg);
139176 regArg = pParse->nMem + 1;
139177 pParse->nMem += nArg;
139178 if( pSrc->nSrc>1 ){
139179 Index *pPk = 0;
139180 Expr *pRow;
139181 ExprList *pList;
139182 if( HasRowid(pTab) ){
139183 if( pRowid ){
139184 pRow = sqlite3ExprDup(db, pRowid, 0);
139185 }else{
139186 pRow = sqlite3PExpr(pParse, TK_ROW, 0, 0);
139187 }
139188 }else{
139189 i16 iPk; /* PRIMARY KEY column */
139190 pPk = sqlite3PrimaryKeyIndex(pTab);
139191 assert( pPk!=0 );
139192 assert( pPk->nKeyCol==1 );
139193 iPk = pPk->aiColumn[0];
139194 if( aXRef[iPk]>=0 ){
139195 pRow = sqlite3ExprDup(db, pChanges->a[aXRef[iPk]].pExpr, 0);
139196 }else{
139197 pRow = exprRowColumn(pParse, iPk);
139198 }
139199 }
139200 pList = sqlite3ExprListAppend(pParse, 0, pRow);
139201
139202 for(i=0; i<pTab->nCol; i++){
139203 if( aXRef[i]>=0 ){
@@ -139130,11 +139207,11 @@
139207 }else{
139208 pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i));
139209 }
139210 }
139211
139212 updateFromSelect(pParse, ephemTab, pPk, pList, pSrc, pWhere, 0, 0);
139213 sqlite3ExprListDelete(db, pList);
139214 eOnePass = ONEPASS_OFF;
139215 }else{
139216 regRec = ++pParse->nMem;
139217 regRowid = ++pParse->nMem;
@@ -142457,11 +142534,16 @@
142534 pIn->eEndLoopOp = OP_Noop;
142535 }
142536 pIn++;
142537 }
142538 }
142539 testcase( iEq>0
142540 && (pLoop->wsFlags & WHERE_IN_SEEKSCAN)==0
142541 && (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 );
142542 if( iEq>0
142543 && (pLoop->wsFlags & (WHERE_IN_SEEKSCAN|WHERE_VIRTUALTABLE))==0
142544 ){
142545 sqlite3VdbeAddOp3(v, OP_SeekHit, pLevel->iIdxCur, 0, iEq);
142546 }
142547 }else{
142548 pLevel->u.in.nIn = 0;
142549 }
@@ -143507,10 +143589,11 @@
143589 char *zEndAff = 0; /* Affinity for end of range constraint */
143590 u8 bSeekPastNull = 0; /* True to seek past initial nulls */
143591 u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
143592 int omitTable; /* True if we use the index only */
143593 int regBignull = 0; /* big-null flag register */
143594 int addrSeekScan = 0; /* Opcode of the OP_SeekScan, if any */
143595
143596 pIdx = pLoop->u.btree.pIndex;
143597 iIdxCur = pLevel->iIdxCur;
143598 assert( nEq>=pLoop->nSkip );
143599
@@ -143652,22 +143735,22 @@
143735 VdbeComment((v, "NULL-scan pass ctr"));
143736 }
143737
143738 op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
143739 assert( op!=0 );
143740 if( (pLoop->wsFlags & WHERE_IN_SEEKSCAN)!=0 && op==OP_SeekGE ){
 
143741 assert( regBignull==0 );
143742 /* TUNING: The OP_SeekScan opcode seeks to reduce the number
143743 ** of expensive seek operations by replacing a single seek with
143744 ** 1 or more step operations. The question is, how many steps
143745 ** should we try before giving up and going with a seek. The cost
143746 ** of a seek is proportional to the logarithm of the of the number
143747 ** of entries in the tree, so basing the number of steps to try
143748 ** on the estimated number of rows in the btree seems like a good
143749 ** guess. */
143750 addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan,
143751 (pIdx->aiRowLogEst[0]+9)/10);
143752 VdbeCoverage(v);
143753 }
143754 sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
143755 VdbeCoverage(v);
143756 VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
@@ -143748,10 +143831,11 @@
143831 sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
143832 testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
143833 testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
143834 testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
143835 testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
143836 if( addrSeekScan ) sqlite3VdbeJumpHere(v, addrSeekScan);
143837 }
143838 if( regBignull ){
143839 /* During a NULL-scan, check to see if we have reached the end of
143840 ** the NULLs */
143841 assert( bSeekPastNull==!bStopAtNull );
@@ -147706,11 +147790,11 @@
147790 if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C';
147791 if( pTerm->eOperator & WO_SINGLE ){
147792 sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}",
147793 pTerm->leftCursor, pTerm->u.x.leftColumn);
147794 }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){
147795 sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%llx",
147796 pTerm->u.pOrInfo->indexable);
147797 }else{
147798 sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor);
147799 }
147800 sqlite3DebugPrintf(
@@ -148484,11 +148568,11 @@
148568 }
148569 }else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){
148570 /* "x IN (value, value, ...)" */
148571 nIn = sqlite3LogEst(pExpr->x.pList->nExpr);
148572 }
148573 if( pProbe->hasStat1 && rLogSize>=10 ){
148574 LogEst M, logK, safetyMargin;
148575 /* Let:
148576 ** N = the total number of rows in the table
148577 ** K = the number of entries on the RHS of the IN operator
148578 ** M = the number of rows in the table that match terms to the
@@ -148503,11 +148587,12 @@
148587 **
148588 ** Our estimates for M, K, and N might be inaccurate, so we build in
148589 ** a safety margin of 2 (LogEst: 10) that favors using the IN operator
148590 ** with the index, as using an index has better worst-case behavior.
148591 ** If we do not have real sqlite_stat1 data, always prefer to use
148592 ** the index. Do not bother with this optimization on very small
148593 ** tables (less than 2 rows) as it is pointless in that case.
148594 */
148595 M = pProbe->aiRowLogEst[saved_nEq];
148596 logK = estLog(nIn);
148597 safetyMargin = 10; /* TUNING: extra weight for indexed IN */
148598 if( M + logK + safetyMargin < nIn + rLogSize ){
@@ -209277,10 +209362,11 @@
209362 int eDetail; /* FTS5_DETAIL_XXX value */
209363 char *zContentExprlist;
209364 Fts5Tokenizer *pTok;
209365 fts5_tokenizer *pTokApi;
209366 int bLock; /* True when table is preparing statement */
209367 int ePattern; /* FTS_PATTERN_XXX constant */
209368
209369 /* Values loaded from the %_config table */
209370 int iCookie; /* Incremented when %_config is modified */
209371 int pgsz; /* Approximate page size used in %_data */
209372 int nAutomerge; /* 'automerge' setting */
@@ -209297,21 +209383,23 @@
209383 int bPrefixIndex; /* True to use prefix-indexes */
209384 #endif
209385 };
209386
209387 /* Current expected value of %_config table 'version' field */
209388 #define FTS5_CURRENT_VERSION 4
209389
209390 #define FTS5_CONTENT_NORMAL 0
209391 #define FTS5_CONTENT_NONE 1
209392 #define FTS5_CONTENT_EXTERNAL 2
209393
209394 #define FTS5_DETAIL_FULL 0
209395 #define FTS5_DETAIL_NONE 1
209396 #define FTS5_DETAIL_COLUMNS 2
209397
209398 #define FTS5_PATTERN_NONE 0
209399 #define FTS5_PATTERN_LIKE 65 /* matches SQLITE_INDEX_CONSTRAINT_LIKE */
209400 #define FTS5_PATTERN_GLOB 66 /* matches SQLITE_INDEX_CONSTRAINT_GLOB */
209401
209402 static int sqlite3Fts5ConfigParse(
209403 Fts5Global*, sqlite3*, int, const char **, Fts5Config**, char**
209404 );
209405 static void sqlite3Fts5ConfigFree(Fts5Config*);
@@ -209647,12 +209735,11 @@
209735
209736 static int sqlite3Fts5GetTokenizer(
209737 Fts5Global*,
209738 const char **azArg,
209739 int nArg,
209740 Fts5Config*,
 
209741 char **pzErr
209742 );
209743
209744 static Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64);
209745
@@ -209777,14 +209864,22 @@
209864 };
209865
209866 /* Parse a MATCH expression. */
209867 static int sqlite3Fts5ExprNew(
209868 Fts5Config *pConfig,
209869 int bPhraseToAnd,
209870 int iCol, /* Column on LHS of MATCH operator */
209871 const char *zExpr,
209872 Fts5Expr **ppNew,
209873 char **pzErr
209874 );
209875 static int sqlite3Fts5ExprPattern(
209876 Fts5Config *pConfig,
209877 int bGlob,
209878 int iCol,
209879 const char *zText,
209880 Fts5Expr **pp
209881 );
209882
209883 /*
209884 ** for(rc = sqlite3Fts5ExprFirst(pExpr, pIdx, bDesc);
209885 ** rc==SQLITE_OK && 0==sqlite3Fts5ExprEof(pExpr);
@@ -209890,10 +209985,14 @@
209985 /**************************************************************************
209986 ** Interface to code in fts5_tokenizer.c.
209987 */
209988
209989 static int sqlite3Fts5TokenizerInit(fts5_api*);
209990 static int sqlite3Fts5TokenizerPattern(
209991 int (*xCreate)(void*, const char**, int, Fts5Tokenizer**),
209992 Fts5Tokenizer *pTok
209993 );
209994 /*
209995 ** End of interface to code in fts5_tokenizer.c.
209996 **************************************************************************/
209997
209998 /**************************************************************************
@@ -212869,11 +212968,11 @@
212968 if( p==0 ){
212969 *pzErr = sqlite3_mprintf("parse error in tokenize directive");
212970 rc = SQLITE_ERROR;
212971 }else{
212972 rc = sqlite3Fts5GetTokenizer(pGlobal,
212973 (const char**)azArg, (int)nArg, pConfig,
212974 pzErr
212975 );
212976 }
212977 }
212978 }
@@ -212941,13 +213040,11 @@
213040 ** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error
213041 ** code if an error occurs.
213042 */
213043 static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){
213044 assert( pConfig->pTok==0 && pConfig->pTokApi==0 );
213045 return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0);
 
 
213046 }
213047
213048 /*
213049 ** Gobble up the first bareword or quoted word from the input buffer zIn.
213050 ** Return a pointer to the character immediately following the last in
@@ -213635,10 +213732,11 @@
213732 char *zErr;
213733 int rc;
213734 int nPhrase; /* Size of apPhrase array */
213735 Fts5ExprPhrase **apPhrase; /* Array of all phrases */
213736 Fts5ExprNode *pExpr; /* Result of a successful parse */
213737 int bPhraseToAnd; /* Convert "a+b" to "a AND b" */
213738 };
213739
213740 static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){
213741 va_list ap;
213742 va_start(ap, zFmt);
@@ -213723,10 +213821,11 @@
213821 static void *fts5ParseAlloc(u64 t){ return sqlite3_malloc64((sqlite3_int64)t);}
213822 static void fts5ParseFree(void *p){ sqlite3_free(p); }
213823
213824 static int sqlite3Fts5ExprNew(
213825 Fts5Config *pConfig, /* FTS5 Configuration */
213826 int bPhraseToAnd,
213827 int iCol,
213828 const char *zExpr, /* Expression text */
213829 Fts5Expr **ppNew,
213830 char **pzErr
213831 ){
@@ -213738,10 +213837,11 @@
213837 Fts5Expr *pNew;
213838
213839 *ppNew = 0;
213840 *pzErr = 0;
213841 memset(&sParse, 0, sizeof(sParse));
213842 sParse.bPhraseToAnd = bPhraseToAnd;
213843 pEngine = sqlite3Fts5ParserAlloc(fts5ParseAlloc);
213844 if( pEngine==0 ){ return SQLITE_NOMEM; }
213845 sParse.pConfig = pConfig;
213846
213847 do {
@@ -213780,10 +213880,11 @@
213880 }
213881 pNew->pIndex = 0;
213882 pNew->pConfig = pConfig;
213883 pNew->apExprPhrase = sParse.apPhrase;
213884 pNew->nPhrase = sParse.nPhrase;
213885 pNew->bDesc = 0;
213886 sParse.apPhrase = 0;
213887 }
213888 }else{
213889 sqlite3Fts5ParseNodeFree(sParse.pExpr);
213890 }
@@ -213790,10 +213891,85 @@
213891
213892 sqlite3_free(sParse.apPhrase);
213893 *pzErr = sParse.zErr;
213894 return sParse.rc;
213895 }
213896
213897 /*
213898 ** This function is only called when using the special 'trigram' tokenizer.
213899 ** Argument zText contains the text of a LIKE or GLOB pattern matched
213900 ** against column iCol. This function creates and compiles an FTS5 MATCH
213901 ** expression that will match a superset of the rows matched by the LIKE or
213902 ** GLOB. If successful, SQLITE_OK is returned. Otherwise, an SQLite error
213903 ** code.
213904 */
213905 static int sqlite3Fts5ExprPattern(
213906 Fts5Config *pConfig, int bGlob, int iCol, const char *zText, Fts5Expr **pp
213907 ){
213908 i64 nText = strlen(zText);
213909 char *zExpr = (char*)sqlite3_malloc64(nText*4 + 1);
213910 int rc = SQLITE_OK;
213911
213912 if( zExpr==0 ){
213913 rc = SQLITE_NOMEM;
213914 }else{
213915 char aSpec[3];
213916 int iOut = 0;
213917 int i = 0;
213918 int iFirst = 0;
213919
213920 if( bGlob==0 ){
213921 aSpec[0] = '_';
213922 aSpec[1] = '%';
213923 aSpec[2] = 0;
213924 }else{
213925 aSpec[0] = '*';
213926 aSpec[1] = '?';
213927 aSpec[2] = '[';
213928 }
213929
213930 while( i<=nText ){
213931 if( i==nText
213932 || zText[i]==aSpec[0] || zText[i]==aSpec[1] || zText[i]==aSpec[2]
213933 ){
213934 if( i-iFirst>=3 ){
213935 int jj;
213936 zExpr[iOut++] = '"';
213937 for(jj=iFirst; jj<i; jj++){
213938 zExpr[iOut++] = zText[jj];
213939 if( zText[jj]=='"' ) zExpr[iOut++] = '"';
213940 }
213941 zExpr[iOut++] = '"';
213942 zExpr[iOut++] = ' ';
213943 }
213944 if( zText[i]==aSpec[2] ){
213945 i += 2;
213946 if( zText[i-1]=='^' ) i++;
213947 while( i<nText && zText[i]!=']' ) i++;
213948 }
213949 iFirst = i+1;
213950 }
213951 i++;
213952 }
213953 if( iOut>0 ){
213954 int bAnd = 0;
213955 if( pConfig->eDetail!=FTS5_DETAIL_FULL ){
213956 bAnd = 1;
213957 if( pConfig->eDetail==FTS5_DETAIL_NONE ){
213958 iCol = pConfig->nCol;
213959 }
213960 }
213961 zExpr[iOut] = '\0';
213962 rc = sqlite3Fts5ExprNew(pConfig, bAnd, iCol, zExpr, pp,pConfig->pzErrmsg);
213963 }else{
213964 *pp = 0;
213965 }
213966 sqlite3_free(zExpr);
213967 }
213968
213969 return rc;
213970 }
213971
213972 /*
213973 ** Free the expression node object passed as the only argument.
213974 */
213975 static void sqlite3Fts5ParseNodeFree(Fts5ExprNode *p){
@@ -215167,10 +215343,24 @@
215343
215344 static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p){
215345 assert( pParse->pExpr==0 );
215346 pParse->pExpr = p;
215347 }
215348
215349 static int parseGrowPhraseArray(Fts5Parse *pParse){
215350 if( (pParse->nPhrase % 8)==0 ){
215351 sqlite3_int64 nByte = sizeof(Fts5ExprPhrase*) * (pParse->nPhrase + 8);
215352 Fts5ExprPhrase **apNew;
215353 apNew = (Fts5ExprPhrase**)sqlite3_realloc64(pParse->apPhrase, nByte);
215354 if( apNew==0 ){
215355 pParse->rc = SQLITE_NOMEM;
215356 return SQLITE_NOMEM;
215357 }
215358 pParse->apPhrase = apNew;
215359 }
215360 return SQLITE_OK;
215361 }
215362
215363 /*
215364 ** This function is called by the parser to process a string token. The
215365 ** string may or may not be quoted. In any case it is tokenized and a
215366 ** phrase object consisting of all tokens returned.
@@ -215203,20 +215393,13 @@
215393 fts5ExprPhraseFree(sCtx.pPhrase);
215394 sCtx.pPhrase = 0;
215395 }else{
215396
215397 if( pAppend==0 ){
215398 if( parseGrowPhraseArray(pParse) ){
215399 fts5ExprPhraseFree(sCtx.pPhrase);
215400 return 0;
 
 
 
 
 
 
 
215401 }
215402 pParse->nPhrase++;
215403 }
215404
215405 if( sCtx.pPhrase==0 ){
@@ -215618,10 +215801,71 @@
215801 sqlite3_free(pSub);
215802 }else{
215803 p->apChild[p->nChild++] = pSub;
215804 }
215805 }
215806
215807 /*
215808 ** This function is used when parsing LIKE or GLOB patterns against
215809 ** trigram indexes that specify either detail=column or detail=none.
215810 ** It converts a phrase:
215811 **
215812 ** abc + def + ghi
215813 **
215814 ** into an AND tree:
215815 **
215816 ** abc AND def AND ghi
215817 */
215818 static Fts5ExprNode *fts5ParsePhraseToAnd(
215819 Fts5Parse *pParse,
215820 Fts5ExprNearset *pNear
215821 ){
215822 int nTerm = pNear->apPhrase[0]->nTerm;
215823 int ii;
215824 int nByte;
215825 Fts5ExprNode *pRet;
215826
215827 assert( pNear->nPhrase==1 );
215828 assert( pParse->bPhraseToAnd );
215829
215830 nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*);
215831 pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
215832 if( pRet ){
215833 pRet->eType = FTS5_AND;
215834 pRet->nChild = nTerm;
215835 fts5ExprAssignXNext(pRet);
215836 pParse->nPhrase--;
215837 for(ii=0; ii<nTerm; ii++){
215838 Fts5ExprPhrase *pPhrase = (Fts5ExprPhrase*)sqlite3Fts5MallocZero(
215839 &pParse->rc, sizeof(Fts5ExprPhrase)
215840 );
215841 if( pPhrase ){
215842 if( parseGrowPhraseArray(pParse) ){
215843 fts5ExprPhraseFree(pPhrase);
215844 }else{
215845 pParse->apPhrase[pParse->nPhrase++] = pPhrase;
215846 pPhrase->nTerm = 1;
215847 pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup(
215848 &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1
215849 );
215850 pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING,
215851 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase)
215852 );
215853 }
215854 }
215855 }
215856
215857 if( pParse->rc ){
215858 sqlite3Fts5ParseNodeFree(pRet);
215859 pRet = 0;
215860 }else{
215861 sqlite3Fts5ParseNearsetFree(pNear);
215862 }
215863 }
215864
215865 return pRet;
215866 }
215867
215868 /*
215869 ** Allocate and return a new expression object. If anything goes wrong (i.e.
215870 ** OOM error), leave an error code in pParse and return NULL.
215871 */
@@ -215643,55 +215887,62 @@
215887 );
215888 if( eType==FTS5_STRING && pNear==0 ) return 0;
215889 if( eType!=FTS5_STRING && pLeft==0 ) return pRight;
215890 if( eType!=FTS5_STRING && pRight==0 ) return pLeft;
215891
215892 if( eType==FTS5_STRING
215893 && pParse->bPhraseToAnd
215894 && pNear->apPhrase[0]->nTerm>1
215895 ){
215896 pRet = fts5ParsePhraseToAnd(pParse, pNear);
215897 }else{
215898 if( eType==FTS5_NOT ){
215899 nChild = 2;
215900 }else if( eType==FTS5_AND || eType==FTS5_OR ){
215901 nChild = 2;
215902 if( pLeft->eType==eType ) nChild += pLeft->nChild-1;
215903 if( pRight->eType==eType ) nChild += pRight->nChild-1;
215904 }
215905
215906 nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1);
215907 pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte);
215908
215909 if( pRet ){
215910 pRet->eType = eType;
215911 pRet->pNear = pNear;
215912 fts5ExprAssignXNext(pRet);
215913 if( eType==FTS5_STRING ){
215914 int iPhrase;
215915 for(iPhrase=0; iPhrase<pNear->nPhrase; iPhrase++){
215916 pNear->apPhrase[iPhrase]->pNode = pRet;
215917 if( pNear->apPhrase[iPhrase]->nTerm==0 ){
215918 pRet->xNext = 0;
215919 pRet->eType = FTS5_EOF;
215920 }
215921 }
215922
215923 if( pParse->pConfig->eDetail!=FTS5_DETAIL_FULL ){
215924 Fts5ExprPhrase *pPhrase = pNear->apPhrase[0];
215925 if( pNear->nPhrase!=1
215926 || pPhrase->nTerm>1
215927 || (pPhrase->nTerm>0 && pPhrase->aTerm[0].bFirst)
215928 ){
215929 assert( pParse->rc==SQLITE_OK );
215930 pParse->rc = SQLITE_ERROR;
215931 assert( pParse->zErr==0 );
215932 pParse->zErr = sqlite3_mprintf(
215933 "fts5: %s queries are not supported (detail!=full)",
215934 pNear->nPhrase==1 ? "phrase": "NEAR"
215935 );
215936 sqlite3_free(pRet);
215937 pRet = 0;
215938 }
215939 }
215940 }else{
215941 fts5ExprAddChildren(pRet, pLeft);
215942 fts5ExprAddChildren(pRet, pRight);
215943 }
215944 }
215945 }
215946 }
215947
215948 if( pRet==0 ){
@@ -216041,11 +216292,11 @@
216292 zExpr = (const char*)sqlite3_value_text(apVal[0]);
216293 if( zExpr==0 ) zExpr = "";
216294
216295 rc = sqlite3Fts5ConfigParse(pGlobal, db, nConfig, azConfig, &pConfig, &zErr);
216296 if( rc==SQLITE_OK ){
216297 rc = sqlite3Fts5ExprNew(pConfig, 0, pConfig->nCol, zExpr, &pExpr, &zErr);
216298 }
216299 if( rc==SQLITE_OK ){
216300 char *zText;
216301 if( pExpr->pRoot->xNext==0 ){
216302 zText = sqlite3_mprintf("");
@@ -216746,12 +216997,13 @@
216997 pPtr = (u8*)p;
216998
216999 /* If this is a new rowid, append the 4-byte size field for the previous
217000 ** entry, and the new rowid for this entry. */
217001 if( iRowid!=p->iRowid ){
217002 u64 iDiff = (u64)iRowid - (u64)p->iRowid;
217003 fts5HashAddPoslistSize(pHash, p, 0);
217004 p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iDiff);
217005 p->iRowid = iRowid;
217006 bNew = 1;
217007 p->iSzPoslist = p->nData;
217008 if( pHash->eDetail!=FTS5_DETAIL_NONE ){
217009 p->nData += 1;
@@ -218722,11 +218974,11 @@
218974 n = pIter->iEndofDoclist;
218975 }
218976
218977 ASSERT_SZLEAF_OK(pIter->pLeaf);
218978 while( 1 ){
218979 u64 iDelta = 0;
218980
218981 if( eDetail==FTS5_DETAIL_NONE ){
218982 /* todo */
218983 if( i<n && a[i]==0 ){
218984 i++;
@@ -218737,11 +218989,11 @@
218989 int bDummy;
218990 i += fts5GetPoslistSize(&a[i], &nPos, &bDummy);
218991 i += nPos;
218992 }
218993 if( i>=n ) break;
218994 i += fts5GetVarint(&a[i], &iDelta);
218995 pIter->iRowid += iDelta;
218996
218997 /* If necessary, grow the pIter->aRowidOffset[] array. */
218998 if( iRowidOffset>=pIter->nRowidOffset ){
218999 int nNew = pIter->nRowidOffset + 8;
@@ -218836,20 +219088,20 @@
219088 UNUSED_PARAM(pbUnused);
219089
219090 if( pIter->iRowidOffset>0 ){
219091 u8 *a = pIter->pLeaf->p;
219092 int iOff;
219093 u64 iDelta;
219094
219095 pIter->iRowidOffset--;
219096 pIter->iLeafOffset = pIter->aRowidOffset[pIter->iRowidOffset];
219097 fts5SegIterLoadNPos(p, pIter);
219098 iOff = pIter->iLeafOffset;
219099 if( p->pConfig->eDetail!=FTS5_DETAIL_NONE ){
219100 iOff += pIter->nPos;
219101 }
219102 fts5GetVarint(&a[iOff], &iDelta);
219103 pIter->iRowid -= iDelta;
219104 }else{
219105 fts5SegIterReverseNewPage(p, pIter);
219106 }
219107 }
@@ -224064,10 +224316,27 @@
224316 {
224317 pIdxInfo->idxFlags |= SQLITE_INDEX_SCAN_UNIQUE;
224318 }
224319 #endif
224320 }
224321
224322 static int fts5UsePatternMatch(
224323 Fts5Config *pConfig,
224324 struct sqlite3_index_constraint *p
224325 ){
224326 assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB );
224327 assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE );
224328 if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){
224329 return 1;
224330 }
224331 if( pConfig->ePattern==FTS5_PATTERN_LIKE
224332 && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB)
224333 ){
224334 return 1;
224335 }
224336 return 0;
224337 }
224338
224339 /*
224340 ** Implementation of the xBestIndex method for FTS5 tables. Within the
224341 ** WHERE constraint, it searches for the following:
224342 **
@@ -224094,11 +224363,13 @@
224363 ** idxStr is used to encode data from the WHERE clause. For each argument
224364 ** passed to the xFilter method, the following is appended to idxStr:
224365 **
224366 ** Match against table column: "m"
224367 ** Match against rank column: "r"
224368 ** Match against other column: "M<column-number>"
224369 ** LIKE against other column: "L<column-number>"
224370 ** GLOB against other column: "G<column-number>"
224371 ** Equality constraint against the rowid: "="
224372 ** A < or <= against the rowid: "<"
224373 ** A > or >= against the rowid: ">"
224374 **
224375 ** This function ensures that there is at most one "r" or "=". And that if
@@ -224155,11 +224426,11 @@
224426 "recursively defined fts5 content table"
224427 );
224428 return SQLITE_ERROR;
224429 }
224430
224431 idxStr = (char*)sqlite3_malloc(pInfo->nConstraint * 8 + 1);
224432 if( idxStr==0 ) return SQLITE_NOMEM;
224433 pInfo->idxStr = idxStr;
224434 pInfo->needToFreeIdxStr = 1;
224435
224436 for(i=0; i<pInfo->nConstraint; i++){
@@ -224179,29 +224450,33 @@
224450 }else{
224451 if( iCol==nCol+1 ){
224452 if( bSeenRank ) continue;
224453 idxStr[iIdxStr++] = 'r';
224454 bSeenRank = 1;
224455 }else if( iCol>=0 ){
224456 bSeenMatch = 1;
224457 idxStr[iIdxStr++] = 'M';
224458 sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
224459 idxStr += strlen(&idxStr[iIdxStr]);
224460 assert( idxStr[iIdxStr]=='\0' );
 
 
224461 }
224462 pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224463 pInfo->aConstraintUsage[i].omit = 1;
224464 }
224465 }else if( p->usable ){
224466 if( iCol>=0 && iCol<nCol && fts5UsePatternMatch(pConfig, p) ){
224467 assert( p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB );
224468 idxStr[iIdxStr++] = p->op==FTS5_PATTERN_LIKE ? 'L' : 'G';
224469 sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol);
224470 idxStr += strlen(&idxStr[iIdxStr]);
224471 pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224472 assert( idxStr[iIdxStr]=='\0' );
224473 }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){
224474 idxStr[iIdxStr++] = '=';
224475 bSeenEq = 1;
224476 pInfo->aConstraintUsage[i].argvIndex = ++iCons;
224477 }
224478 }
224479 }
224480
224481 if( bSeenEq==0 ){
224482 for(i=0; i<pInfo->nConstraint; i++){
@@ -224830,41 +225105,55 @@
225105 for(i=0; i<nVal; i++){
225106 switch( idxStr[iIdxStr++] ){
225107 case 'r':
225108 pRank = apVal[i];
225109 break;
225110 case 'M': {
225111 const char *zText = (const char*)sqlite3_value_text(apVal[i]);
225112 if( zText==0 ) zText = "";
225113 iCol = 0;
225114 do{
225115 iCol = iCol*10 + (idxStr[iIdxStr]-'0');
225116 iIdxStr++;
225117 }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
 
 
 
 
 
225118
225119 if( zText[0]=='*' ){
225120 /* The user has issued a query of the form "MATCH '*...'". This
225121 ** indicates that the MATCH expression is not a full text query,
225122 ** but a request for an internal parameter. */
225123 rc = fts5SpecialMatch(pTab, pCsr, &zText[1]);
225124 goto filter_out;
225125 }else{
225126 char **pzErr = &pTab->p.base.zErrMsg;
225127 rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr);
225128 if( rc==SQLITE_OK ){
225129 rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);
225130 pExpr = 0;
225131 }
225132 if( rc!=SQLITE_OK ) goto filter_out;
225133 }
225134
225135 break;
225136 }
225137 case 'L':
225138 case 'G': {
225139 int bGlob = (idxStr[iIdxStr-1]=='G');
225140 const char *zText = (const char*)sqlite3_value_text(apVal[i]);
225141 iCol = 0;
225142 do{
225143 iCol = iCol*10 + (idxStr[iIdxStr]-'0');
225144 iIdxStr++;
225145 }while( idxStr[iIdxStr]>='0' && idxStr[iIdxStr]<='9' );
225146 if( zText ){
225147 rc = sqlite3Fts5ExprPattern(pConfig, bGlob, iCol, zText, &pExpr);
225148 }
225149 if( rc==SQLITE_OK ){
225150 rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);
225151 pExpr = 0;
225152 }
225153 if( rc!=SQLITE_OK ) goto filter_out;
225154 break;
225155 }
225156 case '=':
225157 pRowidEq = apVal[i];
225158 break;
225159 case '<':
@@ -226273,12 +226562,11 @@
226562
226563 static int sqlite3Fts5GetTokenizer(
226564 Fts5Global *pGlobal,
226565 const char **azArg,
226566 int nArg,
226567 Fts5Config *pConfig,
 
226568 char **pzErr
226569 ){
226570 Fts5TokenizerModule *pMod;
226571 int rc = SQLITE_OK;
226572
@@ -226286,20 +226574,26 @@
226574 if( pMod==0 ){
226575 assert( nArg>0 );
226576 rc = SQLITE_ERROR;
226577 *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]);
226578 }else{
226579 rc = pMod->x.xCreate(
226580 pMod->pUserData, &azArg[1], (nArg?nArg-1:0), &pConfig->pTok
226581 );
226582 pConfig->pTokApi = &pMod->x;
226583 if( rc!=SQLITE_OK ){
226584 if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor");
226585 }else{
226586 pConfig->ePattern = sqlite3Fts5TokenizerPattern(
226587 pMod->x.xCreate, pConfig->pTok
226588 );
226589 }
226590 }
226591
226592 if( rc!=SQLITE_OK ){
226593 pConfig->pTokApi = 0;
226594 pConfig->pTok = 0;
226595 }
226596
226597 return rc;
226598 }
226599
@@ -226344,11 +226638,11 @@
226638 int nArg, /* Number of args */
226639 sqlite3_value **apUnused /* Function arguments */
226640 ){
226641 assert( nArg==0 );
226642 UNUSED_PARAM2(nArg, apUnused);
226643 sqlite3_result_text(pCtx, "fts5: 2020-10-12 18:09:16 7e17c2f4b7dc9b563d0b4da949bb134dc7c4fc9c86ce03891432a884ca6409d5", -1, SQLITE_TRANSIENT);
226644 }
226645
226646 /*
226647 ** Return true if zName is the extension on one of the shadow tables used
226648 ** by this module.
@@ -228897,10 +229191,135 @@
229191 sCtx.aBuf = p->aBuf;
229192 return p->tokenizer.xTokenize(
229193 p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb
229194 );
229195 }
229196
229197 /**************************************************************************
229198 ** Start of trigram implementation.
229199 */
229200 typedef struct TrigramTokenizer TrigramTokenizer;
229201 struct TrigramTokenizer {
229202 int bFold; /* True to fold to lower-case */
229203 };
229204
229205 /*
229206 ** Free a trigram tokenizer.
229207 */
229208 static void fts5TriDelete(Fts5Tokenizer *p){
229209 sqlite3_free(p);
229210 }
229211
229212 /*
229213 ** Allocate a trigram tokenizer.
229214 */
229215 static int fts5TriCreate(
229216 void *pCtx,
229217 const char **azArg,
229218 int nArg,
229219 Fts5Tokenizer **ppOut
229220 ){
229221 int rc = SQLITE_OK;
229222 TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew));
229223 if( pNew==0 ){
229224 rc = SQLITE_NOMEM;
229225 }else{
229226 int i;
229227 pNew->bFold = 1;
229228 for(i=0; rc==SQLITE_OK && i<nArg; i+=2){
229229 const char *zArg = azArg[i+1];
229230 if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){
229231 if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){
229232 rc = SQLITE_ERROR;
229233 }else{
229234 pNew->bFold = (zArg[0]=='0');
229235 }
229236 }else{
229237 rc = SQLITE_ERROR;
229238 }
229239 }
229240 if( rc!=SQLITE_OK ){
229241 fts5TriDelete((Fts5Tokenizer*)pNew);
229242 pNew = 0;
229243 }
229244 }
229245 *ppOut = (Fts5Tokenizer*)pNew;
229246 return rc;
229247 }
229248
229249 /*
229250 ** Trigram tokenizer tokenize routine.
229251 */
229252 static int fts5TriTokenize(
229253 Fts5Tokenizer *pTok,
229254 void *pCtx,
229255 int flags,
229256 const char *pText, int nText,
229257 int (*xToken)(void*, int, const char*, int, int, int)
229258 ){
229259 TrigramTokenizer *p = (TrigramTokenizer*)pTok;
229260 int rc = SQLITE_OK;
229261 char aBuf[32];
229262 const unsigned char *zIn = (const unsigned char*)pText;
229263 const unsigned char *zEof = &zIn[nText];
229264 u32 iCode;
229265
229266 while( 1 ){
229267 char *zOut = aBuf;
229268 int iStart = zIn - (const unsigned char*)pText;
229269 const unsigned char *zNext;
229270
229271 READ_UTF8(zIn, zEof, iCode);
229272 if( iCode==0 ) break;
229273 zNext = zIn;
229274 if( zIn<zEof ){
229275 if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
229276 WRITE_UTF8(zOut, iCode);
229277 READ_UTF8(zIn, zEof, iCode);
229278 if( iCode==0 ) break;
229279 }else{
229280 break;
229281 }
229282 if( zIn<zEof ){
229283 if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
229284 WRITE_UTF8(zOut, iCode);
229285 READ_UTF8(zIn, zEof, iCode);
229286 if( iCode==0 ) break;
229287 if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0);
229288 WRITE_UTF8(zOut, iCode);
229289 }else{
229290 break;
229291 }
229292 rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf);
229293 if( rc!=SQLITE_OK ) break;
229294 zIn = zNext;
229295 }
229296
229297 return rc;
229298 }
229299
229300 /*
229301 ** Argument xCreate is a pointer to a constructor function for a tokenizer.
229302 ** pTok is a tokenizer previously created using the same method. This function
229303 ** returns one of FTS5_PATTERN_NONE, FTS5_PATTERN_LIKE or FTS5_PATTERN_GLOB
229304 ** indicating the style of pattern matching that the tokenizer can support.
229305 ** In practice, this is:
229306 **
229307 ** "trigram" tokenizer, case_sensitive=1 - FTS5_PATTERN_GLOB
229308 ** "trigram" tokenizer, case_sensitive=0 (the default) - FTS5_PATTERN_LIKE
229309 ** all other tokenizers - FTS5_PATTERN_NONE
229310 */
229311 static int sqlite3Fts5TokenizerPattern(
229312 int (*xCreate)(void*, const char**, int, Fts5Tokenizer**),
229313 Fts5Tokenizer *pTok
229314 ){
229315 if( xCreate==fts5TriCreate ){
229316 TrigramTokenizer *p = (TrigramTokenizer*)pTok;
229317 return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB;
229318 }
229319 return FTS5_PATTERN_NONE;
229320 }
229321
229322 /*
229323 ** Register all built-in tokenizers with FTS5.
229324 */
229325 static int sqlite3Fts5TokenizerInit(fts5_api *pApi){
@@ -228909,10 +229328,11 @@
229328 fts5_tokenizer x;
229329 } aBuiltin[] = {
229330 { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}},
229331 { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }},
229332 { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }},
229333 { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}},
229334 };
229335
229336 int rc = SQLITE_OK; /* Return code */
229337 int i; /* To iterate through builtin functions */
229338
@@ -231140,12 +231560,12 @@
231560 }
231561 #endif /* SQLITE_CORE */
231562 #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */
231563
231564 /************** End of stmt.c ************************************************/
231565 #if __LINE__!=231565
231566 #undef SQLITE_SOURCE_ID
231567 #define SQLITE_SOURCE_ID "2020-10-19 20:49:54 75a0288871ccb2a69a636cbb328fe19045a0d0ef96a193ecd118b9a19678alt2"
231568 #endif
231569 /* Return the source-id for this library */
231570 SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
231571 /************************** End of sqlite3.c ******************************/
231572
+11 -4
--- src/sqlite3.h
+++ src/sqlite3.h
@@ -123,11 +123,11 @@
123123
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
124124
** [sqlite_version()] and [sqlite_source_id()].
125125
*/
126126
#define SQLITE_VERSION "3.34.0"
127127
#define SQLITE_VERSION_NUMBER 3034000
128
-#define SQLITE_SOURCE_ID "2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c402e5"
128
+#define SQLITE_SOURCE_ID "2020-10-19 20:49:54 75a0288871ccb2a69a636cbb328fe19045a0d0ef96a193ecd118b9a196784d2d"
129129
130130
/*
131131
** CAPI3REF: Run-Time Library Version Numbers
132132
** KEYWORDS: sqlite3_version sqlite3_sourceid
133133
**
@@ -9242,22 +9242,29 @@
92429242
92439243
/*
92449244
** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
92459245
**
92469246
** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
9247
-** method of a [virtual table], then it returns true if and only if the
9247
+** method of a [virtual table], then it might return true if the
92489248
** column is being fetched as part of an UPDATE operation during which the
9249
-** column value will not change. Applications might use this to substitute
9250
-** a return value that is less expensive to compute and that the corresponding
9249
+** column value will not change. The virtual table implementation can use
9250
+** this hint as permission to substitute a return value that is less
9251
+** expensive to compute and that the corresponding
92519252
** [xUpdate] method understands as a "no-change" value.
92529253
**
92539254
** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
92549255
** the column is not changed by the UPDATE statement, then the xColumn
92559256
** method can optionally return without setting a result, without calling
92569257
** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
92579258
** In that case, [sqlite3_value_nochange(X)] will return true for the
92589259
** same column in the [xUpdate] method.
9260
+**
9261
+** The sqlite3_vtab_nochange() routine is an optimization. Virtual table
9262
+** implementations should continue to give a correct answer even if the
9263
+** sqlite3_vtab_nochange() interface were to always return false. In the
9264
+** current implementation, the sqlite3_vtab_nochange() interface does always
9265
+** returns false for the enhanced [UPDATE FROM] statement.
92599266
*/
92609267
SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
92619268
92629269
/*
92639270
** CAPI3REF: Determine The Collation For a Virtual Table Constraint
92649271
--- src/sqlite3.h
+++ src/sqlite3.h
@@ -123,11 +123,11 @@
123 ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
124 ** [sqlite_version()] and [sqlite_source_id()].
125 */
126 #define SQLITE_VERSION "3.34.0"
127 #define SQLITE_VERSION_NUMBER 3034000
128 #define SQLITE_SOURCE_ID "2020-09-30 18:06:51 4a43430fd23f88352c33b29c4c105b72f6dc821f94bf362040c41a1648c402e5"
129
130 /*
131 ** CAPI3REF: Run-Time Library Version Numbers
132 ** KEYWORDS: sqlite3_version sqlite3_sourceid
133 **
@@ -9242,22 +9242,29 @@
9242
9243 /*
9244 ** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
9245 **
9246 ** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
9247 ** method of a [virtual table], then it returns true if and only if the
9248 ** column is being fetched as part of an UPDATE operation during which the
9249 ** column value will not change. Applications might use this to substitute
9250 ** a return value that is less expensive to compute and that the corresponding
 
9251 ** [xUpdate] method understands as a "no-change" value.
9252 **
9253 ** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
9254 ** the column is not changed by the UPDATE statement, then the xColumn
9255 ** method can optionally return without setting a result, without calling
9256 ** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
9257 ** In that case, [sqlite3_value_nochange(X)] will return true for the
9258 ** same column in the [xUpdate] method.
 
 
 
 
 
 
9259 */
9260 SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
9261
9262 /*
9263 ** CAPI3REF: Determine The Collation For a Virtual Table Constraint
9264
--- src/sqlite3.h
+++ src/sqlite3.h
@@ -123,11 +123,11 @@
123 ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
124 ** [sqlite_version()] and [sqlite_source_id()].
125 */
126 #define SQLITE_VERSION "3.34.0"
127 #define SQLITE_VERSION_NUMBER 3034000
128 #define SQLITE_SOURCE_ID "2020-10-19 20:49:54 75a0288871ccb2a69a636cbb328fe19045a0d0ef96a193ecd118b9a196784d2d"
129
130 /*
131 ** CAPI3REF: Run-Time Library Version Numbers
132 ** KEYWORDS: sqlite3_version sqlite3_sourceid
133 **
@@ -9242,22 +9242,29 @@
9242
9243 /*
9244 ** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE
9245 **
9246 ** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn]
9247 ** method of a [virtual table], then it might return true if the
9248 ** column is being fetched as part of an UPDATE operation during which the
9249 ** column value will not change. The virtual table implementation can use
9250 ** this hint as permission to substitute a return value that is less
9251 ** expensive to compute and that the corresponding
9252 ** [xUpdate] method understands as a "no-change" value.
9253 **
9254 ** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that
9255 ** the column is not changed by the UPDATE statement, then the xColumn
9256 ** method can optionally return without setting a result, without calling
9257 ** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces].
9258 ** In that case, [sqlite3_value_nochange(X)] will return true for the
9259 ** same column in the [xUpdate] method.
9260 **
9261 ** The sqlite3_vtab_nochange() routine is an optimization. Virtual table
9262 ** implementations should continue to give a correct answer even if the
9263 ** sqlite3_vtab_nochange() interface were to always return false. In the
9264 ** current implementation, the sqlite3_vtab_nochange() interface does always
9265 ** returns false for the enhanced [UPDATE FROM] statement.
9266 */
9267 SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
9268
9269 /*
9270 ** CAPI3REF: Determine The Collation For a Virtual Table Constraint
9271
+2 -2
--- src/timeline.c
+++ src/timeline.c
@@ -484,11 +484,11 @@
484484
}
485485
}
486486
if( zType[0]=='c' && pGraph ){
487487
int nParent = 0;
488488
int nCherrypick = 0;
489
- int aParent[GR_MAX_RAIL];
489
+ GraphRowId aParent[GR_MAX_RAIL];
490490
static Stmt qparent;
491491
db_static_prepare(&qparent,
492492
"SELECT pid FROM plink"
493493
" WHERE cid=:rid AND pid NOT IN phantom"
494494
" ORDER BY isprim DESC /*sort*/"
@@ -2075,11 +2075,11 @@
20752075
addFileGlobDescription(zChng, &desc);
20762076
}else if( (p_rid || d_rid) && g.perm.Read && zTagSql==0 ){
20772077
/* If p= or d= is present, ignore all other parameters other than n= */
20782078
char *zUuid;
20792079
const char *zCiName;
2080
- int np, nd;
2080
+ int np = 0, nd;
20812081
const char *zBackTo = 0;
20822082
int ridBackTo = 0;
20832083
20842084
tmFlags |= TIMELINE_XMERGE | TIMELINE_FILLGAPS;
20852085
if( p_rid && d_rid ){
20862086
--- src/timeline.c
+++ src/timeline.c
@@ -484,11 +484,11 @@
484 }
485 }
486 if( zType[0]=='c' && pGraph ){
487 int nParent = 0;
488 int nCherrypick = 0;
489 int aParent[GR_MAX_RAIL];
490 static Stmt qparent;
491 db_static_prepare(&qparent,
492 "SELECT pid FROM plink"
493 " WHERE cid=:rid AND pid NOT IN phantom"
494 " ORDER BY isprim DESC /*sort*/"
@@ -2075,11 +2075,11 @@
2075 addFileGlobDescription(zChng, &desc);
2076 }else if( (p_rid || d_rid) && g.perm.Read && zTagSql==0 ){
2077 /* If p= or d= is present, ignore all other parameters other than n= */
2078 char *zUuid;
2079 const char *zCiName;
2080 int np, nd;
2081 const char *zBackTo = 0;
2082 int ridBackTo = 0;
2083
2084 tmFlags |= TIMELINE_XMERGE | TIMELINE_FILLGAPS;
2085 if( p_rid && d_rid ){
2086
--- src/timeline.c
+++ src/timeline.c
@@ -484,11 +484,11 @@
484 }
485 }
486 if( zType[0]=='c' && pGraph ){
487 int nParent = 0;
488 int nCherrypick = 0;
489 GraphRowId aParent[GR_MAX_RAIL];
490 static Stmt qparent;
491 db_static_prepare(&qparent,
492 "SELECT pid FROM plink"
493 " WHERE cid=:rid AND pid NOT IN phantom"
494 " ORDER BY isprim DESC /*sort*/"
@@ -2075,11 +2075,11 @@
2075 addFileGlobDescription(zChng, &desc);
2076 }else if( (p_rid || d_rid) && g.perm.Read && zTagSql==0 ){
2077 /* If p= or d= is present, ignore all other parameters other than n= */
2078 char *zUuid;
2079 const char *zCiName;
2080 int np = 0, nd;
2081 const char *zBackTo = 0;
2082 int ridBackTo = 0;
2083
2084 tmFlags |= TIMELINE_XMERGE | TIMELINE_FILLGAPS;
2085 if( p_rid && d_rid ){
2086
+15
--- src/wiki.c
+++ src/wiki.c
@@ -186,10 +186,11 @@
186186
/*
187187
** Render wiki text according to its mimetype.
188188
**
189189
** text/x-fossil-wiki Fossil wiki
190190
** text/x-markdown Markdown
191
+** text/x-pikchr Pikchr
191192
** anything else... Plain text
192193
**
193194
** If zMimetype is a null pointer, then use "text/x-fossil-wiki".
194195
*/
195196
void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
@@ -199,10 +200,24 @@
199200
Blob tail = BLOB_INITIALIZER;
200201
markdown_to_html(pWiki, 0, &tail);
201202
safe_html(&tail);
202203
@ %s(blob_str(&tail))
203204
blob_reset(&tail);
205
+ }else if( fossil_strcmp(zMimetype, "text/x-pikchr")==0 ){
206
+ const char *zPikchr = blob_str(pWiki);
207
+ int w, h;
208
+ char *zOut = pikchr(zPikchr, "pikchr", 0, &w, &h);
209
+ if( w>0 ){
210
+ @ <div class="pikchr-svg" style="max-width:%d(w)px">
211
+ @ %s(zOut)
212
+ @ </div>
213
+ }else{
214
+ @ <pre class='error'>\n">
215
+ @ %s(zOut);
216
+ @ </pre>
217
+ }
218
+ free(zOut);
204219
}else{
205220
@ <pre class='textPlain'>
206221
@ %h(blob_str(pWiki))
207222
@ </pre>
208223
}
209224
210225
ADDED tools/fossil-autocomplete.zsh
--- src/wiki.c
+++ src/wiki.c
@@ -186,10 +186,11 @@
186 /*
187 ** Render wiki text according to its mimetype.
188 **
189 ** text/x-fossil-wiki Fossil wiki
190 ** text/x-markdown Markdown
 
191 ** anything else... Plain text
192 **
193 ** If zMimetype is a null pointer, then use "text/x-fossil-wiki".
194 */
195 void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
@@ -199,10 +200,24 @@
199 Blob tail = BLOB_INITIALIZER;
200 markdown_to_html(pWiki, 0, &tail);
201 safe_html(&tail);
202 @ %s(blob_str(&tail))
203 blob_reset(&tail);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204 }else{
205 @ <pre class='textPlain'>
206 @ %h(blob_str(pWiki))
207 @ </pre>
208 }
209
210 DDED tools/fossil-autocomplete.zsh
--- src/wiki.c
+++ src/wiki.c
@@ -186,10 +186,11 @@
186 /*
187 ** Render wiki text according to its mimetype.
188 **
189 ** text/x-fossil-wiki Fossil wiki
190 ** text/x-markdown Markdown
191 ** text/x-pikchr Pikchr
192 ** anything else... Plain text
193 **
194 ** If zMimetype is a null pointer, then use "text/x-fossil-wiki".
195 */
196 void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
@@ -199,10 +200,24 @@
200 Blob tail = BLOB_INITIALIZER;
201 markdown_to_html(pWiki, 0, &tail);
202 safe_html(&tail);
203 @ %s(blob_str(&tail))
204 blob_reset(&tail);
205 }else if( fossil_strcmp(zMimetype, "text/x-pikchr")==0 ){
206 const char *zPikchr = blob_str(pWiki);
207 int w, h;
208 char *zOut = pikchr(zPikchr, "pikchr", 0, &w, &h);
209 if( w>0 ){
210 @ <div class="pikchr-svg" style="max-width:%d(w)px">
211 @ %s(zOut)
212 @ </div>
213 }else{
214 @ <pre class='error'>\n">
215 @ %s(zOut);
216 @ </pre>
217 }
218 free(zOut);
219 }else{
220 @ <pre class='textPlain'>
221 @ %h(blob_str(pWiki))
222 @ </pre>
223 }
224
225 DDED tools/fossil-autocomplete.zsh
+15
--- src/wiki.c
+++ src/wiki.c
@@ -186,10 +186,11 @@
186186
/*
187187
** Render wiki text according to its mimetype.
188188
**
189189
** text/x-fossil-wiki Fossil wiki
190190
** text/x-markdown Markdown
191
+** text/x-pikchr Pikchr
191192
** anything else... Plain text
192193
**
193194
** If zMimetype is a null pointer, then use "text/x-fossil-wiki".
194195
*/
195196
void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
@@ -199,10 +200,24 @@
199200
Blob tail = BLOB_INITIALIZER;
200201
markdown_to_html(pWiki, 0, &tail);
201202
safe_html(&tail);
202203
@ %s(blob_str(&tail))
203204
blob_reset(&tail);
205
+ }else if( fossil_strcmp(zMimetype, "text/x-pikchr")==0 ){
206
+ const char *zPikchr = blob_str(pWiki);
207
+ int w, h;
208
+ char *zOut = pikchr(zPikchr, "pikchr", 0, &w, &h);
209
+ if( w>0 ){
210
+ @ <div class="pikchr-svg" style="max-width:%d(w)px">
211
+ @ %s(zOut)
212
+ @ </div>
213
+ }else{
214
+ @ <pre class='error'>\n">
215
+ @ %s(zOut);
216
+ @ </pre>
217
+ }
218
+ free(zOut);
204219
}else{
205220
@ <pre class='textPlain'>
206221
@ %h(blob_str(pWiki))
207222
@ </pre>
208223
}
209224
210225
ADDED tools/fossil-autocomplete.zsh
--- src/wiki.c
+++ src/wiki.c
@@ -186,10 +186,11 @@
186 /*
187 ** Render wiki text according to its mimetype.
188 **
189 ** text/x-fossil-wiki Fossil wiki
190 ** text/x-markdown Markdown
 
191 ** anything else... Plain text
192 **
193 ** If zMimetype is a null pointer, then use "text/x-fossil-wiki".
194 */
195 void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
@@ -199,10 +200,24 @@
199 Blob tail = BLOB_INITIALIZER;
200 markdown_to_html(pWiki, 0, &tail);
201 safe_html(&tail);
202 @ %s(blob_str(&tail))
203 blob_reset(&tail);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204 }else{
205 @ <pre class='textPlain'>
206 @ %h(blob_str(pWiki))
207 @ </pre>
208 }
209
210 DDED tools/fossil-autocomplete.zsh
--- src/wiki.c
+++ src/wiki.c
@@ -186,10 +186,11 @@
186 /*
187 ** Render wiki text according to its mimetype.
188 **
189 ** text/x-fossil-wiki Fossil wiki
190 ** text/x-markdown Markdown
191 ** text/x-pikchr Pikchr
192 ** anything else... Plain text
193 **
194 ** If zMimetype is a null pointer, then use "text/x-fossil-wiki".
195 */
196 void wiki_render_by_mimetype(Blob *pWiki, const char *zMimetype){
@@ -199,10 +200,24 @@
200 Blob tail = BLOB_INITIALIZER;
201 markdown_to_html(pWiki, 0, &tail);
202 safe_html(&tail);
203 @ %s(blob_str(&tail))
204 blob_reset(&tail);
205 }else if( fossil_strcmp(zMimetype, "text/x-pikchr")==0 ){
206 const char *zPikchr = blob_str(pWiki);
207 int w, h;
208 char *zOut = pikchr(zPikchr, "pikchr", 0, &w, &h);
209 if( w>0 ){
210 @ <div class="pikchr-svg" style="max-width:%d(w)px">
211 @ %s(zOut)
212 @ </div>
213 }else{
214 @ <pre class='error'>\n">
215 @ %s(zOut);
216 @ </pre>
217 }
218 free(zOut);
219 }else{
220 @ <pre class='textPlain'>
221 @ %h(blob_str(pWiki))
222 @ </pre>
223 }
224
225 DDED tools/fossil-autocomplete.zsh
--- a/tools/fossil-autocomplete.zsh
+++ b/tools/fossil-autocomplete.zsh
@@ -0,0 +1,186 @@
1
+#compdef fossil
2
+# Origin: https://chiselapp.com/user/lifepillar/repository/fossil-zsh-completion
3
+#################################################################################
4
+# #
5
+# Copyright 2020 Lifepillar #
6
+# #
7
+# Permission is hereby granted, free of charge, to any person obtaining a copy #
8
+# of this software and associated documentation files (the "Software"), to deal #
9
+# in the Software without restriction, including without limitation the rights #
10
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
11
+# copies of the Software, and to permit persons to whom the Software is #
12
+# furnished to do so, subject to the following conditions: #
13
+# #
14
+# The above copyright notice and this permission notice shall be included in #
15
+# all copies or substantial portions of the Software. #
16
+# #
17
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
18
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
19
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
20
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
21
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
22
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
23
+# SOFTWARE. #
24
+# #
25
+#################################################################################
26
+
27
+# To reload the completion function after it has been modified:
28
+#
29
+# $ unfunction _fossil
30
+# $ autoload -U _fossil
31
+#
32
+# See also: http://zsh.sourceforge.net/Doc/Release/Completion-System.html
33
+# See also: https://github.com/zsh-users/zsh-completions/blob/master/zsh-completions-howto.org
34
+
35
+################################################################################
36
+# Functions that help build this completion file #
37
+################################################################################
38
+
39
+# This function can be used to generate scaffolding code for the options of all
40
+# the commands. Copy and paste the result at the suitable spot in this script
41
+# to update it. To parse all commands:
42
+#
43
+# __fossil_parse_help -a
44
+#
45
+# To parse all test commands:
46
+#
47
+# __fossil_parse_help -t
48
+#
49
+# NOTE: The code must be adapted manually. Diff with previous version!
50
+function __fossil_parse_help() {
51
+ echo ' case "$words[1]" in'
52
+ for c in `fossil help $1 | xargs -n1 | sort`;
53
+ do
54
+ echo " ($c)"
55
+ echo ' _arguments \\'
56
+ __fossil_format_options $c;
57
+ echo " '(- *)'--help'[Show help and exit]' \\"
58
+ echo " '*:files:_files'"
59
+ echo ''
60
+ echo ' ;;'
61
+ done;
62
+ echo ' esac'
63
+ echo ' ;;'
64
+}
65
+
66
+# Extract the options of a command and format it in a way that can be used in
67
+# a ZSH completion script.
68
+# Use `__fossil_format_options -o` to extract the common options.
69
+function __fossil_format_options() {
70
+ fossil help $1 2>&1 \
71
+ | grep '^\s\{1,3\}-' \
72
+ | sed -E 's/^ +//' \
73
+ | awk -F ' +' '{
74
+ v=match($1,/\|/)
75
+ split($1,y,"|")
76
+ printf " "
77
+ if (v>0)
78
+ printf "\"(--help %s %s)\"{%s,%s}",y[1],y[2],y[1],y[2];
79
+ else
80
+ printf "\"(--help %s)\"%s",y[1],y[1];
81
+ $1=""
82
+ gsub(/^ +| +$/,"",$0);
83
+ gsub(/^ +| +$/,"",$0);
84
+ gsub(/\x27/,"\x27\"\x27\"\x27",$0);
85
+ print "\x27["$0"]\x27 \\";
86
+ }'
87
+}
88
+
89
+
90
+################################################################################
91
+# Helper functions used for completion. #
92
+################################################################################
93
+
94
+function __fossil_commands() {
95
+ fossil help --all
96
+}
97
+
98
+function __fossil_test_commands() {
99
+ fossil help --test
100
+}
101
+
102
+function __fossil_all_commands() {
103
+ __fossil_commands
104
+ __fossil_test_commands
105
+}
106
+
107
+function __fossil_users() {
108
+ fossil user ls 2>/dev/null | awk '{print $1}'
109
+}
110
+
111
+function __fossil_branches() {
112
+ fossil branch ls -a 2>/dev/null | sed 's/\* *//'
113
+}
114
+
115
+function __fossil_tags() {
116
+ fossil tag ls 2>/dev/null
117
+}
118
+
119
+function __fossil_repos() {
120
+ ls | grep .fossil
121
+ fossil all ls 2>/dev/null
122
+}
123
+
124
+function __fossil_remotes() {
125
+ fossil remote list 2>/dev/null | awk '{print $1}'
126
+}
127
+
128
+function __fossil_wiki_pages() {
129
+ fossil wiki list 2>/dev/null
130
+}
131
+
132
+function __fossil_areas() {
133
+ compadd all email project shun skin ticket user alias subscriber
134
+ return 0
135
+}
136
+
137
+function __fossil_settings() {
138
+ fossil help --setting
139
+}
140
+
141
+function __fossil_urls() {
142
+ local u
143
+ u=($(__fossil_remotes))
144
+ compadd -a u
145
+ compadd -S '' file:// http:// https:// ssh://
146
+ return 0
147
+}
148
+
149
+################################################################################
150
+# Main #
151
+################################################################################
152
+
153
+function _fossil() {
154
+ local context state state_descr line
155
+ typeset -A opt_args
156
+
157
+ local -a _common_options
158
+ # Scaffolding code for common options can be generated with `__fossil_format_options -o`.
159
+ _common_options=(
160
+ "(--help --args)"--args'[FILENAME Read additional arguments and options from FILENAME]:file:_files'
161
+ "(--help --cgitrace)"--cgitrace'[Active CGI tracing]'
162
+ "(--help --comfmtflags --comment-format)"--comfmtflags'[VALUE Set comment formatting flags to VALUE]:value:'
163
+ "(--help --comment-format --comfmtflags)"--comment-format'[VALUE Alias for --comfmtflags]:value:'
164
+ "(--help --errorlog)"--errorlog'[FILENAME Log errors to FILENAME]:file:_files'
165
+ "(- --help ;;
166
+ (zip)
167
+ on the command rather than running it]'
168
+ "(--help --httptrace)"--httptrace'[Trace outbound HTTP requests]'
169
+ "(--help --localtime)"--localtime'[Display times using the local timezone]'
170
+ "(--help --no-th-hook)"--no-th-hook'[Do not run TH1 hooks]'
171
+ "(--help --quiet)"--quiet'[Reduce the amount of output]'
172
+ "(--help --sqlstats)"--sqlstats'[Show SQL usage statistics when done]'
173
+ "(--help --sqltrace)"--sqltrace'[Trace all SQL commands]'
174
+ "(--help --sshtrace)"--sshtrace'[Trace SSH activity]'
175
+ "(--help --ssl-identity)"--ssl-identity'[NAME Set the SSL identity to NAME]:name:'
176
+ "(--help --systemtrace)"--systemtrace'[Trace calls to system()]'
177
+ "(--help --user -U)"{--user,-U}'[USER Make the default user be USER]:user:($(__fossil_users))'
178
+ "(--help --utc)"--utc'[Display times using UTC]'
179
+ "(--help --vfs)"--vfs'[NAME Cause SQLite to use the NAME VFS]:name:'
180
+ )
181
+
182
+ local -a _fossil_clean_options
183
+ _fossil_clean_options=(
184
+ "(--help --allckouts)"--allckouts'[Check for empty directories within any checkouts]'
185
+ "(--help --case-sensitive)"--case-sensitive'[BOOL Override case-sensitive setting]:bool:(yes no)'
186
+ "(--help --dirsonl
--- a/tools/fossil-autocomplete.zsh
+++ b/tools/fossil-autocomplete.zsh
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/tools/fossil-autocomplete.zsh
+++ b/tools/fossil-autocomplete.zsh
@@ -0,0 +1,186 @@
1 #compdef fossil
2 # Origin: https://chiselapp.com/user/lifepillar/repository/fossil-zsh-completion
3 #################################################################################
4 # #
5 # Copyright 2020 Lifepillar #
6 # #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy #
8 # of this software and associated documentation files (the "Software"), to deal #
9 # in the Software without restriction, including without limitation the rights #
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
11 # copies of the Software, and to permit persons to whom the Software is #
12 # furnished to do so, subject to the following conditions: #
13 # #
14 # The above copyright notice and this permission notice shall be included in #
15 # all copies or substantial portions of the Software. #
16 # #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
23 # SOFTWARE. #
24 # #
25 #################################################################################
26
27 # To reload the completion function after it has been modified:
28 #
29 # $ unfunction _fossil
30 # $ autoload -U _fossil
31 #
32 # See also: http://zsh.sourceforge.net/Doc/Release/Completion-System.html
33 # See also: https://github.com/zsh-users/zsh-completions/blob/master/zsh-completions-howto.org
34
35 ################################################################################
36 # Functions that help build this completion file #
37 ################################################################################
38
39 # This function can be used to generate scaffolding code for the options of all
40 # the commands. Copy and paste the result at the suitable spot in this script
41 # to update it. To parse all commands:
42 #
43 # __fossil_parse_help -a
44 #
45 # To parse all test commands:
46 #
47 # __fossil_parse_help -t
48 #
49 # NOTE: The code must be adapted manually. Diff with previous version!
50 function __fossil_parse_help() {
51 echo ' case "$words[1]" in'
52 for c in `fossil help $1 | xargs -n1 | sort`;
53 do
54 echo " ($c)"
55 echo ' _arguments \\'
56 __fossil_format_options $c;
57 echo " '(- *)'--help'[Show help and exit]' \\"
58 echo " '*:files:_files'"
59 echo ''
60 echo ' ;;'
61 done;
62 echo ' esac'
63 echo ' ;;'
64 }
65
66 # Extract the options of a command and format it in a way that can be used in
67 # a ZSH completion script.
68 # Use `__fossil_format_options -o` to extract the common options.
69 function __fossil_format_options() {
70 fossil help $1 2>&1 \
71 | grep '^\s\{1,3\}-' \
72 | sed -E 's/^ +//' \
73 | awk -F ' +' '{
74 v=match($1,/\|/)
75 split($1,y,"|")
76 printf " "
77 if (v>0)
78 printf "\"(--help %s %s)\"{%s,%s}",y[1],y[2],y[1],y[2];
79 else
80 printf "\"(--help %s)\"%s",y[1],y[1];
81 $1=""
82 gsub(/^ +| +$/,"",$0);
83 gsub(/^ +| +$/,"",$0);
84 gsub(/\x27/,"\x27\"\x27\"\x27",$0);
85 print "\x27["$0"]\x27 \\";
86 }'
87 }
88
89
90 ################################################################################
91 # Helper functions used for completion. #
92 ################################################################################
93
94 function __fossil_commands() {
95 fossil help --all
96 }
97
98 function __fossil_test_commands() {
99 fossil help --test
100 }
101
102 function __fossil_all_commands() {
103 __fossil_commands
104 __fossil_test_commands
105 }
106
107 function __fossil_users() {
108 fossil user ls 2>/dev/null | awk '{print $1}'
109 }
110
111 function __fossil_branches() {
112 fossil branch ls -a 2>/dev/null | sed 's/\* *//'
113 }
114
115 function __fossil_tags() {
116 fossil tag ls 2>/dev/null
117 }
118
119 function __fossil_repos() {
120 ls | grep .fossil
121 fossil all ls 2>/dev/null
122 }
123
124 function __fossil_remotes() {
125 fossil remote list 2>/dev/null | awk '{print $1}'
126 }
127
128 function __fossil_wiki_pages() {
129 fossil wiki list 2>/dev/null
130 }
131
132 function __fossil_areas() {
133 compadd all email project shun skin ticket user alias subscriber
134 return 0
135 }
136
137 function __fossil_settings() {
138 fossil help --setting
139 }
140
141 function __fossil_urls() {
142 local u
143 u=($(__fossil_remotes))
144 compadd -a u
145 compadd -S '' file:// http:// https:// ssh://
146 return 0
147 }
148
149 ################################################################################
150 # Main #
151 ################################################################################
152
153 function _fossil() {
154 local context state state_descr line
155 typeset -A opt_args
156
157 local -a _common_options
158 # Scaffolding code for common options can be generated with `__fossil_format_options -o`.
159 _common_options=(
160 "(--help --args)"--args'[FILENAME Read additional arguments and options from FILENAME]:file:_files'
161 "(--help --cgitrace)"--cgitrace'[Active CGI tracing]'
162 "(--help --comfmtflags --comment-format)"--comfmtflags'[VALUE Set comment formatting flags to VALUE]:value:'
163 "(--help --comment-format --comfmtflags)"--comment-format'[VALUE Alias for --comfmtflags]:value:'
164 "(--help --errorlog)"--errorlog'[FILENAME Log errors to FILENAME]:file:_files'
165 "(- --help ;;
166 (zip)
167 on the command rather than running it]'
168 "(--help --httptrace)"--httptrace'[Trace outbound HTTP requests]'
169 "(--help --localtime)"--localtime'[Display times using the local timezone]'
170 "(--help --no-th-hook)"--no-th-hook'[Do not run TH1 hooks]'
171 "(--help --quiet)"--quiet'[Reduce the amount of output]'
172 "(--help --sqlstats)"--sqlstats'[Show SQL usage statistics when done]'
173 "(--help --sqltrace)"--sqltrace'[Trace all SQL commands]'
174 "(--help --sshtrace)"--sshtrace'[Trace SSH activity]'
175 "(--help --ssl-identity)"--ssl-identity'[NAME Set the SSL identity to NAME]:name:'
176 "(--help --systemtrace)"--systemtrace'[Trace calls to system()]'
177 "(--help --user -U)"{--user,-U}'[USER Make the default user be USER]:user:($(__fossil_users))'
178 "(--help --utc)"--utc'[Display times using UTC]'
179 "(--help --vfs)"--vfs'[NAME Cause SQLite to use the NAME VFS]:name:'
180 )
181
182 local -a _fossil_clean_options
183 _fossil_clean_options=(
184 "(--help --allckouts)"--allckouts'[Check for empty directories within any checkouts]'
185 "(--help --case-sensitive)"--case-sensitive'[BOOL Override case-sensitive setting]:bool:(yes no)'
186 "(--help --dirsonl
--- win/Makefile.mingw.mistachkin
+++ win/Makefile.mingw.mistachkin
@@ -485,10 +485,11 @@
485485
$(SRCDIR)/http_socket.c \
486486
$(SRCDIR)/http_ssl.c \
487487
$(SRCDIR)/http_transport.c \
488488
$(SRCDIR)/import.c \
489489
$(SRCDIR)/info.c \
490
+ $(SRCDIR)/interwiki.c \
490491
$(SRCDIR)/json.c \
491492
$(SRCDIR)/json_artifact.c \
492493
$(SRCDIR)/json_branch.c \
493494
$(SRCDIR)/json_config.c \
494495
$(SRCDIR)/json_diff.c \
@@ -515,10 +516,12 @@
515516
$(SRCDIR)/merge3.c \
516517
$(SRCDIR)/moderate.c \
517518
$(SRCDIR)/name.c \
518519
$(SRCDIR)/path.c \
519520
$(SRCDIR)/piechart.c \
521
+ $(SRCDIR)/pikchr.c \
522
+ $(SRCDIR)/pikchrshow.c \
520523
$(SRCDIR)/pivot.c \
521524
$(SRCDIR)/popen.c \
522525
$(SRCDIR)/pqueue.c \
523526
$(SRCDIR)/printf.c \
524527
$(SRCDIR)/publish.c \
@@ -641,14 +644,17 @@
641644
$(SRCDIR)/fossil.dom.js \
642645
$(SRCDIR)/fossil.fetch.js \
643646
$(SRCDIR)/fossil.numbered-lines.js \
644647
$(SRCDIR)/fossil.page.fileedit.js \
645648
$(SRCDIR)/fossil.page.forumpost.js \
649
+ $(SRCDIR)/fossil.page.pikchrshow.js \
646650
$(SRCDIR)/fossil.page.wikiedit.js \
651
+ $(SRCDIR)/fossil.pikchr.js \
647652
$(SRCDIR)/fossil.popupwidget.js \
648653
$(SRCDIR)/fossil.storage.js \
649654
$(SRCDIR)/fossil.tabs.js \
655
+ $(SRCDIR)/fossil.wikiedit-wysiwyg.js \
650656
$(SRCDIR)/graph.js \
651657
$(SRCDIR)/href.js \
652658
$(SRCDIR)/login.js \
653659
$(SRCDIR)/markdown.md \
654660
$(SRCDIR)/menu.js \
@@ -737,10 +743,11 @@
737743
$(OBJDIR)/http_socket_.c \
738744
$(OBJDIR)/http_ssl_.c \
739745
$(OBJDIR)/http_transport_.c \
740746
$(OBJDIR)/import_.c \
741747
$(OBJDIR)/info_.c \
748
+ $(OBJDIR)/interwiki_.c \
742749
$(OBJDIR)/json_.c \
743750
$(OBJDIR)/json_artifact_.c \
744751
$(OBJDIR)/json_branch_.c \
745752
$(OBJDIR)/json_config_.c \
746753
$(OBJDIR)/json_diff_.c \
@@ -767,10 +774,12 @@
767774
$(OBJDIR)/merge3_.c \
768775
$(OBJDIR)/moderate_.c \
769776
$(OBJDIR)/name_.c \
770777
$(OBJDIR)/path_.c \
771778
$(OBJDIR)/piechart_.c \
779
+ $(OBJDIR)/pikchr_.c \
780
+ $(OBJDIR)/pikchrshow_.c \
772781
$(OBJDIR)/pivot_.c \
773782
$(OBJDIR)/popen_.c \
774783
$(OBJDIR)/pqueue_.c \
775784
$(OBJDIR)/printf_.c \
776785
$(OBJDIR)/publish_.c \
@@ -882,10 +891,11 @@
882891
$(OBJDIR)/http_socket.o \
883892
$(OBJDIR)/http_ssl.o \
884893
$(OBJDIR)/http_transport.o \
885894
$(OBJDIR)/import.o \
886895
$(OBJDIR)/info.o \
896
+ $(OBJDIR)/interwiki.o \
887897
$(OBJDIR)/json.o \
888898
$(OBJDIR)/json_artifact.o \
889899
$(OBJDIR)/json_branch.o \
890900
$(OBJDIR)/json_config.o \
891901
$(OBJDIR)/json_diff.o \
@@ -912,10 +922,12 @@
912922
$(OBJDIR)/merge3.o \
913923
$(OBJDIR)/moderate.o \
914924
$(OBJDIR)/name.o \
915925
$(OBJDIR)/path.o \
916926
$(OBJDIR)/piechart.o \
927
+ $(OBJDIR)/pikchr.o \
928
+ $(OBJDIR)/pikchrshow.o \
917929
$(OBJDIR)/pivot.o \
918930
$(OBJDIR)/popen.o \
919931
$(OBJDIR)/pqueue.o \
920932
$(OBJDIR)/printf.o \
921933
$(OBJDIR)/publish.o \
@@ -1242,10 +1254,11 @@
12421254
$(OBJDIR)/http_socket_.c:$(OBJDIR)/http_socket.h \
12431255
$(OBJDIR)/http_ssl_.c:$(OBJDIR)/http_ssl.h \
12441256
$(OBJDIR)/http_transport_.c:$(OBJDIR)/http_transport.h \
12451257
$(OBJDIR)/import_.c:$(OBJDIR)/import.h \
12461258
$(OBJDIR)/info_.c:$(OBJDIR)/info.h \
1259
+ $(OBJDIR)/interwiki_.c:$(OBJDIR)/interwiki.h \
12471260
$(OBJDIR)/json_.c:$(OBJDIR)/json.h \
12481261
$(OBJDIR)/json_artifact_.c:$(OBJDIR)/json_artifact.h \
12491262
$(OBJDIR)/json_branch_.c:$(OBJDIR)/json_branch.h \
12501263
$(OBJDIR)/json_config_.c:$(OBJDIR)/json_config.h \
12511264
$(OBJDIR)/json_diff_.c:$(OBJDIR)/json_diff.h \
@@ -1272,10 +1285,12 @@
12721285
$(OBJDIR)/merge3_.c:$(OBJDIR)/merge3.h \
12731286
$(OBJDIR)/moderate_.c:$(OBJDIR)/moderate.h \
12741287
$(OBJDIR)/name_.c:$(OBJDIR)/name.h \
12751288
$(OBJDIR)/path_.c:$(OBJDIR)/path.h \
12761289
$(OBJDIR)/piechart_.c:$(OBJDIR)/piechart.h \
1290
+ $(OBJDIR)/pikchr_.c:$(OBJDIR)/pikchr.h \
1291
+ $(OBJDIR)/pikchrshow_.c:$(OBJDIR)/pikchrshow.h \
12771292
$(OBJDIR)/pivot_.c:$(OBJDIR)/pivot.h \
12781293
$(OBJDIR)/popen_.c:$(OBJDIR)/popen.h \
12791294
$(OBJDIR)/pqueue_.c:$(OBJDIR)/pqueue.h \
12801295
$(OBJDIR)/printf_.c:$(OBJDIR)/printf.h \
12811296
$(OBJDIR)/publish_.c:$(OBJDIR)/publish.h \
@@ -1806,10 +1821,18 @@
18061821
18071822
$(OBJDIR)/info.o: $(OBJDIR)/info_.c $(OBJDIR)/info.h $(SRCDIR)/config.h
18081823
$(XTCC) -o $(OBJDIR)/info.o -c $(OBJDIR)/info_.c
18091824
18101825
$(OBJDIR)/info.h: $(OBJDIR)/headers
1826
+
1827
+$(OBJDIR)/interwiki_.c: $(SRCDIR)/interwiki.c $(TRANSLATE)
1828
+ $(TRANSLATE) $(SRCDIR)/interwiki.c >$@
1829
+
1830
+$(OBJDIR)/interwiki.o: $(OBJDIR)/interwiki_.c $(OBJDIR)/interwiki.h $(SRCDIR)/config.h
1831
+ $(XTCC) -o $(OBJDIR)/interwiki.o -c $(OBJDIR)/interwiki_.c
1832
+
1833
+$(OBJDIR)/interwiki.h: $(OBJDIR)/headers
18111834
18121835
$(OBJDIR)/json_.c: $(SRCDIR)/json.c $(TRANSLATE)
18131836
$(TRANSLATE) $(SRCDIR)/json.c >$@
18141837
18151838
$(OBJDIR)/json.o: $(OBJDIR)/json_.c $(OBJDIR)/json.h $(SRCDIR)/config.h
@@ -2046,10 +2069,26 @@
20462069
20472070
$(OBJDIR)/piechart.o: $(OBJDIR)/piechart_.c $(OBJDIR)/piechart.h $(SRCDIR)/config.h
20482071
$(XTCC) -o $(OBJDIR)/piechart.o -c $(OBJDIR)/piechart_.c
20492072
20502073
$(OBJDIR)/piechart.h: $(OBJDIR)/headers
2074
+
2075
+$(OBJDIR)/pikchr_.c: $(SRCDIR)/pikchr.c $(TRANSLATE)
2076
+ $(TRANSLATE) $(SRCDIR)/pikchr.c >$@
2077
+
2078
+$(OBJDIR)/pikchr.o: $(OBJDIR)/pikchr_.c $(OBJDIR)/pikchr.h $(SRCDIR)/config.h
2079
+ $(XTCC) -o $(OBJDIR)/pikchr.o -c $(OBJDIR)/pikchr_.c
2080
+
2081
+$(OBJDIR)/pikchr.h: $(OBJDIR)/headers
2082
+
2083
+$(OBJDIR)/pikchrshow_.c: $(SRCDIR)/pikchrshow.c $(TRANSLATE)
2084
+ $(TRANSLATE) $(SRCDIR)/pikchrshow.c >$@
2085
+
2086
+$(OBJDIR)/pikchrshow.o: $(OBJDIR)/pikchrshow_.c $(OBJDIR)/pikchrshow.h $(SRCDIR)/config.h
2087
+ $(XTCC) -o $(OBJDIR)/pikchrshow.o -c $(OBJDIR)/pikchrshow_.c
2088
+
2089
+$(OBJDIR)/pikchrshow.h: $(OBJDIR)/headers
20512090
20522091
$(OBJDIR)/pivot_.c: $(SRCDIR)/pivot.c $(TRANSLATE)
20532092
$(TRANSLATE) $(SRCDIR)/pivot.c >$@
20542093
20552094
$(OBJDIR)/pivot.o: $(OBJDIR)/pivot_.c $(OBJDIR)/pivot.h $(SRCDIR)/config.h
20562095
--- win/Makefile.mingw.mistachkin
+++ win/Makefile.mingw.mistachkin
@@ -485,10 +485,11 @@
485 $(SRCDIR)/http_socket.c \
486 $(SRCDIR)/http_ssl.c \
487 $(SRCDIR)/http_transport.c \
488 $(SRCDIR)/import.c \
489 $(SRCDIR)/info.c \
 
490 $(SRCDIR)/json.c \
491 $(SRCDIR)/json_artifact.c \
492 $(SRCDIR)/json_branch.c \
493 $(SRCDIR)/json_config.c \
494 $(SRCDIR)/json_diff.c \
@@ -515,10 +516,12 @@
515 $(SRCDIR)/merge3.c \
516 $(SRCDIR)/moderate.c \
517 $(SRCDIR)/name.c \
518 $(SRCDIR)/path.c \
519 $(SRCDIR)/piechart.c \
 
 
520 $(SRCDIR)/pivot.c \
521 $(SRCDIR)/popen.c \
522 $(SRCDIR)/pqueue.c \
523 $(SRCDIR)/printf.c \
524 $(SRCDIR)/publish.c \
@@ -641,14 +644,17 @@
641 $(SRCDIR)/fossil.dom.js \
642 $(SRCDIR)/fossil.fetch.js \
643 $(SRCDIR)/fossil.numbered-lines.js \
644 $(SRCDIR)/fossil.page.fileedit.js \
645 $(SRCDIR)/fossil.page.forumpost.js \
 
646 $(SRCDIR)/fossil.page.wikiedit.js \
 
647 $(SRCDIR)/fossil.popupwidget.js \
648 $(SRCDIR)/fossil.storage.js \
649 $(SRCDIR)/fossil.tabs.js \
 
650 $(SRCDIR)/graph.js \
651 $(SRCDIR)/href.js \
652 $(SRCDIR)/login.js \
653 $(SRCDIR)/markdown.md \
654 $(SRCDIR)/menu.js \
@@ -737,10 +743,11 @@
737 $(OBJDIR)/http_socket_.c \
738 $(OBJDIR)/http_ssl_.c \
739 $(OBJDIR)/http_transport_.c \
740 $(OBJDIR)/import_.c \
741 $(OBJDIR)/info_.c \
 
742 $(OBJDIR)/json_.c \
743 $(OBJDIR)/json_artifact_.c \
744 $(OBJDIR)/json_branch_.c \
745 $(OBJDIR)/json_config_.c \
746 $(OBJDIR)/json_diff_.c \
@@ -767,10 +774,12 @@
767 $(OBJDIR)/merge3_.c \
768 $(OBJDIR)/moderate_.c \
769 $(OBJDIR)/name_.c \
770 $(OBJDIR)/path_.c \
771 $(OBJDIR)/piechart_.c \
 
 
772 $(OBJDIR)/pivot_.c \
773 $(OBJDIR)/popen_.c \
774 $(OBJDIR)/pqueue_.c \
775 $(OBJDIR)/printf_.c \
776 $(OBJDIR)/publish_.c \
@@ -882,10 +891,11 @@
882 $(OBJDIR)/http_socket.o \
883 $(OBJDIR)/http_ssl.o \
884 $(OBJDIR)/http_transport.o \
885 $(OBJDIR)/import.o \
886 $(OBJDIR)/info.o \
 
887 $(OBJDIR)/json.o \
888 $(OBJDIR)/json_artifact.o \
889 $(OBJDIR)/json_branch.o \
890 $(OBJDIR)/json_config.o \
891 $(OBJDIR)/json_diff.o \
@@ -912,10 +922,12 @@
912 $(OBJDIR)/merge3.o \
913 $(OBJDIR)/moderate.o \
914 $(OBJDIR)/name.o \
915 $(OBJDIR)/path.o \
916 $(OBJDIR)/piechart.o \
 
 
917 $(OBJDIR)/pivot.o \
918 $(OBJDIR)/popen.o \
919 $(OBJDIR)/pqueue.o \
920 $(OBJDIR)/printf.o \
921 $(OBJDIR)/publish.o \
@@ -1242,10 +1254,11 @@
1242 $(OBJDIR)/http_socket_.c:$(OBJDIR)/http_socket.h \
1243 $(OBJDIR)/http_ssl_.c:$(OBJDIR)/http_ssl.h \
1244 $(OBJDIR)/http_transport_.c:$(OBJDIR)/http_transport.h \
1245 $(OBJDIR)/import_.c:$(OBJDIR)/import.h \
1246 $(OBJDIR)/info_.c:$(OBJDIR)/info.h \
 
1247 $(OBJDIR)/json_.c:$(OBJDIR)/json.h \
1248 $(OBJDIR)/json_artifact_.c:$(OBJDIR)/json_artifact.h \
1249 $(OBJDIR)/json_branch_.c:$(OBJDIR)/json_branch.h \
1250 $(OBJDIR)/json_config_.c:$(OBJDIR)/json_config.h \
1251 $(OBJDIR)/json_diff_.c:$(OBJDIR)/json_diff.h \
@@ -1272,10 +1285,12 @@
1272 $(OBJDIR)/merge3_.c:$(OBJDIR)/merge3.h \
1273 $(OBJDIR)/moderate_.c:$(OBJDIR)/moderate.h \
1274 $(OBJDIR)/name_.c:$(OBJDIR)/name.h \
1275 $(OBJDIR)/path_.c:$(OBJDIR)/path.h \
1276 $(OBJDIR)/piechart_.c:$(OBJDIR)/piechart.h \
 
 
1277 $(OBJDIR)/pivot_.c:$(OBJDIR)/pivot.h \
1278 $(OBJDIR)/popen_.c:$(OBJDIR)/popen.h \
1279 $(OBJDIR)/pqueue_.c:$(OBJDIR)/pqueue.h \
1280 $(OBJDIR)/printf_.c:$(OBJDIR)/printf.h \
1281 $(OBJDIR)/publish_.c:$(OBJDIR)/publish.h \
@@ -1806,10 +1821,18 @@
1806
1807 $(OBJDIR)/info.o: $(OBJDIR)/info_.c $(OBJDIR)/info.h $(SRCDIR)/config.h
1808 $(XTCC) -o $(OBJDIR)/info.o -c $(OBJDIR)/info_.c
1809
1810 $(OBJDIR)/info.h: $(OBJDIR)/headers
 
 
 
 
 
 
 
 
1811
1812 $(OBJDIR)/json_.c: $(SRCDIR)/json.c $(TRANSLATE)
1813 $(TRANSLATE) $(SRCDIR)/json.c >$@
1814
1815 $(OBJDIR)/json.o: $(OBJDIR)/json_.c $(OBJDIR)/json.h $(SRCDIR)/config.h
@@ -2046,10 +2069,26 @@
2046
2047 $(OBJDIR)/piechart.o: $(OBJDIR)/piechart_.c $(OBJDIR)/piechart.h $(SRCDIR)/config.h
2048 $(XTCC) -o $(OBJDIR)/piechart.o -c $(OBJDIR)/piechart_.c
2049
2050 $(OBJDIR)/piechart.h: $(OBJDIR)/headers
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2051
2052 $(OBJDIR)/pivot_.c: $(SRCDIR)/pivot.c $(TRANSLATE)
2053 $(TRANSLATE) $(SRCDIR)/pivot.c >$@
2054
2055 $(OBJDIR)/pivot.o: $(OBJDIR)/pivot_.c $(OBJDIR)/pivot.h $(SRCDIR)/config.h
2056
--- win/Makefile.mingw.mistachkin
+++ win/Makefile.mingw.mistachkin
@@ -485,10 +485,11 @@
485 $(SRCDIR)/http_socket.c \
486 $(SRCDIR)/http_ssl.c \
487 $(SRCDIR)/http_transport.c \
488 $(SRCDIR)/import.c \
489 $(SRCDIR)/info.c \
490 $(SRCDIR)/interwiki.c \
491 $(SRCDIR)/json.c \
492 $(SRCDIR)/json_artifact.c \
493 $(SRCDIR)/json_branch.c \
494 $(SRCDIR)/json_config.c \
495 $(SRCDIR)/json_diff.c \
@@ -515,10 +516,12 @@
516 $(SRCDIR)/merge3.c \
517 $(SRCDIR)/moderate.c \
518 $(SRCDIR)/name.c \
519 $(SRCDIR)/path.c \
520 $(SRCDIR)/piechart.c \
521 $(SRCDIR)/pikchr.c \
522 $(SRCDIR)/pikchrshow.c \
523 $(SRCDIR)/pivot.c \
524 $(SRCDIR)/popen.c \
525 $(SRCDIR)/pqueue.c \
526 $(SRCDIR)/printf.c \
527 $(SRCDIR)/publish.c \
@@ -641,14 +644,17 @@
644 $(SRCDIR)/fossil.dom.js \
645 $(SRCDIR)/fossil.fetch.js \
646 $(SRCDIR)/fossil.numbered-lines.js \
647 $(SRCDIR)/fossil.page.fileedit.js \
648 $(SRCDIR)/fossil.page.forumpost.js \
649 $(SRCDIR)/fossil.page.pikchrshow.js \
650 $(SRCDIR)/fossil.page.wikiedit.js \
651 $(SRCDIR)/fossil.pikchr.js \
652 $(SRCDIR)/fossil.popupwidget.js \
653 $(SRCDIR)/fossil.storage.js \
654 $(SRCDIR)/fossil.tabs.js \
655 $(SRCDIR)/fossil.wikiedit-wysiwyg.js \
656 $(SRCDIR)/graph.js \
657 $(SRCDIR)/href.js \
658 $(SRCDIR)/login.js \
659 $(SRCDIR)/markdown.md \
660 $(SRCDIR)/menu.js \
@@ -737,10 +743,11 @@
743 $(OBJDIR)/http_socket_.c \
744 $(OBJDIR)/http_ssl_.c \
745 $(OBJDIR)/http_transport_.c \
746 $(OBJDIR)/import_.c \
747 $(OBJDIR)/info_.c \
748 $(OBJDIR)/interwiki_.c \
749 $(OBJDIR)/json_.c \
750 $(OBJDIR)/json_artifact_.c \
751 $(OBJDIR)/json_branch_.c \
752 $(OBJDIR)/json_config_.c \
753 $(OBJDIR)/json_diff_.c \
@@ -767,10 +774,12 @@
774 $(OBJDIR)/merge3_.c \
775 $(OBJDIR)/moderate_.c \
776 $(OBJDIR)/name_.c \
777 $(OBJDIR)/path_.c \
778 $(OBJDIR)/piechart_.c \
779 $(OBJDIR)/pikchr_.c \
780 $(OBJDIR)/pikchrshow_.c \
781 $(OBJDIR)/pivot_.c \
782 $(OBJDIR)/popen_.c \
783 $(OBJDIR)/pqueue_.c \
784 $(OBJDIR)/printf_.c \
785 $(OBJDIR)/publish_.c \
@@ -882,10 +891,11 @@
891 $(OBJDIR)/http_socket.o \
892 $(OBJDIR)/http_ssl.o \
893 $(OBJDIR)/http_transport.o \
894 $(OBJDIR)/import.o \
895 $(OBJDIR)/info.o \
896 $(OBJDIR)/interwiki.o \
897 $(OBJDIR)/json.o \
898 $(OBJDIR)/json_artifact.o \
899 $(OBJDIR)/json_branch.o \
900 $(OBJDIR)/json_config.o \
901 $(OBJDIR)/json_diff.o \
@@ -912,10 +922,12 @@
922 $(OBJDIR)/merge3.o \
923 $(OBJDIR)/moderate.o \
924 $(OBJDIR)/name.o \
925 $(OBJDIR)/path.o \
926 $(OBJDIR)/piechart.o \
927 $(OBJDIR)/pikchr.o \
928 $(OBJDIR)/pikchrshow.o \
929 $(OBJDIR)/pivot.o \
930 $(OBJDIR)/popen.o \
931 $(OBJDIR)/pqueue.o \
932 $(OBJDIR)/printf.o \
933 $(OBJDIR)/publish.o \
@@ -1242,10 +1254,11 @@
1254 $(OBJDIR)/http_socket_.c:$(OBJDIR)/http_socket.h \
1255 $(OBJDIR)/http_ssl_.c:$(OBJDIR)/http_ssl.h \
1256 $(OBJDIR)/http_transport_.c:$(OBJDIR)/http_transport.h \
1257 $(OBJDIR)/import_.c:$(OBJDIR)/import.h \
1258 $(OBJDIR)/info_.c:$(OBJDIR)/info.h \
1259 $(OBJDIR)/interwiki_.c:$(OBJDIR)/interwiki.h \
1260 $(OBJDIR)/json_.c:$(OBJDIR)/json.h \
1261 $(OBJDIR)/json_artifact_.c:$(OBJDIR)/json_artifact.h \
1262 $(OBJDIR)/json_branch_.c:$(OBJDIR)/json_branch.h \
1263 $(OBJDIR)/json_config_.c:$(OBJDIR)/json_config.h \
1264 $(OBJDIR)/json_diff_.c:$(OBJDIR)/json_diff.h \
@@ -1272,10 +1285,12 @@
1285 $(OBJDIR)/merge3_.c:$(OBJDIR)/merge3.h \
1286 $(OBJDIR)/moderate_.c:$(OBJDIR)/moderate.h \
1287 $(OBJDIR)/name_.c:$(OBJDIR)/name.h \
1288 $(OBJDIR)/path_.c:$(OBJDIR)/path.h \
1289 $(OBJDIR)/piechart_.c:$(OBJDIR)/piechart.h \
1290 $(OBJDIR)/pikchr_.c:$(OBJDIR)/pikchr.h \
1291 $(OBJDIR)/pikchrshow_.c:$(OBJDIR)/pikchrshow.h \
1292 $(OBJDIR)/pivot_.c:$(OBJDIR)/pivot.h \
1293 $(OBJDIR)/popen_.c:$(OBJDIR)/popen.h \
1294 $(OBJDIR)/pqueue_.c:$(OBJDIR)/pqueue.h \
1295 $(OBJDIR)/printf_.c:$(OBJDIR)/printf.h \
1296 $(OBJDIR)/publish_.c:$(OBJDIR)/publish.h \
@@ -1806,10 +1821,18 @@
1821
1822 $(OBJDIR)/info.o: $(OBJDIR)/info_.c $(OBJDIR)/info.h $(SRCDIR)/config.h
1823 $(XTCC) -o $(OBJDIR)/info.o -c $(OBJDIR)/info_.c
1824
1825 $(OBJDIR)/info.h: $(OBJDIR)/headers
1826
1827 $(OBJDIR)/interwiki_.c: $(SRCDIR)/interwiki.c $(TRANSLATE)
1828 $(TRANSLATE) $(SRCDIR)/interwiki.c >$@
1829
1830 $(OBJDIR)/interwiki.o: $(OBJDIR)/interwiki_.c $(OBJDIR)/interwiki.h $(SRCDIR)/config.h
1831 $(XTCC) -o $(OBJDIR)/interwiki.o -c $(OBJDIR)/interwiki_.c
1832
1833 $(OBJDIR)/interwiki.h: $(OBJDIR)/headers
1834
1835 $(OBJDIR)/json_.c: $(SRCDIR)/json.c $(TRANSLATE)
1836 $(TRANSLATE) $(SRCDIR)/json.c >$@
1837
1838 $(OBJDIR)/json.o: $(OBJDIR)/json_.c $(OBJDIR)/json.h $(SRCDIR)/config.h
@@ -2046,10 +2069,26 @@
2069
2070 $(OBJDIR)/piechart.o: $(OBJDIR)/piechart_.c $(OBJDIR)/piechart.h $(SRCDIR)/config.h
2071 $(XTCC) -o $(OBJDIR)/piechart.o -c $(OBJDIR)/piechart_.c
2072
2073 $(OBJDIR)/piechart.h: $(OBJDIR)/headers
2074
2075 $(OBJDIR)/pikchr_.c: $(SRCDIR)/pikchr.c $(TRANSLATE)
2076 $(TRANSLATE) $(SRCDIR)/pikchr.c >$@
2077
2078 $(OBJDIR)/pikchr.o: $(OBJDIR)/pikchr_.c $(OBJDIR)/pikchr.h $(SRCDIR)/config.h
2079 $(XTCC) -o $(OBJDIR)/pikchr.o -c $(OBJDIR)/pikchr_.c
2080
2081 $(OBJDIR)/pikchr.h: $(OBJDIR)/headers
2082
2083 $(OBJDIR)/pikchrshow_.c: $(SRCDIR)/pikchrshow.c $(TRANSLATE)
2084 $(TRANSLATE) $(SRCDIR)/pikchrshow.c >$@
2085
2086 $(OBJDIR)/pikchrshow.o: $(OBJDIR)/pikchrshow_.c $(OBJDIR)/pikchrshow.h $(SRCDIR)/config.h
2087 $(XTCC) -o $(OBJDIR)/pikchrshow.o -c $(OBJDIR)/pikchrshow_.c
2088
2089 $(OBJDIR)/pikchrshow.h: $(OBJDIR)/headers
2090
2091 $(OBJDIR)/pivot_.c: $(SRCDIR)/pivot.c $(TRANSLATE)
2092 $(TRANSLATE) $(SRCDIR)/pivot.c >$@
2093
2094 $(OBJDIR)/pivot.o: $(OBJDIR)/pivot_.c $(OBJDIR)/pivot.h $(SRCDIR)/config.h
2095
+40 -27
--- www/backup.md
+++ www/backup.md
@@ -1,22 +1,29 @@
11
# Backing Up a Remote Fossil Repository
22
3
-Simply cloning a Fossil repository does not necessarily create a
4
-*complete* backup of the remote repository’s contents. With an existing
5
-clone, Fossil’s autosync feature isn’t enough to keep that clone fully
6
-up-to-date in a backup failover sense. This document explains what your
7
-clone may be missing and how to ensure that it is complete for cases
8
-where you wish to have a backup suitable for replacing it without data
9
-loss, should the need arise.
10
-
3
+One of the great benefits of Fossil and other [distributed version control systems][dvcs]
4
+is that cloning a repository makes a backup. If you are running a project with multiple
5
+developers who share their work using a [central server][server] and the server hardware
6
+catches fire or otherwise becomes unavailable, the clones of the repository on each developer
7
+workstation *may* serve as a suitable backup.
8
+
9
+[dvcs]: wikipedia:/wiki/Distributed_version_control
10
+[server]: ./server/whyuseaserver.wiki
11
+
12
+We say “may” because
13
+it turns out not everything in a Fossil repository is copied when cloning. You
14
+don’t even always get copies of all historical file artifacts. More than
15
+that, a Fossil repository typically contains
16
+other useful information that is not always shared as part of a clone, which might need
17
+to be backed up separately. To wit:
1118
1219
1320
## Sensitive Information
1421
1522
Fossil purposefully does not clone certain sensitive information unless
16
-you’re logged in with [setup] capability. As an example, a local clone
17
-may have a different `user` table than the remote, because only the
23
+you’re logged in as a user with [setup] capability. As an example, a local clone
24
+may have a different `user` table than the remote, because only a
1825
Setup user is allowed to see the full version for privacy and security
1926
reasons.
2027
2128
2229
## Configuration Drift
@@ -23,29 +30,29 @@
2330
2431
Fossil allows the local configuration in certain areas to differ from
2532
that of the remote. With the exception of the prior item, you get a copy
2633
of these configuration areas on initial clone, but after that, some
2734
remote configuration changes don’t sync down automatically, such as the
28
-remote’s skin. You have to ask for updates to these configuration areas
29
-explicitly.
35
+remote’s skin. You can ask for updates by running the
36
+[`fossil config pull skin`](./help?cmd=config) command, but that
37
+does not happen automatically during the course of normal development.
3038
3139
3240
## Private Branches
3341
3442
The very nature of Fossil’s [private branch feature][pbr] ensures that
3543
remote clones don’t get a copy of those branches. Normally this is
3644
exactly what you want, but in the case of making backups, you probably
37
-want these branches as well. One of the two backup methods below
45
+want to back up these branches as well. One of the two backup methods below
3846
provides this.
3947
4048
4149
## Shunned Artifacts
4250
4351
Fossil purposefully doesn’t sync [shunned artifacts][shun]. If you want
4452
your local clone to be a precise match to the remote, it needs to track
4553
changes to the shun table as well.
46
-
4754
4855
4956
## Unversioned Artifacts
5057
5158
Data in Fossil’s [unversioned artifacts table][uv] doesn’t sync down by
@@ -62,15 +69,15 @@
6269
simple central-and-clones model that is more common, there may be no
6370
single source of truth in the network because Fossil’s autosync feature
6471
isn’t transitive.
6572
6673
That is, if you cloned from server A, and then you stand that up on a
67
-server B, then if I clone from you as repository C, changes to B
74
+server B, then if I clone from your server as my repository C, your changes to B
6875
autosync up to A, but not down to me on C until I do something locally
6976
that triggers autosync. The inverse is also true: if I commit something
7077
on C, it will autosync up to B, but A won’t get a copy until someone on
71
-B does something to trigger autosync there.
78
+B does something to trigger a sync there.
7279
7380
An easy way to run into this problem is to set up failover servers
7481
`svr1` thru `svr3.example.com`, then set `svr2` and `svr3` up to sync
7582
with the first. If all of the users normally clone from `svr1`, their
7683
commits don’t get to `svr2` and `svr3` until something on one of the
@@ -89,12 +96,12 @@
8996
9097
# Solutions
9198
9299
The following script solves most of the above problems for the use case
93100
where you want a *nearly-complete* clone of the remote repository using nothing
94
-but the normal Fossil sync protocol. It requires that you be logged into
95
-the remote as a user with Setup capability.
101
+but the normal Fossil sync protocol. It only does so if you are logged into
102
+the remote as a user with Setup capability, however.
96103
97104
----
98105
99106
``` shell
100107
#!/bin/sh
@@ -105,22 +112,24 @@
105112
106113
----
107114
108115
The last step is needed to ensure that shunned artifacts on the remote
109116
are removed from the local clone. The second step includes
110
-`fossil conf pull shun`, so your repo won’t offer the shunned artifacts
111
-to others cloning from it, but the backup can’t be said to be “complete”
112
-if it contains information that the remote now lacks.
117
+`fossil conf pull shun`, but until those artifacts are actually rebuilt
118
+out of existence, your backup will be “more than complete” in the sense
119
+that it will continue to have information that the remote says should
120
+not exist any more. That would be not so much a “backup” as an
121
+“archive,” which might not be what you want.
113122
114123
This method doesn’t get you a copy of the remote’s
115124
[private branches][pbr], on purpose. It may also miss other info on the
116125
remote, such as SQL-level customizations that the sync protocol can’t
117
-see. (Some [ticket system customization][tkt] schemes do this.) You can
126
+see. (Some [ticket system customization][tkt] schemes rely on this ability, for example.) You can
118127
solve such problems if you have access to the remote server, which
119128
allows you to get a SQL-level backup. This requires Fossil 2.12 or
120
-newer, which added [the `backup` command][bu], which takes care of
121
-locking and transaction isolation to allow backing up an in-use
129
+newer, which added [the `backup` command][bu] to take care of
130
+locking and transaction isolation, allowing the user to safely back up an in-use
122131
repository.
123132
124133
If you have SSH access to the remote server, something like this will work:
125134
126135
----
@@ -160,11 +169,13 @@
160169
161170
This requires OpenSSL 1.1 or higher. If you’re on 1.0 or older, you
162171
won’t have the `-pbkdf2` and `-iter` options, and you may have to choose
163172
a different cipher algorithm; both changes are likely to weaken the
164173
encryption significantly, so you should install a newer version rather
165
-than work around the lack of these features. If you’re on macOS, which
174
+than work around the lack of these features.
175
+
176
+If you’re on macOS, which
166177
still ships 1.0 as of the time of this writing, [Homebrew][hb] offers
167178
the current version of OpenSSL, but to avoid a conflict with the platform
168179
version it’s [unlinked][hbul] by default, so you have to give an explicit
169180
path to its “cellar” directory:
170181
@@ -204,19 +215,21 @@
204215
205216
The last change is tricky: we used `fossil sql` above to ensure that
206217
we’re using the same version of SQLite to write the encrypted backup DB
207218
as was used to maintain the repository, but unfortunately, we can’t get
208219
the built-in SQLite shell to write a backup into an empty database.
220
+(As soon as it starts up, it goes looking for tables created by
221
+`fossil init` and fails with an error.)
209222
Therefore, we have to either run the restoration against a
210223
possibly-different version of SQLite and hope there are no
211224
incompatibilities, or we have to go out of our way to build a matching
212225
version of `sqlite3` before we can safely do the restoration.
213226
214227
Keep in mind that Fossil often acts as a dogfooding project for SQLite,
215228
making use of the latest features, so it is quite likely that a given
216
-random `sqlite3` binary in your `PATH` may be unable to understand the
217
-dump file created by the backup script!
229
+random `sqlite3` binary in your `PATH` will be unable to understand the
230
+file created by “`fossil sql .dump`”!
218231
219232
[bu]: /help?cmd=backup
220233
[grcp]: https://www.grc.com/passwords.htm
221234
[hb]: https://brew.sh
222235
[hbul]: https://docs.brew.sh/FAQ#what-does-keg-only-mean
223236
--- www/backup.md
+++ www/backup.md
@@ -1,22 +1,29 @@
1 # Backing Up a Remote Fossil Repository
2
3 Simply cloning a Fossil repository does not necessarily create a
4 *complete* backup of the remote repository’s contents. With an existing
5 clone, Fossil’s autosync feature isn’t enough to keep that clone fully
6 up-to-date in a backup failover sense. This document explains what your
7 clone may be missing and how to ensure that it is complete for cases
8 where you wish to have a backup suitable for replacing it without data
9 loss, should the need arise.
10
 
 
 
 
 
 
 
11
12
13 ## Sensitive Information
14
15 Fossil purposefully does not clone certain sensitive information unless
16 you’re logged in with [setup] capability. As an example, a local clone
17 may have a different `user` table than the remote, because only the
18 Setup user is allowed to see the full version for privacy and security
19 reasons.
20
21
22 ## Configuration Drift
@@ -23,29 +30,29 @@
23
24 Fossil allows the local configuration in certain areas to differ from
25 that of the remote. With the exception of the prior item, you get a copy
26 of these configuration areas on initial clone, but after that, some
27 remote configuration changes don’t sync down automatically, such as the
28 remote’s skin. You have to ask for updates to these configuration areas
29 explicitly.
 
30
31
32 ## Private Branches
33
34 The very nature of Fossil’s [private branch feature][pbr] ensures that
35 remote clones don’t get a copy of those branches. Normally this is
36 exactly what you want, but in the case of making backups, you probably
37 want these branches as well. One of the two backup methods below
38 provides this.
39
40
41 ## Shunned Artifacts
42
43 Fossil purposefully doesn’t sync [shunned artifacts][shun]. If you want
44 your local clone to be a precise match to the remote, it needs to track
45 changes to the shun table as well.
46
47
48
49 ## Unversioned Artifacts
50
51 Data in Fossil’s [unversioned artifacts table][uv] doesn’t sync down by
@@ -62,15 +69,15 @@
62 simple central-and-clones model that is more common, there may be no
63 single source of truth in the network because Fossil’s autosync feature
64 isn’t transitive.
65
66 That is, if you cloned from server A, and then you stand that up on a
67 server B, then if I clone from you as repository C, changes to B
68 autosync up to A, but not down to me on C until I do something locally
69 that triggers autosync. The inverse is also true: if I commit something
70 on C, it will autosync up to B, but A won’t get a copy until someone on
71 B does something to trigger autosync there.
72
73 An easy way to run into this problem is to set up failover servers
74 `svr1` thru `svr3.example.com`, then set `svr2` and `svr3` up to sync
75 with the first. If all of the users normally clone from `svr1`, their
76 commits don’t get to `svr2` and `svr3` until something on one of the
@@ -89,12 +96,12 @@
89
90 # Solutions
91
92 The following script solves most of the above problems for the use case
93 where you want a *nearly-complete* clone of the remote repository using nothing
94 but the normal Fossil sync protocol. It requires that you be logged into
95 the remote as a user with Setup capability.
96
97 ----
98
99 ``` shell
100 #!/bin/sh
@@ -105,22 +112,24 @@
105
106 ----
107
108 The last step is needed to ensure that shunned artifacts on the remote
109 are removed from the local clone. The second step includes
110 `fossil conf pull shun`, so your repo won’t offer the shunned artifacts
111 to others cloning from it, but the backup can’t be said to be “complete”
112 if it contains information that the remote now lacks.
 
 
113
114 This method doesn’t get you a copy of the remote’s
115 [private branches][pbr], on purpose. It may also miss other info on the
116 remote, such as SQL-level customizations that the sync protocol can’t
117 see. (Some [ticket system customization][tkt] schemes do this.) You can
118 solve such problems if you have access to the remote server, which
119 allows you to get a SQL-level backup. This requires Fossil 2.12 or
120 newer, which added [the `backup` command][bu], which takes care of
121 locking and transaction isolation to allow backing up an in-use
122 repository.
123
124 If you have SSH access to the remote server, something like this will work:
125
126 ----
@@ -160,11 +169,13 @@
160
161 This requires OpenSSL 1.1 or higher. If you’re on 1.0 or older, you
162 won’t have the `-pbkdf2` and `-iter` options, and you may have to choose
163 a different cipher algorithm; both changes are likely to weaken the
164 encryption significantly, so you should install a newer version rather
165 than work around the lack of these features. If you’re on macOS, which
 
 
166 still ships 1.0 as of the time of this writing, [Homebrew][hb] offers
167 the current version of OpenSSL, but to avoid a conflict with the platform
168 version it’s [unlinked][hbul] by default, so you have to give an explicit
169 path to its “cellar” directory:
170
@@ -204,19 +215,21 @@
204
205 The last change is tricky: we used `fossil sql` above to ensure that
206 we’re using the same version of SQLite to write the encrypted backup DB
207 as was used to maintain the repository, but unfortunately, we can’t get
208 the built-in SQLite shell to write a backup into an empty database.
 
 
209 Therefore, we have to either run the restoration against a
210 possibly-different version of SQLite and hope there are no
211 incompatibilities, or we have to go out of our way to build a matching
212 version of `sqlite3` before we can safely do the restoration.
213
214 Keep in mind that Fossil often acts as a dogfooding project for SQLite,
215 making use of the latest features, so it is quite likely that a given
216 random `sqlite3` binary in your `PATH` may be unable to understand the
217 dump file created by the backup script!
218
219 [bu]: /help?cmd=backup
220 [grcp]: https://www.grc.com/passwords.htm
221 [hb]: https://brew.sh
222 [hbul]: https://docs.brew.sh/FAQ#what-does-keg-only-mean
223
--- www/backup.md
+++ www/backup.md
@@ -1,22 +1,29 @@
1 # Backing Up a Remote Fossil Repository
2
3 One of the great benefits of Fossil and other [distributed version control systems][dvcs]
4 is that cloning a repository makes a backup. If you are running a project with multiple
5 developers who share their work using a [central server][server] and the server hardware
6 catches fire or otherwise becomes unavailable, the clones of the repository on each developer
7 workstation *may* serve as a suitable backup.
8
9 [dvcs]: wikipedia:/wiki/Distributed_version_control
10 [server]: ./server/whyuseaserver.wiki
11
12 We say “may” because
13 it turns out not everything in a Fossil repository is copied when cloning. You
14 don’t even always get copies of all historical file artifacts. More than
15 that, a Fossil repository typically contains
16 other useful information that is not always shared as part of a clone, which might need
17 to be backed up separately. To wit:
18
19
20 ## Sensitive Information
21
22 Fossil purposefully does not clone certain sensitive information unless
23 you’re logged in as a user with [setup] capability. As an example, a local clone
24 may have a different `user` table than the remote, because only a
25 Setup user is allowed to see the full version for privacy and security
26 reasons.
27
28
29 ## Configuration Drift
@@ -23,29 +30,29 @@
30
31 Fossil allows the local configuration in certain areas to differ from
32 that of the remote. With the exception of the prior item, you get a copy
33 of these configuration areas on initial clone, but after that, some
34 remote configuration changes don’t sync down automatically, such as the
35 remote’s skin. You can ask for updates by running the
36 [`fossil config pull skin`](./help?cmd=config) command, but that
37 does not happen automatically during the course of normal development.
38
39
40 ## Private Branches
41
42 The very nature of Fossil’s [private branch feature][pbr] ensures that
43 remote clones don’t get a copy of those branches. Normally this is
44 exactly what you want, but in the case of making backups, you probably
45 want to back up these branches as well. One of the two backup methods below
46 provides this.
47
48
49 ## Shunned Artifacts
50
51 Fossil purposefully doesn’t sync [shunned artifacts][shun]. If you want
52 your local clone to be a precise match to the remote, it needs to track
53 changes to the shun table as well.
 
54
55
56 ## Unversioned Artifacts
57
58 Data in Fossil’s [unversioned artifacts table][uv] doesn’t sync down by
@@ -62,15 +69,15 @@
69 simple central-and-clones model that is more common, there may be no
70 single source of truth in the network because Fossil’s autosync feature
71 isn’t transitive.
72
73 That is, if you cloned from server A, and then you stand that up on a
74 server B, then if I clone from your server as my repository C, your changes to B
75 autosync up to A, but not down to me on C until I do something locally
76 that triggers autosync. The inverse is also true: if I commit something
77 on C, it will autosync up to B, but A won’t get a copy until someone on
78 B does something to trigger a sync there.
79
80 An easy way to run into this problem is to set up failover servers
81 `svr1` thru `svr3.example.com`, then set `svr2` and `svr3` up to sync
82 with the first. If all of the users normally clone from `svr1`, their
83 commits don’t get to `svr2` and `svr3` until something on one of the
@@ -89,12 +96,12 @@
96
97 # Solutions
98
99 The following script solves most of the above problems for the use case
100 where you want a *nearly-complete* clone of the remote repository using nothing
101 but the normal Fossil sync protocol. It only does so if you are logged into
102 the remote as a user with Setup capability, however.
103
104 ----
105
106 ``` shell
107 #!/bin/sh
@@ -105,22 +112,24 @@
112
113 ----
114
115 The last step is needed to ensure that shunned artifacts on the remote
116 are removed from the local clone. The second step includes
117 `fossil conf pull shun`, but until those artifacts are actually rebuilt
118 out of existence, your backup will be “more than complete” in the sense
119 that it will continue to have information that the remote says should
120 not exist any more. That would be not so much a “backup” as an
121 “archive,” which might not be what you want.
122
123 This method doesn’t get you a copy of the remote’s
124 [private branches][pbr], on purpose. It may also miss other info on the
125 remote, such as SQL-level customizations that the sync protocol can’t
126 see. (Some [ticket system customization][tkt] schemes rely on this ability, for example.) You can
127 solve such problems if you have access to the remote server, which
128 allows you to get a SQL-level backup. This requires Fossil 2.12 or
129 newer, which added [the `backup` command][bu] to take care of
130 locking and transaction isolation, allowing the user to safely back up an in-use
131 repository.
132
133 If you have SSH access to the remote server, something like this will work:
134
135 ----
@@ -160,11 +169,13 @@
169
170 This requires OpenSSL 1.1 or higher. If you’re on 1.0 or older, you
171 won’t have the `-pbkdf2` and `-iter` options, and you may have to choose
172 a different cipher algorithm; both changes are likely to weaken the
173 encryption significantly, so you should install a newer version rather
174 than work around the lack of these features.
175
176 If you’re on macOS, which
177 still ships 1.0 as of the time of this writing, [Homebrew][hb] offers
178 the current version of OpenSSL, but to avoid a conflict with the platform
179 version it’s [unlinked][hbul] by default, so you have to give an explicit
180 path to its “cellar” directory:
181
@@ -204,19 +215,21 @@
215
216 The last change is tricky: we used `fossil sql` above to ensure that
217 we’re using the same version of SQLite to write the encrypted backup DB
218 as was used to maintain the repository, but unfortunately, we can’t get
219 the built-in SQLite shell to write a backup into an empty database.
220 (As soon as it starts up, it goes looking for tables created by
221 `fossil init` and fails with an error.)
222 Therefore, we have to either run the restoration against a
223 possibly-different version of SQLite and hope there are no
224 incompatibilities, or we have to go out of our way to build a matching
225 version of `sqlite3` before we can safely do the restoration.
226
227 Keep in mind that Fossil often acts as a dogfooding project for SQLite,
228 making use of the latest features, so it is quite likely that a given
229 random `sqlite3` binary in your `PATH` will be unable to understand the
230 file created by “`fossil sql .dump`”!
231
232 [bu]: /help?cmd=backup
233 [grcp]: https://www.grc.com/passwords.htm
234 [hb]: https://brew.sh
235 [hbul]: https://docs.brew.sh/FAQ#what-does-keg-only-mean
236
+477 -32
--- www/blockchain.md
+++ www/blockchain.md
@@ -1,32 +1,477 @@
1
-# Fossil As Blockchain
2
-
3
-Fossil is a version control system built around blockchain.
4
-
5
-Wikipedia defines "blockchain" as
6
-
7
->
8
- "a growing list of records, called blocks, which are linked using
9
- cryptography. Each block contains a cryptographic hash of the previous
10
- block, a timestamp, and transaction data..." [(1)][]
11
-
12
-
13
-By that definition, Fossil is clearly an implementation of blockchain.
14
-The blocks are ["manifests" artifacts](./fileformat.wiki#manifest).
15
-Each manifest has a SHA1 or SHA3 hash of its parent or parents,
16
-a timestamp, and other transactional data. The repository grows by
17
-adding new manifests onto the list.
18
-
19
-Some people have come to associate blockchain with cryptocurrency, however,
20
-and since Fossil has nothing to do with cryptocurrency, the claim that
21
-Fossil is built around blockchain is met with skepticism. The key thing
22
-to note here is that cryptocurrency implementations like BitCoin are
23
-built around blockchain, but they are not synonymous with blockchain.
24
-Blockchain is a much broader concept. Blockchain is a mechanism for
25
-constructing a distributed ledger of transactions.
26
-Yes, you can use a distributed
27
-ledger to implement a cryptocurrency, but you can also use a distributed
28
-ledger to implement a version control system, and probably many other kinds
29
-of applications as well. Blockchain is a much broader idea than
30
-cryptocurrency.
31
-
32
-[(1)]: https://en.wikipedia.org/wiki/Blockchain
1
+# Is Fossil A Blockchain?
2
+
3
+The Fossil version control system shares a lot of similarities with
4
+other blockchain based technologies, but it also differs from the more common
5
+sorts of blockchains. This document will discuss the term’s
6
+applicability, so you can decide whether applying the term to Fossil
7
+makes sense to you.
8
+
9
+
10
+## The Dictionary Argument
11
+
12
+The [Wikipedia definition of "blockchain"][bcwp] begins:
13
+
14
+>
15
+ "A blockchain…is a growing list of records, called blocks, which are linked using
16
+ cryptography. Each block contains a cryptographic hash of the previous
17
+ block, a timestamp, and transaction data (generally represented as a Merkle tree)."
18
+
19
+
20
+By that partial definition, Fossil is indeed a blockchain. The blocks
21
+are Fossil’s ["manifest" artifacts](./fileformat.wiki#manifest). Each
22
+manifest has a cryptographically-strong [SHA-1] or [SHA-3] hash linking it to
23
+one or more “parent” blocks. The manifest also contains a timestamp and
24
+the transactional data needed to express a commit to the repository. If
25
+you traverse the Fossil repository from the tips of its [DAG] to the
26
+root by following the parent hashes in each manifest, you will then have
27
+a Merkle tree. Point-for-point, Fossil follows that definition.
28
+
29
+Every change in Fossil starts by adding one or more manifests to
30
+the repository, extending this tree.
31
+
32
+[bcwp]: https://en.wikipedia.org/wiki/Blockchain
33
+[DAG]: https://en.wikipedia.org/wiki/Directed_acyclic_graph
34
+[SHA-1]: https://en.wikipedia.org/wiki/SHA-1
35
+[SHA-3]: https://en.wikipedia.org/wiki/SHA-3
36
+
37
+
38
+
39
+<a id="currency"></a>
40
+## Cryptocurrency
41
+
42
+Because blockchain technology was first popularized as Bitcoin, many
43
+people associate the term with cryptocurrency. Fossil has nothing to do
44
+with cryptocurrency, so a claim that “Fossil is a blockchain” may fail
45
+to communicate the speaker’s concepts clearly due to conflation with
46
+cryptocurrency.
47
+
48
+Cryptocurrency has several features and requirements that Fossil doesn’t
49
+provide, either because it doesn’t need them or because we haven’t
50
+gotten around to creating the feature. Whether these are essential to
51
+the definition of “blockchain” and thus disqualify Fossil as a blockchain
52
+is for you to decide.
53
+
54
+Cryptocurrencies must prevent three separate types of fraud to be useful:
55
+
56
+* **Type 1** is modification of existing currency. To draw an analogy
57
+ to paper money, we wish to prevent someone from using green and
58
+ black markers to draw extra zeroes on a US $10 bill so that it
59
+ claims to be a $100 bill.
60
+
61
+* **Type 2** is creation of new fraudulent currency that will pass
62
+ in commerce. To extend our analogy, it is the creation of new
63
+ US $10 bills. There are two sub-types to this fraud. In terms of
64
+ our analogy, they are:
65
+
66
+ * **Type 2a**: copying of an existing legitimate $10 bill
67
+ * **Type 2b**: printing a new $10 bill that is unlike an existing
68
+ legitimate one, yet which will still pass in commerce
69
+
70
+* **Type 3** is double-spending existing legitimate cryptocurrency.
71
+ There is no analogy in paper money due to its physical form; it is a
72
+ problem unique to digital currency due to its infinitely-copyable
73
+ nature.
74
+
75
+How does all of this compare to Fossil?
76
+
77
+1. <a id="signatures"></a>**Signatures.** Cryptocurrencies use a chain
78
+ of [digital signatures][dsig] to prevent Type 1 and Type 3 frauds. This
79
+ chain forms an additional link between the blocks, separate from the
80
+ hash chain that applies an ordering and lookup scheme to the blocks.
81
+ [_Blockchain: Simple Explanation_][bse] explains this “hash chain”
82
+ vs. “block chain” distinction in more detail.
83
+
84
+ These signatures prevent modification of the face value of each
85
+   transaction (Type 1 fraud) by ensuring that only the one signing a
86
+ new block has the private signing key that could change an issued
87
+ block after the fact.
88
+
89
+ The fact that these signatures are also *chained* prevents Type
90
+ 3 frauds by making the *prior* owner of a block sign it over to
91
+ the new owner. To avoid an O(n²) auditing problem as a result,
92
+ cryptocurrencies add a separate chain of hashes to make checking
93
+ for double-spending quick and easy.
94
+
95
+ Fossil has [a disabled-by-default feature][cs] to call out to an
96
+ external copy of [PGP] or [GPG] to sign commit manifests before
97
+ inserting them into the repository. You may wish to couple that with
98
+ a server-side [after-receive hook][arh] to reject unsigned commits.
99
+
100
+ Although there are several distinctions you can draw between the way
101
+ Fossil’s commit signing scheme works and the way block signing works
102
+ in cryptocurrencies, only one is of material interest for our
103
+ purposes here: Fossil commit signatures apply only to a single
104
+ commit. Fossil does not sign one commit over to the next “owner” of
105
+ that commit in the way that a blockchain-based cryptocurrency must
106
+   when transferring currency from one user to another, because there
107
+ is no useful analog to the double-spending problem in Fossil. The
108
+ closest you can come to this is double-insert of commits into the
109
+ blockchain, which we’ll address shortly.
110
+
111
+ What Fossil commit signatures actually do is provide in-tree forgery
112
+ prevention, both Type 1 and Type 2. You cannot modify existing
113
+ commits (Type 1 forgery) because you do not have the original
114
+ committer’s private signing key, and you cannot forge new commits
115
+ attesting to come from some other trusted committer (Type 2) because
116
+ you don’t have any of their private signing keys, either.
117
+   Cryptocurrencies also use the work problem to prevent Type 2
118
+ forgeries, but the application of that to Fossil is a matter we get
119
+ to [later](#work).
120
+
121
+ Although you have complete control over the contents of your local
122
+ Fossil repository clone, you cannot perform Type 1 forgery on its
123
+ contents short of executing a [preimage attack][prei] on the hash
124
+   algorithm. ([SHA3-256][SHA-3] by default in the current version of
125
+ Fossil.) Even if you could, Fossil’s sync protocol will prevent the
126
+ modification from being pushed into another repository: the remote
127
+ Fossil instance says, “I’ve already got that one, thanks,” and
128
+ ignores the push. Thus, short of breaking into the remote server
129
+ and modifying the repository in place, you couldn’t even make use of
130
+ a preimage attack if you had that power. This is an attack on the
131
+ server itself, not on Fossil’s data structures, so while it is
132
+ useful to think through this problem, it is not helpful to answering
133
+ our questions here.
134
+
135
+ The Fossil sync protocol also prevents the closest analog to Type 3
136
+ frauds in Fossil: copying a commit manifest in your local repo clone
137
+ won’t result in a double-commit on sync.
138
+
139
+ In the absence of digital signatures, Fossil’s [RBAC system][caps]
140
+ restricts Type 2 forgery to trusted committers. Thus once again
141
+ we’re reduced to an infosec problem, not a data structure design
142
+ question. (Inversely, enabling commit clearsigning is a good idea
143
+ if you have committers on your repo whom you don’t trust not to
144
+ commit Type 2 frauds. But let us be clear: your choice of setting
145
+ does not answer the question of whether Fossil is a blockchain.)
146
+
147
+ If Fossil signatures prevent Type 1 and Type 2 frauds, you
148
+ may wonder why they are not enabled by default. It is because
149
+ they are defense-in-depth measures, not the minimum sufficient
150
+ measures needed to prevent repository fraud, unlike the equivalent
151
+   protections in a cryptocurrency blockchain. Fossil provides its
152
+ primary protections through other means, so it doesn’t need to
153
+ mandate signatures.
154
+
155
+ Also, Fossil is not itself a [PKI], and there is no way for regular
156
+ users of Fossil to link it to a PKI, since doing so would likely
157
+ result in an unwanted [PII] disclosure. There is no email address
158
+ in a Fossil commit manifest that you could use to query one of the
159
+ public PGP keyservers, for example. It therefore becomes a local
160
+ policy matter as to whether you even *want* to have signatures,
161
+ because they’re not without their downsides.
162
+
163
+2. <a id="work"></a>**Work Contests.** Cryptocurrencies prevent Type 2b forgeries
164
+ by setting up some sort of contest that ensures that new coins can come
165
+ into existence only by doing some difficult work task. This “mining”
166
+ activity results in a coin that took considerable work to create,
167
+ which thus has economic value by being a) difficult to re-create,
168
+ and b) resistant to [debasement][dboc].
169
+
170
+ Fossil repositories are most often used to store the work product of
171
+ individuals, rather than cryptocoin mining machines. There is
172
+ generally no contest in trying to produce the most commits. There
173
+ may be an implicit contest to produce the “best” commits, but that
174
+ is a matter of project management, not something that can be
175
+ automatically mediated through objective measures.
176
+
177
+ Incentives to commit to the repository come from outside of Fossil;
178
+ they are not inherent to its nature, as with cryptocurrencies.
179
+ Moreover, there is no useful sense in which we could say that one
180
+ commit “re-creates” another. Commits are generally products of
181
+ individual human intellect, thus necessarily unique in all but
182
+ trivial cases. This is foundational to copyright law.
183
+
184
+3. <a id="lcr"></a>**Longest Chain Rule.** Cryptocurrencies generally
185
+ need some way to distinguish which blocks are legitimate and which
186
+ not. They do this in part by identifying the linear chain with the
187
+ greatest cumulative [work time](#work) as the legitimate chain. All
188
+ blocks not on that linear chain are considered “orphans” and are
189
+ ignored by the cryptocurrency software.
190
+
191
+ Its inverse is sometimes called the “51% attack” because a single
192
+ actor would have to do slightly more work than the entire rest of
193
+ the community using a given cryptocurrency in order for their fork
194
+ of the currency to be considered the legitimate fork. This argument
195
+ soothes concerns that a single bad actor could take over the
196
+ network.
197
+
198
+ The closest we can come to that notion in Fossil is the default
199
+ “trunk” branch, but there’s nothing in Fossil that delegitimizes
200
+ other branches just because they’re shorter, nor is there any way in
201
+ Fossil to score the amount of work that went into a commit. Indeed,
202
+ [forks and branches][fb] are *valuable and desirable* things in
203
+ Fossil.
204
+
205
+This much is certain: Fossil is definitely not a cryptocurrency. Whether
206
+this makes it “not a blockchain” is a subjective matter.
207
+
208
+[arh]: ./hooks.md
209
+[bse]: https://www.researchgate.net/publication/311572122_What_is_Blockchain_a_Gentle_Introduction
210
+[caps]: ./caps/
211
+[cs]: /help?cmd=clearsign
212
+[dboc]: https://en.wikipedia.org/wiki/Debasement
213
+[dsig]: https://en.wikipedia.org/wiki/Digital_signature
214
+[fb]: ./branching.wiki
215
+[GPG]: https://gnupg.org/
216
+[PGP]: https://www.openpgp.org/
217
+[PII]: https://en.wikipedia.org/wiki/Personal_data
218
+[PKI]: https://en.wikipedia.org/wiki/Public_key_infrastructure
219
+[pow]: https://en.wikipedia.org/wiki/Proof_of_work
220
+[prei]: https://en.wikipedia.org/wiki/Preimage_attack
221
+
222
+
223
+
224
+<a id="dlt"></a>
225
+## Distributed Ledgers
226
+
227
+Cryptocurrencies are an instance of [distributed ledger technology][dlt]. If
228
+we can convince ourselves that Fossil is also a distributed
229
+ledger, then we might think of Fossil as a peer technology,
230
+having at least some qualifications toward being considered a blockchain.
231
+
232
+A key tenet of DLT is that records be unmodifiable after they’re
233
+committed to the ledger, which matches quite well with Fossil’s design
234
+and everyday use cases. Fossil puts up multiple barriers to prevent
235
+modification of existing records and injection of incorrect records.
236
+
237
+Yet, Fossil also has [purge] and [shunning][shun]. Doesn’t that mean
238
+Fossil cannot be a distributed ledger?
239
+
240
+These features only remove existing commits from the repository. If you want a
241
+currency analogy, they are ways to burn a paper bill or to melt a [fiat
242
+coin][fc] down to slag. In a cryptocurrency, you can erase your “wallet”
243
+file, effectively destroying money in a similar way. These features
244
+do not permit forgery of either type described above: you can’t use them
245
+to change the value of existing commits (Type 1) or add new commits to
246
+the repository (Type 2).
247
+
248
+What if we removed those features from Fossil, creating an append-only
249
+Fossil variant? Is it a DLT then? Arguably still not, because [today’s Fossil
250
+is an AP-mode system][ctap] in the [CAP theorem][cap] sense, which means
251
+there can be no guaranteed consensus on the content of the ledger at any
252
+given time. If you had an AP-mode accounts receivable system, it could
253
+have different bottom-line totals at different sites, because you’ve
254
+cast away “C” to get AP-mode operation.
255
+
256
+Because of this, you could still not guarantee that the command
257
+“`fossil info tip`” gives the same result everywhere. A CA or CP-mode Fossil
258
+variant would guarantee that everyone got the same result. (Everyone not
259
+partitioned away from the majority of the network at any rate, in the CP
260
+case.)
261
+
262
+What are the prospects for CA-mode or CP-mode Fossil? [We don’t want
263
+CA-mode Fossil][ctca], but [CP-mode could be useful][ctcp]. Until the latter
264
+exists, this author believes Fossil is not a distributed ledger in a
265
+technologically defensible sense.
266
+
267
+The most common technologies answering to the label “blockchain” are all
268
+DLTs, so if Fossil is not a DLT, then it is not a blockchain in that
269
+sense.
270
+
271
+[ctap]: ./cap-theorem.md#ap
272
+[ctca]: ./cap-theorem.md#ca
273
+[ctcp]: ./cap-theorem.md#cp
274
+[cap]: https://en.wikipedia.org/wiki/CAP_theorem
275
+[dlt]: https://en.wikipedia.org/wiki/Distributed_ledger
276
+[DVCS]: https://en.wikipedia.org/wiki/Distributed_version_control
277
+[fc]: https://en.wikipedia.org/wiki/Fiat_money
278
+[purge]: /help?cmd=purge
279
+[shun]: ./shunning.wiki
280
+
281
+
282
+<a id="dpc"></a>
283
+## Distributed Partial Consensus
284
+
285
+If we can’t get DLT, can we at least get some kind of distributed
286
+consensus at the level of individual Fossil commits?
287
+
288
+Many blockchain based technologies have this property: given some
289
+element of the blockchain, you can make certain proofs that it either is
290
+a legitimate part of the whole blockchain, or it is not.
291
+
292
+Unfortunately, this author doesn’t see a way to do that with Fossil.
293
+Given only one “block” in Fossil’s putative “blockchain” — a commit, in
294
+Fossil terminology — all you can prove is whether it is internally
295
+consistent, that it is not corrupt. That then points you at the parent(s) of that
296
+commit, which you can repeat the exercise on, back to the root of the
297
+DAG. This is what the enabled-by-default [`repo-cksum` setting][rcks]
298
+does.
299
+
300
+If cryptocurrencies worked this way, you wouldn’t be able to prove that
301
+a given cryptocoin was legitimate without repeating the proof-of-work
302
+calculations for the entire cryptocurrency scheme! Instead, you only
303
+need to check a certain number of signatures and proofs-of-work in order
304
+to be reasonably certain that you are looking at a legitimate section of
305
+the whole blockchain.
306
+
307
+What would it even mean to prove that a given Fossil commit “*belongs*”
308
+to the repository you’ve extracted it from? For a software project,
309
+isn’t that tantamount to automatic code review, where the server would
310
+be able to reliably accept or reject a commit based solely on its
311
+content? That sounds nice, but this author believes we’ll need to invent
312
+[AGI] first.
313
+
314
+A better method to provide distributed consensus for Fossil would be to
315
+rely on the *natural* intelligence of its users: that is, distributed
316
+commit signing, so that a commit is accepted into the blockchain only
317
+once some number of users countersign it. This amounts to a code review
318
+feature, which Fossil doesn’t currently have.
319
+
320
+Solving that problem basically requires solving the [PKI] problem first,
321
+since you can’t verify the proofs of these signatures if you can’t first
322
+prove that the provided signatures belong to people you trust. This is a
323
+notoriously hard problem in its own right.
324
+
325
+A future version of Fossil could instead provide [consensus in the CAP
326
+sense][ctcp]. For instance, you could say that if a quorum of servers
327
+all have a given commit, it “belongs.” Fossil’s strong hashing tech
328
+would mean that querying whether a given commit is part of the
329
+“blockchain” would be as simple as going down the list of servers and
330
+sending each an HTTP GET `/info` query for the artifact ID, concluding
331
+that the commit is legitimate once you get enough HTTP 200 status codes back. All of this is
332
+hypothetical, because Fossil doesn’t do this today.
333
+
334
+[AGI]: https://en.wikipedia.org/wiki/Artificial_general_intelligence
335
+[rcks]: /help?cmd=repo-cksum
336
+
337
+
338
+
339
+<a id="anon"></a>
340
+## Anonymity
341
+
342
+Many blockchain based technologies go to extraordinary lengths to
343
+allow anonymous use of their service.
344
+
345
+As typically configured, Fossil does not: commits synced between servers
346
+always at least have a user name associated with them, which the remote
347
+system must accept through its [RBAC system][caps]. That system can run
348
+without having the user’s email address, but it’s needed if [email
349
+alerts][alert] are enabled on the server. The remote server logs the IP
350
+address of the commit for security reasons. That coupled with the
351
+timestamp on the commit could sufficiently deanonymize users in many
352
+common situations.
353
+
354
+It is possible to configure Fossil so it doesn’t do this:
355
+
356
+* You can give [Write capability][capi] to user category “nobody,” so
357
+ that anyone that can reach your server can push commits into its
358
+ repository.
359
+
360
+* You could give that capability to user category “anonymous” instead,
361
+ which requires that the user log in with a CAPTCHA, but which doesn’t
362
+ require that the user otherwise identify themselves.
363
+
364
+* You could enable [the `self-register` setting][sreg] and choose not to
365
+ enable [commit clear-signing][cs] so that anonymous users could push
366
+ commits into your repository under any name they want.
367
+
368
+On the server side, you can also [scrub] the logging that remembers
369
+where each commit came from.
370
+
371
+That info isn’t transmitted from the remote server on clone or pull.
372
+Instead, the size of the `rcvfrom` table after initial clone is 1: it
373
+contains the remote server’s IP address. On each pull containing new
374
+artifacts, your local `fossil` instance adds another entry to this
375
+table, likely with the same IP address unless the server has moved or
376
+you’re using [multiple remotes][mrep]. This table is far more
377
+interesting on the server side, containing the IP addresses of all
378
+contentful pushes; thus [the `scrub` command][scrub].
379
+
380
+Because Fossil doesn’t
381
+remember IP addresses in commit manifests or require commit signing, it
382
+allows at least *pseudonymous* commits. When someone clones a remote
383
+repository, they don’t learn the email address, IP address, or any other
384
+sort of [PII] of prior committers, on purpose.
385
+
386
+Some people say that private, permissioned blockchains (as you may
387
+imagine Fossil to be) are inherently problematic by the very reason that
388
+they don’t bake anonymous contribution into their core. The very
389
+existence of an RBAC is a moving piece that can break. Isn’t it better,
390
+the argument goes, to have a system that works even in the face of
391
+anonymous contribution, so that you don’t need an RBAC? Cryptocurrencies
392
+do this, for example: anyone can “mine” a new coin and push it into the
393
+blockchain, and there is no central authority restricting the transfer
394
+of cryptocurrency from one user to another.
395
+
396
+We can draw an analogy to encryption, where an algorithm is
397
+considered inherently insecure if it depends on keeping any information
398
+from an attacker other than the key. Encryption schemes that do
399
+otherwise are derided as “security through obscurity.”
400
+
401
+You may be wondering what any of this has to do with whether Fossil is a
402
+blockchain, but that is exactly the point: all of this is outside
403
+Fossil’s core hash-chained repository data structure. If you take the
404
+position that you don’t have a “blockchain” unless it allows anonymous
405
+contribution, with any needed restrictions provided only by the very
406
+structure of the managed data, then Fossil does not qualify.
407
+
408
+Why do some people care about this distinction? Consider Bitcoin,
409
+wherein an anonymous user cannot spam the blockchain with bogus coins
410
+because its [proof-of-work][pow] protocol allows such coins to be
411
+rejected immediately. There is no equivalent in Fossil: it has no
412
+technology that allows the receiving server to look at the content of a
413
+commit and automatically judge it to be “good.” Fossil relies on its
414
+RBAC system to provide such distinctions: if you have a commit bit, your
415
+commits are *ipso facto* judged “good,” insofar as any human work
416
+product can be so judged by a blob of compiled C code. This takes us
417
+back to the [digital ledger question](#dlt), where we can talk about
418
+what it means to later correct a bad commit that got through the RBAC
419
+check.
420
+
421
+We may be willing to accept pseudonymity, rather than full anonymity.
422
+If we configure Fossil as above, either bypassing the RBAC or abandoning
423
+human control over it, scrubbing IP addresses, etc., is it then a public
424
+permissionless blockchain in that sense?
425
+
426
+We think not, because there is no [longest chain rule](#lcr) or anything
427
+like it in Fossil.
428
+
429
+For a fair model of how a Fossil repository might behave under such
430
+conditions, consider GitHub: here one user can fork another’s repository
431
+and make an arbitrary number of commits to their public fork. Imagine
432
+this happens 10 times. How does someone come along later and
433
+*automatically* evaluate which of the 11 forks of the code (counting the
434
+original repository among their number) is the “best” one? For a
435
+computer software project, the best we could do to approximate this
436
+devolves to a [software project cost estimation problem][scost]. These
437
+methods are rather questionable in their own right, being mathematical
438
+judgement values on human work products, but even if we accept their
439
+usefulness, then we still cannot say which fork is better based solely
440
+on their scores under these metrics. We may well prefer to use the fork
441
+of a software program that took *less* effort, being smaller, more
442
+self-contained, and with a smaller attack surface.
443
+
444
+
445
+[alert]: ./alerts.md
446
+[capi]: ./caps/ref.html#i
447
+[mrep]: /help?cmd=remote
448
+[scost]: https://en.wikipedia.org/wiki/Software_development_effort_estimation
449
+[scrub]: /help?cmd=scrub
450
+[sreg]: /help?cmd=self-register
451
+
452
+
453
+# Conclusion
454
+
455
+This author believes it is technologically indefensible to call Fossil a
456
+“blockchain” in any sense likely to be understood by a majority of those
457
+you’re communicating with.
458
+
459
+Within a certain narrow scope, you can defend this usage, but if you do
460
+that, you’ve failed any goal that requires clear communication: it
461
+doesn’t work to use a term in a nonstandard way just because you can
462
+defend it. The people you’re communicating your ideas to must have the
463
+same concept of the terms you use.
464
+
465
+
466
+What term should you use instead? Fossil stores a DAG of hash-chained
467
+commits, so an indisputably correct term is a [Merkle tree][mt], named
468
+after [its inventor][drrm]. You could also use the more generic term
469
+“hash tree.”
470
+
471
+Fossil is a technological peer to many common sorts of blockchain
472
+technology. There is a lot of overlap in concepts and implementation
473
+details, but when speaking of what most people understand as
474
+“blockchain,” Fossil is not that.
475
+
476
+[drrm]: https://en.wikipedia.org/wiki/Ralph_Merkle
477
+[mt]: https://en.wikipedia.org/wiki/Merkle_tree
33478
--- www/blockchain.md
+++ www/blockchain.md
@@ -1,32 +1,477 @@
1 # Fossil As Blockchain
2
3 Fossil is a version control system built around blockchain.
4
5 Wikipedia defines "blockchain" as
6
7 >
8 "a growing list of records, called blocks, which are linked using
9 cryptography. Each block contains a cryptographic hash of the previous
10 block, a timestamp, and transaction data..." [(1)][]
11
12
13 By that definition, Fossil is clearly an implementation of blockchain.
14 The blocks are ["manifests" artifacts](./fileformat.wiki#manifest).
15 Each manifest has a SHA1 or SHA3 hash of its parent or parents,
16 a timestamp, and other transactional data. The repository grows by
17 adding new manifests onto the list.
18
19 Some people have come to associate blockchain with cryptocurrency, however,
20 and since Fossil has nothing to do with cryptocurrency, the claim that
21 Fossil is built around blockchain is met with skepticism. The key thing
22 to note here is that cryptocurrency implementations like BitCoin are
23 built around blockchain, but they are not synonymous with blockchain.
24 Blockchain is a much broader concept. Blockchain is a mechanism for
25 constructing a distributed ledger of transactions.
26 Yes, you can use a distributed
27 ledger to implement a cryptocurrency, but you can also use a distributed
28 ledger to implement a version control system, and probably many other kinds
29 of applications as well. Blockchain is a much broader idea than
30 cryptocurrency.
31
32 [(1)]: https://en.wikipedia.org/wiki/Blockchain
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
--- www/blockchain.md
+++ www/blockchain.md
@@ -1,32 +1,477 @@
1 # Is Fossil A Blockchain?
2
3 The Fossil version control system shares a lot of similarities with
4 other blockchain based technologies, but it also differs from the more common
5 sorts of blockchains. This document will discuss the term’s
6 applicability, so you can decide whether applying the term to Fossil
7 makes sense to you.
8
9
10 ## The Dictionary Argument
11
12 The [Wikipedia definition of "blockchain"][bcwp] begins:
13
14 >
15 "A blockchain…is a growing list of records, called blocks, which are linked using
16 cryptography. Each block contains a cryptographic hash of the previous
17 block, a timestamp, and transaction data (generally represented as a Merkle tree)."
18
19
20 By that partial definition, Fossil is indeed a blockchain. The blocks
21 are Fossil’s ["manifest" artifacts](./fileformat.wiki#manifest). Each
22 manifest has a cryptographically-strong [SHA-1] or [SHA-3] hash linking it to
23 one or more “parent” blocks. The manifest also contains a timestamp and
24 the transactional data needed to express a commit to the repository. If
25 you traverse the Fossil repository from the tips of its [DAG] to the
26 root by following the parent hashes in each manifest, you will then have
27 a Merkle tree. Point-for-point, Fossil follows that definition.
28
29 Every change in Fossil starts by adding one or more manifests to
30 the repository, extending this tree.
31
32 [bcwp]: https://en.wikipedia.org/wiki/Blockchain
33 [DAG]: https://en.wikipedia.org/wiki/Directed_acyclic_graph
34 [SHA-1]: https://en.wikipedia.org/wiki/SHA-1
35 [SHA-3]: https://en.wikipedia.org/wiki/SHA-3
36
37
38
39 <a id="currency"></a>
40 ## Cryptocurrency
41
42 Because blockchain technology was first popularized as Bitcoin, many
43 people associate the term with cryptocurrency. Fossil has nothing to do
44 with cryptocurrency, so a claim that “Fossil is a blockchain” may fail
45 to communicate the speaker’s concepts clearly due to conflation with
46 cryptocurrency.
47
48 Cryptocurrency has several features and requirements that Fossil doesn’t
49 provide, either because it doesn’t need them or because we haven’t
50 gotten around to creating the feature. Whether these are essential to
51 the definition of “blockchain” and thus disqualify Fossil as a blockchain
52 is for you to decide.
53
54 Cryptocurrencies must prevent three separate types of fraud to be useful:
55
56 * **Type 1** is modification of existing currency. To draw an analogy
57 to paper money, we wish to prevent someone from using green and
58 black markers to draw extra zeroes on a US $10 bill so that it
59 claims to be a $100 bill.
60
61 * **Type 2** is creation of new fraudulent currency that will pass
62 in commerce. To extend our analogy, it is the creation of new
63 US $10 bills. There are two sub-types to this fraud. In terms of
64 our analogy, they are:
65
66 * **Type 2a**: copying of an existing legitimate $10 bill
67 * **Type 2b**: printing a new $10 bill that is unlike an existing
68 legitimate one, yet which will still pass in commerce
69
70 * **Type 3** is double-spending existing legitimate cryptocurrency.
71 There is no analogy in paper money due to its physical form; it is a
72 problem unique to digital currency due to its infinitely-copyable
73 nature.
74
75 How does all of this compare to Fossil?
76
77 1. <a id="signatures"></a>**Signatures.** Cryptocurrencies use a chain
78 of [digital signatures][dsig] to prevent Type 1 and Type 3 frauds. This
79 chain forms an additional link between the blocks, separate from the
80 hash chain that applies an ordering and lookup scheme to the blocks.
81 [_Blockchain: Simple Explanation_][bse] explains this “hash chain”
82 vs. “block chain” distinction in more detail.
83
84 These signatures prevent modification of the face value of each
85       transaction (Type 1 fraud) by ensuring that only the one signing a
86 new block has the private signing key that could change an issued
87 block after the fact.
88
89 The fact that these signatures are also *chained* prevents Type
90 3 frauds by making the *prior* owner of a block sign it over to
91 the new owner. To avoid an O(n²) auditing problem as a result,
92 cryptocurrencies add a separate chain of hashes to make checking
93 for double-spending quick and easy.
94
95 Fossil has [a disabled-by-default feature][cs] to call out to an
96 external copy of [PGP] or [GPG] to sign commit manifests before
97 inserting them into the repository. You may wish to couple that with
98 a server-side [after-receive hook][arh] to reject unsigned commits.
99
100 Although there are several distinctions you can draw between the way
101 Fossil’s commit signing scheme works and the way block signing works
102 in cryptocurrencies, only one is of material interest for our
103 purposes here: Fossil commit signatures apply only to a single
104 commit. Fossil does not sign one commit over to the next “owner” of
105 that commit in the way that a blockchain-based cryptocurrency must
106      when transferring currency from one user to another, because there
107 is no useful analog to the double-spending problem in Fossil. The
108 closest you can come to this is double-insert of commits into the
109 blockchain, which we’ll address shortly.
110
111 What Fossil commit signatures actually do is provide in-tree forgery
112 prevention, both Type 1 and Type 2. You cannot modify existing
113 commits (Type 1 forgery) because you do not have the original
114 committer’s private signing key, and you cannot forge new commits
115 attesting to come from some other trusted committer (Type 2) because
116 you don’t have any of their private signing keys, either.
117      Cryptocurrencies also use the work problem to prevent Type 2
118 forgeries, but the application of that to Fossil is a matter we get
119 to [later](#work).
120
121 Although you have complete control over the contents of your local
122 Fossil repository clone, you cannot perform Type 1 forgery on its
123 contents short of executing a [preimage attack][prei] on the hash
124      algorithm. ([SHA3-256][SHA-3] by default in the current version of
125 Fossil.) Even if you could, Fossil’s sync protocol will prevent the
126 modification from being pushed into another repository: the remote
127 Fossil instance says, “I’ve already got that one, thanks,” and
128 ignores the push. Thus, short of breaking into the remote server
129 and modifying the repository in place, you couldn’t even make use of
130 a preimage attack if you had that power. This is an attack on the
131 server itself, not on Fossil’s data structures, so while it is
132 useful to think through this problem, it is not helpful to answering
133 our questions here.
134
135 The Fossil sync protocol also prevents the closest analog to Type 3
136 frauds in Fossil: copying a commit manifest in your local repo clone
137 won’t result in a double-commit on sync.
138
139 In the absence of digital signatures, Fossil’s [RBAC system][caps]
140 restricts Type 2 forgery to trusted committers. Thus once again
141 we’re reduced to an infosec problem, not a data structure design
142 question. (Inversely, enabling commit clearsigning is a good idea
143 if you have committers on your repo whom you don’t trust not to
144 commit Type 2 frauds. But let us be clear: your choice of setting
145 does not answer the question of whether Fossil is a blockchain.)
146
147 If Fossil signatures prevent Type 1 and Type 2 frauds, you
148 may wonder why they are not enabled by default. It is because
149 they are defense-in-depth measures, not the minimum sufficient
150 measures needed to prevent repository fraud, unlike the equivalent
151      protections in a cryptocurrency blockchain. Fossil provides its
152 primary protections through other means, so it doesn’t need to
153 mandate signatures.
154
155 Also, Fossil is not itself a [PKI], and there is no way for regular
156 users of Fossil to link it to a PKI, since doing so would likely
157 result in an unwanted [PII] disclosure. There is no email address
158 in a Fossil commit manifest that you could use to query one of the
159 public PGP keyservers, for example. It therefore becomes a local
160 policy matter as to whether you even *want* to have signatures,
161 because they’re not without their downsides.
162
163 2. <a id="work"></a>**Work Contests.** Cryptocurrencies prevent Type 2b forgeries
164 by setting up some sort of contest that ensures that new coins can come
165 into existence only by doing some difficult work task. This “mining”
166 activity results in a coin that took considerable work to create,
167 which thus has economic value by being a) difficult to re-create,
168 and b) resistant to [debasement][dboc].
169
170 Fossil repositories are most often used to store the work product of
171 individuals, rather than cryptocoin mining machines. There is
172 generally no contest in trying to produce the most commits. There
173 may be an implicit contest to produce the “best” commits, but that
174 is a matter of project management, not something that can be
175 automatically mediated through objective measures.
176
177 Incentives to commit to the repository come from outside of Fossil;
178 they are not inherent to its nature, as with cryptocurrencies.
179 Moreover, there is no useful sense in which we could say that one
180 commit “re-creates” another. Commits are generally products of
181 individual human intellect, thus necessarily unique in all but
182 trivial cases. This is foundational to copyright law.
183
184 3. <a id="lcr"></a>**Longest Chain Rule.** Cryptocurrencies generally
185 need some way to distinguish which blocks are legitimate and which
186 not. They do this in part by identifying the linear chain with the
187 greatest cumulative [work time](#work) as the legitimate chain. All
188 blocks not on that linear chain are considered “orphans” and are
189 ignored by the cryptocurrency software.
190
191 Its inverse is sometimes called the “51% attack” because a single
192 actor would have to do slightly more work than the entire rest of
193 the community using a given cryptocurrency in order for their fork
194 of the currency to be considered the legitimate fork. This argument
195 soothes concerns that a single bad actor could take over the
196 network.
197
198 The closest we can come to that notion in Fossil is the default
199 “trunk” branch, but there’s nothing in Fossil that delegitimizes
200 other branches just because they’re shorter, nor is there any way in
201 Fossil to score the amount of work that went into a commit. Indeed,
202 [forks and branches][fb] are *valuable and desirable* things in
203 Fossil.
204
205 This much is certain: Fossil is definitely not a cryptocurrency. Whether
206 this makes it “not a blockchain” is a subjective matter.
207
208 [arh]: ./hooks.md
209 [bse]: https://www.researchgate.net/publication/311572122_What_is_Blockchain_a_Gentle_Introduction
210 [caps]: ./caps/
211 [cs]: /help?cmd=clearsign
212 [dboc]: https://en.wikipedia.org/wiki/Debasement
213 [dsig]: https://en.wikipedia.org/wiki/Digital_signature
214 [fb]: ./branching.wiki
215 [GPG]: https://gnupg.org/
216 [PGP]: https://www.openpgp.org/
217 [PII]: https://en.wikipedia.org/wiki/Personal_data
218 [PKI]: https://en.wikipedia.org/wiki/Public_key_infrastructure
219 [pow]: https://en.wikipedia.org/wiki/Proof_of_work
220 [prei]: https://en.wikipedia.org/wiki/Preimage_attack
221
222
223
224 <a id="dlt"></a>
225 ## Distributed Ledgers
226
227 Cryptocurrencies are an instance of [distributed ledger technology][dlt]. If
228 we can convince ourselves that Fossil is also a distributed
229 ledger, then we might think of Fossil as a peer technology,
230 having at least some qualifications toward being considered a blockchain.
231
232 A key tenet of DLT is that records be unmodifiable after they’re
233 committed to the ledger, which matches quite well with Fossil’s design
234 and everyday use cases. Fossil puts up multiple barriers to prevent
235 modification of existing records and injection of incorrect records.
236
237 Yet, Fossil also has [purge] and [shunning][shun]. Doesn’t that mean
238 Fossil cannot be a distributed ledger?
239
240 These features only remove existing commits from the repository. If you want a
241 currency analogy, they are ways to burn a paper bill or to melt a [fiat
242 coin][fc] down to slag. In a cryptocurrency, you can erase your “wallet”
243 file, effectively destroying money in a similar way. These features
244 do not permit forgery of either type described above: you can’t use them
245 to change the value of existing commits (Type 1) or add new commits to
246 the repository (Type 2).
247
248 What if we removed those features from Fossil, creating an append-only
249 Fossil variant? Is it a DLT then? Arguably still not, because [today’s Fossil
250 is an AP-mode system][ctap] in the [CAP theorem][cap] sense, which means
251 there can be no guaranteed consensus on the content of the ledger at any
252 given time. If you had an AP-mode accounts receivable system, it could
253 have different bottom-line totals at different sites, because you’ve
254 cast away “C” to get AP-mode operation.
255
256 Because of this, you could still not guarantee that the command
257 “`fossil info tip`” gives the same result everywhere. A CA or CP-mode Fossil
258 variant would guarantee that everyone got the same result. (Everyone not
259 partitioned away from the majority of the network at any rate, in the CP
260 case.)
261
262 What are the prospects for CA-mode or CP-mode Fossil? [We don’t want
263 CA-mode Fossil][ctca], but [CP-mode could be useful][ctcp]. Until the latter
264 exists, this author believes Fossil is not a distributed ledger in a
265 technologically defensible sense.
266
267 The most common technologies answering to the label “blockchain” are all
268 DLTs, so if Fossil is not a DLT, then it is not a blockchain in that
269 sense.
270
271 [ctap]: ./cap-theorem.md#ap
272 [ctca]: ./cap-theorem.md#ca
273 [ctcp]: ./cap-theorem.md#cp
274 [cap]: https://en.wikipedia.org/wiki/CAP_theorem
275 [dlt]: https://en.wikipedia.org/wiki/Distributed_ledger
276 [DVCS]: https://en.wikipedia.org/wiki/Distributed_version_control
277 [fc]: https://en.wikipedia.org/wiki/Fiat_money
278 [purge]: /help?cmd=purge
279 [shun]: ./shunning.wiki
280
281
282 <a id="dpc"></a>
283 ## Distributed Partial Consensus
284
285 If we can’t get DLT, can we at least get some kind of distributed
286 consensus at the level of individual Fossil commits?
287
288 Many blockchain based technologies have this property: given some
289 element of the blockchain, you can make certain proofs that it either is
290 a legitimate part of the whole blockchain, or it is not.
291
292 Unfortunately, this author doesn’t see a way to do that with Fossil.
293 Given only one “block” in Fossil’s putative “blockchain” — a commit, in
294 Fossil terminology — all you can prove is whether it is internally
295 consistent, that it is not corrupt. That then points you at the parent(s) of that
296 commit, which you can repeat the exercise on, back to the root of the
297 DAG. This is what the enabled-by-default [`repo-cksum` setting][rcks]
298 does.
299
300 If cryptocurrencies worked this way, you wouldn’t be able to prove that
301 a given cryptocoin was legitimate without repeating the proof-of-work
302 calculations for the entire cryptocurrency scheme! Instead, you only
303 need to check a certain number of signatures and proofs-of-work in order
304 to be reasonably certain that you are looking at a legitimate section of
305 the whole blockchain.
306
307 What would it even mean to prove that a given Fossil commit “*belongs*”
308 to the repository you’ve extracted it from? For a software project,
309 isn’t that tantamount to automatic code review, where the server would
310 be able to reliably accept or reject a commit based solely on its
311 content? That sounds nice, but this author believes we’ll need to invent
312 [AGI] first.
313
314 A better method to provide distributed consensus for Fossil would be to
315 rely on the *natural* intelligence of its users: that is, distributed
316 commit signing, so that a commit is accepted into the blockchain only
317 once some number of users countersign it. This amounts to a code review
318 feature, which Fossil doesn’t currently have.
319
320 Solving that problem basically requires solving the [PKI] problem first,
321 since you can’t verify the proofs of these signatures if you can’t first
322 prove that the provided signatures belong to people you trust. This is a
323 notoriously hard problem in its own right.
324
325 A future version of Fossil could instead provide [consensus in the CAP
326 sense][ctcp]. For instance, you could say that if a quorum of servers
327 all have a given commit, it “belongs.” Fossil’s strong hashing tech
328 would mean that querying whether a given commit is part of the
329 “blockchain” would be as simple as going down the list of servers and
330 sending each an HTTP GET `/info` query for the artifact ID, concluding
331 that the commit is legitimate once you get enough HTTP 200 status codes back. All of this is
332 hypothetical, because Fossil doesn’t do this today.
333
334 [AGI]: https://en.wikipedia.org/wiki/Artificial_general_intelligence
335 [rcks]: /help?cmd=repo-cksum
336
337
338
339 <a id="anon"></a>
340 ## Anonymity
341
342 Many blockchain based technologies go to extraordinary lengths to
343 allow anonymous use of their service.
344
345 As typically configured, Fossil does not: commits synced between servers
346 always at least have a user name associated with them, which the remote
347 system must accept through its [RBAC system][caps]. That system can run
348 without having the user’s email address, but it’s needed if [email
349 alerts][alert] are enabled on the server. The remote server logs the IP
350 address of the commit for security reasons. That coupled with the
351 timestamp on the commit could sufficiently deanonymize users in many
352 common situations.
353
354 It is possible to configure Fossil so it doesn’t do this:
355
356 * You can give [Write capability][capi] to user category “nobody,” so
357 that anyone that can reach your server can push commits into its
358 repository.
359
360 * You could give that capability to user category “anonymous” instead,
361 which requires that the user log in with a CAPTCHA, but which doesn’t
362 require that the user otherwise identify themselves.
363
364 * You could enable [the `self-register` setting][sreg] and choose not to
365 enable [commit clear-signing][cs] so that anonymous users could push
366 commits into your repository under any name they want.
367
368 On the server side, you can also [scrub] the logging that remembers
369 where each commit came from.
370
371 That info isn’t transmitted from the remote server on clone or pull.
372 Instead, the size of the `rcvfrom` table after initial clone is 1: it
373 contains the remote server’s IP address. On each pull containing new
374 artifacts, your local `fossil` instance adds another entry to this
375 table, likely with the same IP address unless the server has moved or
376 you’re using [multiple remotes][mrep]. This table is far more
377 interesting on the server side, containing the IP addresses of all
378 contentful pushes; thus [the `scrub` command][scrub].
379
380 Because Fossil doesn’t
381 remember IP addresses in commit manifests or require commit signing, it
382 allows at least *pseudonymous* commits. When someone clones a remote
383 repository, they don’t learn the email address, IP address, or any other
384 sort of [PII] of prior committers, on purpose.
385
386 Some people say that private, permissioned blockchains (as you may
387 imagine Fossil to be) are inherently problematic by the very reason that
388 they don’t bake anonymous contribution into their core. The very
389 existence of an RBAC is a moving piece that can break. Isn’t it better,
390 the argument goes, to have a system that works even in the face of
391 anonymous contribution, so that you don’t need an RBAC? Cryptocurrencies
392 do this, for example: anyone can “mine” a new coin and push it into the
393 blockchain, and there is no central authority restricting the transfer
394 of cryptocurrency from one user to another.
395
396 We can draw an analogy to encryption, where an algorithm is
397 considered inherently insecure if it depends on keeping any information
398 from an attacker other than the key. Encryption schemes that do
399 otherwise are derided as “security through obscurity.”
400
401 You may be wondering what any of this has to do with whether Fossil is a
402 blockchain, but that is exactly the point: all of this is outside
403 Fossil’s core hash-chained repository data structure. If you take the
404 position that you don’t have a “blockchain” unless it allows anonymous
405 contribution, with any needed restrictions provided only by the very
406 structure of the managed data, then Fossil does not qualify.
407
408 Why do some people care about this distinction? Consider Bitcoin,
409 wherein an anonymous user cannot spam the blockchain with bogus coins
410 because its [proof-of-work][pow] protocol allows such coins to be
411 rejected immediately. There is no equivalent in Fossil: it has no
412 technology that allows the receiving server to look at the content of a
413 commit and automatically judge it to be “good.” Fossil relies on its
414 RBAC system to provide such distinctions: if you have a commit bit, your
415 commits are *ipso facto* judged “good,” insofar as any human work
416 product can be so judged by a blob of compiled C code. This takes us
417 back to the [digital ledger question](#dlt), where we can talk about
418 what it means to later correct a bad commit that got through the RBAC
419 check.
420
421 We may be willing to accept pseudonymity, rather than full anonymity.
422 If we configure Fossil as above, either bypassing the RBAC or abandoning
423 human control over it, scrubbing IP addresses, etc., is it then a public
424 permissionless blockchain in that sense?
425
426 We think not, because there is no [longest chain rule](#lcr) or anything
427 like it in Fossil.
428
429 For a fair model of how a Fossil repository might behave under such
430 conditions, consider GitHub: here one user can fork another’s repository
431 and make an arbitrary number of commits to their public fork. Imagine
432 this happens 10 times. How does someone come along later and
433 *automatically* evaluate which of the 11 forks of the code (counting the
434 original repository among their number) is the “best” one? For a
435 computer software project, the best we could do to approximate this
436 devolves to a [software project cost estimation problem][scost]. These
437 methods are rather questionable in their own right, being mathematical
438 judgement values on human work products, but even if we accept their
439 usefulness, then we still cannot say which fork is better based solely
440 on their scores under these metrics. We may well prefer to use the fork
441 of a software program that took *less* effort, being smaller, more
442 self-contained, and with a smaller attack surface.
443
444
445 [alert]: ./alerts.md
446 [capi]: ./caps/ref.html#i
447 [mrep]: /help?cmd=remote
448 [scost]: https://en.wikipedia.org/wiki/Software_development_effort_estimation
449 [scrub]: /help?cmd=scrub
450 [sreg]: /help?cmd=self-register
451
452
453 # Conclusion
454
455 This author believes it is technologically indefensible to call Fossil a
456 “blockchain” in any sense likely to be understood by a majority of those
457 you’re communicating with.
458
459 Within a certain narrow scope, you can defend this usage, but if you do
460 that, you’ve failed any goal that requires clear communication: it
461 doesn’t work to use a term in a nonstandard way just because you can
462 defend it. The people you’re communicating your ideas to must have the
463 same concept of the terms you use.
464
465
466 What term should you use instead? Fossil stores a DAG of hash-chained
467 commits, so an indisputably correct term is a [Merkle tree][mt], named
468 after [its inventor][drrm]. You could also use the more generic term
469 “hash tree.”
470
471 Fossil is a technological peer to many common sorts of blockchain
472 technology. There is a lot of overlap in concepts and implementation
473 details, but when speaking of what most people understand as
474 “blockchain,” Fossil is not that.
475
476 [drrm]: https://en.wikipedia.org/wiki/Ralph_Merkle
477 [mt]: https://en.wikipedia.org/wiki/Merkle_tree
478
--- www/cap-theorem.md
+++ www/cap-theorem.md
@@ -25,11 +25,15 @@
2525
time.
2626
2727
You may consider that going back online restores “C”, because upon sync,
2828
you’re now consistent with the repo you cloned from. But, if another
2929
user has gone offline in the meantime, and they’ve made commits to their
30
-disconnected repo, *you* aren’t consistent with *them.*
30
+disconnected repo, *you* aren’t consistent with *them.* Besides which,
31
+if another user commits to the central repo, that doesn’t push the
32
+change down to you automatically: even if all users of a Fossil system
33
+are online at the same instant, and they’re all using autosync, Fossil
34
+doesn’t guarantee consistency across the network.
3135
3236
There’s no getting around the CAP theorem!
3337
3438
[dvcs]: https://en.wikipedia.org/wiki/Distributed_version_control
3539
@@ -41,22 +45,27 @@
4145
4246
It means we get a system that is always consistent (C) and available (A)
4347
as long as there are no partitions (P).
4448
4549
That’s basically [CVS] and [Subversion][svn]: you can only continue
46
-working as long as your connection to the central repo server functions.
50
+working with the repository itself as long as your connection to the central repo server functions.
4751
48
-It’s rather trivial to talk about single-point-of-failure systems as
52
+It’s rather trivial to talk about single-point-of-failure systems like
53
+CVS or Subversion as
4954
CA-mode. Another common example used this way is a classical RDBMS, but
5055
aren’t we here to talk about distributed systems? What’s a good example
5156
of a *distributed* CA-mode system?
5257
5358
A better example is [Kafka], which in its default configuration assumes
5459
it being run on a corporate LAN in a single data center, so network
55
-partitions are exceedingly rare. In its application of CA mode, a
60
+partitions are exceedingly rare. It therefore sacrifices partition
61
+tolerance to get the advantages of CA-mode operation. In its particular application of
62
+this mode, a
5663
message isn’t “committed” until all running brokers have a copy of it,
57
-at which point the message becomes visible to the client(s).
64
+at which point the message becomes visible to the client(s). In that
65
+way, all clients always see the same message store as long as all of the
66
+Kafka servers are up and communicating.
5867
5968
How would that work in Fossil terms?
6069
6170
If there is only one central server and I clone it on my local laptop,
6271
then CA mode means I can only commit if the remote Fossil is available,
@@ -88,11 +97,11 @@
8897
say that commits must go to all of the spares as well as the active
8998
servers, but a loss of one active server requires that one warm spare
9099
come into active state, and all of the clients learn that the spare is
91100
now considered “active.” At this point, you have a CP-mode system, not a
92101
CA-mode system, because it’s now partition-tolerant (P) but it becomes
93
-unavailable, losing “A” when there aren’t enough active servers or warm
102
+unavailable when there aren’t enough active servers or warm
94103
spares to promote to active status.
95104
96105
CP is your classical [BFT] style distributed consensus system, where the
97106
system is available only if the client can contact a *majority* of the
98107
servers. This is a formalization of the warm spare concept above: with
99108
--- www/cap-theorem.md
+++ www/cap-theorem.md
@@ -25,11 +25,15 @@
25 time.
26
27 You may consider that going back online restores “C”, because upon sync,
28 you’re now consistent with the repo you cloned from. But, if another
29 user has gone offline in the meantime, and they’ve made commits to their
30 disconnected repo, *you* aren’t consistent with *them.*
 
 
 
 
31
32 There’s no getting around the CAP theorem!
33
34 [dvcs]: https://en.wikipedia.org/wiki/Distributed_version_control
35
@@ -41,22 +45,27 @@
41
42 It means we get a system that is always consistent (C) and available (A)
43 as long as there are no partitions (P).
44
45 That’s basically [CVS] and [Subversion][svn]: you can only continue
46 working as long as your connection to the central repo server functions.
47
48 It’s rather trivial to talk about single-point-of-failure systems as
 
49 CA-mode. Another common example used this way is a classical RDBMS, but
50 aren’t we here to talk about distributed systems? What’s a good example
51 of a *distributed* CA-mode system?
52
53 A better example is [Kafka], which in its default configuration assumes
54 it being run on a corporate LAN in a single data center, so network
55 partitions are exceedingly rare. In its application of CA mode, a
 
 
56 message isn’t “committed” until all running brokers have a copy of it,
57 at which point the message becomes visible to the client(s).
 
 
58
59 How would that work in Fossil terms?
60
61 If there is only one central server and I clone it on my local laptop,
62 then CA mode means I can only commit if the remote Fossil is available,
@@ -88,11 +97,11 @@
88 say that commits must go to all of the spares as well as the active
89 servers, but a loss of one active server requires that one warm spare
90 come into active state, and all of the clients learn that the spare is
91 now considered “active.” At this point, you have a CP-mode system, not a
92 CA-mode system, because it’s now partition-tolerant (P) but it becomes
93 unavailable, losing “A” when there aren’t enough active servers or warm
94 spares to promote to active status.
95
96 CP is your classical [BFT] style distributed consensus system, where the
97 system is available only if the client can contact a *majority* of the
98 servers. This is a formalization of the warm spare concept above: with
99
--- www/cap-theorem.md
+++ www/cap-theorem.md
@@ -25,11 +25,15 @@
25 time.
26
27 You may consider that going back online restores “C”, because upon sync,
28 you’re now consistent with the repo you cloned from. But, if another
29 user has gone offline in the meantime, and they’ve made commits to their
30 disconnected repo, *you* aren’t consistent with *them.* Besides which,
31 if another user commits to the central repo, that doesn’t push the
32 change down to you automatically: even if all users of a Fossil system
33 are online at the same instant, and they’re all using autosync, Fossil
34 doesn’t guarantee consistency across the network.
35
36 There’s no getting around the CAP theorem!
37
38 [dvcs]: https://en.wikipedia.org/wiki/Distributed_version_control
39
@@ -41,22 +45,27 @@
45
46 It means we get a system that is always consistent (C) and available (A)
47 as long as there are no partitions (P).
48
49 That’s basically [CVS] and [Subversion][svn]: you can only continue
50 working with the repository itself as long as your connection to the central repo server functions.
51
52 It’s rather trivial to talk about single-point-of-failure systems like
53 CVS or Subversion as
54 CA-mode. Another common example used this way is a classical RDBMS, but
55 aren’t we here to talk about distributed systems? What’s a good example
56 of a *distributed* CA-mode system?
57
58 A better example is [Kafka], which in its default configuration assumes
59 it being run on a corporate LAN in a single data center, so network
60 partitions are exceedingly rare. It therefore sacrifices partition
61 tolerance to get the advantages of CA-mode operation. In its particular application of
62 this mode, a
63 message isn’t “committed” until all running brokers have a copy of it,
64 at which point the message becomes visible to the client(s). In that
65 way, all clients always see the same message store as long as all of the
66 Kafka servers are up and communicating.
67
68 How would that work in Fossil terms?
69
70 If there is only one central server and I clone it on my local laptop,
71 then CA mode means I can only commit if the remote Fossil is available,
@@ -88,11 +97,11 @@
97 say that commits must go to all of the spares as well as the active
98 servers, but a loss of one active server requires that one warm spare
99 come into active state, and all of the clients learn that the spare is
100 now considered “active.” At this point, you have a CP-mode system, not a
101 CA-mode system, because it’s now partition-tolerant (P) but it becomes
102 unavailable when there aren’t enough active servers or warm
103 spares to promote to active status.
104
105 CP is your classical [BFT] style distributed consensus system, where the
106 system is available only if the client can contact a *majority* of the
107 servers. This is a formalization of the warm spare concept above: with
108
--- www/caps/admin-v-setup.md
+++ www/caps/admin-v-setup.md
@@ -1,6 +1,6 @@
1
-# Differences Between Setup and Admin User
1
+# Differences Between Setup and Admin Users
22
33
This document explains the distinction between [Setup users][caps] and
44
[Admin users][capa]. For other information about user types, see:
55
66
* [Administering User Capabilities](./)
@@ -46,12 +46,12 @@
4646
choices.
4747
4848
You can also look at the role of Admin from the other direction, up
4949
through the [user power hierarchy][ucap] rather than down from Setup. An
5050
Admin user is usually a “super-developer” role, given full control over
51
-the repository’s managed content: versioned artifacts in [the block
52
-chain][bc], [unversioned content][uv], forum posts, wiki articles,
51
+the repository’s managed content: versioned artifacts in [the hash tree][bc],
52
+[unversioned content][uv], forum posts, wiki articles,
5353
tickets, etc.
5454
5555
We’ll explore these distinctions in the rest of this document.
5656
5757
[bc]: ../blockchain.md
@@ -158,22 +158,22 @@
158158
system][shun] is to clean up after a spammer, and that's
159159
exactly the sort of administrivia we wish to delegate to Admin users.
160160
161161
Coupled with the Rebuild button on the same page, an Admin user has
162162
the power to delete the repository's entire
163
- [blockchain][bc]! This makes this feature a pretty good
163
+ [hash tree][bc]! This makes this feature a pretty good
164164
razor in deciding whether to grant someone Admin capability: do you
165165
trust that user to shun Fossil artifacts responsibly?
166166
167167
Realize that shunning is cooperative in Fossil. As long as there are
168168
surviving repository clones, an Admin-only user who deletes the
169
- whole blockchain has merely caused a nuisance. An Admin-only user
169
+ whole hash tree has merely caused a nuisance. An Admin-only user
170170
cannot permanently destroy the repository unless the Setup user has
171171
been so silly as to have no up-to-date clones.
172172
173
-* **Moderation**: According to the power hierarchy laid out at the top
174
- of this article, Admins are greater than Moderators, so control over
173
+* **Moderation**: According to [the user power hierarchy][ucap],
174
+ Admins are greater than Moderators, so control over
175175
what Moderators can do clearly belongs to both Admins and to the
176176
Setup user(s).
177177
178178
* **Status**: Although the Fossil `/stat` page is visible to every
179179
user with Read capability, there are several additional things this
@@ -229,27 +229,27 @@
229229
user with these powers, you should not grant that user Admin capability.
230230
231231
232232
## <a name="clones"></a>Clones and Backups
233233
234
-Keep in mind that Fossil is a *distributed* version control system,
235
-which means that a user known to Fossil might have Setup capability on
236
-one repository but be a mere "user" on one of its clones. The most
237
-common case is that when you clone a repository, even anonymously, you
238
-gain Setup power over the local clone.
234
+Fossil is a *distributed* version control system, which has direct
235
+effects on the “Setup user” concept in the face of clones. When you
236
+clone a repository, your local user becomes a Setup user on the local
237
+clone even if you are not one on the remote repository. This may be
238
+surprising to you, but it should also be sensible once you realize that
239
+your operating system will generally give you full control over the
240
+local repository file. What use trying to apply remote restrictions on
241
+the local file, then?
239242
240243
The distinctions above therefore are intransitive: they apply only
241244
within a single repository instance.
242245
243
-The exception to this is when the clone is done as a Setup user, since
244
-this also copies the `user` table on the initial clone. A user with
245
-Setup capability can subsequently say [`fossil conf pull all`][fcp] to
246
-update that table and everything else not normally synchronized between
247
-Fossil repositories. In this way, a Setup user can create multiple
248
-interchangeable clones. This is useful not only to guard against rogue
249
-Admin-only users, it is a useful element of a load balancing and
250
-failover system.
246
+Fossil behaves differently when you do a clone as a user with Setup
247
+capability on the remote repository, which primarily has effects on the
248
+fidelity of clone-as-backup, which we cover [elsewhere](../backup.md).
249
+We strongly encourage you to read that document if you expect to use a
250
+clone as a complete replacement for the remote repository.
251251
252252
253253
## <a name="apsu"></a>The All-Powerful Setup User
254254
255255
Setup users get [every user capability](./ref.html) of Fossil except for
@@ -344,11 +344,11 @@
344344
access on the host system, we almost certainly don't want to
345345
allow them to change such settings.</p>
346346
347347
* **SQL**: The Admin → SQL feature allows the Setup user to enter raw
348348
SQL queries against the Fossil repository via Fossil UI. This not
349
- only allows arbitrary ability to modify the repository blockchain
349
+ only allows arbitrary ability to modify the repository hash tree
350350
and its backing data tables, it can probably also be used to damage
351351
the host such as via `PRAGMA temp_store = FILE`.
352352
353353
* **Tickets**: This section allows input of arbitrary TH1 code that
354354
runs on the server, affecting the way the Fossil ticketing system
355355
--- www/caps/admin-v-setup.md
+++ www/caps/admin-v-setup.md
@@ -1,6 +1,6 @@
1 # Differences Between Setup and Admin User
2
3 This document explains the distinction between [Setup users][caps] and
4 [Admin users][capa]. For other information about user types, see:
5
6 * [Administering User Capabilities](./)
@@ -46,12 +46,12 @@
46 choices.
47
48 You can also look at the role of Admin from the other direction, up
49 through the [user power hierarchy][ucap] rather than down from Setup. An
50 Admin user is usually a “super-developer” role, given full control over
51 the repository’s managed content: versioned artifacts in [the block
52 chain][bc], [unversioned content][uv], forum posts, wiki articles,
53 tickets, etc.
54
55 We’ll explore these distinctions in the rest of this document.
56
57 [bc]: ../blockchain.md
@@ -158,22 +158,22 @@
158 system][shun] is to clean up after a spammer, and that's
159 exactly the sort of administrivia we wish to delegate to Admin users.
160
161 Coupled with the Rebuild button on the same page, an Admin user has
162 the power to delete the repository's entire
163 [blockchain][bc]! This makes this feature a pretty good
164 razor in deciding whether to grant someone Admin capability: do you
165 trust that user to shun Fossil artifacts responsibly?
166
167 Realize that shunning is cooperative in Fossil. As long as there are
168 surviving repository clones, an Admin-only user who deletes the
169 whole blockchain has merely caused a nuisance. An Admin-only user
170 cannot permanently destroy the repository unless the Setup user has
171 been so silly as to have no up-to-date clones.
172
173 * **Moderation**: According to the power hierarchy laid out at the top
174 of this article, Admins are greater than Moderators, so control over
175 what Moderators can do clearly belongs to both Admins and to the
176 Setup user(s).
177
178 * **Status**: Although the Fossil `/stat` page is visible to every
179 user with Read capability, there are several additional things this
@@ -229,27 +229,27 @@
229 user with these powers, you should not grant that user Admin capability.
230
231
232 ## <a name="clones"></a>Clones and Backups
233
234 Keep in mind that Fossil is a *distributed* version control system,
235 which means that a user known to Fossil might have Setup capability on
236 one repository but be a mere "user" on one of its clones. The most
237 common case is that when you clone a repository, even anonymously, you
238 gain Setup power over the local clone.
 
 
 
239
240 The distinctions above therefore are intransitive: they apply only
241 within a single repository instance.
242
243 The exception to this is when the clone is done as a Setup user, since
244 this also copies the `user` table on the initial clone. A user with
245 Setup capability can subsequently say [`fossil conf pull all`][fcp] to
246 update that table and everything else not normally synchronized between
247 Fossil repositories. In this way, a Setup user can create multiple
248 interchangeable clones. This is useful not only to guard against rogue
249 Admin-only users, it is a useful element of a load balancing and
250 failover system.
251
252
253 ## <a name="apsu"></a>The All-Powerful Setup User
254
255 Setup users get [every user capability](./ref.html) of Fossil except for
@@ -344,11 +344,11 @@
344 access on the host system, we almost certainly don't want to
345 allow them to change such settings.</p>
346
347 * **SQL**: The Admin → SQL feature allows the Setup user to enter raw
348 SQL queries against the Fossil repository via Fossil UI. This not
349 only allows arbitrary ability to modify the repository blockchain
350 and its backing data tables, it can probably also be used to damage
351 the host such as via `PRAGMA temp_store = FILE`.
352
353 * **Tickets**: This section allows input of arbitrary TH1 code that
354 runs on the server, affecting the way the Fossil ticketing system
355
--- www/caps/admin-v-setup.md
+++ www/caps/admin-v-setup.md
@@ -1,6 +1,6 @@
1 # Differences Between Setup and Admin Users
2
3 This document explains the distinction between [Setup users][caps] and
4 [Admin users][capa]. For other information about use types, see:
5
6 * [Administering User Capabilities](./)
@@ -46,12 +46,12 @@
46 choices.
47
48 You can also look at the role of Admin from the other direction, up
49 through the [user power hierarchy][ucap] rather than down from Setup. An
50 Admin user is usually a “super-developer” role, given full control over
51 the repository’s managed content: versioned artifacts in [the hash tree][bc],
52 [unversioned content][uv], forum posts, wiki articles,
53 tickets, etc.
54
55 We’ll explore these distinctions in the rest of this document.
56
57 [bc]: ../blockchain.md
@@ -158,22 +158,22 @@
158 system][shun] is to clean up after a spammer, and that's
159 exactly the sort of administrivia we wish to delegate to Admin users.
160
161 Coupled with the Rebuild button on the same page, an Admin user has
162 the power to delete the repository's entire
163 [hash tree][bc]! This makes this feature a pretty good
164 razor in deciding whether to grant someone Admin capability: do you
165 trust that user to shun Fossil artifacts responsibly?
166
167 Realize that shunning is cooperative in Fossil. As long as there are
168 surviving repository clones, an Admin-only user who deletes the
169 whole hash tree has merely caused a nuisance. An Admin-only user
170 cannot permanently destroy the repository unless the Setup user has
171 been so silly as to have no up-to-date clones.
172
173 * **Moderation**: According to [the user power hierarchy][ucap],
174 Admins are greater than Moderators, so control over
175 what Moderators can do clearly belongs to both Admins and to the
176 Setup user(s).
177
178 * **Status**: Although the Fossil `/stat` page is visible to every
179 user with Read capability, there are several additional things this
@@ -229,27 +229,27 @@
229 user with these powers, you should not grant that user Admin capability.
230
231
232 ## <a name="clones"></a>Clones and Backups
233
234 Fossil is a *distributed* version control system, which has direct
235 effects on the “Setup user” concept in the face of clones. When you
236 clone a repository, your local user becomes a Setup user on the local
237 clone even if you are not one on the remote repository. This may be
238 surprising to you, but it should also be sensible once you realize that
239 your operating system will generally give you full control over the
240 local repository file. What use trying to apply remote restrictions on
241 the local file, then?
242
243 The distinctions above therefore are intransitive: they apply only
244 within a single repository instance.
245
246 Fossil behaves differently when you do a clone as a user with Setup
247 capability on the remote repository, which primarily has effects on the
248 fidelity of clone-as-backup, which we cover [elsewhere](../backup.md).
249 We strongly encourage you to read that document if you expect to use a
250 clone as a complete replacement for the remote repository.
 
 
 
251
252
253 ## <a name="apsu"></a>The All-Powerful Setup User
254
255 Setup users get [every user capability](./ref.html) of Fossil except for
@@ -344,11 +344,11 @@
344 access on the host system, we almost certainly don't want to
345 allow them to change such settings.</p>
346
347 * **SQL**: The Admin → SQL feature allows the Setup user to enter raw
348 SQL queries against the Fossil repository via Fossil UI. This not
349 only allows arbitrary ability to modify the repository hash tree
350 and its backing data tables, it can probably also be used to damage
351 the host such as via `PRAGMA temp_store = FILE`.
352
353 * **Tickets**: This section allows input of arbitrary TH1 code that
354 runs on the server, affecting the way the Fossil ticketing system
355
--- www/caps/impl.md
+++ www/caps/impl.md
@@ -62,11 +62,11 @@
6262
3. You can purposely overwrite good timestamps with bad ones and push
6363
those changes up to the remote with no interference, even though
6464
Fossil tries to make that a Setup-only operation.
6565
6666
All of this falls out of two of Fossil’s design choices: sync is
67
-all-or-nothing, and [the Fossil block chain][bc] is immutable. Fossil
67
+all-or-nothing, and [the Fossil hash tree][bc] is immutable. Fossil
6868
would have to violate one or both of these principles to filter such
6969
problems out of incoming syncs.
7070
7171
We have considered auto-[shunning][shun] “bad” content on sync, but this
7272
is [difficult][asd] due to [the design of the sync protocol][dsp]. This
7373
--- www/caps/impl.md
+++ www/caps/impl.md
@@ -62,11 +62,11 @@
62 3. You can purposely overwrite good timestamps with bad ones and push
63 those changes up to the remote with no interference, even though
64 Fossil tries to make that a Setup-only operation.
65
66 All of this falls out of two of Fossil’s design choices: sync is
67 all-or-nothing, and [the Fossil block chain][bc] is immutable. Fossil
68 would have to violate one or both of these principles to filter such
69 problems out of incoming syncs.
70
71 We have considered auto-[shunning][shun] “bad” content on sync, but this
72 is [difficult][asd] due to [the design of the sync protocol][dsp]. This
73
--- www/caps/impl.md
+++ www/caps/impl.md
@@ -62,11 +62,11 @@
62 3. You can purposely overwrite good timestamps with bad ones and push
63 those changes up to the remote with no interference, even though
64 Fossil tries to make that a Setup-only operation.
65
66 All of this falls out of two of Fossil’s design choices: sync is
67 all-or-nothing, and [the Fossil hash tree][bc] is immutable. Fossil
68 would have to violate one or both of these principles to filter such
69 problems out of incoming syncs.
70
71 We have considered auto-[shunning][shun] “bad” content on sync, but this
72 is [difficult][asd] due to [the design of the sync protocol][dsp]. This
73
--- www/caps/ref.html
+++ www/caps/ref.html
@@ -77,11 +77,11 @@
7777
<th>d</th>
7878
<th>n/a</th>
7979
<td>
8080
Legacy capability letter from Fossil's forebear <a
8181
href="http://cvstrac.org/">CVSTrac</a>, which has no useful
82
- meaning in Fossil due to its durable blockchain nature. This
82
+ meaning in Fossil due to the nature of its durable Merkle tree design. This
8383
letter was assigned by default to Developer in repos created with
8484
Fossil 2.10 or earlier, but it has no effect in current or past
8585
versions of Fossil; we recommend that you remove it in case we
8686
ever reuse this letter for another purpose. See <a
8787
href="https://fossil-scm.org/forum/forumpost/43c78f4bef">this
8888
--- www/caps/ref.html
+++ www/caps/ref.html
@@ -77,11 +77,11 @@
77 <th>d</th>
78 <th>n/a</th>
79 <td>
80 Legacy capability letter from Fossil's forebear <a
81 href="http://cvstrac.org/">CVSTrac</a>, which has no useful
82 meaning in Fossil due to its durable blockchain nature. This
83 letter was assigned by default to Developer in repos created with
84 Fossil 2.10 or earlier, but it has no effect in current or past
85 versions of Fossil; we recommend that you remove it in case we
86 ever reuse this letter for another purpose. See <a
87 href="https://fossil-scm.org/forum/forumpost/43c78f4bef">this
88
--- www/caps/ref.html
+++ www/caps/ref.html
@@ -77,11 +77,11 @@
77 <th>d</th>
78 <th>n/a</th>
79 <td>
80 Legacy capability letter from Fossil's forebear <a
81 href="http://cvstrac.org/">CVSTrac</a>, which has no useful
82 meaning in Fossil due to the nature of its durable Merkle tree design. This
83 letter was assigned by default to Developer in repos created with
84 Fossil 2.10 or earlier, but it has no effect in current or past
85 versions of Fossil; we recommend that you remove it in case we
86 ever reuse this letter for another purpose. See <a
87 href="https://fossil-scm.org/forum/forumpost/43c78f4bef">this
88
+3 -4
--- www/cgi.wiki
+++ www/cgi.wiki
@@ -28,11 +28,12 @@
2828
<blockquote><verbatim>
2929
#!/usr/bin/fossil
3030
repository: /home/www/fossils/myproject.fossil
3131
</verbatim></blockquote>
3232
33
-Of course, pathnames will likely be different. The first line (the "shebang")
33
+Of course, pathnames will likely be different. The first line
34
+(the "[wikipedia:/wiki/Shebang_(Unix)|shebang]")
3435
always gives the name of the Fossil executable. Subsequent lines are of
3536
the form "<b>property:&nbsp;argument&nbsp;...</b>".
3637
The remainder of this document describes the available properties and
3738
their arguments.
3839
@@ -40,11 +41,11 @@
4041
<h2 id="repository">repository: <i>PATH</i></h2>
4142
4243
This property defines the Fossil repository that the server will use.
4344
Every Fossil CGI requires either this property or the
4445
[#directory|<b>directory:</b>] property (but not both).
45
-Many Fossil repository sets have this one property and no other.
46
+Many Fossil CGI scripts have this one property and no other.
4647
4748
<h2 id="directory">directory: <i>PATH</i></h2>
4849
4950
The PATH is the name of a directory that contains one or more Fossil
5051
repository files having the suffix ".fossil". If this property is used
@@ -104,12 +105,10 @@
104105
105106
This is a Boolean property.
106107
If it is present, [./caps/ref.html#s | setup capability]
107108
is granted to any HTTP request that
108109
comes in over a loopback interface, such as 127.0.0.1.
109
-If the PATH_INFO string is empty, Fossil will show a list
110
-of available Fossil repositories.
111110
112111
<h2 id="skin">skin: <i>NAME</i></h2>
113112
114113
If NAME is the name of one of the built-in skins supported by Fossil,
115114
then this option causes Fossil to display using that built-in skin,
116115
--- www/cgi.wiki
+++ www/cgi.wiki
@@ -28,11 +28,12 @@
28 <blockquote><verbatim>
29 #!/usr/bin/fossil
30 repository: /home/www/fossils/myproject.fossil
31 </verbatim></blockquote>
32
33 Of course, pathnames will likely be different. The first line (the "shebang")
 
34 always gives the name of the Fossil executable. Subsequent lines are of
35 the form "<b>property:&nbsp;argument&nbsp;...</b>".
36 The remainder of this document describes the available properties and
37 their arguments.
38
@@ -40,11 +41,11 @@
40 <h2 id="repository">repository: <i>PATH</i></h2>
41
42 This property defines the Fossil repository that the server will use.
43 Every Fossil CGI requires either this property or the
44 [#directory|<b>directory:</b>] property (but not both).
45 Many Fossil repository sets have this one property and no other.
46
47 <h2 id="directory">directory: <i>PATH</i></h2>
48
49 The PATH is the name of a directory that contains one or more Fossil
50 repository files having the suffix ".fossil". If this property is used
@@ -104,12 +105,10 @@
104
105 This is a Boolean property.
106 If it is present, [./caps/ref.html#s | setup capability]
107 is granted to any HTTP request that
108 comes in over a loopback interface, such as 127.0.0.1.
109 If the PATH_INFO string is empty, Fossil will show a list
110 of available Fossil repositories.
111
112 <h2 id="skin">skin: <i>NAME</i></h2>
113
114 If NAME is the name of one of the built-in skins supported by Fossil,
115 then this option causes Fossil to display using that built-in skin,
116
--- www/cgi.wiki
+++ www/cgi.wiki
@@ -28,11 +28,12 @@
28 <blockquote><verbatim>
29 #!/usr/bin/fossil
30 repository: /home/www/fossils/myproject.fossil
31 </verbatim></blockquote>
32
33 Of course, pathnames will likely be different. The first line
34 (the "[wikipedia:/wiki/Shebang_(Unix)|shebang]")
35 always gives the name of the Fossil executable. Subsequent lines are of
36 the form "<b>property:&nbsp;argument&nbsp;...</b>".
37 The remainder of this document describes the available properties and
38 their arguments.
39
@@ -40,11 +41,11 @@
41 <h2 id="repository">repository: <i>PATH</i></h2>
42
43 This property defines the Fossil repository that the server will use.
44 Every Fossil CGI requires either this property or the
45 [#directory|<b>directory:</b>] property (but not both).
46 Many Fossil CGI scripts have this one property and no other.
47
48 <h2 id="directory">directory: <i>PATH</i></h2>
49
50 The PATH is the name of a directory that contains one or more Fossil
51 repository files having the suffix ".fossil". If this property is used
@@ -104,12 +105,10 @@
105
106 This is a Boolean property.
107 If it is present, [./caps/ref.html#s | setup capability]
108 is granted to any HTTP request that
109 comes in over a loopback interface, such as 127.0.0.1.
 
 
110
111 <h2 id="skin">skin: <i>NAME</i></h2>
112
113 If NAME is the name of one of the built-in skins supported by Fossil,
114 then this option causes Fossil to display using that built-in skin,
115
+10 -2
--- www/changes.wiki
+++ www/changes.wiki
@@ -1,14 +1,14 @@
11
<title>Change Log</title>
22
33
<a name='v2_13'></a>
4
-<h2>Changes for Version 2.13 (pending)</h2>
4
+<h2>Changes for Version 2.13 (2020-11-01)</h2>
55
66
* Added support for [./interwiki.md|interwiki links].
77
* Enable &lt;del&gt; and &lt;ins&gt; markup in wiki.
88
* Improvements to the Forum threading display.
9
- * Added support for embedding [https://pikchr.org|pikchr]
9
+ * Added support for embedding [./pikchr.md|pikchr]
1010
markup in markdown and fossil-wiki content.
1111
* The new "[/help?cmd=pikchr|pikchr]" command can render
1212
pikchr scripts, optionally pre-processed with
1313
[/doc/trunk/www/th1.md|TH1] blocks and variables exactly like
1414
site skins are.
@@ -19,10 +19,18 @@
1919
initiate a preview and to toggle between the editor and preview
2020
tabs.
2121
* The <tt>/artifact</tt> and <tt>/file</tt> views, when in
2222
line-number mode, now support interactive selection of a range
2323
of lines to hyperlink to.
24
+ * Enhance the [/help?cmd=/finfo|/finfo] webpage so that when query
25
+ parameters identify both a filename and a checkin, the resulting
26
+ graph tracks the identified file across renames.
27
+ * The built-in SQLite is updated to an alpha of version 3.34.0, and
28
+ the minimum SQLite version is increased to 3.34.0 because the
29
+ /finfo change in the previous bullet depends on enhancements to
30
+ recursive common table expressions that are only available in
31
+ SQLite 3.34.0 and later.
2432
* Countless other minor refinements and documentation improvements.
2533
2634
<a name='v2_12'></a>
2735
<h2>Changes for Version 2.12.1 (2020-08-20)</h2>
2836
2937
--- www/changes.wiki
+++ www/changes.wiki
@@ -1,14 +1,14 @@
1 <title>Change Log</title>
2
3 <a name='v2_13'></a>
4 <h2>Changes for Version 2.13 (pending)</h2>
5
6 * Added support for [./interwiki.md|interwiki links].
7 * Enable &lt;del&gt; and &lt;ins&gt; markup in wiki.
8 * Improvements to the Forum threading display.
9 * Added support for embedding [https://pikchr.org|pikchr]
10 markup in markdown and fossil-wiki content.
11 * The new "[/help?cmd=pikchr|pikchr]" command can render
12 pikchr scripts, optionally pre-processed with
13 [/doc/trunk/www/th1.md|TH1] blocks and variables exactly like
14 site skins are.
@@ -19,10 +19,18 @@
19 initiate a preview and to toggle between the editor and preview
20 tabs.
21 * The <tt>/artifact</tt> and <tt>/file</tt> views, when in
22 line-number mode, now support interactive selection of a range
23 of lines to hyperlink to.
 
 
 
 
 
 
 
 
24 * Countless other minor refinements and documentation improvements.
25
26 <a name='v2_12'></a>
27 <h2>Changes for Version 2.12.1 (2020-08-20)</h2>
28
29
--- www/changes.wiki
+++ www/changes.wiki
@@ -1,14 +1,14 @@
1 <title>Change Log</title>
2
3 <a name='v2_13'></a>
4 <h2>Changes for Version 2.13 (2020-11-01)</h2>
5
6 * Added support for [./interwiki.md|interwiki links].
7 * Enable &lt;del&gt; and &lt;ins&gt; markup in wiki.
8 * Improvements to the Forum threading display.
9 * Added support for embedding [./pikchr.md|pikchr]
10 markup in markdown and fossil-wiki content.
11 * The new "[/help?cmd=pikchr|pikchr]" command can render
12 pikchr scripts, optionally pre-processed with
13 [/doc/trunk/www/th1.md|TH1] blocks and variables exactly like
14 site skins are.
@@ -19,10 +19,18 @@
19 initiate a preview and to toggle between the editor and preview
20 tabs.
21 * The <tt>/artifact</tt> and <tt>/file</tt> views, when in
22 line-number mode, now support interactive selection of a range
23 of lines to hyperlink to.
24 * Enhance the [/help?cmd=/finfo|/finfo] webpage so that when query
25 parameters identify both a filename and a checkin, the resulting
26 graph tracks the identified file across renames.
27 * The built-in SQLite is updated to an alpha of version 3.34.0, and
28 the minimum SQLite version is increased to 3.34.0 because the
29 /finfo change in the previous bullet depends on enhancements to
30 recursive common table expressions that are only available in
31 SQLite 3.34.0 and later.
32 * Countless other minor refinements and documentation improvements.
33
34 <a name='v2_12'></a>
35 <h2>Changes for Version 2.12.1 (2020-08-20)</h2>
36
37
--- www/checkin_names.wiki
+++ www/checkin_names.wiki
@@ -15,18 +15,18 @@
1515
<ul>
1616
<li> <b>tip</b>
1717
<li> <b>current</b>
1818
<li> <b>next</b>
1919
<li> <b>previous</b> or <b>prev</b>
20
-<li> <b>ckout</b> (<a href='./embeddeddocs.wiki'>embedded docs</a> only)
20
+<li> <b>ckout</b> (<a href='./embeddeddoc.wiki'>embedded docs</a> only)
2121
</ul>
2222
</ul>
2323
</td></tr>
2424
</table>
25
-Many Fossil [/help|commands] and [./webui.wiki | web-interface] URLs accept
25
+Many Fossil [/help|commands] and [./webui.wiki | web interface] URLs accept
2626
check-in names as an argument. For example, the "[/help/info|info]" command
27
-accepts an optional check-in name to identify the specific checkout
27
+accepts an optional check-in name to identify the specific check-in
2828
about which information is desired:
2929
3030
<blockquote>
3131
<tt>fossil info</tt> <i>checkin-name</i>
3232
</blockquote>
@@ -45,18 +45,18 @@
4545
document describes the various methods.
4646
4747
<h2 id="canonical">Canonical Check-in Name</h2>
4848
4949
The canonical name of a check-in is the hash of its
50
-[./fileformat.wiki#manifest | manifest] expressed as a 40-or-more character
51
-lowercase hexadecimal number. For example:
50
+[./fileformat.wiki#manifest | manifest] expressed as a
51
+[./hashes.md | long lowercase hexadecimal number]. For example:
5252
5353
<blockquote><pre>
5454
fossil info e5a734a19a9826973e1d073b49dc2a16aa2308f9
5555
</pre></blockquote>
5656
57
-The full 40+ character hash is unwieldy to remember and type, though,
57
+The full 40 or 64 character hash is unwieldy to remember and type, though,
5858
so Fossil also accepts a unique prefix of the hash, using any combination
5959
of upper and lower case letters, as long as the prefix is at least 4
6060
characters long. Hence the following commands all
6161
accomplish the same thing as the above:
6262
@@ -64,50 +64,47 @@
6464
fossil info e5a734a19a9
6565
fossil info E5a734A
6666
fossil info e5a7
6767
</blockquote>
6868
69
-Many web-interface screens identify check-ins by 10- or 16-character
69
+Many web interface screens identify check-ins by 10- or 16-character
7070
prefix of canonical name.
7171
7272
<h2 id="tags">Tags And Branch Names</h2>
7373
7474
Using a tag or branch name where a check-in name is expected causes
7575
Fossil to choose the most recent check-in with that tag or branch name.
76
-So, for example, as of this writing the most recent check-in that
77
-is tagged with "release" is [d0753799e44].
78
-So the command:
76
+So for example, the most recent check-in that
77
+is tagged with "release" as of this writing is [b98ce23d4fc].
78
+The command:
7979
8080
<blockquote><pre>
8181
fossil info release
8282
</pre></blockquote>
8383
84
-Results in the following input:
84
+…results in the following output:
8585
8686
<blockquote><pre>
87
-uuid: d0753799e447b795933e9f266233767d84aa1d84 2010-11-01 14:23:35 UTC
88
-parent: 4e1241f3236236187ad2a8f205323c05b98c9895 2010-10-31 21:51:11 UTC
89
-child: 4a094f46ade70bd9d1e4ffa48cbe94b4d3750aef 2010-11-01 18:52:37 UTC
90
-child: f4033ec09ee6bb2a73fa588c217527a1f311bd27 2010-11-01 23:38:34 UTC
91
-tags: trunk, release
92
-comment: Fix a typo in the file format documentation reported on the
93
- Tcl/Tk chatroom. (user: drh)
87
+hash: b98ce23d4fc3b734cdc058ee8a67e6dad675ca13 2020-08-20 13:27:04 UTC
88
+parent: 40feec329163103293d98dfcc2d119d1a16b227a 2020-08-20 13:01:51 UTC
89
+tags: release, branch-2.12, version-2.12.1
90
+comment: Version 2.12.1 (user: drh)
9491
</pre></blockquote>
9592
9693
There are multiple check-ins that are tagged with "release" but
97
-(as of this writing) the [d0753799e44]
94
+(as of this writing) the [b98ce23d4fc]
9895
check-in is the most recent so it is the one that is selected.
9996
100
-Note that unlike other command DVCSes, a "branch" in Fossil
101
-is not anything special; it is simply a sequence of check-ins that
102
-share a common tag. So the same mechanism that resolves tag names
97
+Note that unlike some other version control systems, a "branch" in Fossil
98
+is not anything special: it is simply a sequence of check-ins that
99
+share a common tag, so the same mechanism that resolves tag names
103100
also resolves branch names.
104101
105102
<a id="tagpfx"></a>
106
-Note also that there can (in theory) be an ambiguity between tag names
103
+Note also that there can — in theory, if rarely in practice — be an ambiguity between tag names
107104
and canonical names. Suppose, for example, you had a check-in with
108
-the canonical name deed28aa99a835f01fa06d5b4a41ecc2121bf419 and you
105
+the canonical name deed28aa99… and you
109106
also happened to have tagged a different check-in with "deed2". If
110107
you use the "deed2" name, does it choose the canonical name or the tag
111108
name? In such cases, you can prefix the tag name with "tag:".
112109
For example:
113110
@@ -114,17 +111,17 @@
114111
<blockquote><tt>
115112
fossil info tag:deed2
116113
</tt></blockquote>
117114
118115
The "tag:deed2" name will refer to the most recent check-in
119
-tagged with "deed2" not to the
116
+tagged with "deed2" rather than the
120117
check-in whose canonical name begins with "deed2".
121118
122119
<h2 id="whole-branches">Whole Branches</h2>
123120
124121
Usually when a branch name is specified, it means the latest check-in on
125
-that branch. But for some commands (ex: [/help/purge|purge]) a branch name
122
+that branch, but for some commands (ex: [/help/purge|purge]) a branch name
126123
on the argument means the earliest connected check-in on the branch. This
127124
seems confusing when being explained here, but it works out to be intuitive
128125
in practice.
129126
130127
For example, the command "fossil purge XYZ" means to purge the check-in XYZ
@@ -153,11 +150,11 @@
153150
6. <i>YYYYMMDDHHMM</i>
154151
7. <i>YYYYMMDDHHMMSS</i>
155152
156153
In the second through the fourth forms,
157154
the space between the day and the year can optionally be
158
-replaced by an uppercase <b>T</b> and the entire timestamp can
155
+replaced by an uppercase <b>T</b>, and the entire timestamp can
159156
optionally be followed by "<b>z</b>" or "<b>Z</b>". In the fourth
160157
form with fractional seconds, any number of digits may follow the
161158
decimal point, though due to precision limits only the first three
162159
digits will be significant. The final three pure-digit forms
163160
without punctuation are only valid if the number they encode is
@@ -164,11 +161,11 @@
164161
not also the prefix of an artifact hash.
165162
166163
In its default configuration, Fossil interprets and displays all dates
167164
in Universal Coordinated Time (UTC). This tends to work the best for
168165
distributed projects where participants are scattered around the globe.
169
-But there is an option on the Admin/Timeline page of the web-interface to
166
+But there is an option on the Admin/Timeline page of the web interface to
170167
switch to local time. The "<b>Z</b>" suffix on a timestamp check-in
171168
name is meaningless if Fossil is in the default mode of using UTC for
172169
everything, but if Fossil has been switched to local time mode, then the
173170
"<b>Z</b>" suffix means to interpret that particular timestamp using
174171
UTC instead of local time.
@@ -186,17 +183,21 @@
186183
187184
<blockquote>
188185
http://www.fossil-scm.org/fossil/doc/<b>trunk</b>/www/index.wiki
189186
</blockquote>
190187
191
-The bold component of that URL is a check-in name. To see what the
192
-Fossil website looked like on January 1, 2009, one has merely to change
188
+The bold component of that URL is a check-in name. To see the stored
189
+content of the Fossil website repository as of January 1, 2009, one has merely to change
193190
the URL to the following:
194191
195192
<blockquote>
196193
http://www.fossil-scm.org/fossil/doc/<b>2009-01-01</b>/www/index.wiki
197194
</blockquote>
195
+
196
+(Note that this won't roll you back to the <i>skin</i> and other
197
+cosmetic configurations as of that date. It also won't change screens
198
+like the timeline, which has an independent date selector.)
198199
199200
<h2 id="tag-ts">Tag And Timestamp</h2>
200201
201202
A check-in name can also take the form of a tag or branch name followed by
202203
a colon and then a timestamp. The combination means to take the most
@@ -206,39 +207,44 @@
206207
<blockquote>
207208
fossil update trunk:2010-07-01T14:30
208209
</blockquote>
209210
210211
Would cause Fossil to update the working check-out to be the most recent
211
-check-in on the trunk that is not more recent that 14:30 (UTC) on
212
+check-in on the trunk that is not more recent than 14:30 (UTC) on
212213
July 1, 2010.
213214
214215
<h2 id="root">Root Of A Branch</h2>
215216
216217
A branch name that begins with the "<tt>root:</tt>" prefix refers to the
217
-last check-in in the parent branch prior to the beginning of the branch.
218
+last check-in on the parent branch prior to the beginning of the branch.
218219
Such a label is useful, for example, in computing all diffs for a single
219220
branch. The following example will show all changes in the hypothetical
220221
branch "xyzzy":
221222
222223
<blockquote>
223224
fossil diff --from root:xyzzy --to xyzzy
224225
</blockquote>
225226
226227
<a id="merge-in"></a>
227
-A branch name that begins with the "<tt>merge-in:</tt>" prefix refers not
228
-to the root of the branch, but to the most recent merge-in for that branch
229
-from its parent. The most recent merge-in is the version to diff the branch
230
-against in order to see all changes in just the branch itself, omitting
228
+That doesn't do what you might expect after you merge the parent
229
+branch's changes into the child branch: the above command will include
230
+changes made on the parent branch as well.
231
+
232
+You can solve this by using the prefix "<tt>merge-in:</tt>" instead of
233
+"<tt>root:</tt>" to tell Fossil to find
234
+the most recent merge-in point for that branch.
235
+The resulting diff will then show only the changes in
236
+the branch itself, omitting
231237
any changes that have already been merged in from the parent branch.
232238
233239
234240
<h2 id="special">Special Tags</h2>
235241
236
-The tag "tip" means the most recent check-in. The "tip" tag is roughly
237
-equivalent to the timestamp tag "5000-01-01".
242
+The tag "tip" means the most recent check-in. The "tip" tag is practically
243
+equivalent to the timestamp "9999-12-31".
238244
239
-This special name works anywhere you can pass a "NAME", such as in in
245
+This special name works anywhere you can pass a "NAME", such as with
240246
<tt>/info</tt> URLs:
241247
242248
<blockquote><pre>
243249
http://localhost:8080/info/tip
244250
</pre></blockquote>
@@ -285,11 +291,11 @@
285291
# Exact matches on [#special | the special names]
286292
# [#timestamps | Timestamps], with preference to ISO8601 forms
287293
# [#tagpfx | tag:TAGNAME]
288294
# [#root | root:BRANCH]
289295
# [#merge-in | merge-in:BRANCH]
290
- # [#tag-ts | TAG:timestamp]
296
+ # [#tag-ts | TAGNAME:timestamp]
291297
# Full artifact hash or hash prefix.
292298
# Any other type of symbolic name that Fossil extracts from
293
- blockchain artifacts.
299
+ artifacts.
294300
295301
<div style="height:40em" id="this-space-intentionally-left-blank"></div>
296302
--- www/checkin_names.wiki
+++ www/checkin_names.wiki
@@ -15,18 +15,18 @@
15 <ul>
16 <li> <b>tip</b>
17 <li> <b>current</b>
18 <li> <b>next</b>
19 <li> <b>previous</b> or <b>prev</b>
20 <li> <b>ckout</b> (<a href='./embeddeddocs.wiki'>embedded docs</a> only)
21 </ul>
22 </ul>
23 </td></tr>
24 </table>
25 Many Fossil [/help|commands] and [./webui.wiki | web-interface] URLs accept
26 check-in names as an argument. For example, the "[/help/info|info]" command
27 accepts an optional check-in name to identify the specific checkout
28 about which information is desired:
29
30 <blockquote>
31 <tt>fossil info</tt> <i>checkin-name</i>
32 </blockquote>
@@ -45,18 +45,18 @@
45 document describes the various methods.
46
47 <h2 id="canonical">Canonical Check-in Name</h2>
48
49 The canonical name of a check-in is the hash of its
50 [./fileformat.wiki#manifest | manifest] expressed as a 40-or-more character
51 lowercase hexadecimal number. For example:
52
53 <blockquote><pre>
54 fossil info e5a734a19a9826973e1d073b49dc2a16aa2308f9
55 </pre></blockquote>
56
57 The full 40+ character hash is unwieldy to remember and type, though,
58 so Fossil also accepts a unique prefix of the hash, using any combination
59 of upper and lower case letters, as long as the prefix is at least 4
60 characters long. Hence the following commands all
61 accomplish the same thing as the above:
62
@@ -64,50 +64,47 @@
64 fossil info e5a734a19a9
65 fossil info E5a734A
66 fossil info e5a7
67 </blockquote>
68
69 Many web-interface screens identify check-ins by 10- or 16-character
70 prefix of canonical name.
71
72 <h2 id="tags">Tags And Branch Names</h2>
73
74 Using a tag or branch name where a check-in name is expected causes
75 Fossil to choose the most recent check-in with that tag or branch name.
76 So, for example, as of this writing the most recent check-in that
77 is tagged with "release" is [d0753799e44].
78 So the command:
79
80 <blockquote><pre>
81 fossil info release
82 </pre></blockquote>
83
84 Results in the following input:
85
86 <blockquote><pre>
87 uuid: d0753799e447b795933e9f266233767d84aa1d84 2010-11-01 14:23:35 UTC
88 parent: 4e1241f3236236187ad2a8f205323c05b98c9895 2010-10-31 21:51:11 UTC
89 child: 4a094f46ade70bd9d1e4ffa48cbe94b4d3750aef 2010-11-01 18:52:37 UTC
90 child: f4033ec09ee6bb2a73fa588c217527a1f311bd27 2010-11-01 23:38:34 UTC
91 tags: trunk, release
92 comment: Fix a typo in the file format documentation reported on the
93 Tcl/Tk chatroom. (user: drh)
94 </pre></blockquote>
95
96 There are multiple check-ins that are tagged with "release" but
97 (as of this writing) the [d0753799e44]
98 check-in is the most recent so it is the one that is selected.
99
100 Note that unlike other command DVCSes, a "branch" in Fossil
101 is not anything special; it is simply a sequence of check-ins that
102 share a common tag. So the same mechanism that resolves tag names
103 also resolves branch names.
104
105 <a id="tagpfx"></a>
106 Note also that there can (in theory) be an ambiguity between tag names
107 and canonical names. Suppose, for example, you had a check-in with
108 the canonical name deed28aa99a835f01fa06d5b4a41ecc2121bf419 and you
109 also happened to have tagged a different check-in with "deed2". If
110 you use the "deed2" name, does it choose the canonical name or the tag
111 name? In such cases, you can prefix the tag name with "tag:".
112 For example:
113
@@ -114,17 +111,17 @@
114 <blockquote><tt>
115 fossil info tag:deed2
116 </tt></blockquote>
117
118 The "tag:deed2" name will refer to the most recent check-in
119 tagged with "deed2" not to the
120 check-in whose canonical name begins with "deed2".
121
122 <h2 id="whole-branches">Whole Branches</h2>
123
124 Usually when a branch name is specified, it means the latest check-in on
125 that branch. But for some commands (ex: [/help/purge|purge]) a branch name
126 on the argument means the earliest connected check-in on the branch. This
127 seems confusing when being explained here, but it works out to be intuitive
128 in practice.
129
130 For example, the command "fossil purge XYZ" means to purge the check-in XYZ
@@ -153,11 +150,11 @@
153 6. <i>YYYYMMDDHHMM</i>
154 7. <i>YYYYMMDDHHMMSS</i>
155
156 In the second through the fourth forms,
157 the space between the day and the year can optionally be
158 replaced by an uppercase <b>T</b> and the entire timestamp can
159 optionally be followed by "<b>z</b>" or "<b>Z</b>". In the fourth
160 form with fractional seconds, any number of digits may follow the
161 decimal point, though due to precision limits only the first three
162 digits will be significant. The final three pure-digit forms
163 without punctuation are only valid if the number they encode is
@@ -164,11 +161,11 @@
164 not also the prefix of an artifact hash.
165
166 In its default configuration, Fossil interprets and displays all dates
167 in Universal Coordinated Time (UTC). This tends to work the best for
168 distributed projects where participants are scattered around the globe.
169 But there is an option on the Admin/Timeline page of the web-interface to
170 switch to local time. The "<b>Z</b>" suffix on a timestamp check-in
171 name is meaningless if Fossil is in the default mode of using UTC for
172 everything, but if Fossil has been switched to local time mode, then the
173 "<b>Z</b>" suffix means to interpret that particular timestamp using
174 UTC instead of local time.
@@ -186,17 +183,21 @@
186
187 <blockquote>
188 http://www.fossil-scm.org/fossil/doc/<b>trunk</b>/www/index.wiki
189 </blockquote>
190
191 The bold component of that URL is a check-in name. To see what the
192 Fossil website looked like on January 1, 2009, one has merely to change
193 the URL to the following:
194
195 <blockquote>
196 http://www.fossil-scm.org/fossil/doc/<b>2009-01-01</b>/www/index.wiki
197 </blockquote>
 
 
 
 
198
199 <h2 id="tag-ts">Tag And Timestamp</h2>
200
201 A check-in name can also take the form of a tag or branch name followed by
202 a colon and then a timestamp. The combination means to take the most
@@ -206,39 +207,44 @@
206 <blockquote>
207 fossil update trunk:2010-07-01T14:30
208 </blockquote>
209
210 Would cause Fossil to update the working check-out to be the most recent
211 check-in on the trunk that is not more recent that 14:30 (UTC) on
212 July 1, 2010.
213
214 <h2 id="root">Root Of A Branch</h2>
215
216 A branch name that begins with the "<tt>root:</tt>" prefix refers to the
217 last check-in in the parent branch prior to the beginning of the branch.
218 Such a label is useful, for example, in computing all diffs for a single
219 branch. The following example will show all changes in the hypothetical
220 branch "xyzzy":
221
222 <blockquote>
223 fossil diff --from root:xyzzy --to xyzzy
224 </blockquote>
225
226 <a id="merge-in"></a>
227 A branch name that begins with the "<tt>merge-in:</tt>" prefix refers not
228 to the root of the branch, but to the most recent merge-in for that branch
229 from its parent. The most recent merge-in is the version to diff the branch
230 against in order to see all changes in just the branch itself, omitting
 
 
 
 
 
231 any changes that have already been merged in from the parent branch.
232
233
234 <h2 id="special">Special Tags</h2>
235
236 The tag "tip" means the most recent check-in. The "tip" tag is roughly
237 equivalent to the timestamp tag "5000-01-01".
238
239 This special name works anywhere you can pass a "NAME", such as in in
240 <tt>/info</tt> URLs:
241
242 <blockquote><pre>
243 http://localhost:8080/info/tip
244 </pre></blockquote>
@@ -285,11 +291,11 @@
285 # Exact matches on [#special | the special names]
286 # [#timestamps | Timestamps], with preference to ISO8601 forms
287 # [#tagpfx | tag:TAGNAME]
288 # [#root | root:BRANCH]
289 # [#merge-in | merge-in:BRANCH]
290 # [#tag-ts | TAG:timestamp]
291 # Full artifact hash or hash prefix.
292 # Any other type of symbolic name that Fossil extracts from
293 blockchain artifacts.
294
295 <div style="height:40em" id="this-space-intentionally-left-blank"></div>
296
--- www/checkin_names.wiki
+++ www/checkin_names.wiki
@@ -15,18 +15,18 @@
15 <ul>
16 <li> <b>tip</b>
17 <li> <b>current</b>
18 <li> <b>next</b>
19 <li> <b>previous</b> or <b>prev</b>
20 <li> <b>ckout</b> (<a href='./embeddeddoc.wiki'>embedded docs</a> only)
21 </ul>
22 </ul>
23 </td></tr>
24 </table>
25 Many Fossil [/help|commands] and [./webui.wiki | web interface] URLs accept
26 check-in names as an argument. For example, the "[/help/info|info]" command
27 accepts an optional check-in name to identify the specific check-in
28 about which information is desired:
29
30 <blockquote>
31 <tt>fossil info</tt> <i>checkin-name</i>
32 </blockquote>
@@ -45,18 +45,18 @@
45 document describes the various methods.
46
47 <h2 id="canonical">Canonical Check-in Name</h2>
48
49 The canonical name of a check-in is the hash of its
50 [./fileformat.wiki#manifest | manifest] expressed as a
51 [./hashes.md | long lowercase hexadecimal number]. For example:
52
53 <blockquote><pre>
54 fossil info e5a734a19a9826973e1d073b49dc2a16aa2308f9
55 </pre></blockquote>
56
57 The full 40 or 64 character hash is unwieldy to remember and type, though,
58 so Fossil also accepts a unique prefix of the hash, using any combination
59 of upper and lower case letters, as long as the prefix is at least 4
60 characters long. Hence the following commands all
61 accomplish the same thing as the above:
62
@@ -64,50 +64,47 @@
64 fossil info e5a734a19a9
65 fossil info E5a734A
66 fossil info e5a7
67 </blockquote>
68
69 Many web interface screens identify check-ins by a 10- or 16-character
70 prefix of the canonical name.
71
72 <h2 id="tags">Tags And Branch Names</h2>
73
74 Using a tag or branch name where a check-in name is expected causes
75 Fossil to choose the most recent check-in with that tag or branch name.
76 So for example, the most recent check-in that
77 is tagged with "release" as of this writing is [b98ce23d4fc].
78 The command:
79
80 <blockquote><pre>
81 fossil info release
82 </pre></blockquote>
83
84 …results in the following output:
85
86 <blockquote><pre>
87 hash: b98ce23d4fc3b734cdc058ee8a67e6dad675ca13 2020-08-20 13:27:04 UTC
88 parent: 40feec329163103293d98dfcc2d119d1a16b227a 2020-08-20 13:01:51 UTC
89 tags: release, branch-2.12, version-2.12.1
90 comment: Version 2.12.1 (user: drh)
 
 
 
91 </pre></blockquote>
92
93 There are multiple check-ins that are tagged with "release" but
94 (as of this writing) the [b98ce23d4fc]
95 check-in is the most recent so it is the one that is selected.
96
97 Note that unlike some other version control systems, a "branch" in Fossil
98 is not anything special: it is simply a sequence of check-ins that
99 share a common tag, so the same mechanism that resolves tag names
100 also resolves branch names.
101
102 <a id="tagpfx"></a>
103 Note also that there can — in theory, if rarely in practice — be an ambiguity between tag names
104 and canonical names. Suppose, for example, you had a check-in with
105 the canonical name deed28aa99… and you
106 also happened to have tagged a different check-in with "deed2". If
107 you use the "deed2" name, does it choose the canonical name or the tag
108 name? In such cases, you can prefix the tag name with "tag:".
109 For example:
110
@@ -114,17 +111,17 @@
111 <blockquote><tt>
112 fossil info tag:deed2
113 </tt></blockquote>
114
115 The "tag:deed2" name will refer to the most recent check-in
116 tagged with "deed2" rather than the
117 check-in whose canonical name begins with "deed2".
118
119 <h2 id="whole-branches">Whole Branches</h2>
120
121 Usually when a branch name is specified, it means the latest check-in on
122 that branch, but for some commands (ex: [/help/purge|purge]) a branch name
123 on the argument means the earliest connected check-in on the branch. This
124 seems confusing when being explained here, but it works out to be intuitive
125 in practice.
126
127 For example, the command "fossil purge XYZ" means to purge the check-in XYZ
@@ -153,11 +150,11 @@
150 6. <i>YYYYMMDDHHMM</i>
151 7. <i>YYYYMMDDHHMMSS</i>
152
153 In the second through the fourth forms,
154 the space between the day and the year can optionally be
155 replaced by an uppercase <b>T</b>, and the entire timestamp can
156 optionally be followed by "<b>z</b>" or "<b>Z</b>". In the fourth
157 form with fractional seconds, any number of digits may follow the
158 decimal point, though due to precision limits only the first three
159 digits will be significant. The final three pure-digit forms
160 without punctuation are only valid if the number they encode is
@@ -164,11 +161,11 @@
161 not also the prefix of an artifact hash.
162
163 In its default configuration, Fossil interprets and displays all dates
164 in Coordinated Universal Time (UTC). This tends to work the best for
165 distributed projects where participants are scattered around the globe.
166 But there is an option on the Admin/Timeline page of the web interface to
167 switch to local time. The "<b>Z</b>" suffix on a timestamp check-in
168 name is meaningless if Fossil is in the default mode of using UTC for
169 everything, but if Fossil has been switched to local time mode, then the
170 "<b>Z</b>" suffix means to interpret that particular timestamp using
171 UTC instead of local time.
@@ -186,17 +183,21 @@
183
184 <blockquote>
185 http://www.fossil-scm.org/fossil/doc/<b>trunk</b>/www/index.wiki
186 </blockquote>
187
188 The bold component of that URL is a check-in name. To see the stored
189 content of the Fossil website repository as of January 1, 2009, one has merely to change
190 the URL to the following:
191
192 <blockquote>
193 http://www.fossil-scm.org/fossil/doc/<b>2009-01-01</b>/www/index.wiki
194 </blockquote>
195
196 (Note that this won't roll you back to the <i>skin</i> and other
197 cosmetic configurations as of that date. It also won't change screens
198 like the timeline, which has an independent date selector.)
199
200 <h2 id="tag-ts">Tag And Timestamp</h2>
201
202 A check-in name can also take the form of a tag or branch name followed by
203 a colon and then a timestamp. The combination means to take the most
@@ -206,39 +207,44 @@
207 <blockquote>
208 fossil update trunk:2010-07-01T14:30
209 </blockquote>
210
211 Would cause Fossil to update the working check-out to be the most recent
212 check-in on the trunk that is not more recent than 14:30 (UTC) on
213 July 1, 2010.
214
215 <h2 id="root">Root Of A Branch</h2>
216
217 A branch name that begins with the "<tt>root:</tt>" prefix refers to the
218 last check-in on the parent branch prior to the beginning of the branch.
219 Such a label is useful, for example, in computing all diffs for a single
220 branch. The following example will show all changes in the hypothetical
221 branch "xyzzy":
222
223 <blockquote>
224 fossil diff --from root:xyzzy --to xyzzy
225 </blockquote>
226
227 <a id="merge-in"></a>
228 That doesn't do what you might expect after you merge the parent
229 branch's changes into the child branch: the above command will include
230 changes made on the parent branch as well.
231
232 You can solve this by using the prefix "<tt>merge-in:</tt>" instead of
233 "<tt>root:</tt>" to tell Fossil to find
234 the most recent merge-in point for that branch.
235 The resulting diff will then show only the changes in
236 the branch itself, omitting
237 any changes that have already been merged in from the parent branch.
238
239
240 <h2 id="special">Special Tags</h2>
241
242 The tag "tip" means the most recent check-in. The "tip" tag is practically
243 equivalent to the timestamp "9999-12-31".
244
245 This special name works anywhere you can pass a "NAME", such as with
246 <tt>/info</tt> URLs:
247
248 <blockquote><pre>
249 http://localhost:8080/info/tip
250 </pre></blockquote>
@@ -285,11 +291,11 @@
291 # Exact matches on [#special | the special names]
292 # [#timestamps | Timestamps], with preference to ISO8601 forms
293 # [#tagpfx | tag:TAGNAME]
294 # [#root | root:BRANCH]
295 # [#merge-in | merge-in:BRANCH]
296 # [#tag-ts | TAGNAME:timestamp]
297 # Full artifact hash or hash prefix.
298 # Any other type of symbolic name that Fossil extracts from
299 artifacts.
300
301 <div style="height:40em" id="this-space-intentionally-left-blank"></div>
302
--- www/fileedit-page.md
+++ www/fileedit-page.md
@@ -56,11 +56,11 @@
5656
[xhr]: https://en.wikipedia.org/wiki/XMLHttpRequest
5757
5858
## `/fileedit` **Works by Creating Commits**
5959
6060
Thus any edits made via that page become a normal part of the
61
-repository's blockchain.
61
+repository.
6262
6363
## `/fileedit` is *Intended* for use with Embedded Docs
6464
6565
... and similar text files, and is most certainly
6666
**not intended for editing code**.
6767
--- www/fileedit-page.md
+++ www/fileedit-page.md
@@ -56,11 +56,11 @@
56 [xhr]: https://en.wikipedia.org/wiki/XMLHttpRequest
57
58 ## `/fileedit` **Works by Creating Commits**
59
60 Thus any edits made via that page become a normal part of the
61 repository's blockchain.
62
63 ## `/fileedit` is *Intended* for use with Embedded Docs
64
65 ... and similar text files, and is most certainly
66 **not intended for editing code**.
67
--- www/fileedit-page.md
+++ www/fileedit-page.md
@@ -56,11 +56,11 @@
56 [xhr]: https://en.wikipedia.org/wiki/XMLHttpRequest
57
58 ## `/fileedit` **Works by Creating Commits**
59
60 Thus any edits made via that page become a normal part of the
61 repository.
62
63 ## `/fileedit` is *Intended* for use with Embedded Docs
64
65 ... and similar text files, and is most certainly
66 **not intended for editing code**.
67
--- www/fossil-v-git.wiki
+++ www/fossil-v-git.wiki
@@ -230,11 +230,11 @@
230230
231231
The baseline data structures for Fossil and Git are the same, modulo
232232
formatting details. Both systems manage a
233233
[https://en.wikipedia.org/wiki/Directed_acyclic_graph | directed acyclic
234234
graph] (DAG) of [https://en.wikipedia.org/wiki/Merkle_tree | Merkle
235
-tree] / [./blockchain.md | block chain] structured check-in objects.
235
+tree] structured check-in objects.
236236
Check-ins are identified by a cryptographic hash of the check-in
237237
contents, and each check-in refers to its parent via <i>its</i> hash.
238238
239239
The difference is that Git stores its objects as individual files in the
240240
<tt>.git</tt> folder or compressed into bespoke
@@ -741,11 +741,11 @@
741741
repository immediately if successful, even though you haven't tested the
742742
change yet. It's possible to argue for such a design in a tool like Git
743743
which doesn't automatically push the change up to its parent, because
744744
you can still test the change before pushing local changes to the parent
745745
repo, but in the meantime you've made a durable change to your local Git
746
-repository's blockchain. You must do something drastic like <tt>git
746
+repository. You must do something drastic like <tt>git
747747
reset --hard</tt> to revert that rebase if it causes a problem. If you
748748
push your rebased local repo up to the parent without testing first,
749749
you've now committed the error on a public branch, effectively a
750750
violation of
751751
[https://www.atlassian.com/git/tutorials/merging-vs-rebasing#the-golden-rule-of-rebasing
@@ -759,11 +759,11 @@
759759
760760
Fossil cannot sensibly work that way because of its default-enabled
761761
autosync feature. Instead of jumping straight to the commit step, Fossil
762762
applies the proposed merge to the local working directory only,
763763
requiring a separate check-in step before the change is committed to the
764
-repository blockchain. This gives you a chance to test the change first,
764
+repository. This gives you a chance to test the change first,
765765
either manually or by running your software's automatic tests. (Ideally,
766766
both!)
767767
768768
Another difference is that because Fossil requires an explicit commit
769769
for a merge, it makes you give an explicit commit <i>message</i> for
770770
--- www/fossil-v-git.wiki
+++ www/fossil-v-git.wiki
@@ -230,11 +230,11 @@
230
231 The baseline data structures for Fossil and Git are the same, modulo
232 formatting details. Both systems manage a
233 [https://en.wikipedia.org/wiki/Directed_acyclic_graph | directed acyclic
234 graph] (DAG) of [https://en.wikipedia.org/wiki/Merkle_tree | Merkle
235 tree] / [./blockchain.md | block chain] structured check-in objects.
236 Check-ins are identified by a cryptographic hash of the check-in
237 contents, and each check-in refers to its parent via <i>its</i> hash.
238
239 The difference is that Git stores its objects as individual files in the
240 <tt>.git</tt> folder or compressed into bespoke
@@ -741,11 +741,11 @@
741 repository immediately if successful, even though you haven't tested the
742 change yet. It's possible to argue for such a design in a tool like Git
743 which doesn't automatically push the change up to its parent, because
744 you can still test the change before pushing local changes to the parent
745 repo, but in the meantime you've made a durable change to your local Git
746 repository's blockchain. You must do something drastic like <tt>git
747 reset --hard</tt> to revert that rebase if it causes a problem. If you
748 push your rebased local repo up to the parent without testing first,
749 you've now committed the error on a public branch, effectively a
750 violation of
751 [https://www.atlassian.com/git/tutorials/merging-vs-rebasing#the-golden-rule-of-rebasing
@@ -759,11 +759,11 @@
759
760 Fossil cannot sensibly work that way because of its default-enabled
761 autosync feature. Instead of jumping straight to the commit step, Fossil
762 applies the proposed merge to the local working directory only,
763 requiring a separate check-in step before the change is committed to the
764 repository blockchain. This gives you a chance to test the change first,
765 either manually or by running your software's automatic tests. (Ideally,
766 both!)
767
768 Another difference is that because Fossil requires an explicit commit
769 for a merge, it makes you give an explicit commit <i>message</i> for
770
--- www/fossil-v-git.wiki
+++ www/fossil-v-git.wiki
@@ -230,11 +230,11 @@
230
231 The baseline data structures for Fossil and Git are the same, modulo
232 formatting details. Both systems manage a
233 [https://en.wikipedia.org/wiki/Directed_acyclic_graph | directed acyclic
234 graph] (DAG) of [https://en.wikipedia.org/wiki/Merkle_tree | Merkle
235 tree] structured check-in objects.
236 Check-ins are identified by a cryptographic hash of the check-in
237 contents, and each check-in refers to its parent via <i>its</i> hash.
238
239 The difference is that Git stores its objects as individual files in the
240 <tt>.git</tt> folder or compressed into bespoke
@@ -741,11 +741,11 @@
741 repository immediately if successful, even though you haven't tested the
742 change yet. It's possible to argue for such a design in a tool like Git
743 which doesn't automatically push the change up to its parent, because
744 you can still test the change before pushing local changes to the parent
745 repo, but in the meantime you've made a durable change to your local Git
746 repository. You must do something drastic like <tt>git
747 reset --hard</tt> to revert that rebase if it causes a problem. If you
748 push your rebased local repo up to the parent without testing first,
749 you've now committed the error on a public branch, effectively a
750 violation of
751 [https://www.atlassian.com/git/tutorials/merging-vs-rebasing#the-golden-rule-of-rebasing
@@ -759,11 +759,11 @@
759
760 Fossil cannot sensibly work that way because of its default-enabled
761 autosync feature. Instead of jumping straight to the commit step, Fossil
762 applies the proposed merge to the local working directory only,
763 requiring a separate check-in step before the change is committed to the
764 repository. This gives you a chance to test the change first,
765 either manually or by running your software's automatic tests. (Ideally,
766 both!)
767
768 Another difference is that because Fossil requires an explicit commit
769 for a merge, it makes you give an explicit commit <i>message</i> for
770
+93 -56
--- www/gitusers.md
+++ www/gitusers.md
@@ -19,35 +19,44 @@
1919
[1]: https://fossil-scm.org/forum
2020
2121
Specific suggestions on how to improve this document are also welcomed,
2222
of course.
2323
24
+
25
+
26
+## <a id="term"></a> Terminology
27
+
28
+While we do try to explain Fossil-specific terminology inline here
29
+as-needed, you may find it helpful to skim [the Fossil glossary][gloss].
30
+It will give you another take on our definitions here, and it may help
31
+you to understand some of the other Fossil docs better.
32
+
33
+
34
+----
35
+
2436
2537
<a id="mwd"></a>
2638
## Repositories And Checkouts Are Distinct
2739
2840
A repository and a check-out are distinct concepts in Fossil, whereas
2941
the two are collocated by default with Git.
3042
31
-A Fossil repository is a SQLite database in
32
-which the entire history of a project is stored. A check-out is a
43
+A Fossil repository is a SQLite database storing
44
+the entire history of a project. A Fossil check-out is a
3345
directory that contains a snapshot of your project that you
3446
are currently working on, extracted for you from that database by the
3547
`fossil` program.
3648
37
-(See [the Fossil glossary][gloss] for more Fossil terms of art that may
38
-be unfamiliar to a Git user.)
39
-
4049
With Git, cloning a repository gets you what Fossil would call a
4150
check-out directory with the repository stored in a `.git` subdirectory
42
-of that check-out. There are methods to get more working directories
51
+of that check-out. There are methods to get additional working directories
4352
pointing at that same Git repository, but because it’s not designed into
4453
the core concept of the tool, Git tutorials usually advocate a
4554
switch-in-place working mode instead, so that is how most users end up
4655
working with it.
4756
48
-Fossil can operate in the Git mode, switching between versions in a
57
+You can use Fossil the same way, switching between versions in a
4958
single check-out directory:
5059
5160
fossil clone https://example.com/repo /path/to/repo.fossil
5261
mkdir work-dir
5362
cd work-dir
@@ -54,32 +63,41 @@
5463
fossil open /path/to/repo.fossil
5564
...work on trunk...
5665
fossil update my-other-branch # like “git checkout”
5766
...work on your other branch in the same directory...
5867
59
-As of Fossil 2.12, it can clone-and-open into a single directory, as Git
68
+As of Fossil 2.12, you can ask it to clone-and-open into a single directory, as Git
6069
always has done:
6170
6271
mkdir work-dir
6372
cd work-dir
6473
fossil open https://example.com/repo
6574
6675
Now you have “trunk” open in `work-dir`, with the repo file stored as
6776
`repo.fossil` in that same directory.
6877
69
-(Note that Fossil purposefully does not create the directory for you as
70
-Git does, because this feature is an extension of
71
-[the “open” command][open], which historically means “open in the
72
-current directory” in Fossil. It would be wrong for Fossil to create a
73
-subdirectory when passed a URI but not when passed any other parameter.)
78
+You may be expecting [`fossil clone`][clone] to create a directory for
79
+you like Git does, but because the repository is separate from the
80
+working directory, it does not do that, on purpose: you have to tell it
81
+where to store the repository file.
7482
75
-The repository file can be named anything you want, with a single
83
+The [`fossil open URI`][open] syntax is our compromise for users wanting
84
+a clone-and-open command. But, because Fossil’s `open` command
85
+historically opens into the current directory, and it won’t open a
86
+repository into a non-empty directory by default — as of Fossil 2.12,
87
+anyway — you have to create the directory manually and `cd` into it
88
+before opening it. If `fossil open URI` worked like `git clone`, that
89
+would mean `fossil open` has two different ways of working depending on
90
+the argument, which is a non-Fossil sort of thing to do. We strive for
91
+consistent behavior across commands and modes.
92
+
93
+The Fossil repository file can be named anything you want, with a single
7694
exception: if you’re going to use the [`fossil server DIRECTORY`][server]
7795
feature, the repositories need to have a "`.fossil`" suffix. That aside,
7896
you can follow any other convention that makes sense to you.
7997
80
-Many people choose to gather all of their Fossil repositories
98
+Many Fossil users gather all of their Fossil repositories
8199
in a single directory on their machine, such as "`~/museum`" or
82100
"`C:\Fossils`". This can help humans to keep their repositories
83101
organized, but Fossil itself doesn't really care. (Why “museum”?
84102
Because that is where one stores valuable fossils.)
85103
@@ -107,22 +125,23 @@
107125
“scratch” directory for experiments or brief bits of work you don’t want
108126
to do in the other check-out directories, and a directory for testing a
109127
user report of a bug in the trunk version as of last April Fool’s Day.
110128
Each check-out operates independently of the others.
111129
112
-This working style is especially useful when programming in languages
130
+This multiple-checkouts working style is especially useful when Fossil stores source code in programming languages
113131
where there is a “build” step that transforms source files into files
114
-you actually run or distribute. With Git, switching versions in a single
115
-working tree means you have to rebuild all outputs from the source files
116
-that differ between those versions. In the above Fossil working model,
132
+you actually run or distribute. With Git’s typical switch-in-place workflow,
133
+you have to rebuild all outputs from the source files
134
+that differ between those versions whenever you switch versions. In the above Fossil working model,
117135
you switch versions with a “`cd`” command instead, so that you only have
118136
to rebuild outputs from files you yourself change.
119137
120138
This style is also useful when a check-out directory may be tied up with
121139
some long-running process, as with the “test” example above, where you
122140
might need to run an hours-long brute-force replication script to tickle
123
-a [Heisenbug][hb], forcing it to show itself. While that runs, you can “`cd ../trunk`” and get back
141
+a [Heisenbug][hb], forcing it to show itself. While that runs, you can
142
+open a new terminal tab, “`cd ../trunk`”, and get back
124143
to work.
125144
126145
Git users may be initially confused by the `.fslckout` file at the root
127146
of a check-out directory.
128147
This is not the same thing as `.git`. It’s a per-checkout SQLite
@@ -135,31 +154,47 @@
135154
(In native Windows builds of Fossil, this file is called `_FOSSIL_`
136155
instead to get around the historical 3-character extension limit with
137156
certain legacy filesystems. “Native” here is a distinction to exclude
138157
Cygwin and WSL builds, which use `.fslckout`.)
139158
159
+[clone]: /help?cmd=clone
140160
[close]: /help?cmd=close
141161
[gloss]: ./whyusefossil.wiki#definitions
142162
[hb]: https://en.wikipedia.org/wiki/Heisenbug
143163
[open]: /help?cmd=open
144164
[set]: /help?cmd=setting
145165
[server]: /help?cmd=server
146166
[stash]: /help?cmd=stash
147167
[undo]: /help?cmd=undo
148168
169
+
170
+## <a id="log"></a> Fossil’s Timeline is the “Log”
171
+
172
+Git users often need to use the `git log` command to grovel through
173
+commit histories due to its [weak data model][wdm].
174
+
175
+Fossil parses a huge amount of information out of commits that allow it
176
+to produce its [timeline CLI][tlc] and [its `/timeline` web view][tlw],
177
+which generally have the info you would have to manually extract from
178
+`git log`.
179
+
180
+[tlc]: /help?cmd=timeline
181
+[tlw]: /help?cmd=/timeline
182
+[wdm]: ./fossil-v-git.wiki#durable
183
+
149184
150185
<a id="staging"></a>
151186
## There Is No Staging Area
152187
153188
Fossil omits the "Git index" or "staging area" concept. When you
154189
type "`fossil commit`" _all_ changes in your check-out are committed,
155190
automatically. There is no need for the "-a" option as with Git.
156191
157
-If you only want to commit just some of the changes, you can list the names
158
-of the files you want to commit as arguments, like this:
192
+If you only want to commit _some_ of the changes, list the names
193
+of the files or directories you want to commit as arguments, like this:
159194
160
- fossil commit src/main.c doc/readme.md
195
+ fossil commit src/feature.c doc/feature.md examples/feature
161196
162197
163198
<a id="bneed"></a>
164199
## Create Branches At Point Of Need, Rather Than Ahead of Need
165200
@@ -168,14 +203,16 @@
168203
169204
fossil commit --branch my-new-branch
170205
171206
If that commit is successful, your local check-out directory is then
172207
switched to the tip of that branch, so subsequent commits don’t need the
173
-“`--branch`” option. You have to switch back to the parent branch
174
-explicitly, as with
208
+“`--branch`” option. You simply say `fossil commit` again to continue
209
+adding commits to the tip of that branch.
175210
176
- fossil update trunk # return to parent, “trunk” in this case
211
+To switch back to the parent branch, say something like:
212
+
213
+ fossil update trunk # like “git checkout”
177214
178215
Fossil does also support the Git style, creating the branch ahead of
179216
need:
180217
181218
fossil branch new my-new-branch
@@ -202,27 +239,27 @@
202239
203240
<a id="autosync"></a>
204241
## Autosync
205242
206243
Fossil’s [autosync][wflow] feature, normally enabled, has no
207
-equivalent in Git. If you want Fossil to behave like Git, you will turn
244
+equivalent in Git. If you want Fossil to behave like Git, you can turn
208245
it off:
209246
210247
fossil set autosync 0
211248
212
-It’s better to understand what the feature does and why it is enabled by
249
+However, it’s better to understand what the feature does and why it is enabled by
213250
default.
214251
215252
When autosync is enabled, Fossil automatically pushes your changes
216253
to the remote server whenever you "`fossil commit`", and it
217254
pulls all remote changes down to your local clone of the repository as
218255
part of a "`fossil update`".
219256
This provides most of the advantages of a centralized version control
220257
system while retaining the advantages of distributed version control:
221258
222
-1. Your work stays synced up with your coworkers as long as your
223
- machine can connect to the remote repository, but at need, you can go
259
+1. Your work stays synced up with your coworkers’ efforts as long as your
260
+ machine can connect to the remote repository. At need, you can go
224261
off-network and continue work atop the last version you sync’d with
225262
the remote.
226263
227264
2. It provides immediate off-machine backup of your commits. Unlike
228265
centralized version control, though, you can still work while
@@ -245,16 +282,17 @@
245282
[setup]: ./caps/admin-v-setup.md#apsu
246283
[wflow]: ./concepts.wiki#workflow
247284
248285
249286
<a id="syncall"></a>
250
-## Syncing Is All-Or-Nothing
287
+## Sync Is All-Or-Nothing
251288
252289
Fossil does not support the concept of syncing, pushing, or pulling
253
-individual branches. When you sync/push/pull in Fossil, you sync/push/pull
254
-everything: all branches, all wiki, all tickets, all forum posts,
255
-all tags, all technotes… Everything.
290
+individual branches. When you sync/push/pull in Fossil, you
291
+sync/push/pull everything stored as artifacts in its hash tree:
292
+branches, tags, wiki articles, tickets, forum posts, technotes…
293
+[Almost everything][bu].
256294
257295
Furthermore, branch *names* sync automatically in Fossil, not just the
258296
content of those branches. This means this common Git command:
259297
260298
git push origin master
@@ -365,29 +403,34 @@
365403
fossil mv --hard old-name new-name
366404
367405
[mv]: /help?cmd=mv
368406
[rm]: /help?cmd=rm
369407
408
+
409
+----
410
+
370411
371412
<a id="morigin"></a>
372413
## Multiple "origin" Servers
373414
374415
In this final section of the document, we’ll go into a lot more detail
375416
to illustrate the points above, not just give a quick summary of this
376417
single difference.
377418
378
-Consider a common use case — at the time of this writing, during the
419
+Consider a common use case at the time of this writing — during the
379420
COVID-19 pandemic — where you’re working from home a lot, going into the
380
-office maybe one part-day a week. Let us also say you have no remote
421
+office one part-day a week only to do things that have to be done
422
+on-site at the office. Let us also say you have no remote
381423
access back into the work LAN, such as because your site IT is paranoid
382
-about security. You may still want off-machine backups of your commits,
383
-so what you want is the ability to quickly switch between the “home” and
424
+about security. You may still want off-machine backups of your commits
425
+while working from home,
426
+so you need the ability to quickly switch between the “home” and
384427
“work” remote repositories, with your laptop acting as a kind of
385428
[sneakernet][sn] link between the big development server at the office
386429
and your family’s home NAS.
387430
388
-### Git Method
431
+#### Git Method
389432
390433
We first need to clone the work repo down to our laptop, so we can work on it
391434
at home:
392435
393436
git clone https://dev-server.example.com/repo
@@ -408,11 +451,11 @@
408451
409452
Realize that this is carefully optimized down to these two long
410453
commands. In practice, typing these commands by hand, from memory, we’d
411454
expect a normal user to need to give four or more commands here instead.
412455
Packing the “`git init`” call into the “`ssh`” call is something more
413
-done in scripts and documentation examples than is done interactively,
456
+often done in scripts and documentation examples than done interactively,
414457
which then necessitates a third command before the push, “`exit`”.
415458
There’s also a good chance that you’ll forget the need for the `--bare`
416459
option here to avoid a fatal complaint from Git that the laptop can’t
417460
push into a non-empty repo. If you fall into this trap, among the many
418461
that Git lays for newbies, you have to nuke the incorrectly initted
@@ -453,42 +496,36 @@
453496
This example also shows a consequence of the fact that
454497
[Git doesn’t sync branch names](#syncall): you have to keep repeating
455498
yourself, “master, master.”
456499
457500
458
-### Fossil Method
501
+#### Fossil Method
459502
460503
Now we’re going to do the same thing as above using Fossil. We’ve broken
461504
the commands up into blocks corresponding to those above for comparison.
505
+
462506
We start the same way, cloning the work repo down to the laptop:
463507
464508
mkdir repo
465509
cd repo
466510
fossil open https://dev-server.example.com/repo
467511
fossil remote add work https://dev-server.example.com/repo
468512
469
-Unlike Git, Fossil’s “clone and open” feature doesn’t create the
470
-directory for you, so we need an extra `mkdir` call here that isn’t
471
-needed in the Git case. This is an indirect reflection of Fossil’s
472
-[multiple working directories](#mwd) design philosophy: its
473
-[`open` command][open] requires that you either issue it in an empty
474
-directory or one containing a prior closed check-out. In exchange for
475
-this extra command, we get the advantage of Fossil’s
476
-[superior handling][shwmd] of multiple working directories. To get the
477
-full power of this feature, you’d switch from the “`fossil open URI`”
478
-command form to the separate clone-and-open form shown in
479
-[the quick start guide][qs], which adds one more command.
480
-
481
-We can’t spin the longer final command as a trade-off giving us extra
482
-power, though: the simple fact is, Fossil currently has no short command
513
+We’ve chosen the “`fossil open URI`” syntax here rather than separate
514
+`clone` and `open` commands to make the parallel with Git clearer. [See
515
+above](#mwd) for more on that topic.
516
+
517
+The final command is longer than the Git equivalent because
518
+Fossil currently has no short command
483519
to rename an existing remote. Worse, unlike with Git, we can’t just keep
484520
using the default remote name because Fossil uses that slot in its
485521
configuration database to store the *current* remote name, so on
486522
switching from work to home, the home URL will overwrite the work URL if
487523
we don’t give it an explicit name first.
488524
489
-Keep these costs in perspective, however: they’re one-time setup costs,
525
+So far, the Fossil commands are longer, but keep these costs in perspective:
526
+they’re one-time setup costs,
490527
easily amortized to insignificance by the shorter day-to-day commands
491528
below.
492529
493530
On first beginning to work from home, we reverse-clone the Fossil repo
494531
up to the NAS:
495532
--- www/gitusers.md
+++ www/gitusers.md
@@ -19,35 +19,44 @@
19 [1]: https://fossil-scm.org/forum
20
21 Specific suggestions on how to improve this document are also welcomed,
22 of course.
23
 
 
 
 
 
 
 
 
 
 
 
 
24
25 <a id="mwd"></a>
26 ## Repositories And Checkouts Are Distinct
27
28 A repository and a check-out are distinct concepts in Fossil, whereas
29 the two are collocated by default with Git.
30
31 A Fossil repository is a SQLite database in
32 which the entire history of a project is stored. A check-out is a
33 directory that contains a snapshot of your project that you
34 are currently working on, extracted for you from that database by the
35 `fossil` program.
36
37 (See [the Fossil glossary][gloss] for more Fossil terms of art that may
38 be unfamiliar to a Git user.)
39
40 With Git, cloning a repository gets you what Fossil would call a
41 check-out directory with the repository stored in a `.git` subdirectory
42 of that check-out. There are methods to get more working directories
43 pointing at that same Git repository, but because it’s not designed into
44 the core concept of the tool, Git tutorials usually advocate a
45 switch-in-place working mode instead, so that is how most users end up
46 working with it.
47
48 Fossil can operate in the Git mode, switching between versions in a
49 single check-out directory:
50
51 fossil clone https://example.com/repo /path/to/repo.fossil
52 mkdir work-dir
53 cd work-dir
@@ -54,32 +63,41 @@
54 fossil open /path/to/repo.fossil
55 ...work on trunk...
56 fossil update my-other-branch # like “git checkout”
57 ...work on your other branch in the same directory...
58
59 As of Fossil 2.12, it can clone-and-open into a single directory, as Git
60 always has done:
61
62 mkdir work-dir
63 cd work-dir
64 fossil open https://example.com/repo
65
66 Now you have “trunk” open in `work-dir`, with the repo file stored as
67 `repo.fossil` in that same directory.
68
69 (Note that Fossil purposefully does not create the directory for you as
70 Git does, because this feature is an extension of
71 [the “open” command][open], which historically means “open in the
72 current directory” in Fossil. It would be wrong for Fossil to create a
73 subdirectory when passed a URI but not when passed any other parameter.)
74
75 The repository file can be named anything you want, with a single
 
 
 
 
 
 
 
 
 
 
76 exception: if you’re going to use the [`fossil server DIRECTORY`][server]
77 feature, the repositories need to have a "`.fossil`" suffix. That aside,
78 you can follow any other convention that makes sense to you.
79
80 Many people choose to gather all of their Fossil repositories
81 in a single directory on their machine, such as "`~/museum`" or
82 "`C:\Fossils`". This can help humans to keep their repositories
83 organized, but Fossil itself doesn't really care. (Why “museum”?
84 Because that is where one stores valuable fossils.)
85
@@ -107,22 +125,23 @@
107 “scratch” directory for experiments or brief bits of work you don’t want
108 to do in the other check-out directories, and a directory for testing a
109 user report of a bug in the trunk version as of last April Fool’s Day.
110 Each check-out operates independently of the others.
111
112 This working style is especially useful when programming in languages
113 where there is a “build” step that transforms source files into files
114 you actually run or distribute. With Git, switching versions in a single
115 working tree means you have to rebuild all outputs from the source files
116 that differ between those versions. In the above Fossil working model,
117 you switch versions with a “`cd`” command instead, so that you only have
118 to rebuild outputs from files you yourself change.
119
120 This style is also useful when a check-out directory may be tied up with
121 some long-running process, as with the “test” example above, where you
122 might need to run an hours-long brute-force replication script to tickle
123 a [Heisenbug][hb], forcing it to show itself. While that runs, you can “`cd ../trunk`” and get back
 
124 to work.
125
126 Git users may be initially confused by the `.fslckout` file at the root
127 of a check-out directory.
128 This is not the same thing as `.git`. It’s a per-checkout SQLite
@@ -135,31 +154,47 @@
135 (In native Windows builds of Fossil, this file is called `_FOSSIL_`
136 instead to get around the historical 3-character extension limit with
137 certain legacy filesystems. “Native” here is a distinction to exclude
138 Cygwin and WSL builds, which use `.fslckout`.)
139
 
140 [close]: /help?cmd=close
141 [gloss]: ./whyusefossil.wiki#definitions
142 [hb]: https://en.wikipedia.org/wiki/Heisenbug
143 [open]: /help?cmd=open
144 [set]: /help?cmd=setting
145 [server]: /help?cmd=server
146 [stash]: /help?cmd=stash
147 [undo]: /help?cmd=undo
148
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
150 <a id="staging"></a>
151 ## There Is No Staging Area
152
153 Fossil omits the "Git index" or "staging area" concept. When you
154 type "`fossil commit`" _all_ changes in your check-out are committed,
155 automatically. There is no need for the "-a" option as with Git.
156
157 If you only want to commit just some of the changes, you can list the names
158 of the files you want to commit as arguments, like this:
159
160 fossil commit src/main.c doc/readme.md
161
162
163 <a id="bneed"></a>
164 ## Create Branches At Point Of Need, Rather Than Ahead of Need
165
@@ -168,14 +203,16 @@
168
169 fossil commit --branch my-new-branch
170
171 If that commit is successful, your local check-out directory is then
172 switched to the tip of that branch, so subsequent commits don’t need the
173 “`--branch`” option. You have to switch back to the parent branch
174 explicitly, as with
175
176 fossil update trunk # return to parent, “trunk” in this case
 
 
177
178 Fossil does also support the Git style, creating the branch ahead of
179 need:
180
181 fossil branch new my-new-branch
@@ -202,27 +239,27 @@
202
203 <a id="autosync"></a>
204 ## Autosync
205
206 Fossil’s [autosync][wflow] feature, normally enabled, has no
207 equivalent in Git. If you want Fossil to behave like Git, you will turn
208 it off:
209
210 fossil set autosync 0
211
212 It’s better to understand what the feature does and why it is enabled by
213 default.
214
215 When autosync is enabled, Fossil automatically pushes your changes
216 to the remote server whenever you "`fossil commit`", and it
217 pulls all remote changes down to your local clone of the repository as
218 part of a "`fossil update`".
219 This provides most of the advantages of a centralized version control
220 system while retaining the advantages of distributed version control:
221
222 1. Your work stays synced up with your coworkers as long as your
223 machine can connect to the remote repository, but at need, you can go
224 off-network and continue work atop the last version you sync’d with
225 the remote.
226
227 2. It provides immediate off-machine backup of your commits. Unlike
228 centralized version control, though, you can still work while
@@ -245,16 +282,17 @@
245 [setup]: ./caps/admin-v-setup.md#apsu
246 [wflow]: ./concepts.wiki#workflow
247
248
249 <a id="syncall"></a>
250 ## Syncing Is All-Or-Nothing
251
252 Fossil does not support the concept of syncing, pushing, or pulling
253 individual branches. When you sync/push/pull in Fossil, you sync/push/pull
254 everything: all branches, all wiki, all tickets, all forum posts,
255 all tags, all technotes… Everything.
 
256
257 Furthermore, branch *names* sync automatically in Fossil, not just the
258 content of those branches. This means this common Git command:
259
260 git push origin master
@@ -365,29 +403,34 @@
365 fossil mv --hard old-name new-name
366
367 [mv]: /help?cmd=mv
368 [rm]: /help?cmd=rm
369
 
 
 
370
371 <a id="morigin"></a>
372 ## Multiple "origin" Servers
373
374 In this final section of the document, we’ll go into a lot more detail
375 to illustrate the points above, not just give a quick summary of this
376 single difference.
377
378 Consider a common use case — at the time of this writing, during the
379 COVID-19 pandemic — where you’re working from home a lot, going into the
380 office maybe one part-day a week. Let us also say you have no remote
 
381 access back into the work LAN, such as because your site IT is paranoid
382 about security. You may still want off-machine backups of your commits,
383 so what you want is the ability to quickly switch between the “home” and
 
384 “work” remote repositories, with your laptop acting as a kind of
385 [sneakernet][sn] link between the big development server at the office
386 and your family’s home NAS.
387
388 ### Git Method
389
390 We first need to clone the work repo down to our laptop, so we can work on it
391 at home:
392
393 git clone https://dev-server.example.com/repo
@@ -408,11 +451,11 @@
408
409 Realize that this is carefully optimized down to these two long
410 commands. In practice, typing these commands by hand, from memory, we’d
411 expect a normal user to need to give four or more commands here instead.
412 Packing the “`git init`” call into the “`ssh`” call is something more
413 done in scripts and documentation examples than is done interactively,
414 which then necessitates a third command before the push, “`exit`”.
415 There’s also a good chance that you’ll forget the need for the `--bare`
416 option here to avoid a fatal complaint from Git that the laptop can’t
417 push into a non-empty repo. If you fall into this trap, among the many
418 that Git lays for newbies, you have to nuke the incorrectly initted
@@ -453,42 +496,36 @@
453 This example also shows a consequence of the fact that
454 [Git doesn’t sync branch names](#syncall): you have to keep repeating
455 yourself, “master, master.”
456
457
458 ### Fossil Method
459
460 Now we’re going to do the same thing as above using Fossil. We’ve broken
461 the commands up into blocks corresponding to those above for comparison.
 
462 We start the same way, cloning the work repo down to the laptop:
463
464 mkdir repo
465 cd repo
466 fossil open https://dev-server.example.com/repo
467 fossil remote add work https://dev-server.example.com/repo
468
469 Unlike Git, Fossil’s “clone and open” feature doesn’t create the
470 directory for you, so we need an extra `mkdir` call here that isn’t
471 needed in the Git case. This is an indirect reflection of Fossil’s
472 [multiple working directories](#mwd) design philosophy: its
473 [`open` command][open] requires that you either issue it in an empty
474 directory or one containing a prior closed check-out. In exchange for
475 this extra command, we get the advantage of Fossil’s
476 [superior handling][shwmd] of multiple working directories. To get the
477 full power of this feature, you’d switch from the “`fossil open URI`”
478 command form to the separate clone-and-open form shown in
479 [the quick start guide][qs], which adds one more command.
480
481 We can’t spin the longer final command as a trade-off giving us extra
482 power, though: the simple fact is, Fossil currently has no short command
483 to rename an existing remote. Worse, unlike with Git, we can’t just keep
484 using the default remote name because Fossil uses that slot in its
485 configuration database to store the *current* remote name, so on
486 switching from work to home, the home URL will overwrite the work URL if
487 we don’t give it an explicit name first.
488
489 Keep these costs in perspective, however: they’re one-time setup costs,
 
490 easily amortized to insignificance by the shorter day-to-day commands
491 below.
492
493 On first beginning to work from home, we reverse-clone the Fossil repo
494 up to the NAS:
495
--- www/gitusers.md
+++ www/gitusers.md
@@ -19,35 +19,44 @@
19 [1]: https://fossil-scm.org/forum
20
21 Specific suggestions on how to improve this document are also welcomed,
22 of course.
23
24
25
26 ## <a id="term"></a> Terminology
27
28 While we do try to explain Fossil-specific terminology inline here
29 as-needed, you may find it helpful to skim [the Fossil glossary][gloss].
30 It will give you another take on our definitions here, and it may help
31 you to understand some of the other Fossil docs better.
32
33
34 ----
35
36
37 <a id="mwd"></a>
38 ## Repositories And Checkouts Are Distinct
39
40 A repository and a check-out are distinct concepts in Fossil, whereas
41 the two are collocated by default with Git.
42
43 A Fossil repository is a SQLite database storing
44 the entire history of a project. A Fossil check-out is a
45 directory that contains a snapshot of your project that you
46 are currently working on, extracted for you from that database by the
47 `fossil` program.
48
 
 
 
49 With Git, cloning a repository gets you what Fossil would call a
50 check-out directory with the repository stored in a `.git` subdirectory
51 of that check-out. There are methods to get additional working directories
52 pointing at that same Git repository, but because it’s not designed into
53 the core concept of the tool, Git tutorials usually advocate a
54 switch-in-place working mode instead, so that is how most users end up
55 working with it.
56
57 You can use Fossil the same way, switching between versions in a
58 single check-out directory:
59
60 fossil clone https://example.com/repo /path/to/repo.fossil
61 mkdir work-dir
62 cd work-dir
@@ -54,32 +63,41 @@
63 fossil open /path/to/repo.fossil
64 ...work on trunk...
65 fossil update my-other-branch # like “git checkout”
66 ...work on your other branch in the same directory...
67
68 As of Fossil 2.12, you can ask it to clone-and-open into a single directory, as Git
69 always has done:
70
71 mkdir work-dir
72 cd work-dir
73 fossil open https://example.com/repo
74
75 Now you have “trunk” open in `work-dir`, with the repo file stored as
76 `repo.fossil` in that same directory.
77
78 You may be expecting [`fossil clone`][clone] to create a directory for
79 you like Git does, but because the repository is separate from the
80 working directory, it does not do that, on purpose: you have to tell it
81 where to store the repository file.
 
82
83 The [`fossil open URI`][open] syntax is our compromise for users wanting
84 a clone-and-open command. But, because Fossil’s `open` command
85 historically opens into the current directory, and it won’t open a
86 repository into a non-empty directory by default — as of Fossil 2.12,
87 anyway — you have to create the directory manually and `cd` into it
88 before opening it. If `fossil open URI` worked like `git clone`, that
89 would mean `fossil open` has two different ways of working depending on
90 the argument, which is a non-Fossil sort of thing to do. We strive for
91 consistent behavior across commands and modes.
92
93 The Fossil repository file can be named anything you want, with a single
94 exception: if you’re going to use the [`fossil server DIRECTORY`][server]
95 feature, the repositories need to have a "`.fossil`" suffix. That aside,
96 you can follow any other convention that makes sense to you.
97
98 Many Fossil users gather all of their Fossil repositories
99 in a single directory on their machine, such as "`~/museum`" or
100 "`C:\Fossils`". This can help humans to keep their repositories
101 organized, but Fossil itself doesn't really care. (Why “museum”?
102 Because that is where one stores valuable fossils.)
103
@@ -107,22 +125,23 @@
125 “scratch” directory for experiments or brief bits of work you don’t want
126 to do in the other check-out directories, and a directory for testing a
127 user report of a bug in the trunk version as of last April Fool’s Day.
128 Each check-out operates independently of the others.
129
130 This multiple-checkouts working style is especially useful when Fossil stores source code in programming languages
131 where there is a “build” step that transforms source files into files
132 you actually run or distribute. With Git’s typical switch-in-place workflow,
133 you have to rebuild all outputs from the source files
134 that differ between those versions whenever you switch versions. In the above Fossil working model,
135 you switch versions with a “`cd`” command instead, so that you only have
136 to rebuild outputs from files you yourself change.
137
138 This style is also useful when a check-out directory may be tied up with
139 some long-running process, as with the “test” example above, where you
140 might need to run an hours-long brute-force replication script to tickle
141 a [Heisenbug][hb], forcing it to show itself. While that runs, you can
142 open a new terminal tab, “`cd ../trunk`”, and get back
143 to work.
144
145 Git users may be initially confused by the `.fslckout` file at the root
146 of a check-out directory.
147 This is not the same thing as `.git`. It’s a per-checkout SQLite
@@ -135,31 +154,47 @@
154 (In native Windows builds of Fossil, this file is called `_FOSSIL_`
155 instead to get around the historical 3-character extension limit with
156 certain legacy filesystems. “Native” here is a distinction to exclude
157 Cygwin and WSL builds, which use `.fslckout`.)
158
159 [clone]: /help?cmd=clone
160 [close]: /help?cmd=close
161 [gloss]: ./whyusefossil.wiki#definitions
162 [hb]: https://en.wikipedia.org/wiki/Heisenbug
163 [open]: /help?cmd=open
164 [set]: /help?cmd=setting
165 [server]: /help?cmd=server
166 [stash]: /help?cmd=stash
167 [undo]: /help?cmd=undo
168
169
170 ## <a id="log"></a> Fossil’s Timeline is the “Log”
171
172 Git users often need to use the `git log` command to grovel through
173 commit histories due to its [weak data model][wdm].
174
175 Fossil parses a huge amount of information out of commits that allow it
176 to produce its [timeline CLI][tlc] and [its `/timeline` web view][tlw],
177 which generally have the info you would have to manually extract from
178 `git log`.
179
180 [tlc]: /help?cmd=timeline
181 [tlw]: /help?cmd=/timeline
182 [wdm]: ./fossil-v-git.wiki#durable
183
184
185 <a id="staging"></a>
186 ## There Is No Staging Area
187
188 Fossil omits the "Git index" or "staging area" concept. When you
189 type "`fossil commit`" _all_ changes in your check-out are committed,
190 automatically. There is no need for the "-a" option as with Git.
191
192 If you only want to commit _some_ of the changes, list the names
193 of the files or directories you want to commit as arguments, like this:
194
195 fossil commit src/feature.c doc/feature.md examples/feature
196
197
198 <a id="bneed"></a>
199 ## Create Branches At Point Of Need, Rather Than Ahead of Need
200
@@ -168,14 +203,16 @@
203
204 fossil commit --branch my-new-branch
205
206 If that commit is successful, your local check-out directory is then
207 switched to the tip of that branch, so subsequent commits don’t need the
208 “`--branch`” option. You simply say `fossil commit` again to continue
209 adding commits to the tip of that branch.
210
211 To switch back to the parent branch, say something like:
212
213 fossil update trunk # like “git checkout”
214
215 Fossil does also support the Git style, creating the branch ahead of
216 need:
217
218 fossil branch new my-new-branch
@@ -202,27 +239,27 @@
239
240 <a id="autosync"></a>
241 ## Autosync
242
243 Fossil’s [autosync][wflow] feature, normally enabled, has no
244 equivalent in Git. If you want Fossil to behave like Git, you can turn
245 it off:
246
247 fossil set autosync 0
248
249 However, it’s better to understand what the feature does and why it is enabled by
250 default.
251
252 When autosync is enabled, Fossil automatically pushes your changes
253 to the remote server whenever you "`fossil commit`", and it
254 pulls all remote changes down to your local clone of the repository as
255 part of a "`fossil update`".
256 This provides most of the advantages of a centralized version control
257 system while retaining the advantages of distributed version control:
258
259 1. Your work stays synced up with your coworkers’ efforts as long as your
260 machine can connect to the remote repository. At need, you can go
261 off-network and continue work atop the last version you sync’d with
262 the remote.
263
264 2. It provides immediate off-machine backup of your commits. Unlike
265 centralized version control, though, you can still work while
@@ -245,16 +282,17 @@
282 [setup]: ./caps/admin-v-setup.md#apsu
283 [wflow]: ./concepts.wiki#workflow
284
285
286 <a id="syncall"></a>
287 ## Sync Is All-Or-Nothing
288
289 Fossil does not support the concept of syncing, pushing, or pulling
290 individual branches. When you sync/push/pull in Fossil, you
291 sync/push/pull everything stored as artifacts in its hash tree:
292 branches, tags, wiki articles, tickets, forum posts, technotes…
293 [Almost everything][bu].
294
295 Furthermore, branch *names* sync automatically in Fossil, not just the
296 content of those branches. This means this common Git command:
297
298 git push origin master
@@ -365,29 +403,34 @@
403 fossil mv --hard old-name new-name
404
405 [mv]: /help?cmd=mv
406 [rm]: /help?cmd=rm
407
408
409 ----
410
411
412 <a id="morigin"></a>
413 ## Multiple "origin" Servers
414
415 In this final section of the document, we’ll go into a lot more detail
416 to illustrate the points above, not just give a quick summary of this
417 single difference.
418
419 Consider a common use case at the time of this writing — during the
420 COVID-19 pandemic — where you’re working from home a lot, going into the
421 office one part-day a week only to do things that have to be done
422 on-site at the office. Let us also say you have no remote
423 access back into the work LAN, such as because your site IT is paranoid
424 about security. You may still want off-machine backups of your commits
425 while working from home,
426 so you need the ability to quickly switch between the “home” and
427 “work” remote repositories, with your laptop acting as a kind of
428 [sneakernet][sn] link between the big development server at the office
429 and your family’s home NAS.
430
431 #### Git Method
432
433 We first need to clone the work repo down to our laptop, so we can work on it
434 at home:
435
436 git clone https://dev-server.example.com/repo
@@ -408,11 +451,11 @@
451
452 Realize that this is carefully optimized down to these two long
453 commands. In practice, typing these commands by hand, from memory, we’d
454 expect a normal user to need to give four or more commands here instead.
455 Packing the “`git init`” call into the “`ssh`” call is something more
456 often done in scripts and documentation examples than done interactively,
457 which then necessitates a third command before the push, “`exit`”.
458 There’s also a good chance that you’ll forget the need for the `--bare`
459 option here to avoid a fatal complaint from Git that the laptop can’t
460 push into a non-empty repo. If you fall into this trap, among the many
461 that Git lays for newbies, you have to nuke the incorrectly initted
@@ -453,42 +496,36 @@
496 This example also shows a consequence of the fact that
497 [Git doesn’t sync branch names](#syncall): you have to keep repeating
498 yourself, “master, master.”
499
500
501 #### Fossil Method
502
503 Now we’re going to do the same thing as above using Fossil. We’ve broken
504 the commands up into blocks corresponding to those above for comparison.
505
506 We start the same way, cloning the work repo down to the laptop:
507
508 mkdir repo
509 cd repo
510 fossil open https://dev-server.example.com/repo
511 fossil remote add work https://dev-server.example.com/repo
512
513 We’ve chosen the “`fossil open URI`” syntax here rather than separate
514 `clone` and `open` commands to make the parallel with Git clearer. [See
515 above](#mwd) for more on that topic.
516
517 The final command is longer than the Git equivalent because
518 Fossil currently has no short command
 
 
 
 
 
 
 
 
519 to rename an existing remote. Worse, unlike with Git, we can’t just keep
520 using the default remote name because Fossil uses that slot in its
521 configuration database to store the *current* remote name, so on
522 switching from work to home, the home URL will overwrite the work URL if
523 we don’t give it an explicit name first.
524
525 So far, the Fossil commands are longer, but keep these costs in perspective:
526 they’re one-time setup costs,
527 easily amortized to insignificance by the shorter day-to-day commands
528 below.
529
530 On first beginning to work from home, we reverse-clone the Fossil repo
531 up to the NAS:
532
--- www/mirrorlimitations.md
+++ www/mirrorlimitations.md
@@ -14,14 +14,14 @@
1414
as Wiki, Tickets, Technotes, and the Forum are not supported in Git,
1515
so those features are not included in an export.
1616
1717
Third-party Git based tooling may add some of these features (e.g.
1818
GitHub, GitLab) but because their data are not stored in the Git
19
-blockchain, there is no single destination for Fossil to convert its
19
+repository, there is no single destination for Fossil to convert its
2020
equivalent data *to*. For instance, Fossil tickets do not become GitHub
2121
issues, because that is a proprietary feature of GitHub separate from
22
-Git proper, stored outside the blockchain on the GitHub servers.
22
+Git proper, stored outside the repository on the GitHub servers.
2323
2424
You can also see the problem in its inverse case: you do not get a copy
2525
of your GitHub issues when cloning the Git repository. You *do* get the
2626
Fossil tickets, wiki, forum posts, etc. when cloning a remote Fossil
2727
repo.
@@ -29,14 +29,14 @@
2929
## (2) Cherrypick Merges
3030
3131
The Git client supports cherrypick merges but does not record the
3232
cherrypick parent(s).
3333
34
-Fossil tracks cherrypick merges in its blockchain and displays
34
+Fossil tracks cherrypick merges in its repository and displays
3535
cherrypicks in its timeline. (As an example, the dashed lines
3636
[here](/timeline?c=0a9f12ce6655b7a5) are cherrypicks.) Because Git does
37
-not have a way to represent this same information in its blockchain, the
37
+not have a way to represent this same information in its repository, the
3838
history of Fossil cherrypicks cannot be exported to Git, only their
3939
direct effects on the managed file data.
4040
4141
## (3) Named Branches
4242
@@ -73,11 +73,11 @@
7373
7474
## (5) Amendments To Check-ins
7575
7676
Check-ins are immutable in both Fossil and Git.
7777
However, Fossil has a mechanism by which tags can be added to
78
-its blockchain to provide after-the-fact corrections to prior check-ins.
78
+its repository to provide after-the-fact corrections to prior check-ins.
7979
8080
For example, tags can be added to check-ins that correct typos in the
8181
check-in comment. The original check-in is immutable and so the
8282
original comment is preserved in addition to the correction. But
8383
software that displays the check-ins knows to look for the comment-change
8484
--- www/mirrorlimitations.md
+++ www/mirrorlimitations.md
@@ -14,14 +14,14 @@
14 as Wiki, Tickets, Technotes, and the Forum are not supported in Git,
15 so those features are not included in an export.
16
17 Third-party Git based tooling may add some of these features (e.g.
18 GitHub, GitLab) but because their data are not stored in the Git
19 blockchain, there is no single destination for Fossil to convert its
20 equivalent data *to*. For instance, Fossil tickets do not become GitHub
21 issues, because that is a proprietary feature of GitHub separate from
22 Git proper, stored outside the blockchain on the GitHub servers.
23
24 You can also see the problem in its inverse case: you do not get a copy
25 of your GitHub issues when cloning the Git repository. You *do* get the
26 Fossil tickets, wiki, forum posts, etc. when cloning a remote Fossil
27 repo.
@@ -29,14 +29,14 @@
29 ## (2) Cherrypick Merges
30
31 The Git client supports cherrypick merges but does not record the
32 cherrypick parent(s).
33
34 Fossil tracks cherrypick merges in its blockchain and displays
35 cherrypicks in its timeline. (As an example, the dashed lines
36 [here](/timeline?c=0a9f12ce6655b7a5) are cherrypicks.) Because Git does
37 not have a way to represent this same information in its blockchain, the
38 history of Fossil cherrypicks cannot be exported to Git, only their
39 direct effects on the managed file data.
40
41 ## (3) Named Branches
42
@@ -73,11 +73,11 @@
73
74 ## (5) Amendments To Check-ins
75
76 Check-ins are immutable in both Fossil and Git.
77 However, Fossil has a mechanism by which tags can be added to
78 its blockchain to provide after-the-fact corrections to prior check-ins.
79
80 For example, tags can be added to check-ins that correct typos in the
81 check-in comment. The original check-in is immutable and so the
82 original comment is preserved in addition to the correction. But
83 software that displays the check-ins knows to look for the comment-change
84
--- www/mirrorlimitations.md
+++ www/mirrorlimitations.md
@@ -14,14 +14,14 @@
14 as Wiki, Tickets, Technotes, and the Forum are not supported in Git,
15 so those features are not included in an export.
16
17 Third-party Git based tooling may add some of these features (e.g.
18 GitHub, GitLab) but because their data are not stored in the Git
19 repository, there is no single destination for Fossil to convert its
20 equivalent data *to*. For instance, Fossil tickets do not become GitHub
21 issues, because that is a proprietary feature of GitHub separate from
22 Git proper, stored outside the repository on the GitHub servers.
23
24 You can also see the problem in its inverse case: you do not get a copy
25 of your GitHub issues when cloning the Git repository. You *do* get the
26 Fossil tickets, wiki, forum posts, etc. when cloning a remote Fossil
27 repo.
@@ -29,14 +29,14 @@
29 ## (2) Cherrypick Merges
30
31 The Git client supports cherrypick merges but does not record the
32 cherrypick parent(s).
33
34 Fossil tracks cherrypick merges in its repository and displays
35 cherrypicks in its timeline. (As an example, the dashed lines
36 [here](/timeline?c=0a9f12ce6655b7a5) are cherrypicks.) Because Git does
37 not have a way to represent this same information in its repository, the
38 history of Fossil cherrypicks cannot be exported to Git, only their
39 direct effects on the managed file data.
40
41 ## (3) Named Branches
42
@@ -73,11 +73,11 @@
73
74 ## (5) Amendments To Check-ins
75
76 Check-ins are immutable in both Fossil and Git.
77 However, Fossil has a mechanism by which tags can be added to
78 its repository to provide after-the-fact corrections to prior check-ins.
79
80 For example, tags can be added to check-ins that correct typos in the
81 check-in comment. The original check-in is immutable and so the
82 original comment is preserved in addition to the correction. But
83 software that displays the check-ins knows to look for the comment-change
84
+1 -1
--- www/mkindex.tcl
+++ www/mkindex.tcl
@@ -14,11 +14,11 @@
1414
alerts.md {Email Alerts And Notifications}
1515
antibot.wiki {Defense against Spiders and Bots}
1616
backoffice.md {The "Backoffice" mechanism of Fossil}
1717
backup.md {Backing Up a Remote Fossil Repository}
1818
blame.wiki {The Annotate/Blame Algorithm Of Fossil}
19
- blockchain.md {Fossil As Blockchain}
19
+ blockchain.md {Is Fossil A Blockchain?}
2020
branching.wiki {Branching, Forking, Merging, and Tagging}
2121
bugtheory.wiki {Bug Tracking In Fossil}
2222
build.wiki {Compiling and Installing Fossil}
2323
cap-theorem.md {Fossil and the CAP Theorem}
2424
caps/ {Administering User Capabilities}
2525
--- www/mkindex.tcl
+++ www/mkindex.tcl
@@ -14,11 +14,11 @@
14 alerts.md {Email Alerts And Notifications}
15 antibot.wiki {Defense against Spiders and Bots}
16 backoffice.md {The "Backoffice" mechanism of Fossil}
17 backup.md {Backing Up a Remote Fossil Repository}
18 blame.wiki {The Annotate/Blame Algorithm Of Fossil}
19 blockchain.md {Fossil As Blockchain}
20 branching.wiki {Branching, Forking, Merging, and Tagging}
21 bugtheory.wiki {Bug Tracking In Fossil}
22 build.wiki {Compiling and Installing Fossil}
23 cap-theorem.md {Fossil and the CAP Theorem}
24 caps/ {Administering User Capabilities}
25
--- www/mkindex.tcl
+++ www/mkindex.tcl
@@ -14,11 +14,11 @@
14 alerts.md {Email Alerts And Notifications}
15 antibot.wiki {Defense against Spiders and Bots}
16 backoffice.md {The "Backoffice" mechanism of Fossil}
17 backup.md {Backing Up a Remote Fossil Repository}
18 blame.wiki {The Annotate/Blame Algorithm Of Fossil}
19 blockchain.md {Is Fossil A Blockchain?}
20 branching.wiki {Branching, Forking, Merging, and Tagging}
21 bugtheory.wiki {Bug Tracking In Fossil}
22 build.wiki {Compiling and Installing Fossil}
23 cap-theorem.md {Fossil and the CAP Theorem}
24 caps/ {Administering User Capabilities}
25
--- www/permutedindex.html
+++ www/permutedindex.html
@@ -41,11 +41,11 @@
4141
<li><a href="backoffice.md">Backoffice mechanism of Fossil &mdash; The</a></li>
4242
<li><a href="fossil_prompt.wiki">Bash Prompt &mdash; Fossilized</a></li>
4343
<li><a href="whyusefossil.wiki"><b>Benefits Of Version Control</b></a></li>
4444
<li><a href="caps/admin-v-setup.md">Between Setup and Admin Users &mdash; Differences</a></li>
4545
<li><a href="hashpolicy.wiki">Between SHA1 and SHA3-256 &mdash; Hash Policy: Choosing</a></li>
46
-<li><a href="blockchain.md">Blockchain &mdash; Fossil As</a></li>
46
+<li><a href="blockchain.md">Blockchain? &mdash; Is Fossil A</a></li>
4747
<li><a href="antibot.wiki">Bots &mdash; Defense against Spiders and</a></li>
4848
<li><a href="private.wiki">Branches &mdash; Creating, Syncing, and Deleting Private</a></li>
4949
<li><a href="branching.wiki"><b>Branching, Forking, Merging, and Tagging</b></a></li>
5050
<li><a href="bugtheory.wiki"><b>Bug Tracking In Fossil</b></a></li>
5151
<li><a href="makefile.wiki">Build Process &mdash; The Fossil</a></li>
@@ -126,11 +126,10 @@
126126
<li><a href="image-format-vs-repo-size.md">Format vs Fossil Repo Size &mdash; Image</a></li>
127127
<li><a href="../../../md_rules">Formatting Rules &mdash; Markdown</a></li>
128128
<li><a href="../../../wiki_rules">Formatting Rules &mdash; Wiki</a></li>
129129
<li><a href="forum.wiki">Forums &mdash; Fossil</a></li>
130130
<li><a href="cap-theorem.md"><b>Fossil and the CAP Theorem</b></a></li>
131
-<li><a href="blockchain.md"><b>Fossil As Blockchain</b></a></li>
132131
<li><a href="changes.wiki"><b>Fossil Changelog</b></a></li>
133132
<li><a href="concepts.wiki"><b>Fossil Core Concepts</b></a></li>
134133
<li><a href="css-tricks.md"><b>Fossil CSS Tips and Tricks</b></a></li>
135134
<li><a href="delta_encoder_algorithm.wiki"><b>Fossil Delta Encoding Algorithm</b></a></li>
136135
<li><a href="delta_format.wiki"><b>Fossil Delta Format</b></a></li>
@@ -188,10 +187,11 @@
188187
<li><a href="build.wiki">Installing Fossil &mdash; Compiling and</a></li>
189188
<li><a href="fossil-from-msvc.wiki"><b>Integrating Fossil in the Microsoft Express 2010 IDE</b></a></li>
190189
<li><a href="selfcheck.wiki">Integrity Self Checks &mdash; Fossil Repository</a></li>
191190
<li><a href="webui.wiki">Interface &mdash; The Fossil Web</a></li>
192191
<li><a href="interwiki.md"><b>Interwiki Links</b></a></li>
192
+<li><a href="blockchain.md"><b>Is Fossil A Blockchain?</b></a></li>
193193
<li><a href="javascript.md">JavaScript in Fossil &mdash; Use of</a></li>
194194
<li><a href="th1.md">Language &mdash; The TH1 Scripting</a></li>
195195
<li><a href="copyright-release.html">License Agreement &mdash; Contributor</a></li>
196196
<li><a href="mirrorlimitations.md"><b>Limitations On Git Mirrors</b></a></li>
197197
<li><a href="interwiki.md">Links &mdash; Interwiki</a></li>
198198
199199
ADDED www/pikchr.md
--- www/permutedindex.html
+++ www/permutedindex.html
@@ -41,11 +41,11 @@
41 <li><a href="backoffice.md">Backoffice mechanism of Fossil &mdash; The</a></li>
42 <li><a href="fossil_prompt.wiki">Bash Prompt &mdash; Fossilized</a></li>
43 <li><a href="whyusefossil.wiki"><b>Benefits Of Version Control</b></a></li>
44 <li><a href="caps/admin-v-setup.md">Between Setup and Admin Users &mdash; Differences</a></li>
45 <li><a href="hashpolicy.wiki">Between SHA1 and SHA3-256 &mdash; Hash Policy: Choosing</a></li>
46 <li><a href="blockchain.md">Blockchain &mdash; Fossil As</a></li>
47 <li><a href="antibot.wiki">Bots &mdash; Defense against Spiders and</a></li>
48 <li><a href="private.wiki">Branches &mdash; Creating, Syncing, and Deleting Private</a></li>
49 <li><a href="branching.wiki"><b>Branching, Forking, Merging, and Tagging</b></a></li>
50 <li><a href="bugtheory.wiki"><b>Bug Tracking In Fossil</b></a></li>
51 <li><a href="makefile.wiki">Build Process &mdash; The Fossil</a></li>
@@ -126,11 +126,10 @@
126 <li><a href="image-format-vs-repo-size.md">Format vs Fossil Repo Size &mdash; Image</a></li>
127 <li><a href="../../../md_rules">Formatting Rules &mdash; Markdown</a></li>
128 <li><a href="../../../wiki_rules">Formatting Rules &mdash; Wiki</a></li>
129 <li><a href="forum.wiki">Forums &mdash; Fossil</a></li>
130 <li><a href="cap-theorem.md"><b>Fossil and the CAP Theorem</b></a></li>
131 <li><a href="blockchain.md"><b>Fossil As Blockchain</b></a></li>
132 <li><a href="changes.wiki"><b>Fossil Changelog</b></a></li>
133 <li><a href="concepts.wiki"><b>Fossil Core Concepts</b></a></li>
134 <li><a href="css-tricks.md"><b>Fossil CSS Tips and Tricks</b></a></li>
135 <li><a href="delta_encoder_algorithm.wiki"><b>Fossil Delta Encoding Algorithm</b></a></li>
136 <li><a href="delta_format.wiki"><b>Fossil Delta Format</b></a></li>
@@ -188,10 +187,11 @@
188 <li><a href="build.wiki">Installing Fossil &mdash; Compiling and</a></li>
189 <li><a href="fossil-from-msvc.wiki"><b>Integrating Fossil in the Microsoft Express 2010 IDE</b></a></li>
190 <li><a href="selfcheck.wiki">Integrity Self Checks &mdash; Fossil Repository</a></li>
191 <li><a href="webui.wiki">Interface &mdash; The Fossil Web</a></li>
192 <li><a href="interwiki.md"><b>Interwiki Links</b></a></li>
 
193 <li><a href="javascript.md">JavaScript in Fossil &mdash; Use of</a></li>
194 <li><a href="th1.md">Language &mdash; The TH1 Scripting</a></li>
195 <li><a href="copyright-release.html">License Agreement &mdash; Contributor</a></li>
196 <li><a href="mirrorlimitations.md"><b>Limitations On Git Mirrors</b></a></li>
197 <li><a href="interwiki.md">Links &mdash; Interwiki</a></li>
198
199 ADDED www/pikchr.md
--- www/permutedindex.html
+++ www/permutedindex.html
@@ -41,11 +41,11 @@
41 <li><a href="backoffice.md">Backoffice mechanism of Fossil &mdash; The</a></li>
42 <li><a href="fossil_prompt.wiki">Bash Prompt &mdash; Fossilized</a></li>
43 <li><a href="whyusefossil.wiki"><b>Benefits Of Version Control</b></a></li>
44 <li><a href="caps/admin-v-setup.md">Between Setup and Admin Users &mdash; Differences</a></li>
45 <li><a href="hashpolicy.wiki">Between SHA1 and SHA3-256 &mdash; Hash Policy: Choosing</a></li>
46 <li><a href="blockchain.md">Blockchain? &mdash; Is Fossil A</a></li>
47 <li><a href="antibot.wiki">Bots &mdash; Defense against Spiders and</a></li>
48 <li><a href="private.wiki">Branches &mdash; Creating, Syncing, and Deleting Private</a></li>
49 <li><a href="branching.wiki"><b>Branching, Forking, Merging, and Tagging</b></a></li>
50 <li><a href="bugtheory.wiki"><b>Bug Tracking In Fossil</b></a></li>
51 <li><a href="makefile.wiki">Build Process &mdash; The Fossil</a></li>
@@ -126,11 +126,10 @@
126 <li><a href="image-format-vs-repo-size.md">Format vs Fossil Repo Size &mdash; Image</a></li>
127 <li><a href="../../../md_rules">Formatting Rules &mdash; Markdown</a></li>
128 <li><a href="../../../wiki_rules">Formatting Rules &mdash; Wiki</a></li>
129 <li><a href="forum.wiki">Forums &mdash; Fossil</a></li>
130 <li><a href="cap-theorem.md"><b>Fossil and the CAP Theorem</b></a></li>
 
131 <li><a href="changes.wiki"><b>Fossil Changelog</b></a></li>
132 <li><a href="concepts.wiki"><b>Fossil Core Concepts</b></a></li>
133 <li><a href="css-tricks.md"><b>Fossil CSS Tips and Tricks</b></a></li>
134 <li><a href="delta_encoder_algorithm.wiki"><b>Fossil Delta Encoding Algorithm</b></a></li>
135 <li><a href="delta_format.wiki"><b>Fossil Delta Format</b></a></li>
@@ -188,10 +187,11 @@
187 <li><a href="build.wiki">Installing Fossil &mdash; Compiling and</a></li>
188 <li><a href="fossil-from-msvc.wiki"><b>Integrating Fossil in the Microsoft Express 2010 IDE</b></a></li>
189 <li><a href="selfcheck.wiki">Integrity Self Checks &mdash; Fossil Repository</a></li>
190 <li><a href="webui.wiki">Interface &mdash; The Fossil Web</a></li>
191 <li><a href="interwiki.md"><b>Interwiki Links</b></a></li>
192 <li><a href="blockchain.md"><b>Is Fossil A Blockchain?</b></a></li>
193 <li><a href="javascript.md">JavaScript in Fossil &mdash; Use of</a></li>
194 <li><a href="th1.md">Language &mdash; The TH1 Scripting</a></li>
195 <li><a href="copyright-release.html">License Agreement &mdash; Contributor</a></li>
196 <li><a href="mirrorlimitations.md"><b>Limitations On Git Mirrors</b></a></li>
197 <li><a href="interwiki.md">Links &mdash; Interwiki</a></li>
198
199 DDED www/pikchr.md
+133
--- a/www/pikchr.md
+++ b/www/pikchr.md
@@ -0,0 +1,133 @@
1
+# The Pikchr Diagram Language
2
+
3
+Pikchr (pronounced "picture") is a [PIC][1]-like markup language for creating
4
+diagrams in technical documentation. The source text for Pikchr diagrams
5
+can be embedded directly in either [Markdown][2] or [Fossil Wiki][3].
6
+Fossil translates the Pikchr source text into SVG which is displayed as
7
+part of the rendered wiki.
8
+
9
+[1]: wikipedia:/wiki/Pic_language
10
+[2]: /md_rules
11
+[3]: /wiki_rules
12
+
13
+For example, this document is written in Markdown. The following
14
+is a sample Pikchr diagram:
15
+
16
+``` pikchr
17
+arrow right 200% "Markdown" "Source"
18
+box rad 10px "Markdown" "Formatter" "(markdown.c)" fit
19
+arrow right 200% "HTML+SVG" "Output"
20
+arrow <-> down 70% from last box.s
21
+box same "Pikchr" "Formatter" "(pikchr.c)" fit
22
+```
23
+
24
+The diagram above was ``` pikchr
25
+ matter" "(markdownMarkdown" "Source"
26
+ ram:
27
+
28
+``` pikchr
29
+arrow right matter" "( t 200% "HTML+SVG" "Output"
30
+arrow <- HTML+SVG" "Output"
31
+a ormatter" "(pikchr.c)" fit
32
+```
33
+~~~~~
34
+
35
+See the [original Markdown source text of this document][4] for an
36
+example of Pikchr in operation.
37
+
38
+[4]: ./pikchr.md?mimetype=text/plain
39
+
40
+Fossil allows Pikchr diagrams to appear anywhere that Markdown or
41
+Fossil Wiki markup are used, including:
42
+
43
+ * [Embedded documentation](./embeddeddoc.wiki)
44
+ * Stand-alone wiki pages
45
+ * [Wiki pages associated with particular branches or check-ins](./wikitheory.wiki#assocwiki)
46
+ * Check-in comments
47
+ * [Technical notes](./event.wiki)
48
+ * [Forum posts](./forum.wiki)
49
+ * [Bug reports and trouble tickets](./bugtheory.wiki)
50
+
51
+## Pikchr Is A Separate Project
52
+
53
+Even though the original author of Pikchr is the same as the original
54
+creator of Fossil, the sources to the Pikchr formatter are maintained
55
+as a [separate project named "pikchr.org"](https://pikchr.org).
56
+Pikchr is delivered as a single file of C code. The "pikchr.c" file
57
+from the Pikchr project is periodically copied into the Fossil source
58
+tree. Pikchr is maintained as a project distinct from Fossil so that it
59
+can be used independently of Fossil.
60
+
61
+### Pikchr User Manual And Tutorials
62
+
63
+Complete documentation on the Pikchr language can be found on the
64
+Pikchr project page:
65
+
66
+ * <https://pikchr.org/>
67
+
68
+That website contains a user manual, tutorials, a language specification,
69
+a summary of differences between Pikchr and legacy PIC,
70
+and it hosts copies of historical PIC documentation.
71
+
72
+## How To Include Pikchr Diagrams In Fossil Documents
73
+
74
+To illustrate how to include Pikchr in Fossil markup, we will use the
75
+following one-line Pikchr. Click to see the code:
76
+
77
+~~~ pikchr toggle
78
+arrow; box "Hello" "World!" fit; arrow
79
+~~~
80
+
81
+For Markdown, the Pikchr code is put inside of a
82
+[fenced code block][fcb]. A fenced code block is the text in between
83
+&#96;&#96;&#96; ... &#96;&#96;&#96; or between
84
+&#126;&#126;&#126; ... &#126;&#126;&#126; using three or
85
+more &#96; or &#126; characters. The fenced code block normally
86
+displays its content verbatim, but if an "info string" of "pikchr"
87
+follows the opening &#96;&#96;&#96; or &#126;&#126;&#126;, then the
88
+content is interpreted as Pikchr script and is replaced by the
89
+equivalent SVG.
90
+So either of these work:
91
+
92
+[fcb]: https://spec.commonmark.org/0.29/# ~~~ pikchr
93
+ posts](./for ~~~
94
+
95
+ ``` pikchr
96
+ posts](./for heory.wiki)
97
+
98
+## Pikchr Is A Separate Project
99
+
100
+Even though the original author of Pikchr is the same as the original
101
+creator of Fossil, the sources to the Pikchr formatter are maintained
102
+as a [separate project named "pikchr.org"](https://pikchr.org).
103
+Pikchr is a deliver de. The "pikchr.c" file
104
+ posts](./for copied into the Fossil source
105
+tree. Pikchr is maintained as a project distinct from Fossil so that it
106
+can be used independently of Fossil.
107
+
108
+### Pikchr User Manual And Tutorials
109
+
110
+Complete documentation on the Pikchr language can be found on the
111
+Pikchr project page:
112
+
113
+ * <https://pikchr.org/>
114
+
115
+That website contains a user manual, tutorials, a language specification,
116
+a summary of differences between Pikchr and legacy PIC,
117
+and it hosts copies of historical PIC documentation.
118
+
119
+## How To Include Pikchr Diagrams In Fossil Documents
120
+
121
+To illustrate how to include Pikchr in Fossil markup, we will use the
122
+following one-line Pikchr. Click to see the code:
123
+
124
+~~~ pikchr toggle
125
+arrow; box "Hello" "World!" fit; arrow
126
+~~~
127
+
128
+For Markdown, the Pikchr code is put inside of a
129
+[fenced code block][fcb]. A fenced code block is the text in between
130
+&#96;&#96;&#96; ... &#96;&#96;&#96; or between
131
+&#126;&#126;&#126; ... &#126;&#126;&#126; using three or
132
+more &#96; or &#126; characters. The fenced code block normally
133
+displays its content verbatim, but
--- a/www/pikchr.md
+++ b/www/pikchr.md
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/www/pikchr.md
+++ b/www/pikchr.md
@@ -0,0 +1,133 @@
1 # The Pikchr Diagram Language
2
3 Pikchr (pronounced "picture") is a [PIC][1]-like markup language for creating
4 diagrams in technical documentation. Pikchr diagrams source texte text for Pikchr diagrams
5 can be embedded directly in either [Markdown][2] or [Fossil Wiki][3].
6 Fossil translates the Pikchr source text into SVG which is displayed as
7 part of the rendered wiki.
8
9 [1]: wikipedia:/wiki/Pic_language
10 [2]: /md_rules
11 [3]: /wiki_rules
12
13 For example, this document is written in Markdown. The following
14 is a sample Pikchr diagram:
15
16 ``` pikchr
17 arrow right 200% "Markdown" "Source"
18 box rad 10px "Markdown" "Formatter" "(markdown.c)" fit
19 arrow right 200% "HTML+SVG" "Output"
20 arrow <-> down 70% from last box.s
21 box same "Pikchr" "Formatter" "(pikchr.c)" fit
22 ```
23
24 The diagram above was ``` pikchr
25 matter" "(markdownMarkdown" "Source"
26 ram:
27
28 ``` pikchr
29 arrow right matter" "( t 200% "HTML+SVG" "Output"
30 arrow <- HTML+SVG" "Output"
31 a ormatter" "(pikchr.c)" fit
32 ```
33 ~~~~~
34
35 See the [original Markdown source text of this document][4] for an
36 example of Pikchr in operation.
37
38 [4]: ./pikchr.md?mimetype=text/plain
39
40 Fossil allows Pikchr diagrams to appear anywhere that Markdown or
41 Fossil Wiki markup are used, including:
42
43 * [Embedded documentation](./embeddeddoc.wiki)
44 * Stand-alone wiki pages
45 * [Wiki pages associated with particular branches or check-ins](./wikitheory.wiki#assocwiki)
46 * Check-in comments
47 * [Technical notes](./event.wiki)
48 * [Forum posts](./forum.wiki)
49 * [Bug reports and trouble tickets](./bugtheory.wiki)
50
51 ## Pikchr Is A Separate Project
52
53 Even though the original author of Pikchr is the same as the original
54 creator of Fossil, the sources to the Pikchr formatter are maintained
55 as a [separate project named "pikchr.org"](https://pikchr.org).
56 Pikchr is delivered as a single file of C code. The "pikchr.c" file
57 from the Pikchr project is periodically copied into the Fossil source
58 tree. Pikchr is maintained as a project distinct from Fossil so that it
59 can be used independently of Fossil.
60
61 ### Pikchr User Manual And Tutorials
62
63 Complete documentation on the Pikchr language can be found on the
64 Pikchr project page:
65
66 * <https://pikchr.org/>
67
68 That website contains a user manual, tutorials, a language specification,
69 a summary of differences between Pikchr and legacy PIC,
70 and it hosts copies of historical PIC documentation.
71
72 ## How To Include Pikchr Diagrams In Fossil Documents
73
74 To illustrate how to include Pikchr in Fossil markup, we will use the
75 following one-line Pikchr. Click to see the code:
76
77 ~~~ pikchr toggle
78 arrow; box "Hello" "World!" fit; arrow
79 ~~~
80
81 For Markdown, the Pikchr code is put inside of a
82 [fenced code block][fcb]. A fenced code block is the text in between
83 &#96;&#96;&#96; ... &#96;&#96;&#96; or between
84 &#126;&#126;&#126; ... &#126;&#126;&#126; using three or
85 more &#96; or &#126; characters. The fenced code block normally
86 displays its content verbatim, but if an "info string" of "pikchr"
87 follows the opening &#96;&#96;&#96; or &#126;&#126;&#126;, then the
88 content is interpreted as Pikchr script and is replaced by the
89 equivalent SVG.
90 So either of these work:
91
92 [fcb]: https://spec.commonmark.org/0.29/# ~~~ pikchr
93 posts](./for ~~~
94
95 ``` pikchr
96 posts](./for heory.wiki)
97
98 ## Pikchr Is A Separate Project
99
100 Even though the original author of Pikchr is the same as the original
101 creator of Fossil, the sources to the Pikchr formatter are maintained
102 as a [separate project named "pikchr.org"](https://pikchr.org).
103 Pikchr is a deliver de. The "pikchr.c" file
104 posts](./for copied into the Fossil source
105 tree. Pikchr is maintained as a project distinct from Fossil so that it
106 can be used independently of Fossil.
107
108 ### Pikchr User Manual And Tutorials
109
110 Complete documentation on the Pikchr language can be found on the
111 Pikchr project page:
112
113 * <https://pikchr.org/>
114
115 That website contains a user manual, tutorials, a language specification,
116 a summary of differences between Pikchr and legacy PIC,
117 and it hosts copies of historical PIC documentation.
118
119 ## How To Include Pikchr Diagrams In Fossil Documents
120
121 To illustrate how to include Pikchr in Fossil markup, we will use the
122 following one-line Pikchr. Click to see the code:
123
124 ~~~ pikchr toggle
125 arrow; box "Hello" "World!" fit; arrow
126 ~~~
127
128 For Markdown, the Pikchr code is put inside of a
129 [fenced code block][fcb]. A fenced code block is the text in between
130 &#96;&#96;&#96; ... &#96;&#96;&#96; or between
131 &#126;&#126;&#126; ... &#126;&#126;&#126; using three or
132 more &#96; or &#126; characters. The fenced code block normally
133 displays its content verbatim, but
+167 -15
--- www/quickstart.wiki
+++ www/quickstart.wiki
@@ -1,26 +1,34 @@
11
<title>Fossil Quick Start Guide</title>
22
<h1 align="center">Fossil Quick Start</h1>
33
4
-<p>This is a guide to help you get started using Fossil quickly
4
+<p>This is a guide to help you get started using the Fossil [https://en.wikipedia.org/wiki/Distributed_version_control|Distributed Version Control System] quickly
55
and painlessly.</p>
66
77
<h2 id="install">Installing</h2>
88
99
<p>Fossil is a single self-contained C program. You need to
1010
either download a
11
- <a href="https://www.fossil-scm.org/fossil/uv/download.html">precompiled
12
- binary</a>
11
+ [https://www.fossil-scm.org/fossil/uv/download.html|precompiled
12
+ binary]
1313
or <a href="build.wiki">compile it yourself</a> from sources.
1414
Install Fossil by putting the fossil binary
1515
someplace on your $PATH.</p>
16
+ You can test that Fossil is present and working like this:
17
+
18
+ <blockquote>
19
+ <b>
20
+ fossil version<br>
21
+ <tt>This is fossil version 2.13 [309af345ab] 2020-09-28 04:02:55 UTC</tt><br>
22
+ </b>
23
+ </blockquote>
1624
1725
<h2 id="workflow" name="fslclone">General Work Flow</h2>
1826
19
- <p>Fossil works with repository files (a database with the project's
27
+ <p>Fossil works with repository files (a database in a single file with the project's
2028
complete history) and with checked-out local trees (the working directory
21
- you use to do your work).
29
+ you use to do your work).
2230
(See [./whyusefossil.wiki#definitions | definitions] for more background.)
2331
The workflow looks like this:</p>
2432
2533
<ul>
2634
<li>Create or clone a repository file. ([/help/init|fossil init] or
@@ -28,21 +36,28 @@
2836
<li>Check out a local tree. ([/help/open | fossil open])
2937
<li>Perform operations on the repository (including repository
3038
configuration).
3139
</ul>
3240
33
- <p>The following sections will give you a brief overview of these
41
+ Fossil can be entirely driven from the command line. Many features
42
+ can also be conveniently accessed from the built-in web interface.
43
+
44
+ <p>The following sections give a brief overview of these
3445
operations.</p>
3546
3647
<h2 id="new">Starting A New Project</h2>
3748
38
- <p>To start a new project with fossil, create a new empty repository
49
+ <p>To start a new project with fossil create a new empty repository
3950
this way: ([/help/init | more info]) </p>
4051
4152
<blockquote>
4253
<b>fossil init </b><i> repository-filename</i>
4354
</blockquote>
55
+
56
+ You can name the database anything you like, and you can place it anywhere in the filesystem.
57
+ The <tt>.fossil</tt> extension is traditional but only required if you are going to use the
58
+ <tt>[./help?cmd=/server | fossil server DIRECTORY]</tt> feature.
4459
4560
<h2 id="clone">Cloning An Existing Repository</h2>
4661
4762
<p>Most fossil operations interact with a repository that is on the
4863
local disk drive, not on a remote system. Hence, before accessing
@@ -57,46 +72,72 @@
5772
</blockquote>
5873
5974
<p>The <i>URL</i> specifies the fossil repository
6075
you want to clone. The <i>repository-filename</i> is the new local
6176
filename into which the cloned repository will be written. For
62
- example:
77
+ example, to clone the source code of Fossil itself:
78
+
79
+ <blockquote>
80
+ <b>fossil clone https://www.fossil-scm.org/ myclone.fossil</b>
81
+ </blockquote>
82
+
83
+ If your logged-in username is 'exampleuser', you should see output something like this:
6384
6485
<blockquote>
65
- <b>fossil clone http://www.fossil-scm.org/ myclone.fossil</b>
86
+ <b><tt>
87
+ Round-trips: 8 Artifacts sent: 0 received: 39421<br>
88
+ Clone done, sent: 2424 received: 42965725 ip: 10.10.10.0<br>
89
+ Rebuilding repository meta-data...<br>
90
+ 100% complete...<br>
91
+ Extra delta compression... <br>
92
+ Vacuuming the database... <br>
93
+ project-id: 94259BB9F186226D80E49D1FA2DB29F935CCA0333<br>
94
+ server-id: 016595e9043054038a9ea9bc526d7f33f7ac0e42<br>
95
+ admin-user: exampleuser (password is "yoWgDR42iv")><br>
96
+ </tt></b>
6697
</blockquote>
6798
6899
<p>If the remote repository requires a login, include a
69100
userid in the URL like this:
70101
71102
<blockquote>
72
- <b>fossil clone http://</b><i>userid</i><b>@www.fossil-scm.org/ myclone.fossil</b>
103
+ <b>fossil clone https://</b><i>remoteuserid</i><b>@www.example.org/ myclone.fossil</b>
73104
</blockquote>
74
-
75105
76106
<p>You will be prompted separately for the password.
77
- Use "%HH" escapes for special characters in the userid.
78
- Examples: "%40" in place of "@" and "%2F" in place of "/".
107
+ Use [https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters|"%HH"] escapes for special characters in the userid.
108
+ For example "/" would be replaced by "%2F" meaning that a userid of "Projects/Budget" would become "Projects%2FBudget") </p>
79109
80110
<p>If you are behind a restrictive firewall, you might need
81111
to <a href="#proxy">specify an HTTP proxy</a>.</p>
82112
83113
<p>A Fossil repository is a single disk file. Instead of cloning,
84114
you can just make a copy of the repository file (for example, using
85115
"scp"). Note, however, that the repository file contains auxiliary
86116
information above and beyond the versioned files, including some
87117
sensitive information such as password hashes and email addresses. If you
88
- want to share Fossil repositories directly, consider running the
118
+ want to share Fossil repositories directly by copying, consider running the
89119
[/help/scrub|fossil scrub] command to remove sensitive information
90120
before transmitting the file.
91121
92122
<h2 id="import">Importing From Another Version Control System</h2>
93123
94124
<p>Rather than start a new project, or clone an existing Fossil project,
95125
you might prefer to
96126
<a href="./inout.wiki">import an existing Git project</a>
97
- into Fossil using the [/help/import | fossil import] command.
127
+ into Fossil using the [/help/import | fossil import] command.
128
+
129
+ You can even decide to export your project back into git using the
130
+ [/help/git | fossil git] command, which is how the Fossil project maintains
131
+ [https://github.com/drhsqlite/fossil-mirror | its public GitHub mirror]. There
132
+ is no limit to the number of times a tree can be imported and exported between
133
+ Fossil and git.
134
+
135
+ The [https://git-scm.com/docs/git-fast-export|Git fast-export format] has become
136
+ a popular way to move files between version management systems, including from
137
+ [https://www.mercurial-scm.org/|Mercurial].
138
+ Fossil can also import [https://subversion.apache.org/|Subversion projects] directly.
98139
99140
<h2 id="checkout">Checking Out A Local Tree</h2>
100141
101142
<p>To work on a project in fossil, you need to check out a local
102143
copy of the source tree. Create the directory you want to be
@@ -105,10 +146,24 @@
105146
106147
<blockquote>
107148
<b>fossil open </b><i> repository-filename</i>
108149
</blockquote>
109150
151
+ for example:
152
+
153
+ <blockquote>
154
+ <b><tt>
155
+ fossil open ../myclone.fossil<br>
156
+ BUILD.txt<br>
157
+ COPYRIGHT-BSD2.txt<br>
158
+ README.md<br>
159
+ ︙<br>
160
+ </tt></b>
161
+ </blockquote>
162
+
163
+ (or "fossil open ..\myclone.fossil" on Windows).
164
+
110165
<p>This leaves you with the newest version of the tree
111166
checked out.
112167
From anywhere underneath the root of your local tree, you
113168
can type commands like the following to find out the status of
114169
your local tree:</p>
@@ -120,10 +175,13 @@
120175
<b>[/help/diff | fossil diff]</b><br>
121176
<b>[/help/timeline | fossil timeline]</b><br>
122177
<b>[/help/ls | fossil ls]</b><br>
123178
<b>[/help/branch | fossil branch]</b><br>
124179
</blockquote>
180
+
181
+ <p>If you created a new respository using "fossil init" some commands will not
182
+ produce much output.</p>
125183
126184
<p>Note that Fossil allows you to make multiple check-outs in
127185
separate directories from the same repository. This enables you,
128186
for example, to do builds from multiple branches or versions at
129187
the same time without having to generate extra clones.</p>
@@ -139,10 +197,104 @@
139197
<p>[/help/update | update] honors the "autosync" option and
140198
does a "soft" switch, merging any local changes into the target
141199
version, whereas [/help/checkout | checkout] does not
142200
automatically sync and does a "hard" switch, overwriting local
143201
changes if told to do so.</p>
202
+
203
+<h2 id="changes">Making and Committing Changes</h2>
204
+
205
+ <p>To add new files to your project or remove existing ones, use these
206
+ commands:</p>
207
+
208
+ <blockquote>
209
+ <b>[/help/add | fossil add]</b> <i>file...</i><br>
210
+ <b>[/help/rm | fossil rm]</b> <i>file...</i><br>
211
+ <b>[/help/addremove | fossil addremove]</b> <i>file...</i><br>
212
+ </blockquote>
213
+
214
+ <p>The command:</p>
215
+ <blockquote>
216
+ <b>
217
+ [/help/changes | fossil changes]</b>
218
+ </blockquote>
219
+ <p>lists files that have changed since the last commit to the repository. For
220
+ example, if you edit the file "README.md":</p>
221
+
222
+ <blockquote>
223
+ <b>
224
+ fossil changes<br>
225
+ EDITED README.md
226
+ </b>
227
+ </blockquote>
228
+
229
+ <p>To see exactly what change was made you can use the command</p>
230
+ [/help/diff | fossil diff]:
231
+ <blockquote>
232
+ <b>
233
+ fossil diff <br><tt>
234
+ Index: README.md<br>
235
+ ============================================================<br>
236
+ --- README.md<br>
237
+ +++ README.md<br>
238
+ @@ -1,5 +1,6 @@<br>
239
+ +Made some changes to the project<br>
240
+ # Original text<br>
241
+ </tt></b>
242
+ </blockquote>
243
+
244
+ <p>"fossil diff" is the difference between your tree on disk now and as the tree was
245
+ when you did "fossil open". An open is the first checkout from a repository
246
+ into a new directory. </p>
247
+
248
+ <p>To commit your changes to a local-only repository:</p>
249
+ <blockquote>
250
+ <b>
251
+ fossil commit </b><i>(... Fossil will start your editor, if defined)</i><b><br><tt>
252
+ # Enter a commit message for this check-in. Lines beginning with # are ignored.<br>
253
+ #<br>
254
+ # user: exampleuser<br>
255
+ # tags: trunk<br>
256
+ #<br>
257
+ # EDITED README.md<br>
258
+ Edited file to add description of code changes<br>
259
+ New_Version: 7b9a416ced4a69a60589dde1aedd1a30fde8eec3528d265dbeed5135530440ab<br>
260
+ </tt></b>
261
+ </blockquote>
262
+
263
+ <p>You will be prompted for check-in comments using whatever editor
264
+ is specified by your VISUAL or EDITOR environment variable. If none is
265
+ specified Fossil uses line-editing in the terminal.</p>
266
+
267
+ <p>To commit your changes to a repository that was cloned from remote you
268
+ perform the same actions but the results are different. Fossil
269
+ defaults to 'autosync' mode, a single-stage commit that sends all changes
270
+ committed to the local repository immediately on to the remote parent repository. This
271
+ only works if you have write permission to the remote repository.</p>
272
+
273
+<h2 id="naming">Naming of Files, Checkins, and Branches</h2>
274
+
275
+ <p>Fossil deals with information artifacts. This Quickstart document only deals
276
+ with files and collections of files, but be aware there are also tickets, wiki pages and more.
277
+ Every artifact in Fossil has a universally-unique hash id, and may also have a
278
+ human-readable name.</p>
279
+
280
+ <p>The following are all equivalent ways of identifying a Fossil file,
281
+ checkin or branch artifact:</p>
282
+
283
+ <ul>
284
+ <li> the full unique SHA-256 hash, such as be836de35a821523beac2e53168e135d5ebd725d7af421e5f736a28e8034673a
285
+ <li> an abbreviated hash prefix, such as the first ten characters: be836de35a . This won't be universally unique, but it is usually unique within any one repository. As an example, the [https://fossil-scm.org/home/hash-collisions|Fossil project hash collisions] showed at the time of writing that there are no artifacts with identical first 8 characters
286
+ <li> a branch name, such as "special-features" or "juliet-testing". Each branch also has a unique SHA-256 hash
287
+ </ul>
288
+
289
+ <p>A special convenience branch is "trunk", which is Fossil's default branch name for
290
+ the first checkin, and the default for any time a branch name is needed but not
291
+ specified.</p>
292
+
293
+ This will get you started on identifying checkins. The
294
+ <a href="./checkin_names.wiki">Checkin Names document</a> is a complete reference, including
295
+ how timestamps can also be used.
144296
145297
<h2 id="config">Configuring Your Local Repository</h2>
146298
147299
<p>When you create a new repository, either by cloning an existing
148300
project or create a new project of your own, you usually want to do some
149301
--- www/quickstart.wiki
+++ www/quickstart.wiki
@@ -1,26 +1,34 @@
1 <title>Fossil Quick Start Guide</title>
2 <h1 align="center">Fossil Quick Start</h1>
3
4 <p>This is a guide to help you get started using Fossil quickly
5 and painlessly.</p>
6
7 <h2 id="install">Installing</h2>
8
9 <p>Fossil is a single self-contained C program. You need to
10 either download a
11 <a href="https://www.fossil-scm.org/fossil/uv/download.html">precompiled
12 binary</a>
13 or <a href="build.wiki">compile it yourself</a> from sources.
14 Install Fossil by putting the fossil binary
15 someplace on your $PATH.</p>
 
 
 
 
 
 
 
 
16
17 <h2 id="workflow" name="fslclone">General Work Flow</h2>
18
19 <p>Fossil works with repository files (a database with the project's
20 complete history) and with checked-out local trees (the working directory
21 you use to do your work).
22 (See [./whyusefossil.wiki#definitions | definitions] for more background.)
23 The workflow looks like this:</p>
24
25 <ul>
26 <li>Create or clone a repository file. ([/help/init|fossil init] or
@@ -28,21 +36,28 @@
28 <li>Check out a local tree. ([/help/open | fossil open])
29 <li>Perform operations on the repository (including repository
30 configuration).
31 </ul>
32
33 <p>The following sections will give you a brief overview of these
 
 
 
34 operations.</p>
35
36 <h2 id="new">Starting A New Project</h2>
37
38 <p>To start a new project with fossil, create a new empty repository
39 this way: ([/help/init | more info]) </p>
40
41 <blockquote>
42 <b>fossil init </b><i> repository-filename</i>
43 </blockquote>
 
 
 
 
44
45 <h2 id="clone">Cloning An Existing Repository</h2>
46
47 <p>Most fossil operations interact with a repository that is on the
48 local disk drive, not on a remote system. Hence, before accessing
@@ -57,46 +72,72 @@
57 </blockquote>
58
59 <p>The <i>URL</i> specifies the fossil repository
60 you want to clone. The <i>repository-filename</i> is the new local
61 filename into which the cloned repository will be written. For
62 example:
 
 
 
 
 
 
63
64 <blockquote>
65 <b>fossil clone http://www.fossil-scm.org/ myclone.fossil</b>
 
 
 
 
 
 
 
 
 
 
66 </blockquote>
67
68 <p>If the remote repository requires a login, include a
69 userid in the URL like this:
70
71 <blockquote>
72 <b>fossil clone http://</b><i>userid</i><b>@www.fossil-scm.org/ myclone.fossil</b>
73 </blockquote>
74
75
76 <p>You will be prompted separately for the password.
77 Use "%HH" escapes for special characters in the userid.
78 Examples: "%40" in place of "@" and "%2F" in place of "/".
79
80 <p>If you are behind a restrictive firewall, you might need
81 to <a href="#proxy">specify an HTTP proxy</a>.</p>
82
83 <p>A Fossil repository is a single disk file. Instead of cloning,
84 you can just make a copy of the repository file (for example, using
85 "scp"). Note, however, that the repository file contains auxiliary
86 information above and beyond the versioned files, including some
87 sensitive information such as password hashes and email addresses. If you
88 want to share Fossil repositories directly, consider running the
89 [/help/scrub|fossil scrub] command to remove sensitive information
90 before transmitting the file.
91
92 <h2 id="import">Importing From Another Version Control System</h2>
93
94 <p>Rather than start a new project, or clone an existing Fossil project,
95 you might prefer to
96 <a href="./inout.wiki">import an existing Git project</a>
97 into Fossil using the [/help/import | fossil import] command.
 
 
 
 
 
 
 
 
 
 
 
98
99 <h2 id="checkout">Checking Out A Local Tree</h2>
100
101 <p>To work on a project in fossil, you need to check out a local
102 copy of the source tree. Create the directory you want to be
@@ -105,10 +146,24 @@
105
106 <blockquote>
107 <b>fossil open </b><i> repository-filename</i>
108 </blockquote>
109
 
 
 
 
 
 
 
 
 
 
 
 
 
 
110 <p>This leaves you with the newest version of the tree
111 checked out.
112 From anywhere underneath the root of your local tree, you
113 can type commands like the following to find out the status of
114 your local tree:</p>
@@ -120,10 +175,13 @@
120 <b>[/help/diff | fossil diff]</b><br>
121 <b>[/help/timeline | fossil timeline]</b><br>
122 <b>[/help/ls | fossil ls]</b><br>
123 <b>[/help/branch | fossil branch]</b><br>
124 </blockquote>
 
 
 
125
126 <p>Note that Fossil allows you to make multiple check-outs in
127 separate directories from the same repository. This enables you,
128 for example, to do builds from multiple branches or versions at
129 the same time without having to generate extra clones.</p>
@@ -139,10 +197,104 @@
139 <p>[/help/update | update] honors the "autosync" option and
140 does a "soft" switch, merging any local changes into the target
141 version, whereas [/help/checkout | checkout] does not
142 automatically sync and does a "hard" switch, overwriting local
143 changes if told to do so.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
145 <h2 id="config">Configuring Your Local Repository</h2>
146
147 <p>When you create a new repository, either by cloning an existing
148 project or create a new project of your own, you usually want to do some
149
--- www/quickstart.wiki
+++ www/quickstart.wiki
@@ -1,26 +1,34 @@
1 <title>Fossil Quick Start Guide</title>
2 <h1 align="center">Fossil Quick Start</h1>
3
4 <p>This is a guide to help you get started using the Fossil [https://en.wikipedia.org/wiki/Distributed_version_control|Distributed Version Control System] quickly
5 and painlessly.</p>
6
7 <h2 id="install">Installing</h2>
8
9 <p>Fossil is a single self-contained C program. You need to
10 either download a
11 [https://www.fossil-scm.org/fossil/uv/download.html|precompiled
12 binary]
13 or <a href="build.wiki">compile it yourself</a> from sources.
14 Install Fossil by putting the fossil binary
15 someplace on your $PATH.</p>
16 You can test that Fossil is present and working like this:
17
18 <blockquote>
19 <b>
20 fossil version<br>
21 <tt>This is fossil version 2.13 [309af345ab] 2020-09-28 04:02:55 UTC</tt><br>
22 </b>
23 </blockquote>
24
25 <h2 id="workflow" name="fslclone">General Work Flow</h2>
26
27 <p>Fossil works with repository files (a database in a single file with the project's
28 complete history) and with checked-out local trees (the working directory
29 you use to do your work).
30 (See [./whyusefossil.wiki#definitions | definitions] for more background.)
31 The workflow looks like this:</p>
32
33 <ul>
34 <li>Create or clone a repository file. ([/help/init|fossil init] or
@@ -28,21 +36,28 @@
36 <li>Check out a local tree. ([/help/open | fossil open])
37 <li>Perform operations on the repository (including repository
38 configuration).
39 </ul>
40
41 Fossil can be entirely driven from the command line. Many features
42 can also be conveniently accessed from the built-in web interface.
43
44 <p>The following sections give a brief overview of these
45 operations.</p>
46
47 <h2 id="new">Starting A New Project</h2>
48
49 <p>To start a new project with fossil, create a new empty repository
50 this way: ([/help/init | more info]) </p>
51
52 <blockquote>
53 <b>fossil init </b><i> repository-filename</i>
54 </blockquote>
55
56 You can name the database anything you like, and you can place it anywhere in the filesystem.
57 The <tt>.fossil</tt> extension is traditional but only required if you are going to use the
58 <tt>[./help?cmd=/server | fossil server DIRECTORY]</tt> feature.
59
60 <h2 id="clone">Cloning An Existing Repository</h2>
61
62 <p>Most fossil operations interact with a repository that is on the
63 local disk drive, not on a remote system. Hence, before accessing
@@ -57,46 +72,72 @@
72 </blockquote>
73
74 <p>The <i>URL</i> specifies the fossil repository
75 you want to clone. The <i>repository-filename</i> is the new local
76 filename into which the cloned repository will be written. For
77 example, to clone the source code of Fossil itself:
78
79 <blockquote>
80 <b>fossil clone https://www.fossil-scm.org/ myclone.fossil</b>
81 </blockquote>
82
83 If your logged-in username is 'exampleuser', you should see output something like this:
84
85 <blockquote>
86 <b><tt>
87 Round-trips: 8 Artifacts sent: 0 received: 39421<br>
88 Clone done, sent: 2424 received: 42965725 ip: 10.10.10.0<br>
89 Rebuilding repository meta-data...<br>
90 100% complete...<br>
91 Extra delta compression... <br>
92 Vacuuming the database... <br>
93 project-id: 94259BB9F186226D80E49D1FA2DB29F935CCA0333<br>
94 server-id: 016595e9043054038a9ea9bc526d7f33f7ac0e42<br>
95 admin-user: exampleuser (password is "yoWgDR42iv")><br>
96 </tt></b>
97 </blockquote>
98
99 <p>If the remote repository requires a login, include a
100 userid in the URL like this:
101
102 <blockquote>
103 <b>fossil clone https://</b><i>remoteuserid</i><b>@www.example.org/ myclone.fossil</b>
104 </blockquote>
 
105
106 <p>You will be prompted separately for the password.
107 Use [https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters|"%HH"] escapes for special characters in the userid.
108 For example "/" would be replaced by "%2F" meaning that a userid of "Projects/Budget" would become "Projects%2FBudget" </p>
109
110 <p>If you are behind a restrictive firewall, you might need
111 to <a href="#proxy">specify an HTTP proxy</a>.</p>
112
113 <p>A Fossil repository is a single disk file. Instead of cloning,
114 you can just make a copy of the repository file (for example, using
115 "scp"). Note, however, that the repository file contains auxiliary
116 information above and beyond the versioned files, including some
117 sensitive information such as password hashes and email addresses. If you
118 want to share Fossil repositories directly by copying, consider running the
119 [/help/scrub|fossil scrub] command to remove sensitive information
120 before transmitting the file.
121
122 <h2 id="import">Importing From Another Version Control System</h2>
123
124 <p>Rather than start a new project, or clone an existing Fossil project,
125 you might prefer to
126 <a href="./inout.wiki">import an existing Git project</a>
127 into Fossil using the [/help/import | fossil import] command.
128
129 You can even decide to export your project back into git using the
130 [/help/git | fossil git] command, which is how the Fossil project maintains
131 [https://github.com/drhsqlite/fossil-mirror | its public GitHub mirror]. There
132 is no limit to the number of times a tree can be imported and exported between
133 Fossil and git.
134
135 The [https://git-scm.com/docs/git-fast-export|Git fast-export format] has become
136 a popular way to move files between version management systems, including from
137 [https://www.mercurial-scm.org/|Mercurial].
138 Fossil can also import [https://subversion.apache.org/|Subversion projects] directly.
139
140 <h2 id="checkout">Checking Out A Local Tree</h2>
141
142 <p>To work on a project in fossil, you need to check out a local
143 copy of the source tree. Create the directory you want to be
@@ -105,10 +146,24 @@
146
147 <blockquote>
148 <b>fossil open </b><i> repository-filename</i>
149 </blockquote>
150
151 for example:
152
153 <blockquote>
154 <b><tt>
155 fossil open ../myclone.fossil<br>
156 BUILD.txt<br>
157 COPYRIGHT-BSD2.txt<br>
158 README.md<br>
159 ︙<br>
160 </tt></b>
161 </blockquote>
162
163 (or "fossil open ..\myclone.fossil" on Windows).
164
165 <p>This leaves you with the newest version of the tree
166 checked out.
167 From anywhere underneath the root of your local tree, you
168 can type commands like the following to find out the status of
169 your local tree:</p>
@@ -120,10 +175,13 @@
175 <b>[/help/diff | fossil diff]</b><br>
176 <b>[/help/timeline | fossil timeline]</b><br>
177 <b>[/help/ls | fossil ls]</b><br>
178 <b>[/help/branch | fossil branch]</b><br>
179 </blockquote>
180
181 <p>If you created a new repository using "fossil init" some commands will not
182 produce much output.</p>
183
184 <p>Note that Fossil allows you to make multiple check-outs in
185 separate directories from the same repository. This enables you,
186 for example, to do builds from multiple branches or versions at
187 the same time without having to generate extra clones.</p>
@@ -139,10 +197,104 @@
197 <p>[/help/update | update] honors the "autosync" option and
198 does a "soft" switch, merging any local changes into the target
199 version, whereas [/help/checkout | checkout] does not
200 automatically sync and does a "hard" switch, overwriting local
201 changes if told to do so.</p>
202
203 <h2 id="changes">Making and Committing Changes</h2>
204
205 <p>To add new files to your project or remove existing ones, use these
206 commands:</p>
207
208 <blockquote>
209 <b>[/help/add | fossil add]</b> <i>file...</i><br>
210 <b>[/help/rm | fossil rm]</b> <i>file...</i><br>
211 <b>[/help/addremove | fossil addremove]</b> <i>file...</i><br>
212 </blockquote>
213
214 <p>The command:</p>
215 <blockquote>
216 <b>
217 [/help/changes | fossil changes]</b>
218 </blockquote>
219 <p>lists files that have changed since the last commit to the repository. For
220 example, if you edit the file "README.md":</p>
221
222 <blockquote>
223 <b>
224 fossil changes<br>
225 EDITED README.md
226 </b>
227 </blockquote>
228
229 <p>To see exactly what change was made you can use the command</p>
230 [/help/diff | fossil diff]:
231 <blockquote>
232 <b>
233 fossil diff <br><tt>
234 Index: README.md<br>
235 ============================================================<br>
236 --- README.md<br>
237 +++ README.md<br>
238 @@ -1,5 +1,6 @@<br>
239 +Made some changes to the project<br>
240 # Original text<br>
241 </tt></b>
242 </blockquote>
243
244 <p>"fossil diff" is the difference between your tree on disk now and as the tree was
245 when you did "fossil open". An open is the first checkout from a repository
246 into a new directory. </p>
247
248 <p>To commit your changes to a local-only repository:</p>
249 <blockquote>
250 <b>
251 fossil commit </b><i>(... Fossil will start your editor, if defined)</i><b><br><tt>
252 # Enter a commit message for this check-in. Lines beginning with # are ignored.<br>
253 #<br>
254 # user: exampleuser<br>
255 # tags: trunk<br>
256 #<br>
257 # EDITED README.md<br>
258 Edited file to add description of code changes<br>
259 New_Version: 7b9a416ced4a69a60589dde1aedd1a30fde8eec3528d265dbeed5135530440ab<br>
260 </tt></b>
261 </blockquote>
262
263 <p>You will be prompted for check-in comments using whatever editor
264 is specified by your VISUAL or EDITOR environment variable. If none is
265 specified Fossil uses line-editing in the terminal.</p>
266
267 <p>To commit your changes to a repository that was cloned from remote you
268 perform the same actions but the results are different. Fossil
269 defaults to 'autosync' mode, a single-stage commit that sends all changes
270 committed to the local repository immediately on to the remote parent repository. This
271 only works if you have write permission to the remote repository.</p>
272
273 <h2 id="naming">Naming of Files, Checkins, and Branches</h2>
274
275 <p>Fossil deals with information artifacts. This Quickstart document only deals
276 with files and collections of files, but be aware there are also tickets, wiki pages and more.
277 Every artifact in Fossil has a universally-unique hash id, and may also have a
278 human-readable name.</p>
279
280 <p>The following are all equivalent ways of identifying a Fossil file,
281 checkin or branch artifact:</p>
282
283 <ul>
284 <li> the full unique SHA-256 hash, such as be836de35a821523beac2e53168e135d5ebd725d7af421e5f736a28e8034673a
285 <li> an abbreviated hash prefix, such as the first ten characters: be836de35a . This won't be universally unique, but it is usually unique within any one repository. As an example, the [https://fossil-scm.org/home/hash-collisions|Fossil project hash collisions] showed at the time of writing that there are no artifacts with identical first 8 characters
286 <li> a branch name, such as "special-features" or "juliet-testing". Each branch also has a unique SHA-256 hash
287 </ul>
288
289 <p>A special convenience branch is "trunk", which is Fossil's default branch name for
290 the first checkin, and the default for any time a branch name is needed but not
291 specified.</p>
292
293 This will get you started on identifying checkins. The
294 <a href="./checkin_names.wiki">Checkin Names document</a> is a complete reference, including
295 how timestamps can also be used.
296
297 <h2 id="config">Configuring Your Local Repository</h2>
298
299 <p>When you create a new repository, either by cloning an existing
300 project or creating a new project of your own, you usually want to do some
301
--- www/shunning.wiki
+++ www/shunning.wiki
@@ -32,12 +32,12 @@
3232
some alternatives that usually suffice, which don't damage the project's
3333
fossil record:
3434
3535
<ul>
3636
<li><p>When a forum post or wiki article is "deleted," what actually
37
- happens is that a new empty version is added to the Fossil
38
- [./blockchain.md | block chain]. The web interface interprets this
37
+ happens is that a new empty version is added to the Fossil repository.
38
+ The web interface interprets this
3939
as "deleted," but the prior version remains available if you go
4040
digging for it.</p></li>
4141
4242
<li><p>When you close a ticket, it's marked in a way that causes it
4343
to not show up in the normal ticket reports. You usually want to
4444
--- www/shunning.wiki
+++ www/shunning.wiki
@@ -32,12 +32,12 @@
32 some alternatives that usually suffice, which don't damage the project's
33 fossil record:
34
35 <ul>
36 <li><p>When a forum post or wiki article is "deleted," what actually
37 happens is that a new empty version is added to the Fossil
38 [./blockchain.md | block chain]. The web interface interprets this
39 as "deleted," but the prior version remains available if you go
40 digging for it.</p></li>
41
42 <li><p>When you close a ticket, it's marked in a way that causes it
43 to not show up in the normal ticket reports. You usually want to
44
--- www/shunning.wiki
+++ www/shunning.wiki
@@ -32,12 +32,12 @@
32 some alternatives that usually suffice, which don't damage the project's
33 fossil record:
34
35 <ul>
36 <li><p>When a forum post or wiki article is "deleted," what actually
37 happens is that a new empty version is added to the Fossil repository.
38 The web interface interprets this
39 as "deleted," but the prior version remains available if you go
40 digging for it.</p></li>
41
42 <li><p>When you close a ticket, it's marked in a way that causes it
43 to not show up in the normal ticket reports. You usually want to
44
--- www/whyusefossil.wiki
+++ www/whyusefossil.wiki
@@ -187,11 +187,11 @@
187187
<li><p>Fossil (and other distributed VCSes like Git and Mercurial, but
188188
not Subversion) represent
189189
the history of a project as a directed acyclic graph (DAG).
190190
<ul>
191191
<li><p>Each check-in is a node in the graph
192
- <li><p>If check-in X is derived from check-in Y then there is
192
+ <li><p>If check-in Y is derived from check-in X then there is
193193
an arc in the graph from node X to node Y.
194194
<li><p>The older check-in (X) is call the "parent" and the newer
195195
check-in (Y) is the "child". The child is derived from
196196
the parent.
197197
</ul>
198198
--- www/whyusefossil.wiki
+++ www/whyusefossil.wiki
@@ -187,11 +187,11 @@
187 <li><p>Fossil (and other distributed VCSes like Git and Mercurial, but
188 not Subversion) represent
189 the history of a project as a directed acyclic graph (DAG).
190 <ul>
191 <li><p>Each check-in is a node in the graph
192 <li><p>If check-in X is derived from check-in Y then there is
193 an arc in the graph from node X to node Y.
194 <li><p>The older check-in (X) is called the "parent" and the newer
195 check-in (Y) is the "child". The child is derived from
196 the parent.
197 </ul>
198
--- www/whyusefossil.wiki
+++ www/whyusefossil.wiki
@@ -187,11 +187,11 @@
187 <li><p>Fossil (and other distributed VCSes like Git and Mercurial, but
188 not Subversion) represent
189 the history of a project as a directed acyclic graph (DAG).
190 <ul>
191 <li><p>Each check-in is a node in the graph
192 <li><p>If check-in Y is derived from check-in X then there is
193 an arc in the graph from node X to node Y.
194 <li><p>The older check-in (X) is called the "parent" and the newer
195 check-in (Y) is the "child". The child is derived from
196 the parent.
197 </ul>
198

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button