Fossil SCM

merge trunk

jan.nijtmans 2012-11-09 07:47 convert_before_commit merge
Commit 49f73889b34596887a4b082201cb50027ace56b7
+1 -1
--- src/attach.c
+++ src/attach.c
@@ -525,11 +525,11 @@
525525
@ </pre>
526526
}
527527
}else if( strncmp(zMime, "image/", 6)==0 ){
528528
@ <img src="%R/raw?name=%s(zSrc)&m=%s(zMime)"></img>
529529
}else{
530
- int sz = db_int(0, "SELECT sz FROM blob WHERE rid=%d", ridSrc);
530
+ int sz = db_int(0, "SELECT size FROM blob WHERE rid=%d", ridSrc);
531531
@ <i>(file is %d(sz) bytes of binary data)</i>
532532
}
533533
@ </blockquote>
534534
manifest_destroy(pAttach);
535535
blob_reset(&attach);
536536
--- src/attach.c
+++ src/attach.c
@@ -525,11 +525,11 @@
525 @ </pre>
526 }
527 }else if( strncmp(zMime, "image/", 6)==0 ){
528 @ <img src="%R/raw?name=%s(zSrc)&m=%s(zMime)"></img>
529 }else{
530 int sz = db_int(0, "SELECT sz FROM blob WHERE rid=%d", ridSrc);
531 @ <i>(file is %d(sz) bytes of binary data)</i>
532 }
533 @ </blockquote>
534 manifest_destroy(pAttach);
535 blob_reset(&attach);
536
--- src/attach.c
+++ src/attach.c
@@ -525,11 +525,11 @@
525 @ </pre>
526 }
527 }else if( strncmp(zMime, "image/", 6)==0 ){
528 @ <img src="%R/raw?name=%s(zSrc)&m=%s(zMime)"></img>
529 }else{
530 int sz = db_int(0, "SELECT size FROM blob WHERE rid=%d", ridSrc);
531 @ <i>(file is %d(sz) bytes of binary data)</i>
532 }
533 @ </blockquote>
534 manifest_destroy(pAttach);
535 blob_reset(&attach);
536
+9
--- src/diff.c
+++ src/diff.c
@@ -989,10 +989,19 @@
989989
}
990990
if( nRight==0 ){
991991
memset(aM, 1, nLeft);
992992
return aM;
993993
}
994
+
995
+ /* This algorithm is O(N**2). So if N is too big, bail out with a
996
+ ** simple (but stupid and ugly) result that doesn't take too long. */
997
+ if( nLeft*nRight>100000 ){
998
+ memset(aM, 3, nRight);
999
+ memset(aM+nRight, 1, nLeft);
1000
+ return aM;
1001
+ }
1002
+
9941003
if( nRight < (sizeof(aBuf)/sizeof(aBuf[0]))-1 ){
9951004
pToFree = 0;
9961005
a = aBuf;
9971006
}else{
9981007
a = pToFree = fossil_malloc( sizeof(a[0])*(nRight+1) );
9991008
--- src/diff.c
+++ src/diff.c
@@ -989,10 +989,19 @@
989 }
990 if( nRight==0 ){
991 memset(aM, 1, nLeft);
992 return aM;
993 }
 
 
 
 
 
 
 
 
 
994 if( nRight < (sizeof(aBuf)/sizeof(aBuf[0]))-1 ){
995 pToFree = 0;
996 a = aBuf;
997 }else{
998 a = pToFree = fossil_malloc( sizeof(a[0])*(nRight+1) );
999
--- src/diff.c
+++ src/diff.c
@@ -989,10 +989,19 @@
989 }
990 if( nRight==0 ){
991 memset(aM, 1, nLeft);
992 return aM;
993 }
994
995 /* This algorithm is O(N**2). So if N is too big, bail out with a
996 ** simple (but stupid and ugly) result that doesn't take too long. */
997 if( nLeft*nRight>100000 ){
998 memset(aM, 3, nRight);
999 memset(aM+nRight, 1, nLeft);
1000 return aM;
1001 }
1002
1003 if( nRight < (sizeof(aBuf)/sizeof(aBuf[0]))-1 ){
1004 pToFree = 0;
1005 a = aBuf;
1006 }else{
1007 a = pToFree = fossil_malloc( sizeof(a[0])*(nRight+1) );
1008
+1 -1
--- src/finfo.c
+++ src/finfo.c
@@ -357,11 +357,11 @@
357357
hyperlink_to_user(zUser, zDate, "");
358358
@ branch: %h(zBr))
359359
if( g.perm.Hyperlink && zUuid ){
360360
const char *z = zFilename;
361361
if( fpid ){
362
- @ %z(href("%R/fdiff?v1=%s&v2=%s",zPUuid,zUuid))[diff]</a>
362
+ @ %z(href("%R/fdiff?v1=%S&v2=%S",zPUuid,zUuid))[diff]</a>
363363
}
364364
@ %z(href("%R/annotate?checkin=%S&filename=%h",zCkin,z))
365365
@ [annotate]</a>
366366
}
367367
@ </td></tr>
368368
--- src/finfo.c
+++ src/finfo.c
@@ -357,11 +357,11 @@
357 hyperlink_to_user(zUser, zDate, "");
358 @ branch: %h(zBr))
359 if( g.perm.Hyperlink && zUuid ){
360 const char *z = zFilename;
361 if( fpid ){
362 @ %z(href("%R/fdiff?v1=%s&v2=%s",zPUuid,zUuid))[diff]</a>
363 }
364 @ %z(href("%R/annotate?checkin=%S&filename=%h",zCkin,z))
365 @ [annotate]</a>
366 }
367 @ </td></tr>
368
--- src/finfo.c
+++ src/finfo.c
@@ -357,11 +357,11 @@
357 hyperlink_to_user(zUser, zDate, "");
358 @ branch: %h(zBr))
359 if( g.perm.Hyperlink && zUuid ){
360 const char *z = zFilename;
361 if( fpid ){
362 @ %z(href("%R/fdiff?v1=%S&v2=%S",zPUuid,zUuid))[diff]</a>
363 }
364 @ %z(href("%R/annotate?checkin=%S&filename=%h",zCkin,z))
365 @ [annotate]</a>
366 }
367 @ </td></tr>
368
+3 -3
--- src/info.c
+++ src/info.c
@@ -617,16 +617,16 @@
617617
@ </td></tr>
618618
}
619619
620620
/* The Download: line */
621621
if( g.perm.Zip ){
622
- char *zUrl = mprintf("%R/tarball/%s-%S.tar.gz?uuid=%s",
622
+ char *zUrl = mprintf("%R/tarball/%t-%S.tar.gz?uuid=%s",
623623
zProjName, zUuid, zUuid);
624624
@ </td></tr>
625625
@ <tr><th>Downloads:</th><td>
626626
@ %z(href("%s",zUrl))Tarball</a>
627
- @ | %z(href("%R/zip/%s-%S.zip?uuid=%s",zProjName,zUuid,zUuid))
627
+ @ | %z(href("%R/zip/%t-%S.zip?uuid=%s",zProjName,zUuid,zUuid))
628628
@ ZIP archive</a>
629629
fossil_free(zUrl);
630630
}
631631
@ </td></tr>
632632
@ <tr><th>Other&nbsp;Links:</th>
@@ -885,11 +885,11 @@
885885
blob_zero(&links);
886886
while( z && z[0] ){
887887
for(i=0; z[i] && (z[i]!=',' || z[i+1]!=' '); i++){}
888888
blob_appendf(&links,
889889
"%z%#h</a>%.2s",
890
- href("%R/timeline?r=%#t&nd&c=%s",i,z,zDate), i,z, &z[i]
890
+ href("%R/timeline?r=%#t&nd&c=%t",i,z,zDate), i,z, &z[i]
891891
);
892892
if( z[i]==0 ) break;
893893
z += i+2;
894894
}
895895
@ tags: %s(blob_str(&links)),
896896
897897
ADDED test/many-www.tcl
--- src/info.c
+++ src/info.c
@@ -617,16 +617,16 @@
617 @ </td></tr>
618 }
619
620 /* The Download: line */
621 if( g.perm.Zip ){
622 char *zUrl = mprintf("%R/tarball/%s-%S.tar.gz?uuid=%s",
623 zProjName, zUuid, zUuid);
624 @ </td></tr>
625 @ <tr><th>Downloads:</th><td>
626 @ %z(href("%s",zUrl))Tarball</a>
627 @ | %z(href("%R/zip/%s-%S.zip?uuid=%s",zProjName,zUuid,zUuid))
628 @ ZIP archive</a>
629 fossil_free(zUrl);
630 }
631 @ </td></tr>
632 @ <tr><th>Other&nbsp;Links:</th>
@@ -885,11 +885,11 @@
885 blob_zero(&links);
886 while( z && z[0] ){
887 for(i=0; z[i] && (z[i]!=',' || z[i+1]!=' '); i++){}
888 blob_appendf(&links,
889 "%z%#h</a>%.2s",
890 href("%R/timeline?r=%#t&nd&c=%s",i,z,zDate), i,z, &z[i]
891 );
892 if( z[i]==0 ) break;
893 z += i+2;
894 }
895 @ tags: %s(blob_str(&links)),
896
897 ADDED test/many-www.tcl
--- src/info.c
+++ src/info.c
@@ -617,16 +617,16 @@
617 @ </td></tr>
618 }
619
620 /* The Download: line */
621 if( g.perm.Zip ){
622 char *zUrl = mprintf("%R/tarball/%t-%S.tar.gz?uuid=%s",
623 zProjName, zUuid, zUuid);
624 @ </td></tr>
625 @ <tr><th>Downloads:</th><td>
626 @ %z(href("%s",zUrl))Tarball</a>
627 @ | %z(href("%R/zip/%t-%S.zip?uuid=%s",zProjName,zUuid,zUuid))
628 @ ZIP archive</a>
629 fossil_free(zUrl);
630 }
631 @ </td></tr>
632 @ <tr><th>Other&nbsp;Links:</th>
@@ -885,11 +885,11 @@
885 blob_zero(&links);
886 while( z && z[0] ){
887 for(i=0; z[i] && (z[i]!=',' || z[i+1]!=' '); i++){}
888 blob_appendf(&links,
889 "%z%#h</a>%.2s",
890 href("%R/timeline?r=%#t&nd&c=%t",i,z,zDate), i,z, &z[i]
891 );
892 if( z[i]==0 ) break;
893 z += i+2;
894 }
895 @ tags: %s(blob_str(&links)),
896
897 ADDED test/many-www.tcl
--- a/test/many-www.tcl
+++ b/test/many-www.tcl
@@ -0,0 +1,78 @@
1
+#!/usr/bin/tclsh
2
+#
3
+# Run this script from within any open Fossil checkout. Example:
4
+#
5
+# tclsh many-www.tcl | tee out.txt
6
+#
7
+# About 10,000 different web page requests will be made. Each is timed
8
+# and the time shown on output. Use this script to search for segfault problems
9
+# or to look for pages that need optimization.
10
+#
11
+proc run_query {url} {
12
+ set fd [open q.txt w]
13
+ puts $fd "GET $url HTTP/1.0\r\n\r"
14
+ close $fd
15
+ return [exec fossil test-http <q.txt]
16
+}
17
+set todo {}
18
+foreach url {
19
+ /home
20
+ /timeline
21
+ /brlist
22
+ /taglist
23
+ /reportlist
24
+ /setup
25
+ /dir
26
+ /wcontent
27
+ /attachlist
28
+ /taglist
29
+ /test_env
30
+ /stat
31
+ /rcvfromlist
32
+ /urllist
33
+ /modreq
34
+ /info/d5c4
35
+ /test-all-help
36
+ /leaves
37
+ /timeline?a=1970-01-01
38
+} {
39
+ set seen($url) 1
40
+ set pending($url) 1
41
+}
42
+set round 1
43
+set limit 25000
44
+set npending [llength [array names pending]]
45
+proc get_pending {} {
46
+ global pending npending round next
47
+ if {$npending==0} {
48
+ incr round
49
+ array set pending [array get next]
50
+ set npending [llength [array names pending]]
51
+ unset -nocomplain next
52
+ }
53
+ set res [lindex [array names pending] [expr {int(rand()*$npending)}]]
54
+ unset pending($res)
55
+ incr npending -1
56
+ return $res
57
+}
58
+for {set i 0} {$i<$limit} {incr i} {
59
+ set url [get_pending]
60
+ puts -nonewline "($round/[expr {$i+1}]) $url "
61
+ flush stdout
62
+ set tm [time {set x [run_query $url]}]
63
+ set ms [lindex $tm 0]
64
+ puts [format {%.3fs} [expr {$ms/1000000.0}]]
65
+ flush stdout
66
+ if {[string length $x]>1000000} {
67
+ set x [string range $x 0 1000000]
68
+ }
69
+ set k 0
70
+ while {[regexp {<[aA] .*?href="(/[a-z].*?)".*?>(.*)$} $x all url tail]} {
71
+ # if {$npending>2*($limit - $i)} break
72
+ incr k
73
+ if {$k>100} break
74
+ set u2 [string map {&lt; < &gt; > &quot; \" &amp; &} $url]
75
+ if {![info exists seen($u2)]} {
76
+ set next($u2) 1
77
+ set seen($u2) 1
78
+
--- a/test/many-www.tcl
+++ b/test/many-www.tcl
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
--- a/test/many-www.tcl
+++ b/test/many-www.tcl
@@ -0,0 +1,78 @@
1 #!/usr/bin/tclsh
2 #
3 # Run this script from within any open Fossil checkout. Example:
4 #
5 # tclsh many-www.tcl | tee out.txt
6 #
7 # About 10,000 different web page requests will be made. Each is timed
8 # and the time shown on output. Use this script to search for segfault problems
9 # or to look for pages that need optimization.
10 #
11 proc run_query {url} {
12 set fd [open q.txt w]
13 puts $fd "GET $url HTTP/1.0\r\n\r"
14 close $fd
15 return [exec fossil test-http <q.txt]
16 }
17 set todo {}
18 foreach url {
19 /home
20 /timeline
21 /brlist
22 /taglist
23 /reportlist
24 /setup
25 /dir
26 /wcontent
27 /attachlist
28 /taglist
29 /test_env
30 /stat
31 /rcvfromlist
32 /urllist
33 /modreq
34 /info/d5c4
35 /test-all-help
36 /leaves
37 /timeline?a=1970-01-01
38 } {
39 set seen($url) 1
40 set pending($url) 1
41 }
42 set round 1
43 set limit 25000
44 set npending [llength [array names pending]]
45 proc get_pending {} {
46 global pending npending round next
47 if {$npending==0} {
48 incr round
49 array set pending [array get next]
50 set npending [llength [array names pending]]
51 unset -nocomplain next
52 }
53 set res [lindex [array names pending] [expr {int(rand()*$npending)}]]
54 unset pending($res)
55 incr npending -1
56 return $res
57 }
58 for {set i 0} {$i<$limit} {incr i} {
59 set url [get_pending]
60 puts -nonewline "($round/[expr {$i+1}]) $url "
61 flush stdout
62 set tm [time {set x [run_query $url]}]
63 set ms [lindex $tm 0]
64 puts [format {%.3fs} [expr {$ms/1000000.0}]]
65 flush stdout
66 if {[string length $x]>1000000} {
67 set x [string range $x 0 1000000]
68 }
69 set k 0
70 while {[regexp {<[aA] .*?href="(/[a-z].*?)".*?>(.*)$} $x all url tail]} {
71 # if {$npending>2*($limit - $i)} break
72 incr k
73 if {$k>100} break
74 set u2 [string map {&lt; < &gt; > &quot; \" &amp; &} $url]
75 if {![info exists seen($u2)]} {
76 set next($u2) 1
77 set seen($u2) 1
78
--- test/valgrind-www.tcl
+++ test/valgrind-www.tcl
@@ -23,22 +23,33 @@
2323
/brlist
2424
/taglist
2525
/reportlist
2626
/setup
2727
/dir
28
+ /wcontent
2829
} {
2930
set seen($url) 1
30
- lappend todo $url
31
+ set pending($url) 1
32
+}
33
+set limit 1000
34
+set npending [llength [array names pending]]
35
+proc get_pending {} {
36
+ global pending npending
37
+ set res [lindex [array names pending] [expr {int(rand()*$npending)}]]
38
+ unset pending($res)
39
+ incr npending -1
40
+ return $res
3141
}
32
-for {set i 0} {$i<[llength $todo] && $i<1000} {incr i} {
33
- set url [lindex $todo $i]
42
+for {set i 0} {$npending>0 && $i<$limit} {incr i} {
43
+ set url [get_pending]
3444
puts "====== ([expr {$i+1}]) $url ======"
3545
set x [run_query $url]
3646
while {[regexp {<[aA] .*?href="(/[a-z].*?)".*?>(.*)$} $x all url tail]} {
3747
set u2 [string map {&lt; < &gt; > &quot; \" &amp; &} $url]
3848
if {![info exists seen($u2)]} {
39
- lappend todo $u2
49
+ set pending($u2) 1
4050
set seen($u2) 1
51
+ incr npending
4152
}
4253
set x $tail
4354
}
4455
}
4556
--- test/valgrind-www.tcl
+++ test/valgrind-www.tcl
@@ -23,22 +23,33 @@
23 /brlist
24 /taglist
25 /reportlist
26 /setup
27 /dir
 
28 } {
29 set seen($url) 1
30 lappend todo $url
 
 
 
 
 
 
 
 
 
31 }
32 for {set i 0} {$i<[llength $todo] && $i<1000} {incr i} {
33 set url [lindex $todo $i]
34 puts "====== ([expr {$i+1}]) $url ======"
35 set x [run_query $url]
36 while {[regexp {<[aA] .*?href="(/[a-z].*?)".*?>(.*)$} $x all url tail]} {
37 set u2 [string map {&lt; < &gt; > &quot; \" &amp; &} $url]
38 if {![info exists seen($u2)]} {
39 lappend todo $u2
40 set seen($u2) 1
 
41 }
42 set x $tail
43 }
44 }
45
--- test/valgrind-www.tcl
+++ test/valgrind-www.tcl
@@ -23,22 +23,33 @@
23 /brlist
24 /taglist
25 /reportlist
26 /setup
27 /dir
28 /wcontent
29 } {
30 set seen($url) 1
31 set pending($url) 1
32 }
33 set limit 1000
34 set npending [llength [array names pending]]
35 proc get_pending {} {
36 global pending npending
37 set res [lindex [array names pending] [expr {int(rand()*$npending)}]]
38 unset pending($res)
39 incr npending -1
40 return $res
41 }
42 for {set i 0} {$npending>0 && $i<$limit} {incr i} {
43 set url [get_pending]
44 puts "====== ([expr {$i+1}]) $url ======"
45 set x [run_query $url]
46 while {[regexp {<[aA] .*?href="(/[a-z].*?)".*?>(.*)$} $x all url tail]} {
47 set u2 [string map {&lt; < &gt; > &quot; \" &amp; &} $url]
48 if {![info exists seen($u2)]} {
49 set pending($u2) 1
50 set seen($u2) 1
51 incr npending
52 }
53 set x $tail
54 }
55 }
56

Keyboard Shortcuts

Open search /
Next entry (timeline) j
Previous entry (timeline) k
Open focused entry Enter
Show this help ?
Toggle theme Top nav button