/* fossil-scm / compat / zlib / contrib / gcc_gvmat64 / gvmat64.S */
/*
;uInt longest_match_x64(
;    deflate_state *s,
;    IPos cur_match);                             // current match

; gvmat64.S -- Asm portion of the optimized longest_match for 32 bits x86_64
; (AMD64 on Athlon 64, Opteron, Phenom
;  and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core I5/I7)
; this file is translation from gvmat64.asm to GCC 4.x (for Linux, Mac XCode)
; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.
;
; File written by Gilles Vollant, by converting to assembly the longest_match
;  from Jean-loup Gailly in deflate.c of zLib and infoZip zip.
;  and by taking inspiration on asm686 with masm, optimised assembly code
;  from Brian Raiter, written 1998
;
;  This software is provided 'as-is', without any express or implied
;  warranty.  In no event will the authors be held liable for any damages
;  arising from the use of this software.
;
;  Permission is granted to anyone to use this software for any purpose,
;  including commercial applications, and to alter it and redistribute it
;  freely, subject to the following restrictions:
;
;  1. The origin of this software must not be misrepresented; you must not
;     claim that you wrote the original software. If you use this software
;     in a product, an acknowledgment in the product documentation would be
;     appreciated but is not required.
;  2. Altered source versions must be plainly marked as such, and must not be
;     misrepresented as being the original software
;  3. This notice may not be removed or altered from any source distribution.
;
;  https://www.zlib.net
;  https://www.muppetlabs.com/~breadbox/software/assembly.html
;
;  to compile this file for zLib, I use option:
;   gcc -c -arch x86_64 gvmat64.S


;uInt longest_match(s, cur_match)
;    deflate_state *s;
;    IPos cur_match;                             // current match /
;
; with XCode for Mac, I had strange error with some jump on intel syntax
; this is why BEFORE_JMP and AFTER_JMP are used
*/
/*
; BEFORE_JMP/AFTER_JMP: some jump mnemonics mis-assembled under Intel syntax
; with XCode's assembler, so those jumps are temporarily wrapped in AT&T mode.
*/
#define BEFORE_JMP .att_syntax
#define AFTER_JMP .intel_syntax noprefix

/* Mach-O (macOS) C symbols carry a leading underscore; ELF does not. */
#ifndef NO_UNDERLINE
# define match_init     _match_init
# define longest_match  _longest_match
#endif

.intel_syntax noprefix

.globl match_init, longest_match
.text
/*
;-----------------------------------------------------------------------
; uInt longest_match(deflate_state *s, IPos cur_match)
; ABI:   SysV AMD64 (param 1 in rdi = s, param 2 in esi = cur_match)
; Out:   eax = length of the best match found (capped by s->lookahead)
; Side:  may store cur_match into s->match_start
; Saves: rbx, rbp, r12-r15 (callee-saved; spilled to slots below rsp --
;        leaf function, so the 128-byte red zone is usable without sub rsp)
;-----------------------------------------------------------------------
*/
longest_match:

#define LocalVarsSize 96
/*
; register used : rax,rbx,rcx,rdx,rsi,rdi,r8,r9,r10,r11,r12
; free register : r14,r15
; register can be saved : rsp
*/

/* local spill slots, all in the red zone below rsp */
#define chainlenwmask (rsp + 8 - LocalVarsSize)
#define nicematch     (rsp + 16 - LocalVarsSize)

#define save_rdi      (rsp + 24 - LocalVarsSize)
#define save_rsi      (rsp + 32 - LocalVarsSize)
#define save_rbx      (rsp + 40 - LocalVarsSize)
#define save_rbp      (rsp + 48 - LocalVarsSize)
#define save_r12      (rsp + 56 - LocalVarsSize)
#define save_r13      (rsp + 64 - LocalVarsSize)
#define save_r14      (rsp + 72 - LocalVarsSize)
#define save_r15      (rsp + 80 - LocalVarsSize)


/*
; all the +4 offsets are due to the addition of pending_buf_size (in zlib
; in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, remove the +4).
; Note : these value are good with a 8 bytes boundary pack structure
*/

#define MAX_MATCH 258
#define MIN_MATCH 3
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)

/*
;;; Offsets for fields in the deflate_state structure. These numbers
;;; are calculated from the definition of deflate_state, with the
;;; assumption that the compiler will dword-align the fields. (Thus,
;;; changing the definition of deflate_state could easily cause this
;;; program to crash horribly, without so much as a warning at
;;; compile time. Sigh.)

; all the +zlib1222add offsets are due to the addition of fields
; in zlib in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
; if you compile with zlib 1.2.2.2 or later , use "zlib1222add equ 8").
*/


/* you can check the structure offset by running

#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"

void print_depl()
{
deflate_state ds;
deflate_state *s=&ds;
printf("size pointer=%u\n",(int)sizeof(void*));

printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s)));
printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s)));
printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s)));
printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s)));
printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s)));
printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s)));
printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s)));
printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s)));
printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s)));
printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s)));
printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s)));
printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s)));
}
*/

/* hard-coded deflate_state field offsets -- see the check program above */
#define dsWSize          68
#define dsWMask          76
#define dsWindow         80
#define dsPrev           96
#define dsMatchLen      144
#define dsPrevMatch     148
#define dsStrStart      156
#define dsMatchStart    160
#define dsLookahead     164
#define dsPrevLen       168
#define dsMaxChainLen   172
#define dsGoodMatch     188
#define dsNiceMatch     192

/* field accessors; rcx holds the deflate_state pointer throughout */
#define window_size      [ rcx + dsWSize]
#define WMask            [ rcx + dsWMask]
#define window_ad        [ rcx + dsWindow]
#define prev_ad          [ rcx + dsPrev]
#define strstart         [ rcx + dsStrStart]
#define match_start      [ rcx + dsMatchStart]
#define Lookahead        [ rcx + dsLookahead] //; 0ffffffffh on infozip
#define prev_length      [ rcx + dsPrevLen]
#define max_chain_length [ rcx + dsMaxChainLen]
#define good_match       [ rcx + dsGoodMatch]
#define nice_match       [ rcx + dsNiceMatch]

/*
; windows:
; parameter 1 in rcx(deflate state s), param 2 in rdx (cur match)

; All registers must be preserved across the call, except for
;   rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.

;
; gcc on macosx-linux:
; see https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf
; param 1 in rdi, param 2 in rsi
; rbx, rsp, rbp, r12 to r15 must be preserved

;;; Save registers that the compiler may be using, and adjust rsp to
;;; make room for our stack frame.


;;; Retrieve the function arguments. r8d will hold cur_match
;;; throughout the entire function. edx will hold the pointer to the
;;; deflate_state structure during the function's setup (before
;;; entering the main loop.

; ms: parameter 1 in rcx (deflate_state* s), param 2 in edx -> r8 (cur match)
; mac: param 1 in rdi, param 2 rsi
; this clear high 32 bits of r8, which can be garbage in both r8 and rdx
*/
        mov [save_rbx],rbx
        mov [save_rbp],rbp


        mov rcx,rdi                     //; rcx = s (field accessors use rcx)

        mov r8d,esi                     //; r8d = cur_match (zero-extends high 32)


        mov [save_r12],r12
        mov [save_r13],r13
        mov [save_r14],r14
        mov [save_r15],r15


//;;; uInt wmask = s->w_mask;
//;;; unsigned chain_length = s->max_chain_length;
//;;; if (s->prev_length >= s->good_match) {
//;;;     chain_length >>= 2;
//;;; }


        mov edi, prev_length
        mov esi, good_match
        mov eax, WMask
        mov ebx, max_chain_length
        cmp edi, esi
        jl LastMatchGood
        shr ebx, 2                      //; good match already: search 4x less
LastMatchGood:

//;;; chainlen is decremented once beforehand so that the function can
//;;; use the sign flag instead of the zero flag for the exit test.
//;;; It is then shifted into the high word, to make room for the wmask
//;;; value, which it will always accompany.

        dec ebx
        shl ebx, 16
        or ebx, eax                     //; ebx = (chainlen << 16) | wmask

//;;; on zlib only
//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;


        mov eax, nice_match
        mov [chainlenwmask], ebx
        mov r10d, Lookahead
        cmp r10d, eax
        cmovnl r10d, eax                //; r10d = min(lookahead, nice_match)
        mov [nicematch],r10d


//;;; register Bytef *scan = s->window + s->strstart;
        mov r10, window_ad
        mov ebp, strstart
        lea r13, [r10 + rbp]

//;;; Determine how many bytes the scan ptr is off from being
//;;; dword-aligned.

        mov r9,r13                      //; r9 = scan (kept for length calc)
        neg r13
        and r13,3                       //; r13 = scanalign (0..3)

//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
//;;;     s->strstart - (IPos)MAX_DIST(s) : NIL;


        mov eax, window_size
        sub eax, MIN_LOOKAHEAD          //; eax = MAX_DIST(s)


        xor edi,edi
        sub ebp, eax                    //; ebp = strstart - MAX_DIST

        mov r11d, prev_length

        cmovng ebp,edi                  //; limit = 0 when strstart <= MAX_DIST

//;;; int best_len = s->prev_length;    (kept in r11d)


//;;; Store the sum of s->window + best_len in rsi locally.

        lea rsi,[r10+r11]               //; rsi = window + best_len

//;;; register ush scan_start = *(ushf*)scan;
//;;; register ush scan_end   = *(ushf*)(scan+best_len-1);
//;;; Posf *prev = s->prev;

        movzx r12d,word ptr [r9]        //; r12w = scan_start
        movzx ebx, word ptr [r9 + r11 - 1] //; bx = scan_end

        mov rdi, prev_ad

//;;; Jump into the main loop.

        mov edx, [chainlenwmask]

        cmp bx,word ptr [rsi + r8 - 1]
        jz LookupLoopIsZero


/*
; LookupLoop1/2/4 are unrolled copies of the chain-walk step; each advances
; cur_match = prev[cur_match & wmask], bails out when cur_match <= limit or
; chainlen (high word of edx) underflows, then re-tests scan_end.
*/
LookupLoop1:
        and r8d, edx                    //; cur_match &= wmask

        movzx r8d, word ptr [rdi + r8*2] //; cur_match = prev[cur_match]
        cmp r8d, ebp
        jbe LeaveNow                    //; fell at or below limit: done


        sub edx, 0x00010000             //; --chainlen (stored in high word)
        BEFORE_JMP
        js LeaveNow
        AFTER_JMP

LoopEntry1:
        cmp bx,word ptr [rsi + r8 - 1]  //; quick scan_end check
        BEFORE_JMP
        jz LookupLoopIsZero
        AFTER_JMP

LookupLoop2:
        and r8d, edx

        movzx r8d, word ptr [rdi + r8*2]
        cmp r8d, ebp
        BEFORE_JMP
        jbe LeaveNow
        AFTER_JMP
        sub edx, 0x00010000
        BEFORE_JMP
        js LeaveNow
        AFTER_JMP

LoopEntry2:
        cmp bx,word ptr [rsi + r8 - 1]
        BEFORE_JMP
        jz LookupLoopIsZero
        AFTER_JMP

LookupLoop4:
        and r8d, edx

        movzx r8d, word ptr [rdi + r8*2]
        cmp r8d, ebp
        BEFORE_JMP
        jbe LeaveNow
        AFTER_JMP
        sub edx, 0x00010000
        BEFORE_JMP
        js LeaveNow
        AFTER_JMP

LoopEntry4:

        cmp bx,word ptr [rsi + r8 - 1]
        BEFORE_JMP
        jnz LookupLoop1
        jmp LookupLoopIsZero
        AFTER_JMP
/*
;;; do {
;;;     match = s->window + cur_match;
;;;     if (*(ushf*)(match+best_len-1) != scan_end ||
;;;         *(ushf*)match != scan_start) continue;
;;;     [...]
;;; } while ((cur_match = prev[cur_match & wmask]) > limit
;;;          && --chain_length != 0);
;;;
;;; Here is the inner loop of the function. The function will spend the
;;; majority of its time in this loop, and majority of that time will
;;; be spent in the first ten instructions.
;;;
;;; Within this loop:
;;; ebx = scanend
;;; r8d = curmatch
;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
;;; rsi = windowbestlen - i.e., (window + bestlen)
;;; rdi = prev
;;; ebp = limit
*/
.balign 16
LookupLoop:
        and r8d, edx

        movzx r8d, word ptr [rdi + r8*2]
        cmp r8d, ebp
        BEFORE_JMP
        jbe LeaveNow
        AFTER_JMP
        sub edx, 0x00010000
        BEFORE_JMP
        js LeaveNow
        AFTER_JMP

LoopEntry:

        cmp bx,word ptr [rsi + r8 - 1]
        BEFORE_JMP
        jnz LookupLoop1
        AFTER_JMP
LookupLoopIsZero:
        cmp r12w, word ptr [r10 + r8]   //; scan_start check
        BEFORE_JMP
        jnz LookupLoop1
        AFTER_JMP


//;;; Store the current value of chainlen.
        mov [chainlenwmask], edx
/*
;;; Point rdi to the string under scrutiny, and rsi to the string we
;;; are hoping to match it up with. In actuality, rsi and rdi are
;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and rdx is
;;; initialized to -(MAX_MATCH_8 - scanalign).
*/
        lea rsi,[r8+r10]
        mov rdx, 0xfffffffffffffef8 //; -(MAX_MATCH_8)
        lea rsi, [rsi + r13 + 0x0108] //;MAX_MATCH_8]
        lea rdi, [r9 + r13 + 0x0108] //;MAX_MATCH_8]

        prefetcht1 [rsi+rdx]
        prefetcht1 [rdi+rdx]

/*
;;; Test the strings for equality, 8 bytes at a time. At the end,
;;; adjust rdx so that it is offset to the exact byte that mismatched.
;;;
;;; We already know at this point that the first three bytes of the
;;; strings match each other, and they can be safely passed over before
;;; starting the compare loop. So what this code does is skip over 0-3
;;; bytes, as much as necessary in order to dword-align the rdi
;;; pointer. (rsi will still be misaligned three times out of four.)
;;;
;;; It should be confessed that this loop usually does not represent
;;; much of the total running time. Replacing it with a more
;;; straightforward "rep cmpsb" would not drastically degrade
;;; performance.
*/

LoopCmps:
        mov rax, [rsi + rdx]
        xor rax, [rdi + rdx]            //; zero iff the 8 bytes match
        jnz LeaveLoopCmps

        mov rax, [rsi + rdx + 8]
        xor rax, [rdi + rdx + 8]
        jnz LeaveLoopCmps8


        mov rax, [rsi + rdx + 8+8]
        xor rax, [rdi + rdx + 8+8]
        jnz LeaveLoopCmps16

        add rdx,8+8+8

        BEFORE_JMP
        jnz LoopCmps                    //; rdx == 0: all MAX_MATCH bytes equal
        jmp LenMaximum
        AFTER_JMP

LeaveLoopCmps16: add rdx,8
LeaveLoopCmps8:  add rdx,8
LeaveLoopCmps:

/* locate the first mismatching byte inside the 8-byte xor result in rax */
        test eax, 0x0000FFFF
        jnz LenLower

        test eax,0xffffffff

        jnz LenLower32

        add rdx,4
        shr rax,32
        or ax,ax
        BEFORE_JMP
        jnz LenLower
        AFTER_JMP

LenLower32:
        shr eax,16
        add rdx,2

LenLower:
        sub al, 1                       //; CF set iff low byte matched,
        adc rdx, 0                      //; so rdx += (al == 0)
//;;; Calculate the length of the match. If it is longer than MAX_MATCH,
//;;; then automatically accept it as the best possible match and leave.

        lea rax, [rdi + rdx]
        sub rax, r9                     //; eax = match length
        cmp eax, MAX_MATCH
        BEFORE_JMP
        jge LenMaximum
        AFTER_JMP
/*
;;; If the length of the match is not longer than the best match we
;;; have so far, then forget it and return to the lookup loop.
;///////////////////////////////////
*/
        cmp eax, r11d
        jg LongerMatch

        lea rsi,[r10+r11]               //; restore windowbestlen

        mov rdi, prev_ad
        mov edx, [chainlenwmask]
        BEFORE_JMP
        jmp LookupLoop
        AFTER_JMP
/*
;;; s->match_start = cur_match;
;;; best_len = len;
;;; if (len >= nice_match) break;
;;; scan_end = *(ushf*)(scan+best_len-1);
*/
LongerMatch:
        mov r11d, eax                   //; best_len = len
        mov match_start, r8d
        cmp eax, [nicematch]
        BEFORE_JMP
        jge LeaveNow
        AFTER_JMP

        lea rsi,[r10+rax]               //; windowbestlen for new best_len

        movzx ebx, word ptr [r9 + rax - 1] //; refresh scan_end
        mov rdi, prev_ad
        mov edx, [chainlenwmask]
        BEFORE_JMP
        jmp LookupLoop
        AFTER_JMP

//;;; Accept the current string, with the maximum possible length.

LenMaximum:
        mov r11d,MAX_MATCH
        mov match_start, r8d

//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
//;;; return s->lookahead;

LeaveNow:
        mov eax, Lookahead
        cmp r11d, eax
        cmovng eax, r11d                //; eax = min(best_len, lookahead)


//;;; Restore the stack and return from whence we came.


//        mov rsi,[save_rsi]
//        mov rdi,[save_rdi]
        mov rbx,[save_rbx]
        mov rbp,[save_rbp]
        mov r12,[save_r12]
        mov r13,[save_r13]
        mov r14,[save_r14]
        mov r15,[save_r15]


        ret 0
//; please don't remove this string !
//; Your can freely use gvmat64 in any free or commercial app
//; but it is far better don't remove the string in the binary!
//    db     0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0

/*
; void match_init(void) -- no per-stream setup is needed by this
; implementation; kept because deflate.c calls it when ASMV is defined.
*/
match_init:
        ret 0

/* end of gvmat64.S */