Fossil SCM
Omit some asm code from the SHA1 implementation as it provides no performance improvement with modern compilers, and SHA1 is seldom used now anyhow.
Commit
20187be72370eda24b32cfe1bd3c7216bc1ada7158cc33491a8ebe1bd5f55b2c
Parent
b86c8b476b4af7e…
1 file changed
+0
-19
| --- src/sha1.c | ||
| +++ src/sha1.c | ||
| @@ -101,32 +101,13 @@ | ||
| 101 | 101 | * blk0() and blk() perform the initial expand. |
| 102 | 102 | * I got the idea of expanding during the round function from SSLeay |
| 103 | 103 | * |
| 104 | 104 | * blk0le() for little-endian and blk0be() for big-endian. |
| 105 | 105 | */ |
| 106 | -#if __GNUC__ && (defined(__i386__) || defined(__x86_64__)) | |
| 107 | -/* | |
| 108 | - * GCC by itself only generates left rotates. Use right rotates if | |
| 109 | - * possible to be kinder to dinky implementations with iterative rotate | |
| 110 | - * instructions. | |
| 111 | - */ | |
| 112 | -#define SHA_ROT(op, x, k) \ | |
| 113 | - ({ unsigned int y; asm(op " %1,%0" : "=r" (y) : "I" (k), "0" (x)); y; }) | |
| 114 | -#define rol(x,k) SHA_ROT("roll", x, k) | |
| 115 | -#define ror(x,k) SHA_ROT("rorl", x, k) | |
| 116 | - | |
| 117 | -#else | |
| 118 | -/* Generic C equivalent */ | |
| 119 | 106 | #define SHA_ROT(x,l,r) ((x) << (l) | (x) >> (r)) |
| 120 | 107 | #define rol(x,k) SHA_ROT(x,k,32-(k)) |
| 121 | 108 | #define ror(x,k) SHA_ROT(x,32-(k),k) |
| 122 | -#endif | |
| 123 | - | |
| 124 | - | |
| 125 | - | |
| 126 | - | |
| 127 | - | |
| 128 | 109 | #define blk0le(i) (block[i] = (ror(block[i],8)&0xFF00FF00) \ |
| 129 | 110 | |(rol(block[i],8)&0x00FF00FF)) |
| 130 | 111 | #define blk0be(i) block[i] |
| 131 | 112 | #define blk(i) (block[i&15] = rol(block[(i+13)&15]^block[(i+8)&15] \ |
| 132 | 113 | ^block[(i+2)&15]^block[i&15],1)) |
| 133 | 114 |
| --- src/sha1.c | |
| +++ src/sha1.c | |
| @@ -101,32 +101,13 @@ | |
| 101 | * blk0() and blk() perform the initial expand. |
| 102 | * I got the idea of expanding during the round function from SSLeay |
| 103 | * |
| 104 | * blk0le() for little-endian and blk0be() for big-endian. |
| 105 | */ |
| 106 | #if __GNUC__ && (defined(__i386__) || defined(__x86_64__)) |
| 107 | /* |
| 108 | * GCC by itself only generates left rotates. Use right rotates if |
| 109 | * possible to be kinder to dinky implementations with iterative rotate |
| 110 | * instructions. |
| 111 | */ |
| 112 | #define SHA_ROT(op, x, k) \ |
| 113 | ({ unsigned int y; asm(op " %1,%0" : "=r" (y) : "I" (k), "0" (x)); y; }) |
| 114 | #define rol(x,k) SHA_ROT("roll", x, k) |
| 115 | #define ror(x,k) SHA_ROT("rorl", x, k) |
| 116 | |
| 117 | #else |
| 118 | /* Generic C equivalent */ |
| 119 | #define SHA_ROT(x,l,r) ((x) << (l) | (x) >> (r)) |
| 120 | #define rol(x,k) SHA_ROT(x,k,32-(k)) |
| 121 | #define ror(x,k) SHA_ROT(x,32-(k),k) |
| 122 | #endif |
| 123 | |
| 124 | |
| 125 | |
| 126 | |
| 127 | |
| 128 | #define blk0le(i) (block[i] = (ror(block[i],8)&0xFF00FF00) \ |
| 129 | |(rol(block[i],8)&0x00FF00FF)) |
| 130 | #define blk0be(i) block[i] |
| 131 | #define blk(i) (block[i&15] = rol(block[(i+13)&15]^block[(i+8)&15] \ |
| 132 | ^block[(i+2)&15]^block[i&15],1)) |
| 133 |
| --- src/sha1.c | |
| +++ src/sha1.c | |
| @@ -101,32 +101,13 @@ | |
| 101 | * blk0() and blk() perform the initial expand. |
| 102 | * I got the idea of expanding during the round function from SSLeay |
| 103 | * |
| 104 | * blk0le() for little-endian and blk0be() for big-endian. |
| 105 | */ |
| 106 | #define SHA_ROT(x,l,r) ((x) << (l) | (x) >> (r)) |
| 107 | #define rol(x,k) SHA_ROT(x,k,32-(k)) |
| 108 | #define ror(x,k) SHA_ROT(x,32-(k),k) |
| 109 | #define blk0le(i) (block[i] = (ror(block[i],8)&0xFF00FF00) \ |
| 110 | |(rol(block[i],8)&0x00FF00FF)) |
| 111 | #define blk0be(i) block[i] |
| 112 | #define blk(i) (block[i&15] = rol(block[(i+13)&15]^block[(i+8)&15] \ |
| 113 | ^block[(i+2)&15]^block[i&15],1)) |
| 114 |