/* crc32_comb.c -- compute the CRC-32 of a data stream
* Copyright (C) 1995-2006, 2010, 2011, 2012, 2016, 2018 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* Thanks to Rodney Brown <[email protected]> for his contribution of faster
* CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing
* tables for updating the shift register in one step with three exclusive-ors
* instead of four steps with four exclusive-ors. This results in about a
* factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
*/
#include "zbuild.h"
#include <inttypes.h>
#include "deflate.h"
#include "crc32_p.h"
#include "crc32_comb_tbl.h"
/* Local functions for crc concatenation */
static uint32_t crc32_combine_(uint32_t crc1, uint32_t crc2, z_off64_t len2);
static void crc32_combine_gen_(uint32_t *op, z_off64_t len2);
/* ========================================================================= */
static uint32_t crc32_combine_(uint32_t crc1, uint32_t crc2, z_off64_t len2) {
    int n;

    if (len2 > 0)
        /* operator for 2^n zeros repeats every GF2_DIM n values */
        for (n = 0; len2; n = (n + 1) % GF2_DIM, len2 >>= 1)
            if (len2 & 1)
                crc1 = gf2_matrix_times(crc_comb[n], crc1);
    return crc1 ^ crc2;
}

/* ========================================================================= */
#ifdef ZLIB_COMPAT
unsigned long Z_EXPORT PREFIX(crc32_combine)(unsigned long crc1, unsigned long crc2, z_off_t len2) {
    return (unsigned long)crc32_combine_((uint32_t)crc1, (uint32_t)crc2, len2);
}

unsigned long Z_EXPORT PREFIX4(crc32_combine)(unsigned long crc1, unsigned long crc2, z_off64_t len2) {
    return (unsigned long)crc32_combine_((uint32_t)crc1, (uint32_t)crc2, len2);
}
#else
uint32_t Z_EXPORT PREFIX4(crc32_combine)(uint32_t crc1, uint32_t crc2, z_off64_t len2) {
    return crc32_combine_(crc1, crc2, len2);
}
#endif
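
/* Usage sketch (illustrative): crc32_combine() joins the CRCs of two buffers
   as if they had been checksummed as a single stream. The buf_a/buf_b and
   len_a/len_b names below are placeholders, and the exact exported names and
   integer types depend on ZLIB_COMPAT.

       uint32_t crc_a  = crc32(0, buf_a, len_a);
       uint32_t crc_b  = crc32(0, buf_b, len_b);
       uint32_t crc_ab = crc32_combine(crc_a, crc_b, len_b);
       // crc_ab matches crc32(crc_a, buf_b, len_b), the CRC of buf_a || buf_b
 */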

/* ========================================================================= */
static void crc32_combine_gen_(uint32_t *op, z_off64_t len2) {
    uint32_t row;
    int j;
    unsigned i;

    /* if len2 is zero or negative, return the identity matrix */
    if (len2 <= 0) {
        row = 1;
        for (j = 0; j < GF2_DIM; j++) {
            op[j] = row;
            row <<= 1;
        }
        return;
    }

    /* at least one bit in len2 is set -- find it, and copy the operator
       corresponding to that position into op */
    i = 0;
    for (;;) {
        if (len2 & 1) {
            for (j = 0; j < GF2_DIM; j++)
                op[j] = crc_comb[i][j];
            break;
        }
        len2 >>= 1;
        i = (i + 1) % GF2_DIM;
    }

    /* for each remaining bit set in len2 (if any), multiply op by the operator
       corresponding to that position */
    for (;;) {
        len2 >>= 1;
        i = (i + 1) % GF2_DIM;
        if (len2 == 0)
            break;
        if (len2 & 1)
            for (j = 0; j < GF2_DIM; j++)
                op[j] = gf2_matrix_times(crc_comb[i], op[j]);
    }
}

/* ========================================================================= */
#ifdef ZLIB_COMPAT
void Z_EXPORT PREFIX(crc32_combine_gen)(uint32_t *op, z_off_t len2) {
    crc32_combine_gen_(op, len2);
}
#endif

void Z_EXPORT PREFIX4(crc32_combine_gen)(uint32_t *op, z_off64_t len2) {
    crc32_combine_gen_(op, len2);
}

/* ========================================================================= */
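/* Apply a pre-computed combine operator op (built by crc32_combine_gen) to
   crc1 and fold in crc2. For the length that op was generated with, this is
   equivalent to crc32_combine(), but avoids redoing the matrix work per call. */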
uint32_t Z_EXPORT PREFIX(crc32_combine_op)(uint32_t crc1, uint32_t crc2, const uint32_t *op) {
    return gf2_matrix_times(op, crc1) ^ crc2;
}
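
/* Usage sketch (illustrative): when many CRCs of equally sized chunks have to
   be combined, the operator can be generated once and reused. The chunk_len,
   crc_chunk and nchunks names below are placeholders, and the exact exported
   names and types depend on ZLIB_COMPAT.

       uint32_t op[GF2_DIM];
       crc32_combine_gen(op, chunk_len);           // operator for chunk_len bytes
       uint32_t total = crc_chunk[0];
       for (size_t k = 1; k < nchunks; k++)        // fold in each following chunk
           total = crc32_combine_op(total, crc_chunk[k], op);

   crc32_combine_op(crc1, crc2, op) gives the same result as
   crc32_combine(crc1, crc2, chunk_len) for the chunk_len baked into op.
 */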