ruby-libstorj 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. checksums.yaml +7 -0
  2. data/.gitignore +10 -0
  3. data/.gitmodules +3 -0
  4. data/.rspec +1 -0
  5. data/Gemfile +23 -0
  6. data/Gemfile.lock +111 -0
  7. data/Guardfile +21 -0
  8. data/LICENSE +502 -0
  9. data/README.md +262 -0
  10. data/Rakefile +76 -0
  11. data/ext/libstorj/.gitignore +47 -0
  12. data/ext/libstorj/.travis.yml +27 -0
  13. data/ext/libstorj/Doxyfile +2427 -0
  14. data/ext/libstorj/LICENSE +502 -0
  15. data/ext/libstorj/Makefile.am +6 -0
  16. data/ext/libstorj/README.md +198 -0
  17. data/ext/libstorj/autogen.sh +3 -0
  18. data/ext/libstorj/configure.ac +64 -0
  19. data/ext/libstorj/depends/Makefile +153 -0
  20. data/ext/libstorj/depends/config.guess +1462 -0
  21. data/ext/libstorj/depends/config.sub +1823 -0
  22. data/ext/libstorj/depends/extract-osx-sdk.sh +33 -0
  23. data/ext/libstorj/depends/packages/cctools.mk +7 -0
  24. data/ext/libstorj/depends/packages/clang.mk +7 -0
  25. data/ext/libstorj/depends/packages/gmp.mk +23 -0
  26. data/ext/libstorj/depends/packages/gnutls.mk +25 -0
  27. data/ext/libstorj/depends/packages/json-c.mk +7 -0
  28. data/ext/libstorj/depends/packages/libcurl.mk +39 -0
  29. data/ext/libstorj/depends/packages/libmicrohttpd.mk +7 -0
  30. data/ext/libstorj/depends/packages/libuv.mk +7 -0
  31. data/ext/libstorj/depends/packages/nettle.mk +30 -0
  32. data/ext/libstorj/libstorj.pc.in +11 -0
  33. data/ext/libstorj/src/Makefile.am +23 -0
  34. data/ext/libstorj/src/bip39.c +233 -0
  35. data/ext/libstorj/src/bip39.h +64 -0
  36. data/ext/libstorj/src/bip39_english.h +2074 -0
  37. data/ext/libstorj/src/cli.c +1494 -0
  38. data/ext/libstorj/src/crypto.c +525 -0
  39. data/ext/libstorj/src/crypto.h +178 -0
  40. data/ext/libstorj/src/downloader.c +1923 -0
  41. data/ext/libstorj/src/downloader.h +163 -0
  42. data/ext/libstorj/src/http.c +688 -0
  43. data/ext/libstorj/src/http.h +175 -0
  44. data/ext/libstorj/src/rs.c +962 -0
  45. data/ext/libstorj/src/rs.h +99 -0
  46. data/ext/libstorj/src/storj.c +1523 -0
  47. data/ext/libstorj/src/storj.h +1014 -0
  48. data/ext/libstorj/src/uploader.c +2736 -0
  49. data/ext/libstorj/src/uploader.h +181 -0
  50. data/ext/libstorj/src/utils.c +336 -0
  51. data/ext/libstorj/src/utils.h +65 -0
  52. data/ext/libstorj/test/Makefile.am +27 -0
  53. data/ext/libstorj/test/mockbridge.c +260 -0
  54. data/ext/libstorj/test/mockbridge.json +687 -0
  55. data/ext/libstorj/test/mockbridgeinfo.json +1836 -0
  56. data/ext/libstorj/test/mockfarmer.c +358 -0
  57. data/ext/libstorj/test/storjtests.h +41 -0
  58. data/ext/libstorj/test/tests.c +1617 -0
  59. data/ext/libstorj/test/tests_rs.c +869 -0
  60. data/ext/ruby-libstorj/extconf.rb +8 -0
  61. data/ext/ruby-libstorj/ruby-libstorj.cc +17 -0
  62. data/lib/ruby-libstorj.rb +1 -0
  63. data/lib/ruby-libstorj/arg_forwarding_task.rb +58 -0
  64. data/lib/ruby-libstorj/env.rb +178 -0
  65. data/lib/ruby-libstorj/ext/bucket.rb +71 -0
  66. data/lib/ruby-libstorj/ext/create_bucket_request.rb +53 -0
  67. data/lib/ruby-libstorj/ext/curl_code.rb +139 -0
  68. data/lib/ruby-libstorj/ext/ext.rb +71 -0
  69. data/lib/ruby-libstorj/ext/file.rb +84 -0
  70. data/lib/ruby-libstorj/ext/get_bucket_request.rb +45 -0
  71. data/lib/ruby-libstorj/ext/json_request.rb +51 -0
  72. data/lib/ruby-libstorj/ext/list_files_request.rb +63 -0
  73. data/lib/ruby-libstorj/ext/types.rb +226 -0
  74. data/lib/ruby-libstorj/ext/upload_options.rb +38 -0
  75. data/lib/ruby-libstorj/libstorj.rb +22 -0
  76. data/lib/ruby-libstorj/mixins/storj.rb +27 -0
  77. data/lib/ruby-libstorj/struct.rb +42 -0
  78. data/ruby-libstorj.gemspec +57 -0
  79. data/spec/helpers/options.yml.example +22 -0
  80. data/spec/helpers/shared_rake_examples.rb +132 -0
  81. data/spec/helpers/storj_options.rb +96 -0
  82. data/spec/helpers/upload.data +3 -0
  83. data/spec/helpers/upload.data.sha256 +1 -0
  84. data/spec/libstorj_spec.rb +0 -0
  85. data/spec/ruby-libstorj/arg_forwarding_task_spec.rb +311 -0
  86. data/spec/ruby-libstorj/env_spec.rb +353 -0
  87. data/spec/ruby-libstorj/ext_spec.rb +75 -0
  88. data/spec/ruby-libstorj/json_request_spec.rb +13 -0
  89. data/spec/ruby-libstorj/libstorj_spec.rb +81 -0
  90. data/spec/ruby-libstorj/struct_spec.rb +64 -0
  91. data/spec/spec_helper.rb +113 -0
  92. metadata +136 -0
data/ext/libstorj/src/http.h
@@ -0,0 +1,175 @@
+ /**
+  * @file http.h
+  * @brief Storj HTTP utilities.
+  *
+  * Helper methods and utilities for HTTP requests.
+  */
+ #ifndef STORJ_HTTP_H
+ #define STORJ_HTTP_H
+
+ #include <curl/curl.h>
+ #include <nettle/sha.h>
+ #include <nettle/ripemd160.h>
+
+ #ifdef _WIN32
+ #include <signal.h>
+ #endif
+
+ #include "storj.h"
+ #include "utils.h"
+ #include "crypto.h"
+
+ #define SHARD_PROGRESS_INTERVAL BUFSIZ * 8
+
+ /** @brief A structure for sharing download progress state between threads.
+  *
+  * This structure is used to send async updates from a worker thread
+  * back to the event loop thread, to report the bytes that have been
+  * received for a shard.
+  */
+ typedef struct {
+     uint32_t pointer_index;
+     uint64_t bytes;
+     /* state should not be modified in worker threads */
+     void *state;
+ } shard_download_progress_t;
+
+ /** @brief A structure for sharing upload progress state between threads.
+  *
+  * This structure is used to send async updates from a worker thread
+  * back to the event loop thread, to report the bytes that have been
+  * sent for a shard.
+  */
+ typedef struct {
+     uint32_t pointer_index;
+     uint64_t bytes;
+     /* state should not be modified in worker threads */
+     void *state;
+ } shard_upload_progress_t;
+
+ typedef struct {
+     FILE *fd;
+     storj_encryption_ctx_t *ctx;
+     uint64_t offset;
+     uint64_t length;
+     uint64_t remain;
+     uint64_t total_sent;
+     uint64_t bytes_since_progress;
+     uv_async_t *progress_handle;
+     int error_code;
+     bool *canceled;
+ } shard_body_send_t;
+
+ typedef struct {
+     uint8_t *tail;
+     size_t tail_position;
+     size_t tail_length;
+     uint8_t *data;
+     size_t length;
+     size_t bytes_since_progress;
+     uint64_t shard_total_bytes;
+     uv_async_t *progress_handle;
+     bool *canceled;
+     struct sha256_ctx *sha256_ctx;
+     FILE *destination;
+     uint64_t file_position;
+     int error_code;
+ } shard_body_receive_t;
+
+ typedef struct {
+     uint8_t *data;
+     size_t length;
+ } http_body_receive_t;
+
+ typedef struct {
+     void *pnt;
+     uint64_t remain;
+ } http_body_send_t;
+
+ /**
+  * @brief Send a shard to a farmer via an HTTP request
+  *
+  * @param[in] http_options The HTTP options including proxy
+  * @param[in] farmer_id The farmer id
+  * @param[in] proto The protocol "http" or "https"
+  * @param[in] host The farmer host address
+  * @param[in] port The farmer port
+  * @param[in] shard_hash The hash of the shard to send
+  * @param[in] shard_total_bytes The total bytes of the shard
+  * @param[in] shard_data The actual bytes
+  * @param[in] token The farmer token for uploading
+  * @param[in] status_code The HTTP response status code
+  * @param[in] progress_handle The async handle for progress updates
+  * @param[in] canceled Pointer for canceling uploads
+  * @return A non-zero error value on failure and 0 on success.
+  */
+ int put_shard(storj_http_options_t *http_options,
+               char *farmer_id,
+               char *proto,
+               char *host,
+               int port,
+               char *shard_hash,
+               uint64_t shard_total_bytes,
+               FILE *original_file,
+               uint64_t file_position,
+               storj_encryption_ctx_t *ctx,
+               char *token,
+               int *status_code,
+               int *read_code,
+               uv_async_t *progress_handle,
+               bool *canceled);
+
+ /**
+  * @brief Make an HTTP request for a shard
+  *
+  * @param[in] http_options The HTTP options including proxy
+  * @param[in] farmer_id The farmer id
+  * @param[in] proto The protocol "http" or "https"
+  * @param[in] host The farmer host address
+  * @param[in] port The farmer port
+  * @param[in] shard_hash The hash of the shard to fetch
+  * @param[in] shard_total_bytes The total bytes of the shard
+  * @param[in] shard_data The actual bytes
+  * @param[in] token The farmer token for downloading
+  * @param[in] status_code The HTTP response status code
+  * @param[in] progress_handle The async handle for progress updates
+  * @param[in] canceled Pointer for canceling downloads
+  * @return A non-zero error value on failure and 0 on success.
+  */
+ int fetch_shard(storj_http_options_t *http_options,
+                 char *farmer_id,
+                 char *proto,
+                 char *host,
+                 int port,
+                 char *shard_hash,
+                 uint64_t shard_total_bytes,
+                 char *token,
+                 FILE *destination,
+                 uint64_t file_position,
+                 int *status_code,
+                 int *write_code,
+                 uv_async_t *progress_handle,
+                 bool *canceled);
+
+ /**
+  * @brief Make a JSON HTTP request
+  *
+  * @param[in] options The storj bridge options
+  * @param[in] method The HTTP method
+  * @param[in] path The path of the resource
+  * @param[in] request_body A json object of the request body
+  * @param[in] auth Boolean to include authentication
+  * @param[out] status_code The resulting status code from the request
+  * @return A non-zero error value on failure and 0 on success.
+  */
+ int fetch_json(storj_http_options_t *http_options,
+                storj_bridge_options_t *options,
+                char *method,
+                char *path,
+                struct json_object *request_body,
+                bool auth,
+                struct json_object **response,
+                int *status_code);
+
+
+ #endif /* STORJ_HTTP_H */
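The header above only declares the request helpers; the option structs (storj_http_options_t, storj_bridge_options_t) are defined in storj.h, which is not part of this hunk. Below is a minimal calling sketch for fetch_json, assuming those structs are initialized elsewhere and that the Bridge exposes a GET /buckets resource; the helper name, the path, and the json-c include location are assumptions, not part of this package.

    #include <stdio.h>
    #include <stdbool.h>
    #include <json-c/json.h>
    #include "http.h"

    /* Hypothetical helper: list buckets and print the raw JSON reply. */
    static int list_buckets(storj_http_options_t *http_options,
                            storj_bridge_options_t *bridge_options)
    {
        struct json_object *response = NULL;
        int status_code = 0;

        /* Authenticated GET with no request body. */
        int error = fetch_json(http_options, bridge_options,
                               "GET", "/buckets", NULL, true,
                               &response, &status_code);
        if (error == 0 && response != NULL) {
            printf("HTTP %d: %s\n", status_code,
                   json_object_to_json_string(response));
            json_object_put(response); /* assume the caller owns the parsed body */
        }
        return error;
    }

put_shard and fetch_shard follow the same pattern: the caller supplies the farmer address pieces and output pointers, and a zero return indicates the transfer completed.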
data/ext/libstorj/src/rs.c
@@ -0,0 +1,962 @@
+ /*
+  * rs.c -- forward error correction based on Vandermonde matrices
+  * 980624
+  * (C) 1997-98 Luigi Rizzo (luigi@iet.unipi.it)
+  * (C) 2001 Alain Knaff (alain@knaff.lu)
+  * (C) 2017 Storj Labs, Inc.
+  *
+  * Portions derived from code by Phil Karn (karn@ka9q.ampr.org),
+  * Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari
+  * Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995
+
+  * Reimplemented by Jannson (20161018): compatible with the Go version at
+  * https://github.com/klauspost/reedsolomon
+  *
+  * Modifications by Braydon Fuller (braydon@storj.io) to support memory
+  * mapped files.
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions
+  * are met:
+  *
+  * 1. Redistributions of source code must retain the above copyright
+  *    notice, this list of conditions and the following disclaimer.
+  * 2. Redistributions in binary form must reproduce the above
+  *    copyright notice, this list of conditions and the following
+  *    disclaimer in the documentation and/or other materials
+  *    provided with the distribution.
+  *
+  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND
+  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS
+  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+  * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+  * OF SUCH DAMAGE.
+  */
+
+ /*
+  * The following parameter defines how many bits are used for
+  * field elements. The code supports any value from 2 to 16
+  * but fastest operation is achieved with 8 bit elements.
+  * This is the only parameter you may want to change.
+  */
+ #define GF_BITS 8 /* code over GF(2**GF_BITS) - change to suit */
+
+ #include <assert.h>
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+
+ #include "rs.h"
+
+ /*
+  * stuff used for testing purposes only
+  */
+
+ #ifdef TEST
+ #define DEB(x)
+ #define DDB(x) x
+ #define DEBUG 0 /* minimal debugging */
+
+ #include <sys/time.h>
+ #define DIFF_T(a,b) \
+     (1+ 1000000*(a.tv_sec - b.tv_sec) + (a.tv_usec - b.tv_usec) )
+
+ #define TICK(t) \
+     {struct timeval x; \
+     gettimeofday(&x, NULL); \
+     t = x.tv_usec + 1000000* (x.tv_sec & 0xff ); \
+     }
+ #define TOCK(t) \
+     { u_long t1; TICK(t1); \
+     if (t1 < t) t = 256000000 + t1 - t; \
+     else t = t1 - t; \
+     if (t == 0) t = 1;}
+
+ u_long ticks[10]; /* vars for timekeeping */
+ #else
+ #define DEB(x)
+ #define DDB(x)
+ #define TICK(x)
+ #define TOCK(x)
+ #endif /* TEST */
+
+ /*
+  * You should not need to change anything beyond this point.
+  * The first part of the file implements linear algebra in GF.
+  *
+  * gf is the type used to store an element of the Galois Field.
+  * Must contain at least GF_BITS bits.
+  *
+  * Note: uint8_t will work up to GF(256) but int seems to run
+  * faster on the Pentium. We use int whenever we have to deal with an
+  * index, since they are generally faster.
+  */
+ /*
+  * AK: Udpcast only uses GF_BITS=8. Remove other possibilities
+  */
+ #if (GF_BITS != 8)
+ #error "GF_BITS must be 8"
+ #endif
+ typedef uint8_t gf;
+
+ #define GF_SIZE ((1 << GF_BITS) - 1) /* powers of \alpha */
+
+ /*
+  * Primitive polynomials - see Lin & Costello, Appendix A,
+  * and Lee & Messerschmitt, p. 453.
+  */
+ static char *allPp[] = {    /* GF_BITS polynomial */
+     NULL,                   /*  0  no code */
+     NULL,                   /*  1  no code */
+     "111",                  /*  2  1+x+x^2 */
+     "1101",                 /*  3  1+x+x^3 */
+     "11001",                /*  4  1+x+x^4 */
+     "101001",               /*  5  1+x^2+x^5 */
+     "1100001",              /*  6  1+x+x^6 */
+     "10010001",             /*  7  1+x^3+x^7 */
+     "101110001",            /*  8  1+x^2+x^3+x^4+x^8 */
+     "1000100001",           /*  9  1+x^4+x^9 */
+     "10010000001",          /* 10  1+x^3+x^10 */
+     "101000000001",         /* 11  1+x^2+x^11 */
+     "1100101000001",        /* 12  1+x+x^4+x^6+x^12 */
+     "11011000000001",       /* 13  1+x+x^3+x^4+x^13 */
+     "110000100010001",      /* 14  1+x+x^6+x^10+x^14 */
+     "1100000000000001",     /* 15  1+x+x^15 */
+     "11010000000010001"     /* 16  1+x+x^3+x^12+x^16 */
+ };
+
+
+ /*
+  * To speed up computations, we have tables for logarithm, exponent
+  * and inverse of a number. If GF_BITS <= 8, we use a table for
+  * multiplication as well (it takes 64K, no big deal even on a PDA,
+  * especially because it can be pre-initialized and put into a ROM!),
+  * otherwise we use a table of logarithms.
+  * In any case the macro gf_mul(x,y) takes care of multiplications.
+  */
+
+ static gf gf_exp[2*GF_SIZE];    /* index->poly form conversion table */
+ static int gf_log[GF_SIZE + 1]; /* Poly->index form conversion table */
+ static gf inverse[GF_SIZE+1];   /* inverse of field elem. */
+                                 /* inv[\alpha**i]=\alpha**(GF_SIZE-i-1) */
+
+ /*
+  * modnn(x) computes x % GF_SIZE, where GF_SIZE is 2**GF_BITS - 1,
+  * without a slow divide.
+  */
+ static inline gf modnn(int x)
+ {
+     while (x >= GF_SIZE) {
+         x -= GF_SIZE;
+         x = (x >> GF_BITS) + (x & GF_SIZE);
+     }
+     return x;
+ }
+
+ #define SWAP(a,b,t) {t tmp; tmp=a; a=b; b=tmp;}
+
+ /*
+  * gf_mul(x,y) multiplies two numbers. If GF_BITS<=8, it is much
+  * faster to use a multiplication table.
+  *
+  * USE_GF_MULC, GF_MULC0(c) and GF_ADDMULC(x) can be used when multiplying
+  * many numbers by the same constant. In this case the first
+  * call sets the constant, and others perform the multiplications.
+  * A value related to the multiplication is held in a local variable
+  * declared with USE_GF_MULC . See usage in addmul1().
+  */
+ static gf gf_mul_table[(GF_SIZE + 1)*(GF_SIZE + 1)]
+ #ifdef WINDOWS
+ __attribute__((aligned (16)))
+ #else
+ __attribute__((aligned (256)))
+ #endif
+ ;
+
+ #define gf_mul(x,y) gf_mul_table[(x<<8)+y]
+
+ #define USE_GF_MULC register gf * __gf_mulc_
+ #define GF_MULC0(c) __gf_mulc_ = &gf_mul_table[(c)<<8]
+ #define GF_ADDMULC(dst, x) dst ^= __gf_mulc_[x]
+ #define GF_MULC(dst, x) dst = __gf_mulc_[x]
+
+ static void init_mul_table(void)
+ {
+     int i, j;
+     for (i=0; i< GF_SIZE+1; i++)
+         for (j=0; j< GF_SIZE+1; j++)
+             gf_mul_table[(i<<8)+j] = gf_exp[modnn(gf_log[i] + gf_log[j]) ];
+
+     for (j=0; j< GF_SIZE+1; j++)
+         gf_mul_table[j] = gf_mul_table[j<<8] = 0;
+ }
+
+ /*
+  * Generate GF(2**m) from the irreducible polynomial p(X) in p[0]..p[m]
+  * Lookup tables:
+  *     index->polynomial form   gf_exp[] contains j= \alpha^i;
+  *     polynomial form -> index form   gf_log[ j = \alpha^i ] = i
+  * \alpha=x is the primitive element of GF(2^m)
+  *
+  * For efficiency, gf_exp[] has size 2*GF_SIZE, so that a simple
+  * multiplication of two numbers can be resolved without calling modnn
+  */
+
+ /*
+  * initialize the data structures used for computations in GF.
+  */
+ static void generate_gf(void)
+ {
+     int i;
+     gf mask;
+     char *Pp = allPp[GF_BITS];
+
+     mask = 1; /* x ** 0 = 1 */
+     gf_exp[GF_BITS] = 0; /* will be updated at the end of the 1st loop */
+     /*
+      * first, generate the (polynomial representation of) powers of \alpha,
+      * which are stored in gf_exp[i] = \alpha ** i .
+      * At the same time build gf_log[gf_exp[i]] = i .
+      * The first GF_BITS powers are simply bits shifted to the left.
+      */
+     for (i = 0; i < GF_BITS; i++, mask <<= 1 ) {
+         gf_exp[i] = mask;
+         gf_log[gf_exp[i]] = i;
+         /*
+          * If Pp[i] == 1 then \alpha ** i occurs in poly-repr
+          * gf_exp[GF_BITS] = \alpha ** GF_BITS
+          */
+         if ( Pp[i] == '1' )
+             gf_exp[GF_BITS] ^= mask;
+     }
+     /*
+      * now gf_exp[GF_BITS] = \alpha ** GF_BITS is complete, so we can also
+      * compute its inverse.
+      */
+     gf_log[gf_exp[GF_BITS]] = GF_BITS;
+     /*
+      * Poly-repr of \alpha ** (i+1) is given by poly-repr of
+      * \alpha ** i shifted left one-bit and accounting for any
+      * \alpha ** GF_BITS term that may occur when poly-repr of
+      * \alpha ** i is shifted.
+      */
+     mask = 1 << (GF_BITS - 1 );
+     for (i = GF_BITS + 1; i < GF_SIZE; i++) {
+         if (gf_exp[i - 1] >= mask)
+             gf_exp[i] = gf_exp[GF_BITS] ^ ((gf_exp[i - 1] ^ mask) << 1);
+         else
+             gf_exp[i] = gf_exp[i - 1] << 1;
+         gf_log[gf_exp[i]] = i;
+     }
+     /*
+      * log(0) is not defined, so use a special value
+      */
+     gf_log[0] = GF_SIZE;
+     /* set the extended gf_exp values for fast multiply */
+     for (i = 0; i < GF_SIZE; i++)
+         gf_exp[i + GF_SIZE] = gf_exp[i];
+
+     /*
+      * again special cases. 0 has no inverse. This used to
+      * be initialized to GF_SIZE, but it should make no difference
+      * since no one is supposed to read from here.
+      */
+     inverse[0] = 0;
+     inverse[1] = 1;
+     for (i=2; i<=GF_SIZE; i++)
+         inverse[i] = gf_exp[GF_SIZE-gf_log[i]];
+ }
+
+ /*
+  * Various linear algebra operations that I use often.
+  */
+
+ /*
+  * addmul() computes dst[] = dst[] + c * src[]
+  * This is used often, so better optimize it! Currently the loop is
+  * unrolled 16 times, a good value for 486 and pentium-class machines.
+  * The case c=0 is also optimized, whereas c=1 is not. These
+  * calls are infrequent in my typical apps so I did not bother.
+  *
+  * Note that gcc on
+  */
+ #if 0
+ #define addmul(dst, src, c, sz, dst_max, src_max) \
+     if (c != 0) addmul1(dst, src, c, sz, dst_max, src_max)
+ #endif
+
+ static void slow_addmul1(gf *dst1, gf *src1, gf c, uint64_t sz, uint64_t dst_max, uint64_t src_max)
+ {
+     USE_GF_MULC;
+     register gf *dst = dst1, *src = src1;
+     uint64_t low_max = dst_max < src_max ? dst_max : src_max;
+     uint64_t pos = 0;
+     gf *lim = &dst[low_max];
+
+     GF_MULC0(c);
+
+     for (; dst < lim; dst++, src++ ) {
+         if (pos < src_max && pos < dst_max) {
+             GF_ADDMULC( *dst , *src );
+         } else if (pos < dst_max) {
+             /* assume zero when past the max */
+             GF_ADDMULC( *dst , 0 );
+         }
+         pos += 1;
+     }
+ }
+
+ # define addmul1 slow_addmul1
+
+ static void addmul(gf *dst, gf *src, gf c, uint64_t sz, uint64_t dst_max, uint64_t src_max)
+ {
+     if (c != 0) addmul1(dst, src, c, sz, dst_max, src_max);
+ }
+
+ /*
+  * mul() computes dst[] = c * src[]
+  * This is used often, so better optimize it! Currently the loop is
+  * unrolled 16 times, a good value for 486 and pentium-class machines.
+  * The case c=0 is also optimized, whereas c=1 is not. These
+  * calls are infrequent in my typical apps so I did not bother.
+  *
+  * Note that gcc on
+  */
+ #if 0
+ #define mul(dst, src, c, sz, dst_max, src_max) \
+     do { if (c != 0) mul1(dst, src, c, sz, dst_max, src_max); else memset(dst, 0, c); } while(0)
+ #endif
+
+ static void slow_mul1(gf *dst1, gf *src1, gf c, uint64_t sz, uint64_t dst_max, uint64_t src_max)
+ {
+     USE_GF_MULC;
+     register gf *dst = dst1, *src = src1;
+     uint64_t low_max = dst_max < src_max ? dst_max : src_max;
+     uint64_t pos = 0;
+     gf *lim = &dst[low_max];
+
+     GF_MULC0(c);
+
+     for (; dst < lim; dst++, src++ ) {
+         if (pos < src_max && pos < dst_max) {
+             GF_MULC( *dst , *src );
+         } else if (pos < dst_max) {
+             /* assume zero when past the max */
+             GF_MULC( *dst , 0 );
+         }
+         pos += 1;
+     }
+ }
+
+ # define mul1 slow_mul1
+
+ static inline void mul(gf *dst, gf *src, gf c, uint64_t sz, uint64_t dst_max, uint64_t src_max)
+ {
+     if (c != 0) mul1(dst, src, c, sz, dst_max, src_max); else memset(dst, 0, c);
+ }
+
+ /*
+  * invert_mat() takes a matrix and produces its inverse
+  * k is the size of the matrix.
+  * (Gauss-Jordan, adapted from Numerical Recipes in C)
+  * Return non-zero if singular.
+  */
+ DEB( int pivloops=0; int pivswaps=0; /* diagnostic */)
+ static int invert_mat(gf *src, int k)
+ {
+     gf c, *p;
+     int irow, icol, row, col, i, ix;
+
+     int error = 1;
+     int indxc[k];
+     int indxr[k];
+     int ipiv[k];
+     gf id_row[k];
+
+     memset(id_row, 0, k*sizeof(gf));
+     DEB( pivloops=0; pivswaps=0; /* diagnostic */ )
+     /*
+      * ipiv marks elements already used as pivots.
+      */
+     for (i = 0; i < k; i++)
+         ipiv[i] = 0;
+
+     for (col = 0; col < k; col++) {
+         gf *pivot_row;
+         /*
+          * Zeroing column 'col', look for a non-zero element.
+          * First try on the diagonal, if it fails, look elsewhere.
+          */
+         irow = icol = -1;
+         if (ipiv[col] != 1 && src[col*k + col] != 0) {
+             irow = col;
+             icol = col;
+             goto found_piv;
+         }
+         for (row = 0; row < k; row++) {
+             if (ipiv[row] != 1) {
+                 for (ix = 0; ix < k; ix++) {
+                     DEB( pivloops++; )
+                     if (ipiv[ix] == 0) {
+                         if (src[row*k + ix] != 0) {
+                             irow = row;
+                             icol = ix;
+                             goto found_piv;
+                         }
+                     } else if (ipiv[ix] > 1) {
+                         fprintf(stderr, "singular matrix\n");
+                         goto fail;
+                     }
+                 }
+             }
+         }
+         if (icol == -1) {
+             fprintf(stderr, "XXX pivot not found!\n");
+             goto fail;
+         }
+ found_piv:
+         ++(ipiv[icol]);
+         /*
+          * swap rows irow and icol, so afterwards the diagonal
+          * element will be correct. Rarely done, not worth
+          * optimizing.
+          */
+         if (irow != icol) {
+             for (ix = 0; ix < k; ix++ ) {
+                 SWAP( src[irow*k + ix], src[icol*k + ix], gf);
+             }
+         }
+         indxr[col] = irow;
+         indxc[col] = icol;
+         pivot_row = &src[icol*k];
+         c = pivot_row[icol];
+         if (c == 0) {
+             fprintf(stderr, "singular matrix 2\n");
+             goto fail;
+         }
+         if (c != 1 ) { /* otherwise this is a NOP */
+             /*
+              * this is done often, but optimizing is not so
+              * fruitful, at least in the obvious ways (unrolling)
+              */
+             DEB( pivswaps++; )
+             c = inverse[ c ];
+             pivot_row[icol] = 1;
+             for (ix = 0; ix < k; ix++ )
+                 pivot_row[ix] = gf_mul(c, pivot_row[ix] );
+         }
+         /*
+          * from all rows, remove multiples of the selected row
+          * to zero the relevant entry (in fact, the entry is not zero
+          * because we know it must be zero).
+          * (Here, if we know that the pivot_row is the identity,
+          * we can optimize the addmul).
+          */
+         id_row[icol] = 1;
+         if (memcmp(pivot_row, id_row, k*sizeof(gf)) != 0) {
+             for (p = src, ix = 0; ix < k; ix++, p += k ) {
+                 if (ix != icol) {
+                     c = p[icol];
+                     p[icol] = 0;
+                     addmul(p, pivot_row, c, k, k, k);
+                 }
+             }
+         }
+         id_row[icol] = 0;
+     } /* done all columns */
+     for (col = k-1; col >= 0; col-- ) {
+         if (indxr[col] <0 || indxr[col] >= k)
+             fprintf(stderr, "AARGH, indxr[col] %d\n", indxr[col]);
+         else if (indxc[col] <0 || indxc[col] >= k)
+             fprintf(stderr, "AARGH, indxc[col] %d\n", indxc[col]);
+         else
+             if (indxr[col] != indxc[col] ) {
+                 for (row = 0; row < k; row++ ) {
+                     SWAP( src[row*k + indxr[col]], src[row*k + indxc[col]], gf);
+                 }
+             }
+     }
+     error = 0;
+ fail:
+     return error;
+ }
+
+ static int fec_initialized = 0;
+
+ void fec_init(void)
+ {
+     TICK(ticks[0]);
+     generate_gf();
+     TOCK(ticks[0]);
+     DDB(fprintf(stderr, "generate_gf took %ldus\n", ticks[0]);)
+     TICK(ticks[0]);
+     init_mul_table();
+     TOCK(ticks[0]);
+     DDB(fprintf(stderr, "init_mul_table took %ldus\n", ticks[0]);)
+     fec_initialized = 1;
+ }
+
+
+ #ifdef PROFILE
+ static long long rdtsc(void)
+ {
+     unsigned long low, hi;
+     asm volatile ("rdtsc" : "=d" (hi), "=a" (low));
+     return ( (((long long)hi) << 32) | ((long long) low));
+ }
+
+ void print_matrix1(gf* matrix, int nrows, int ncols)
+ {
+     int i, j;
+     printf("matrix (%d,%d):\n", nrows, ncols);
+     for(i = 0; i < nrows; i++) {
+         for(j = 0; j < ncols; j++) {
+             printf("%6d ", matrix[i*ncols + j]);
+         }
+         printf("\n");
+     }
+ }
+
+ void print_matrix2(gf** matrix, int nrows, int ncols)
+ {
+     int i, j;
+     printf("matrix (%d,%d):\n", nrows, ncols);
+     for(i = 0; i < nrows; i++) {
+         for(j = 0; j < ncols; j++) {
+             printf("%6d ", matrix[i][j]);
+         }
+         printf("\n");
+     }
+ }
+
+ #endif
+
+ /* y = a**n */
+ static gf galExp(gf a, gf n)
+ {
+     int logA;
+     int logResult;
+     if(0 == n) {
+         return 1;
+     }
+     if(0 == a) {
+         return 0;
+     }
+     logA = gf_log[a];
+     logResult = logA * n;
+     while(logResult >= 255) {
+         logResult -= 255;
+     }
+
+     return gf_exp[logResult];
+ }
+
+ static inline gf galMultiply(gf a, gf b)
+ {
+     return gf_mul_table[ ((int)a << 8) + (int)b ];
+ }
+
+ static gf* vandermonde(int nrows, int ncols)
+ {
+     int row, col, ptr;
+     gf* matrix = (gf*)RS_MALLOC(nrows * ncols);
+     if(NULL != matrix) {
+         ptr = 0;
+         for(row = 0; row < nrows; row++) {
+             for(col = 0; col < ncols; col++) {
+                 matrix[ptr++] = galExp((gf)row, (gf)col);
+             }
+         }
+     }
+
+     return matrix;
+ }
+
+ /*
+  * Input params are not checked
+  * */
+ static gf* sub_matrix(gf* matrix, int rmin, int cmin, int rmax, int cmax,
+                       int nrows, int ncols)
+ {
+     int i, j, ptr = 0;
+     gf* new_m = (gf*)RS_MALLOC( (rmax-rmin) * (cmax-cmin) );
+     if(NULL != new_m) {
+         for(i = rmin; i < rmax; i++) {
+             for(j = cmin; j < cmax; j++) {
+                 new_m[ptr++] = matrix[i*ncols + j];
+             }
+         }
+     }
+
+     return new_m;
+ }
+
+ /* y = a.dot(b) */
+ static gf* multiply1(gf *a, int ar, int ac, gf *b, int br, int bc)
+ {
+     gf *new_m, tg;
+     int r, c, i, ptr = 0;
+
+     assert(ac == br);
+     new_m = (gf*)RS_CALLOC(1, ar*bc);
+     if(NULL != new_m) {
+
+         /* this multiply is slow */
+         for(r = 0; r < ar; r++) {
+             for(c = 0; c < bc; c++) {
+                 tg = 0;
+                 for(i = 0; i < ac; i++) {
+                     /* tg ^= gf_mul_table[ ((int)a[r*ac+i] << 8) + (int)b[i*bc+c] ]; */
+                     tg ^= galMultiply(a[r*ac+i], b[i*bc+c]);
+                 }
+
+                 new_m[ptr++] = tg;
+             }
+         }
+
+     }
+
+     return new_m;
+ }
+
+ /* copied from the golang rs version */
+ static inline int code_some_shards(gf* matrixRows, gf** inputs, gf** outputs,
+                                    int dataShards, int outputCount, int byteCount,
+                                    uint64_t *inputsMax, uint64_t * outputsMax)
+ {
+     gf* in;
+     int iRow, c;
+     for(c = 0; c < dataShards; c++) {
+         in = inputs[c];
+         for(iRow = 0; iRow < outputCount; iRow++) {
+             if(0 == c) {
+                 mul(outputs[iRow], in, matrixRows[iRow*dataShards+c], byteCount,
+                     outputsMax[iRow], inputsMax[c]);
+             } else {
+                 addmul(outputs[iRow], in, matrixRows[iRow*dataShards+c], byteCount,
+                        outputsMax[iRow], inputsMax[c]);
+             }
+         }
+     }
+
+     return 0;
+ }
+
+ reed_solomon* reed_solomon_new(int data_shards, int parity_shards)
+ {
+     gf* vm = NULL;
+     gf* top = NULL;
+     int err = 0;
+     reed_solomon* rs = NULL;
+
+     /* fec_init() MUST be called once beforehand */
+     assert(fec_initialized);
+
+     do {
+         rs = RS_MALLOC(sizeof(reed_solomon));
+         if(NULL == rs) {
+             return NULL;
+         }
+         rs->data_shards = data_shards;
+         rs->parity_shards = parity_shards;
+         rs->shards = (data_shards + parity_shards);
+         rs->m = NULL;
+         rs->parity = NULL;
+
+         if(rs->shards > DATA_SHARDS_MAX || data_shards <= 0 || parity_shards <= 0) {
+             err = 1;
+             break;
+         }
+
+         vm = vandermonde(rs->shards, rs->data_shards);
+         if(NULL == vm) {
+             err = 2;
+             break;
+         }
+
+         top = sub_matrix(vm, 0, 0, data_shards, data_shards, rs->shards, data_shards);
+         if(NULL == top) {
+             err = 3;
+             break;
+         }
+
+         err = invert_mat(top, data_shards);
+         assert(0 == err);
+
+         rs->m = multiply1(vm, rs->shards, data_shards, top, data_shards, data_shards);
+         if(NULL == rs->m) {
+             err = 4;
+             break;
+         }
+
+         rs->parity = sub_matrix(rs->m, data_shards, 0, rs->shards, data_shards, rs->shards, data_shards);
+         if(NULL == rs->parity) {
+             err = 5;
+             break;
+         }
+
+         RS_FREE(vm);
+         RS_FREE(top);
+         vm = NULL;
+         top = NULL;
+         return rs;
+
+     } while(0);
+
+     fprintf(stderr, "err=%d\n", err);
+     if(NULL != vm) {
+         RS_FREE(vm);
+     }
+     if(NULL != top) {
+         RS_FREE(top);
+     }
+     if(NULL != rs) {
+         if(NULL != rs->m) {
+             RS_FREE(rs->m);
+         }
+         if(NULL != rs->parity) {
+             RS_FREE(rs->parity);
+         }
+         RS_FREE(rs);
+     }
+
+     return NULL;
+ }
+
+ void reed_solomon_release(reed_solomon* rs)
+ {
+     if(NULL != rs) {
+         if(NULL != rs->m) {
+             RS_FREE(rs->m);
+         }
+         if(NULL != rs->parity) {
+             RS_FREE(rs->parity);
+         }
+         RS_FREE(rs);
+     }
+ }
+
+ int reed_solomon_encode(reed_solomon* rs,
+                         uint8_t** data_blocks,
+                         uint8_t** fec_blocks,
+                         uint64_t block_size,
+                         uint64_t total_bytes)
+ {
+     assert(NULL != rs && NULL != rs->parity);
+
+     uint64_t data_blocks_max[rs->data_shards];
+     uint64_t fec_blocks_max[rs->parity_shards];
+
+     int c = 0;
+
+     // Calculate the max for each shard based on the total bytes;
+     // the last shard may be less than the shard size
+     for (c = 0; c < rs->data_shards; c++) {
+         uint64_t bytes_remaining = total_bytes - c * block_size;
+         uint64_t max = block_size;
+         if (bytes_remaining < block_size) {
+             max = bytes_remaining;
+         }
+         data_blocks_max[c] = max;
+     }
+
+     // All of the parity shards will be the block size
+     for (c = 0; c < rs->parity_shards; c++) {
+         fec_blocks_max[c] = block_size;
+     }
+
+     return code_some_shards(rs->parity, data_blocks, fec_blocks,
+                             rs->data_shards, rs->parity_shards, block_size,
+                             data_blocks_max, fec_blocks_max);
+ }
+
+ int reed_solomon_decode(reed_solomon* rs,
+                         uint8_t **data_blocks,
+                         uint64_t block_size,
+                         uint8_t **dec_fec_blocks,
+                         unsigned int *fec_block_nos,
+                         unsigned int *erased_blocks,
+                         int nr_fec_blocks,
+                         uint64_t total_bytes)
+ {
+     /* use the stack instead of malloc; DATA_SHARDS_MAX is kept small to save memory */
+     gf dataDecodeMatrix[DATA_SHARDS_MAX*DATA_SHARDS_MAX];
+     uint8_t* subShards[DATA_SHARDS_MAX];
+     uint64_t subShardsMax[DATA_SHARDS_MAX];
+     uint8_t* outputs[DATA_SHARDS_MAX];
+     uint64_t outputsMax[DATA_SHARDS_MAX];
+     gf* m = rs->m;
+     int i, j, c, swap, subMatrixRow, dataShards, nos, nshards;
+
+     /* erased_blocks should always be sorted;
+      * if it is already sorted, the check below takes nr_fec_blocks passes,
+      * otherwise it is sorted here
+      * */
+     for(i = 0; i < nr_fec_blocks; i++) {
+         swap = 0;
+         for(j = i+1; j < nr_fec_blocks; j++) {
+             if(erased_blocks[i] > erased_blocks[j]) {
+                 /* the prefix is bigger than the following, swap */
+                 c = erased_blocks[i];
+                 erased_blocks[i] = erased_blocks[j];
+                 erased_blocks[j] = c;
+
+                 swap = 1;
+             }
+         }
+         if(!swap) {
+             //already sorted or sorted ok
+             break;
+         }
+     }
+
+     j = 0;
+     subMatrixRow = 0;
+     nos = 0;
+     nshards = 0;
+     dataShards = rs->data_shards;
+
+     for(i = 0; i < dataShards; i++) {
+
+         // Determine if the shard has less than block size
+         uint64_t remaining = total_bytes - i * block_size;
+         uint64_t max = block_size;
+         if (remaining < block_size) {
+             max = remaining;
+         }
+
+         if(j < nr_fec_blocks && i == erased_blocks[j]) {
+             //ignore the invalid block
+             j++;
+         } else {
+             /* this row is ok */
+             for(c = 0; c < dataShards; c++) {
+                 dataDecodeMatrix[subMatrixRow*dataShards + c] = m[i*dataShards + c];
+             }
+             subShards[subMatrixRow] = data_blocks[i];
+             subShardsMax[subMatrixRow] = max;
+             subMatrixRow++;
+         }
+     }
+
+     for(i = 0; i < nr_fec_blocks && subMatrixRow < dataShards; i++) {
+         subShards[subMatrixRow] = dec_fec_blocks[i];
+         subShardsMax[subMatrixRow] = block_size; /* all fec shards have block_size */
+         j = dataShards + fec_block_nos[i];
+         for(c = 0; c < dataShards; c++) {
+             dataDecodeMatrix[subMatrixRow*dataShards + c] = m[j*dataShards + c]; //use the specific row of the original fec_blocks
+         }
+         subMatrixRow++;
+     }
+
+     if(subMatrixRow < dataShards) {
+         //cannot correct
+         return -1;
+     }
+
+     invert_mat(dataDecodeMatrix, dataShards);
+
+     for(i = 0; i < nr_fec_blocks; i++) {
+         j = erased_blocks[i];
+
+         // Determine if the shard has less than block size
+         uint64_t remaining = total_bytes - j * block_size;
+         uint64_t max = block_size;
+         if (remaining < block_size) {
+             max = remaining;
+         }
+
+         outputs[i] = data_blocks[j];
+         outputsMax[i] = max;
+         memmove(dataDecodeMatrix+i*dataShards, dataDecodeMatrix+j*dataShards, dataShards);
+     }
+
+     return code_some_shards(dataDecodeMatrix, subShards, outputs,
+                             dataShards, nr_fec_blocks, block_size,
+                             subShardsMax, outputsMax);
+ }
+
+ int reed_solomon_encode2(reed_solomon* rs, uint8_t** data_blocks,
+                          uint8_t** fec_blocks, int nr_shards, uint64_t block_size,
+                          uint64_t total_bytes)
+ {
+     int i, ds = rs->data_shards, ps = rs->parity_shards, ss = rs->shards;
+     i = nr_shards / ss;
+
+     for(i = 0; i < nr_shards; i += ss) {
+         reed_solomon_encode(rs, data_blocks, fec_blocks, block_size, total_bytes);
+         data_blocks += ds;
+         fec_blocks += ps;
+     }
+     return 0;
+ }
+
+ int reed_solomon_reconstruct(reed_solomon* rs,
+                              uint8_t** data_blocks,
+                              uint8_t** fec_blocks,
+                              uint8_t* marks,
+                              int nr_shards,
+                              uint64_t block_size,
+                              uint64_t total_bytes)
+ {
+     uint8_t *dec_fec_blocks[DATA_SHARDS_MAX];
+     unsigned int fec_block_nos[DATA_SHARDS_MAX];
+     unsigned int erased_blocks[DATA_SHARDS_MAX];
+     uint8_t* fec_marks;
+     int i, j, dn, pn, n;
+     int ds = rs->data_shards;
+     int ps = rs->parity_shards;
+     int err = 0;
+
+     n = nr_shards / rs->shards;
+     fec_marks = marks + n*ds; //after all the data marks come the fec marks
+
+     for(j = 0; j < n; j++) {
+         dn = 0;
+         for(i = 0; i < ds; i++) {
+             if(marks[i]) {
+                 //errors
+                 erased_blocks[dn++] = i;
+             }
+         }
+         if(dn > 0) {
+             pn = 0;
+             for(i = 0; i < ps && pn < dn; i++) {
+                 if(!fec_marks[i]) {
+                     //got valid fec row
+                     fec_block_nos[pn] = i;
+                     dec_fec_blocks[pn] = fec_blocks[i];
+                     pn++;
+                 }
+             }
+
+             if(dn == pn) {
+                 reed_solomon_decode(rs,
+                                     data_blocks,
+                                     block_size,
+                                     dec_fec_blocks,
+                                     fec_block_nos,
+                                     erased_blocks,
+                                     dn,
+                                     total_bytes);
+             } else {
+                 //error but we continue
+                 err = -1;
+             }
+         }
+         data_blocks += ds;
+         marks += ds;
+         fec_blocks += ps;
+         fec_marks += ps;
+     }
+
+     return err;
+ }
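For orientation, here is a minimal encode sketch against the API declared in rs.h (rs.h itself is not shown in this diff). The shard counts, the buffer handling, and the assumption that RS_MALLOC/RS_CALLOC/RS_FREE map to the standard allocator are illustrative only; what the source above does guarantee is that fec_init() must run once before reed_solomon_new(), which the assert on fec_initialized enforces.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include "rs.h"

    /* Hypothetical example: split a buffer into 4 data shards and
     * compute 2 parity shards over it (allocation checks omitted). */
    int encode_example(const uint8_t *data, uint64_t total_bytes)
    {
        const int data_shards = 4, parity_shards = 2;
        const uint64_t block_size =
            (total_bytes + data_shards - 1) / data_shards;

        fec_init(); /* build the GF(2^8) tables once per process */

        reed_solomon *rs = reed_solomon_new(data_shards, parity_shards);
        if (rs == NULL) return -1;

        uint8_t *data_blocks[4] = {0};
        uint8_t *fec_blocks[2] = {0};
        for (int i = 0; i < data_shards; i++) {
            data_blocks[i] = calloc(1, block_size);
            uint64_t offset = (uint64_t)i * block_size;
            if (offset < total_bytes) {
                uint64_t len = total_bytes - offset;
                if (len > block_size) len = block_size;
                memcpy(data_blocks[i], data + offset, len);
            }
        }
        for (int i = 0; i < parity_shards; i++) {
            fec_blocks[i] = calloc(1, block_size);
        }

        /* The last data shard may be short; reed_solomon_encode derives
         * each shard's real length from total_bytes and block_size. */
        int error = reed_solomon_encode(rs, data_blocks, fec_blocks,
                                        block_size, total_bytes);

        for (int i = 0; i < data_shards; i++) free(data_blocks[i]);
        for (int i = 0; i < parity_shards; i++) free(fec_blocks[i]);
        reed_solomon_release(rs);
        return error;
    }

Decoding follows the same shape: reed_solomon_reconstruct takes the full shard array plus a marks array flagging which shards are missing, and rebuilds the erased data shards in place.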