ruby-libstorj 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.gitmodules +3 -0
- data/.rspec +1 -0
- data/Gemfile +23 -0
- data/Gemfile.lock +111 -0
- data/Guardfile +21 -0
- data/LICENSE +502 -0
- data/README.md +262 -0
- data/Rakefile +76 -0
- data/ext/libstorj/.gitignore +47 -0
- data/ext/libstorj/.travis.yml +27 -0
- data/ext/libstorj/Doxyfile +2427 -0
- data/ext/libstorj/LICENSE +502 -0
- data/ext/libstorj/Makefile.am +6 -0
- data/ext/libstorj/README.md +198 -0
- data/ext/libstorj/autogen.sh +3 -0
- data/ext/libstorj/configure.ac +64 -0
- data/ext/libstorj/depends/Makefile +153 -0
- data/ext/libstorj/depends/config.guess +1462 -0
- data/ext/libstorj/depends/config.sub +1823 -0
- data/ext/libstorj/depends/extract-osx-sdk.sh +33 -0
- data/ext/libstorj/depends/packages/cctools.mk +7 -0
- data/ext/libstorj/depends/packages/clang.mk +7 -0
- data/ext/libstorj/depends/packages/gmp.mk +23 -0
- data/ext/libstorj/depends/packages/gnutls.mk +25 -0
- data/ext/libstorj/depends/packages/json-c.mk +7 -0
- data/ext/libstorj/depends/packages/libcurl.mk +39 -0
- data/ext/libstorj/depends/packages/libmicrohttpd.mk +7 -0
- data/ext/libstorj/depends/packages/libuv.mk +7 -0
- data/ext/libstorj/depends/packages/nettle.mk +30 -0
- data/ext/libstorj/libstorj.pc.in +11 -0
- data/ext/libstorj/src/Makefile.am +23 -0
- data/ext/libstorj/src/bip39.c +233 -0
- data/ext/libstorj/src/bip39.h +64 -0
- data/ext/libstorj/src/bip39_english.h +2074 -0
- data/ext/libstorj/src/cli.c +1494 -0
- data/ext/libstorj/src/crypto.c +525 -0
- data/ext/libstorj/src/crypto.h +178 -0
- data/ext/libstorj/src/downloader.c +1923 -0
- data/ext/libstorj/src/downloader.h +163 -0
- data/ext/libstorj/src/http.c +688 -0
- data/ext/libstorj/src/http.h +175 -0
- data/ext/libstorj/src/rs.c +962 -0
- data/ext/libstorj/src/rs.h +99 -0
- data/ext/libstorj/src/storj.c +1523 -0
- data/ext/libstorj/src/storj.h +1014 -0
- data/ext/libstorj/src/uploader.c +2736 -0
- data/ext/libstorj/src/uploader.h +181 -0
- data/ext/libstorj/src/utils.c +336 -0
- data/ext/libstorj/src/utils.h +65 -0
- data/ext/libstorj/test/Makefile.am +27 -0
- data/ext/libstorj/test/mockbridge.c +260 -0
- data/ext/libstorj/test/mockbridge.json +687 -0
- data/ext/libstorj/test/mockbridgeinfo.json +1836 -0
- data/ext/libstorj/test/mockfarmer.c +358 -0
- data/ext/libstorj/test/storjtests.h +41 -0
- data/ext/libstorj/test/tests.c +1617 -0
- data/ext/libstorj/test/tests_rs.c +869 -0
- data/ext/ruby-libstorj/extconf.rb +8 -0
- data/ext/ruby-libstorj/ruby-libstorj.cc +17 -0
- data/lib/ruby-libstorj.rb +1 -0
- data/lib/ruby-libstorj/arg_forwarding_task.rb +58 -0
- data/lib/ruby-libstorj/env.rb +178 -0
- data/lib/ruby-libstorj/ext/bucket.rb +71 -0
- data/lib/ruby-libstorj/ext/create_bucket_request.rb +53 -0
- data/lib/ruby-libstorj/ext/curl_code.rb +139 -0
- data/lib/ruby-libstorj/ext/ext.rb +71 -0
- data/lib/ruby-libstorj/ext/file.rb +84 -0
- data/lib/ruby-libstorj/ext/get_bucket_request.rb +45 -0
- data/lib/ruby-libstorj/ext/json_request.rb +51 -0
- data/lib/ruby-libstorj/ext/list_files_request.rb +63 -0
- data/lib/ruby-libstorj/ext/types.rb +226 -0
- data/lib/ruby-libstorj/ext/upload_options.rb +38 -0
- data/lib/ruby-libstorj/libstorj.rb +22 -0
- data/lib/ruby-libstorj/mixins/storj.rb +27 -0
- data/lib/ruby-libstorj/struct.rb +42 -0
- data/ruby-libstorj.gemspec +57 -0
- data/spec/helpers/options.yml.example +22 -0
- data/spec/helpers/shared_rake_examples.rb +132 -0
- data/spec/helpers/storj_options.rb +96 -0
- data/spec/helpers/upload.data +3 -0
- data/spec/helpers/upload.data.sha256 +1 -0
- data/spec/libstorj_spec.rb +0 -0
- data/spec/ruby-libstorj/arg_forwarding_task_spec.rb +311 -0
- data/spec/ruby-libstorj/env_spec.rb +353 -0
- data/spec/ruby-libstorj/ext_spec.rb +75 -0
- data/spec/ruby-libstorj/json_request_spec.rb +13 -0
- data/spec/ruby-libstorj/libstorj_spec.rb +81 -0
- data/spec/ruby-libstorj/struct_spec.rb +64 -0
- data/spec/spec_helper.rb +113 -0
- metadata +136 -0
data/ext/libstorj/src/uploader.c
@@ -0,0 +1,2736 @@
#include "uploader.h"

static void print_shard_info(storj_upload_state_t *state, int index) {
    shard_tracker_t *shard = &state->shard[index];
    shard_meta_t *shard_meta = state->shard[index].meta;
    farmer_pointer_t *p = state->shard[index].pointer;

    printf("\n================\n");

    printf("Shard index [%d]\n", index);

    printf("=== Shard Tracker ===\n");
    printf("progress: %d\n", shard->progress);
    printf("push_frame_request_count: %d\n", shard->push_frame_request_count);
    printf("push_shard_request_count: %d\n", shard->push_shard_request_count);
    printf("index: %d\n", shard->index);
    printf("uploaded_size: %"PRIu64"\n", shard->uploaded_size);

    printf("\n=== Shard Pointer ===\n");
    printf("token: %s\n", p->token);
    printf("farmer_user_agent: %s\n", p->farmer_user_agent);
    printf("farmer_protocol: %s\n", p->farmer_protocol);
    printf("farmer_address: %s\n", p->farmer_address);
    printf("farmer_port: %s\n", p->farmer_port);
    printf("farmer_node_id: %s\n", p->farmer_node_id);

    printf("\n=== Shard Meta ===\n");
    printf("hash: %s\n", shard_meta->hash);
    printf("index: %d\n", shard_meta->index);
    printf("size: %"PRIu64"\n", shard_meta->size);
    printf("is_parity: %d\n", shard_meta->is_parity);
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        printf("Challenge [%d]: %s\n", i, (char *)shard_meta->challenges_as_str[i]);
    }
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        printf("Leaf [%d]: %s\n", i, (char *)shard_meta->tree[i]);
    }

    printf("================\n");

    return;
}

static uv_work_t *uv_work_new()
{
    uv_work_t *work = malloc(sizeof(uv_work_t));
    return work;
}

static uv_work_t *frame_work_new(int *index, storj_upload_state_t *state)
{
    uv_work_t *work = uv_work_new();
    if (!work) {
        return NULL;
    }

    frame_request_t *req = malloc(sizeof(frame_request_t));
    if (!req) {
        return NULL;
    }

    req->http_options = state->env->http_options;
    req->options = state->env->bridge_options;
    req->upload_state = state;
    req->error_status = 0;
    req->status_code = 0;
    req->log = state->log;

    if (index != NULL) {
        req->shard_meta_index = *index;
        req->farmer_pointer = farmer_pointer_new();
    }

    work->data = req;

    return work;
}

static uv_work_t *shard_meta_work_new(int index, storj_upload_state_t *state)
{
    uv_work_t *work = uv_work_new();
    if (!work) {
        return NULL;
    }
    frame_builder_t *req = malloc(sizeof(frame_builder_t));
    if (!req) {
        return NULL;
    }
    req->shard_meta = malloc(sizeof(shard_meta_t));
    if (!req->shard_meta) {
        return NULL;
    }
    req->upload_state = state;
    req->log = state->log;

    // Make sure we switch between parity and data shard files.
    // When using Reed-Solomon we must also read from the encrypted file
    // rather than the original file for the data
    if (index + 1 > state->total_data_shards) {
        req->shard_file = state->parity_file;
    } else if (state->rs) {
        req->shard_file = state->encrypted_file;
    } else {
        req->shard_file = state->original_file;
    }
    // Reset shard index when using parity shards
    req->shard_meta->index = (index + 1 > state->total_data_shards) ? index - state->total_data_shards : index;

    // Position on shard_meta array
    req->shard_meta_index = index;

    req->error_status = 0;
    req->status_code = 0;

    work->data = req;

    return work;
}

static storj_exchange_report_t *storj_exchange_report_new()
{
    storj_exchange_report_t *report = malloc(sizeof(storj_exchange_report_t));
    if (!report) {
        return NULL;
    }
    report->data_hash = NULL;
    report->reporter_id = NULL;
    report->farmer_id = NULL;
    report->client_id = NULL;
    report->message = NULL;

    report->send_status = STORJ_REPORT_NOT_PREPARED; // not sent
    report->start = 0;
    report->end = 0;
    report->code = 0;
    report->send_count = 0;

    return report;
}

static farmer_pointer_t *farmer_pointer_new()
{
    farmer_pointer_t *pointer = calloc(sizeof(farmer_pointer_t), sizeof(char));
    if (!pointer) {
        return NULL;
    }
    pointer->token = NULL;
    pointer->farmer_user_agent = NULL;
    pointer->farmer_protocol = NULL;
    pointer->farmer_address = NULL;
    pointer->farmer_port = NULL;
    pointer->farmer_node_id = NULL;

    return pointer;
}

static shard_meta_t *shard_meta_new()
{
    shard_meta_t *meta = calloc(sizeof(shard_meta_t), sizeof(char));
    if (!meta) {
        return NULL;
    }
    meta->hash = NULL;

    return meta;
}

static storj_encryption_ctx_t *prepare_encryption_ctx(uint8_t *ctr, uint8_t *pass)
{
    storj_encryption_ctx_t *ctx = calloc(sizeof(storj_encryption_ctx_t), sizeof(char));
    if (!ctx) {
        return NULL;
    }

    ctx->ctx = calloc(sizeof(struct aes256_ctx), sizeof(char));
    if (!ctx->ctx) {
        return NULL;
    }

    ctx->encryption_ctr = calloc(AES_BLOCK_SIZE, sizeof(char));
    if (!ctx->encryption_ctr) {
        return NULL;
    }

    memcpy(ctx->encryption_ctr, ctr, AES_BLOCK_SIZE);

    aes256_set_encrypt_key(ctx->ctx, pass);

    return ctx;
}
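
/*
 * Aside (illustrative sketch, not part of uploader.c): the counter copied
 * into ctx->encryption_ctr above is later advanced with
 * increment_ctr_aes_iv(), which lives in src/crypto.c and is not shown in
 * this diff. A minimal sketch of the idea, assuming a 16-byte big-endian
 * counter advanced by one per AES block; the offsets passed to it in this
 * file are shard boundaries, so they are block-aligned.
 */
static void sketch_increment_ctr(uint8_t ctr[AES_BLOCK_SIZE], uint64_t byte_offset)
{
    /* CTR mode encrypts block n with counter + n, so a byte offset
     * maps to a whole number of AES blocks. */
    uint64_t blocks = byte_offset / AES_BLOCK_SIZE;

    /* Add `blocks` into the big-endian counter, carrying as needed. */
    for (int i = AES_BLOCK_SIZE - 1; i >= 0 && blocks > 0; i--) {
        uint64_t sum = (uint64_t)ctr[i] + (blocks & 0xFF);
        ctr[i] = (uint8_t)sum;
        blocks = (blocks >> 8) + (sum >> 8);
    }
}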

static void shard_meta_cleanup(shard_meta_t *shard_meta)
{
    if (shard_meta->hash != NULL) {
        free(shard_meta->hash);
    }

    free(shard_meta);
}

static void pointer_cleanup(farmer_pointer_t *farmer_pointer)
{
    if (farmer_pointer->token != NULL) {
        free(farmer_pointer->token);
    }

    if (farmer_pointer->farmer_user_agent != NULL) {
        free(farmer_pointer->farmer_user_agent);
    }

    if (farmer_pointer->farmer_protocol != NULL) {
        free(farmer_pointer->farmer_protocol);
    }

    if (farmer_pointer->farmer_address != NULL) {
        free(farmer_pointer->farmer_address);
    }

    if (farmer_pointer->farmer_port != NULL) {
        free(farmer_pointer->farmer_port);
    }

    if (farmer_pointer->farmer_node_id != NULL) {
        free(farmer_pointer->farmer_node_id);
    }

    free(farmer_pointer);
}

static void cleanup_state(storj_upload_state_t *state)
{
    if (state->final_callback_called) {
        return;
    }

    if (state->pending_work_count > 0) {
        return;
    }

    if (state->original_file) {
        fclose(state->original_file);
    }

    state->final_callback_called = true;

    if (state->frame_id) {
        free(state->frame_id);
    }

    if (state->hmac_id) {
        free(state->hmac_id);
    }

    if (state->encrypted_file_name) {
        free((char *)state->encrypted_file_name);
    }

    if (state->exclude) {
        free(state->exclude);
    }

    if (state->encryption_ctr) {
        free(state->encryption_ctr);
    }

    if (state->encryption_key) {
        free(state->encryption_key);
    }

    if (state->parity_file) {
        fclose(state->parity_file);
    }

    if (state->parity_file_path) {
        unlink(state->parity_file_path);
        free(state->parity_file_path);
    }

    if (state->encrypted_file) {
        fclose(state->encrypted_file);
    }

    if (state->encrypted_file_path) {
        unlink(state->encrypted_file_path);
        free(state->encrypted_file_path);
    }

    if (state->index) {
        free((char *)state->index);
    }

    if (state->shard) {
        for (int i = 0; i < state->total_shards; i++) {

            state->log->debug(state->env->log_options, state->handle,
                              "fn[cleanup_state] - Cleaning up shard %d", i);

            shard_meta_cleanup(state->shard[i].meta);

            state->log->debug(state->env->log_options, state->handle,
                              "fn[cleanup_state] - Cleaning up pointers %d", i);

            pointer_cleanup(state->shard[i].pointer);
            if (state->shard[i].report) {
                free(state->shard[i].report);
            }
        }
        free(state->shard);
    }

    state->finished_cb(state->error_status, state->file_id, state->handle);

    free(state);
}

static void free_encryption_ctx(storj_encryption_ctx_t *ctx)
{
    if (ctx->encryption_ctr) {
        free(ctx->encryption_ctr);
    }

    if (ctx->encryption_key) {
        free(ctx->encryption_key);
    }

    if (ctx->ctx) {
        free(ctx->ctx);
    }

    free(ctx);
}

static void after_create_bucket_entry(uv_work_t *work, int status)
{
    post_to_bucket_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    state->pending_work_count -= 1;

    if (status == UV_ECANCELED) {
        state->add_bucket_entry_count = 0;
        state->creating_bucket_entry = false;
        goto clean_variables;
    }

    state->add_bucket_entry_count += 1;
    state->creating_bucket_entry = false;

    if (req->error_status) {
        state->error_status = req->error_status;
        goto clean_variables;
    }

    // Check if we got a 200 or 201 status
    if (req->status_code == 200 || req->status_code == 201) {

        req->log->info(state->env->log_options, state->handle,
                       "Successfully Added bucket entry");

        state->add_bucket_entry_count = 0;
        state->completed_upload = true;

        struct json_object *file_id_value = NULL;
        char *file_id = NULL;
        if (json_object_object_get_ex(req->response, "id", &file_id_value)) {
            file_id = (char *)json_object_get_string(file_id_value);
        }

        if (file_id) {
            state->file_id = strdup(file_id);
        }

    } else if (state->add_bucket_entry_count == 6) {
        state->error_status = STORJ_BRIDGE_REQUEST_ERROR;
    }

clean_variables:
    queue_next_work(state);
    if (req->response) {
        json_object_put(req->response);
    }
    free(req);
    free(work);
}

static void create_bucket_entry(uv_work_t *work)
{
    post_to_bucket_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    req->log->info(state->env->log_options, state->handle,
                   "[%s] Creating bucket entry... (retry: %d)",
                   state->file_name,
                   state->add_bucket_entry_count);

    struct json_object *body = json_object_new_object();
    json_object *frame = json_object_new_string(state->frame_id);
    json_object_object_add(body, "frame", frame);

    json_object *file_name = json_object_new_string(state->encrypted_file_name);
    json_object_object_add(body, "filename", file_name);

    json_object *index = json_object_new_string(state->index);
    json_object_object_add(body, "index", index);

    struct json_object *hmac = json_object_new_object();

    json_object *type = json_object_new_string("sha512");
    json_object_object_add(hmac, "type", type);

    json_object *value = json_object_new_string(state->hmac_id);
    json_object_object_add(hmac, "value", value);

    json_object_object_add(body, "hmac", hmac);

    if (state->rs) {
        struct json_object *erasure = json_object_new_object();
        json_object *erasure_type = json_object_new_string("reedsolomon");
        json_object_object_add(erasure, "type", erasure_type);
        json_object_object_add(body, "erasure", erasure);
    }

    int path_len = strlen(state->bucket_id) + 16;
    char *path = calloc(path_len + 1, sizeof(char));
    if (!path) {
        req->error_status = STORJ_MEMORY_ERROR;
        return;
    }
    sprintf(path, "%s%s%s%c", "/buckets/", state->bucket_id, "/files", '\0');

    req->log->debug(state->env->log_options, state->handle,
                    "fn[create_bucket_entry] - JSON body: %s", json_object_to_json_string(body));

    int status_code;
    int request_status = fetch_json(req->http_options,
                                    req->options,
                                    "POST",
                                    path,
                                    body,
                                    true,
                                    &req->response,
                                    &status_code);

    req->log->debug(state->env->log_options,
                    state->handle,
                    "fn[create_bucket_entry] - JSON Response: %s",
                    json_object_to_json_string(req->response));

    if (request_status) {
        req->log->warn(state->env->log_options, state->handle,
                       "Create bucket entry error: %i", request_status);
    }

    req->status_code = status_code;

    json_object_put(body);
    free(path);
}
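
/*
 * Aside (illustrative, not part of uploader.c): the body assembled above
 * serializes to JSON shaped like the following. All values are made up for
 * illustration, and the "erasure" member is only present when state->rs is
 * set, matching the branch above.
 *
 *   {
 *     "frame": "5f8a63a2b1c4d5e6f7a8b9c0",
 *     "filename": "2b9e...encrypted-file-name...7d1c",
 *     "index": "a1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6",
 *     "hmac": {
 *       "type": "sha512",
 *       "value": "9c07...128 hex characters...42af"
 *     },
 *     "erasure": { "type": "reedsolomon" }
 *   }
 */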

static int prepare_bucket_entry_hmac(storj_upload_state_t *state)
{
    struct hmac_sha512_ctx hmac_ctx;
    hmac_sha512_set_key(&hmac_ctx, SHA256_DIGEST_SIZE, state->encryption_key);

    for (int i = 0; i < state->total_shards; i++) {

        shard_tracker_t *shard = &state->shard[i];

        if (!shard->meta ||
            !shard->meta->hash ||
            strlen(shard->meta->hash) != RIPEMD160_DIGEST_SIZE * 2) {
            return 1;
        }

        struct base16_decode_ctx base16_ctx;
        base16_decode_init(&base16_ctx);

        size_t decode_len = 0;
        uint8_t hash[RIPEMD160_DIGEST_SIZE];
        if (!base16_decode_update(&base16_ctx, &decode_len, hash,
                                  RIPEMD160_DIGEST_SIZE * 2,
                                  (uint8_t *)shard->meta->hash)) {
            return 1;
        }
        if (!base16_decode_final(&base16_ctx) ||
            decode_len != RIPEMD160_DIGEST_SIZE) {
            return 1;
        }
        hmac_sha512_update(&hmac_ctx, RIPEMD160_DIGEST_SIZE, hash);
    }

    uint8_t digest_raw[SHA512_DIGEST_SIZE];
    hmac_sha512_digest(&hmac_ctx, SHA512_DIGEST_SIZE, digest_raw);

    size_t digest_len = BASE16_ENCODE_LENGTH(SHA512_DIGEST_SIZE);
    state->hmac_id = calloc(digest_len + 1, sizeof(char));
    if (!state->hmac_id) {
        return 1;
    }

    base16_encode_update((uint8_t *)state->hmac_id, SHA512_DIGEST_SIZE, digest_raw);

    return 0;
}
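
/*
 * Aside (illustrative sketch, not part of uploader.c): the hmac_sha512_* and
 * base16_* calls above come from Nettle (<nettle/hmac.h>, <nettle/base16.h>).
 * A self-contained sketch of the same encode step that produces
 * state->hmac_id, assuming a raw SHA-512 digest as input:
 */
static char *sketch_hex_encode_digest(const uint8_t digest[SHA512_DIGEST_SIZE])
{
    /* Two hex characters per digest byte, plus a terminating NUL. */
    size_t hex_len = BASE16_ENCODE_LENGTH(SHA512_DIGEST_SIZE);
    char *hex = calloc(hex_len + 1, sizeof(char));
    if (!hex) {
        return NULL;
    }
    /* base16 encoding in Nettle is stateless, so no init/final calls
     * are needed; this mirrors the call above. */
    base16_encode_update((uint8_t *)hex, SHA512_DIGEST_SIZE, digest);
    return hex;
}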

static void queue_create_bucket_entry(storj_upload_state_t *state)
{
    uv_work_t *work = uv_work_new();
    if (!work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    post_to_bucket_request_t *req = malloc(sizeof(post_to_bucket_request_t));
    if (!req) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    if (prepare_bucket_entry_hmac(state)) {
        state->error_status = STORJ_FILE_GENERATE_HMAC_ERROR;
        return;
    }

    req->http_options = state->env->http_options;
    req->options = state->env->bridge_options;
    req->upload_state = state;
    req->response = NULL;
    req->error_status = 0;
    req->status_code = 0;
    req->log = state->log;
    work->data = req;

    state->pending_work_count += 1;
    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               create_bucket_entry, after_create_bucket_entry);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
        return;
    }

    state->creating_bucket_entry = true;
}

static void free_push_shard_work(uv_handle_t *progress_handle)
{
    uv_work_t *work = progress_handle->data;
    push_shard_request_t *req = work->data;

    if (req) {
        free(req);
    }

    if (work) {
        free(work);
    }
}

static void after_push_shard(uv_work_t *work, int status)
{
    push_shard_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;
    uv_handle_t *progress_handle = (uv_handle_t *) &req->progress_handle;
    shard_tracker_t *shard = &state->shard[req->shard_meta_index];

    // free the upload progress
    free(progress_handle->data);

    // assign work so that we can free after progress_handle is closed
    progress_handle->data = work;

    state->pending_work_count -= 1;

    if (status == UV_ECANCELED) {
        shard->push_shard_request_count = 0;
        shard->progress = AWAITING_PUSH_FRAME;
        shard->report->send_status = STORJ_REPORT_NOT_PREPARED;
        goto clean_variables;
    }

    // Update times on exchange report
    shard->report->start = req->start;
    shard->report->end = req->end;

    // Check if we got a 200, 201, or 304 status
    if (!req->error_status &&
        (req->status_code == 200 ||
         req->status_code == 201 ||
         req->status_code == 304)) {

        req->log->info(state->env->log_options, state->handle,
                       "Successfully transferred shard index %d",
                       req->shard_meta_index);

        shard->progress = COMPLETED_PUSH_SHARD;
        state->completed_shards += 1;
        shard->push_shard_request_count = 0;

        // Update the uploaded size outside of the progress async handle
        shard->uploaded_size = shard->meta->size;

        // Update the exchange report with success
        shard->report->code = STORJ_REPORT_SUCCESS;
        shard->report->message = STORJ_REPORT_SHARD_UPLOADED;
        shard->report->send_status = STORJ_REPORT_AWAITING_SEND;

    } else if (!state->canceled) {

        // Update the exchange report with failure
        shard->report->code = STORJ_REPORT_FAILURE;
        shard->report->message = STORJ_REPORT_UPLOAD_ERROR;
        shard->report->send_status = STORJ_REPORT_AWAITING_SEND;

        if (shard->push_shard_request_count == 6) {

            req->log->error(state->env->log_options, state->handle,
                            "Failed to push shard %d\n", req->shard_meta_index);

            state->error_status = STORJ_FARMER_REQUEST_ERROR;
        } else {
            req->log->warn(state->env->log_options, state->handle,
                           "Failed to push shard %d... Retrying...",
                           req->shard_meta_index);

            // We go back to getting a new pointer instead of retrying push with same pointer
            shard->progress = AWAITING_PUSH_FRAME;
            shard->push_shard_request_count += 1;

            // Add pointer to exclude for future calls
            if (state->exclude == NULL) {
                state->exclude = calloc(strlen(shard->pointer->farmer_node_id) + 1, sizeof(char));
                if (!state->exclude) {
                    state->error_status = STORJ_MEMORY_ERROR;
                    return;
                }
                strcpy(state->exclude, shard->pointer->farmer_node_id);
            } else {
                int new_len = strlen(state->exclude) + strlen(shard->pointer->farmer_node_id) + 1;
                state->exclude = realloc(state->exclude, new_len + 1);
                strcat(state->exclude, ",");
                strcat(state->exclude, shard->pointer->farmer_node_id);
                state->exclude[new_len] = '\0';
            }
        }
    }

clean_variables:
    queue_next_work(state);
    // close the async progress handle
    uv_close(progress_handle, free_push_shard_work);
}

static void push_shard(uv_work_t *work)
{
    push_shard_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;
    shard_tracker_t *shard = &state->shard[req->shard_meta_index];

    req->log->info(state->env->log_options, state->handle,
                   "Transfering Shard index %d... (retry: %d)",
                   req->shard_meta_index,
                   state->shard[req->shard_meta_index].push_shard_request_count);

    int status_code = 0;
    int read_code = 0;

    req->start = get_time_milliseconds();

    uint64_t file_position = req->shard_index * state->shard_size;

    storj_encryption_ctx_t *encryption_ctx = NULL;
    if (!state->rs) {
        // Initialize the encryption context
        encryption_ctx = prepare_encryption_ctx(state->encryption_ctr,
                                                state->encryption_key);
        if (!encryption_ctx) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        // Increment the iv to proper placement because we may be reading from the middle of the file
        increment_ctr_aes_iv(encryption_ctx->encryption_ctr, req->shard_meta_index * state->shard_size);
    }

    int req_status = put_shard(req->http_options,
                               shard->pointer->farmer_node_id,
                               "http",
                               shard->pointer->farmer_address,
                               atoi(shard->pointer->farmer_port),
                               shard->meta->hash,
                               shard->meta->size,
                               req->shard_file,
                               file_position,
                               encryption_ctx,
                               shard->pointer->token,
                               &status_code,
                               &read_code,
                               &req->progress_handle,
                               req->canceled);

    if (read_code != 0) {
        req->log->error(state->env->log_options, state->handle,
                        "Put shard read error: %i", read_code);
    }

    if (req_status) {
        req->error_status = req_status;
        req->log->error(state->env->log_options, state->handle,
                        "Put shard request error code: %i", req_status);
    }

    req->end = get_time_milliseconds();

    req->status_code = status_code;

clean_variables:
    if (encryption_ctx) {
        free_encryption_ctx(encryption_ctx);
    }
}

static void progress_put_shard(uv_async_t* async)
{
    shard_upload_progress_t *progress = async->data;

    storj_upload_state_t *state = progress->state;

    state->shard[progress->pointer_index].uploaded_size = progress->bytes;

    uint64_t uploaded_bytes = 0;
    uint64_t total_bytes = 0;

    for (int i = 0; i < state->total_shards; i++) {

        shard_tracker_t *shard = &state->shard[i];

        uploaded_bytes += shard->uploaded_size;
        total_bytes += shard->meta->size;
    }

    double total_progress = (double)uploaded_bytes / (double)total_bytes;

    if (state->progress_finished) {
        return;
    }

    if (uploaded_bytes == total_bytes) {
        state->progress_finished = true;
    }

    state->progress_cb(total_progress,
                       uploaded_bytes,
                       total_bytes,
                       state->handle);
}
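
/*
 * Aside (illustrative sketch, not part of uploader.c): state->progress_cb is
 * the user-supplied callback. Assuming the storj_progress_cb signature
 * declared in src/storj.h of this package, a minimal consumer might look like:
 */
static void example_progress(double progress, uint64_t bytes,
                             uint64_t total_bytes, void *handle)
{
    (void)handle; /* unused in this sketch */
    printf("\rUploaded %llu of %llu bytes (%.1f%%)",
           (unsigned long long)bytes,
           (unsigned long long)total_bytes,
           progress * 100.0);
    fflush(stdout);
}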

static void queue_push_shard(storj_upload_state_t *state, int index)
{
    uv_work_t *work = uv_work_new();
    if (!work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    push_shard_request_t *req = malloc(sizeof(push_shard_request_t));
    if (!req) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    req->http_options = state->env->http_options;
    req->options = state->env->bridge_options;
    req->upload_state = state;
    req->error_status = 0;
    req->log = state->log;

    // Reset shard index when using parity shards
    req->shard_index = (index + 1 > state->total_data_shards) ? index - state->total_data_shards : index;

    // Make sure we switch between parity and data shard files.
    // When using Reed-Solomon we must also read from the encrypted file
    // rather than the original file for the data
    if (index + 1 > state->total_data_shards) {
        req->shard_file = state->parity_file;
    } else if (state->rs) {
        req->shard_file = state->encrypted_file;
    } else {
        req->shard_file = state->original_file;
    }

    // Position on shard_meta array
    req->shard_meta_index = index;

    req->status_code = 0;

    req->canceled = &state->canceled;

    // setup upload progress reporting
    shard_upload_progress_t *progress =
        malloc(sizeof(shard_upload_progress_t));

    if (!progress) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    progress->pointer_index = index;
    progress->bytes = 0;
    progress->state = state;

    req->progress_handle.data = progress;

    uv_async_init(state->env->loop, &req->progress_handle,
                  progress_put_shard);

    work->data = req;

    state->pending_work_count += 1;
    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               push_shard, after_push_shard);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
        return;
    }

    state->shard[index].progress = PUSHING_SHARD;

    if (state->shard[index].report->farmer_id != NULL) {
        free(state->shard[index].report);
        state->shard[index].report = storj_exchange_report_new();
    }

    if (!state->shard[index].report) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    // setup the exchange report
    storj_exchange_report_t *report = state->shard[index].report;
    report->data_hash = state->shard[index].meta->hash;
    report->reporter_id = (char *)state->env->bridge_options->user;
    report->farmer_id = state->shard[index].pointer->farmer_node_id;
    report->client_id = (char *)state->env->bridge_options->user;
    report->pointer_index = index;
    report->start = 0;
    report->end = 0;
    report->code = 0;
    report->message = NULL;
    report->send_status = 0; // not sent
    report->send_count = 0;

    state->shard[index].work = work;
}

static void after_push_frame(uv_work_t *work, int status)
{
    frame_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;
    farmer_pointer_t *pointer = req->farmer_pointer;

    state->pending_work_count -= 1;

    if (status == UV_ECANCELED) {
        state->shard[req->shard_meta_index].push_frame_request_count = 0;
        state->shard[req->shard_meta_index].progress = AWAITING_PUSH_FRAME;
        goto clean_variables;
    }

    // Increment request count every request for retry counts
    state->shard[req->shard_meta_index].push_frame_request_count += 1;

    if (req->status_code == 429 || req->status_code == 420) {

        state->error_status = STORJ_BRIDGE_RATE_ERROR;

    } else if ((req->status_code == 200 || req->status_code == 201) &&
               pointer->token != NULL) {
        // Check if we got a 200 status and token

        // Reset for if we need to get a new pointer later
        state->shard[req->shard_meta_index].push_frame_request_count = 0;
        state->shard[req->shard_meta_index].progress = AWAITING_PUSH_SHARD;

        farmer_pointer_t *p = state->shard[req->shard_meta_index].pointer;

        // Add token to shard[].pointer
        p->token = calloc(strlen(pointer->token) + 1, sizeof(char));
        if (!p->token) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(p->token, pointer->token, strlen(pointer->token));

        // Add farmer_user_agent to shard[].pointer
        p->farmer_user_agent = calloc(strlen(pointer->farmer_user_agent) + 1,
                                      sizeof(char));
        if (!p->farmer_user_agent) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(p->farmer_user_agent, pointer->farmer_user_agent,
               strlen(pointer->farmer_user_agent));

        // Add farmer_address to shard[].pointer
        p->farmer_address = calloc(strlen(pointer->farmer_address) + 1,
                                   sizeof(char));
        if (!p->farmer_address) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(p->farmer_address, pointer->farmer_address,
               strlen(pointer->farmer_address));

        // Add farmer_port to shard[].pointer
        p->farmer_port = calloc(strlen(pointer->farmer_port) + 1, sizeof(char));
        if (!p->farmer_port) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(p->farmer_port, pointer->farmer_port,
               strlen(pointer->farmer_port));

        // Add farmer_protocol to shard[].pointer
        p->farmer_protocol = calloc(strlen(pointer->farmer_protocol) + 1,
                                    sizeof(char));
        if (!p->farmer_protocol) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(p->farmer_protocol, pointer->farmer_protocol,
               strlen(pointer->farmer_protocol));

        // Add farmer_node_id to shard[].pointer
        p->farmer_node_id = calloc(strlen(pointer->farmer_node_id) + 1,
                                   sizeof(char));
        if (!p->farmer_node_id) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(p->farmer_node_id, pointer->farmer_node_id,
               strlen(pointer->farmer_node_id));

        state->log->info(
            state->env->log_options,
            state->handle,
            "Contract negotiated with: "
            "{ "
            "\"userAgent: \"%s\", "
            "\"protocol:\" \"%s\", "
            "\"port\": \"%s\", "
            "\"nodeID\": \"%s\""
            "}",
            p->farmer_user_agent,
            p->farmer_protocol,
            p->farmer_port,
            p->farmer_node_id
        );

    } else if (state->shard[req->shard_meta_index].push_frame_request_count ==
               STORJ_MAX_PUSH_FRAME_COUNT) {
        state->error_status = STORJ_BRIDGE_OFFER_ERROR;
    } else {
        state->shard[req->shard_meta_index].progress = AWAITING_PUSH_FRAME;
    }

clean_variables:
    queue_next_work(state);
    if (pointer) {
        pointer_cleanup(pointer);
    }

    free(req);
    free(work);
}

static void push_frame(uv_work_t *work)
{
    frame_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;
    shard_meta_t *shard_meta = state->shard[req->shard_meta_index].meta;

    req->log->info(state->env->log_options, state->handle,
                   "Pushing frame for shard index %d... (retry: %d)",
                   req->shard_meta_index,
                   state->shard[req->shard_meta_index].push_frame_request_count);

    char resource[strlen(state->frame_id) + 9];
    memset(resource, '\0', strlen(state->frame_id) + 9);
    strcpy(resource, "/frames/");
    strcat(resource, state->frame_id);

    // Prepare the body
    struct json_object *body = json_object_new_object();

    // Add shard hash
    json_object *shard_hash = json_object_new_string(shard_meta->hash);
    json_object_object_add(body, "hash", shard_hash);

    // Add shard size
    json_object *shard_size = json_object_new_int64(shard_meta->size);
    json_object_object_add(body, "size", shard_size);

    // Add shard index
    json_object *shard_index = json_object_new_int(req->shard_meta_index);
    json_object_object_add(body, "index", shard_index);

    json_object *parity_shard = NULL;
    if (req->shard_meta_index + 1 > state->total_data_shards) {
        parity_shard = json_object_new_boolean(true);
    } else {
        parity_shard = json_object_new_boolean(false);
    }
    json_object_object_add(body, "parity", parity_shard);

    // Add challenges
    json_object *challenges = json_object_new_array();
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        json_object_array_add(challenges,
                              json_object_new_string(
                                  (char *)shard_meta->challenges_as_str[i]));
    }
    json_object_object_add(body, "challenges", challenges);

    // Add Tree
    json_object *tree = json_object_new_array();
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        json_object_array_add(tree,
                              json_object_new_string(
                                  (char *)shard_meta->tree[i]));
    }
    json_object_object_add(body, "tree", tree);

    // Add exclude (Don't try to upload to farmers that have failed before)
    json_object *exclude = json_object_new_array();
    if (state->exclude) {
        char *exclude_list = calloc(strlen(state->exclude) + 1, sizeof(char));
        if (!exclude_list) {
            req->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        strcpy(exclude_list, state->exclude);

        char *node_id = strtok(exclude_list, ",");
        while (node_id != NULL) {
            json_object_array_add(exclude, json_object_new_string(node_id));
            node_id = strtok(NULL, ",");
        }
        free(exclude_list);
    }

    json_object_object_add(body, "exclude", exclude);

    req->log->debug(state->env->log_options, state->handle,
                    "fn[push_frame] - JSON body: %s", json_object_to_json_string(body));

    int status_code;
    struct json_object *response = NULL;
    int request_status = fetch_json(req->http_options,
                                    req->options,
                                    "PUT",
                                    resource,
                                    body,
                                    true,
                                    &response,
                                    &status_code);

    req->log->debug(state->env->log_options, state->handle,
                    "fn[push_frame] - JSON Response: %s",
                    json_object_to_json_string(response));

    if (request_status) {
        req->log->warn(state->env->log_options, state->handle,
                       "Push frame error: %i", request_status);
        req->error_status = STORJ_BRIDGE_REQUEST_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_token;
    if (!json_object_object_get_ex(response, "token", &obj_token)) {
        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_farmer;
    if (!json_object_object_get_ex(response, "farmer", &obj_farmer)) {
        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_farmer_address;
    if (!json_object_object_get_ex(obj_farmer, "address",
                                   &obj_farmer_address)) {

        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_farmer_port;
    if (!json_object_object_get_ex(obj_farmer, "port", &obj_farmer_port)) {
        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_farmer_user_agent;
    if (!json_object_object_get_ex(obj_farmer, "userAgent",
                                   &obj_farmer_user_agent)) {

        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_farmer_protocol;
    if (!json_object_object_get_ex(obj_farmer, "protocol",
                                   &obj_farmer_protocol)) {

        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    struct json_object *obj_farmer_node_id;
    if (!json_object_object_get_ex(obj_farmer, "nodeID",
                                   &obj_farmer_node_id)) {

        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    if (!json_object_is_type(obj_token, json_type_string)) {

        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto clean_variables;
    }

    // Token
    char *token = (char *)json_object_get_string(obj_token);
    req->farmer_pointer->token = calloc(strlen(token) + 1, sizeof(char));
    if (!req->farmer_pointer->token) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(req->farmer_pointer->token, token, strlen(token));

    // Farmer user agent
    char *farmer_user_agent =
        (char *)json_object_get_string(obj_farmer_user_agent);
    req->farmer_pointer->farmer_user_agent =
        calloc(strlen(farmer_user_agent) + 1, sizeof(char));
    if (!req->farmer_pointer->farmer_user_agent) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(req->farmer_pointer->farmer_user_agent,
           farmer_user_agent,
           strlen(farmer_user_agent));

    // Farmer protocol
    char *farmer_protocol = (char *)json_object_get_string(obj_farmer_protocol);
    req->farmer_pointer->farmer_protocol =
        calloc(strlen(farmer_protocol) + 1, sizeof(char));
    if (!req->farmer_pointer->farmer_protocol) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(req->farmer_pointer->farmer_protocol,
           farmer_protocol,
           strlen(farmer_protocol));

    // Farmer address
    char *farmer_address = (char *)json_object_get_string(obj_farmer_address);
    req->farmer_pointer->farmer_address =
        calloc(strlen(farmer_address) + 1, sizeof(char));
    if (!req->farmer_pointer->farmer_address) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(req->farmer_pointer->farmer_address,
           farmer_address,
           strlen(farmer_address));

    // Farmer port
    char *farmer_port = (char *)json_object_get_string(obj_farmer_port);
    req->farmer_pointer->farmer_port = calloc(strlen(farmer_port) + 1, sizeof(char));
    if (!req->farmer_pointer->farmer_port) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(req->farmer_pointer->farmer_port, farmer_port, strlen(farmer_port));

    // Farmer node id
    char *farmer_node_id = (char *)json_object_get_string(obj_farmer_node_id);
    req->farmer_pointer->farmer_node_id =
        calloc(strlen(farmer_node_id) + 1, sizeof(char));
    if (!req->farmer_pointer->farmer_node_id) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(req->farmer_pointer->farmer_node_id,
           farmer_node_id,
           strlen(farmer_node_id));

    // Status code
    req->status_code = status_code;

clean_variables:
    if (response) {
        json_object_put(response);
    }
    if (body) {
        json_object_put(body);
    }
}
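
/*
 * Aside (illustrative, not part of uploader.c): the fields extracted above
 * imply a bridge response to PUT /frames/:frame_id shaped roughly like the
 * following (all values made up for illustration):
 *
 *   {
 *     "token": "99cf9af1f5ac...",
 *     "farmer": {
 *       "userAgent": "storj-farmer/1.0.0",
 *       "protocol": "1.2.0",
 *       "address": "farmer.example.com",
 *       "port": "4000",
 *       "nodeID": "f5a1d2c3b4..."
 *     }
 *   }
 */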

static void queue_push_frame(storj_upload_state_t *state, int index)
{
    if (state->shard[index].pointer->token != NULL) {
        pointer_cleanup(state->shard[index].pointer);
        state->shard[index].pointer = farmer_pointer_new();
        if (!state->shard[index].pointer) {
            state->error_status = STORJ_MEMORY_ERROR;
            return;
        }
    }

    uv_work_t *shard_work = frame_work_new(&index, state);
    if (!shard_work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    state->pending_work_count += 1;
    int status = uv_queue_work(state->env->loop, (uv_work_t*) shard_work,
                               push_frame, after_push_frame);
    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
        return;
    }

    state->shard[index].progress = PUSHING_FRAME;
}

static void after_prepare_frame(uv_work_t *work, int status)
{
    frame_builder_t *req = work->data;
    shard_meta_t *shard_meta = req->shard_meta;
    storj_upload_state_t *state = req->upload_state;

    state->pending_work_count -= 1;

    if (status == UV_ECANCELED) {
        state->shard[shard_meta->index].progress = AWAITING_PREPARE_FRAME;
        goto clean_variables;
    }

    if (req->error_status) {
        state->error_status = req->error_status;
        goto clean_variables;
    }

    /* set the shard_meta to a struct array in the state for later use. */

    // Add Hash
    state->shard[req->shard_meta_index].meta->hash =
        calloc(RIPEMD160_DIGEST_SIZE * 2 + 1, sizeof(char));

    if (!state->shard[req->shard_meta_index].meta->hash) {
        state->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }

    memcpy(state->shard[req->shard_meta_index].meta->hash,
           shard_meta->hash,
           RIPEMD160_DIGEST_SIZE * 2);

    req->log->info(state->env->log_options, state->handle,
                   "Shard (%d) hash: %s", req->shard_meta_index,
                   state->shard[req->shard_meta_index].meta->hash);

    // Add challenges_as_str
    state->log->debug(state->env->log_options, state->handle,
                      "Challenges for shard index %d",
                      req->shard_meta_index);

    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        memcpy(state->shard[req->shard_meta_index].meta->challenges_as_str[i],
               shard_meta->challenges_as_str[i],
               64);

        state->log->debug(state->env->log_options, state->handle,
                          "Shard %d Challenge [%d]: %s",
                          req->shard_meta_index,
                          i,
                          state->shard[req->shard_meta_index].meta->challenges_as_str[i]);
    }

    // Add Merkle Tree leaves.
    state->log->debug(state->env->log_options, state->handle,
                      "Tree for shard index %d",
                      req->shard_meta_index);

    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        memcpy(state->shard[req->shard_meta_index].meta->tree[i],
               shard_meta->tree[i],
               40);

        state->log->debug(state->env->log_options, state->handle,
                          "Shard %d Leaf [%d]: %s", req->shard_meta_index, i,
                          state->shard[req->shard_meta_index].meta->tree[i]);
    }

    // Add index
    state->shard[req->shard_meta_index].meta->index = shard_meta->index;

    // Add size
    state->shard[req->shard_meta_index].meta->size = shard_meta->size;

    state->log->info(state->env->log_options, state->handle,
                     "Successfully created frame for shard index %d",
                     req->shard_meta_index);

    state->shard[req->shard_meta_index].progress = AWAITING_PUSH_FRAME;

clean_variables:
    queue_next_work(state);
    if (shard_meta) {
        shard_meta_cleanup(shard_meta);
    }

    free(req);
    free(work);
}
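
/*
 * Aside (illustrative sketch, not part of uploader.c): the 40-character tree
 * entries copied above are Merkle leaves computed in prepare_frame() below as
 * hex(RMD160(SHA256(RMD160(SHA256(challenge || ciphertext))))). A sketch of
 * one leaf using plain Nettle calls (<nettle/sha2.h>, <nettle/ripemd160.h>)
 * in place of the ripemd160_of_str() and ripemd160sha256_as_string() helpers
 * from src/crypto.c:
 */
static void sketch_leaf(const uint8_t challenge[32],
                        const uint8_t *ciphertext, size_t len,
                        char leaf_hex[RIPEMD160_DIGEST_SIZE * 2 + 1])
{
    uint8_t sha[SHA256_DIGEST_SIZE];
    uint8_t rmd[RIPEMD160_DIGEST_SIZE];
    struct sha256_ctx sha_ctx;
    struct ripemd160_ctx rmd_ctx;

    /* First round: sha256 over challenge then ciphertext, then ripemd160. */
    sha256_init(&sha_ctx);
    sha256_update(&sha_ctx, 32, challenge);
    sha256_update(&sha_ctx, len, ciphertext);
    sha256_digest(&sha_ctx, SHA256_DIGEST_SIZE, sha);
    ripemd160_init(&rmd_ctx);
    ripemd160_update(&rmd_ctx, SHA256_DIGEST_SIZE, sha);
    ripemd160_digest(&rmd_ctx, RIPEMD160_DIGEST_SIZE, rmd);

    /* Second round: sha256 then ripemd160 again, then hex-encode. */
    sha256_init(&sha_ctx);
    sha256_update(&sha_ctx, RIPEMD160_DIGEST_SIZE, rmd);
    sha256_digest(&sha_ctx, SHA256_DIGEST_SIZE, sha);
    ripemd160_init(&rmd_ctx);
    ripemd160_update(&rmd_ctx, SHA256_DIGEST_SIZE, sha);
    ripemd160_digest(&rmd_ctx, RIPEMD160_DIGEST_SIZE, rmd);

    for (int i = 0; i < RIPEMD160_DIGEST_SIZE; i++) {
        sprintf(&leaf_hex[i * 2], "%02x", rmd[i]);
    }
}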
|
|
1339
|
+
static void prepare_frame(uv_work_t *work)
{
    frame_builder_t *req = work->data;
    shard_meta_t *shard_meta = req->shard_meta;
    storj_upload_state_t *state = req->upload_state;

    // Set the challenges
    uint8_t buff[32];
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        memset_zero(buff, 32);

        random_buffer(buff, 32);
        memcpy(shard_meta->challenges[i], buff, 32);

        // Convert the uint8_t challenges to character arrays
        char *challenge_as_str = hex2str(32, buff);
        if (!challenge_as_str) {
            req->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        memcpy(shard_meta->challenges_as_str[i], challenge_as_str, strlen(challenge_as_str));
        free(challenge_as_str);
    }

    // Hash of the shard_data
    shard_meta->hash = calloc(RIPEMD160_DIGEST_SIZE * 2 + 2, sizeof(char));
    if (!shard_meta->hash) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }

    req->log->info(state->env->log_options, state->handle,
                   "Creating frame for shard index %d",
                   req->shard_meta_index);

    // Sha256 of encrypted data for calculating the shard hash
    uint8_t prehash_sha256[SHA256_DIGEST_SIZE];

    // Initialize context for sha256 of encrypted data
    struct sha256_ctx shard_hash_ctx;
    sha256_init(&shard_hash_ctx);

    // Calculate the merkle tree with challenges
    struct sha256_ctx first_sha256_for_leaf[STORJ_SHARD_CHALLENGES];
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        sha256_init(&first_sha256_for_leaf[i]);
        sha256_update(&first_sha256_for_leaf[i], 32, (uint8_t *)&shard_meta->challenges[i]);
    }

    storj_encryption_ctx_t *encryption_ctx = NULL;
    if (!state->rs) {
        // Initialize the encryption context
        encryption_ctx = prepare_encryption_ctx(state->encryption_ctr, state->encryption_key);
        if (!encryption_ctx) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto clean_variables;
        }
        // Increment the iv to the proper position because we may be reading
        // from the middle of the file
        increment_ctr_aes_iv(encryption_ctx->encryption_ctr, req->shard_meta_index * state->shard_size);
    }

    uint8_t cphr_txt[AES_BLOCK_SIZE * 256];
    memset_zero(cphr_txt, AES_BLOCK_SIZE * 256);
    char read_data[AES_BLOCK_SIZE * 256];
    memset_zero(read_data, AES_BLOCK_SIZE * 256);
    unsigned long int read_bytes = 0;
    uint64_t total_read = 0;

    do {
        if (state->canceled) {
            goto clean_variables;
        }

        read_bytes = pread(fileno(req->shard_file),
                           read_data, AES_BLOCK_SIZE * 256,
                           shard_meta->index * state->shard_size + total_read);

        if (read_bytes == -1) {
            req->log->warn(state->env->log_options, state->handle,
                           "Error reading file: %d",
                           errno);
            req->error_status = STORJ_FILE_READ_ERROR;
            goto clean_variables;
        }

        total_read += read_bytes;

        if (!state->rs) {
            // Encrypt data
            ctr_crypt(encryption_ctx->ctx, (nettle_cipher_func *)aes256_encrypt,
                      AES_BLOCK_SIZE, encryption_ctx->encryption_ctr, read_bytes,
                      (uint8_t *)cphr_txt, (uint8_t *)read_data);
        } else {
            // Just use the already encrypted data
            memcpy(cphr_txt, read_data, AES_BLOCK_SIZE * 256);
        }

        sha256_update(&shard_hash_ctx, read_bytes, cphr_txt);

        for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
            sha256_update(&first_sha256_for_leaf[i], read_bytes, cphr_txt);
        }

        memset_zero(read_data, AES_BLOCK_SIZE * 256);
        memset_zero(cphr_txt, AES_BLOCK_SIZE * 256);

    } while (total_read < state->shard_size && read_bytes > 0);

    shard_meta->size = total_read;

    sha256_digest(&shard_hash_ctx, SHA256_DIGEST_SIZE, prehash_sha256);

    uint8_t prehash_ripemd160[RIPEMD160_DIGEST_SIZE];
    memset_zero(prehash_ripemd160, RIPEMD160_DIGEST_SIZE);
    ripemd160_of_str(prehash_sha256, SHA256_DIGEST_SIZE, prehash_ripemd160);

    // Shard Hash
    char *hash = hex2str(RIPEMD160_DIGEST_SIZE, prehash_ripemd160);
    if (!hash) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }
    memcpy(shard_meta->hash, hash, strlen(hash));
    free(hash);

    uint8_t preleaf_sha256[SHA256_DIGEST_SIZE];
    memset_zero(preleaf_sha256, SHA256_DIGEST_SIZE);
    uint8_t preleaf_ripemd160[RIPEMD160_DIGEST_SIZE];
    memset_zero(preleaf_ripemd160, RIPEMD160_DIGEST_SIZE);
    char leaf[RIPEMD160_DIGEST_SIZE * 2 + 1];
    memset(leaf, '\0', RIPEMD160_DIGEST_SIZE * 2 + 1);
    for (int i = 0; i < STORJ_SHARD_CHALLENGES; i++) {
        // finish first sha256 for leaf
        sha256_digest(&first_sha256_for_leaf[i], SHA256_DIGEST_SIZE, preleaf_sha256);

        // ripemd160 result of sha256
        ripemd160_of_str(preleaf_sha256, SHA256_DIGEST_SIZE, preleaf_ripemd160);

        // sha256 and ripemd160 again
        ripemd160sha256_as_string(preleaf_ripemd160, RIPEMD160_DIGEST_SIZE, leaf);

        memcpy(shard_meta->tree[i], leaf, RIPEMD160_DIGEST_SIZE * 2 + 1);
    }

clean_variables:
    if (encryption_ctx) {
        free_encryption_ctx(encryption_ctx);
    }
}

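// Every queue_* helper below follows the same pattern: allocate a request
// struct, bump state->pending_work_count, and hand a (work, after_work) pair
// to the libuv thread pool with uv_queue_work(). The matching after_*
// callback runs back on the loop thread and re-enters queue_next_work().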
static void queue_prepare_frame(storj_upload_state_t *state, int index)
{
    uv_work_t *shard_work = shard_meta_work_new(index, state);
    if (!shard_work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    state->pending_work_count += 1;
    int status = uv_queue_work(state->env->loop, (uv_work_t*) shard_work,
                               prepare_frame, after_prepare_frame);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
        return;
    }

    state->shard[index].progress = PREPARING_FRAME;
}

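// after_create_encrypted_file runs on the loop thread once the whole-file
// encryption pass finishes. A size mismatch between the source file and the
// .crypt file counts as a failed attempt; it gives up after six tries.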
static void after_create_encrypted_file(uv_work_t *work, int status)
{
    encrypt_file_req_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    state->pending_work_count -= 1;
    state->create_encrypted_file_count += 1;

    uint64_t encrypted_file_size = 0;
#ifdef _WIN32
    struct _stati64 st;
    _stati64(state->encrypted_file_path, &st);
    encrypted_file_size = st.st_size;
#else
    struct stat st;
    stat(state->encrypted_file_path, &st);
    encrypted_file_size = st.st_size;
#endif

    if (req->error_status != 0 || state->file_size != encrypted_file_size) {
        state->log->warn(state->env->log_options, state->handle,
                         "Failed to encrypt data.");

        if (state->create_encrypted_file_count == 6) {
            state->error_status = STORJ_FILE_ENCRYPTION_ERROR;
        }
    } else {
        state->log->info(state->env->log_options, state->handle,
                         "Successfully encrypted file");

        state->encrypted_file = fopen(state->encrypted_file_path, "r");
        if (!state->encrypted_file) {
            state->error_status = STORJ_FILE_READ_ERROR;
        }
    }

    state->creating_encrypted_file = false;

clean_variables:
    queue_next_work(state);
    free(work->data);
    free(work);
}

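// When Reed-Solomon is enabled the whole file is encrypted up front into a
// temporary .crypt file, so parity shards can be computed over the
// ciphertext and prepare_frame can skip its per-shard encryption step.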
static void create_encrypted_file(uv_work_t *work)
{
    encrypt_file_req_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    state->log->info(state->env->log_options, state->handle, "Encrypting file...");

    // Declared up front so the cleanup path never sees an uninitialized handle
    FILE *encrypted_file = NULL;

    // Initialize the encryption context
    storj_encryption_ctx_t *encryption_ctx = prepare_encryption_ctx(state->encryption_ctr,
                                                                    state->encryption_key);
    if (!encryption_ctx) {
        state->error_status = STORJ_MEMORY_ERROR;
        goto clean_variables;
    }

    uint8_t cphr_txt[AES_BLOCK_SIZE * 256];
    memset_zero(cphr_txt, AES_BLOCK_SIZE * 256);
    char read_data[AES_BLOCK_SIZE * 256];
    memset_zero(read_data, AES_BLOCK_SIZE * 256);
    unsigned long int read_bytes = 0;
    unsigned long int written_bytes = 0;
    uint64_t total_read = 0;

    encrypted_file = fopen(state->encrypted_file_path, "w+");

    if (encrypted_file == NULL) {
        state->log->error(state->env->log_options, state->handle,
                          "Can't create file for encrypted data [%s]",
                          state->encrypted_file_path);
        goto clean_variables;
    }

    do {
        if (state->canceled) {
            goto clean_variables;
        }

        read_bytes = pread(fileno(state->original_file),
                           read_data, AES_BLOCK_SIZE * 256,
                           total_read);

        if (read_bytes == -1) {
            state->log->warn(state->env->log_options, state->handle,
                             "Error reading file: %d",
                             errno);
            req->error_status = STORJ_FILE_READ_ERROR;
            goto clean_variables;
        }

        // Encrypt data
        ctr_crypt(encryption_ctx->ctx, (nettle_cipher_func *)aes256_encrypt,
                  AES_BLOCK_SIZE, encryption_ctx->encryption_ctr, read_bytes,
                  (uint8_t *)cphr_txt, (uint8_t *)read_data);

        written_bytes = pwrite(fileno(encrypted_file), cphr_txt, read_bytes, total_read);

        memset_zero(read_data, AES_BLOCK_SIZE * 256);
        memset_zero(cphr_txt, AES_BLOCK_SIZE * 256);

        total_read += read_bytes;

        if (written_bytes != read_bytes) {
            goto clean_variables;
        }

    } while (total_read < state->file_size && read_bytes > 0);

clean_variables:
    if (encrypted_file) {
        fclose(encrypted_file);
    }
    if (encryption_ctx) {
        free_encryption_ctx(encryption_ctx);
    }
}

static void queue_create_encrypted_file(storj_upload_state_t *state)
{
    uv_work_t *work = uv_work_new();
    if (!work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    state->pending_work_count += 1;

    encrypt_file_req_t *req = malloc(sizeof(encrypt_file_req_t));

    req->error_status = 0;
    req->upload_state = state;
    work->data = req;

    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               create_encrypted_file, after_create_encrypted_file);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
    }

    state->creating_encrypted_file = true;
}

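// A "frame" is the bridge-side staging area that shards are attached to
// before the final bucket entry is created. request_frame_id POSTs an empty
// JSON body to /frames and stores the returned id on the upload state.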
static void after_request_frame_id(uv_work_t *work, int status)
{
    frame_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    state->requesting_frame = false;
    state->pending_work_count -= 1;

    if (status == UV_ECANCELED) {
        state->frame_request_count = 0;
        goto clean_variables;
    }

    state->frame_request_count += 1;

    if (req->status_code == 429 || req->status_code == 420) {

        state->error_status = STORJ_BRIDGE_RATE_ERROR;

    } else if (req->error_status == 0 && req->status_code == 200 && req->frame_id) {

        state->log->info(state->env->log_options, state->handle,
                         "Successfully retrieved frame id: %s", req->frame_id);

        state->frame_id = req->frame_id;

    } else if (state->frame_request_count == 6) {
        state->error_status = STORJ_BRIDGE_FRAME_ERROR;
    }

clean_variables:
    queue_next_work(state);
    free(req);
    free(work);
}

static void request_frame_id(uv_work_t *work)
{
    frame_request_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    req->log->info(state->env->log_options,
                   state->handle,
                   "[%s] Requesting file staging frame... (retry: %d)",
                   state->file_name,
                   state->frame_request_count);

    // Prepare the body
    struct json_object *body = json_object_new_object();

    int status_code = 0;
    struct json_object *response = NULL;
    int request_status = fetch_json(req->http_options,
                                    req->options,
                                    "POST",
                                    "/frames",
                                    body,
                                    true,
                                    &response,
                                    &status_code);

    if (request_status) {
        req->log->warn(state->env->log_options, state->handle,
                       "Request frame id error: %i", request_status);
    }

    req->log->debug(state->env->log_options,
                    state->handle,
                    "fn[request_frame_id] - JSON Response: %s",
                    json_object_to_json_string(response));

    struct json_object *frame_id;
    if (!json_object_object_get_ex(response, "id", &frame_id)) {
        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto cleanup;
    }

    if (!json_object_is_type(frame_id, json_type_string)) {
        req->error_status = STORJ_BRIDGE_JSON_ERROR;
        goto cleanup;
    }

    char *frame_id_str = (char *)json_object_get_string(frame_id);
    req->frame_id = calloc(strlen(frame_id_str) + 1, sizeof(char));
    if (!req->frame_id) {
        req->error_status = STORJ_MEMORY_ERROR;
        goto cleanup;
    }

    strcpy(req->frame_id, frame_id_str);

cleanup:
    req->status_code = status_code;

    json_object_put(response);
    json_object_put(body);
}

static void queue_request_frame_id(storj_upload_state_t *state)
{
    uv_work_t *work = frame_work_new(NULL, state);
    if (!work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    state->pending_work_count += 1;
    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               request_frame_id, after_request_frame_id);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
    }

    state->requesting_frame = true;
}

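// Parity shards come from the bundled Reed-Solomon encoder: the encrypted
// file is mapped into memory as total_data_shards blocks, a parity file of
// (total_shards * shard_size - file_size) bytes is mapped alongside it, and
// reed_solomon_encode2() fills the parity blocks in place.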
static void after_create_parity_shards(uv_work_t *work, int status)
{
    parity_shard_req_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    state->pending_work_count -= 1;

    // TODO: Check if file was created
    if (req->error_status != 0) {
        state->log->warn(state->env->log_options, state->handle,
                         "Failed to create parity shards");

        state->awaiting_parity_shards = true;

        state->error_status = STORJ_FILE_PARITY_ERROR;
    } else {
        state->log->info(state->env->log_options, state->handle,
                         "Successfully created parity shards");

        state->parity_file = fopen(state->parity_file_path, "r");

        if (!state->parity_file) {
            state->error_status = STORJ_FILE_READ_ERROR;
        }
    }

clean_variables:
    queue_next_work(state);
    free(work->data);
    free(work);
}

static void create_parity_shards(uv_work_t *work)
{
    parity_shard_req_t *req = work->data;
    storj_upload_state_t *state = req->upload_state;

    state->log->info(state->env->log_options, state->handle,
                     "Creating parity shards");

    // Initialize the Galois field tables used by the Reed-Solomon encoder
    fec_init();

    uint8_t **data_blocks = NULL;
    uint8_t **fec_blocks = NULL;

    uint8_t *map = NULL;
    int status = 0;

    // Declared up front so the cleanup path never sees uninitialized handles
    char *tmp_folder = NULL;
    FILE *parity_file = NULL;
    uint8_t *map_parity = NULL;

    FILE *encrypted_file = fopen(state->encrypted_file_path, "r");

    if (!encrypted_file) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "Unable to open encrypted file");
        goto clean_variables;
    }

    status = map_file(fileno(encrypted_file), state->file_size, &map, true);

    if (status) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "Could not create mmap original file: %d", status);
        goto clean_variables;
    }

    uint64_t parity_size = state->total_shards * state->shard_size - state->file_size;

    // determine parity shard location
    if (!state->parity_file_path) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "No temp folder set for parity shards");
        goto clean_variables;
    }

    parity_file = fopen(state->parity_file_path, "w+");
    if (!parity_file) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "Could not open parity file [%s]", state->parity_file_path);
        goto clean_variables;
    }

    int falloc_status = allocatefile(fileno(parity_file), parity_size);

    if (falloc_status) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "Could not allocate space for mmap parity " \
                          "shard file: %i", falloc_status);
        goto clean_variables;
    }

    status = map_file(fileno(parity_file), parity_size, &map_parity, false);

    if (status) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "Could not create mmap parity shard file: %d", status);
        goto clean_variables;
    }

    data_blocks = (uint8_t**)malloc(state->total_data_shards * sizeof(uint8_t *));
    if (!data_blocks) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "memory error: unable to malloc");
        goto clean_variables;
    }

    for (int i = 0; i < state->total_data_shards; i++) {
        data_blocks[i] = map + i * state->shard_size;
    }

    fec_blocks = (uint8_t**)malloc(state->total_parity_shards * sizeof(uint8_t *));
    if (!fec_blocks) {
        req->error_status = 1;
        state->log->error(state->env->log_options, state->handle,
                          "memory error: unable to malloc");
        goto clean_variables;
    }

    for (int i = 0; i < state->total_parity_shards; i++) {
        fec_blocks[i] = map_parity + i * state->shard_size;
    }

    state->log->debug(state->env->log_options, state->handle,
                      "Encoding parity shards, data_shards: %i, " \
                      "parity_shards: %i, shard_size: %" PRIu64 ", " \
                      "file_size: %" PRIu64,
                      state->total_data_shards,
                      state->total_parity_shards,
                      state->shard_size,
                      state->file_size);

    reed_solomon *rs = reed_solomon_new(state->total_data_shards,
                                        state->total_parity_shards);
    reed_solomon_encode2(rs, data_blocks, fec_blocks, state->total_shards,
                         state->shard_size, state->file_size);
    reed_solomon_release(rs);

clean_variables:
    if (data_blocks) {
        free(data_blocks);
    }

    if (fec_blocks) {
        free(fec_blocks);
    }

    if (tmp_folder) {
        free(tmp_folder);
    }

    if (map) {
        unmap_file(map, state->file_size);
    }

    if (map_parity) {
        unmap_file(map_parity, parity_size);
    }

    if (parity_file) {
        fclose(parity_file);
    }

    if (encrypted_file) {
        fclose(encrypted_file);
    }
}

static void queue_create_parity_shards(storj_upload_state_t *state)
{
    uv_work_t *work = uv_work_new();
    if (!work) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    state->pending_work_count += 1;

    parity_shard_req_t *req = malloc(sizeof(parity_shard_req_t));

    req->error_status = 0;
    req->upload_state = state;
    work->data = req;

    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               create_parity_shards, after_create_parity_shards);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
    }

    state->awaiting_parity_shards = false;
}

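// Exchange reports tell the bridge how each farmer transfer went. A report
// is retried until the bridge answers 201 or six attempts have been made;
// send_status tracks where each report is in that lifecycle.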
static void after_send_exchange_report(uv_work_t *work, int status)
{
    shard_send_report_t *req = work->data;

    req->state->pending_work_count -= 1;

    if (status == UV_ECANCELED) {
        req->report->send_count = 0;
        req->report->send_status = STORJ_REPORT_AWAITING_SEND;

        goto clean_variables;
    }

    req->report->send_count += 1;

    if (req->status_code == 201) {
        req->state->env->log->info(req->state->env->log_options,
                                   req->state->handle,
                                   "Successfully sent exchange report for shard %d",
                                   req->report->pointer_index);

        req->report->send_status = STORJ_REPORT_NOT_PREPARED; // report has been sent
    } else if (req->report->send_count == 6) {
        req->report->send_status = STORJ_REPORT_NOT_PREPARED; // report failed retries
    } else {
        req->report->send_status = STORJ_REPORT_AWAITING_SEND; // reset report back to unsent
    }

clean_variables:
    queue_next_work(req->state);
    free(work->data);
    free(work);
}

static void send_exchange_report(uv_work_t *work)
{
    shard_send_report_t *req = work->data;
    storj_upload_state_t *state = req->state;

    struct json_object *body = json_object_new_object();

    json_object_object_add(body, "dataHash",
                           json_object_new_string(req->report->data_hash));

    json_object_object_add(body, "reporterId",
                           json_object_new_string(req->report->reporter_id));

    json_object_object_add(body, "farmerId",
                           json_object_new_string(req->report->farmer_id));

    json_object_object_add(body, "clientId",
                           json_object_new_string(req->report->client_id));

    json_object_object_add(body, "exchangeStart",
                           json_object_new_int64(req->report->start));

    json_object_object_add(body, "exchangeEnd",
                           json_object_new_int64(req->report->end));

    json_object_object_add(body, "exchangeResultCode",
                           json_object_new_int(req->report->code));

    json_object_object_add(body, "exchangeResultMessage",
                           json_object_new_string(req->report->message));

    int status_code = 0;

    // there should be an empty object in response
    struct json_object *response = NULL;
    int request_status = fetch_json(req->http_options,
                                    req->options, "POST",
                                    "/reports/exchanges", body,
                                    true, &response, &status_code);

    if (request_status) {
        state->log->warn(state->env->log_options, state->handle,
                         "Send exchange report error: %i", request_status);
    }

    req->status_code = status_code;

    // free all memory for body and response
    json_object_put(response);
    json_object_put(body);
}

static void queue_send_exchange_report(storj_upload_state_t *state, int index)
{
    if (state->shard[index].report->send_count == 6) {
        return;
    }

    state->env->log->info(state->env->log_options, state->handle,
                          "Sending exchange report for Shard index %d... (retry: %d)",
                          index,
                          state->shard[index].report->send_count);

    shard_tracker_t *shard = &state->shard[index];

    uv_work_t *work = malloc(sizeof(uv_work_t));
    assert(work != NULL);

    shard_send_report_t *req = malloc(sizeof(shard_send_report_t));

    req->http_options = state->env->http_options;
    req->options = state->env->bridge_options;
    req->status_code = 0;
    req->report = shard->report;
    req->report->send_status = STORJ_REPORT_SENDING;
    req->state = state;
    req->pointer_index = index;

    work->data = req;

    state->pending_work_count += 1;
    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               send_exchange_report, after_send_exchange_report);
    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
    }
}

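// Two checks run before any data moves: the target bucket must exist
// (status 200 from the bridge) and the encrypted file name must not
// (a 404 from the file-ids lookup is the success case). Each check is
// retried up to six times on other status codes.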
static void verify_bucket_id_callback(uv_work_t *work_req, int status)
{
    get_bucket_request_t *req = work_req->data;
    storj_upload_state_t *state = req->handle;

    state->log->info(state->env->log_options, state->handle,
                     "Checking if bucket id [%s] exists", state->bucket_id);

    state->pending_work_count -= 1;
    state->bucket_verify_count += 1;

    if (req->status_code == 200) {
        state->bucket_verified = true;
        goto clean_variables;
    } else if (req->status_code == 404 || req->status_code == 400) {
        state->log->error(state->env->log_options, state->handle,
                          "Bucket [%s] doesn't exist", state->bucket_id);
        state->error_status = STORJ_BRIDGE_BUCKET_NOTFOUND_ERROR;
    } else {
        state->log->error(state->env->log_options, state->handle,
                          "Request failed with status code: %i", req->status_code);

        if (state->bucket_verify_count == 6) {
            state->error_status = STORJ_BRIDGE_REQUEST_ERROR;
            state->bucket_verify_count = 0;
        }

        goto clean_variables;
    }
    state->bucket_verified = true;

clean_variables:
    queue_next_work(state);

    json_object_put(req->response);
    storj_free_get_bucket_request(req);
    free(work_req);
}

static void queue_verify_bucket_id(storj_upload_state_t *state)
{
    state->pending_work_count += 1;
    storj_bridge_get_bucket(state->env, state->bucket_id, state, verify_bucket_id_callback);
}

static void verify_file_name_callback(uv_work_t *work_req, int status)
{
    json_request_t *req = work_req->data;
    storj_upload_state_t *state = req->handle;

    state->pending_work_count -= 1;
    state->file_verify_count += 1;

    if (req->status_code == 404) {
        state->file_verified = true;
        goto clean_variables;
    } else if (req->status_code == 200) {
        state->log->error(state->env->log_options, state->handle,
                          "File [%s] already exists", state->file_name);
        state->error_status = STORJ_BRIDGE_BUCKET_FILE_EXISTS;
    } else {
        state->log->error(state->env->log_options, state->handle,
                          "Request failed with status code: %i", req->status_code);

        if (state->file_verify_count == 6) {
            state->error_status = STORJ_BRIDGE_REQUEST_ERROR;
            state->file_verify_count = 0;
        }

        goto clean_variables;
    }

    state->file_verified = true;

clean_variables:
    queue_next_work(state);

    json_object_put(req->response);
    free(req->path);
    free(req);
    free(work_req);
}

static void verify_file_name(uv_work_t *work)
{
    json_request_t *req = work->data;
    storj_upload_state_t *state = req->handle;
    int status_code = 0;

    state->log->info(state->env->log_options, state->handle,
                     "Checking if file name [%s] already exists...", state->file_name);

    req->error_code = fetch_json(req->http_options,
                                 req->options, req->method, req->path, req->body,
                                 req->auth, &req->response, &status_code);

    req->status_code = status_code;
}

static void queue_verify_file_name(storj_upload_state_t *state)
{
    state->pending_work_count += 1;

    CURL *curl = curl_easy_init();
    if (!curl) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    char *escaped = curl_easy_escape(curl, state->encrypted_file_name,
                                     strlen(state->encrypted_file_name));

    if (!escaped) {
        curl_easy_cleanup(curl);
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    char *path = str_concat_many(4, "/buckets/", state->bucket_id,
                                 "/file-ids/", escaped);
    curl_free(escaped);
    curl_easy_cleanup(curl);

    if (!path) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    uv_work_t *work = uv_work_new();
    if (!work) {
        free(path);
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    json_request_t *req = malloc(sizeof(json_request_t));
    if (!req) {
        free(path);
        free(work);
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    req->http_options = state->env->http_options;
    req->options = state->env->bridge_options;
    req->method = "GET";
    req->path = path;
    req->auth = true;
    req->body = NULL;
    req->response = NULL;
    req->error_code = 0;
    req->status_code = 0;
    req->handle = state;

    work->data = req;

    int status = uv_queue_work(state->env->loop, (uv_work_t*) work,
                               verify_file_name, verify_file_name_callback);

    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
        return;
    }
}

// Check if a frame/shard is already being prepared/pushed.
// We want to limit concurrent disk reads and network activity.
static int check_in_progress(storj_upload_state_t *state, int status)
{
    int active = 0;

    for (int index = 0; index < state->total_shards; index++) {
        if (state->shard[index].progress == status) {
            active += 1;
        }
    }

    return active;
}

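// Pushes are throttled: a shard only moves forward when fewer than
// push_frame_limit frame pushes (or push_shard_limit shard pushes) are
// already in flight, as counted by check_in_progress() above.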
static void queue_push_frame_and_shard(storj_upload_state_t *state)
{
    for (int index = 0; index < state->total_shards; index++) {

        if (state->shard[index].progress == AWAITING_PUSH_FRAME &&
            state->shard[index].report->send_status == STORJ_REPORT_NOT_PREPARED &&
            check_in_progress(state, PUSHING_FRAME) < state->push_frame_limit) {
            queue_push_frame(state, index);
        }

        if (state->shard[index].progress == AWAITING_PUSH_SHARD &&
            state->shard[index].report->send_status == STORJ_REPORT_NOT_PREPARED &&
            check_in_progress(state, PUSHING_SHARD) < state->push_shard_limit) {
            queue_push_shard(state, index);
        }
    }
}

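// queue_next_work is the heart of the upload state machine. It is called
// after every completed work item and re-derives what to do next from the
// state flags alone: verify the bucket, verify the file name, request a
// frame, (with Reed-Solomon) encrypt the file and build parity shards,
// prepare and push frames/shards, create the bucket entry, and send
// exchange reports.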
static void queue_next_work(storj_upload_state_t *state)
{
    storj_log_levels_t *log = state->log;
    storj_log_options_t *log_options = state->env->log_options;
    void *handle = state->handle;
    int *pending_work_count = &state->pending_work_count;

    if (state->canceled) {
        return cleanup_state(state);
    }

    // report any errors
    if (state->error_status != 0) {
        return cleanup_state(state);
    }

    // report upload complete
    if (state->completed_upload) {
        return cleanup_state(state);
    }

    // Verify that the bucket_id exists
    if (!state->bucket_verified) {
        queue_verify_bucket_id(state);
        goto finish_up;
    }

    // Verify that the file name doesn't exist
    if (!state->file_verified) {
        queue_verify_file_name(state);
        goto finish_up;
    }

    if (!state->frame_id && !state->requesting_frame) {
        queue_request_frame_id(state);
        goto finish_up;
    }

    if (state->rs) {
        if (!state->encrypted_file) {
            queue_create_encrypted_file(state);
            goto finish_up;
        }

        // Create parity shards using reed solomon
        if (state->awaiting_parity_shards) {
            queue_create_parity_shards(state);
            goto finish_up;
        }
    }

    for (int index = 0; index < state->total_shards; index++) {
        if (state->shard[index].progress == AWAITING_PREPARE_FRAME &&
            check_in_progress(state, PREPARING_FRAME) < state->prepare_frame_limit) {
            queue_prepare_frame(state, index);
        }
    }

    // Create the bucket entry once all shards have completed
    if (state->completed_shards == state->total_shards &&
        !state->creating_bucket_entry &&
        !state->completed_upload) {
        queue_create_bucket_entry(state);
    }

    for (int index = 0; index < state->total_shards; index++) {
        if (state->shard[index].report->send_status == STORJ_REPORT_AWAITING_SEND) {
            queue_send_exchange_report(state, index);
        }
    }

    // NB: This needs to be the last thing, there is a bug with mingw
    // builds and uv_async_init, where leaving a block will cause the state
    // pointer to change values.
    if (state->frame_id) {
        queue_push_frame_and_shard(state);
    }

finish_up:

    log->debug(log_options, handle,
               "Pending work count: %d", *pending_work_count);
}

static void begin_work_queue(uv_work_t *work, int status)
{
    storj_upload_state_t *state = work->data;

    // Load progress bar
    state->progress_cb(0, 0, 0, state->handle);

    state->pending_work_count -= 1;
    queue_next_work(state);

    free(work);
}

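// prepare_upload_state runs once, off the loop thread. Shard math: with
// shard_size c and file_size S, total_data_shards = ceil(S / c) and, when
// Reed-Solomon is on, total_parity_shards = ceil(2/3 * data shards). For
// example, a file that splits into 7 data shards gets ceil(14 / 3) = 5
// parity shards, so 12 shards are uploaded in total.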
static void prepare_upload_state(uv_work_t *work)
{
    storj_upload_state_t *state = work->data;

    // Get the file size, expected to be up to 10 TB
#ifdef _WIN32
    struct _stati64 st;

    if (_fstati64(fileno(state->original_file), &st) != 0) {
        state->error_status = STORJ_FILE_INTEGRITY_ERROR;
        return;
    }
#else
    struct stat st;
    if (fstat(fileno(state->original_file), &st) != 0) {
        state->error_status = STORJ_FILE_INTEGRITY_ERROR;
        return;
    }
#endif

    state->file_size = st.st_size;
    if (state->file_size < MIN_SHARD_SIZE) {
        state->rs = false;
    }

    // Set shard calculations
    state->shard_size = determine_shard_size(state->file_size, 0);
    if (state->shard_size == 0) {
        state->error_status = STORJ_FILE_SIZE_ERROR;
        return;
    }

    state->total_data_shards = ceil((double)state->file_size / state->shard_size);
    state->total_parity_shards = (state->rs) ? ceil((double)state->total_data_shards * 2.0 / 3.0) : 0;
    state->total_shards = state->total_data_shards + state->total_parity_shards;

    int tracker_calloc_amount = state->total_shards * sizeof(shard_tracker_t);
    state->shard = malloc(tracker_calloc_amount);
    if (!state->shard) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    for (int i = 0; i < state->total_shards; i++) {
        state->shard[i].progress = AWAITING_PREPARE_FRAME;
        state->shard[i].push_frame_request_count = 0;
        state->shard[i].push_shard_request_count = 0;
        state->shard[i].index = i;
        state->shard[i].pointer = farmer_pointer_new();
        if (!state->shard[i].pointer) {
            state->error_status = STORJ_MEMORY_ERROR;
            return;
        }
        state->shard[i].meta = shard_meta_new();
        if (!state->shard[i].meta) {
            state->error_status = STORJ_MEMORY_ERROR;
            return;
        }
        state->shard[i].meta->is_parity = (i + 1 > state->total_data_shards) ? true : false;
        state->shard[i].report = storj_exchange_report_new();
        if (!state->shard[i].report) {
            state->error_status = STORJ_MEMORY_ERROR;
            return;
        }
        state->shard[i].uploaded_size = 0;
        state->shard[i].work = NULL;
    }

    // Get the bucket key to encrypt the filename
    char *bucket_key_as_str = calloc(DETERMINISTIC_KEY_SIZE + 1, sizeof(char));
    generate_bucket_key(state->env->encrypt_options->mnemonic,
                        state->bucket_id,
                        &bucket_key_as_str);

    uint8_t *bucket_key = str2hex(strlen(bucket_key_as_str), bucket_key_as_str);
    if (!bucket_key) {
        state->error_status = STORJ_MEMORY_ERROR;
        return;
    }

    free(bucket_key_as_str);

    // Get the file name encryption key from the first half of an hmac with the magic
    struct hmac_sha512_ctx ctx1;
    hmac_sha512_set_key(&ctx1, SHA256_DIGEST_SIZE, bucket_key);
    hmac_sha512_update(&ctx1, SHA256_DIGEST_SIZE, BUCKET_META_MAGIC);
    uint8_t key[SHA256_DIGEST_SIZE];
    hmac_sha512_digest(&ctx1, SHA256_DIGEST_SIZE, key);

    // Generate the synthetic iv from the first half of an hmac with the bucket and filename
    struct hmac_sha512_ctx ctx2;
    hmac_sha512_set_key(&ctx2, SHA256_DIGEST_SIZE, bucket_key);
    hmac_sha512_update(&ctx2, strlen(state->bucket_id),
                       (uint8_t *)state->bucket_id);
    hmac_sha512_update(&ctx2, strlen(state->file_name),
                       (uint8_t *)state->file_name);
    uint8_t filename_iv[SHA256_DIGEST_SIZE];
    hmac_sha512_digest(&ctx2, SHA256_DIGEST_SIZE, filename_iv);

    free(bucket_key);

    char *encrypted_file_name;
    encrypt_meta(state->file_name, key, filename_iv, &encrypted_file_name);

    state->encrypted_file_name = encrypted_file_name;

    uint8_t *index = NULL;
    char *key_as_str = NULL;

    if (state->index) {
        index = str2hex(strlen(state->index), (char *)state->index);
        if (!index) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto cleanup;
        }
    } else {
        // Get a random index used for encryption
        index = calloc(SHA256_DIGEST_SIZE + 1, sizeof(uint8_t));
        if (!index) {
            state->error_status = STORJ_MEMORY_ERROR;
            goto cleanup;
        }
        random_buffer(index, SHA256_DIGEST_SIZE);
    }

    char *index_as_str = hex2str(SHA256_DIGEST_SIZE, index);
    if (!index_as_str) {
        state->error_status = STORJ_MEMORY_ERROR;
        goto cleanup;
    }

    state->index = index_as_str;

    // Calculate the file encryption key based on the index
    key_as_str = calloc(DETERMINISTIC_KEY_SIZE + 1, sizeof(char));
    if (!key_as_str) {
        state->error_status = STORJ_MEMORY_ERROR;
        goto cleanup;
    }

    int key_status = generate_file_key(state->env->encrypt_options->mnemonic,
                                       state->bucket_id,
                                       index_as_str,
                                       &key_as_str);
    if (key_status) {
        switch (key_status) {
            case 2:
                state->error_status = STORJ_HEX_DECODE_ERROR;
                break;
            default:
                state->error_status = STORJ_MEMORY_ERROR;
        }
        goto cleanup;
    }

    uint8_t *encryption_key = str2hex(strlen(key_as_str), key_as_str);
    if (!encryption_key) {
        state->error_status = STORJ_MEMORY_ERROR;
        goto cleanup;
    }
    state->encryption_key = encryption_key;

    uint8_t *encryption_ctr = calloc(AES_BLOCK_SIZE, sizeof(uint8_t));
    if (!encryption_ctr) {
        state->error_status = STORJ_MEMORY_ERROR;
        goto cleanup;
    }
    memcpy(encryption_ctr, index, AES_BLOCK_SIZE);
    state->encryption_ctr = encryption_ctr;

    if (state->rs) {
        state->parity_file_path = create_tmp_name(state, ".parity");
        state->encrypted_file_path = create_tmp_name(state, ".crypt");
    }

cleanup:
    if (key_as_str) {
        free(key_as_str);
    }

    if (index) {
        free(index);
    }
}

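// Temp files are named after the sha256 of the encrypted file name, hex
// encoded, so the path stays filesystem-safe regardless of what the
// encrypted name contains, e.g. <tmp_path>/<hex digest>.crypt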
char *create_tmp_name(storj_upload_state_t *state, char *extension)
{
    char *tmp_folder = strdup(state->env->tmp_path);
    int encode_len = BASE16_ENCODE_LENGTH(SHA256_DIGEST_SIZE);
    int file_name_len = strlen(state->encrypted_file_name);
    int extension_len = strlen(extension);
    int tmp_folder_len = strlen(tmp_folder);
    if (tmp_folder[tmp_folder_len - 1] == separator()) {
        tmp_folder[tmp_folder_len - 1] = '\0';
        tmp_folder_len -= 1;
    }

    char *path = calloc(
        tmp_folder_len + 1 + encode_len + extension_len + 2,
        sizeof(char)
    );

    // hash and encode the name for filesystem use
    struct sha256_ctx ctx;
    uint8_t digest[SHA256_DIGEST_SIZE];
    uint8_t digest_encoded[encode_len + 1];
    sha256_init(&ctx);
    sha256_update(&ctx, file_name_len, state->encrypted_file_name);
    sha256_digest(&ctx, SHA256_DIGEST_SIZE, digest);
    base16_encode_update(digest_encoded, SHA256_DIGEST_SIZE, digest);
    digest_encoded[encode_len] = '\0';

    sprintf(path,
            "%s%c%s%s%c",
            tmp_folder,
            separator(),
            digest_encoded,
            extension,
            '\0');

    free(tmp_folder);
    return path;
}

STORJ_API int storj_bridge_store_file_cancel(storj_upload_state_t *state)
{
    if (state->canceled) {
        return 0;
    }

    state->canceled = true;

    state->error_status = STORJ_TRANSFER_CANCELED;

    // loop over all shards, and cancel any that are queued to be uploaded;
    // any uploads that are in progress will monitor the state->canceled
    // status and exit when it is set to true
    for (int i = 0; i < state->total_shards; i++) {
        shard_tracker_t *shard = &state->shard[i];
        if (shard->progress == PUSHING_SHARD) {
            uv_cancel((uv_req_t *)shard->work);
        }
    }

    return 0;
}

STORJ_API storj_upload_state_t *storj_bridge_store_file(storj_env_t *env,
                                                        storj_upload_opts_t *opts,
                                                        void *handle,
                                                        storj_progress_cb progress_cb,
                                                        storj_finished_upload_cb finished_cb)
{
    if (!opts->fd) {
        env->log->error(env->log_options, handle, "Invalid file descriptor");
        return NULL;
    }

    storj_upload_state_t *state = malloc(sizeof(storj_upload_state_t));
    if (!state) {
        return NULL;
    }

    state->env = env;
    if (opts->index && strlen(opts->index) == 64) {
        state->index = opts->index;
    } else {
        state->index = NULL;
    }
    state->file_id = NULL;
    state->file_name = opts->file_name;
    state->encrypted_file_name = NULL;
    state->original_file = opts->fd;
    state->file_size = 0;
    state->bucket_id = opts->bucket_id;
    state->bucket_key = NULL;
    state->completed_shards = 0;
    state->total_shards = 0;
    state->total_data_shards = 0;
    state->total_parity_shards = 0;
    state->shard_size = 0;
    state->total_bytes = 0;
    state->uploaded_bytes = 0;
    state->exclude = NULL;
    state->frame_id = NULL;
    state->hmac_id = NULL;
    state->encryption_key = NULL;
    state->encryption_ctr = NULL;

    state->rs = (opts->rs == false) ? false : true;
    state->awaiting_parity_shards = true;
    state->parity_file_path = NULL;
    state->parity_file = NULL;

    // Only used when Reed-Solomon runs after encryption
    state->encrypted_file = NULL;
    state->encrypted_file_path = NULL;
    state->creating_encrypted_file = false;

    state->requesting_frame = false;
    state->completed_upload = false;
    state->creating_bucket_entry = false;
    state->received_all_pointers = false;
    state->final_callback_called = false;
    state->canceled = false;
    state->bucket_verified = false;
    state->file_verified = false;

    state->progress_finished = false;

    state->push_shard_limit = (opts->push_shard_limit > 0) ? opts->push_shard_limit : PUSH_SHARD_LIMIT;
    state->push_frame_limit = (opts->push_frame_limit > 0) ? opts->push_frame_limit : PUSH_FRAME_LIMIT;
    state->prepare_frame_limit = (opts->prepare_frame_limit > 0) ? opts->prepare_frame_limit : PREPARE_FRAME_LIMIT;

    state->frame_request_count = 0;
    state->add_bucket_entry_count = 0;
    state->bucket_verify_count = 0;
    state->file_verify_count = 0;
    state->create_encrypted_file_count = 0;

    state->progress_cb = progress_cb;
    state->finished_cb = finished_cb;
    state->error_status = 0;
    state->log = env->log;
    state->handle = handle;
    state->shard = NULL;
    state->pending_work_count = 0;

    uv_work_t *work = uv_work_new();
    work->data = state;

    state->pending_work_count += 1;

    int status = uv_queue_work(env->loop, (uv_work_t*) work,
                               prepare_upload_state, begin_work_queue);
    if (status) {
        state->error_status = STORJ_QUEUE_ERROR;
    }
    return state;
}
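
// A minimal caller sketch, assuming an initialized storj_env_t *env and
// callbacks matching the storj_progress_cb and storj_finished_upload_cb
// typedefs; the option fields follow the opts usage above:
//
//     storj_upload_opts_t opts = {0};
//     opts.bucket_id = "<bucket id>";
//     opts.file_name = "example.data";
//     opts.fd = fopen("example.data", "r");
//     opts.rs = true;                       // enable Reed-Solomon parity
//
//     storj_upload_state_t *upload = storj_bridge_store_file(env, &opts, NULL,
//                                                            progress_cb,
//                                                            finished_cb);
//     uv_run(env->loop, UV_RUN_DEFAULT);    // drive the queued work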