condo 1.0.6 → 2.0.0

This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
Files changed (60)
  1. checksums.yaml +4 -4
  2. data/README.textile +19 -32
  3. data/lib/condo.rb +124 -127
  4. data/lib/condo/configuration.rb +41 -76
  5. data/lib/condo/engine.rb +32 -39
  6. data/lib/condo/errors.rb +6 -8
  7. data/lib/condo/strata/amazon_s3.rb +246 -294
  8. data/lib/condo/strata/google_cloud_storage.rb +238 -272
  9. data/lib/condo/strata/open_stack_swift.rb +251 -0
  10. data/lib/condo/version.rb +1 -1
  11. metadata +31 -96
  12. data/app/assets/javascripts/condo.js +0 -9
  13. data/app/assets/javascripts/condo/amazon.js +0 -403
  14. data/app/assets/javascripts/condo/condo.js +0 -184
  15. data/app/assets/javascripts/condo/config.js +0 -69
  16. data/app/assets/javascripts/condo/google.js +0 -338
  17. data/app/assets/javascripts/condo/md5/hash.worker.emulator.js +0 -23
  18. data/app/assets/javascripts/condo/md5/hash.worker.js +0 -11
  19. data/app/assets/javascripts/condo/md5/hasher.js +0 -119
  20. data/app/assets/javascripts/condo/md5/spark-md5.js +0 -599
  21. data/app/assets/javascripts/condo/rackspace.js +0 -326
  22. data/app/assets/javascripts/condo/services/abstract-md5.js.erb +0 -86
  23. data/app/assets/javascripts/condo/services/base64.js +0 -184
  24. data/app/assets/javascripts/condo/services/broadcaster.js +0 -26
  25. data/app/assets/javascripts/condo/services/uploader.js +0 -302
  26. data/app/assets/javascripts/core/core.js +0 -4
  27. data/app/assets/javascripts/core/services/1-safe-apply.js +0 -17
  28. data/app/assets/javascripts/core/services/2-messaging.js +0 -171
  29. data/lib/condo/strata/rackspace_cloud_files.rb +0 -245
  30. data/test/condo_test.rb +0 -27
  31. data/test/dummy/README.rdoc +0 -261
  32. data/test/dummy/Rakefile +0 -7
  33. data/test/dummy/app/assets/javascripts/application.js +0 -15
  34. data/test/dummy/app/assets/stylesheets/application.css +0 -13
  35. data/test/dummy/app/controllers/application_controller.rb +0 -3
  36. data/test/dummy/app/helpers/application_helper.rb +0 -2
  37. data/test/dummy/app/views/layouts/application.html.erb +0 -14
  38. data/test/dummy/config.ru +0 -4
  39. data/test/dummy/config/application.rb +0 -59
  40. data/test/dummy/config/boot.rb +0 -10
  41. data/test/dummy/config/database.yml +0 -25
  42. data/test/dummy/config/environment.rb +0 -5
  43. data/test/dummy/config/environments/development.rb +0 -37
  44. data/test/dummy/config/environments/production.rb +0 -67
  45. data/test/dummy/config/environments/test.rb +0 -37
  46. data/test/dummy/config/initializers/backtrace_silencers.rb +0 -7
  47. data/test/dummy/config/initializers/inflections.rb +0 -15
  48. data/test/dummy/config/initializers/mime_types.rb +0 -5
  49. data/test/dummy/config/initializers/secret_token.rb +0 -7
  50. data/test/dummy/config/initializers/session_store.rb +0 -8
  51. data/test/dummy/config/initializers/wrap_parameters.rb +0 -14
  52. data/test/dummy/config/locales/en.yml +0 -5
  53. data/test/dummy/config/routes.rb +0 -58
  54. data/test/dummy/public/404.html +0 -26
  55. data/test/dummy/public/422.html +0 -26
  56. data/test/dummy/public/500.html +0 -25
  57. data/test/dummy/public/favicon.ico +0 -0
  58. data/test/dummy/script/rails +0 -6
  59. data/test/integration/navigation_test.rb +0 -10
  60. data/test/test_helper.rb +0 -15
data/app/assets/javascripts/condo/amazon.js
@@ -1,403 +0,0 @@
- /**
- * CoTag Condo Amazon S3 Strategy
- * Direct to cloud resumable uploads for Amazon S3
- *
- * Copyright (c) 2012 CoTag Media.
- *
- * @author Stephen von Takach <steve@cotag.me>
- * @copyright 2012 cotag.me
- *
- *
- * References:
- * *
- *
- **/
-
-
- (function(angular, base64, undefined) {
- 'use strict';
-
- angular.module('Condo').
-
- factory('Condo.Amazon', ['$q', 'Condo.Md5', function($q, md5) {
- var PENDING = 0,
- STARTED = 1,
- PAUSED = 2,
- UPLOADING = 3,
- COMPLETED = 4,
- ABORTED = 5,
-
-
-
- hexToBin = function(input) {
- var result = "";
-
- if ((input.length % 2) > 0) {
- input = '0' + input;
- }
-
- for (var i = 0, length = input.length; i < length; i += 2) {
- result += String.fromCharCode(parseInt(input.slice(i, i + 2), 16));
- }
-
- return result;
- },
-
-
- Amazon = function (api, file) {
- var self = this,
- strategy = null,
- part_size = 5242880, // Multi-part uploads should be bigger then this
- pausing = false,
- defaultError = function(reason) {
- self.error = !pausing;
- pausing = false;
- self.pause(reason);
- },
-
- restart = function() {
- strategy = null;
- },
-
-
- completeUpload = function() {
- api.update().then(function(data) {
- self.progress = self.size; // Update to 100%
- self.state = COMPLETED;
- }, defaultError);
- },
-
-
- //
- // We need to sign our uploads so amazon can confirm they are valid for us
- // Part numbers can be any number from 1 to 10,000 - inclusive
- //
- build_request = function(part_number) {
- var current_part;
-
- if (file.size > part_size) { // If file bigger then 5mb we expect a chunked upload
- var endbyte = part_number * part_size;
- if (endbyte > file.size)
- endbyte = file.size;
- current_part = file.slice((part_number - 1) * part_size, endbyte);
- } else {
- current_part = file;
- }
-
- return md5.hash(current_part).then(function(val) {
- return {
- data: current_part,
- data_id: val,
- part_number: part_number
- }
- }, function(reason){
- return $q.reject(reason);
- });
- },
-
- //
- // Direct file upload strategy
- //
- AmazonDirect = function(data) {
- //
- // resume
- // abort
- // pause
- //
- var $this = this,
- finalising = false;
-
- //
- // Update the parent
- //
- self.state = UPLOADING;
-
-
- //
- // This will only be called when the upload has finished and we need to inform the application
- //
- this.resume = function() {
- self.state = UPLOADING;
- completeUpload();
- }
-
- this.pause = function() {
- api.abort();
-
- if(!finalising) {
- restart(); // Should occur before events triggered
- self.progress = 0;
- }
- };
-
-
- //
- // AJAX for upload goes here
- //
- data['data'] = file;
- api.process_request(data, function(progress) {
- self.progress = progress;
- }).then(function(result) {
- finalising = true;
- $this.resume(); // Resume informs the application that the upload is complete
- }, function(reason) {
- self.progress = 0;
- defaultError(reason);
- });
- }, // END DIRECT
-
-
- //
- // Chunked upload strategy--------------------------------------------------
- //
- AmazonChunked = function (data, first_chunk) {
- //
- // resume
- // abort
- // pause
- //
- var part_ids = [],
- last_part = 0,
-
-
- generatePartManifest = function() {
- var list = '<CompleteMultipartUpload>';
-
- for (var i = 0, length = part_ids.length; i < length; i += 1) {
- list += '<Part><PartNumber>' + (i + 1) + '</PartNumber><ETag>"' + part_ids[i] + '"</ETag></Part>';
- }
- list += '</CompleteMultipartUpload>';
- return list;
- },
-
- //
- // Get the next part signature
- //
- next_part = function(part_number) {
- //
- // Check if we are past the end of the file
- //
- if ((part_number - 1) * part_size < file.size) {
-
- self.progress = (part_number - 1) * part_size; // Update the progress
-
- build_request(part_number).then(function(result) {
- if (self.state != UPLOADING)
- return; // upload was paused or aborted as we were reading the file
-
- api.edit(part_number, base64.encode(hexToBin(result.data_id))).
- then(function(data) {
- set_part(data, result);
- }, defaultError);
-
- }, defaultError); // END BUILD_REQUEST
-
- } else {
- //
- // We're after the final commit
- //
- api.edit('finish').
- then(function(request) {
- request['data'] = generatePartManifest();
- api.process_request(request).then(completeUpload, defaultError);
- }, defaultError);
- }
- },
-
-
- //
- // Send a part to amazon
- //
- set_part = function(request, part_info) {
- request['data'] = part_info.data;
- api.process_request(request, function(progress) {
- self.progress = (part_info.part_number - 1) * part_size + progress;
- }).then(function(result) {
- part_ids.push(part_info.data_id); // We need to record the list of part IDs for completion
- last_part = part_info.part_number;
- next_part(last_part + 1);
- }, function(reason) {
- self.progress = (part_info.part_number - 1) * part_size;
- defaultError(reason);
- });
- };
-
-
- self.state = UPLOADING;
-
- this.resume = function() {
- self.state = UPLOADING;
- next_part(last_part + 1);
- };
-
- this.pause = function() {
- api.abort();
- };
-
-
- //
- // We need to check if we are grabbing a parts list or creating an upload
- //
- api.process_request(data).then(function(response) {
- if(data.type == 'parts') { // was the original request for a list of parts
- //
- // NextPartNumberMarker == the final part in the current request
- // TODO:: if IsTruncated is set then we need to keep getting parts
- //
- response = $(response[0]);
- var next = parseInt(response.find('NextPartNumberMarker').eq(0).text()),
- etags = response.find('ETag');
-
- etags.each(function(index) {
- part_ids.push($(this).text().replace(/"{1}/gi,'')); // Removes " from strings
- });
-
- last_part = next; // So we can resume
- next_part(next + 1); // As NextPartNumberMarker is just the last part uploaded
- } else {
- //
- // We've created the upload - we need to update the application with the upload id.
- // This will also return the request for uploading the first part which we've already prepared
- //
- api.update({
- resumable_id: $(response[0]).find('UploadId').eq(0).text(),
- file_id: base64.encode(hexToBin(first_chunk.data_id)),
- part: 1
- }).then(function(data) {
- set_part(data, first_chunk); // Parts start at 1
- }, function(reason) {
- defaultError(reason);
- restart(); // Easier to start from the beginning
- });
- }
- }, function(reason) {
- defaultError(reason);
- restart(); // We need to get a new request signature
- });
- }; // END CHUNKED
-
-
- //
- // Variables required for all drivers
- //
- this.state = PENDING;
- this.progress = 0;
- this.message = 'pending';
- this.name = file.name;
- this.size = file.size;
- this.error = false;
-
-
- //
- // File path is optional (amazon supports paths as part of the key name)
- // http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/ListingKeysHierarchy.html
- //
- if(!!file.dir_path)
- this.path = file.dir_path;
-
-
- //
- // Support file slicing
- //
- if (typeof(file.slice) != 'function')
- file.slice = file.webkitSlice || file.mozSlice;
-
-
- this.start = function(){
- if(strategy == null) { // We need to create the upload
- self.error = false;
- pausing = false;
-
- //
- // Update part size if required
- //
- if((part_size * 9999) < file.size) {
- part_size = file.size / 9999;
- if(part_size > (5 * 1024 * 1024 * 1024)) { // 5GB limit on part sizes
- this.abort('file too big');
- return;
- }
- }
-
- this.message = null;
- this.state = STARTED;
- strategy = {}; // This function shouldn't be called twice so we need a state (TODO:: fix this)
-
- build_request(1).then(function(result) {
- if (self.state != STARTED)
- return; // upload was paused or aborted as we were reading the file
-
- api.create({file_id: base64.encode(hexToBin(result.data_id))}).
- then(function(data) {
- if(data.type == 'direct_upload') {
- strategy = new AmazonDirect(data);
- } else {
- strategy = new AmazonChunked(data, result);
- }
- }, defaultError);
-
- }, defaultError); // END BUILD_REQUEST
-
-
- } else if (this.state == PAUSED) { // We need to resume the upload if it is paused
- this.message = null;
- self.error = false;
- pausing = false;
- strategy.resume();
- }
- };
-
- this.pause = function(reason) {
- if(strategy != null && this.state == UPLOADING) { // Check if the upload is uploading
- this.state = PAUSED;
- pausing = true;
- strategy.pause();
- } else if (this.state <= STARTED) {
- this.state = PAUSED;
- restart();
- }
- if(this.state == PAUSED)
- this.message = reason;
- };
-
- this.abort = function(reason) {
- if(strategy != null && this.state < COMPLETED) { // Check the upload has not finished
- var old_state = this.state;
-
- this.state = ABORTED;
- api.abort();
-
-
- //
- // As we may not have successfully deleted the upload
- // or we aborted before we received a response from create
- //
- restart(); // nullifies strategy
-
-
- //
- // if we have an upload_id then we should destroy the upload
- // we won't worry if this fails as it should be automatically cleaned up by the back end
- //
- if(old_state > STARTED) {
- api.destroy();
- }
-
- this.message = reason;
- }
- };
- }; // END AMAZON
-
-
- return {
- new_upload: function(api, file) {
- return new Amazon(api, file);
- }
- };
- }]).
-
- config(['Condo.ApiProvider', function (ApiProvider) {
- ApiProvider.register('AmazonS3', 'Condo.Amazon');
- }]);
-
- })(angular, window.base64);
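
For context on the part-size handling in the removed strategy above: S3 multipart uploads accept at most 10,000 parts of up to 5 GB each, so the client grew its 5 MiB default chunk whenever a file would otherwise need too many parts. A minimal standalone sketch of that rule (the function name choosePartSize and the Math.ceil rounding are illustrative additions, not taken from the gem):

    // Pick a multipart chunk size for a file of fileSize bytes.
    function choosePartSize(fileSize) {
        var partSize = 5242880;                    // 5 MiB default, as in the removed code
        var maxParts = 10000;                      // S3 allows part numbers 1..10,000
        var maxPartSize = 5 * 1024 * 1024 * 1024;  // 5 GB per-part limit

        // Grow the part size if the default would exceed the part count limit
        if (partSize * (maxParts - 1) < fileSize) {
            partSize = Math.ceil(fileSize / (maxParts - 1));
            if (partSize > maxPartSize) {
                return null;                       // file too big to upload within these limits
            }
        }
        return partSize;
    }

    // e.g. choosePartSize(100 * 1024 * 1024)        -> 5242880 (default is enough)
    //      choosePartSize(200 * 1024 * 1024 * 1024) -> 21476985 (~20.5 MiB parts)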
data/app/assets/javascripts/condo/condo.js
@@ -1,184 +0,0 @@
- /**
- * CoTag Condo
- * Direct to cloud resumable uploads
- *
- * Copyright (c) 2012 CoTag Media.
- *
- * @author Stephen von Takach <steve@cotag.me>
- * @copyright 2012 cotag.me
- *
- *
- * References:
- * * http://ericterpstra.com/2012/09/angular-cats-part-3-communicating-with-broadcast/
- * * http://docs.angularjs.org/api/ng.$rootScope.Scope#$watch
- *
- **/
-
-
- (function (angular, undefined) {
- 'use strict';
-
- //
- // Create a controller for managing the upload states
- //
- angular.module('Condo', ['Core']).
- controller('Condo.Controller', ['$scope', 'Condo.Api', 'Condo.Broadcast', 'Condo.Config', function($scope, api, broadcaster, config) {
-
- $scope.uploads = [];
- $scope.upload_count = 0;
-
-
- //
- // See Condo.Config for configuration options
- //
- $scope.endpoint = config.endpoint;
- $scope.autostart = config.autostart;
- $scope.ignore_errors = config.ignore_errors; // Continue to autostart after an error?
- $scope.parallelism = config.parallelism; // number of uploads at once
-
-
- $scope.add = function(files) {
- var length = files.length,
- i = 0,
- ret = 0, // We only want to check for auto-start after the files have been added
- file;
-
- for (; i < length; i += 1) {
- file = files[i];
-
- if(file.size <= 0 || file.type == '')
- continue;
-
- //
- // check file size is acceptable
- //
- if(!config.file_checker(file) || (config.size_limit != undefined && file.size > config.size_limit)) {
- broadcaster.publish('coNotice', {
- type: 'warn',
- number: 0,
- file: file
- });
- continue;
- }
-
- $scope.upload_count += 1;
-
- api.check_provider($scope.endpoint, files[i]).then(function(upload){
- ret += 1;
- $scope.uploads.push(upload);
- if(ret == length)
- $scope.check_autostart();
- }, function(failure) {
-
- $scope.upload_count -= 1;
-
- ret += 1;
- if(ret == length)
- $scope.check_autostart();
-
- //
- // broadcast this so it can be handled by a directive
- //
- broadcaster.publish('coNotice', failure);
- });
- }
- };
-
-
- $scope.abort = function(upload) {
- upload.abort();
- $scope.check_autostart();
- };
-
-
- $scope.remove = function(upload) {
- //
- // Splice(upload, 1) was unreliable. This is better
- //
- for (var i = 0, length = $scope.uploads.length; i < length; i += 1) {
- if($scope.uploads[i] === upload) {
- $scope.uploads.splice(i, 1);
- $scope.upload_count -= 1;
- break;
- }
- }
- };
-
-
- $scope.playpause = function(upload) {
- if (upload.state == 3) // Uploading
- upload.pause();
- else
- upload.start();
- };
-
-
- //
- // Watch autostart and trigger a check when it is changed
- //
- $scope.$watch('autostart', function(newValue, oldValue) {
- if (newValue === true)
- $scope.check_autostart();
- });
-
-
- //
- // Autostart more uploads as this is bumped up
- //
- $scope.$watch('parallelism', function(newValue, oldValue) {
- if(newValue > oldValue)
- $scope.check_autostart();
- });
-
-
- $scope.check_autostart = function() {
- //
- // Check if any uploads have been started already
- // If there are no active uploads we'll auto-start
- //
- // PENDING = 0,
- // STARTED = 1,
- // PAUSED = 2,
- // UPLOADING = 3,
- // COMPLETED = 4,
- // ABORTED = 5
- //
- if ($scope.autostart) {
- var shouldStart = true,
- state, i, length, started = 0;
-
- for (i = 0, length = $scope.uploads.length; i < length; i += 1) {
- state = $scope.uploads[i].state;
-
- //
- // Count started uploads (that don't have errors if we are ignoring errors)
- // Up until we've reached our parallel limit, then stop
- //
- if (state > 0 && state < 4 && !($scope.uploads[i].error && $scope.ignore_errors)) {
- started += 1;
- if(started >= $scope.parallelism) {
- shouldStart = false;
- break;
- }
- }
- }
-
- if (shouldStart) {
- started = $scope.parallelism - started; // How many can we start
-
- for (i = 0; i < length; i += 1) {
- if ($scope.uploads[i].state == 0) {
- $scope.uploads[i].start();
-
- started -= 1;
- if(started <= 0) // Break if we can't start anymore
- break;
- }
- }
- }
- }
- };
-
- }]);
-
- })(angular);
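
The check_autostart logic in the removed controller above counts in-flight uploads against the configured parallelism before starting pending ones. A simplified standalone sketch of that idea (it omits the error/ignore_errors handling and the $scope wiring, and the helper name checkAutostart is illustrative only):

    var PENDING = 0, STARTED = 1, PAUSED = 2, UPLOADING = 3, COMPLETED = 4, ABORTED = 5;

    function checkAutostart(uploads, parallelism) {
        // Count uploads that are already in flight (started, paused or uploading)
        var active = 0;
        uploads.forEach(function (upload) {
            if (upload.state > PENDING && upload.state < COMPLETED) {
                active += 1;
            }
        });

        // Start pending uploads until the parallel limit is reached
        uploads.forEach(function (upload) {
            if (active < parallelism && upload.state === PENDING) {
                upload.start();
                active += 1;
            }
        });
    }

    // e.g. checkAutostart(uploads, 3) after files are added, an upload finishes,
    // or the parallelism setting is increased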