condo 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LGPL3-LICENSE +165 -0
- data/README.textile +20 -0
- data/Rakefile +40 -0
- data/app/assets/javascripts/condo.js +7 -0
- data/app/assets/javascripts/condo/amazon.js +409 -0
- data/app/assets/javascripts/condo/base64.js +192 -0
- data/app/assets/javascripts/condo/controller.js +162 -0
- data/app/assets/javascripts/condo/google.js +292 -0
- data/app/assets/javascripts/condo/rackspace.js +340 -0
- data/app/assets/javascripts/condo/spark-md5.js +470 -0
- data/app/assets/javascripts/condo/uploader.js +298 -0
- data/lib/condo.rb +267 -0
- data/lib/condo/configuration.rb +129 -0
- data/lib/condo/engine.rb +36 -0
- data/lib/condo/errors.rb +9 -0
- data/lib/condo/strata/amazon_s3.rb +301 -0
- data/lib/condo/strata/google_cloud_storage.rb +306 -0
- data/lib/condo/strata/rackspace_cloud_files.rb +223 -0
- data/lib/condo/version.rb +3 -0
- data/lib/tasks/condo_tasks.rake +4 -0
- data/test/condo_test.rb +27 -0
- data/test/dummy/README.rdoc +261 -0
- data/test/dummy/Rakefile +7 -0
- data/test/dummy/app/assets/javascripts/application.js +15 -0
- data/test/dummy/app/assets/stylesheets/application.css +13 -0
- data/test/dummy/app/controllers/application_controller.rb +3 -0
- data/test/dummy/app/helpers/application_helper.rb +2 -0
- data/test/dummy/app/views/layouts/application.html.erb +14 -0
- data/test/dummy/config.ru +4 -0
- data/test/dummy/config/application.rb +59 -0
- data/test/dummy/config/boot.rb +10 -0
- data/test/dummy/config/database.yml +25 -0
- data/test/dummy/config/environment.rb +5 -0
- data/test/dummy/config/environments/development.rb +37 -0
- data/test/dummy/config/environments/production.rb +67 -0
- data/test/dummy/config/environments/test.rb +37 -0
- data/test/dummy/config/initializers/backtrace_silencers.rb +7 -0
- data/test/dummy/config/initializers/inflections.rb +15 -0
- data/test/dummy/config/initializers/mime_types.rb +5 -0
- data/test/dummy/config/initializers/secret_token.rb +7 -0
- data/test/dummy/config/initializers/session_store.rb +8 -0
- data/test/dummy/config/initializers/wrap_parameters.rb +14 -0
- data/test/dummy/config/locales/en.yml +5 -0
- data/test/dummy/config/routes.rb +58 -0
- data/test/dummy/db/test.sqlite3 +0 -0
- data/test/dummy/log/test.log +25 -0
- data/test/dummy/public/404.html +26 -0
- data/test/dummy/public/422.html +26 -0
- data/test/dummy/public/500.html +25 -0
- data/test/dummy/public/favicon.ico +0 -0
- data/test/dummy/script/rails +6 -0
- data/test/integration/navigation_test.rb +10 -0
- data/test/test_helper.rb +15 -0
- metadata +180 -0
@@ -0,0 +1,340 @@
|
|
1
|
+
/**
 * CoTag Condo Rackspace Cloud Files Strategy
 * Direct to cloud resumable uploads for Rackspace Cloud Files
 *
 * Copyright (c) 2012 CoTag Media.
 *
 * @author Stephen von Takach <steve@cotag.me>
 * @copyright 2012 cotag.me
 *
 *
 * References:
 * * https://github.com/umdjs/umd
 * * https://github.com/addyosmani/jquery-plugin-patterns
 * *
 *
 **/

(function (factory) {
    if (typeof define === 'function' && define.amd) {
        // AMD
        define(['jquery', 'spark-md5', 'condo_uploader'], factory);
    } else {
        // Browser globals
        factory(jQuery, window.SparkMD5, window.CondoUploader);
    }
}(function ($, MD5, uploads, undefined) {
    'use strict';

    //
    // TODO:: Create an Amazon, google factory etc
    // We should split all these into different files too (controller and factories separate from directives and views)
    // So we can have different views for the same controller
    //
    uploads.factory('Condo.RackspaceCloudFiles', ['$rootScope', '$q', function($rootScope, $q) {

        // Upload life-cycle states (monotonically increasing except for PAUSED)
        var PENDING = 0,
            STARTED = 1,
            PAUSED = 2,
            UPLOADING = 3,
            COMPLETED = 4,
            ABORTED = 5,


        //
        // One instance per file being uploaded.
        // `api` is the Condo back-end adapter (create/update/edit/destroy/abort/process_request);
        // `file` is the browser File object being sent.
        //
        Rackspace = function (api, file) {
            var self = this,
                strategy = null,            // null until start() chooses Direct or Chunked
                part_size = 2097152,        // 2MB — multi-part uploads should be bigger than this
                defaultError = function(reason) {
                    self.pause(reason);
                },

            // Forget the chosen strategy so start() can rebuild from scratch
            restart = function() {
                strategy = null;
            },


            // Final notification to the back end that all bytes are up
            completeUpload = function() {
                api.update().then(function(data) {
                    self.state = COMPLETED;
                }, defaultError);
            },


            //
            // We need to sign our uploads so rackspace can confirm they are valid for us.
            // Reads the relevant slice of the file and resolves with its MD5.
            // TODO:: use http://updates.html5rocks.com/2011/12/Transferable-Objects-Lightning-Fast
            // where available :)
            //
            build_request = function(part_number) {
                var result = $q.defer(),
                    reader = new FileReader(),
                    fail = function(){
                        result.reject('file read failed');
                    },
                    current_part;

                if (file.size > part_size) {    // If the file is bigger than part_size (2MB) we expect a chunked upload
                    var endbyte = part_number * part_size;
                    if (endbyte > file.size)
                        endbyte = file.size;
                    current_part = file.slice((part_number - 1) * part_size, endbyte);
                } else {
                    current_part = file;
                }

                reader.onload = function(e) {
                    result.resolve({
                        data: current_part,
                        data_id: MD5.hashBinary(e.target.result),
                        part_number: part_number
                    });


                    if(!$rootScope.$$phase) {
                        $rootScope.$apply();    // This triggers the promise response if required
                    }
                };
                reader.onerror = fail;
                reader.onabort = fail;
                reader.readAsBinaryString(current_part);

                return result.promise;
            },

            //
            // Direct file upload strategy (whole file in a single request)
            //
            RackspaceDirect = function(data) {
                //
                // resume
                // abort
                // pause
                //
                var $this = this,
                    finalising = false;     // true once the body has been sent and we are confirming with the back end

                //
                // Update the parent
                //
                self.state = UPLOADING;


                //
                // This will only be called when the upload has finished and we need to inform the application
                //
                this.resume = function() {
                    self.state = UPLOADING;
                    completeUpload();
                }

                this.pause = function() {
                    api.abort();

                    if(!finalising) {
                        restart();          // Should occur before events triggered
                        self.progress = 0;  // a direct upload cannot resume mid-body
                    }
                };


                //
                // AJAX for upload goes here
                //
                data['data'] = file;
                api.process_request(data, function(progress) {
                    self.progress = progress;
                }).then(function(result) {
                    finalising = true;
                    $this.resume();         // Resume informs the application that the upload is complete
                }, function(reason) {
                    self.progress = 0;
                    defaultError(reason);
                });
            }, // END DIRECT


            //
            // Chunked upload strategy--------------------------------------------------
            //
            RackspaceChunked = function (data, first_chunk) {
                //
                // resume
                // abort
                // pause
                //
                var last_part = 0,          // highest part number confirmed uploaded

                //
                // Get the next part signature
                //
                next_part = function(part_number) {
                    //
                    // Check if we are past the end of the file
                    //
                    if ((part_number - 1) * part_size < file.size) {
                        build_request(part_number).then(function(result) {
                            if (self.state != UPLOADING)
                                return;     // upload was paused or aborted as we were reading the file

                            api.update({
                                resumable_id: part_number,
                                file_id: result.data_id,
                                part: part_number
                            }).then(function(data) {
                                set_part(data, result);
                            }, defaultError);

                        }, function(reason){
                            self.pause(reason);
                        });                 // END BUILD_REQUEST

                    } else {
                        //
                        // We're after the final commit
                        //
                        api.edit('finish').
                            then(function(request) {
                                api.process_request(request).then(completeUpload, defaultError);
                            }, defaultError);
                    }
                },


                //
                // Send a part to rackspace
                //
                set_part = function(request, part_info) {
                    request['data'] = part_info.data;
                    api.process_request(request, function(progress) {
                        self.progress = (part_info.part_number - 1) * part_size + progress;
                    }).then(function(result) {
                        last_part = part_info.part_number;
                        next_part(last_part + 1);
                    }, function(reason) {
                        // roll progress back to the start of the failed part
                        self.progress = (part_info.part_number - 1) * part_size;
                        defaultError(reason);
                    });
                };


                self.state = UPLOADING;

                this.resume = function() {
                    self.state = UPLOADING;
                    next_part(last_part + 1);
                };

                this.pause = function() {
                    api.abort();
                };


                //
                // We need to check if we are resuming or starting an upload
                //
                if(data.type == 'parts') {
                    next_part(data.current_part);
                } else {
                    set_part(data, first_chunk);
                }
            }; // END CHUNKED


            //
            // Variables required for all drivers
            //
            this.state = PENDING;
            this.progress = 0;
            this.message = 'pending';
            this.name = file.name;
            this.size = file.size;


            //
            // Support file slicing on older vendor-prefixed browsers
            //
            if (typeof(file.slice) != 'function')
                file.slice = file.webkitSlice || file.mozSlice;


            //
            // Begin (or resume) the upload. Hashes the first chunk, asks the
            // back end to create the upload, then selects a strategy based on
            // the response type.
            //
            this.start = function(){
                if(strategy == null) {      // We need to create the upload

                    this.message = null;
                    this.state = STARTED;
                    strategy = {};          // This function shouldn't be called twice so we need a state (TODO:: fix this)

                    build_request(1).then(function(result) {
                        if (self.state != STARTED)
                            return;         // upload was paused or aborted as we were reading the file

                        api.create({file_id: result.data_id}).
                            then(function(data) {
                                if(data.type == 'direct_upload') {
                                    strategy = new RackspaceDirect(data);
                                } else {
                                    strategy = new RackspaceChunked(data, result);
                                }
                            }, defaultError);

                    }, function(reason){
                        self.pause(reason);
                    });                     // END BUILD_REQUEST


                } else if (this.state == PAUSED) {  // We need to resume the upload if it is paused
                    this.message = null;
                    strategy.resume();
                }
            };

            //
            // Pause a running upload; `reason` is surfaced via this.message.
            //
            this.pause = function(reason) {
                if(strategy != null && this.state == UPLOADING) {   // Check if the upload is uploading
                    this.state = PAUSED;
                    strategy.pause();
                } else if (this.state <= STARTED) {     // create request may still be in flight
                    this.state = PAUSED;
                    restart();
                }
                if(this.state == PAUSED)
                    this.message = reason;
            };

            //
            // Cancel the upload and, if it had progressed past creation,
            // ask the back end to destroy it.
            //
            this.abort = function(reason) {
                if(strategy != null && this.state < COMPLETED) {    // Check the upload has not finished
                    var old_state = this.state;

                    this.state = ABORTED;
                    api.abort();


                    //
                    // As we may not have successfully deleted the upload
                    // or we aborted before we received a response from create
                    //
                    restart();              // nullifies strategy


                    //
                    // if we have an upload_id then we should destroy the upload
                    // we won't worry if this fails as it should be automatically cleaned up by the back end
                    //
                    if(old_state > STARTED) {
                        api.destroy();
                    }

                    this.message = reason;
                }
            };
        }; // END RACKSPACE


        return {
            new_upload: function(api, file) {
                return new Rackspace(api, file);
            }
        };

    }]);

}));
|
@@ -0,0 +1,470 @@
|
|
1
|
+
/*jslint bitwise: true, nomen: true */
|
2
|
+
/*global unescape*/
|
3
|
+
|
4
|
+
/**
|
5
|
+
* SparkMD5 is a fast md5 implementation of the MD5 algorithm.
|
6
|
+
* This script is based in the JKM md5 library which is the
|
7
|
+
* fastest algorithm around (see: http://jsperf.com/md5-shootout/7).
|
8
|
+
*
|
9
|
+
* NOTE: Please disable Firebug while testing this script!
|
10
|
+
* Firebug consumes a lot of memory and CPU and slows down by a great margin.
|
11
|
+
* Opera Dragonfly also slows down by a great margin.
|
12
|
+
* Safari/Chrome developer tools seems not to slow it down.
|
13
|
+
*
|
14
|
+
* Improvements over the JKM md5 library:
|
15
|
+
*
|
16
|
+
* - Functionality wrapped in a closure
|
17
|
+
* - Object oriented library
|
18
|
+
* - Incremental md5 (see bellow)
|
19
|
+
* - Validates using jslint
|
20
|
+
*
|
21
|
+
* Incremental md5 performs a lot better for hashing large ammounts of data, such as
|
22
|
+
* files. One could read files in chunks, using the FileReader & Blob's, and append
|
23
|
+
* each chunk for md5 hashing while keeping memory usage low. See example bellow.
|
24
|
+
*
|
25
|
+
* @example
|
26
|
+
*
|
27
|
+
* Normal usage:
|
28
|
+
*
|
29
|
+
* var hexHash = SparkMD5.hash('Hi there'); // hex hash
|
30
|
+
* var rawHash = SparkMD5.hash('Hi there', true); // raw hash
|
31
|
+
*
|
32
|
+
* Incremental usage:
|
33
|
+
*
|
34
|
+
* var spark = new SparkMD5();
|
35
|
+
* spark.append('Hi');
|
36
|
+
* spark.append(' there');
|
37
|
+
* var hexHash = spark.end(); // hex hash
|
38
|
+
* var rawHash = spark.end(true); // raw hash
|
39
|
+
*
|
40
|
+
* Hash a file incrementally:
|
41
|
+
*
|
42
|
+
* NOTE: If you test the code bellow using the file:// protocol in chrome you must start the browser with -allow-file-access-from-files argument.
|
43
|
+
* Please see: http://code.google.com/p/chromium/issues/detail?id=60889
|
44
|
+
*
|
45
|
+
* document.getElementById("file").addEventListener("change", function() {
|
46
|
+
*
|
47
|
+
* var fileReader = new FileReader(),
|
48
|
+
* blobSlice = File.prototype.mozSlice || File.prototype.webkitSlice || File.prototype.slice,
|
49
|
+
* file = document.getElementById("file").files[0],
|
50
|
+
* chunkSize = 2097152, // read in chunks of 2MB
|
51
|
+
* chunks = Math.ceil(file.size / chunkSize),
|
52
|
+
* currentChunk = 0,
|
53
|
+
* spark = new SparkMD5();
|
54
|
+
*
|
55
|
+
* fileReader.onload = function(e) {
|
56
|
+
* console.log("read chunk nr", currentChunk + 1, "of", chunks);
|
57
|
+
* spark.appendBinary(e.target.result); // append binary string
|
58
|
+
* currentChunk++;
|
59
|
+
*
|
60
|
+
* if (currentChunk < chunks) {
|
61
|
+
* loadNext();
|
62
|
+
* }
|
63
|
+
* else {
|
64
|
+
* console.log("finished loading");
|
65
|
+
* console.info("computed hash", spark.end()); // compute hash
|
66
|
+
* }
|
67
|
+
* };
|
68
|
+
*
|
69
|
+
* function loadNext() {
|
70
|
+
* var start = currentChunk * chunkSize,
|
71
|
+
* end = start + chunkSize >= file.size ? file.size : start + chunkSize;
|
72
|
+
*
|
73
|
+
* fileReader.readAsBinaryString(blobSlice.call(file, start, end));
|
74
|
+
* };
|
75
|
+
*
|
76
|
+
* loadNext();
|
77
|
+
* });
|
78
|
+
*
|
79
|
+
* @TODO: Add support for byteArrays.
|
80
|
+
* @TODO: Add support for HMAC.
|
81
|
+
* @TODO: Add native support for reading files? Maybe add it as an extension?
|
82
|
+
*/
|
83
|
+
(function (factory) {
|
84
|
+
if (typeof exports === 'object') {
|
85
|
+
// Node/CommonJS
|
86
|
+
exports.SparkMD5 = factory();
|
87
|
+
} else if (typeof define === 'function' && define.amd) {
|
88
|
+
// AMD
|
89
|
+
define('spark-md5', factory);
|
90
|
+
} else {
|
91
|
+
// Browser globals
|
92
|
+
window.SparkMD5 = factory();
|
93
|
+
}
|
94
|
+
}(function (undefined) {
|
95
|
+
|
96
|
+
"use strict";
|
97
|
+
|
98
|
+
////////////////////////////////////////////////////////////////////////////
|
99
|
+
|
100
|
+
/*
|
101
|
+
* Fastest md5 implementation around (JKM md5)
|
102
|
+
* Credits: Joseph Myers
|
103
|
+
*
|
104
|
+
* @see http://www.myersdaily.org/joseph/javascript/md5-text.html
|
105
|
+
* @see http://jsperf.com/md5-shootout/7
|
106
|
+
*/
|
107
|
+
|
108
|
+
/* this function is much faster,
|
109
|
+
so if possible we use it. Some IEs
|
110
|
+
are the only ones I know of that
|
111
|
+
need the idiotic second function,
|
112
|
+
generated by an if clause. */
|
113
|
+
/*
 * 32-bit addition plus the four MD5 auxiliary round functions.
 * NOTE: add32 is deliberately a `var` function expression — the self-test
 * further down reassigns it with a carry-safe variant on engines where
 * the plain masked addition is inaccurate.
 */
var add32 = function (a, b) {
    return (a + b) & 0xFFFFFFFF;
};

// Shared step of every round: add, rotate left by s, then add b.
function cmn(q, a, b, x, s, t) {
    var sum = add32(add32(a, q), add32(x, t));
    var rotated = (sum << s) | (sum >>> (32 - s));
    return add32(rotated, b);
}

// Round 1 mixer: F(b, c, d) = (b AND c) OR ((NOT b) AND d)
function ff(a, b, c, d, x, s, t) {
    var f = (b & c) | (~b & d);
    return cmn(f, a, b, x, s, t);
}

// Round 2 mixer: G(b, c, d) = (b AND d) OR (c AND (NOT d))
function gg(a, b, c, d, x, s, t) {
    var g = (b & d) | (c & ~d);
    return cmn(g, a, b, x, s, t);
}

// Round 3 mixer: H(b, c, d) = b XOR c XOR d
function hh(a, b, c, d, x, s, t) {
    var h = b ^ c ^ d;
    return cmn(h, a, b, x, s, t);
}

// Round 4 mixer: I(b, c, d) = c XOR (b OR (NOT d))
function ii(a, b, c, d, x, s, t) {
    var i = c ^ (b | ~d);
    return cmn(i, a, b, x, s, t);
}
|
137
|
+
|
138
|
+
/*
 * Runs one full MD5 compression cycle over a single 512-bit block.
 * `x` is the running four-word state (mutated in place); `k` is the
 * block as sixteen little-endian 32-bit words (see md5blk).
 * The shift amounts and additive constants are the fixed values from
 * RFC 1321; do not edit them.
 */
function md5cycle(x, k) {
    var a = x[0],
        b = x[1],
        c = x[2],
        d = x[3];

    // Round 1 (F function), 16 steps
    a = ff(a, b, c, d, k[0], 7, -680876936);
    d = ff(d, a, b, c, k[1], 12, -389564586);
    c = ff(c, d, a, b, k[2], 17, 606105819);
    b = ff(b, c, d, a, k[3], 22, -1044525330);
    a = ff(a, b, c, d, k[4], 7, -176418897);
    d = ff(d, a, b, c, k[5], 12, 1200080426);
    c = ff(c, d, a, b, k[6], 17, -1473231341);
    b = ff(b, c, d, a, k[7], 22, -45705983);
    a = ff(a, b, c, d, k[8], 7, 1770035416);
    d = ff(d, a, b, c, k[9], 12, -1958414417);
    c = ff(c, d, a, b, k[10], 17, -42063);
    b = ff(b, c, d, a, k[11], 22, -1990404162);
    a = ff(a, b, c, d, k[12], 7, 1804603682);
    d = ff(d, a, b, c, k[13], 12, -40341101);
    c = ff(c, d, a, b, k[14], 17, -1502002290);
    b = ff(b, c, d, a, k[15], 22, 1236535329);

    // Round 2 (G function), 16 steps
    a = gg(a, b, c, d, k[1], 5, -165796510);
    d = gg(d, a, b, c, k[6], 9, -1069501632);
    c = gg(c, d, a, b, k[11], 14, 643717713);
    b = gg(b, c, d, a, k[0], 20, -373897302);
    a = gg(a, b, c, d, k[5], 5, -701558691);
    d = gg(d, a, b, c, k[10], 9, 38016083);
    c = gg(c, d, a, b, k[15], 14, -660478335);
    b = gg(b, c, d, a, k[4], 20, -405537848);
    a = gg(a, b, c, d, k[9], 5, 568446438);
    d = gg(d, a, b, c, k[14], 9, -1019803690);
    c = gg(c, d, a, b, k[3], 14, -187363961);
    b = gg(b, c, d, a, k[8], 20, 1163531501);
    a = gg(a, b, c, d, k[13], 5, -1444681467);
    d = gg(d, a, b, c, k[2], 9, -51403784);
    c = gg(c, d, a, b, k[7], 14, 1735328473);
    b = gg(b, c, d, a, k[12], 20, -1926607734);

    // Round 3 (H function), 16 steps
    a = hh(a, b, c, d, k[5], 4, -378558);
    d = hh(d, a, b, c, k[8], 11, -2022574463);
    c = hh(c, d, a, b, k[11], 16, 1839030562);
    b = hh(b, c, d, a, k[14], 23, -35309556);
    a = hh(a, b, c, d, k[1], 4, -1530992060);
    d = hh(d, a, b, c, k[4], 11, 1272893353);
    c = hh(c, d, a, b, k[7], 16, -155497632);
    b = hh(b, c, d, a, k[10], 23, -1094730640);
    a = hh(a, b, c, d, k[13], 4, 681279174);
    d = hh(d, a, b, c, k[0], 11, -358537222);
    c = hh(c, d, a, b, k[3], 16, -722521979);
    b = hh(b, c, d, a, k[6], 23, 76029189);
    a = hh(a, b, c, d, k[9], 4, -640364487);
    d = hh(d, a, b, c, k[12], 11, -421815835);
    c = hh(c, d, a, b, k[15], 16, 530742520);
    b = hh(b, c, d, a, k[2], 23, -995338651);

    // Round 4 (I function), 16 steps
    a = ii(a, b, c, d, k[0], 6, -198630844);
    d = ii(d, a, b, c, k[7], 10, 1126891415);
    c = ii(c, d, a, b, k[14], 15, -1416354905);
    b = ii(b, c, d, a, k[5], 21, -57434055);
    a = ii(a, b, c, d, k[12], 6, 1700485571);
    d = ii(d, a, b, c, k[3], 10, -1894986606);
    c = ii(c, d, a, b, k[10], 15, -1051523);
    b = ii(b, c, d, a, k[1], 21, -2054922799);
    a = ii(a, b, c, d, k[8], 6, 1873313359);
    d = ii(d, a, b, c, k[15], 10, -30611744);
    c = ii(c, d, a, b, k[6], 15, -1560198380);
    b = ii(b, c, d, a, k[13], 21, 1309151649);
    a = ii(a, b, c, d, k[4], 6, -145523070);
    d = ii(d, a, b, c, k[11], 10, -1120210379);
    c = ii(c, d, a, b, k[2], 15, 718787259);
    b = ii(b, c, d, a, k[9], 21, -343485551);

    // Fold the round results back into the running state
    x[0] = add32(a, x[0]);
    x[1] = add32(b, x[1]);
    x[2] = add32(c, x[2]);
    x[3] = add32(d, x[3]);

}
|
218
|
+
|
219
|
+
/* there needs to be support for Unicode here,
|
220
|
+
* unless we pretend that we can redefine the MD-5
|
221
|
+
* algorithm for multi-byte characters (perhaps
|
222
|
+
* by adding every four 16-bit characters and
|
223
|
+
* shortening the sum to 32 bits). Otherwise
|
224
|
+
* I suggest performing MD-5 as if every character
|
225
|
+
* was two bytes--e.g., 0040 0025 = @%--but then
|
226
|
+
* how will an ordinary MD-5 sum be matched?
|
227
|
+
* There is no way to standardize text to something
|
228
|
+
* like UTF-8 before transformation; speed cost is
|
229
|
+
* utterly prohibitive. The JavaScript standard
|
230
|
+
* itself needs to look at this: it should start
|
231
|
+
* providing access to strings as preformed UTF-8
|
232
|
+
* 8-bit unsigned value arrays.
|
233
|
+
*/
|
234
|
+
/*
 * Packs a 64-character binary string into sixteen little-endian
 * 32-bit words — the block format consumed by md5cycle.
 */
function md5blk(s) {
    var words = [];
    var w;
    for (w = 0; w < 16; w += 1) {
        var base = w * 4;
        words[w] = s.charCodeAt(base) +
            (s.charCodeAt(base + 1) << 8) +
            (s.charCodeAt(base + 2) << 16) +
            (s.charCodeAt(base + 3) << 24);
    }
    return words;
}
|
242
|
+
|
243
|
+
/*
 * Hashes a complete binary string in one shot and returns the raw
 * four-word MD5 state. Processes whole 64-byte blocks first, then
 * applies RFC 1321 padding (0x80 terminator, zero fill, 64-bit
 * bit-length in the last two words) to the remainder.
 */
function md51(s) {
    var n = s.length,
        state = [1732584193, -271733879, -1732584194, 271733878], // MD5 IV
        i,
        length,
        tail;
    // Consume every complete 64-byte block
    for (i = 64; i <= n; i += 64) {
        md5cycle(state, md5blk(s.substring(i - 64, i)));
    }
    // Remaining (< 64 byte) tail of the message
    s = s.substring(i - 64);
    length = s.length;
    tail = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    for (i = 0; i < length; i += 1) {
        tail[i >> 2] |= s.charCodeAt(i) << ((i % 4) << 3);
    }
    // Append the mandatory 0x80 padding byte
    tail[i >> 2] |= 0x80 << ((i % 4) << 3);
    if (i > 55) {
        // No room for the length words in this block — flush and pad a fresh one
        md5cycle(state, tail);
        for (i = 0; i < 16; i += 1) {
            tail[i] = 0;
        }
    }
    // Message length in bits goes in the last two words (high word left 0
    // here; accurate for inputs shorter than 2^29 bytes)
    tail[14] = n * 8;
    md5cycle(state, tail);
    return state;
}
|
269
|
+
|
270
|
+
/*jslint vars: true*/
// Nibble-to-character table used when rendering the hex digest.
var hex_chr = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'];
/*jslint vars: false*/

/*
 * Renders one 32-bit word as eight lowercase hex characters,
 * least-significant byte first (the order MD5 digests are printed in).
 */
function rhex(n) {
    var out = '';
    var byteIdx;
    for (byteIdx = 0; byteIdx < 4; byteIdx += 1) {
        var shift = byteIdx * 8;
        out += hex_chr[(n >> (shift + 4)) & 0x0F];
        out += hex_chr[(n >> shift) & 0x0F];
    }
    return out;
}

/*
 * Converts a four-word MD5 state into its 32-character hex string.
 * Note: each element of `x` is rewritten in place before joining.
 */
function hex(x) {
    var w;
    for (w = 0; w < x.length; w += 1) {
        x[w] = rhex(x[w]);
    }
    return x.join('');
}
|
290
|
+
|
291
|
+
// Convenience wrapper: hex-encoded MD5 of a binary string.
function md5(s) {
    var digestWords = md51(s);
    return hex(digestWords);
}
|
294
|
+
|
295
|
+
// Self-test: on engines where the fast masked addition loses carry
// precision (some IEs, per the note above), the known digest of 'hello'
// comes out wrong — swap in a 16-bit split-carry adder instead.
if (md5('hello') !== '5d41402abc4b2a76b9719d911017c592') {
    add32 = function (x, y) {
        var lsw = (x & 0xFFFF) + (y & 0xFFFF),
            msw = (x >> 16) + (y >> 16) + (lsw >> 16);
        return (msw << 16) | (lsw & 0xFFFF);
    };
}
|
302
|
+
|
303
|
+
////////////////////////////////////////////////////////////////////////////
|
304
|
+
|
305
|
+
/**
 * SparkMD5 OOP implementation.
 *
 * Use this class to perform an incremental md5, otherwise use the
 * static methods instead.
 */
/*jslint vars: true*/
var SparkMD5 = function () {
/*jslint vars: false*/
    // call reset to init the instance (_buff, _length, _state)
    this.reset();
};
|
317
|
+
|
318
|
+
/**
 * Appends a string to the running hash.
 * A conversion to utf8 bytes is applied first when non-ASCII
 * characters are detected.
 *
 * @param {String} str The string to be appended
 *
 * @return {SparkMD5} The instance itself
 */
SparkMD5.prototype.append = function (str) {
    var hasNonAscii = /[\u0080-\uFFFF]/.test(str);
    var binary = hasNonAscii ? unescape(encodeURIComponent(str)) : str;

    // delegate the actual hashing to the binary path
    this.appendBinary(binary);
    return this;
};
|
337
|
+
|
338
|
+
/**
 * Appends a binary string.
 * Buffers input until at least one full 64-byte block is available,
 * hashing complete blocks immediately and keeping any remainder in
 * this._buff for the next call (or for end()).
 *
 * @param {String} contents The binary string to be appended
 *
 * @return {SparkMD5} The instance itself
 */
SparkMD5.prototype.appendBinary = function (contents) {
    // add to the buffer and increment string total length
    var offset = 64 - this._buff.length,                // bytes needed to complete the buffered block
        sub = this._buff + contents.substr(0, offset),  // buffered bytes + enough new ones for one block
        length = contents.length,
        total;

    this._length += length;

    if (sub.length >= 64) { // if there is 64 bytes accumulated

        md5cycle(this._state, md5blk(sub));

        total = contents.length - 64;

        // while we got bytes to process
        while (offset <= total) {
            sub = contents.substr(offset, 64);
            md5cycle(this._state, md5blk(sub));
            offset += 64;
        }

        // keep the (< 64 byte) remainder buffered
        this._buff = contents.substr(offset, 64);

    } else {
        // not enough for a full block yet — just accumulate
        this._buff = sub;
    }

    return this;
};
|
375
|
+
|
376
|
+
/**
 * Finishes the incremental computation, reseting the internal state and
 * returning the result.
 * Pads the buffered remainder per RFC 1321 (0x80 terminator, zero fill,
 * bit length in word 14) before the final cycle(s).
 * Use the raw parameter to obtain the raw result instead of the hex one.
 *
 * @param {Boolean} raw True to get the raw result, false to get the hex result
 *
 * @return {String|Array} The result
 */
SparkMD5.prototype.end = function (raw) {
    var buff = this._buff,
        length = buff.length,
        tail = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        i,
        ret;

    // pack the buffered remainder into little-endian words
    for (i = 0; i < length; i += 1) {
        tail[i >> 2] |= buff.charCodeAt(i) << ((i % 4) << 3);
    }
    // mandatory 0x80 padding byte
    tail[i >> 2] |= 0x80 << ((i % 4) << 3);
    if (i > 55) {
        // no room left for the length words — flush and use a fresh block
        md5cycle(this._state, tail);
        for (i = 0; i < 16; i += 1) {
            tail[i] = 0;
        }
    }
    // total message length in bits
    tail[14] = this._length * 8;
    md5cycle(this._state, tail);

    ret = !!raw ? this._state : hex(this._state);

    // leave the instance ready for reuse
    this.reset();

    return ret;
};
|
411
|
+
|
412
|
+
/**
 * Resets the internal state of the computation.
 *
 * @return {SparkMD5} The instance itself
 */
SparkMD5.prototype.reset = function () {
    this._length = 0;
    this._buff = "";
    // MD5 initialization vector (A, B, C, D) as signed 32-bit ints
    this._state = [1732584193, -271733879, -1732584194, 271733878];

    return this;
};
|
424
|
+
|
425
|
+
/**
 * Releases memory used by the incremental buffer and other aditional
 * resources. If you plan to use the instance again, use reset instead.
 */
SparkMD5.prototype.destroy = function () {
    delete this._buff;
    delete this._length;
    delete this._state;
};
|
434
|
+
|
435
|
+
/**
 * Performs the md5 hash on a string.
 * A conversion to utf8 bytes is applied first when non-ASCII
 * characters are detected.
 *
 * @param {String} str The string
 * @param {Boolean} raw True to get the raw result, false to get the hex result
 *
 * @return {String|Array} The result
 */
SparkMD5.hash = function (str, raw) {
    var input = /[\u0080-\uFFFF]/.test(str) ? unescape(encodeURIComponent(str)) : str;
    var state = md51(input);

    return raw ? state : hex(state);
};
|
454
|
+
|
455
|
+
/**
 * Performs the md5 hash on a binary string (no utf8 conversion).
 *
 * @param {String} content The binary string
 * @param {Boolean} raw True to get the raw result, false to get the hex result
 *
 * @return {String|Array} The result
 */
SparkMD5.hashBinary = function (content, raw) {
    var state = md51(content);

    return raw ? state : hex(state);
};
|
468
|
+
|
469
|
+
return SparkMD5;
|
470
|
+
}));
|