@karpeleslab/klbfw 0.1.8 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +95 -0
  3. package/package.json +1 -1
  4. package/upload.js +104 -44
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2022 Karpelès Lab Inc.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,95 @@
1
+ # klbfw
2
+
3
+ Karpeles Lab framework lib
4
+
5
+ This lib is used on frontend sites to communicate through the KLB API.
6
+
7
+ # API
8
+
9
+ ## rest(api, method, params, context)
10
+
11
+ Performs a rest query and returns a promise to the response.
12
+
13
+ ## upload.init(api, params, context)
14
+
15
+ Perform an upload. This API will show a file selector and allow the user to select one or more files.
16
+
17
+ ## getPrefix()
18
+
19
+ Returns the language/etc prefix part of the URL, for example `/l/en-US`. The prefix should be inserted before the path in the URL.
20
+
21
+ ## getSettings()
22
+
23
+ Returns active settings if any.
24
+
25
+ ## getRealm()
26
+
27
+ Returns realm information.
28
+
29
+ ## getContext()
30
+
31
+ Returns current context.
32
+
33
+ ## setContext(ctx)
34
+
35
+ Modifies the current context.
36
+
37
+ ## getMode()
38
+
39
+ Returns the current rendering mode `ssr`, `js` etc.
40
+
41
+ ## getHostname()
42
+
43
+ Returns the hostname part of the current URL.
44
+
45
+ ## getRegistry()
46
+
47
+ Returns data from the registry.
48
+
49
+ ## getLocale()
50
+
51
+ Returns the currently active locale, for example `en-US`.
52
+
53
+ ## getUserGroup()
54
+
55
+ Returns `g` from context, which is the current active user group.
56
+
57
+ ## getCurrency()
58
+
59
+ Returns the currently selected currency, such as `USD`.
60
+
61
+ ## getToken()
62
+
63
+ Returns the CSRF token.
64
+
65
+ ## getUrl()
66
+
67
+ Returns the active URL.
68
+
69
+ ## getPath()
70
+
71
+ Returns the non-prefixed request path.
72
+
73
+ ## getUuid()
74
+
75
+ Returns the UUID of the request.
76
+
77
+ ## getInitialState()
78
+
79
+ Returns the initial state passed from SSR execution (or null if no SSR was performed).
80
+
81
+ # Cookie functions
82
+
83
+ Those methods are a requirement as using things like `document.cookie` will not work in SSR mode. The methods described here will work when SSR is enabled, and will cause cookies to be added to the HTTP response.
84
+
85
+ ## getCookie(cookie)
86
+
87
+ Get the value of a specific cookie.
88
+
89
+ ## setCookie(cookie, value)
90
+
91
+ Sets value for a cookie.
92
+
93
+ ## hasCookie(cookie)
94
+
95
+ Checks for presence of a given cookie.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@karpeleslab/klbfw",
3
- "version": "0.1.8",
3
+ "version": "0.1.11",
4
4
  "description": "Frontend Framework",
5
5
  "main": "index.js",
6
6
  "scripts": {
package/upload.js CHANGED
@@ -100,7 +100,7 @@ module.exports.upload = (function () {
100
100
 
101
101
 
102
102
  function sendprogress() {
103
- if (upload.onprogress == undefined) return;
103
+ if (typeof upload.onprogress === "undefined") return;
104
104
 
105
105
  upload.onprogress(upload.getStatus());
106
106
  }
@@ -117,35 +117,55 @@ module.exports.upload = (function () {
117
117
  params["type"] = up.file.type;
118
118
 
119
119
  rest.rest(up.path, "POST", params, up.context).then(function (res) {
120
- if (!res["data"]["Cloud_Aws_Bucket_Upload__"]) {
121
- // invalid data
122
- up.reject();
123
- delete upload_running[up.up_id];
124
- upload_failed.push(up);
120
+ // Method 1: aws signed multipart upload
121
+ if (res["data"]["Cloud_Aws_Bucket_Upload__"]) {
122
+ up.info = res["data"]; // contains stuff like Bucket_Endpoint, Key, etc
123
+
124
+ // ok we are ready to upload - this will initiate an upload
125
+ awsReq(up.info, "POST", "uploads=", "", {"Content-Type": up.file.type, "X-Amz-Acl": "private"}, up.context)
126
+ .then(response => response.text())
127
+ .then(str => (new DOMParser()).parseFromString(str, "text/xml"))
128
+ .then(dom => dom.querySelector('UploadId').innerHTML)
129
+ .then(function (uploadId) {
130
+ up.uploadId = uploadId;
131
+
132
+ // ok, let's compute block size so we know how many parts we need to send
133
+ var fsize = up.file.size;
134
+ var bsize = Math.ceil(fsize / 10000); // we want ~10k parts
135
+ if (bsize < 5242880) bsize = 5242880; // minimum block size = 5MB
136
+
137
+ up.method = 'aws';
138
+ up.bsize = bsize;
139
+ up.blocks = Math.ceil(fsize / bsize);
140
+ up.b = {};
141
+ up['status'] = 'uploading';
142
+ upload.run();
143
+ }).catch(res => failure(up, res))
125
144
  return;
126
145
  }
146
+ // Method 2: PUT requests
147
+ if (res["data"]["PUT"]) {
148
+ var fsize = up.file.size;
149
+ var bsize = fsize; // upload file in a single block
150
+ if (res["data"]["Blocksize"]) {
151
+ // this upload target supports multipart PUT upload
152
+ bsize = res["data"]["Blocksize"]; // multipart upload
153
+ }
127
154
 
128
- up.info = res["data"]; // contains stuff like Bucket_Endpoint, Key, etc
129
-
130
- // ok we are ready to upload - this will initiate an upload
131
- awsReq(up.info, "POST", "uploads=", "", {"Content-Type": up.file.type, "X-Amz-Acl": "private"}, up.context)
132
- .then(response => response.text())
133
- .then(str => (new DOMParser()).parseFromString(str, "text/xml"))
134
- .then(dom => dom.querySelector('UploadId').innerHTML)
135
- .then(function (uploadId) {
136
- up.uploadId = uploadId;
137
-
138
- // ok, let's compute block size so we know how many parts we need to send
139
- var fsize = up.file.size;
140
- var bsize = Math.ceil(fsize / 10000); // we want ~10k parts
141
- if (bsize < 5242880) bsize = 5242880; // minimum block size = 5MB
142
-
143
- up.bsize = bsize;
144
- up.blocks = Math.ceil(fsize / bsize);
145
- up.b = {};
146
- up['status'] = 'uploading';
147
- upload.run();
148
- }).catch(res => failure(up, res))
155
+ up.info = res["data"];
156
+ up.method = 'put';
157
+ up.bsize = bsize;
158
+ up.blocks = Math.ceil(fsize / bsize);
159
+ up.b = {};
160
+ up['status'] = 'uploading';
161
+ upload.run();
162
+ return;
163
+ }
164
+ // invalid data
165
+ delete upload_running[up.up_id];
166
+ upload_failed.push(up);
167
+ up.reject();
168
+ return;
149
169
  })
150
170
  .catch(res => failure(up, res));
151
171
  }
@@ -185,12 +205,36 @@ module.exports.upload = (function () {
185
205
 
186
206
  var reader = new FileReader();
187
207
  reader.addEventListener("loadend", function () {
188
- awsReq(up.info, "PUT", "partNumber=" + (partno + 1) + "&uploadId=" + up.uploadId, reader.result, null, up.context)
189
- .then(function (response) {
190
- up.b[partno] = response.headers.get("ETag");
208
+ switch(up.method) {
209
+ case 'aws':
210
+ awsReq(up.info, "PUT", "partNumber=" + (partno + 1) + "&uploadId=" + up.uploadId, reader.result, null, up.context)
211
+ .then(function (response) {
212
+ up.b[partno] = response.headers.get("ETag");
213
+ sendprogress();
214
+ upload.run();
215
+ }).catch(res => failure(up, res));
216
+ break;
217
+ case 'put':
218
+ let headers = {};
219
+ headers["Content-Type"] = up.file.type;
220
+ if (up.blocks > 1) {
221
+ // add Content-Range header
222
+ // Content-Range: bytes start-end/*
223
+ const end = start + reader.result.byteLength - 1; // inclusive
224
+ headers["Content-Range"] = "bytes "+start+"-"+end+"/*";
225
+ }
226
+
227
+ fetch(up.info["PUT"], {
228
+ method: "PUT",
229
+ body: reader.result,
230
+ headers: headers,
231
+ }).then(function (response) {
232
+ up.b[partno] = "done";
191
233
  sendprogress();
192
234
  upload.run();
193
235
  }).catch(res => failure(up, res));
236
+ break;
237
+ }
194
238
  });
195
239
 
196
240
  reader.addEventListener("error", function (e) {
@@ -221,27 +265,43 @@ module.exports.upload = (function () {
221
265
  up["done"] = d;
222
266
 
223
267
  if (p == 0) {
224
- // complete, see https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
225
268
  up["status"] = "validating";
226
- var xml = "<CompleteMultipartUpload>";
227
- for (var i = 0; i < up.blocks; i++) {
228
- xml += "<Part><PartNumber>" + (i + 1) + "</PartNumber><ETag>" + up.b[i] + "</ETag></Part>";
229
- }
230
- xml += "</CompleteMultipartUpload>";
231
- awsReq(up.info, "POST", "uploadId=" + up.uploadId, xml, null, up.context)
232
- .then(response => response.text())
233
- .then(function (r) {
234
- // if success, need to call finalize
235
- rest.rest("Cloud/Aws/Bucket/Upload/" + up.info.Cloud_Aws_Bucket_Upload__ + ":handleComplete", "POST", {}, up.context).then(function (ares) {
236
- // SUCCESS!
269
+ switch(up.method) {
270
+ case 'aws':
271
+ // complete, see https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
272
+ var xml = "<CompleteMultipartUpload>";
273
+ for (var i = 0; i < up.blocks; i++) {
274
+ xml += "<Part><PartNumber>" + (i + 1) + "</PartNumber><ETag>" + up.b[i] + "</ETag></Part>";
275
+ }
276
+ xml += "</CompleteMultipartUpload>";
277
+ awsReq(up.info, "POST", "uploadId=" + up.uploadId, xml, null, up.context)
278
+ .then(response => response.text())
279
+ .then(function (r) {
280
+ // if success, need to call finalize
281
+ rest.rest("Cloud/Aws/Bucket/Upload/" + up.info.Cloud_Aws_Bucket_Upload__ + ":handleComplete", "POST", {}, up.context).then(function (ares) {
282
+ // SUCCESS!
283
+ up["status"] = "complete";
284
+ up["final"] = ares["data"];
285
+ sendprogress();
286
+ up.resolve(up);
287
+ delete upload_running[up.up_id];
288
+ upload.run();
289
+ }).catch(res => failure(up, res));
290
+ }).catch(res => failure(up, res));
291
+ break;
292
+ case 'put':
293
+ // complete, directly call handleComplete
294
+ rest.rest(up.info.Complete, "POST", {}, up.context).then(function (ares) {
295
+ // success!
237
296
  up["status"] = "complete";
238
297
  up["final"] = ares["data"];
239
298
  sendprogress();
240
- up.resolve(up);
241
299
  delete upload_running[up.up_id];
300
+ up.resolve(up);
242
301
  upload.run();
243
302
  }).catch(res => failure(up, res));
244
- }).catch(res => failure(up, res));
303
+ break;
304
+ }
245
305
  }
246
306
  }
247
307