pyview-web 0.4.3__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of pyview-web was flagged as a potentially problematic release.

pyview/assets/js/app.js CHANGED
@@ -61,6 +61,7 @@ let csrfToken = document
 let liveSocket = new LiveSocket("/live", Socket, {
   hooks: Hooks,
   params: { _csrf_token: csrfToken },
+  uploaders: window.Uploaders || {},
 });
 
 // Show progress bar on live navigation and form submits
pyview/assets/js/uploaders.js ADDED
@@ -0,0 +1,221 @@
+/**
+ * PyView External S3 Uploaders
+ *
+ * Client-side uploaders for external S3 uploads.
+ *
+ * Available uploaders:
+ * - S3: Simple POST upload to S3 using presigned POST URLs
+ * - S3Multipart: Multipart upload for large files (>5GB)
+ */
+
+window.Uploaders = window.Uploaders || {};
+
+// S3 Simple POST uploader
+// Uses presigned POST URLs for direct upload to S3
+// Works for files up to ~5GB
+if (!window.Uploaders.S3) {
+  window.Uploaders.S3 = function (entries, onViewError) {
+    entries.forEach((entry) => {
+      let formData = new FormData();
+      let { url, fields } = entry.meta;
+
+      // Add all fields from presigned POST
+      Object.entries(fields).forEach(([key, val]) =>
+        formData.append(key, val)
+      );
+      formData.append("file", entry.file);
+
+      let xhr = new XMLHttpRequest();
+      onViewError(() => xhr.abort());
+
+      xhr.onload = () => {
+        if (xhr.status === 204 || xhr.status === 200) {
+          entry.progress(100);
+        } else {
+          entry.error(`S3 upload failed with status ${xhr.status}`);
+        }
+      };
+      xhr.onerror = () => entry.error("Network error during upload");
+
+      xhr.upload.addEventListener("progress", (event) => {
+        if (event.lengthComputable) {
+          let percent = Math.round((event.loaded / event.total) * 100);
+          if (percent < 100) {
+            entry.progress(percent);
+          }
+        }
+      });
+
+      xhr.open("POST", url, true);
+      xhr.send(formData);
+    });
+  };
+}
+
+// S3 Multipart uploader for large files
+// Uploads file in chunks with retry logic and concurrency control
+//
+// - Exponential backoff retry (max 3 attempts per part)
+// - Concurrency limit (max 6 parallel uploads)
+// - Automatic cleanup on fatal errors
+//
+// Based on AWS best practices:
+// https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html
+//
+// Server must:
+// 1. Return metadata with: uploader="S3Multipart", upload_id, part_urls, chunk_size
+// 2. Provide entry_complete callback to finalize the upload
+if (!window.Uploaders.S3Multipart) {
+  window.Uploaders.S3Multipart = function (entries, onViewError) {
+    entries.forEach((entry) => {
+      const { upload_id, part_urls, chunk_size, key } = entry.meta;
+      const file = entry.file;
+      const parts = []; // Store {PartNumber, ETag} for each uploaded part
+
+      const MAX_RETRIES = 3;
+      const MAX_CONCURRENT = 6;
+      let uploadedParts = 0;
+      let activeUploads = 0;
+      let partIndex = 0;
+      let hasError = false;
+      const totalParts = part_urls.length;
+
+      console.log(`[S3Multipart] Starting upload for ${entry.file.name}`);
+      console.log(`[S3Multipart] Total parts: ${totalParts}, chunk size: ${chunk_size}`);
+      console.log(`[S3Multipart] Max concurrent uploads: ${MAX_CONCURRENT}, max retries: ${MAX_RETRIES}`);
+
+      // Add a custom method to send completion data directly
+      // This bypasses entry.progress() which only handles numbers
+      entry.complete = function (completionData) {
+        console.log(`[S3Multipart] Calling entry.complete with:`, completionData);
+        // Call pushFileProgress directly with the completion data
+        entry.view.pushFileProgress(entry.fileEl, entry.ref, completionData);
+      };
+
+      // Upload a single part with retry logic
+      const uploadPart = (index, retryCount = 0) => {
+        if (hasError) return; // Stop if we've hit a fatal error
+
+        const partNumber = index + 1;
+        const url = part_urls[index];
+        const start = index * chunk_size;
+        const end = Math.min(start + chunk_size, file.size);
+        const chunk = file.slice(start, end);
+
+        console.log(`[S3Multipart] Starting part ${partNumber}/${totalParts}, size: ${chunk.size} bytes, attempt ${retryCount + 1}`);
+
+        const xhr = new XMLHttpRequest();
+        onViewError(() => xhr.abort());
+
+        // Track upload progress within this chunk
+        xhr.upload.addEventListener("progress", (event) => {
+          if (event.lengthComputable) {
+            // Calculate overall progress: completed parts + current part's progress
+            const completedBytes = uploadedParts * chunk_size;
+            const currentPartBytes = event.loaded;
+            const totalBytes = file.size;
+            const overallPercent = Math.round(((completedBytes + currentPartBytes) / totalBytes) * 100);
+
+            // Don't report 100% until all parts complete and we send completion data
+            if (overallPercent < 100) {
+              entry.progress(overallPercent);
+            }
+          }
+        });
+
+        xhr.onload = () => {
+          activeUploads--;
+
+          if (xhr.status === 200) {
+            const etag = xhr.getResponseHeader('ETag');
+            console.log(`[S3Multipart] Part ${partNumber} succeeded, ETag: ${etag}`);
+
+            if (!etag) {
+              console.error(`[S3Multipart] Part ${partNumber} missing ETag!`);
+              entry.error(`Part ${partNumber} upload succeeded but no ETag returned`);
+              hasError = true;
+              return;
+            }
+
+            // Store the part with its ETag
+            parts.push({
+              PartNumber: partNumber,
+              ETag: etag.replace(/"/g, '')
+            });
+            uploadedParts++;
+
+            // Update progress
+            const progressPercent = Math.round((uploadedParts / totalParts) * 100);
+            console.log(`[S3Multipart] Progress: ${uploadedParts}/${totalParts} parts (${progressPercent}%)`);
+
+            if (uploadedParts < totalParts) {
+              entry.progress(progressPercent < 100 ? progressPercent : 99);
+              uploadNextPart(); // Start next part
+            } else {
+              // All parts complete!
+              const completionData = {
+                complete: true,
+                upload_id: upload_id,
+                key: key,
+                parts: parts.sort((a, b) => a.PartNumber - b.PartNumber)
+              };
+              console.log(`[S3Multipart] All parts complete! Sending completion data`);
+              entry.complete(completionData);
+            }
+          } else {
+            // Upload failed - retry with exponential backoff
+            console.error(`[S3Multipart] Part ${partNumber} failed with status ${xhr.status}, attempt ${retryCount + 1}`);
+
+            if (retryCount < MAX_RETRIES) {
+              // Exponential backoff: 1s, 2s, 4s, max 10s
+              const delay = Math.min(1000 * (2 ** retryCount), 10000);
+              console.log(`[S3Multipart] Retrying part ${partNumber} in ${delay}ms...`);
+
+              setTimeout(() => {
+                uploadPart(index, retryCount + 1);
+              }, delay);
+            } else {
+              // Max retries exceeded - fatal error
+              console.error(`[S3Multipart] Part ${partNumber} failed after ${MAX_RETRIES} retries, aborting upload`);
+              entry.error(`Part ${partNumber} failed after ${MAX_RETRIES} attempts. Upload aborted.`);
+              hasError = true;
+            }
+          }
+        };
+
+        xhr.onerror = () => {
+          activeUploads--;
+          console.error(`[S3Multipart] Network error on part ${partNumber}, attempt ${retryCount + 1}`);
+
+          if (retryCount < MAX_RETRIES) {
+            const delay = Math.min(1000 * (2 ** retryCount), 10000);
+            console.log(`[S3Multipart] Retrying part ${partNumber} after network error in ${delay}ms...`);
+
+            setTimeout(() => {
+              uploadPart(index, retryCount + 1);
+            }, delay);
+          } else {
+            console.error(`[S3Multipart] Part ${partNumber} network error after ${MAX_RETRIES} retries, aborting upload`);
+            entry.error(`Part ${partNumber} network error after ${MAX_RETRIES} attempts. Upload aborted.`);
+            hasError = true;
+          }
+        };
+
+        xhr.open('PUT', url, true);
+        xhr.send(chunk);
+        activeUploads++;
+      };
+
+      // Upload next part if we haven't hit the concurrency limit
+      const uploadNextPart = () => {
+        while (partIndex < totalParts && activeUploads < MAX_CONCURRENT && !hasError) {
+          uploadPart(partIndex);
+          partIndex++;
+        }
+      };
+
+      // Start initial batch of uploads
+      uploadNextPart();
+    });
+  };
+}
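
The server-side counterpart to this file is the presign callback, which produces the entry.meta that the S3 uploader destructures. A minimal sketch, assuming boto3 with configured AWS credentials; the helper name presign_s3, the bucket "my-uploads", and the key layout are illustrative, not part of the package:

    # Sketch of a presign callback for the simple "S3" uploader.
    import boto3

    from pyview.uploads import ExternalUploadMeta, UploadEntry

    s3 = boto3.client("s3")

    async def presign_s3(entry: UploadEntry, context) -> ExternalUploadMeta:
        # generate_presigned_post returns {"url": ..., "fields": {...}},
        # which is exactly what the client-side S3 uploader reads from entry.meta.
        post = s3.generate_presigned_post(
            Bucket="my-uploads",  # placeholder bucket
            Key=f"uploads/{entry.uuid}/{entry.name}",
            Conditions=[["content-length-range", 0, entry.size]],
            ExpiresIn=3600,
        )
        return ExternalUploadMeta(uploader="S3", url=post["url"], fields=post["fields"])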
pyview/live_socket.py CHANGED
@@ -52,12 +52,16 @@ class UnconnectedSocket(Generic[T]):
         constraints: UploadConstraints,
         auto_upload: bool = False,
         progress: Optional[Callable] = None,
+        external: Optional[Callable] = None,
+        entry_complete: Optional[Callable] = None,
     ) -> UploadConfig:
         return UploadConfig(
             name=upload_name,
             constraints=constraints,
             autoUpload=auto_upload,
             progress_callback=progress,
+            external_callback=external,
+            entry_complete_callback=entry_complete,
         )
 
 
@@ -230,9 +234,11 @@ class ConnectedLiveViewSocket(Generic[T]):
         constraints: UploadConstraints,
         auto_upload: bool = False,
         progress: Optional[Callable] = None,
+        external: Optional[Callable] = None,
+        entry_complete: Optional[Callable] = None,
     ) -> UploadConfig:
         return self.upload_manager.allow_upload(
-            upload_name, constraints, auto_upload, progress
+            upload_name, constraints, auto_upload, progress, external, entry_complete
        )
 
     async def close(self):
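
With the new parameters, a view opts into external uploads at mount time. A minimal sketch (the view method, size limit, and callback names are hypothetical; presign_s3 is the sketch above, and on_complete a completion handler like the one sketched under pyview/uploads.py below):

    from pyview.uploads import UploadConstraints

    async def mount(self, socket, session):
        socket.allow_upload(
            "photos",
            constraints=UploadConstraints(max_file_size=50 * 1024 * 1024),
            auto_upload=True,
            external=presign_s3,         # presign callback: enables direct-to-cloud upload
            entry_complete=on_complete,  # fires when the client reports completion
        )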
pyview/static/assets/app.js CHANGED
@@ -39,9 +39,9 @@
   mod
 ));
 
-// assets/node_modules/nprogress/nprogress.js
+// node_modules/nprogress/nprogress.js
 var require_nprogress = __commonJS({
-  "assets/node_modules/nprogress/nprogress.js"(exports, module) {
+  "node_modules/nprogress/nprogress.js"(exports, module) {
     (function(root, factory) {
       if (typeof define === "function" && define.amd) {
         define(factory);
@@ -71,8 +71,7 @@
       var key, value;
       for (key in options) {
         value = options[key];
-        if (value !== void 0 && options.hasOwnProperty(key))
-          Settings[key] = value;
+        if (value !== void 0 && options.hasOwnProperty(key)) Settings[key] = value;
       }
       return this;
     };
@@ -84,8 +83,7 @@
      var progress = NProgress2.render(!started), bar = progress.querySelector(Settings.barSelector), speed = Settings.speed, ease = Settings.easing;
      progress.offsetWidth;
      queue(function(next) {
-       if (Settings.positionUsing === "")
-         Settings.positionUsing = NProgress2.getPositioningCSS();
+       if (Settings.positionUsing === "") Settings.positionUsing = NProgress2.getPositioningCSS();
        css(bar, barPositionCSS(n, speed, ease));
        if (n === 1) {
          css(progress, {
@@ -113,23 +111,19 @@
      return typeof NProgress2.status === "number";
    };
    NProgress2.start = function() {
-     if (!NProgress2.status)
-       NProgress2.set(0);
+     if (!NProgress2.status) NProgress2.set(0);
      var work = function() {
        setTimeout(function() {
-         if (!NProgress2.status)
-           return;
+         if (!NProgress2.status) return;
          NProgress2.trickle();
          work();
        }, Settings.trickleSpeed);
      };
-     if (Settings.trickle)
-       work();
+     if (Settings.trickle) work();
      return this;
    };
    NProgress2.done = function(force) {
-     if (!force && !NProgress2.status)
-       return this;
+     if (!force && !NProgress2.status) return this;
      return NProgress2.inc(0.3 + 0.5 * Math.random()).set(1);
    };
    NProgress2.inc = function(amount) {
@@ -171,8 +165,7 @@
      };
    })();
    NProgress2.render = function(fromStart) {
-     if (NProgress2.isRendered())
-       return document.getElementById("nprogress");
+     if (NProgress2.isRendered()) return document.getElementById("nprogress");
      addClass(document.documentElement, "nprogress-busy");
      var progress = document.createElement("div");
      progress.id = "nprogress";
@@ -213,10 +206,8 @@
      }
    };
    function clamp(n, min, max) {
-     if (n < min)
-       return min;
-     if (n > max)
-       return max;
+     if (n < min) return min;
+     if (n > max) return max;
      return n;
    }
    function toBarPerc(n) {
@@ -234,7 +225,7 @@
      barCSS.transition = "all " + speed + "ms " + ease;
      return barCSS;
    }
-   var queue = function() {
+   var queue = /* @__PURE__ */ (function() {
      var pending = [];
      function next() {
        var fn = pending.shift();
@@ -244,11 +235,10 @@
      }
      return function(fn) {
        pending.push(fn);
-       if (pending.length == 1)
-         next();
+       if (pending.length == 1) next();
      };
-   }();
-   var css = function() {
+   })();
+   var css = /* @__PURE__ */ (function() {
      var cssPrefixes = ["Webkit", "O", "Moz", "ms"], cssProps = {};
      function camelCase(string) {
        return string.replace(/^-ms-/, "ms-").replace(/-([\da-z])/gi, function(match, letter) {
@@ -257,13 +247,11 @@
      }
      function getVendorProp(name) {
        var style = document.body.style;
-       if (name in style)
-         return name;
+       if (name in style) return name;
        var i = cssPrefixes.length, capName = name.charAt(0).toUpperCase() + name.slice(1), vendorName;
        while (i--) {
          vendorName = cssPrefixes[i] + capName;
-         if (vendorName in style)
-           return vendorName;
+         if (vendorName in style) return vendorName;
        }
        return name;
      }
@@ -280,28 +268,25 @@
        if (args.length == 2) {
          for (prop in properties) {
            value = properties[prop];
-           if (value !== void 0 && properties.hasOwnProperty(prop))
-             applyCss(element, prop, value);
+           if (value !== void 0 && properties.hasOwnProperty(prop)) applyCss(element, prop, value);
          }
        } else {
          applyCss(element, args[1], args[2]);
        }
      };
-   }();
+   })();
    function hasClass(element, name) {
      var list = typeof element == "string" ? element : classList(element);
      return list.indexOf(" " + name + " ") >= 0;
    }
    function addClass(element, name) {
      var oldList = classList(element), newList = oldList + name;
-     if (hasClass(oldList, name))
-       return;
+     if (hasClass(oldList, name)) return;
      element.className = newList.substring(1);
    }
    function removeClass(element, name) {
      var oldList = classList(element), newList;
-     if (!hasClass(element, name))
-       return;
+     if (!hasClass(element, name)) return;
      newList = oldList.replace(" " + name + " ", " ");
      element.className = newList.substring(1, newList.length - 1);
    }
@@ -316,12 +301,11 @@
    }
  });
 
- // assets/node_modules/phoenix_html/priv/static/phoenix_html.js
+ // node_modules/phoenix_html/priv/static/phoenix_html.js
  (function() {
    var PolyfillEvent = eventConstructor();
    function eventConstructor() {
-     if (typeof window.CustomEvent === "function")
-       return window.CustomEvent;
+     if (typeof window.CustomEvent === "function") return window.CustomEvent;
      function CustomEvent2(event, params) {
        params = params || { bubbles: false, cancelable: false, detail: void 0 };
        var evt = document.createEvent("CustomEvent");
@@ -343,10 +327,8 @@
      form.method = element.getAttribute("data-method") === "get" ? "get" : "post";
      form.action = to;
      form.style.display = "hidden";
-     if (target)
-       form.target = target;
-     else if (targetModifierKey)
-       form.target = "_blank";
+     if (target) form.target = target;
+     else if (targetModifierKey) form.target = "_blank";
      form.appendChild(csrf);
      form.appendChild(method);
      document.body.appendChild(form);
@@ -354,8 +336,7 @@
    }
    window.addEventListener("click", function(e) {
      var element = e.target;
-     if (e.defaultPrevented)
-       return;
+     if (e.defaultPrevented) return;
      while (element && element.getAttribute) {
        var phoenixLinkEvent = new PolyfillEvent("phoenix.link.click", {
          "bubbles": true,
@@ -383,7 +364,7 @@
    }, false);
  })();
 
- // assets/node_modules/phoenix/priv/static/phoenix.mjs
+ // node_modules/phoenix/priv/static/phoenix.mjs
  var closure = (value) => {
    if (typeof value === "function") {
      return value;
@@ -1354,7 +1335,7 @@
   }
 };
 
-// assets/node_modules/phoenix_live_view/priv/static/phoenix_live_view.esm.js
+// node_modules/phoenix_live_view/priv/static/phoenix_live_view.esm.js
 var CONSECUTIVE_RELOADS = "consecutive-reloads";
 var MAX_RELOADS = 10;
 var RELOAD_JITTER_MIN = 5e3;
@@ -5457,7 +5438,7 @@ within:
   }
 };
 
-// assets/js/app.js
+// js/app.js
 var import_nprogress = __toESM(require_nprogress());
 var _a;
 var Hooks2 = (_a = window.Hooks) != null ? _a : {};
@@ -5487,7 +5468,8 @@ within:
 var csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content");
 var liveSocket = new LiveSocket("/live", Socket, {
   hooks: Hooks2,
-  params: { _csrf_token: csrfToken }
+  params: { _csrf_token: csrfToken },
+  uploaders: window.Uploaders || {}
 });
 window.addEventListener("phx:page-loading-start", (info) => import_nprogress.default.start());
 window.addEventListener("phx:page-loading-stop", (info) => import_nprogress.default.done());
pyview/static/assets/uploaders.js ADDED
@@ -0,0 +1,221 @@
Byte-identical copy of pyview/assets/js/uploaders.js above (same sha256 in RECORD).
pyview/uploads.py CHANGED
@@ -2,7 +2,7 @@ import datetime
 import uuid
 import logging
 from pydantic import BaseModel, Field
-from typing import Optional, Any, Literal, Generator, Callable
+from typing import Optional, Any, Literal, Generator, Callable, Awaitable
 from dataclasses import dataclass, field
 from contextlib import contextmanager
 import os
@@ -11,24 +11,60 @@ import tempfile
 logger = logging.getLogger(__name__)
 
 
+@dataclass
+class UploadSuccess:
+    """Upload completed successfully (no additional data needed)."""
+    pass
+
+
+@dataclass
+class UploadSuccessWithData:
+    """Upload completed successfully with completion data.
+
+    Used for multipart uploads where the client sends additional data like:
+    - upload_id: S3 multipart upload ID
+    - parts: List of {PartNumber, ETag} dicts
+    - key: S3 object key
+    - Any other provider-specific fields
+    """
+    data: dict
+
+
+@dataclass
+class UploadFailure:
+    """Upload failed with an error.
+
+    Used when the client reports an upload error.
+    """
+    error: str
+
+
+# Type alias for upload completion results
+UploadResult = UploadSuccess | UploadSuccessWithData | UploadFailure
+
+
 @dataclass
 class ConstraintViolation:
     ref: str
-    code: Literal["too_large", "too_many_files"]
+    code: Literal["too_large", "too_many_files", "upload_failed"]
 
     @property
     def message(self) -> str:
         if self.code == "too_large":
             return "File too large"
-        return "Too many files"
+        if self.code == "too_many_files":
+            return "Too many files"
+        if self.code == "upload_failed":
+            return "Upload failed"
+        return self.code
 
 
 class UploadEntry(BaseModel):
-    path: str
     ref: str
     name: str
     size: int
     type: str
+    path: Optional[str] = None  # None for external uploads, set for internal uploads
     upload_config: Optional["UploadConfig"] = None
     uuid: str = Field(default_factory=lambda: str(uuid.uuid4()))
     valid: bool = True
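
Because UploadResult is a tagged union of three dataclasses, an entry_complete callback can branch on it with structural pattern matching (Python 3.10+, which the `|` alias above already requires). A sketch; the handler name and prints are illustrative:

    from pyview.uploads import (
        UploadEntry, UploadFailure, UploadResult, UploadSuccess, UploadSuccessWithData,
    )

    async def on_complete(entry: UploadEntry, result: UploadResult, socket) -> None:
        match result:
            case UploadSuccess():
                print(f"{entry.name}: uploaded")
            case UploadSuccessWithData(data=data):
                # e.g. finalize an S3 multipart upload here, using
                # data["upload_id"], data["key"], and data["parts"]
                print(f"{entry.name}: complete, {len(data.get('parts', []))} parts")
            case UploadFailure(error=error):
                print(f"{entry.name}: failed: {error}")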
@@ -40,6 +76,7 @@ class UploadEntry(BaseModel):
     last_modified: int = Field(
         default_factory=lambda: int(datetime.datetime.now().timestamp())
     )
+    meta: Optional["ExternalUploadMeta"] = None  # Metadata from external uploads
 
 
 def parse_entries(entries: list[dict]) -> list[UploadEntry]:
@@ -90,6 +127,20 @@ class ActiveUploads:
             upload.close()
 
 
+class ExternalUploadMeta(BaseModel):
+    """Metadata returned by external upload presign functions.
+
+    The 'uploader' field is required and specifies the name of the client-side
+    JavaScript uploader (e.g., "S3", "GCS", "Azure").
+
+    Additional provider-specific fields (url, fields, etc.) can be added as needed.
+    """
+    uploader: str  # Required - name of client-side JS uploader
+
+    # Allow extra fields for provider-specific data (url, fields, etc.)
+    model_config = {"extra": "allow"}
+
+
 class UploadConstraints(BaseModel):
     max_file_size: int = 10 * 1024 * 1024  # 10MB
     max_files: int = 10
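
For the client-side S3Multipart uploader, the presign callback returns the upload_id, part_urls, chunk_size, and key fields it expects, carried through ExternalUploadMeta's extra fields. A sketch assuming boto3; the bucket name, key layout, chunk size, and expiry are placeholders:

    import math

    import boto3

    from pyview.uploads import ExternalUploadMeta, UploadEntry

    s3 = boto3.client("s3")
    CHUNK_SIZE = 10 * 1024 * 1024  # 10MB parts (S3 requires >= 5MB except the last)

    async def presign_s3_multipart(entry: UploadEntry, context) -> ExternalUploadMeta:
        key = f"uploads/{entry.uuid}/{entry.name}"  # illustrative key layout
        mpu = s3.create_multipart_upload(Bucket="my-uploads", Key=key)
        num_parts = max(1, math.ceil(entry.size / CHUNK_SIZE))
        part_urls = [
            # One presigned PUT per part; the client uploads chunks in parallel.
            s3.generate_presigned_url(
                "upload_part",
                Params={"Bucket": "my-uploads", "Key": key,
                        "UploadId": mpu["UploadId"], "PartNumber": n},
                ExpiresIn=3600,
            )
            for n in range(1, num_parts + 1)
        ]
        return ExternalUploadMeta(
            uploader="S3Multipart",
            upload_id=mpu["UploadId"],
            part_urls=part_urls,
            chunk_size=CHUNK_SIZE,
            key=key,
        )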
@@ -105,7 +156,15 @@ class UploadConfig(BaseModel):
     errors: list[ConstraintViolation] = Field(default_factory=list)
     autoUpload: bool = False
     constraints: UploadConstraints = Field(default_factory=UploadConstraints)
-    progress_callback: Optional[Callable] = None
+    progress_callback: Optional[
+        Callable[[UploadEntry, Any], Awaitable[None]]
+    ] = None
+    external_callback: Optional[
+        Callable[[UploadEntry, Any], Awaitable[ExternalUploadMeta]]
+    ] = None
+    entry_complete_callback: Optional[
+        Callable[[UploadEntry, UploadResult, Any], Awaitable[None]]
+    ] = None
 
     uploads: ActiveUploads = Field(default_factory=ActiveUploads)
@@ -113,6 +172,11 @@
     def entries(self) -> list[UploadEntry]:
         return list(self.entries_by_ref.values())
 
+    @property
+    def is_external(self) -> bool:
+        """Returns True if this upload config uses external (direct-to-cloud) uploads"""
+        return self.external_callback is not None
+
     def cancel_entry(self, ref: str):
         del self.entries_by_ref[ref]
154
218
  self.uploads = ActiveUploads()
155
219
  self.entries_by_ref = {}
156
220
 
157
- @contextmanager
221
+ @contextmanager
158
222
  def consume_upload_entry(self, entry_ref: str) -> Generator[Optional["ActiveUpload"], None, None]:
159
223
  """Consume a single upload entry by its ref"""
160
224
  upload = None
161
225
  join_ref = None
162
-
226
+
163
227
  # Find the join_ref for this entry
164
228
  for jr, active_upload in self.uploads.uploads.items():
165
229
  if active_upload.entry.ref == entry_ref:
166
230
  upload = active_upload
167
231
  join_ref = jr
168
232
  break
169
-
233
+
170
234
  try:
171
235
  yield upload
172
236
  finally:
@@ -175,13 +239,64 @@
             upload.close()
         except Exception:
             logger.warning("Error closing upload entry", exc_info=True)
-
+
         # Remove only this specific upload
         if join_ref in self.uploads.uploads:
             del self.uploads.uploads[join_ref]
         if entry_ref in self.entries_by_ref:
             del self.entries_by_ref[entry_ref]
 
+    @contextmanager
+    def consume_external_upload(self, entry_ref: str) -> Generator[Optional["UploadEntry"], None, None]:
+        """Consume a single external upload entry by its ref.
+
+        For external uploads (direct-to-cloud), this returns the UploadEntry containing
+        metadata about the uploaded file. The entry is automatically removed after the
+        context manager exits.
+
+        Args:
+            entry_ref: The ref of the entry to consume
+
+        Yields:
+            UploadEntry if found, None otherwise
+
+        Raises:
+            ValueError: If called on a non-external upload config
+        """
+        if not self.is_external:
+            raise ValueError("consume_external_upload() can only be called on external upload configs")
+
+        entry = self.entries_by_ref.get(entry_ref)
+
+        try:
+            yield entry
+        finally:
+            if entry_ref in self.entries_by_ref:
+                del self.entries_by_ref[entry_ref]
+
+    @contextmanager
+    def consume_external_uploads(self) -> Generator[list["UploadEntry"], None, None]:
+        """Consume all external upload entries and clean up.
+
+        For external uploads (direct-to-cloud), this returns the UploadEntry objects
+        containing metadata about the uploaded files. The entries are automatically
+        cleared after the context manager exits.
+
+        Yields:
+            List of UploadEntry objects
+
+        Raises:
+            ValueError: If called on a non-external upload config
+        """
+        if not self.is_external:
+            raise ValueError("consume_external_uploads() can only be called on external upload configs")
+
+        try:
+            upload_list = list(self.entries_by_ref.values())
+            yield upload_list
+        finally:
+            self.entries_by_ref = {}
+
     def close(self):
         self.uploads.close()
 
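Consuming external entries mirrors the existing consume_upload_entry API. A sketch of draining them in an event handler; it assumes the UploadConfig returned by allow_upload() was kept on the view's context under "photos", which is a hypothetical convention, not a package API:

    async def handle_event(self, event, payload, socket):
        if event == "save":
            with socket.context.photos.consume_external_uploads() as entries:
                for entry in entries:
                    # path is None for external uploads; the object lives in the
                    # bucket, and entry.meta carries the presign metadata.
                    print(entry.name, entry.meta and entry.meta.model_dump())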
@@ -195,9 +310,22 @@ class UploadManager:
         self.upload_config_join_refs = {}
 
     def allow_upload(
-        self, upload_name: str, constraints: UploadConstraints, auto_upload: bool = False, progress: Optional[Callable] = None
+        self,
+        upload_name: str,
+        constraints: UploadConstraints,
+        auto_upload: bool = False,
+        progress: Optional[Callable] = None,
+        external: Optional[Callable] = None,
+        entry_complete: Optional[Callable] = None,
     ) -> UploadConfig:
-        config = UploadConfig(name=upload_name, constraints=constraints, autoUpload=auto_upload, progress_callback=progress)
+        config = UploadConfig(
+            name=upload_name,
+            constraints=constraints,
+            autoUpload=auto_upload,
+            progress_callback=progress,
+            external_callback=external,
+            entry_complete_callback=entry_complete,
+        )
         self.upload_configs[upload_name] = config
         return config
 
@@ -220,34 +348,98 @@
         else:
             logger.warning("Upload config not found for ref: %s", config.ref)
 
-    def process_allow_upload(self, payload: dict[str, Any]) -> dict[str, Any]:
-        ref = payload["ref"]
-        config = self.config_for_ref(ref)
-
-        if not config:
-            logger.warning("Can't find upload config for ref: %s", ref)
-            return {"error": [(ref, "not_found")]}
-
-        proposed_entries = payload["entries"]
-
+    def _validate_constraints(
+        self, config: UploadConfig, proposed_entries: list[dict[str, Any]]
+    ) -> list[ConstraintViolation]:
+        """Validate proposed entries against upload constraints."""
         errors = []
         for entry in proposed_entries:
             if entry["size"] > config.constraints.max_file_size:
                 errors.append(ConstraintViolation(ref=entry["ref"], code="too_large"))
 
         if len(proposed_entries) > config.constraints.max_files:
-            errors.append(ConstraintViolation(ref=ref, code="too_many_files"))
+            errors.append(ConstraintViolation(ref=config.ref, code="too_many_files"))
 
-        if errors:
-            return {"error": [(e.ref, e.code) for e in errors]}
+        return errors
+
+    async def _process_external_upload(
+        self, config: UploadConfig, proposed_entries: list[dict[str, Any]], context: Any
+    ) -> dict[str, Any]:
+        """Process external (direct-to-cloud) upload by calling presign function for each entry."""
+        entries_with_meta = {}
+        successfully_preflighted = []  # Track entries added to config for atomic cleanup
+
+        if not config.external_callback:
+            logger.error("external_callback is required for external uploads")
+            return {"error": [("config", "external_callback_missing")]}
+
+        for entry_data in proposed_entries:
+            # Create UploadEntry to pass to presign function
+            entry = UploadEntry(**entry_data)
+            entry.upload_config = config
+
+            try:
+                # Call user's presign function
+                meta: ExternalUploadMeta = await config.external_callback(entry, context)
+
+                # Store metadata and mark entry as preflighted
+                entry.meta = meta
+                entry.preflighted = True
+                config.entries_by_ref[entry.ref] = entry
+                successfully_preflighted.append(entry.ref)  # Track for cleanup
+
+                # Build entry JSON with metadata merged at top level
+                entry_dict = entry.model_dump(exclude={"upload_config", "meta"})
+                entry_dict.update(meta.model_dump())  # Merge meta fields into entry
+                entries_with_meta[entry.ref] = entry_dict
+
+            except Exception as e:
+                logger.error(f"Error calling presign function for entry {entry.ref}: {e}", exc_info=True)
+
+                # Atomic cleanup: remove all entries added before this failure
+                for ref in successfully_preflighted:
+                    config.entries_by_ref.pop(ref, None)
+
+                return {"error": [(entry.ref, "presign_error")]}
 
+        configJson = config.constraints.model_dump()
+        return {"config": configJson, "entries": entries_with_meta}
+
+    def _process_internal_upload(self, config: UploadConfig) -> dict[str, Any]:
+        """Process internal (direct-to-server) upload."""
         configJson = config.constraints.model_dump()
         entryJson = {
             e.ref: e.model_dump(exclude={"upload_config"}) for e in config.entries
         }
-
         return {"config": configJson, "entries": entryJson}
 
+    async def process_allow_upload(self, payload: dict[str, Any], context: Any) -> dict[str, Any]:
+        """Process allow_upload request from client.
+
+        Validates constraints and either:
+        - For external uploads: calls presign function to generate upload metadata
+        - For internal uploads: returns standard config/entries response
+        """
+        ref = payload["ref"]
+        config = self.config_for_ref(ref)
+
+        if not config:
+            logger.warning("Can't find upload config for ref: %s", ref)
+            return {"error": [(ref, "not_found")]}
+
+        proposed_entries = payload["entries"]
+
+        # Validate constraints
+        errors = self._validate_constraints(config, proposed_entries)
+        if errors:
+            return {"error": [(e.ref, e.code) for e in errors]}
+
+        # Handle external vs internal uploads
+        if config.is_external:
+            return await self._process_external_upload(config, proposed_entries, context)
+        else:
+            return self._process_internal_upload(config)
+
     def add_upload(self, joinRef: str, payload: dict[str, Any]):
         token = payload["token"]
 
@@ -262,16 +454,59 @@
         config.uploads.add_chunk(joinRef, chunk)
         pass
 
-    def update_progress(self, joinRef: str, payload: dict[str, Any]):
+    async def update_progress(self, joinRef: str, payload: dict[str, Any], socket):
         upload_config_ref = payload["ref"]
         entry_ref = payload["entry_ref"]
-        progress = int(payload["progress"])
+        progress_data = payload["progress"]
 
         config = self.config_for_ref(upload_config_ref)
-        if config:
-            config.update_progress(entry_ref, progress)
+        if not config:
+            logger.warning(f"[update_progress] No config found for ref: {upload_config_ref}")
+            return
+
+        # Handle dict (error or completion)
+        if isinstance(progress_data, dict):
+            if progress_data.get('complete'):
+                entry = config.entries_by_ref.get(entry_ref)
+                if entry:
+                    entry.progress = 100
+                    entry.done = True
+
+                    # Call entry_complete callback with success result
+                    if config.entry_complete_callback:
+                        result = UploadSuccessWithData(data=progress_data)
+                        await config.entry_complete_callback(entry, result, socket)
+                return
+
+            # Handle error case: {error: "reason"}
+            error_msg = progress_data.get('error', 'Upload failed')
+            logger.warning(f"Upload error for entry {entry_ref}: {error_msg}")
 
-        if progress == 100:
+            if entry_ref in config.entries_by_ref:
+                entry = config.entries_by_ref[entry_ref]
+                entry.valid = False
+                entry.done = True
+                entry.errors.append(ConstraintViolation(ref=entry_ref, code="upload_failed"))
+
+                # Call entry_complete callback with failure result
+                if config.entry_complete_callback:
+                    result = UploadFailure(error=error_msg)
+                    await config.entry_complete_callback(entry, result, socket)
+            return
+
+        # Handle progress number
+        progress = int(progress_data)
+        config.update_progress(entry_ref, progress)
+
+        # Fire entry_complete callback on 100
+        if progress == 100:
+            entry = config.entries_by_ref.get(entry_ref)
+            if entry and config.entry_complete_callback:
+                result = UploadSuccess()
+                await config.entry_complete_callback(entry, result, socket)
+
+        # Cleanup for internal uploads only (external uploads never populate upload_config_join_refs)
+        if not config.is_external:
             try:
                 joinRef_to_remove = config.uploads.join_ref_for_entry(entry_ref)
                 if joinRef_to_remove in self.upload_config_join_refs:
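
For reference, the "progress" payload this method now accepts takes one of three shapes (values illustrative):

    progress = 42                                             # plain percentage
    progress = {"complete": True, "upload_id": "...",         # multipart completion; extra
                "key": "uploads/abc/file.bin",                # keys reach entry_complete_callback
                "parts": [{"PartNumber": 1, "ETag": "..."}]}  # as UploadSuccessWithData
    progress = {"error": "Part 3 failed after 3 attempts."}   # becomes UploadFailure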
@@ -288,15 +523,20 @@
         """Trigger progress callback if one exists for this upload config"""
         upload_config_ref = payload["ref"]
         config = self.config_for_ref(upload_config_ref)
-
+
         if config and config.progress_callback:
             entry_ref = payload["entry_ref"]
             if entry_ref in config.entries_by_ref:
                 entry = config.entries_by_ref[entry_ref]
+                progress_data = payload["progress"]
+
                 # Update entry progress before calling callback
-                progress = int(payload["progress"])
-                entry.progress = progress
-                entry.done = progress == 100
+                if isinstance(progress_data, int):
+                    entry.progress = progress_data
+                    entry.done = progress_data == 100
+                # For dict (error or completion), don't update entry.progress here
+                # (will be handled in update_progress or completion handler)
+
                 await config.progress_callback(entry, socket)
 
     def close(self):
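
The progress_callback itself remains a simple per-entry hook; it now sees both int and dict payloads, but entry.progress and entry.done are only updated for ints. A sketch (hypothetical handler):

    async def on_progress(entry, socket) -> None:
        # Called on every client progress event, after entry.progress is updated.
        if entry.done:
            print(f"{entry.name}: finished")
        else:
            print(f"{entry.name}: {entry.progress}%")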
pyview/ws_handler.py CHANGED
@@ -228,8 +228,8 @@ class LiveSocketHandler:
                 continue
 
             if event == "allow_upload":
-                allow_upload_response = socket.upload_manager.process_allow_upload(
-                    payload
+                allow_upload_response = await socket.upload_manager.process_allow_upload(
+                    payload, socket.context
                 )
 
                 rendered = await _render(socket)
@@ -342,9 +342,9 @@
             if event == "progress":
                 # Trigger progress callback BEFORE updating progress (which may consume the entry)
                 await socket.upload_manager.trigger_progress_callback_if_exists(payload, socket)
-
-                socket.upload_manager.update_progress(joinRef, payload)
-
+
+                await socket.upload_manager.update_progress(joinRef, payload, socket)
+
                 rendered = await _render(socket)
                 diff = socket.diff(rendered)
{pyview_web-0.4.3.dist-info → pyview_web-0.5.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyview-web
-Version: 0.4.3
+Version: 0.5.1
 Summary: LiveView in Python
 License: MIT
 License-File: LICENSE
{pyview_web-0.4.3.dist-info → pyview_web-0.5.1.dist-info}/RECORD RENAMED
@@ -1,5 +1,6 @@
 pyview/__init__.py,sha256=5RJ_KtwJvI_-_Vhb3-py5Qf78YdH1HHvAzZO1ddzzrU,518
-pyview/assets/js/app.js,sha256=XuuSgEMY4hx8v0OuEPwaa7trktu_vppL0tc3Bs9Fw7s,2524
+pyview/assets/js/app.js,sha256=8Y3mGEf6KeqBUSzyYFalnzD6U_r5hhU332RyQSXwW0w,2561
+pyview/assets/js/uploaders.js,sha256=fKqvmGSfM_dIalcldqy_Zd-4Jv_7ruucfC6hdoPW2QQ,8249
 pyview/assets/package-lock.json,sha256=kFCrEUJc3G7VD7EsBQf6__EKQhaKAok-I5rrwiAoX0w,2425
 pyview/assets/package.json,sha256=E6xaX8KMUAektIIedLmI55jGnmlNMSeD2tgKYXWk1vg,151
 pyview/async_stream_runner.py,sha256=_vXeU1LyuQkJrK5AlaaF1gyhRFWXhzn3Y73BFhSsWVc,2289
@@ -21,14 +22,15 @@ pyview/instrumentation/interfaces.py,sha256=AhVDM_vzETWtM-wfOXaM13K2OgdL0H8lu5wh
 pyview/instrumentation/noop.py,sha256=VP8UjiI--A7KWqnSFh7PMG7MqY0Z9ddQjBYVW7iHZa0,2941
 pyview/js.py,sha256=E6HMsUfXQjrcLqYq26ieeYuzTjBeZqfJwwOm3uSR4ME,3498
 pyview/live_routes.py,sha256=IN2Jmy8b1umcfx1R7ZgFXHZNbYDJp_kLIbADtDJknPM,1749
-pyview/live_socket.py,sha256=SjN2TSYHnMgmzjE3rVZkNBxtZ0EA3Wx3frg7tx85YBY,7655
+pyview/live_socket.py,sha256=OqdoN6SiWSYbzjsY_nNbhm9_swAC5VQhjS--HmxzMxA,7965
 pyview/live_view.py,sha256=mwAp7jiABSZCBgYF-GLQCB7zcJ7Wpz9cuC84zjzsp2U,1455
 pyview/meta.py,sha256=01Z-qldB9jrewmIJHQpUqyIhuHodQGgCvpuY9YM5R6c,74
 pyview/phx_message.py,sha256=DUdPfl6tlw9K0FNXJ35ehq03JGgynvwA_JItHQ_dxMQ,2007
 pyview/pyview.py,sha256=2rj7NMuc6-tml2Wg4PBV7tydFJVa6XUw0pM0voWYg5g,2972
 pyview/secret.py,sha256=HbaNpGAkFs4uxMVAmk9HwE3FIehg7dmwEOlED7C9moM,363
 pyview/session.py,sha256=nC8ExyVwfCgQfx9T-aJGyFhr2C7jsrEY_QFkaXtP28U,432
-pyview/static/assets/app.js,sha256=QoXfdcOCYwVYJftvjsIIVwFye7onaOJMxRpalyYqoMU,200029
+pyview/static/assets/app.js,sha256=pr_xqmsWzA63ebjdkogJLpTUPFI8xZFvcOn1fv1IVxI,199828
+pyview/static/assets/uploaders.js,sha256=fKqvmGSfM_dIalcldqy_Zd-4Jv_7ruucfC6hdoPW2QQ,8249
 pyview/template/__init__.py,sha256=0goMpA8-TCKcwHbhjvAgbPYnY929vBrwjc701t9RIQw,583
 pyview/template/context_processor.py,sha256=y07t7mhL7XjZNbwHnTTyXJvYhXabtuTukDScycAFjVc,312
 pyview/template/live_template.py,sha256=m8_1TCFGfpVkXyZOIWN6a3ksvsewPlo8vTzzPGDyEU0,2408
@@ -36,7 +38,7 @@ pyview/template/render_diff.py,sha256=1P-OgtcGb0Y-zJ9uUH3bKWX-qQTHBa4jgg73qJD7eg
 pyview/template/root_template.py,sha256=zCUs1bt8R7qynhBE0tTSEYfdkGtbeKNmPhwzRiFNdsI,2031
 pyview/template/serializer.py,sha256=WDZfqJr2LMlf36fUW2CmWc2aREc63553_y_GRP2-qYc,826
 pyview/template/utils.py,sha256=S8593UjUJztUrtC3h1EL9MxQp5uH7rFDTNkv9C6A_xU,642
-pyview/uploads.py,sha256=G8zcVoTfWUjFs4nW3XZj60PqWujWTw2Rs3WOyyo52nY,12077
+pyview/uploads.py,sha256=eQGfvYTUuhdEfcTBBPlWPlhpWaN7b70m1Ph0HXaTPP0,21369
 pyview/vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 pyview/vendor/flet/pubsub/__init__.py,sha256=JSPCeKB26b5E-IVHNRvVHrlf_CBGDLCulE9ADrostGs,39
 pyview/vendor/flet/pubsub/pub_sub.py,sha256=gpdruSxKQBqL7_Dtxo4vETm1kM0YH7S299msw2oyUoE,10184
@@ -50,9 +52,9 @@ pyview/vendor/ibis/nodes.py,sha256=TgFt4q5MrVW3gC3PVitrs2LyXKllRveooM7XKydNATk,2
 pyview/vendor/ibis/template.py,sha256=6XJXnztw87CrOaKeW3e18LL0fNM8AI6AaK_QgMdb7ew,2353
 pyview/vendor/ibis/tree.py,sha256=hg8f-fKHeo6DE8R-QgAhdvEaZ8rKyz7p0nGwPy0CBTs,2509
 pyview/vendor/ibis/utils.py,sha256=nLSaxPR9vMphzV9qinlz_Iurv9c49Ps6Knv8vyNlewU,2768
-pyview/ws_handler.py,sha256=f6iI4vvLesepI9tCTHLyRBbdKCGPS6Gzj_UUjfIrPT8,14722
-pyview_web-0.4.3.dist-info/METADATA,sha256=uqYhaTBqJQgHBz5s2S33GfRp7BeVPNEmAuaQcitesNk,5280
-pyview_web-0.4.3.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
-pyview_web-0.4.3.dist-info/entry_points.txt,sha256=GAT-ic-VYmmSMUSUVKdV1bp4w-vgEeVP-XzElvarQ9U,42
-pyview_web-0.4.3.dist-info/licenses/LICENSE,sha256=M_bADaBm9_MV9llX3lCicksLhwk3eZUjA2srE0uUWr0,1071
-pyview_web-0.4.3.dist-info/RECORD,,
+pyview/ws_handler.py,sha256=vBsYxr7C7qs8sKcHQuxMnuuBv7_B-DPc1oq_YSrI1ms,14726
+pyview_web-0.5.1.dist-info/METADATA,sha256=uDi_cHRZuItEHHRu9q4GEDeyOkmvY3HZO6SqngtA6VI,5280
+pyview_web-0.5.1.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+pyview_web-0.5.1.dist-info/entry_points.txt,sha256=GAT-ic-VYmmSMUSUVKdV1bp4w-vgEeVP-XzElvarQ9U,42
+pyview_web-0.5.1.dist-info/licenses/LICENSE,sha256=M_bADaBm9_MV9llX3lCicksLhwk3eZUjA2srE0uUWr0,1071
+pyview_web-0.5.1.dist-info/RECORD,,