fal 1.7.2-py3-none-any.whl → 1.7.3-py3-none-any.whl
This diff compares publicly released versions of the package as published to its public registry, and is provided for informational purposes only.
Potentially problematic release: this version of fal might be problematic.
- fal/_fal_version.py +2 -2
- fal/toolkit/file/providers/fal.py +237 -121
- {fal-1.7.2.dist-info → fal-1.7.3.dist-info}/METADATA +1 -1
- {fal-1.7.2.dist-info → fal-1.7.3.dist-info}/RECORD +7 -7
- {fal-1.7.2.dist-info → fal-1.7.3.dist-info}/WHEEL +0 -0
- {fal-1.7.2.dist-info → fal-1.7.3.dist-info}/entry_points.txt +0 -0
- {fal-1.7.2.dist-info → fal-1.7.3.dist-info}/top_level.txt +0 -0
fal/toolkit/file/providers/fal.py
CHANGED
@@ -205,12 +205,12 @@ class MultipartUpload:
 
     def __init__(
         self,
-
+        file_name: str,
         chunk_size: int | None = None,
         content_type: str | None = None,
         max_concurrency: int | None = None,
     ) -> None:
-        self.
+        self.file_name = file_name
         self.chunk_size = chunk_size or self.MULTIPART_CHUNK_SIZE
         self.content_type = content_type or "application/octet-stream"
         self.max_concurrency = max_concurrency or self.MULTIPART_MAX_CONCURRENCY
@@ -230,7 +230,7 @@ class MultipartUpload:
             },
             data=json.dumps(
                 {
-                    "file_name":
+                    "file_name": self.file_name,
                     "content_type": self.content_type,
                 }
             ).encode(),
@@ -244,47 +244,29 @@ class MultipartUpload:
                 f"Error initiating upload. Status {exc.status}: {exc.reason}"
             )
 
-    def
-
-
-
-
-
-
-
-
-                data=data,
-            )
+    def upload_part(self, part_number: int, data: bytes) -> None:
+        url = f"{self._upload_url}&part_number={part_number}"
+
+        req = Request(
+            url,
+            method="PUT",
+            headers={"Content-Type": self.content_type},
+            data=data,
+        )
 
-
-
-
+        try:
+            with urlopen(req) as resp:
+                self._parts.append(
+                    {
                         "part_number": part_number,
                         "etag": resp.headers["ETag"],
                     }
-            except HTTPError as exc:
-                raise FileUploadException(
-                    f"Error uploading part {part_number} to {url}. "
-                    f"Status {exc.status}: {exc.reason}"
                 )
-
-
-
-
-
-        with concurrent.futures.ThreadPoolExecutor(
-            max_workers=self.max_concurrency
-        ) as executor:
-            futures = []
-            for part_number in range(1, parts + 1):
-                upload_url = f"{self._upload_url}&part_number={part_number}"
-                futures.append(
-                    executor.submit(self._upload_part, upload_url, part_number)
-                )
-
-            for future in concurrent.futures.as_completed(futures):
-                entry = future.result()
-                self._parts.append(entry)
+        except HTTPError as exc:
+            raise FileUploadException(
+                f"Error uploading part {part_number} to {url}. "
+                f"Status {exc.status}: {exc.reason}"
+            )
 
     def complete(self):
         url = self._upload_url
@@ -307,6 +289,82 @@ class MultipartUpload:
 
         return self._file_url
 
+    @classmethod
+    def save(
+        cls,
+        file: FileData,
+        chunk_size: int | None = None,
+        max_concurrency: int | None = None,
+    ):
+        import concurrent.futures
+
+        multipart = cls(
+            file.file_name,
+            chunk_size=chunk_size,
+            content_type=file.content_type,
+            max_concurrency=max_concurrency,
+        )
+        multipart.create()
+
+        parts = math.ceil(len(file.data) / multipart.chunk_size)
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=multipart.max_concurrency
+        ) as executor:
+            futures = []
+            for part_number in range(1, parts + 1):
+                start = (part_number - 1) * multipart.chunk_size
+                data = file.data[start : start + multipart.chunk_size]
+                futures.append(
+                    executor.submit(multipart.upload_part, part_number, data)
+                )
+
+            for future in concurrent.futures.as_completed(futures):
+                future.result()
+
+        return multipart.complete()
+
+    @classmethod
+    def save_file(
+        cls,
+        file_path: str | Path,
+        chunk_size: int | None = None,
+        content_type: str | None = None,
+        max_concurrency: int | None = None,
+    ) -> str:
+        import concurrent.futures
+
+        file_name = os.path.basename(file_path)
+        size = os.path.getsize(file_path)
+
+        multipart = cls(
+            file_name,
+            chunk_size=chunk_size,
+            content_type=content_type,
+            max_concurrency=max_concurrency,
+        )
+        multipart.create()
+
+        parts = math.ceil(size / multipart.chunk_size)
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=multipart.max_concurrency
+        ) as executor:
+            futures = []
+            for part_number in range(1, parts + 1):
+
+                def _upload_part(pn: int) -> None:
+                    with open(file_path, "rb") as f:
+                        start = (pn - 1) * multipart.chunk_size
+                        f.seek(start)
+                        data = f.read(multipart.chunk_size)
+                        multipart.upload_part(pn, data)
+
+                futures.append(executor.submit(_upload_part, part_number))
+
+            for future in concurrent.futures.as_completed(futures):
+                future.result()
+
+        return multipart.complete()
+
 
 class InternalMultipartUploadV3:
     MULTIPART_THRESHOLD = 100 * 1024 * 1024
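For orientation on the save/save_file classmethods added above: part numbers are 1-based, and part N covers the byte window [(N - 1) * chunk_size, N * chunk_size), with the final part carrying the remainder. A standalone sketch of that arithmetic; the 10 MiB chunk size and 25 MiB buffer are illustrative assumptions, not values taken from this diff:

    import math

    chunk_size = 10 * 1024 * 1024          # assumed; the real MULTIPART_CHUNK_SIZE default is not shown in this diff
    payload = b"\0" * (25 * 1024 * 1024)   # hypothetical 25 MiB in-memory payload

    # Same slicing as the added MultipartUpload.save classmethod.
    parts = math.ceil(len(payload) / chunk_size)   # -> 3
    for part_number in range(1, parts + 1):
        start = (part_number - 1) * chunk_size
        data = payload[start : start + chunk_size]
        print(part_number, len(data))              # parts 1-2: 10 MiB, part 3: 5 MiB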
@@ -315,12 +373,12 @@ class InternalMultipartUploadV3:
 
     def __init__(
         self,
-
+        file_name: str,
         chunk_size: int | None = None,
         content_type: str | None = None,
         max_concurrency: int | None = None,
     ) -> None:
-        self.
+        self.file_name = file_name
         self.chunk_size = chunk_size or self.MULTIPART_CHUNK_SIZE
         self.content_type = content_type or "application/octet-stream"
         self.max_concurrency = max_concurrency or self.MULTIPART_MAX_CONCURRENCY
@@ -359,7 +417,7 @@ class InternalMultipartUploadV3:
                 **self.auth_headers,
                 "Accept": "application/json",
                 "Content-Type": self.content_type,
-                "X-Fal-File-Name":
+                "X-Fal-File-Name": self.file_name,
             },
         )
         with urlopen(req) as response:
@@ -373,52 +431,32 @@ class InternalMultipartUploadV3:
             )
 
     @retry(max_retries=5, base_delay=1, backoff_type="exponential", jitter=True)
-    def
-
-            start = (part_number - 1) * self.chunk_size
-            f.seek(start)
-            data = f.read(self.chunk_size)
-            req = Request(
-                url,
-                method="PUT",
-                headers={
-                    **self.auth_headers,
-                    "Content-Type": self.content_type,
-                },
-                data=data,
-            )
+    def upload_part(self, part_number: int, data: bytes) -> None:
+        url = f"{self.access_url}/multipart/{self.upload_id}/{part_number}"
 
-
-
-
+        req = Request(
+            url,
+            method="PUT",
+            headers={
+                **self.auth_headers,
+                "Content-Type": self.content_type,
+            },
+            data=data,
+        )
+
+        try:
+            with urlopen(req) as resp:
+                self._parts.append(
+                    {
                         "partNumber": part_number,
                         "etag": resp.headers["ETag"],
                     }
-            except HTTPError as exc:
-                raise FileUploadException(
-                    f"Error uploading part {part_number} to {url}. "
-                    f"Status {exc.status}: {exc.reason}"
-                )
-
-    def upload(self) -> None:
-        import concurrent.futures
-
-        parts = math.ceil(os.path.getsize(self.file_path) / self.chunk_size)
-        with concurrent.futures.ThreadPoolExecutor(
-            max_workers=self.max_concurrency
-        ) as executor:
-            futures = []
-            for part_number in range(1, parts + 1):
-                upload_url = (
-                    f"{self.access_url}/multipart/{self.upload_id}/{part_number}"
-                )
-                futures.append(
-                    executor.submit(self._upload_part, upload_url, part_number)
                 )
-
-
-
-
+        except HTTPError as exc:
+            raise FileUploadException(
+                f"Error uploading part {part_number} to {url}. "
+                f"Status {exc.status}: {exc.reason}"
+            )
 
     def complete(self) -> str:
         url = f"{self.access_url}/multipart/{self.upload_id}/complete"
@@ -442,13 +480,106 @@ class InternalMultipartUploadV3:
 
         return self.access_url
 
+    @classmethod
+    def save(
+        cls,
+        file: FileData,
+        chunk_size: int | None = None,
+        max_concurrency: int | None = None,
+    ):
+        import concurrent.futures
+
+        multipart = cls(
+            file.file_name,
+            chunk_size=chunk_size,
+            content_type=file.content_type,
+            max_concurrency=max_concurrency,
+        )
+        multipart.create()
+
+        parts = math.ceil(len(file.data) / multipart.chunk_size)
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=multipart.max_concurrency
+        ) as executor:
+            futures = []
+            for part_number in range(1, parts + 1):
+                start = (part_number - 1) * multipart.chunk_size
+                data = file.data[start : start + multipart.chunk_size]
+                futures.append(
+                    executor.submit(multipart.upload_part, part_number, data)
+                )
+
+            for future in concurrent.futures.as_completed(futures):
+                future.result()
+
+        return multipart.complete()
+
+    @classmethod
+    def save_file(
+        cls,
+        file_path: str | Path,
+        chunk_size: int | None = None,
+        content_type: str | None = None,
+        max_concurrency: int | None = None,
+    ) -> str:
+        import concurrent.futures
+
+        file_name = os.path.basename(file_path)
+        size = os.path.getsize(file_path)
+
+        multipart = cls(
+            file_name,
+            chunk_size=chunk_size,
+            content_type=content_type,
+            max_concurrency=max_concurrency,
+        )
+        multipart.create()
+
+        parts = math.ceil(size / multipart.chunk_size)
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=multipart.max_concurrency
+        ) as executor:
+            futures = []
+            for part_number in range(1, parts + 1):
+
+                def _upload_part(pn: int) -> None:
+                    with open(file_path, "rb") as f:
+                        start = (pn - 1) * multipart.chunk_size
+                        f.seek(start)
+                        data = f.read(multipart.chunk_size)
+                        multipart.upload_part(pn, data)
+
+                futures.append(executor.submit(_upload_part, part_number))
+
+            for future in concurrent.futures.as_completed(futures):
+                future.result()
+
+        return multipart.complete()
+
 
 @dataclass
 class FalFileRepositoryV2(FalFileRepositoryBase):
     @retry(max_retries=3, base_delay=1, backoff_type="exponential", jitter=True)
     def save(
-        self,
+        self,
+        file: FileData,
+        multipart: bool | None = None,
+        multipart_threshold: int | None = None,
+        multipart_chunk_size: int | None = None,
+        multipart_max_concurrency: int | None = None,
+        object_lifecycle_preference: dict[str, str] | None = None,
     ) -> str:
+        if multipart is None:
+            threshold = multipart_threshold or MultipartUpload.MULTIPART_THRESHOLD
+            multipart = len(file.data) > threshold
+
+        if multipart:
+            return MultipartUpload.save(
+                file,
+                chunk_size=multipart_chunk_size,
+                max_concurrency=multipart_max_concurrency,
+            )
+
         token = fal_v2_token_manager.get_token()
         headers = {
             "Authorization": f"{token.token_type} {token.token}",
@@ -475,23 +606,6 @@ class FalFileRepositoryV2(FalFileRepositoryBase):
                 f"Error initiating upload. Status {e.status}: {e.reason}"
             )
 
-    def _save_multipart(
-        self,
-        file_path: str | Path,
-        chunk_size: int | None = None,
-        content_type: str | None = None,
-        max_concurrency: int | None = None,
-    ) -> str:
-        multipart = MultipartUpload(
-            file_path,
-            chunk_size=chunk_size,
-            content_type=content_type,
-            max_concurrency=max_concurrency,
-        )
-        multipart.create()
-        multipart.upload()
-        return multipart.complete()
-
     def save_file(
         self,
         file_path: str | Path,
@@ -507,7 +621,7 @@ class FalFileRepositoryV2(FalFileRepositoryBase):
         multipart = os.path.getsize(file_path) > threshold
 
         if multipart:
-            url =
+            url = MultipartUpload.save_file(
                 file_path,
                 chunk_size=multipart_chunk_size,
                 content_type=content_type,
@@ -608,8 +722,27 @@ class InternalFalFileRepositoryV3(FileRepository):
 
     @retry(max_retries=3, base_delay=1, backoff_type="exponential", jitter=True)
     def save(
-        self,
+        self,
+        file: FileData,
+        multipart: bool | None = None,
+        multipart_threshold: int | None = None,
+        multipart_chunk_size: int | None = None,
+        multipart_max_concurrency: int | None = None,
+        object_lifecycle_preference: dict[str, str] | None = None,
     ) -> str:
+        if multipart is None:
+            threshold = (
+                multipart_threshold or InternalMultipartUploadV3.MULTIPART_THRESHOLD
+            )
+            multipart = len(file.data) > threshold
+
+        if multipart:
+            return InternalMultipartUploadV3.save(
+                file,
+                chunk_size=multipart_chunk_size,
+                max_concurrency=multipart_max_concurrency,
+            )
+
         headers = {
             **self.auth_headers,
             "Accept": "application/json",
@@ -640,23 +773,6 @@ class InternalFalFileRepositoryV3(FileRepository):
             "User-Agent": "fal/0.1.0",
         }
 
-    def _save_multipart(
-        self,
-        file_path: str | Path,
-        chunk_size: int | None = None,
-        content_type: str | None = None,
-        max_concurrency: int | None = None,
-    ) -> str:
-        multipart = InternalMultipartUploadV3(
-            file_path,
-            chunk_size=chunk_size,
-            content_type=content_type,
-            max_concurrency=max_concurrency,
-        )
-        multipart.create()
-        multipart.upload()
-        return multipart.complete()
-
     def save_file(
         self,
         file_path: str | Path,
@@ -672,7 +788,7 @@ class InternalFalFileRepositoryV3(FileRepository):
         multipart = os.path.getsize(file_path) > threshold
 
         if multipart:
-            url =
+            url = MultipartUpload.save_file(
                 file_path,
                 chunk_size=multipart_chunk_size,
                 content_type=content_type,
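Net effect of the fal.py changes: the old path-bound _upload_part/upload flow and the repositories' _save_multipart helpers are removed, and multipart uploads go through the new save (in-memory FileData) and save_file (on-disk path) classmethods, which create the upload, push parts from a thread pool, and return the completed file URL. A minimal usage sketch against the save_file signature added above; the local path is hypothetical and FileUploadException handling is omitted:

    from pathlib import Path

    from fal.toolkit.file.providers.fal import MultipartUpload

    # Splits the file into chunk_size pieces, uploads them concurrently,
    # and returns the final file URL from complete().
    url = MultipartUpload.save_file(
        Path("weights.safetensors"),             # hypothetical local file
        content_type="application/octet-stream",
        max_concurrency=4,
    )
    print(url)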
{fal-1.7.2.dist-info → fal-1.7.3.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
 fal/__init__.py,sha256=wXs1G0gSc7ZK60-bHe-B2m0l_sA6TrFk4BxY0tMoLe8,784
 fal/__main__.py,sha256=4JMK66Wj4uLZTKbF-sT3LAxOsr6buig77PmOkJCRRxw,83
-fal/_fal_version.py,sha256=
+fal/_fal_version.py,sha256=Lv0gR-NbC-8DxwfmwXEmOzSq6Hgx6MH4xF1fYh_opXo,411
 fal/_serialization.py,sha256=rD2YiSa8iuzCaZohZwN_MPEB-PpSKbWRDeaIDpTEjyY,7653
 fal/_version.py,sha256=EBGqrknaf1WygENX-H4fBefLvHryvJBBGtVJetaB0NY,266
 fal/api.py,sha256=u9QfJtb1nLDJu9kegKCrdvW-Cp0mfMSGTPm5X1ywoeE,43388
@@ -52,7 +52,7 @@ fal/toolkit/types.py,sha256=kkbOsDKj1qPGb1UARTBp7yuJ5JUuyy7XQurYUBCdti8,4064
 fal/toolkit/file/__init__.py,sha256=FbNl6wD-P0aSSTUwzHt4HujBXrbC3ABmaigPQA4hRfg,70
 fal/toolkit/file/file.py,sha256=-gccCKnarTu6Nfm_0yQ0sJM9aadB5tUNvKS1PTqxiFc,9071
 fal/toolkit/file/types.py,sha256=MjZ6xAhKPv4rowLo2Vcbho0sX7AQ3lm3KFyYDcw0dL4,1845
-fal/toolkit/file/providers/fal.py,sha256=
+fal/toolkit/file/providers/fal.py,sha256=X7vz0QQg4xFdglbHvOzjgL78dleFMeUzUh1xX68K-zQ,25831
 fal/toolkit/file/providers/gcp.py,sha256=iQtkoYUqbmKKpC5srVOYtrruZ3reGRm5lz4kM8bshgk,2247
 fal/toolkit/file/providers/r2.py,sha256=G2OHcCH2yWrVtXT4hWHEXUeEjFhbKO0koqHcd7hkczk,2871
 fal/toolkit/file/providers/s3.py,sha256=CfiA6rTBFfP-empp0cB9OW2c9F5iy0Z-kGwCs5HBICU,2524
@@ -130,8 +130,8 @@ openapi_fal_rest/models/workflow_node_type.py,sha256=-FzyeY2bxcNmizKbJI8joG7byRi
 openapi_fal_rest/models/workflow_schema.py,sha256=4K5gsv9u9pxx2ItkffoyHeNjBBYf6ur5bN4m_zePZNY,2019
 openapi_fal_rest/models/workflow_schema_input.py,sha256=2OkOXWHTNsCXHWS6EGDFzcJKkW5FIap-2gfO233EvZQ,1191
 openapi_fal_rest/models/workflow_schema_output.py,sha256=EblwSPAGfWfYVWw_WSSaBzQVju296is9o28rMBAd0mc,1196
-fal-1.7.
-fal-1.7.
-fal-1.7.
-fal-1.7.
-fal-1.7.
+fal-1.7.3.dist-info/METADATA,sha256=eTRggSeYlEsdzY5D68R6NkJbD65PrD-nkIRMd2MPa5Q,3996
+fal-1.7.3.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+fal-1.7.3.dist-info/entry_points.txt,sha256=32zwTUC1U1E7nSTIGCoANQOQ3I7-qHG5wI6gsVz5pNU,37
+fal-1.7.3.dist-info/top_level.txt,sha256=r257X1L57oJL8_lM0tRrfGuXFwm66i1huwQygbpLmHw,21
+fal-1.7.3.dist-info/RECORD,,
{fal-1.7.2.dist-info → fal-1.7.3.dist-info}/WHEEL
File without changes
{fal-1.7.2.dist-info → fal-1.7.3.dist-info}/entry_points.txt
File without changes
{fal-1.7.2.dist-info → fal-1.7.3.dist-info}/top_level.txt
File without changes