ciocore-9.1.0rc1-py2.py3-none-any.whl → ciocore-9.1.0rc2-py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


ciocore/VERSION CHANGED
@@ -1 +1 @@
- 9.1.0-rc.1
+ 9.1.0-rc.2

ciocore/uploader/_uploader.py CHANGED
@@ -2,7 +2,6 @@ import datetime
  import json
  import logging
  import os
- import pathlib
  import requests.exceptions
  import sys
  import time
@@ -26,12 +25,13 @@ from ciocore import (
      exceptions,
  )
 
- from . import thread_queue_job
-
  from .upload_stats import UploadStats
 
  logger = logging.getLogger("{}.uploader".format(loggeria.CONDUCTOR_LOGGER_NAME))
 
+ SINGLEPART = "singlepart"
+ MULTIPART = "multipart"
+
 
  class MD5Worker(worker.ThreadWorker):
      """
@@ -329,61 +329,28 @@ class FileStatWorker(worker.ThreadWorker):
          """
 
          if job:
-
-             kms_key_name = job.get('kmsKeyName')
-
              # iterate through singlepart urls
              for singlepart_upload in job.get("singlePartURLs", []):
                  path = singlepart_upload["filePath"]
                  file_size = singlepart_upload["fileSize"]
                  upload_url = singlepart_upload["preSignedURL"]
-                 md5 = self.metric_store.get_dict("file_md5s", path)
 
                  self.metric_store.increment("bytes_to_upload", file_size, path)
                  self.metric_store.increment("num_files_to_upload")
                  logger.debug("Singlepart, adding task %s", path)
 
-                 upload_tq_job = thread_queue_job.UploadThreadQueueJob(path,
-                                                                       file_size,
-                                                                       presigned_url=upload_url,
-                                                                       file_md5=md5,
-                                                                       upload_id=None,
-                                                                       part_size=file_size,
-                                                                       part_index=1,
-                                                                       kms_key_name=kms_key_name)
-
-                 self.put_job(upload_tq_job)
+                 self.put_job((path, file_size, upload_url, SINGLEPART))
 
              # iterate through multipart
              for multipart_upload in job.get("multiPartURLs", []):
                  path = multipart_upload["filePath"]
                  file_size = multipart_upload["fileSize"]
 
-                 part = multipart_upload
-                 total_parts = len(multipart_upload['parts'])
-                 md5 = self.metric_store.get_dict("file_md5s", path)
-
-                 for chunk in multipart_upload['parts']:
-                     logger.debug("Multipart, adding task %s (part %s)", path, chunk['partNumber'])
-
-                     upload_tq_job = thread_queue_job.UploadThreadQueueJob(
-                         path=path,
-                         file_size=file_size,
-                         presigned_url=chunk['url'],
-                         file_md5=md5,
-                         upload_id=multipart_upload['uploadID'],
-                         part_size=multipart_upload['partSize'],
-                         total_parts=total_parts,
-                         part_index=chunk['partNumber'],
-                         kms_key_name=kms_key_name)
-
-
-                     part['parts'] = chunk
-                     self.put_job(upload_tq_job)
-
                  self.metric_store.increment("bytes_to_upload", file_size, path)
                  self.metric_store.increment("num_files_to_upload")
-
+                 logger.debug("Multipart, adding task %s", path)
+                 self.put_job((path, file_size, multipart_upload, MULTIPART))
+
          # make sure we return None, so no message is automatically added to the out_queue
          return None
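For orientation, here is a minimal sketch (not code from the package) of the two tuple shapes that FileStatWorker now enqueues and that UploadWorker.do_work unpacks in the hunks below. The paths, sizes, IDs, and URLs are placeholder values; the optional fifth element corresponds to the kms_key_name that do_work reads when it is present.

```python
# Illustrative sketch only; all values below are placeholders, not real API data.
SINGLEPART = "singlepart"
MULTIPART = "multipart"

# Single-part job: the third element is the pre-signed URL string itself.
singlepart_job = (
    "/projects/shot_010/scene.ma",                      # path
    1048576,                                            # file_size in bytes
    "https://storage.example.com/presigned-put-url",    # pre-signed URL
    SINGLEPART,
)

# Multi-part job: the third element is the whole multiPartURLs entry,
# carrying the uploadID, the partSize, and one pre-signed URL per part.
multipart_job = (
    "/projects/shot_010/cache.abc",
    5368709120,
    {
        "uploadID": "example-upload-id",
        "partSize": 104857600,
        "parts": [
            {"partNumber": 1, "url": "https://storage.example.com/part-1"},
            {"partNumber": 2, "url": "https://storage.example.com/part-2"},
        ],
    },
    MULTIPART,
)

# UploadWorker.do_work unpacks the tuple positionally, as in the hunks that follow.
path, file_size, upload, upload_type = multipart_job
```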
@@ -415,30 +382,45 @@ class UploadWorker(worker.ThreadWorker):
              self.metric_store.increment("bytes_uploaded", len(data), filename)
 
      def do_work(self, job, thread_int):
-
          if job:
-
-             md5 = self.metric_store.get_dict("file_md5s", job.path)
+             kms_key_name = None
 
              try:
-                 if job.is_multipart():
-                     return self.do_multipart_upload(job)
-
-                 else:
-                     return self.do_singlepart_upload(job)
+                 filename = job[0]
+                 file_size = job[1]
+                 upload = job[2]
+                 upload_type = job[3]
 
-             except Exception as err_msg:
-                 real_md5 = common.get_base64_md5(job.path)
+             except Exception:
+                 logger.error("Issue with job (%s): %s", len(job), job)
+                 raise
+
+             if len(job) > 4:
+                 kms_key_name = job[4]
+
+             md5 = self.metric_store.get_dict("file_md5s", filename)
+
+             try:
+                 if upload_type == SINGLEPART:
+                     return self.do_singlepart_upload(
+                         upload, filename, file_size, md5, kms_key_name
+                     )
+                 elif upload_type == MULTIPART:
+                     return self.do_multipart_upload(upload, filename, md5)
 
-                 exc_tb = sys.exc_info()[2]
-                 exception_line_num = exc_tb.tb_lineno
-                 exception_file = pathlib.Path(exc_tb.tb_frame.f_code.co_filename).name
+                 raise Exception(
+                     "upload_type is '%s' expected %s or %s"
+                     % (upload_type, SINGLEPART, MULTIPART)
+                 )
+
+             except Exception as err_msg:
+                 real_md5 = common.get_base64_md5(filename)
 
                  if isinstance(err_msg, requests.exceptions.HTTPError):
-                     error_message = f"Upload of {job.path} failed with a response code {err_msg.response.status_code} ({err_msg.response.reason}) (expected '{job.md5}', got '{real_md5}')"
+                     error_message = f"Upload of {filename} failed with a response code {err_msg.response.status_code} ({err_msg.response.reason}) (expected '{md5}', got '{real_md5}')"
                  else:
                      error_message = (
-                         f"Upload of {job.path} failed. (expected '{job.file_md5}', got '{real_md5}') {str(err_msg)} [{exception_file}-{exception_line_num}]"
+                         f"Upload of {filename} failed. (expected '{md5}', got '{real_md5}') {str(err_msg)}"
                      )
 
                  logger.error(error_message)
@@ -447,7 +429,9 @@ class UploadWorker(worker.ThreadWorker):
                  return worker.EMPTY_JOB
 
      @common.DecRetry(retry_exceptions=api_client.CONNECTION_EXCEPTIONS, tries=5)
-     def do_singlepart_upload(self, job):
+     def do_singlepart_upload(
+         self, upload_url, filename, file_size, md5, kms_key_name=None
+     ):
          """
          Note that for GCS we don't rely on the make_request's own retry mechanism because we need to
          recreate the chunked_reader generator before retrying the request. Instead, we wrap this
@@ -457,23 +441,19 @@ class UploadWorker(worker.ThreadWorker):
          headers that S3 does not accept.
          """
 
-         tq_job = thread_queue_job.MultiPartThreadQueueJob( md5=job.file_md5,
-                                                            path=job.path,
-                                                            total_parts=job.total_parts)
-
-         if job.is_vendor_aws() or job.is_vendor_cw():
+         if ("amazonaws" in upload_url) or ("coreweave" in upload_url):
              # must declare content-length ourselves due to zero byte bug in requests library.
              # api_client.make_prepared_request docstring.
              headers = {
                  "Content-Type": "application/octet-stream",
-                 "Content-Length": str(job.file_size),
+                 "Content-Length": str(file_size),
              }
 
-             with open(job.path, "rb") as fh:
+             with open(filename, "rb") as fh:
                  # TODO: support chunked
                  response = self.api_client.make_prepared_request(
                      verb="PUT",
-                     url=job.presigned_url,
+                     url=upload_url,
                      headers=headers,
                      params=None,
                      data=fh,
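The docstring above explains why the whole single-part upload is wrapped in common.DecRetry rather than relying on make_request's own retries: a generator that streams the file is exhausted after one attempt. Below is a rough sketch of that pattern, with a hypothetical put_request callable and a simplified retry decorator; it is not the package's api_client or DecRetry implementation.

```python
import functools
import time

def chunked_reader(filename, chunk_size=1024 * 1024):
    """Yield the file in fixed-size chunks; once consumed it cannot be rewound."""
    with open(filename, "rb") as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            yield chunk

def retry(tries=5, delay=1):
    """Simplified stand-in for a retry decorator: re-call the function on failure."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(tries):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == tries - 1:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator

@retry(tries=5)
def upload_file(filename, put_request):
    # The generator is rebuilt inside the retried function, so every attempt
    # streams the file from the beginning instead of reusing a spent generator.
    return put_request(data=chunked_reader(filename))
```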
@@ -487,26 +467,25 @@ class UploadWorker(worker.ThreadWorker):
                  response.close()
 
                  # report upload progress
-                 self.metric_store.increment("bytes_uploaded", job.file_size, job.path)
-
+                 self.metric_store.increment("bytes_uploaded", file_size, filename)
+
+                 return response
          else:
              headers = {"Content-MD5": md5, "Content-Type": "application/octet-stream"}
 
-             if job.kms_key_name is not None:
-                 headers["x-goog-encryption-kms-key-name"] = job.kms_key_name
+             if kms_key_name:
+                 headers["x-goog-encryption-kms-key-name"] = kms_key_name
 
-             self.api_client.make_request(
-                 conductor_url=job.presigned_url,
+             return self.api_client.make_request(
+                 conductor_url=upload_url,
                  headers=headers,
-                 data=self.chunked_reader(job.path),
+                 data=self.chunked_reader(filename),
                  verb="PUT",
                  tries=1,
                  use_api_key=True,
              )
-
-         return tq_job
 
-     def do_multipart_upload(self, job):
+     def do_multipart_upload(self, upload, filename, md5):
          """
          Files will be split into partSize returned by the FileAPI and hydrated once all parts are
          uploaded. On successful part upload, response headers will contain an ETag. This value must
@@ -514,32 +493,42 @@
          """
          uploads = []
          complete_payload = {
-             "uploadID": job.upload_id,
-             "hash": job.file_md5,
+             "uploadID": upload["uploadID"],
+             "hash": md5,
              "completedParts": [],
              "project": self.project,
          }
 
-         tq_job = thread_queue_job.MultiPartThreadQueueJob(path=job.path,
-                                                           md5=job.file_md5,
-                                                           total_parts=job.total_parts,
-                                                           part_index=job.part_index)
-         tq_job.upload_id = job.upload_id
-         tq_job.project = self.project
+         # iterate over parts and upload
+         for part in upload["parts"]:
+             resp_headers = self._do_multipart_upload(
+                 upload_url=part["url"],
+                 filename=filename,
+                 part_number=part["partNumber"],
+                 part_size=upload["partSize"],
+             )
 
+             if resp_headers:
+                 uploads.append(upload["uploadID"])
+                 completed_part = {
+                     "partNumber": part["partNumber"],
+                     "etag": resp_headers["ETag"].strip('"'),
+                 }
+                 complete_payload["completedParts"].append(completed_part)
 
-         resp_headers = self._do_multipart_upload(
-             upload_url=job.presigned_url,
-             filename=job.path,
-             part_number=job.part_index,
-             part_size=job.part_size,
+         # Complete multipart upload in order to hydrate file for availability
+         uri_path = "/api/v2/files/multipart/complete"
+         headers = {"Content-Type": "application/json"}
+         self.api_client.make_request(
+             uri_path=uri_path,
+             verb="POST",
+             headers=headers,
+             data=json.dumps(complete_payload),
+             raise_on_error=True,
+             use_api_key=True,
          )
 
-         if resp_headers:
-             tq_job.part = job.part_index
-             tq_job.etag = resp_headers["ETag"].strip('"')
-
-         return tq_job
+         return uploads
 
      @common.DecRetry(retry_exceptions=api_client.CONNECTION_EXCEPTIONS, tries=5)
      def _do_multipart_upload(self, upload_url, filename, part_number, part_size):
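As the docstring notes, each part's ETag response header has to be collected and sent back when the upload is completed. The sketch below shows the shape of the payload that the new do_multipart_upload assembles and POSTs to /api/v2/files/multipart/complete; the IDs, hash, and ETag strings are placeholders.

```python
import json

# Placeholder values; in the code above they come from the FileAPI response
# and from the ETag header returned for each uploaded part.
complete_payload = {
    "uploadID": "example-upload-id",
    "hash": "base64-md5-of-the-whole-file",
    "completedParts": [
        {"partNumber": 1, "etag": "etag-for-part-1"},
        {"partNumber": 2, "etag": "etag-for-part-2"},
    ],
    "project": "example-project",
}

body = json.dumps(complete_payload)  # sent with Content-Type: application/json
```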
@@ -574,73 +563,6 @@ class UploadWorker(worker.ThreadWorker):
 
          return response.headers
 
-
- class MultiPartSiphonWorker(worker.ThreadWorker):
-     def __init__(self, *args, **kwargs):
-         super(MultiPartSiphonWorker, self).__init__(*args, **kwargs)
-
-         self.api_client = api_client.ApiClient()
-         self.multipart_siphon = {}
-
-     def do_work(self, job, thread_int):
-         """
-         Process files that have already been uploaded.
-
-         If it's a single-part file, add the job to the out queue, so that it can
-         be used to determine if the Upload entity is complete.
-
-         If it's a multi-part upload, collect all the parts together. Once all the
-         parts have been accumulated, mark it as complete and add the file to the
-         out queue.
-         """
-
-         if job:
-
-             if not job.is_multipart():
-                 logger.debug("Job is not multipart (%s, %s)", job.total_parts, job.part_index)
-
-             else:
-
-                 if job.md5 not in self.multipart_siphon:
-                     self.multipart_siphon[job.md5] = []
-
-                 # Add to the task count for this worker.
-                 # -1 because a task has already been added for a single file
-                 # but not all its parts.
-                 old_task_count = self.task_count
-                 self.task_count += job.total_parts - 1
-                 logger.debug("Incrementing task count to %s from %s", self.task_count, old_task_count)
-
-                 self.multipart_siphon[job.md5].append(job)
-
-                 if len(self.multipart_siphon[job.md5]) == job.total_parts:
-
-                     complete_payload = {
-                         "uploadID": job.upload_id,
-                         "hash": job.md5,
-                         "completedParts": thread_queue_job.MultiPartThreadQueueJob.aggregate_parts(self.multipart_siphon[job.md5]),
-                         "project": job.project,
-                     }
-
-                     # Complete multipart upload in order to hydrate file for availability
-                     uri_path = "/api/v2/files/multipart/complete"
-                     headers = {"Content-Type": "application/json"}
-                     self.api_client.make_request(
-                         uri_path=uri_path,
-                         verb="POST",
-                         headers=headers,
-                         data=json.dumps(complete_payload),
-                         raise_on_error=True,
-                         use_api_key=True,
-                     )
-
-                     logger.debug("JSON payload: '%s'", json.dumps(complete_payload))
-
-                     return job
-
-         # make sure we return None, so no message is automatically added to the out_queue
-         return None
-
      def is_complete(self):
          # Get the number of files already uploaded as they are not passed to the Upload
          # worker
@@ -658,11 +580,11 @@ class MultiPartSiphonWorker(worker.ThreadWorker):
                  self.task_count,
              )
 
-             return (queue_size) >= self.task_count
+             return (queue_size + already_completed_uploads) >= self.task_count
 
          else:
              logger.debug("Is complete?: files not initialized yet")
-             return False
+             return False
 
 
  class Uploader(object):
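A small worked example of the adjusted completion check above, with made-up numbers: files that were already uploaded never pass through this worker's queue, so they now count toward the expected total.

```python
# Hypothetical numbers for illustration only.
task_count = 10                  # uploads the worker expects in total
already_completed_uploads = 3    # files the server already had
queue_size = 7                   # uploads that actually passed through the queue

# Old check: queue_size >= task_count  ->  7 >= 10, never completes.
# New check counts the skipped files as done:
assert (queue_size + already_completed_uploads) >= task_count
```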
@@ -716,7 +638,6 @@ class Uploader(object):
              ),
              (FileStatWorker, [], {"thread_count": 1}),
              (UploadWorker, [], {"thread_count": self.args["thread_count"]}),
-             (MultiPartSiphonWorker, [], {"thread_count": 1})
          ]
 
          manager = worker.JobManager(job_description)
@@ -839,31 +760,6 @@ class Uploader(object):
          file_map = {path: None for path in processed_filepaths}
          self.handle_upload_response(project=None, upload_files=file_map)
 
-         if common.SIGINT_EXIT or self.cancel:
-             print("\nUpload cancelled\n")
-
-         else:
-             print("\nUpload of {} file completed\n".format(len(file_map)))
-
-         error_messages = []
-
-         for exception in self.error_messages:
-             error_messages.append(str(exception[1]))
-             print("".join(traceback.format_tb(exception[2])))
-             logger.error("".join(traceback.format_tb(exception[2])))
-
-         if error_messages:
-
-             log_file = loggeria.LOG_PATH
-             sys.stderr.write("\nError uploading files:\n")
-
-             for err_msg in error_messages:
-                 sys.stderr.write("\t{}\n".format(err_msg))
-
-             sys.stderr.write("\nSee log {} for more details\n\n".format(log_file))
-
-         self.error_messages = []
-
      def handle_upload_response(self, project, upload_files, upload_id=None):
          """
          This is a really confusing method and should probably be split into to clear logic
@@ -922,7 +818,8 @@ class Uploader(object):
              time.sleep(5)
 
          # Shutdown the manager once all jobs are done
-         if not (self.cancel or self.manager.error or common.SIGINT_EXIT):
+         if not self.cancel and not self.manager.error:
+             logger.debug("Waiting for Manager to join")
              self.manager.join()
 
          upload_stats = UploadStats.create(
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ciocore
- Version: 9.1.0rc1
+ Version: 9.1.0rc2
  Summary: Core functionality for Conductor's client tools
  Home-page: https://github.com/ConductorTechnologies/ciocore
  Author: conductor
@@ -50,18 +50,15 @@ See [CONTRIBUTING](CONTRIBUTING.md)
 
  ## Changelog
 
- ## Version:9.1.0-rc.1 -- 11 Dec 2024
+ ## Version:9.1.0-rc.2 -- 12 Dec 2024
 
- * Adds required changes to parallelize multi-part uploads
- * Cleans up the output when explicit paths are uploaded
- * Fixes logic so managers doesn't erroneously try and call join a second time if cancelled
- * Use the new required jwt parameters
- * Removing py2.7 compatibility
+ * Use the new required jwt parameters
+ * Removing py2.7 compatibility
 
  ## Version:8.3.3 -- 04 Dec 2024
 
  * Adds filter option for API data query
-
+
  ## Version:8.3.2 -- 01 Oct 2024
 
  * Tweak to package order behavior for markdown package query
@@ -1,4 +1,4 @@
- ciocore/VERSION,sha256=ax2aQwModu65ZstFnKm4dTChIjzu_0ljZcaKGDaGkSA,10
+ ciocore/VERSION,sha256=rK2e-XPKebjaWLwloD4dWey9U5CMdVkZh1nVx-IOI3s,10
  ciocore/__init__.py,sha256=aTP7LeeosQA8BZE67gDV4jgfTK5zxmwZRjiTRu_ZWj0,646
  ciocore/api_client.py,sha256=KKL7TsYygNcfkFZDPPq1CSJsrVN_QLK4PqP44vXsCQg,33101
  ciocore/cli.py,sha256=jZ1lOKQiUcrMhsVmD9SVmPMFwHtgDF4SaoAf2-PBS54,15449
@@ -94,8 +94,7 @@ ciocore/downloader/perpetual_downloader.py,sha256=cD7lnBH75-c-ZVVPHZc1vSnDhgJOnG
  ciocore/downloader/registry.py,sha256=_JIOuqpWkJkgJGN33nt-DCvqN9Gw3xeFhzPq4RUxIoE,2903
  ciocore/downloader/reporter.py,sha256=p1NK9k6iQ-jt7lRvZR0xFz0cGb2yo8tQcjlvYKR9SWM,4501
  ciocore/uploader/__init__.py,sha256=hxRFJf5Lo86rtRObFXSjjot8nybQd-SebSfYCbgZwow,24
- ciocore/uploader/_uploader.py,sha256=Kt4toITJHZDMjRLqRyw_lwe_HOoWz2AigMp2k5heHBI,42291
- ciocore/uploader/thread_queue_job.py,sha256=MzOcetttfWtDfwy-M0_ARwUf8_OjaGjyy-dA_WgNTPE,3416
+ ciocore/uploader/_uploader.py,sha256=40nzqO5DuFi4sx31VvjWxZPNkrWsWqM9jtFVxs_-o3o,37479
  ciocore/uploader/upload_stats/__init__.py,sha256=Lg1y4zq1i0cwc6Hh2K1TAQDYymLff49W-uIo1xjcvdI,5309
  ciocore/uploader/upload_stats/stats_formats.py,sha256=giNirtObU66VALWghPFSRhg3q_vw5MvESsnXhb_I3y8,2402
  tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -119,8 +118,8 @@ tests/test_uploader.py,sha256=JGp6GEyqRXRtbQSb-IW-cCX_BzNotWdCbnJnLwZvpUM,2869
  tests/test_validator.py,sha256=2fY66ayNc08PGyj2vTI-V_1yeCWJDngkj2zkUM5TTCI,1526
  tests/mocks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  tests/mocks/glob.py,sha256=J2MH7nqi6NJOHuGdVWxhfeBd700_Ckj6cLh_8jSNkfg,215
- ciocore-9.1.0rc1.dist-info/METADATA,sha256=R_i2XrMj2RbhWLQrczzrbq71Ufz_3Q37WY3d0qIe2m8,19201
- ciocore-9.1.0rc1.dist-info/WHEEL,sha256=qUzzGenXXuJTzyjFah76kDVqDvnk-YDzY00svnrl84w,109
- ciocore-9.1.0rc1.dist-info/entry_points.txt,sha256=cCqcALMYbC4d8545V9w0Zysfg9MVuKWhzDQ2er4UfGE,47
- ciocore-9.1.0rc1.dist-info/top_level.txt,sha256=SvlM5JlqULzAz00JZWfiUhfjhqDzYzSWssA87zdJl0o,14
- ciocore-9.1.0rc1.dist-info/RECORD,,
+ ciocore-9.1.0rc2.dist-info/METADATA,sha256=5ckG7OoC_iQTAE6keOl-Sc3GgfEihq4xc-2BvA10ZnU,18999
+ ciocore-9.1.0rc2.dist-info/WHEEL,sha256=qUzzGenXXuJTzyjFah76kDVqDvnk-YDzY00svnrl84w,109
+ ciocore-9.1.0rc2.dist-info/entry_points.txt,sha256=cCqcALMYbC4d8545V9w0Zysfg9MVuKWhzDQ2er4UfGE,47
+ ciocore-9.1.0rc2.dist-info/top_level.txt,sha256=SvlM5JlqULzAz00JZWfiUhfjhqDzYzSWssA87zdJl0o,14
+ ciocore-9.1.0rc2.dist-info/RECORD,,

ciocore/uploader/thread_queue_job.py REMOVED
@@ -1,101 +0,0 @@
- import logging
- from ciocore import loggeria
-
- logger = logging.getLogger("{}.uploader".format(loggeria.CONDUCTOR_LOGGER_NAME))
-
- class ThreadQueueJob():
-     pass
-
- class UploadThreadQueueJob(ThreadQueueJob):
-
-     def __init__(self, path, file_size, presigned_url, file_md5=None, upload_id=None, part_size=None, total_parts=1, part_index=1, kms_key_name=None):
-
-         super().__init__()
-
-         self.path = path
-         self.file_size = file_size
-         self.upload_id = upload_id
-         self.presigned_url = presigned_url
-         self.file_md5 = file_md5
-         self.part_size = part_size
-         self.part_index = part_index
-         self.total_parts = total_parts
-         self.kms_key_name = kms_key_name
-
-         logger.info("Creating %s (%s): %s", str(self.__class__), str(self), str(self.__dict__))
-
-     def is_multipart(self):
-         return self.total_parts != 1
-
-     def is_vendor_aws(self):
-         return "amazonaws" in self.presigned_url
-
-     def is_vendor_cw(self):
-         return "coreweave" in self.presigned_url
-
-     @classmethod
-     def create_from_response(cls, response):
-
-         new_thread_queue_jobs = []
-
-         for part_type, file_request_list in response.items():
-
-             for file_request in file_request_list:
-                 if part_type == "multiPartURLs":
-
-                     for part in file_request["parts"]:
-                         new_tqj = cls( path=file_request['filePath'],
-                                        file_size = file_request['filePath'],
-                                        presigned_url = file_request['preSignedURL'],
-                                        file_md5 = file_request['preSignedURL'],
-                                        upload_id = file_request['preSignedURL'],
-                                        part_size = file_request['preSignedURL'],
-                                        part_index = file_request['preSignedURL'])
-
-
-                 else:
-                     new_tqj = cls( path=file_request['filePath'],
-                                    file_size = file_request['filePath'],
-                                    presigned_url = file_request['preSignedURL'])
-
-                 new_thread_queue_jobs.append(new_tqj)
-
-
-
- class MultiPartThreadQueueJob(ThreadQueueJob):
-
-     def __init__(self, path, md5, total_parts=1, part_index=1):
-
-         super().__init__()
-
-         self.upload_id = None
-         self.md5 = md5
-         self.project = None
-         self.path = path
-         self.part_index = part_index
-         self.etag = None
-         self.total_parts = total_parts
-
-         logger.info("Creating %s (%s): %s", str(self.__class__), str(self), str(self.__dict__))
-
-     def is_multipart(self):
-         return self.total_parts != 1
-
-     # def __str__(self):
-     #     return
-
-     @staticmethod
-     def aggregate_parts(parts):
-         """
-         Helper function to take all the parts of a multipart upload and put
-         them into a format that's expected for the HTTP call.
-         """
-
-         completed_parts_payload = []
-
-         for part in parts:
-             completed_parts_payload.append({'partNumber': part.part,
-                                             'etag': part.etag}
-                                            )
-
-         return completed_parts_payload