dtlpy 1.98.8-py3-none-any.whl → 1.99.11-py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in their public registries.
dtlpy/__init__.py CHANGED
@@ -239,6 +239,7 @@ def checkout_state():
 
 
 def use_attributes_2(state: bool = True):
+    warnings.warn("Function 'use_attributes_2()' is deprecated as of version 1.99.11 and has been non-functional since version 1.90.39. To work with attributes 2.0, simply use 'update_attributes()'.", DeprecationWarning)
     client_api.attributes_mode.use_attributes_2 = state
 
 
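The deprecated helper still runs; it now just warns first. A minimal sketch of how the warning surfaces (assuming the conventional import dtlpy as dl):

import warnings
import dtlpy as dl

# use_attributes_2 is defined in dtlpy/__init__.py, so it is reachable as dl.use_attributes_2;
# the call still sets the client flag, though the SDK reports it non-functional since 1.90.39
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    dl.use_attributes_2(state=True)
assert any(issubclass(w.category, DeprecationWarning) for w in caught)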
dtlpy/__version__.py CHANGED
@@ -1 +1 @@
-version = '1.98.8'
+version = '1.99.11'
dtlpy/entities/dpk.py CHANGED
@@ -59,6 +59,7 @@ class Toolbar(entities.DlEntity):
 
 
 class Panel(entities.DlEntity):
     name = entities.DlProperty(location=['name'], _type=str)
+    path = entities.DlProperty(location=['path'], _type=str, default=None)
     min_role = entities.DlProperty(location=['minRole'], _type=list)
     supported_slots = entities.DlProperty(location=['supportedSlots'], _type=list)
 
dtlpy/entities/model.py CHANGED
@@ -307,7 +307,7 @@ class Model(entities.BaseEntity):
     @_repositories.default
     def set_repositories(self):
         reps = namedtuple('repositories',
-                          field_names=['projects', 'datasets', 'packages', 'models', 'ontologies', 'artifacts',
+                          field_names=['projects', 'datasets', 'models', 'packages', 'ontologies', 'artifacts',
                                        'metrics', 'dpks', 'services'])
 
         r = reps(projects=repositories.Projects(client_api=self._client_api),
@@ -552,14 +552,34 @@ class Model(entities.BaseEntity):
                                            filters=filters,
                                            service_config=service_config)
 
-    def predict(self, item_ids):
+    def predict(self, item_ids=None, dataset_id=None):
         """
         Run model prediction with items
 
         :param item_ids: a list of item id to run the prediction.
+        :param dataset_id: dataset id to run the prediction on
         :return:
         """
-        return self.models.predict(model=self, item_ids=item_ids)
+        return self.models.predict(model=self, item_ids=item_ids, dataset_id=dataset_id)
+
+    def embed(self, item_ids):
+        """
+        Run model embed with items
+
+        :param item_ids: a list of item id to run the embed.
+        :return:
+        """
+        return self.models.embed(model=self, item_ids=item_ids)
+
+    def embed_datasets(self, dataset_ids, attach_trigger=False):
+        """
+        Run model embed with datasets
+
+        :param dataset_ids: a list of dataset id to run the embed.
+        :param attach_trigger: bool - True, if you want to activate the trigger
+        :return:
+        """
+        return self.models.embed_datasets(model=self, dataset_ids=dataset_ids, attach_trigger=attach_trigger)
 
     def deploy(self, service_config=None) -> entities.Service:
         """
dtlpy/entities/prompt_item.py CHANGED
@@ -347,7 +347,6 @@ class PromptItem:
     def add(self,
             message: dict,
             prompt_key: str = None,
-            stream: bool = True,
             model_info: dict = None):
         """
         add a prompt to the prompt item
@@ -356,29 +355,28 @@ class PromptItem:
 
         :param message:
         :param prompt_key:
-        :param stream:
         :param model_info:
         :return:
         """
-        if prompt_key is None:
-            prompt_key = len(self.prompts) + 1
         role = message.get('role', 'user')
         content = message.get('content', list())
 
         if self.role_mapping.get(role, 'item') == 'item':
+            if prompt_key is None:
+                prompt_key = str(len(self.prompts) + 1)
             # for new prompt we need a new key
-            prompt = Prompt(key=str(prompt_key), role=role)
+            prompt = Prompt(key=prompt_key, role=role)
            for element in content:
                prompt.add_element(value=element.get('value', ''),
                                   mimetype=element.get('mimetype', PromptType.TEXT))
 
            # create new prompt and add to prompts
            self.prompts.append(prompt)
-            if self._item is not None and stream is True:
+            if self._item is not None:
                self._item._Item__update_item_binary(_json=self.to_json())
         else:
-            # for response - we need to assign to previous key
-            prompt_key = str(prompt_key - 1)
+            if prompt_key is None:
+                prompt_key = str(len(self.prompts))
            assistant_message = content[0]
            assistant_mimetype = assistant_message.get('mimetype', PromptType.TEXT)
            uploaded_annotation = None
@@ -421,21 +419,19 @@ class PromptItem:
                prompt.add_element(mimetype=PromptType.METADATA,
                                   value={"model_info": model_info})
 
-            if stream:
-                existing_annotation = entities.Annotation.new(item=self._item,
-                                                              metadata=metadata,
-                                                              annotation_definition=annotation_definition)
-                uploaded_annotation = existing_annotation.upload()
-                prompt.add_element(mimetype=PromptType.METADATA,
-                                   value={"id": uploaded_annotation.id})
+            existing_annotation = entities.Annotation.new(item=self._item,
+                                                          metadata=metadata,
+                                                          annotation_definition=annotation_definition)
+            uploaded_annotation = existing_annotation.upload()
+            prompt.add_element(mimetype=PromptType.METADATA,
+                               value={"id": uploaded_annotation.id})
            existing_prompt = prompt
            self.assistant_prompts.append(prompt)
 
-        # TODO Shadi fix
        existing_prompt_element = [element for element in existing_prompt.elements if
                                   element['mimetype'] != PromptType.METADATA][-1]
        existing_prompt_element['value'] = assistant_message.get('value')
-        if stream is True and uploaded_annotation is None:
+        if uploaded_annotation is None:
            # Creating annotation with old dict to match platform dict
            annotation_definition = entities.FreeText(text='')
            metadata = {'system': {'promptId': prompt_key},
@@ -451,7 +447,3 @@ class PromptItem:
            # update the annotation with the new text
            annotation.annotation_definition.text = existing_prompt_element['value']
            self._item.annotations.update(annotation)
-
-    def update(self):
-        if self._item is not None:
-            self._item._Item__update_item_binary(_json=self.to_json())
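With stream gone, add() always writes through to the attached item. A sketch of the call shape add() expects, matching the keys the diff reads from message (the PromptItem constructor arguments are illustrative):

import dtlpy as dl

prompt_item = dl.PromptItem(name='my-prompt')
# one user prompt; 'role' defaults to 'user' and each content element
# carries a 'value' and a 'mimetype'
prompt_item.add(message={'role': 'user',
                         'content': [{'mimetype': dl.PromptType.TEXT,
                                      'value': 'What is in this image?'}]})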
dtlpy/repositories/downloader.py CHANGED
@@ -602,12 +602,20 @@ class Downloader:
         else:
             return item, '', False
 
-    def __video_validation(self, item, downloaded_file):
+    def __file_validation(self, item, downloaded_file):
         res = False
-        size_diff = os.stat(downloaded_file).st_size - item.metadata['system']['size']
+        resume = True
+        if isinstance(downloaded_file, io.BytesIO):
+            file_size = downloaded_file.getbuffer().nbytes
+        else:
+            file_size = os.stat(downloaded_file).st_size
+        expected_size = item.metadata['system']['size']
+        size_diff = file_size - expected_size
         if size_diff == 0:
             res = True
-        return res
+        if size_diff > 0:
+            resume = False
+        return res, file_size, resume
 
     def __thread_download(self,
                           item,
@@ -650,132 +658,165 @@ class Downloader:
 
         item, url, is_url = self.__get_link_source(item=item)
 
+        # save as byte stream
+        data = io.BytesIO()
         if need_to_download:
-            if not is_url:
-                headers = {'x-dl-sanitize': '0'}
-                result, response = self.items_repository._client_api.gen_request(req_type="get",
-                                                                                 headers=headers,
-                                                                                 path="/items/{}/stream".format(
-                                                                                     item.id),
-                                                                                 stream=True,
-                                                                                 dataset_id=item.dataset_id)
-                if not result:
-                    raise PlatformException(response)
-            else:
-                _, ext = os.path.splitext(item.metadata['system']['shebang']['linkInfo']['ref'].split('?')[0])
-                local_filepath += ext
-                response = self.get_url_stream(url=url)
-
-            if save_locally:
-                # save to file
-                if not os.path.exists(os.path.dirname(local_filepath)):
-                    # create folder if not exists
-                    os.makedirs(os.path.dirname(local_filepath), exist_ok=True)
-
-                # decide if create progress bar for item
-                total_length = response.headers.get("content-length")
-                one_file_pbar = None
-                try:
-                    one_file_progress_bar = total_length is not None and int(
-                        total_length) > 10e6  # size larger than 10 MB
-                    if one_file_progress_bar:
-                        one_file_pbar = tqdm.tqdm(total=int(total_length),
-                                                  unit='B',
-                                                  unit_scale=True,
-                                                  unit_divisor=1024,
-                                                  position=1,
-                                                  file=sys.stdout,
-                                                  disable=self.items_repository._client_api.verbose.disable_progress_bar,
-                                                  desc='Download Item')
-                except Exception as err:
-                    one_file_progress_bar = False
-                    logger.debug('Cant decide downloaded file length, bar will not be presented: {}'.format(err))
-
-                # start download
-                if self.items_repository._client_api.sdk_cache.use_cache and \
-                        self.items_repository._client_api.cache is not None:
-                    response_output = os.path.normpath(response.content)
-                    if isinstance(response_output, bytes):
-                        response_output = response_output.decode('utf-8')[1:-1]
-
-                    if os.path.isfile(os.path.normpath(response_output)):
-                        if response_output != local_filepath:
-                            source_path = os.path.normpath(response_output)
-                            shutil.copyfile(source_path, local_filepath)
+            chunk_resume = {0: 0}
+            start_point = 0
+            download_done = False
+            while chunk_resume.get(start_point, '') != 3 and not download_done:
+                if not is_url:
+                    headers = {'x-dl-sanitize': '0', 'Range': 'bytes={}-'.format(start_point)}
+                    result, response = self.items_repository._client_api.gen_request(req_type="get",
+                                                                                     headers=headers,
+                                                                                     path="/items/{}/stream".format(
+                                                                                         item.id),
+                                                                                     stream=True,
+                                                                                     dataset_id=item.dataset_id)
+                    if not result:
+                        raise PlatformException(response)
                 else:
+                    _, ext = os.path.splitext(item.metadata['system']['shebang']['linkInfo']['ref'].split('?')[0])
+                    local_filepath += ext
+                    response = self.get_url_stream(url=url)
+
+                if save_locally:
+                    # save to file
+                    if not os.path.exists(os.path.dirname(local_filepath)):
+                        # create folder if not exists
+                        os.makedirs(os.path.dirname(local_filepath), exist_ok=True)
+
+                    # decide if create progress bar for item
+                    total_length = response.headers.get("content-length")
+                    one_file_pbar = None
                     try:
-                        temp_file_path = local_filepath + '.download'
-                        with open(temp_file_path, "wb") as f:
+                        one_file_progress_bar = total_length is not None and int(
+                            total_length) > 10e6  # size larger than 10 MB
+                        if one_file_progress_bar:
+                            one_file_pbar = tqdm.tqdm(total=int(total_length),
+                                                      unit='B',
+                                                      unit_scale=True,
+                                                      unit_divisor=1024,
+                                                      position=1,
+                                                      file=sys.stdout,
+                                                      disable=self.items_repository._client_api.verbose.disable_progress_bar,
+                                                      desc='Download Item')
+                    except Exception as err:
+                        one_file_progress_bar = False
+                        logger.debug('Cant decide downloaded file length, bar will not be presented: {}'.format(err))
+
+                    # start download
+                    if self.items_repository._client_api.sdk_cache.use_cache and \
+                            self.items_repository._client_api.cache is not None:
+                        response_output = os.path.normpath(response.content)
+                        if isinstance(response_output, bytes):
+                            response_output = response_output.decode('utf-8')[1:-1]
+
+                        if os.path.isfile(os.path.normpath(response_output)):
+                            if response_output != local_filepath:
+                                source_path = os.path.normpath(response_output)
+                                shutil.copyfile(source_path, local_filepath)
+                    else:
+                        try:
+                            temp_file_path = local_filepath + '.download'
+                            with open(temp_file_path, "ab") as f:
+                                try:
+                                    for chunk in response.iter_content(chunk_size=chunk_size):
+                                        if chunk:  # filter out keep-alive new chunks
+                                            f.write(chunk)
+                                            if one_file_progress_bar:
+                                                one_file_pbar.update(len(chunk))
+                                except Exception as err:
+                                    pass
+                            file_validation, start_point, chunk_resume = self.__get_next_chunk(item=item,
+                                                                                               download_progress=temp_file_path,
+                                                                                               chunk_resume=chunk_resume)
+                            if file_validation:
+                                shutil.move(temp_file_path, local_filepath)
+                                download_done = True
+                            else:
+                                if not is_url:
+                                    continue
+                                else:
+                                    raise PlatformException(
+                                        error="400",
+                                        message='Downloaded file is corrupted. Please try again. If the issue repeats please contact support.')
+                        except Exception as err:
+                            if os.path.isfile(temp_file_path):
+                                os.remove(temp_file_path)
+                            raise err
+                    if one_file_progress_bar:
+                        one_file_pbar.close()
+                    # save to output variable
+                    data = local_filepath
+                    # if image - can download annotation mask
+                    if item.annotated and annotation_options:
+                        self._download_img_annotations(item=item,
+                                                       img_filepath=local_filepath,
+                                                       annotation_options=annotation_options,
+                                                       annotation_filters=annotation_filters,
+                                                       local_path=local_path,
+                                                       overwrite=overwrite,
+                                                       thickness=thickness,
+                                                       alpha=alpha,
+                                                       with_text=with_text,
+                                                       export_version=export_version
+                                                       )
+                else:
+                    if self.items_repository._client_api.sdk_cache.use_cache and \
+                            self.items_repository._client_api.cache is not None:
+                        response_output = os.path.normpath(response.content)
+                        if isinstance(response_output, bytes):
+                            response_output = response_output.decode('utf-8')[1:-1]
+
+                        if os.path.isfile(response_output):
+                            source_file = response_output
+                            with open(source_file, 'wb') as f:
+                                data = f.read()
+                    else:
+                        try:
                             for chunk in response.iter_content(chunk_size=chunk_size):
                                 if chunk:  # filter out keep-alive new chunks
-                                    f.write(chunk)
-                                    if one_file_progress_bar:
-                                        one_file_pbar.update(len(chunk))
-                        # TODO remove this after the BE fix
-                        if self.__video_validation(item=item,
-                                                   downloaded_file=temp_file_path) or is_url:
-                            shutil.move(temp_file_path, local_filepath)
-                        else:
-                            os.remove(temp_file_path)
+                                    data.write(chunk)
+                            file_validation, start_point, chunk_resume = self.__get_next_chunk(item=item,
+                                                                                               download_progress=data,
+                                                                                               chunk_resume=chunk_resume)
+                            if file_validation:
+                                download_done = True
+                            else:
+                                continue
+                        except Exception as err:
+                            raise err
+                    # go back to the beginning of the stream
+                    data.seek(0)
+                    data.name = item.name
+                    if not save_locally and to_array:
+                        if 'image' not in item.mimetype:
                             raise PlatformException(
-                                error=500,
-                                message='The downloaded file is corrupted. Please try again. If the issue repeats please contact support.')
-                    except Exception as err:
-                        if os.path.isfile(temp_file_path):
-                            os.remove(temp_file_path)
-                        raise err
-                if one_file_progress_bar:
-                    one_file_pbar.close()
-                # save to output variable
-                data = local_filepath
-                # if image - can download annotation mask
-                if item.annotated and annotation_options:
-                    self._download_img_annotations(item=item,
-                                                   img_filepath=local_filepath,
-                                                   annotation_options=annotation_options,
-                                                   annotation_filters=annotation_filters,
-                                                   local_path=local_path,
-                                                   overwrite=overwrite,
-                                                   thickness=thickness,
-                                                   alpha=alpha,
-                                                   with_text=with_text,
-                                                   export_version=export_version
-                                                   )
-            else:
-                # save as byte stream
-                data = io.BytesIO()
-                if self.items_repository._client_api.sdk_cache.use_cache and \
-                        self.items_repository._client_api.cache is not None:
-                    response_output = os.path.normpath(response.content)
-                    if isinstance(response_output, bytes):
-                        response_output = response_output.decode('utf-8')[1:-1]
-
-                    if os.path.isfile(response_output):
-                        source_file = response_output
-                        with open(source_file, 'wb') as f:
-                            data = f.read()
-                    else:
-                        try:
-                            for chunk in response.iter_content(chunk_size=chunk_size):
-                                if chunk:  # filter out keep-alive new chunks
-                                    data.write(chunk)
-                        except Exception as err:
-                            raise err
-                # go back to the beginning of the stream
-                data.seek(0)
-                data.name = item.name
-                if not save_locally and to_array:
-                    if 'image' not in item.mimetype:
-                        raise PlatformException(
-                            error="400",
-                            message='Download element type numpy.ndarray support for image only. '
-                                    'Item Id: {} is {} type'.format(item.id, item.mimetype))
+                                error="400",
+                                message='Download element type numpy.ndarray support for image only. '
+                                        'Item Id: {} is {} type'.format(item.id, item.mimetype))
 
-                    data = np.array(Image.open(data))
+                        data = np.array(Image.open(data))
         else:
             data = local_filepath
         return data
 
+    def __get_next_chunk(self, item, download_progress, chunk_resume):
+        size_validation, file_size, resume = self.__file_validation(item=item,
+                                                                    downloaded_file=download_progress)
+        start_point = file_size
+        if not size_validation:
+            if chunk_resume.get(start_point, None) is None:
+                chunk_resume = {start_point: 1}
+            else:
+                chunk_resume[start_point] += 1
+            if chunk_resume[start_point] == 3 or not resume:
+                raise PlatformException(
+                    error=500,
+                    message='The downloaded file is corrupted. Please try again. If the issue repeats please contact support.')
        return size_validation, start_point, chunk_resume
 
    def __default_local_path(self):
 
        # create default local path
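The retry accounting lives in __get_next_chunk: each pass re-requests with a Range: bytes={offset}- header, a stall at the same offset increments a counter, progress resets it, and an oversized file aborts immediately. A standalone mirror of that bookkeeping (plain Python, not the SDK's internal API):

def next_chunk(expected_size, file_size, chunk_resume):
    ok = file_size == expected_size      # __file_validation: exact size match passes
    resume = file_size <= expected_size  # an oversized file cannot be resumed
    if not ok:
        if file_size not in chunk_resume:
            chunk_resume = {file_size: 1}    # progress was made: restart the counter
        else:
            chunk_resume[file_size] += 1     # stalled at the same offset again
        if chunk_resume[file_size] == 3 or not resume:
            raise RuntimeError('The downloaded file is corrupted. Please try again.')
    return ok, file_size, chunk_resume

state = {0: 0}
ok, offset, state = next_chunk(2048, 1024, state)  # stall 1, next request resumes at byte 1024
ok, offset, state = next_chunk(2048, 1024, state)  # stall 2 at the same offset
# a third call with no progress raises RuntimeError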
dtlpy/repositories/models.py CHANGED
@@ -660,18 +660,21 @@ class Models:
                                            client_api=self._client_api,
                                            project=self._project)
 
-    def predict(self, model, item_ids):
+    def predict(self, model, item_ids, dataset_id=None):
         """
         Run model prediction with items
 
         :param model: dl.Model entity to run the prediction.
         :param item_ids: a list of item id to run the prediction.
+        :param dataset_id: a dataset id to run the prediction.
         :return:
         """
         if len(model.metadata['system'].get('deploy', {}).get('services', [])) == 0:
             # no services for model
             raise ValueError("Model doesnt have any associated services. Need to deploy before predicting")
-        payload = {'input': {'itemIds': item_ids},
+        if item_ids is None and dataset_id is None:
+            raise ValueError("Need to provide either item_ids or dataset_id")
+        payload = {'input': {'itemIds': item_ids, 'datasetId': dataset_id},
                   'config': {'serviceId': model.metadata['system']['deploy']['services'][0]}}
 
        success, response = self._client_api.gen_request(req_type="post",
@@ -683,6 +686,62 @@ class Models:
                                            client_api=self._client_api,
                                            project=self._project)
 
+    def embed(self, model, item_ids=None, dataset_id=None):
+        """
+        Run model embed with items
+
+        :param model: dl.Model entity to run the prediction.
+        :param item_ids: a list of item id to run the embed.
+        :param dataset_id: a dataset id to run the embed.
+        :return: Execution
+        :rtype: dtlpy.entities.execution.Execution
+        """
+        if len(model.metadata['system'].get('deploy', {}).get('services', [])) == 0:
+            # no services for model
+            raise ValueError("Model doesnt have any associated services. Need to deploy before predicting")
+        if item_ids is None and dataset_id is None:
+            raise ValueError("Need to provide either item_ids or dataset_id")
+        payload = {'input': {'itemIds': item_ids, 'datasetId': dataset_id},
+                   'config': {'serviceId': model.metadata['system']['deploy']['services'][0]}}
+
+        success, response = self._client_api.gen_request(req_type="post",
+                                                         path=f"/ml/models/{model.id}/embed",
+                                                         json_req=payload)
+        if not success:
+            raise exceptions.PlatformException(response)
+        return entities.Execution.from_json(_json=response.json(),
+                                            client_api=self._client_api,
+                                            project=self._project)
+
+    def embed_datasets(self, model, dataset_ids, attach_trigger=False):
+        """
+        Run model embed with datasets
+
+        :param model: dl.Model entity to run the prediction.
+        :param dataset_ids: a list of dataset id to run the embed.
+        :param attach_trigger: bool, if True will activate the trigger
+        :return:
+        """
+        if len(model.metadata['system'].get('deploy', {}).get('services', [])) == 0:
+            # no services for model
+            raise ValueError("Model doesnt have any associated services. Need to deploy before predicting")
+        if dataset_ids is None:
+            raise ValueError("Need to provide either dataset_id")
+        payload = {'datasetIds': dataset_ids,
+                   'config': {'serviceId': model.metadata['system']['deploy']['services'][0]},
+                   'attachTrigger': attach_trigger
+                   }
+
+        success, response = self._client_api.gen_request(req_type="post",
+                                                         path=f"/ml/models/{model.id}/embed/datasets",
+                                                         json_req=payload)
+        if not success:
+            raise exceptions.PlatformException(response)
+        command = entities.Command.from_json(_json=response.json(),
+                                             client_api=self._client_api)
+        command = command.wait()
+        return command
+
     def deploy(self, model_id: str, service_config=None) -> entities.Service:
         """
         Deploy a trained model. This will create a service that will execute predictions
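Both predict and embed now share the same guard: at least one input source is required. A sketch of the failure mode for an already-deployed model (the lookups are illustrative):

import dtlpy as dl

project = dl.projects.get(project_name='my-project')
model = project.models.get(model_name='my-model')
try:
    project.models.predict(model=model, item_ids=None)
except ValueError as err:
    print(err)  # "Need to provide either item_ids or dataset_id"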
dtlpy/repositories/upload_element.py CHANGED
@@ -24,6 +24,7 @@ class BaseUploadElement:
         self.filename = all_upload_elements['filename']
         self.export_version = all_upload_elements['export_version']
         self.item_description = all_upload_elements['item_description']
+        self.driver_path = all_upload_elements['driver_path']
 
 
 class BinaryUploadElement(BaseUploadElement):
@@ -101,6 +102,16 @@ class ExternalItemUploadElement(BaseUploadElement):
         else:
             annotations_filepath = None
         # append to list
+        if self.remote_path is None:
+            split_dir = os.path.dirname(filepath).split('//')
+            if len(split_dir) > 1:
+                split_dir = split_dir[1] + '/'
+            if self.driver_path == '/':
+                self.driver_path = None
+            if self.driver_path and split_dir.startswith(self.driver_path):
+                self.remote_path = split_dir.replace(self.driver_path, '')
+            else:
+                self.remote_path = split_dir
         remote_filepath = self.remote_path + self.remote_name
         self.type = 'external_file'
         self.buffer = filepath
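For items synced from external storage, the remote path is now derived from the file's external path minus the storage driver's root. A standalone mirror of that mapping (not the SDK API; the example path and driver root are made up):

import os

def derive_remote_path(filepath, driver_path):
    split_dir = os.path.dirname(filepath).split('//')
    if len(split_dir) > 1:
        split_dir = split_dir[1] + '/'
    if driver_path == '/':          # a root driver contributes nothing
        driver_path = None
    if driver_path and split_dir.startswith(driver_path):
        return split_dir.replace(driver_path, '')
    return split_dir

# driver rooted at 'bucket/prefix/': only the relative part survives,
# and remote_filepath becomes this prefix plus the remote name
print(derive_remote_path('external://bucket/prefix/images/cat.jpg', 'bucket/prefix/'))  # images/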
dtlpy/repositories/uploader.py CHANGED
@@ -135,10 +135,13 @@ class Uploader:
                item_description=None):
         # fix remote path
         if remote_path is None:
-            remote_path = "/"
-        if not remote_path.startswith('/'):
+            if isinstance(local_path, str) and local_path.startswith('external://'):
+                remote_path = None
+            else:
+                remote_path = "/"
+        if remote_path and not remote_path.startswith('/'):
             remote_path = f"/{remote_path}"
-        if not remote_path.endswith("/"):
+        if remote_path and not remote_path.endswith("/"):
             remote_path = f"{remote_path}/"
         if file_types is not None and not isinstance(file_types, list):
             msg = '"file_types" should be a list of file extension. e.g [".jpg", ".png"]'
@@ -198,6 +201,12 @@ class Uploader:
         if remote_name is None:
             remote_name_list = [None] * len(local_path_list)
 
+        try:
+            driver_path = self.items_repository.dataset.project.drivers.get(
+                driver_id=self.items_repository.dataset.driver).path
+        except Exception:
+            driver_path = None
+
         futures = deque()
         total_size = 0
         for upload_item_element, remote_name, upload_annotations_element in zip(local_path_list,
@@ -238,7 +247,8 @@ class Uploader:
                                    'filename': None,
                                    'root': None,
                                    'export_version': export_version,
-                                   'item_description': item_description
+                                   'item_description': item_description,
+                                   'driver_path': driver_path
                                    }
            if isinstance(upload_item_element, str):
                with_head_folder = True
@@ -342,16 +352,9 @@ class Uploader:
         return futures
 
     async def __single_external_sync(self, element):
-        remote_path = element.remote_path
-        if len(remote_path) <= 1:
-            split_dir = os.path.dirname(element.buffer).split('//')
-            if len(split_dir) > 1:
-                remote_path = split_dir[1] + '/'
-
-        filename = remote_path + element.remote_name
         storage_id = element.buffer.split('//')[1]
         req_json = dict()
-        req_json['filename'] = filename
+        req_json['filename'] = element.remote_filepath
         req_json['storageId'] = storage_id
         success, response = self.items_repository._client_api.gen_request(req_type='post',
                                                                           path='/datasets/{}/imports'.format(
@@ -530,9 +533,10 @@ class Uploader:
                                ref=item.id)
             if pbar is not None:
                 pbar.update()
-            self.items_repository._client_api.callbacks.run_on_event(event=self.items_repository._client_api.callbacks.CallbackEvent.ITEMS_UPLOAD,
-                                                                     context={'item_id': item.id, 'dataset_id': item.dataset_id},
-                                                                     progress=round(pbar.n / pbar.total * 100, 0))
+            self.items_repository._client_api.callbacks.run_on_event(
+                event=self.items_repository._client_api.callbacks.CallbackEvent.ITEMS_UPLOAD,
+                context={'item_id': item.id, 'dataset_id': item.dataset_id},
+                progress=round(pbar.n / pbar.total * 100, 0))
         else:
             if isinstance(element.buffer, str):
                 ref = element.buffer
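The net effect of the remote_path change, as a standalone mirror of the new normalization (not the SDK API): external:// uploads keep remote_path as None so each element can derive it from the driver root, while everything else is normalized to a '/.../' form.

def normalize_remote_path(remote_path, local_path):
    if remote_path is None:
        if isinstance(local_path, str) and local_path.startswith('external://'):
            remote_path = None   # derived later, per element, from the driver path
        else:
            remote_path = "/"
    if remote_path and not remote_path.startswith('/'):
        remote_path = f"/{remote_path}"
    if remote_path and not remote_path.endswith("/"):
        remote_path = f"{remote_path}/"
    return remote_path

print(normalize_remote_path(None, 'external://bucket/images'))  # None
print(normalize_remote_path('folder', '/tmp/images'))           # /folder/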
dtlpy/services/api_client.py CHANGED
@@ -1170,12 +1170,7 @@ class ApiClient:
         def callback(bytes_read):
             pass
 
-        timeout = aiohttp.ClientTimeout(
-            total=None,  # Disable overall timeout
-            connect=2 * 60,  # Set connect timeout (in seconds)
-            sock_read=10 * 60,  # Set read timeout for socket read operations
-            sock_connect=2 * 60  # Set timeout for connection setup
-        )
+        timeout = aiohttp.ClientTimeout(total=0)
         async with aiohttp.ClientSession(headers=headers, timeout=timeout) as session:
             try:
                 form = aiohttp.FormData({})
@@ -1187,7 +1182,8 @@ class ApiClient:
                 form.add_field('description', item_description)
                 form.add_field('file', AsyncUploadStream(buffer=to_upload,
                                                          callback=callback,
-                                                         name=uploaded_filename))
+                                                         name=uploaded_filename,
+                                                         chunk_timeout=2 * 60))
                 url = '{}?mode={}'.format(self.base_gate_url + remote_url, mode)
 
                 # use SSL context
dtlpy/services/async_utils.py CHANGED
@@ -36,6 +36,8 @@ class AsyncThreadEventLoop(threading.Thread):
     def semaphore(self, name, n=None):
         if n is None:
             n = self.n
+        else:
+            n = min(n, self.n)
         if name not in self._semaphores:
             self._semaphores[name] = asyncio.BoundedSemaphore(n)
         return self._semaphores[name]
@@ -94,17 +96,32 @@ class AsyncResponseError(AsyncResponse):
 
 
 class AsyncUploadStream(io.IOBase):
-    def __init__(self, buffer, callback=None, name=''):
+    def __init__(self, buffer, callback=None, name='', chunk_timeout=10, max_retries=3):
         self.buffer = buffer
         self.buffer.seek(0)
         self.callback = callback
         self._name = name
+        self.chunk_timeout = chunk_timeout
+        self.max_retries = max_retries
 
     @property
     def name(self):
         return self._name
 
+    async def async_read(self, size):
+        retries = 0
+        while retries < self.max_retries:
+            try:
+                data = await asyncio.wait_for(asyncio.to_thread(self.buffer.read, size), timeout=self.chunk_timeout)
+                if self.callback is not None:
+                    self.callback(size)
+                return data
+            except asyncio.TimeoutError:
+                retries += 1
+                logger.warning(
+                    f"Chunk read timed out after {self.chunk_timeout} seconds. Retrying {retries}/{self.max_retries}...")
+
+        raise Exception(f"Chunk read failed after {self.max_retries} retries due to timeouts")
+
     def read(self, size):
-        if self.callback is not None:
-            self.callback(size)
-        return self.buffer.read(size)
+        return asyncio.run(self.async_read(size))
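Paired with the api_client change above, the timeout now bounds each chunk read rather than the whole upload. A self-contained sketch of the same retry pattern (assumes Python 3.9+ for asyncio.to_thread; names are illustrative):

import asyncio
import io

async def read_with_retries(buffer, size, chunk_timeout=2 * 60, max_retries=3):
    for attempt in range(1, max_retries + 1):
        try:
            # bound a single blocking read, not the whole transfer
            return await asyncio.wait_for(asyncio.to_thread(buffer.read, size),
                                          timeout=chunk_timeout)
        except asyncio.TimeoutError:
            print(f"Chunk read timed out after {chunk_timeout} seconds. "
                  f"Retrying {attempt}/{max_retries}...")
    raise Exception(f"Chunk read failed after {max_retries} retries due to timeouts")

print(asyncio.run(read_with_retries(io.BytesIO(b'payload'), 4)))  # b'payl'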
{dtlpy-1.98.8.dist-info → dtlpy-1.99.11.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dtlpy
-Version: 1.98.8
+Version: 1.99.11
 Summary: SDK and CLI for Dataloop platform
 Home-page: https://github.com/dataloop-ai/dtlpy
 Author: Dataloop Team
{dtlpy-1.98.8.dist-info → dtlpy-1.99.11.dist-info}/RECORD RENAMED
@@ -1,5 +1,5 @@
-dtlpy/__init__.py,sha256=fZYNrXe_suwThe-iSg5z45dkhTqiHe7btaxjhITesXE,20677
-dtlpy/__version__.py,sha256=VyHfNvXyywstm8UJU4FD18Pn8gxwDCQgKYhzxKXGUUY,19
+dtlpy/__init__.py,sha256=XcjyX8cwvOrrIHDS00P2iRvp11Kfysj1-mPlltnIe1s,20899
+dtlpy/__version__.py,sha256=pz-xlegF77txuTofrPEqyKxub0sp_icRq1r7TlkHMbM,20
 dtlpy/exceptions.py,sha256=EQCKs3pwhwZhgMByQN3D3LpWpdxwcKPEEt-bIaDwURM,2871
 dtlpy/new_instance.py,sha256=u_c6JtgqsKCr7TU24-g7_CaST9ghqamMhM4Z0Zxt50w,10121
 dtlpy/assets/__init__.py,sha256=D_hAa6NM8Zoy32sF_9b7m0b7I-BQEyBFg8-9Tg2WOeo,976
@@ -59,7 +59,7 @@ dtlpy/entities/command.py,sha256=ARu8ttk-C7_Ice7chRyTtyOtakBTF09FC04mEk73SO8,501
 dtlpy/entities/compute.py,sha256=4FEpahPFFGHxye_fLh_p_kP6iEQ3QJK7S5hAdd6Afos,12744
 dtlpy/entities/dataset.py,sha256=tNCl7nNCx-DrZ3z96APhRdvllfQA1-9y8DpL6Ma2l0I,47516
 dtlpy/entities/directory_tree.py,sha256=Rni6pLSWytR6yeUPgEdCCRfTg_cqLOdUc9uCqz9KT-Q,1186
-dtlpy/entities/dpk.py,sha256=a5C1UG_cvDnXSee650WHH43QflxbJCo_g0V17-GRb24,17639
+dtlpy/entities/dpk.py,sha256=fsJnKXyWTLI_sVkHt7j2stz_bdvXZ8ouNTiWSsiuUcA,17714
 dtlpy/entities/driver.py,sha256=O_QdK1EaLjQyQkmvKsmkNgmvmMb1mPjKnJGxK43KrOA,7197
 dtlpy/entities/execution.py,sha256=WBiAws-6wZnQQ3y9wyvOeexA3OjxfaRdwDu5dSFYL1g,13420
 dtlpy/entities/feature.py,sha256=9fFjD0W57anOVSAVU55ypxN_WTCsWTG03Wkc3cAAj78,3732
@@ -71,7 +71,7 @@ dtlpy/entities/item.py,sha256=G6VVcVCudqeShWigZmNIuKD4OkvTRJ05CeXFXNe3Jk8,29691
 dtlpy/entities/label.py,sha256=ycDYavIgKhz806plIX-64c07_TeHpDa-V7LnfFVe4Rg,3869
 dtlpy/entities/links.py,sha256=FAmEwHtsrqKet3c0UHH9u_gHgG6_OwF1-rl4xK7guME,2516
 dtlpy/entities/message.py,sha256=ApJuaKEqxATpXjNYUjGdYPu3ibQzEMo8-LtJ_4xAcPI,5865
-dtlpy/entities/model.py,sha256=UKtai_V8ckTNPlhzflmJNHXJvH6BH9UYOwCMWXNZueU,24822
+dtlpy/entities/model.py,sha256=ufrWny7eqpu4J3XKy8SXMLDBnIRuPvcNUcmbtM1pdxc,25567
 dtlpy/entities/node.py,sha256=yPPYDLtNMc6vZbbf4FIffY86y7tkaTvYm42Jb7k3Ofk,39617
 dtlpy/entities/ontology.py,sha256=ok4p3sLBc_SS5hs2gZr5-gbblrveM7qSIX4z67QSKeQ,31967
 dtlpy/entities/organization.py,sha256=AMkx8hNIIIjnu5pYlNjckMRuKt6H3lnOAqtEynkr7wg,9893
@@ -84,7 +84,7 @@ dtlpy/entities/paged_entities.py,sha256=6y44H3FSclQvhB1KLI4zuIs317hWOhdHUynldRrU
 dtlpy/entities/pipeline.py,sha256=X9238WbMGfZcXdQVEtkw8twZwl0O4EZB4TxbTSEyPeI,20788
 dtlpy/entities/pipeline_execution.py,sha256=XCXlBAHFYVL2HajE71hK-bPxI4gTwZvg5SKri4BgyRA,9928
 dtlpy/entities/project.py,sha256=ZUx8zA3mr6N145M62R3UDPCCzO1vxfyWO6vjES-bO-g,14653
-dtlpy/entities/prompt_item.py,sha256=Kmvguz3f0sGtkKZS9OEA_-Yi4aQRCgdg1GBkaLQyyTg,19592
+dtlpy/entities/prompt_item.py,sha256=d4rqP961PYlJvJJDRXZPI7Z6NdwRXlx_Q0_N0xtZ_B8,19276
 dtlpy/entities/recipe.py,sha256=Q1HtYgind3bEe-vnDZWhw6H-rcIAGhkGHPRWtLIkPSE,11917
 dtlpy/entities/reflect_dict.py,sha256=2NaSAL-CO0T0FYRYFQlaSpbsoLT2Q18AqdHgQSLX5Y4,3273
 dtlpy/entities/resource_execution.py,sha256=1HuVV__U4jAUOtOkWlWImnM3Yts8qxMSAkMA9sBhArY,5033
@@ -165,7 +165,7 @@ dtlpy/repositories/commands.py,sha256=kXhmyBpLZNs-6vKBo4iXaommpjcGBDXs287IICUnQM
 dtlpy/repositories/compositions.py,sha256=H417BvlQAiWr5NH2eANFke6CfEO5o7DSvapYpf7v5Hk,2150
 dtlpy/repositories/computes.py,sha256=EtfE_3JhTdNlSYDPkKXBFkq-DBl4sgQqIm50ajvFdWM,9976
 dtlpy/repositories/datasets.py,sha256=rDpJXNyxOlJwDQB-wNkM-JIqOGH10q9nujnAl6y8_xU,52077
-dtlpy/repositories/downloader.py,sha256=pNwL7Nid8xmOyYNiv4DB_WY4RoKlxQ-U9nG2V99Gyr8,41342
+dtlpy/repositories/downloader.py,sha256=h5Gs_hVXIOobzdwTHgLfkJYWiwtbRn3my-QMWnWJccw,44082
 dtlpy/repositories/dpks.py,sha256=mj3QPvfzj_jZAscwIgpKUfa7fLxptc3OJQ_RrSfgYxo,17487
 dtlpy/repositories/drivers.py,sha256=fF0UuHCyBzop8pHfryex23mf0kVFAkqzNdOmwBbaWxY,10204
 dtlpy/repositories/executions.py,sha256=4UoU6bnB3kl5cMuF1eJvDecfZCaB06gKWxPfv6_g1_k,32598
@@ -174,7 +174,7 @@ dtlpy/repositories/features.py,sha256=7xA2ihEuNgZD7HBQMMGLWpsS2V_3PgieKW2YAk1OeU
 dtlpy/repositories/integrations.py,sha256=Wi-CpT2PH36GFu3znWP5Uf2CmkqWBUYyOdwvatGD_eM,11798
 dtlpy/repositories/items.py,sha256=90Z8-thLWBd49fmmnP-P6pZxhHX1k4Wv6Qfxq-Ovcz4,38092
 dtlpy/repositories/messages.py,sha256=QU0Psckg6CA_Tlw9AVxqa-Ay1fRM4n269sSIJkH9o7E,3066
-dtlpy/repositories/models.py,sha256=GdVWHJ6kOIxM01wH7RVQ3CVaR4OmGurWJdQVHZezLDM,34789
+dtlpy/repositories/models.py,sha256=HGDyV0kUNeH96QgjOcjigg7KV0-NubFLLapmiCVeNik,37889
 dtlpy/repositories/nodes.py,sha256=xXJm_YA0vDUn0dVvaGeq6ORM0vI3YXvfjuylvGRtkxo,3061
 dtlpy/repositories/ontologies.py,sha256=unnMhD2isR9DVE5S8Fg6fSDf1ZZ5Xemxxufx4LEUT3w,19577
 dtlpy/repositories/organizations.py,sha256=6ijUDFbsogfRul1g_vUB5AZOb41MRmV5NhNU7WLHt3A,22825
@@ -190,14 +190,14 @@ dtlpy/repositories/settings.py,sha256=pvqNse0ANCdU3NSLJEzHco-PZq__OIsPSPVJveB9E4
 dtlpy/repositories/tasks.py,sha256=v09S2pYGkKx_vBG7SWigJeuMhp0GsefKo3Td7ImrWb0,49039
 dtlpy/repositories/times_series.py,sha256=m-bKFEgiZ13yQNelDjBfeXMUy_HgsPD_JAHj1GVx9fU,11420
 dtlpy/repositories/triggers.py,sha256=izdNyCN1gDc5uo7AXntso0HSMTDIzGFUp-dSEz8cn_U,21990
-dtlpy/repositories/upload_element.py,sha256=4CDZRKLubanOP0ZyGwxAHTtl6GLzwAyRAIm-PLYt0ck,10140
-dtlpy/repositories/uploader.py,sha256=SW3mJHFgd5JgYUhwTwm63xXFQ8DB97-bIzc3Fk9BYMU,31219
+dtlpy/repositories/upload_element.py,sha256=R2KWIXmkp_dMAIr81tu3Y_VRfldj0ju8__V28ombkcg,10677
+dtlpy/repositories/uploader.py,sha256=c-YPOGd3ONCHrPJKGDNe_DiFkUiIAdeWY5Yv1Neg_rM,31306
 dtlpy/repositories/webhooks.py,sha256=IIpxOJ-7KeQp1TY9aJZz-FuycSjAoYx0TDk8z86KAK8,9033
 dtlpy/services/__init__.py,sha256=VfVJy2otIrDra6i7Sepjyez2ujiE6171ChQZp-YgxsM,904
 dtlpy/services/aihttp_retry.py,sha256=tgntZsAY0dW9v08rkjX1T5BLNDdDd8svtgn7nH8DSGU,5022
-dtlpy/services/api_client.py,sha256=DRGSi2gTbgLh_LR0vhwWh3f-tYuJql6VKL58Ov1Iqug,69478
+dtlpy/services/api_client.py,sha256=_LpnqLO-9eFAMQkOk5qP3wMVcdwKyOQ-iNvC0jdvATg,69294
 dtlpy/services/api_reference.py,sha256=cW-B3eoi9Xs3AwI87_Kr6GV_E6HPoC73aETFaGz3A-0,1515
-dtlpy/services/async_utils.py,sha256=bVz7PLCpnldyQXMKTPahnQqAudxNW1-c71nfMjcI41Q,2858
+dtlpy/services/async_utils.py,sha256=6Dm82Yj8FWPSnxHy-zs6C7uvepiIsPiWtMf7sM2ChMg,3629
 dtlpy/services/calls_counter.py,sha256=gr0io5rIsO5-7Cgc8neA1vK8kUtYhgFPmDQ2jXtiZZs,1036
 dtlpy/services/check_sdk.py,sha256=tnFWCzkJa8w2jLtw-guwuqpOtXGyiVU7ZCDFiUZUqzY,3593
 dtlpy/services/cookie.py,sha256=sSZR1QV4ienCcZ8lEK_Y4nZYBgAxO3kHrcBXFKGcmwQ,3694
@@ -223,9 +223,9 @@ dtlpy/utilities/reports/report.py,sha256=3nEsNnIWmdPEsd21nN8vMMgaZVcPKn9iawKTTeO
 dtlpy/utilities/videos/__init__.py,sha256=SV3w51vfPuGBxaMeNemx6qEMHw_C4lLpWNGXMvdsKSY,734
 dtlpy/utilities/videos/video_player.py,sha256=LCxg0EZ_DeuwcT7U_r7MRC6Q19s0xdFb7x5Gk39PRms,24072
 dtlpy/utilities/videos/videos.py,sha256=Dj916B4TQRIhI7HZVevl3foFrCsPp0eeWwvGbgX3-_A,21875
-dtlpy-1.98.8.data/scripts/dlp,sha256=-F0vSCWuSOOtgERAtsPMPyMmzitjhB7Yeftg_PDlDjw,10
-dtlpy-1.98.8.data/scripts/dlp.bat,sha256=QOvx8Dlx5dUbCTMpwbhOcAIXL1IWmgVRSboQqDhIn3A,37
-dtlpy-1.98.8.data/scripts/dlp.py,sha256=tEokRaDINISXnq8yNx_CBw1qM5uwjYiZoJOYGqWB3RU,4267
+dtlpy-1.99.11.data/scripts/dlp,sha256=-F0vSCWuSOOtgERAtsPMPyMmzitjhB7Yeftg_PDlDjw,10
+dtlpy-1.99.11.data/scripts/dlp.bat,sha256=QOvx8Dlx5dUbCTMpwbhOcAIXL1IWmgVRSboQqDhIn3A,37
+dtlpy-1.99.11.data/scripts/dlp.py,sha256=tEokRaDINISXnq8yNx_CBw1qM5uwjYiZoJOYGqWB3RU,4267
 tests/assets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/assets/models_flow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/assets/models_flow/failedmain.py,sha256=n8F4eu_u7JPrJ1zedbJPvv9e3lHb3ihoErqrBIcseEc,1847
@@ -233,9 +233,9 @@ tests/assets/models_flow/main.py,sha256=xotAjdHpFnIic3Wb-4f7GSg2igtuXZjvRPiYdCTa
 tests/assets/models_flow/main_model.py,sha256=Hl_tv7Q6KaRL3yLkpUoLMRqu5-ab1QsUYPL6RPEoamw,2042
 tests/features/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 tests/features/environment.py,sha256=V23cUx_p4VpNk9kc2I0BDZJHO_xcJBFJq8m3JlYCooc,16736
-dtlpy-1.98.8.dist-info/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
-dtlpy-1.98.8.dist-info/METADATA,sha256=21j22oI6cRAaGPEiDzFoctdPwBRHrm3wGwQlrVCiB90,3002
-dtlpy-1.98.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-dtlpy-1.98.8.dist-info/entry_points.txt,sha256=C4PyKthCs_no88HU39eioO68oei64STYXC2ooGZTc4Y,43
-dtlpy-1.98.8.dist-info/top_level.txt,sha256=ZWuLmQGUOtWAdgTf4Fbx884w1o0vBYq9dEc1zLv9Mig,12
-dtlpy-1.98.8.dist-info/RECORD,,
+dtlpy-1.99.11.dist-info/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+dtlpy-1.99.11.dist-info/METADATA,sha256=w7iA4gBhm97WutAg8hbHGg468RcdO1CzLc-2xrR4kM4,3003
+dtlpy-1.99.11.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+dtlpy-1.99.11.dist-info/entry_points.txt,sha256=C4PyKthCs_no88HU39eioO68oei64STYXC2ooGZTc4Y,43
+dtlpy-1.99.11.dist-info/top_level.txt,sha256=ZWuLmQGUOtWAdgTf4Fbx884w1o0vBYq9dEc1zLv9Mig,12
+dtlpy-1.99.11.dist-info/RECORD,,