dtlpy 1.116.6__py3-none-any.whl → 1.117.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dtlpy/__init__.py CHANGED
@@ -108,7 +108,7 @@ from .entities import (
108
108
  # compute
109
109
  ClusterProvider, ComputeType, ComputeStatus, Toleration, DeploymentResource, DeploymentResources,
110
110
  NodePool, AuthenticationIntegration, Authentication, ComputeCluster, ComputeContext, Compute, KubernetesCompute,
111
- ServiceDriver, ExportType, OutputExportType
111
+ ServiceDriver, ExportType, OutputExportType, DynamicConcurrencyUpdateMethod
112
112
  )
113
113
  from .ml import BaseModelAdapter
114
114
  from .utilities import Converter, BaseServiceRunner, Progress, Context, AnnotationFormat
dtlpy/__version__.py CHANGED
@@ -1 +1 @@
1
- version = '1.116.6'
1
+ version = '1.117.6'
dtlpy/entities/__init__.py CHANGED
@@ -44,7 +44,7 @@ from .package_slot import PackageSlot, SlotPostAction, SlotPostActionType, SlotD
44
44
  from .package_function import PackageFunction, FunctionIO, PackageInputType
45
45
  from .time_series import TimeSeries
46
46
  from .service import Service, KubernetesAutoscalerType, KubernetesRabbitmqAutoscaler, KubernetesAutoscaler, KubernetesRPSAutoscaler, \
47
- InstanceCatalog, KubernetesRuntime, ServiceType, ServiceModeType
47
+ InstanceCatalog, KubernetesRuntime, ServiceType, ServiceModeType, DynamicConcurrencyUpdateMethod
48
48
  from .execution import Execution, ExecutionStatus
49
49
  from .command import Command, CommandsStatus
50
50
  from .assignment import Assignment, Workload, WorkloadUnit
dtlpy/entities/feature_set.py CHANGED
@@ -135,6 +135,13 @@ class FeatureSet(entities.BaseEntity):
135
135
 
136
136
  return _json
137
137
 
138
+ def update(self):
139
+ """
140
+ Update the feature set
141
+
142
+ :return: entities.FeatureSet
143
+ """
144
+ return self.feature_sets.update(self)
138
145
  def delete(self):
139
146
  """
140
147
  Delete the feature set
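The new entity-level update() delegates to the FeatureSets.update() repository method added later in this diff (PATCH /features/set/{id}). A minimal usage sketch, assuming an existing feature set; the name and the attribute being modified are illustrative:

    import dtlpy as dl

    feature_set = dl.feature_sets.get(feature_set_name='clip-embeddings')  # hypothetical name
    feature_set.size = 512  # illustrative attribute change
    feature_set = feature_set.update()  # sends the entity JSON via FeatureSets.update()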
dtlpy/entities/item.py CHANGED
@@ -80,6 +80,22 @@ class Item(entities.BaseEntity):
80
80
  def datasetId(self):
81
81
  return self.dataset_id
82
82
 
83
+ @property
84
+ def resolved_stream(self):
85
+ stream = self.metadata.get('system', dict()).get('shebang', dict()).get('linkInfo', dict()).get('ref', None)
86
+ if stream is None:
87
+ stream = self.stream
88
+ api_url = self._client_api.environment
89
+ if api_url != self._client_api.base_gate_url:
90
+ stream = stream.replace(api_url, self._client_api.base_gate_url)
91
+ else:
92
+ link_item_url_override = os.environ.get('LINK_ITEM_URL_OVERRIDE', None)
93
+ if link_item_url_override is not None:
94
+ src, target = link_item_url_override.split(',')
95
+ stream = stream.replace(src, target)
96
+
97
+ return stream
98
+
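resolved_stream returns linkInfo.ref for link items and falls back to the regular stream URL otherwise, rewriting the environment host to base_gate_url when they differ. For link items, the ref can also be rewritten through the LINK_ITEM_URL_OVERRIDE environment variable, which holds a 'source,target' pair. A sketch with placeholder ids and hosts:

    import os
    import dtlpy as dl

    # rewrite link-item refs from the public gateway to a local proxy ('source,target' pair)
    os.environ['LINK_ITEM_URL_OVERRIDE'] = 'https://gate.dataloop.ai,http://localhost:8080'

    dataset = dl.datasets.get(dataset_id='my-dataset-id')  # placeholder id
    item = dataset.items.get(item_id='my-item-id')         # placeholder id
    print(item.resolved_stream)  # ref or stream URL after the substitutions described above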
83
99
  @staticmethod
84
100
  def _protected_from_json(_json, client_api, dataset=None):
85
101
  """
dtlpy/entities/ontology.py CHANGED
@@ -290,7 +290,7 @@ class Ontology(entities.BaseEntity):
290
290
 
291
291
  def update(self, system_metadata=False):
292
292
  """
293
- Update items metadata
293
+ Update Ontology attribute
294
294
 
295
295
  :param bool system_metadata: bool - True, if you want to change metadata system
296
296
  :return: Ontology object
dtlpy/entities/service.py CHANGED
@@ -11,6 +11,12 @@ from ..services.api_client import ApiClient
11
11
 
12
12
  logger = logging.getLogger(name='dtlpy')
13
13
 
14
+ class DynamicConcurrencyUpdateMethod(str, Enum):
15
+ """ The method of updating the dynamic concurrency.
16
+ """
17
+ RESTART = 'restart'
18
+ SYNC = 'sync'
19
+
14
20
 
15
21
  class ServiceType(str, Enum):
16
22
  """ The type of the service (SYSTEM).
@@ -136,6 +142,7 @@ class KubernetesRuntime(ServiceRuntime):
136
142
  num_replicas=DEFAULT_NUM_REPLICAS,
137
143
  concurrency=DEFAULT_CONCURRENCY,
138
144
  dynamic_concurrency=None,
145
+ concurrency_update_method=None,
139
146
  runner_image=None,
140
147
  autoscaler=None,
141
148
  **kwargs):
@@ -149,6 +156,7 @@ class KubernetesRuntime(ServiceRuntime):
149
156
  self.single_agent = kwargs.get('singleAgent', None)
150
157
  self.preemptible = kwargs.get('preemptible', None)
151
158
  self.dynamic_concurrency = kwargs.get('dynamicConcurrency', dynamic_concurrency)
159
+ self.concurrency_update_method = kwargs.get('concurrencyUpdateMethod', concurrency_update_method)
152
160
 
153
161
  self.autoscaler = kwargs.get('autoscaler', autoscaler)
154
162
  if self.autoscaler is not None and isinstance(self.autoscaler, dict):
@@ -183,6 +191,9 @@ class KubernetesRuntime(ServiceRuntime):
183
191
  if self.dynamic_concurrency is not None:
184
192
  _json['dynamicConcurrency'] = self.dynamic_concurrency
185
193
 
194
+ if self.concurrency_update_method is not None:
195
+ _json['concurrencyUpdateMethod'] = self.concurrency_update_method
196
+
186
197
  return _json
187
198
 
188
199
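Together with the new top-level exports, the Kubernetes runtime can now carry a concurrencyUpdateMethod. A sketch of setting it on an existing service; the service name is illustrative and the exact deployment flow depends on your setup:

    import dtlpy as dl

    service = dl.services.get(service_name='my-service')  # hypothetical service name
    service.runtime.dynamic_concurrency = True
    service.runtime.concurrency_update_method = dl.DynamicConcurrencyUpdateMethod.SYNC  # or RESTART
    service.update()  # runtime.to_json() now includes 'concurrencyUpdateMethod'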
 
dtlpy/ml/base_model_adapter.py CHANGED
@@ -368,14 +368,41 @@ class BaseModelAdapter(utilities.BaseServiceRunner):
368
368
  annotation_filters.custom_filter['filter']['$and'].append({'metadata.system.model.name': {'$exists': False}})
369
369
  return annotation_filters
370
370
 
371
+ def __download_items(self, dataset, filters, local_path, annotation_options, annotation_filters=None):
372
+ """
373
+ Download items from dataset with optional annotation filters.
374
+
375
+ :param dataset: Dataset to download from
376
+ :param filters: Filters to apply
377
+ :param local_path: Local path to save files
378
+ :param annotation_options: Annotation download options
379
+ :param annotation_filters: Optional filters for annotations
380
+ :return: List of downloaded items
381
+ """
382
+ if annotation_options == entities.ViewAnnotationOptions.JSON:
383
+ downloader = repositories.Downloader(dataset.items)
384
+ return downloader._download_recursive(
385
+ local_path=local_path,
386
+ filters=filters,
387
+ annotation_filters=annotation_filters
388
+ )
389
+ else:
390
+ return dataset.items.download(
391
+ filters=filters,
392
+ local_path=local_path,
393
+ annotation_options=annotation_options,
394
+ annotation_filters=annotation_filters
395
+ )
396
+
371
397
  def __download_background_images(self, filters, data_subset_base_path, annotation_options):
372
398
  background_list = list()
373
399
  if self.configuration.get('include_background', False) is True:
374
400
  filters.custom_filter["filter"]["$and"].append({"annotated": False})
375
- background_list = self.model_entity.dataset.items.download(
401
+ background_list = self.__download_items(
402
+ dataset=self.model_entity.dataset,
376
403
  filters=filters,
377
404
  local_path=data_subset_base_path,
378
- annotation_options=annotation_options,
405
+ annotation_options=annotation_options
379
406
  )
380
407
  return background_list
381
408
 
@@ -434,13 +461,14 @@ class BaseModelAdapter(utilities.BaseServiceRunner):
434
461
  if subsets is None:
435
462
  raise ValueError("Model (id: {}) must have subsets in metadata.system.subsets".format(self.model_entity.id))
436
463
  for subset, filters_dict in subsets.items():
464
+ _filters_dict = filters_dict.copy()
437
465
  data_subset_base_path = os.path.join(data_path, subset)
438
466
  if os.path.isdir(data_subset_base_path) and not overwrite:
439
467
  # existing and dont overwrite
440
468
  self.logger.debug("Subset {!r} already exists (and overwrite=False). Skipping.".format(subset))
441
469
  continue
442
470
 
443
- filters = entities.Filters(custom_filter=filters_dict)
471
+ filters = entities.Filters(custom_filter=_filters_dict)
444
472
  self.logger.debug("Downloading subset {!r} of {}".format(subset, self.model_entity.dataset.name))
445
473
 
446
474
  annotation_filters = None
@@ -470,13 +498,15 @@ class BaseModelAdapter(utilities.BaseServiceRunner):
470
498
  annotation_filters = self.__include_model_annotations(annotation_filters)
471
499
  annotations_subsets[subset] = annotation_filters.prepare()
472
500
 
473
- ret_list = dataset.items.download(
501
+ ret_list = self.__download_items(
502
+ dataset=dataset,
474
503
  filters=filters,
475
504
  local_path=data_subset_base_path,
476
505
  annotation_options=annotation_options,
477
- annotation_filters=annotation_filters,
506
+ annotation_filters=annotation_filters
478
507
  )
479
- filters = entities.Filters(custom_filter=subsets[subset])
508
+ _filters_dict = subsets[subset].copy()
509
+ filters = entities.Filters(custom_filter=_filters_dict)
480
510
  background_ret_list = self.__download_background_images(
481
511
  filters=filters,
482
512
  data_subset_base_path=data_subset_base_path,
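prepare_data() now copies each subset's filter dict before building Filters (so repeated calls do not mutate the model metadata) and routes JSON-only downloads through the new Downloader._download_recursive(). A sketch of the subset definition this code consumes, assuming the usual metadata.system.subsets convention:

    import dtlpy as dl

    model = dl.models.get(model_id='my-model-id')  # hypothetical id
    model.metadata.setdefault('system', {})['subsets'] = {
        'train': dl.Filters(field='dir', values='/train').prepare(),
        'validation': dl.Filters(field='dir', values='/validation').prepare(),
    }
    model.update(system_metadata=True)  # prepare_data() reads and copies these dicts per run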
dtlpy/repositories/datasets.py CHANGED
@@ -2,23 +2,28 @@
2
2
  Datasets Repository
3
3
  """
4
4
 
5
+ import copy
6
+ import json
7
+ import logging
5
8
  import os
6
9
  import sys
10
+ import tempfile
7
11
  import time
8
- import copy
9
- import tqdm
10
- import logging
11
12
  import zipfile
12
- import json
13
- from typing import Union, Generator, Optional
13
+ from pathlib import Path
14
+ from typing import Generator, Optional, Union
15
+
16
+ import tqdm
14
17
 
15
- from .. import entities, repositories, miscellaneous, exceptions, services, PlatformException, _api_reference
18
+ from .. import _api_reference, entities, exceptions, miscellaneous, PlatformException, repositories, services
19
+ from ..entities.dataset import ExportType, OutputExportType
20
+ from ..services import service_defaults
16
21
  from ..services.api_client import ApiClient
17
- from ..entities.dataset import OutputExportType, ExportType
18
22
 
19
23
  logger = logging.getLogger(name='dtlpy')
20
24
 
21
25
  MAX_ITEMS_PER_SUBSET = 50000
26
+ DOWNLOAD_ANNOTATIONS_MAX_ITEMS_PER_SUBSET = 1000
22
27
 
23
28
  class Datasets:
24
29
  """
@@ -129,6 +134,54 @@ class Datasets:
129
134
  dataset_id = dataset.id
130
135
  return dataset_id
131
136
 
137
+ @staticmethod
138
+ def _save_item_json_file(item_data, base_path: Path, export_version=None):
139
+ """
140
+ Save a single item's JSON data to a file, creating the directory structure as needed.
141
+
142
+ :param dict item_data: The item data dictionary (must have 'filename' key)
143
+ :param Path base_path: Base directory path where JSON files should be saved
144
+ :param entities.ExportVersion export_version: Optional export version (V1 or V2) affecting filename handling
145
+ :return: Path to the saved JSON file
146
+ :rtype: Path
147
+ """
148
+ # Get filename and remove leading slash
149
+ filename = item_data.get('filename', '')
150
+ if not filename:
151
+ raise ValueError("item_data must have a 'filename' key")
152
+ filename = filename.lstrip('/')
153
+
154
+ # Determine relative JSON path based on export version
155
+ if export_version == entities.ExportVersion.V1:
156
+ # V1: Replace extension with .json (e.g., "file.jpg" -> "file.json")
157
+ rel_json_path = str(Path(filename).with_suffix('.json'))
158
+ elif export_version == entities.ExportVersion.V2:
159
+ # V2: Append .json (e.g., "file.jpg" -> "file.jpg.json")
160
+ rel_json_path = filename + '.json'
161
+ else:
162
+ # Default/None: Replace extension with .json (matches the legacy per-item export behavior)
163
+ rel_json_path = os.path.splitext(filename)[0] + '.json'
164
+
165
+ # Remove leading slash if present
166
+ if rel_json_path.startswith('/'):
167
+ rel_json_path = rel_json_path[1:]
168
+
169
+ # Build output path
170
+ out_path = base_path / rel_json_path
171
+
172
+ # Create parent directories
173
+ out_path.parent.mkdir(parents=True, exist_ok=True)
174
+
175
+ # Write JSON file
176
+ try:
177
+ with open(out_path, 'w') as outf:
178
+ json.dump(item_data, outf, indent=2)
179
+ except Exception:
180
+ logger.exception(f'Failed writing export item JSON to {out_path}')
181
+ raise
182
+
183
+ return out_path
184
+
132
185
  @staticmethod
133
186
  def _build_payload(filters, include_feature_vectors, include_annotations,
134
187
  export_type, annotation_filters, feature_vector_filters, dataset_lock, lock_timeout_sec, export_summary):
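A short illustration of the filename mapping implemented by _save_item_json_file; the helper is private, so treat the call below as a sketch of its current signature rather than a public API:

    from pathlib import Path
    import dtlpy as dl

    item_data = {'id': 'abc123', 'filename': '/folder/img.jpg'}  # illustrative item JSON
    base = Path('/tmp/export')
    # V1 (and None): /tmp/export/folder/img.json      - extension replaced
    # V2:            /tmp/export/folder/img.jpg.json  - '.json' appended
    dl.datasets._save_item_json_file(item_data=item_data, base_path=base, export_version=dl.ExportVersion.V2)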
@@ -902,17 +955,7 @@ class Datasets:
902
955
  logger.debug("start building per-item JSON files under local_path mirroring remote structure")
903
956
  # Build per-item JSON files under local_path mirroring remote structure
904
957
  for item in all_items:
905
- rel_json_path = os.path.splitext(item.get('filename'))[0] + '.json'
906
- # Remove leading slash to make it a relative path
907
- if rel_json_path.startswith('/'):
908
- rel_json_path = rel_json_path[1:]
909
- out_path = os.path.join(base_dir, rel_json_path)
910
- os.makedirs(os.path.dirname(out_path), exist_ok=True)
911
- try:
912
- with open(out_path, 'w') as outf:
913
- json.dump(item, outf)
914
- except Exception:
915
- logger.exception(f'Failed writing export item JSON to {out_path}')
958
+ self._save_item_json_file(item_data=item, base_path=Path(base_dir), export_version=None)
916
959
  logger.debug("end building per-item JSON files under local_path mirroring remote structure")
917
960
  return base_dir
918
961
 
@@ -1159,7 +1202,7 @@ class Datasets:
1159
1202
  include_annotations_in_output: bool = True,
1160
1203
  export_png_files: bool = False,
1161
1204
  filter_output_annotations: bool = False,
1162
- alpha: float = None,
1205
+ alpha: float = 1,
1163
1206
  export_version=entities.ExportVersion.V1,
1164
1207
  dataset_lock: bool = False,
1165
1208
  lock_timeout_sec: int = None,
@@ -1216,33 +1259,26 @@ class Datasets:
1216
1259
  elif not isinstance(annotation_options, list):
1217
1260
  annotation_options = [annotation_options]
1218
1261
  for ann_option in annotation_options:
1219
- if not isinstance(ann_option, entities.ViewAnnotationOptions):
1220
- if ann_option not in list(entities.ViewAnnotationOptions):
1221
- raise PlatformException(
1222
- error='400',
1223
- message='Unknown annotation download option: {}, please choose from: {}'.format(
1224
- ann_option, list(entities.ViewAnnotationOptions)))
1225
-
1262
+ if ann_option not in entities.ViewAnnotationOptions:
1263
+ raise PlatformException(
1264
+ error='400',
1265
+ message=f'Unknown annotation download option: {ann_option}, please choose from: {list(entities.ViewAnnotationOptions)}',
1266
+ )
1226
1267
  if remote_path is not None:
1227
- logger.warning(
1228
- '"remote_path" is ignored. Use "filters=dl.Filters(field="dir, values={!r}"'.format(remote_path))
1268
+ logger.warning(f'"remote_path" is ignored. Use filters=dl.Filters(field="dir", values={remote_path!r}) instead')
1269
+ if filter_output_annotations is True:
1270
+ logger.warning("'filter_output_annotations' is ignored but kept for legacy support")
1271
+ if include_annotations_in_output is False:
1272
+ logger.warning("include_annotations_in_output was False, but was set to True since this function downloads annotations.")
1273
+ include_annotations_in_output = True
1274
+
1229
1275
  if local_path is None:
1230
1276
  if dataset.project is None:
1231
1277
  # by dataset name
1232
- local_path = os.path.join(
1233
- services.service_defaults.DATALOOP_PATH,
1234
- "datasets",
1235
- "{}_{}".format(dataset.name, dataset.id),
1236
- )
1278
+ local_path = str(Path(service_defaults.DATALOOP_PATH) / "datasets" / f"{dataset.name}_{dataset.id}")
1237
1279
  else:
1238
1280
  # by dataset and project name
1239
- local_path = os.path.join(
1240
- services.service_defaults.DATALOOP_PATH,
1241
- "projects",
1242
- dataset.project.name,
1243
- "datasets",
1244
- dataset.name,
1245
- )
1281
+ local_path = str(Path(service_defaults.DATALOOP_PATH) / "projects" / dataset.project.name / "datasets" / dataset.name)
1246
1282
 
1247
1283
  if filters is None:
1248
1284
  filters = entities.Filters()
@@ -1260,53 +1296,98 @@ class Datasets:
1260
1296
  method=entities.FiltersMethod.OR)
1261
1297
 
1262
1298
  downloader = repositories.Downloader(items_repository=dataset.items)
1263
- downloader.download_annotations(dataset=dataset,
1264
- filters=filters,
1265
- annotation_filters=annotation_filters,
1266
- local_path=local_path,
1267
- overwrite=overwrite,
1268
- include_annotations_in_output=include_annotations_in_output,
1269
- export_png_files=export_png_files,
1270
- filter_output_annotations=filter_output_annotations,
1271
- export_version=export_version,
1272
- dataset_lock=dataset_lock,
1273
- lock_timeout_sec=lock_timeout_sec,
1274
- export_summary=export_summary
1275
- )
1276
- if annotation_options:
1277
- pages = dataset.items.list(filters=filters)
1278
- if not isinstance(annotation_options, list):
1279
- annotation_options = [annotation_options]
1280
- # convert all annotations to annotation_options
1299
+
1300
+ # Setup for incremental processing
1301
+ if len(annotation_options) == 0:
1302
+ pool = None
1303
+ progress = None
1304
+ jobs = []
1305
+ else:
1306
+ # Get total count for progress bar
1307
+ filter_copy = copy.deepcopy(filters)
1308
+ filter_copy.page_size = 0
1309
+ pages = dataset.items.list(filters=filter_copy)
1310
+ total_items = pages.items_count
1311
+
1312
+ # Setup thread pool and progress bar
1281
1313
  pool = dataset._client_api.thread_pools(pool_name='dataset.download')
1282
- jobs = [None for _ in range(pages.items_count)]
1283
- progress = tqdm.tqdm(total=pages.items_count,
1284
- disable=dataset._client_api.verbose.disable_progress_bar_download_annotations,
1285
- file=sys.stdout, desc='Download Annotations')
1286
- i_item = 0
1287
- for page in pages:
1288
- for item in page:
1289
- jobs[i_item] = pool.submit(
1290
- Datasets._convert_single,
1291
- **{
1292
- 'downloader': downloader,
1293
- 'item': item,
1294
- 'img_filepath': None,
1295
- 'local_path': local_path,
1296
- 'overwrite': overwrite,
1297
- 'annotation_options': annotation_options,
1298
- 'annotation_filters': annotation_filters,
1299
- 'thickness': thickness,
1300
- 'with_text': with_text,
1301
- 'progress': progress,
1302
- 'alpha': alpha,
1303
- 'export_version': export_version
1304
- }
1305
- )
1306
- i_item += 1
1307
- # get all results
1314
+ progress = tqdm.tqdm(
1315
+ total=total_items,
1316
+ disable=dataset._client_api.verbose.disable_progress_bar_download_annotations,
1317
+ file=sys.stdout,
1318
+ desc='Download Annotations'
1319
+ )
1320
+ jobs = []
1321
+
1322
+
1323
+ # Call _export_recursive as generator
1324
+ export_generator = dataset.project.datasets._export_recursive(
1325
+ dataset=dataset,
1326
+ local_path=tempfile.mkdtemp(prefix='annotations_jsons_'),
1327
+ filters=filters,
1328
+ annotation_filters=annotation_filters,
1329
+ include_annotations=True,
1330
+ export_type=ExportType.JSON,
1331
+ dataset_lock=dataset_lock,
1332
+ lock_timeout_sec=lock_timeout_sec,
1333
+ export_summary=export_summary,
1334
+ timeout=0,
1335
+ max_items_per_subset=DOWNLOAD_ANNOTATIONS_MAX_ITEMS_PER_SUBSET
1336
+ )
1337
+
1338
+ # Process each subset JSON file incrementally
1339
+ for subset_json_file in export_generator:
1340
+ if subset_json_file is None or not Path(subset_json_file).is_file():
1341
+ continue
1342
+
1343
+ try:
1344
+ # Open and load the items array
1345
+ with open(subset_json_file, 'r') as f:
1346
+ items_data = json.load(f)
1347
+
1348
+ # Process each item immediately
1349
+ for item_data in items_data:
1350
+ # Split and save individual JSON file
1351
+ Datasets._save_item_json_file(item_data=item_data, base_path=Path(local_path) / 'json', export_version=export_version)
1352
+
1353
+ # If annotation_options are provided, submit to thread pool immediately
1354
+ if annotation_options:
1355
+ # Create Item entity from item_data
1356
+ item = entities.Item.from_json(
1357
+ _json=item_data,
1358
+ client_api=dataset._client_api,
1359
+ dataset=dataset
1360
+ )
1361
+
1362
+ job = pool.submit(
1363
+ Datasets._convert_single,
1364
+ **{
1365
+ 'downloader': downloader,
1366
+ 'item': item,
1367
+ 'img_filepath': None,
1368
+ 'local_path': local_path,
1369
+ 'overwrite': overwrite,
1370
+ 'annotation_options': annotation_options,
1371
+ 'annotation_filters': annotation_filters,
1372
+ 'thickness': thickness,
1373
+ 'with_text': with_text,
1374
+ 'progress': progress,
1375
+ 'alpha': alpha,
1376
+ 'export_version': export_version
1377
+ }
1378
+ )
1379
+ jobs.append(job)
1380
+
1381
+ # Clean up temporary subset JSON file
1382
+ os.remove(subset_json_file)
1383
+ except Exception as e:
1384
+ logger.exception(f'Failed processing subset JSON file {subset_json_file}: {e}')
1385
+
1386
+ # Wait for all thread pool jobs to complete
1387
+ if annotation_options:
1308
1388
  _ = [j.result() for j in jobs]
1309
1389
  progress.close()
1390
+
1310
1391
  return local_path
1311
1392
 
1312
1393
  def _upload_single_item_annotation(self, item, file, pbar):
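The rewrite above makes download_annotations stream per-subset export JSONs (DOWNLOAD_ANNOTATIONS_MAX_ITEMS_PER_SUBSET = 1000 items each) instead of paging through items, writing per-item JSONs under <local_path>/json and converting other formats through the thread pool. The public call is unchanged; a sketch with placeholder values:

    import dtlpy as dl

    dataset = dl.datasets.get(dataset_id='my-dataset-id')  # placeholder id
    out = dataset.download_annotations(
        local_path='/tmp/my_dataset',
        annotation_options=[dl.ViewAnnotationOptions.JSON, dl.ViewAnnotationOptions.MASK],
        overwrite=True,
    )
    # per-item JSONs land under /tmp/my_dataset/json; converted formats under their option folders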
dtlpy/repositories/downloader.py CHANGED
@@ -1,18 +1,22 @@
1
+ import copy
2
+ import io
3
+ import json
4
+ import logging
5
+ import multiprocessing
6
+ import os
7
+ import shutil
8
+ import sys
9
+ import tempfile
10
+ import traceback
1
11
  from pathlib import Path
2
- from requests.adapters import HTTPAdapter
3
- from urllib3.util import Retry
4
- from PIL import Image
12
+ from urllib.parse import unquote, urlparse
13
+
5
14
  import numpy as np
6
- import traceback
7
- from urllib.parse import urlparse, unquote
8
15
  import requests
9
- import logging
10
- import shutil
11
- import json
12
16
  import tqdm
13
- import sys
14
- import os
15
- import io
17
+ from PIL import Image
18
+ from requests.adapters import HTTPAdapter
19
+ from urllib3.util import Retry
16
20
 
17
21
  from .. import entities, repositories, miscellaneous, PlatformException, exceptions
18
22
  from ..services import Reporter
@@ -20,12 +24,257 @@ from ..services import Reporter
20
24
  logger = logging.getLogger(name='dtlpy')
21
25
 
22
26
  NUM_TRIES = 3 # try to download 3 time before fail on item
23
-
27
+ DOWNLOAD_MAX_ITEMS_PER_SUBSET = 1000
24
28
 
25
29
  class Downloader:
26
30
  def __init__(self, items_repository):
27
31
  self.items_repository = items_repository
28
32
 
33
+ def _process_download_results(self, reporter, raise_on_error=False):
34
+ """
35
+ Process download results and generate summary report.
36
+
37
+ :param reporter: Reporter instance containing download results
38
+ :param raise_on_error: If True, raise exception on download errors
39
+ :return: Output from reporter
40
+ """
41
+ # reporting
42
+ n_download = reporter.status_count(status='download')
43
+ n_exist = reporter.status_count(status='exist')
44
+ n_error = reporter.status_count(status='error')
45
+ logger.info(f"Number of files downloaded: {n_download}")
46
+ logger.info(f"Number of files that already exist: {n_exist}")
47
+ logger.info(f"Total number of files: {n_download + n_exist}")
48
+
49
+ # log error
50
+ if n_error > 0:
51
+ log_filepath = reporter.generate_log_files()
52
+ # Get up to 5 error examples for the exception message
53
+ error_text = ""
54
+ error_counter = 0
55
+ if reporter._errors:
56
+ for _id, error in reporter._errors.items():
57
+ error_counter += 1
58
+ error_text += f"Item ID: {_id}, Error: {error} | "
59
+ if error_counter >= 5:
60
+ break
61
+ error_message = f"Errors in {n_error} files. Errors: {error_text}"
62
+ if log_filepath is not None:
63
+ error_message += f", see {log_filepath} for full log"
64
+ if raise_on_error is True:
65
+ raise PlatformException(
66
+ error="400", message=error_message
67
+ )
68
+ else:
69
+ logger.warning(error_message)
70
+
71
+ if int(n_download) <= 1 and int(n_exist) <= 1:
72
+ try:
73
+ return next(reporter.output)
74
+ except StopIteration:
75
+ return None
76
+ return reporter.output
77
+
78
+ def _process_item_json(self, local_path, item_json, reporter, pbar, overwrite=False):
79
+ """
80
+ Process a single item JSON for download, saving both the item file and metadata.
81
+
82
+ :param local_path: Local path to save files
83
+ :param item_json: Item JSON metadata
84
+ :param reporter: Reporter instance for tracking progress
85
+ :param pbar: Progress bar instance
86
+ :param overwrite: Whether to overwrite existing files
87
+ :return: Error message, traceback, and downloaded filepath
88
+ """
89
+ err = None
90
+ trace = None
91
+ downloaded_filepath = None
92
+ item_id = item_json['id']
93
+ filename = item_json['filename'].lstrip('/')
94
+
95
+ for i_try in range(NUM_TRIES):
96
+ try:
97
+ # Download the image
98
+ image_path = Path(local_path) / 'items' / filename
99
+ # Ensure the directory for the image file exists (in case filename has subdirectories)
100
+ image_path.parent.mkdir(parents=True, exist_ok=True)
101
+ item = entities.Item.from_json(_json = item_json, client_api=self.items_repository._client_api, is_fetched=False)
102
+ downloaded_data = self.__thread_download(
103
+ item=item,
104
+ local_path=str(image_path.parent),
105
+ local_filepath=str(image_path),
106
+ save_locally=True,
107
+ to_array=False,
108
+ overwrite=overwrite,
109
+ annotation_options=[],
110
+ annotation_filters=None,
111
+ )
112
+
113
+ if downloaded_data is None:
114
+ err = 'Failed to download image'
115
+ trace = ''
116
+ else:
117
+ # Save the item JSON directly
118
+ json_filename = Path(filename).stem + '.json'
119
+ json_path = Path(local_path) / 'json' / Path(filename).parent / json_filename
120
+
121
+ # Ensure the directory for the JSON file exists (in case filename has subdirectories)
122
+ json_path.parent.mkdir(parents=True, exist_ok=True)
123
+
124
+ # Save the original item_json directly
125
+ with open(json_path, 'w', encoding='utf-8') as f:
126
+ json.dump(item_json, f, indent=2, ensure_ascii=False)
127
+
128
+ downloaded_filepath = str(image_path)
129
+
130
+ if downloaded_filepath is not None:
131
+ break
132
+
133
+ except Exception as e:
134
+ logger.debug(f"Download item: {filename}. Try {i_try + 1}/{NUM_TRIES}. Fail.")
135
+ err = e
136
+ trace = traceback.format_exc()
137
+
138
+ pbar.update()
139
+ if downloaded_filepath is None:
140
+ if err is None:
141
+ err = self.items_repository._client_api.platform_exception
142
+ reporter.set_index(status="error", ref=item_id, success=False, error=f"{err}\n{trace}")
143
+ else:
144
+ reporter.set_index(ref=item_id, status="download", output=downloaded_filepath, success=True)
145
+
146
+ def _download_recursive(
147
+ self,
148
+ local_path=None,
149
+ filters: entities.Filters = None,
150
+ annotation_filters: entities.Filters = None,
151
+ file_types=None,
152
+ overwrite=False,
153
+ raise_on_error=False,
154
+ dataset_lock=False,
155
+ lock_timeout_sec=None,
156
+ ):
157
+ """
158
+ Download items recursively from a dataset.
159
+
160
+ :param local_path: Local path to save downloaded items
161
+ :param filters: Filters entity to filter items
162
+ :param annotation_filters: Filters entity to filter annotations
163
+ :param file_types: List of file types to download
164
+ :param overwrite: Whether to overwrite existing files
165
+ :param raise_on_error: Raise error if download fails
166
+ :param dataset_lock: Lock dataset during download
167
+ :param lock_timeout_sec: Lock timeout in seconds
168
+ """
169
+ filters, annotation_filters = self._prepare_filters(filters=filters, annotation_filters=annotation_filters, file_types=file_types)
170
+ filter_copy = copy.deepcopy(filters)
171
+ filter_copy.page_size = 0
172
+ num_items = self.items_repository.list(filters=filter_copy).items_count
173
+ if num_items == 0:
174
+ return list()
175
+ client_api = self.items_repository._client_api
176
+ reporter = Reporter(
177
+ num_workers=num_items,
178
+ resource=Reporter.ITEMS_DOWNLOAD,
179
+ print_error_logs=client_api.verbose.print_error_logs,
180
+ client_api=client_api,
181
+ )
182
+
183
+ # Create directories once using pathlib
184
+ local_path_obj = Path(local_path)
185
+ items_dir = local_path_obj / 'items'
186
+ jsons_dir = local_path_obj / 'json'
187
+ items_dir.mkdir(parents=True, exist_ok=True)
188
+ jsons_dir.mkdir(parents=True, exist_ok=True)
189
+
190
+ jobs = [None for _ in range(num_items)]
191
+ # crrently keep the thread count to default.
192
+ # client_api._thread_pools_names['item.download'] = 5 * multiprocessing.cpu_count()
193
+ pool = client_api.thread_pools(pool_name='item.download')
194
+ pbar = tqdm.tqdm(
195
+ total=num_items,
196
+ disable=client_api.verbose.disable_progress_bar_download_dataset,
197
+ file=sys.stdout,
198
+ desc='Download Items',
199
+ )
200
+ try:
201
+ i_item = 0
202
+ import time
203
+ start_time = time.time()
204
+ for json_file in self.items_repository.dataset.project.datasets._export_recursive(
205
+ dataset=self.items_repository.dataset,
206
+ local_path=tempfile.mkdtemp(prefix='download_recursive_jsons_'),
207
+ max_items_per_subset=DOWNLOAD_MAX_ITEMS_PER_SUBSET,
208
+ include_annotations=True,
209
+ filters=filters,
210
+ annotation_filters=annotation_filters,
211
+ dataset_lock=dataset_lock,
212
+ lock_timeout_sec=lock_timeout_sec,
213
+ ):
214
+ end_time = time.time()
215
+ with open(json_file, 'r') as f:
216
+ data = json.load(f)
217
+ for item_json in data:
218
+ jobs[i_item] = pool.submit(
219
+ self._process_item_json,
220
+ **{
221
+ "local_path": local_path,
222
+ "item_json": item_json,
223
+ "reporter": reporter,
224
+ "pbar": pbar,
225
+ "overwrite": overwrite,
226
+ },
227
+ )
228
+ i_item += 1
229
+ finally:
230
+ _ = [j.result() for j in jobs if j is not None]
231
+ pbar.close()
232
+ return self._process_download_results(reporter=reporter, raise_on_error=raise_on_error)
233
+
234
+ @staticmethod
235
+ def _prepare_filters(filters: entities.Filters = None,
236
+ annotation_filters: entities.Filters = None,
237
+ file_types=None):
238
+ """
239
+ Prepare and merge filters with annotation filters.
240
+
241
+ :param filters: Filters entity or None
242
+ :param annotation_filters: Annotation filters to merge with item filters
243
+ :param file_types: List of file types to filter
244
+ :return: Prepared filters entity
245
+ """
246
+ # filters
247
+ if filters is None:
248
+ filters = entities.Filters()
249
+ filters._user_query = 'false'
250
+ # file types
251
+ if file_types is not None:
252
+ filters.add(field='metadata.system.mimetype', values=file_types, operator=entities.FiltersOperations.IN)
253
+ if annotation_filters is not None:
254
+ if len(annotation_filters.and_filter_list) > 0 or len(annotation_filters.or_filter_list) > 0:
255
+ for annotation_filter_and in annotation_filters.and_filter_list:
256
+ filters.add_join(field=annotation_filter_and.field,
257
+ values=annotation_filter_and.values,
258
+ operator=annotation_filter_and.operator,
259
+ method=entities.FiltersMethod.AND)
260
+ for annotation_filter_or in annotation_filters.or_filter_list:
261
+ filters.add_join(field=annotation_filter_or.field,
262
+ values=annotation_filter_or.values,
263
+ operator=annotation_filter_or.operator,
264
+ method=entities.FiltersMethod.OR)
265
+ elif annotation_filters.custom_filter is not None:
266
+ annotation_query_dict = annotation_filters.prepare()
267
+ items_query_dict = filters.prepare()
268
+ items_query_dict["join"] = annotation_query_dict
269
+ filters.reset()
270
+ filters.custom_filter = items_query_dict
271
+
272
+ else:
273
+ annotation_filters = entities.Filters(resource=entities.FiltersResource.ANNOTATION)
274
+ filters._user_query = 'false'
275
+
276
+ return filters, annotation_filters
277
+
29
278
  def download(self,
30
279
  # filter options
31
280
  filters: entities.Filters = None,
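download() now delegates filter handling to _prepare_filters(), which joins annotation filters onto the item query (or nests a custom annotation filter under 'join'). A sketch of a call that exercises that path, with placeholder id and label:

    import dtlpy as dl

    dataset = dl.datasets.get(dataset_id='my-dataset-id')  # placeholder id
    ann_filters = dl.Filters(resource=dl.FiltersResource.ANNOTATION)
    ann_filters.add(field='label', values='car')
    dataset.items.download(
        local_path='/tmp/cars',
        annotation_filters=ann_filters,
        annotation_options=dl.ViewAnnotationOptions.JSON,
    )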
@@ -131,35 +380,12 @@ class Downloader:
131
380
  items_to_download = [items]
132
381
  num_items = len(items)
133
382
  else:
134
- # filters
135
- if filters is None:
136
- filters = entities.Filters()
137
- filters._user_query = 'false'
138
- # file types
139
- if file_types is not None:
140
- filters.add(field='metadata.system.mimetype', values=file_types, operator=entities.FiltersOperations.IN)
141
- if annotation_filters is not None:
142
- if len(annotation_filters.and_filter_list) > 0 or len(annotation_filters.or_filter_list) > 0:
143
- for annotation_filter_and in annotation_filters.and_filter_list:
144
- filters.add_join(field=annotation_filter_and.field,
145
- values=annotation_filter_and.values,
146
- operator=annotation_filter_and.operator,
147
- method=entities.FiltersMethod.AND)
148
- for annotation_filter_or in annotation_filters.or_filter_list:
149
- filters.add_join(field=annotation_filter_or.field,
150
- values=annotation_filter_or.values,
151
- operator=annotation_filter_or.operator,
152
- method=entities.FiltersMethod.OR)
153
- elif annotation_filters.custom_filter is not None:
154
- annotation_query_dict = annotation_filters.prepare()
155
- items_query_dict = filters.prepare()
156
- items_query_dict["join"] = annotation_query_dict
157
- filters.reset()
158
- filters.custom_filter = items_query_dict
159
-
160
- else:
161
- annotation_filters = entities.Filters(resource=entities.FiltersResource.ANNOTATION)
162
- filters._user_query = 'false'
383
+ # Prepare and merge filters
384
+ filters, annotation_filters = self._prepare_filters(
385
+ filters=filters,
386
+ annotation_filters=annotation_filters,
387
+ file_types=file_types
388
+ )
163
389
 
164
390
  items_to_download = self.items_repository.list(filters=filters)
165
391
  num_items = items_to_download.items_count
@@ -234,7 +460,8 @@ class Downloader:
234
460
  # pool
235
461
  pool = client_api.thread_pools(pool_name='item.download')
236
462
  # download
237
- pbar = tqdm.tqdm(total=num_items, disable=client_api.verbose.disable_progress_bar_download_dataset, file=sys.stdout,
463
+ pbar = tqdm.tqdm(total=num_items, disable=client_api.verbose.disable_progress_bar_download_dataset,
464
+ file=sys.stdout,
238
465
  desc='Download Items')
239
466
  try:
240
467
  i_item = 0
@@ -305,41 +532,8 @@ class Downloader:
305
532
  finally:
306
533
  _ = [j.result() for j in jobs if j is not None]
307
534
  pbar.close()
308
- # reporting
309
- n_download = reporter.status_count(status='download')
310
- n_exist = reporter.status_count(status='exist')
311
- n_error = reporter.status_count(status='error')
312
- logger.info("Number of files downloaded:{}".format(n_download))
313
- logger.info("Number of files exists: {}".format(n_exist))
314
- logger.info("Total number of files: {}".format(n_download + n_exist))
315
535
 
316
- # log error
317
- if n_error > 0:
318
- log_filepath = reporter.generate_log_files()
319
- # Get up to 5 error examples for the exception message
320
- error_text = ""
321
- error_counter = 0
322
- if reporter._errors:
323
- for _id, error in reporter._errors.items():
324
- error_counter += 1
325
- error_text += f"Item ID: {_id}, Error: {error} | "
326
- if error_counter >= 5:
327
- break
328
- error_message = f"Errors in {n_error} files. Errors: {error_text}"
329
- if log_filepath is not None:
330
- error_message += f", see {log_filepath} for full log"
331
- if raise_on_error is True:
332
- raise PlatformException(
333
- error="400", message=error_message
334
- )
335
- else:
336
- logger.warning(error_message)
337
- if int(n_download) <= 1 and int(n_exist) <= 1:
338
- try:
339
- return next(reporter.output)
340
- except StopIteration:
341
- return None
342
- return reporter.output
536
+ return self._process_download_results(reporter=reporter, raise_on_error=raise_on_error)
343
537
 
344
538
  def __thread_download_wrapper(self, i_item,
345
539
  # item params
@@ -403,7 +597,7 @@ class Downloader:
403
597
  export_version=entities.ExportVersion.V1,
404
598
  dataset_lock=False,
405
599
  lock_timeout_sec=None,
406
- export_summary=False
600
+ export_summary=False
407
601
  ):
408
602
  """
409
603
  Download annotations json for entire dataset
@@ -633,27 +827,12 @@ class Downloader:
633
827
  @staticmethod
634
828
  def __get_link_source(item):
635
829
  assert isinstance(item, entities.Item)
636
- if not item.is_fetched:
637
- return item, '', False
638
-
639
- if not item.filename.endswith('.json') or \
640
- item.metadata.get('system', {}).get('shebang', {}).get('dltype', '') != 'link':
641
- return item, '', False
642
-
643
- # recursively get next id link item
644
- while item.filename.endswith('.json') and \
645
- item.metadata.get('system', {}).get('shebang', {}).get('dltype', '') == 'link' and \
646
- item.metadata.get('system', {}).get('shebang', {}).get('linkInfo', {}).get('type', '') == 'id':
647
- item = item.dataset.items.get(item_id=item.metadata['system']['shebang']['linkInfo']['ref'])
648
-
649
- # check if link
650
- if item.filename.endswith('.json') and \
651
- item.metadata.get('system', {}).get('shebang', {}).get('dltype', '') == 'link' and \
652
- item.metadata.get('system', {}).get('shebang', {}).get('linkInfo', {}).get('type', '') == 'url':
653
- url = item.metadata['system']['shebang']['linkInfo']['ref']
654
- return item, url, True
655
- else:
656
- return item, '', False
830
+ is_url = False
831
+ url = item.resolved_stream
832
+ if item.metadata.get('system', {}).get('shebang', {}).get('linkInfo', {}).get('type', '') == 'url':
833
+ is_url = True
834
+
835
+ return item, url, is_url, url.startswith('file://')
657
836
 
658
837
  def __file_validation(self, item, downloaded_file):
659
838
  res = False
@@ -688,7 +867,7 @@ class Downloader:
688
867
  """
689
868
  Get a single item's binary data
690
869
  Calling this method will returns the item body itself , an image for example with the proper mimetype.
691
-
870
+
692
871
  :param item: Item entity to download
693
872
  :param save_locally: bool. save to file or return buffer
694
873
  :param local_path: item local folder to save to.
@@ -709,8 +888,7 @@ class Downloader:
709
888
  if save_locally and os.path.isfile(local_filepath):
710
889
  need_to_download = overwrite
711
890
 
712
- item, url, is_url = self.__get_link_source(item=item)
713
- is_local_link = isinstance(url, str) and url.startswith('file://')
891
+ item, url, is_url, is_local_link = self.__get_link_source(item=item)
714
892
 
715
893
  # save as byte stream
716
894
  data = io.BytesIO()
@@ -804,9 +982,11 @@ class Downloader:
804
982
 
805
983
  file_validation = True
806
984
  if not is_url:
807
- file_validation, start_point, chunk_resume = self.__get_next_chunk(item=item,
808
- download_progress=temp_file_path,
809
- chunk_resume=chunk_resume)
985
+ file_validation, start_point, chunk_resume = self.__get_next_chunk(
986
+ item=item,
987
+ download_progress=temp_file_path,
988
+ chunk_resume=chunk_resume
989
+ )
810
990
  if file_validation:
811
991
  shutil.move(temp_file_path, local_filepath)
812
992
  download_done = True
@@ -933,6 +1113,7 @@ class Downloader:
933
1113
  """
934
1114
  :param url:
935
1115
  """
1116
+ response = None
936
1117
 
937
1118
  if url.startswith('file://'):
938
1119
  parsed = urlparse(url)
@@ -953,24 +1134,24 @@ class Downloader:
953
1134
  )
954
1135
 
955
1136
  try:
956
- return io.BufferedReader(io.FileIO(path, 'rb'))
1137
+ response = io.BufferedReader(io.FileIO(path, 'rb'))
957
1138
  except PermissionError as e:
958
1139
  raise PlatformException(
959
1140
  error='403',
960
1141
  message=f'Permission denied accessing file: {url}'
961
1142
  ) from e
962
-
963
- prepared_request = requests.Request(method='GET', url=url).prepare()
964
- with requests.Session() as s:
965
- retry = Retry(
966
- total=3,
967
- read=3,
968
- connect=3,
969
- backoff_factor=1,
970
- )
971
- adapter = HTTPAdapter(max_retries=retry)
972
- s.mount('http://', adapter)
973
- s.mount('https://', adapter)
974
- response = s.send(request=prepared_request, stream=True)
1143
+ else:
1144
+ prepared_request = requests.Request(method='GET', url=url).prepare()
1145
+ with requests.Session() as s:
1146
+ retry = Retry(
1147
+ total=3,
1148
+ read=3,
1149
+ connect=3,
1150
+ backoff_factor=1,
1151
+ )
1152
+ adapter = HTTPAdapter(max_retries=retry)
1153
+ s.mount('http://', adapter)
1154
+ s.mount('https://', adapter)
1155
+ response = s.send(request=prepared_request, stream=True)
975
1156
 
976
1157
  return response
dtlpy/repositories/feature_sets.py CHANGED
@@ -200,7 +200,7 @@ class FeatureSets:
200
200
  """
201
201
 
202
202
  success, response = self._client_api.gen_request(req_type="delete",
203
- path="{}/{}".format(self.URL, feature_set_id))
203
+ path=f"{self.URL}/{feature_set_id}")
204
204
 
205
205
  # check response
206
206
  if success:
@@ -209,6 +209,36 @@ class FeatureSets:
209
209
  else:
210
210
  raise exceptions.PlatformException(response)
211
211
 
212
+ @_api_reference.add(path='/features/set/{id}', method='patch')
213
+ def update(self, feature_set: entities.FeatureSet) -> entities.FeatureSet:
214
+ """
215
+ Update a Feature Set
216
+
217
+ **Prerequisites**: You must be in the role of an *owner* or *developer*.
218
+
219
+ :param dtlpy.entities.FeatureSet feature_set: FeatureSet object
220
+ :return: FeatureSet
221
+ :rtype: dtlpy.entities.FeatureSet
222
+
223
+ **Example**:
224
+
225
+ .. code-block:: python
226
+
227
+ feature_set = dl.feature_sets.update(feature_set=feature_set)
228
+ """
229
+ success, response = self._client_api.gen_request(req_type="patch",
230
+ path=f"{self.URL}/{feature_set.id}",
231
+ json_req=feature_set.to_json())
232
+ if not success:
233
+ raise exceptions.PlatformException(response)
234
+
235
+ logger.debug("feature_set updated successfully")
236
+ # build the updated FeatureSet entity from the response
237
+ feature_set = entities.FeatureSet.from_json(_json=response.json(),
238
+ client_api=self._client_api,
239
+ is_fetched=True)
240
+ return feature_set
241
+
212
242
  def _build_entities_from_response(self, response_items) -> miscellaneous.List[entities.Item]:
213
243
  pool = self._client_api.thread_pools(pool_name='entity.create')
214
244
  jobs = [None for _ in range(len(response_items))]
dtlpy/repositories/recipes.py CHANGED
@@ -42,10 +42,6 @@ class Recipes:
42
42
  @property
43
43
  def project(self) -> entities.Project:
44
44
  if self._project is None:
45
- project = self._client_api.state_io.get('project')
46
- if project is not None:
47
- self._project = entities.Project.from_json(_json=project, client_api=self._client_api)
48
- self._project_id = self._project.id
49
45
  if self._project_id is None:
50
46
  if self._dataset is None:
51
47
  raise exceptions.PlatformException(
@@ -122,7 +118,21 @@ class Recipes:
122
118
  if attributes is None:
123
119
  attributes = list()
124
120
  if project_ids is None:
125
- project_ids = [self.project.id]
121
+ if self._dataset is not None:
122
+ project_ids = [self._dataset.project.id]
123
+ else:
124
+ # get from cache
125
+ project = self._client_api.state_io.get('project')
126
+ if project is not None:
127
+ # build entity from json
128
+ p = entities.Project.from_json(_json=project, client_api=self._client_api)
129
+ project_ids = [p.id]
130
+ else:
131
+ # get from self.project property
132
+ try:
133
+ project_ids = [self.project.id]
134
+ except exceptions.PlatformException:
135
+ raise exceptions.PlatformException('Must provide project_ids')
126
136
  if ontology_ids is None:
127
137
  ontolgies = repositories.Ontologies(client_api=self._client_api,
128
138
  recipe=None)
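With the change above, Recipes.create() resolves project_ids from the bound dataset first, then from the cached project, and only then from the project property, raising 'Must provide project_ids' if none is available. A sketch; the recipe name is illustrative:

    import dtlpy as dl

    dataset = dl.datasets.get(dataset_id='my-dataset-id')  # placeholder id
    recipe = dataset.recipes.create(recipe_name='my-recipe')  # project_ids inferred from dataset.project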
dtlpy/services/api_client.py CHANGED
@@ -830,10 +830,11 @@ class ApiClient:
830
830
  environments = self.environments
831
831
  if environment in environments:
832
832
  logger.warning('Environment exists. Overwriting. env: {}'.format(environment))
833
- if token is None:
834
- token = None
835
- if alias is None:
836
- alias = None
833
+ if alias is not None:
834
+ keys_to_remove = [e for e, v in environments.items() if v.get('alias') == alias]
835
+ for e in keys_to_remove:
836
+ logger.warning('Alias exists. Overwriting. alias: {}'.format(alias))
837
+ environments.pop(e)
837
838
  environments[environment] = {'audience': audience,
838
839
  'client_id': client_id,
839
840
  'auth0_url': auth0_url,
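add_environment() now evicts any stored environment whose alias matches the one being added, replacing the previous no-op token/alias checks. A sketch, assuming the top-level dl.add_environment wrapper; URL and alias are placeholders:

    import dtlpy as dl

    dl.add_environment(
        environment='https://gate.on-prem.example.com/api/v1',  # placeholder gateway URL
        alias='on-prem',
    )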
dtlpy-1.116.6.dist-info/METADATA → dtlpy-1.117.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dtlpy
3
- Version: 1.116.6
3
+ Version: 1.117.6
4
4
  Summary: SDK and CLI for Dataloop platform
5
5
  Home-page: https://github.com/dataloop-ai/dtlpy
6
6
  Author: Dataloop Team
dtlpy-1.116.6.dist-info/RECORD → dtlpy-1.117.6.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
1
- dtlpy/__init__.py,sha256=59ZkyPzyOQJaA9O8ZpyyFAWvof8OxQO2IrmoEN4QA2Y,20593
2
- dtlpy/__version__.py,sha256=qhqfKmFRYTm7tEnTLeiyuBcjOerPrhpw7PACo1jXzlE,20
1
+ dtlpy/__init__.py,sha256=p9qDLEQ01cTj0Xx8FkxJPHSmP7LMHMv8b5-ZWW1FPrU,20625
2
+ dtlpy/__version__.py,sha256=EAXt30NxCM3wPLWcW6lyXBUATt56UfSsHHucdFvuHBo,20
3
3
  dtlpy/exceptions.py,sha256=EQCKs3pwhwZhgMByQN3D3LpWpdxwcKPEEt-bIaDwURM,2871
4
4
  dtlpy/new_instance.py,sha256=XegQav2hzPrPAUzuRFvUIGSPidoK-rbb02Q43NPsIpo,9982
5
5
  dtlpy/assets/__init__.py,sha256=D_hAa6NM8Zoy32sF_9b7m0b7I-BQEyBFg8-9Tg2WOeo,976
@@ -43,7 +43,7 @@ dtlpy/dlp/dlp,sha256=-F0vSCWuSOOtgERAtsPMPyMmzitjhB7Yeftg_PDlDjw,10
43
43
  dtlpy/dlp/dlp.bat,sha256=QOvx8Dlx5dUbCTMpwbhOcAIXL1IWmgVRSboQqDhIn3A,37
44
44
  dtlpy/dlp/dlp.py,sha256=Zv9yoXwNAx4gkED-JiayN-ZkX2dPn4FB0SDx9qc7muo,4404
45
45
  dtlpy/dlp/parser.py,sha256=p-TFaiAU2c3QkI97TXzL2LDR3Eq0hGDFrTc9J2jWLh4,30551
46
- dtlpy/entities/__init__.py,sha256=rAA5GbNpbwa37vdKIe9FtpvIOI1rEHYwj0Da_Mv4Xn4,5018
46
+ dtlpy/entities/__init__.py,sha256=tHndgUHZOaiXJdOY-NTvGoHcJ29EkGzz389sWQPWyaI,5050
47
47
  dtlpy/entities/analytic.py,sha256=6bMG4FD7VIDr3ca8hJ25yd9IyAuzPdaN1lsV4VD7z2Q,13739
48
48
  dtlpy/entities/annotation.py,sha256=0bF-N3ApbUaTWa_cIPNHMGxaWGG0q3lQos6fMDX5mCc,66661
49
49
  dtlpy/entities/annotation_collection.py,sha256=CEYSBHhhDkC0VJdHsBSrA6TgdKGMcKeI3tFM40UJwS8,29838
@@ -63,17 +63,17 @@ dtlpy/entities/dpk.py,sha256=XrK8X8p4Ag6LMjDrDpMstY-h_yTll_sMmKTZT6bLbWE,17923
63
63
  dtlpy/entities/driver.py,sha256=usyWqlM8KxQ-L9uCTSAmPXCu2dU4fordAN8uL0I2wRM,7720
64
64
  dtlpy/entities/execution.py,sha256=uQe535w9OcAoDiNWf96KcpFzUDEUU-DYsUalv5VziyM,13673
65
65
  dtlpy/entities/feature.py,sha256=9fFjD0W57anOVSAVU55ypxN_WTCsWTG03Wkc3cAAj78,3732
66
- dtlpy/entities/feature_set.py,sha256=niw4MkmrDbD_LWQu1X30uE6U4DCzmFhPTaYeZ6VZDB0,4443
66
+ dtlpy/entities/feature_set.py,sha256=fCBEJVyo91SWz9bw8HczduRfgPJkBsAgeT6PPrYfy5k,4600
67
67
  dtlpy/entities/filters.py,sha256=6CK66kkh5LbLjnzKoGe9jsLP8KBfdBRvNumkj-4EtfM,29399
68
68
  dtlpy/entities/gis_item.py,sha256=Uk-wMBxwcHsImjz4qOjP-EyZAohbRzN43kMpCaVjCXU,3982
69
69
  dtlpy/entities/integration.py,sha256=XraOApW9jbT6EdZraRX2In6sMbfNgEGf2V5Um2RCRqA,6001
70
- dtlpy/entities/item.py,sha256=iR8qzk-8h5YuQDR4P9KYIEhKOaYZSVgnVMxS32L-3Hg,34884
70
+ dtlpy/entities/item.py,sha256=JoF3vTlsBUXwsC7b5KCkvDOeGP7Iby--Ww4PJQ5_UF4,35586
71
71
  dtlpy/entities/label.py,sha256=ycDYavIgKhz806plIX-64c07_TeHpDa-V7LnfFVe4Rg,3869
72
72
  dtlpy/entities/links.py,sha256=FAmEwHtsrqKet3c0UHH9u_gHgG6_OwF1-rl4xK7guME,2516
73
73
  dtlpy/entities/message.py,sha256=ApJuaKEqxATpXjNYUjGdYPu3ibQzEMo8-LtJ_4xAcPI,5865
74
74
  dtlpy/entities/model.py,sha256=gFZ_I6Th4KqkWS5X57aVfEoMAGtAhcPlCOXofo9ARUM,27792
75
75
  dtlpy/entities/node.py,sha256=RiCqG659Pb1PZNMewR-F7eNbU5tt713fiZY9xW6-Pes,39199
76
- dtlpy/entities/ontology.py,sha256=R29baDbx_TKdaUJZKVJPee1_sJ-DLwwlQ2AEjExP2Ko,32530
76
+ dtlpy/entities/ontology.py,sha256=qA8XOhHPiZ7fUK2QPBola2xA8SGEdzsgCy5oozgARwc,32534
77
77
  dtlpy/entities/organization.py,sha256=Zm-tTHV82PvYyTNetRRXqvmvzBCbXEwS-gAENf7Zny4,9874
78
78
  dtlpy/entities/package.py,sha256=QSDePHlp4ik19aUE3dAUC7edh0oUUVjzSmMG867avc4,26363
79
79
  dtlpy/entities/package_defaults.py,sha256=wTD7Z7rGYjVy8AcUxTFEnkOkviiJaLVZYvduiUBKNZo,211
@@ -88,7 +88,7 @@ dtlpy/entities/prompt_item.py,sha256=S_cgekiUsAK0OLP_XXbfvpNv7Zr5XT86jCB2w1yyyjQ
88
88
  dtlpy/entities/recipe.py,sha256=SX0T7gw-_9Cs2FZyC_htIxQd7CwDwb2zA3SqB37vymM,11917
89
89
  dtlpy/entities/reflect_dict.py,sha256=2NaSAL-CO0T0FYRYFQlaSpbsoLT2Q18AqdHgQSLX5Y4,3273
90
90
  dtlpy/entities/resource_execution.py,sha256=1HuVV__U4jAUOtOkWlWImnM3Yts8qxMSAkMA9sBhArY,5033
91
- dtlpy/entities/service.py,sha256=xG4Rj-kyhJNreesJgq7_Php7vTYztCvFVXifWbCog7s,33479
91
+ dtlpy/entities/service.py,sha256=sIWKx-b9JbxHGmZmgeBv5eArQrI9HB00C1Zmj7ejTc0,33925
92
92
  dtlpy/entities/service_driver.py,sha256=N3fL_xTPLu75UBFQZO5Plxx2kpED-UIILxTYbD58rzQ,3917
93
93
  dtlpy/entities/setting.py,sha256=uXagJHtcCR3nJYClR_AUGZjz_kx3TejPcUZ8ginHFIA,8561
94
94
  dtlpy/entities/task.py,sha256=ajVIkB-3Aqm9Udn87ITvIsGwduyCTtGeqV-FjSYtZKg,19605
@@ -149,7 +149,7 @@ dtlpy/miscellaneous/list_print.py,sha256=fBGTMXFUwDG8DD4W6HyR8BTGtbTckLf4W09quNR
149
149
  dtlpy/miscellaneous/zipping.py,sha256=JplTc8UDFvO8WaD5vKuumVLN0lU_-GtHoE0doWKtmKg,5383
150
150
  dtlpy/ml/__init__.py,sha256=vPkyXpc9kcWWZ_PxyPEOsjKBJdEbowLkZr8FZIb_OBM,799
151
151
  dtlpy/ml/base_feature_extractor_adapter.py,sha256=iiEGYAx0Rdn4K46H_FlKrAv3ebTXHSxNVAmio0BxhaI,1178
152
- dtlpy/ml/base_model_adapter.py,sha256=YQrO2mG2FiC3WobRUnaLqlq-GJEHvmBSyB3nuQGaU9o,62822
152
+ dtlpy/ml/base_model_adapter.py,sha256=DS5J0vdWlk2W46sDIlFq1xDapgoz8riNhcWm0itJ7QY,64077
153
153
  dtlpy/ml/metrics.py,sha256=BG2E-1Mvjv2e2No9mIJKVmvzqBvLqytKcw3hA7wVUNc,20037
154
154
  dtlpy/ml/predictions_utils.py,sha256=He_84U14oS2Ss7T_-Zj5GDiBZwS-GjMPURUh7u7DjF8,12484
155
155
  dtlpy/ml/summary_writer.py,sha256=dehDi8zmGC1sAGyy_3cpSWGXoGQSiQd7bL_Thoo8yIs,2784
@@ -166,12 +166,12 @@ dtlpy/repositories/collections.py,sha256=lA4T7irQ_dtI_bcVxrHHikClcglp8ltsoPcLwHy
166
166
  dtlpy/repositories/commands.py,sha256=aismmQOkZbpo9vN1fVYTHXrsJoQNrN7wiANUCiUWUmY,5717
167
167
  dtlpy/repositories/compositions.py,sha256=H417BvlQAiWr5NH2eANFke6CfEO5o7DSvapYpf7v5Hk,2150
168
168
  dtlpy/repositories/computes.py,sha256=K1a28vJn3tB-h_Xvl8blr-lOytfz3MvyzFzauCja8UA,16936
169
- dtlpy/repositories/datasets.py,sha256=W-nuH5oX0PGu3lj3-Z0EJpZy10wt-eEMKp0EZ3N4gYo,69846
170
- dtlpy/repositories/downloader.py,sha256=_GCYBtGR7Rkd0MlLSt7OFOAIck92kPuXmFJfKo_QCWw,49103
169
+ dtlpy/repositories/datasets.py,sha256=1-ud7Ulc8z5IwQezjrW4C6V24xVUhR0U_dgix76zDYw,72778
170
+ dtlpy/repositories/downloader.py,sha256=EkCscU8QHa6H0-t17laFZn49Up-wNHbFd2DxVIqX8Fc,56209
171
171
  dtlpy/repositories/dpks.py,sha256=Cu8pqFZr6MlrdidjysQT_X1hyKaL5YNUn81puOM5FX0,18508
172
172
  dtlpy/repositories/drivers.py,sha256=z9qu4I2lwa0aujkKxj0bvn71Zzs8U8byqSx8S9DAIQw,19553
173
173
  dtlpy/repositories/executions.py,sha256=BuFv7J6U2Q7om4OlC0q6tnk-1Vcw5m0adfR9a5Mj98Y,32361
174
- dtlpy/repositories/feature_sets.py,sha256=UowMDAl_CRefRB5oZzubnsjU_OFgiPPdQXn8q2j4Kuw,9666
174
+ dtlpy/repositories/feature_sets.py,sha256=JIn1tFn8I50OPNRjKvEpgdQNcYKIGYIPjm-j-PR7uaQ,10874
175
175
  dtlpy/repositories/features.py,sha256=SNmECqKSfHlNgjjG_RlX-GAU3udYN9_ZYOe8mFNy010,10671
176
176
  dtlpy/repositories/integrations.py,sha256=Y5c37fQCaIkw1p5jPEbAqytgRVXuqe771eHC1hNDE7A,19491
177
177
  dtlpy/repositories/items.py,sha256=u2Vg0jOTZ9EhV1sPJdAeIUyfqBRv63Gl-IXaMlon8PM,40028
@@ -184,7 +184,7 @@ dtlpy/repositories/packages.py,sha256=QhkXMZkpseCt0pDropJuqoHJL0RMa5plk8AN0V3w6N
184
184
  dtlpy/repositories/pipeline_executions.py,sha256=sEC03bu5DsHc554z3xDbMCP529rhfADcktXgWkswEwI,17281
185
185
  dtlpy/repositories/pipelines.py,sha256=5qosyxLFgNbcmL7uoTr9klAj1VM-7mWvsOvngbUU1Qk,24320
186
186
  dtlpy/repositories/projects.py,sha256=TKLCuL7Inlv4GwgcQcuXkPQtgacfrXYjsTQng8nPC7Y,21623
187
- dtlpy/repositories/recipes.py,sha256=ZtH9s3BBLCpzlx59onqvrw3PvKYCWEM7payQbAY9tgA,16677
187
+ dtlpy/repositories/recipes.py,sha256=q1FMk4kBPzBS-QIbkxeSsMcAJmYuS7gpYL8t3XIBWII,17117
188
188
  dtlpy/repositories/resource_executions.py,sha256=PyzsbdJxz6jf17Gx13GZmqdu6tZo3TTVv-DypnJ_sY0,5374
189
189
  dtlpy/repositories/schema.py,sha256=kTKDrbwm7BfQnBAK81LpAl9ChNFdyUweSLNazlJJhjk,3953
190
190
  dtlpy/repositories/service_drivers.py,sha256=rxbhLUtT7TCbkVxH4HJtFT9ywcikByPFdX7_4kiTiK0,6766
@@ -198,7 +198,7 @@ dtlpy/repositories/uploader.py,sha256=I9mP-Ikj0zUOdMTf-7FN_huHWXYeWc8gzVRpfUPAXj
198
198
  dtlpy/repositories/webhooks.py,sha256=IIpxOJ-7KeQp1TY9aJZz-FuycSjAoYx0TDk8z86KAK8,9033
199
199
  dtlpy/services/__init__.py,sha256=VfVJy2otIrDra6i7Sepjyez2ujiE6171ChQZp-YgxsM,904
200
200
  dtlpy/services/aihttp_retry.py,sha256=tgntZsAY0dW9v08rkjX1T5BLNDdDd8svtgn7nH8DSGU,5022
201
- dtlpy/services/api_client.py,sha256=vTysmEqXMEyEwAKaQK9JGILkHfrQNJFtZ60WEqQG8QQ,71728
201
+ dtlpy/services/api_client.py,sha256=MbpTsGVgFonv04pWCfYkOP0shM3LcfCOrucVEXvNn4M,71907
202
202
  dtlpy/services/api_reference.py,sha256=cW-B3eoi9Xs3AwI87_Kr6GV_E6HPoC73aETFaGz3A-0,1515
203
203
  dtlpy/services/async_utils.py,sha256=kaYHTPw0Lg8PeJJq8whPyzrBYkzD7offs5hsKRZXJm8,3960
204
204
  dtlpy/services/calls_counter.py,sha256=gr0io5rIsO5-7Cgc8neA1vK8kUtYhgFPmDQ2jXtiZZs,1036
@@ -226,14 +226,14 @@ dtlpy/utilities/reports/report.py,sha256=3nEsNnIWmdPEsd21nN8vMMgaZVcPKn9iawKTTeO
226
226
  dtlpy/utilities/videos/__init__.py,sha256=SV3w51vfPuGBxaMeNemx6qEMHw_C4lLpWNGXMvdsKSY,734
227
227
  dtlpy/utilities/videos/video_player.py,sha256=LCxg0EZ_DeuwcT7U_r7MRC6Q19s0xdFb7x5Gk39PRms,24072
228
228
  dtlpy/utilities/videos/videos.py,sha256=Dj916B4TQRIhI7HZVevl3foFrCsPp0eeWwvGbgX3-_A,21875
229
- dtlpy-1.116.6.data/scripts/dlp,sha256=-F0vSCWuSOOtgERAtsPMPyMmzitjhB7Yeftg_PDlDjw,10
230
- dtlpy-1.116.6.data/scripts/dlp.bat,sha256=QOvx8Dlx5dUbCTMpwbhOcAIXL1IWmgVRSboQqDhIn3A,37
231
- dtlpy-1.116.6.data/scripts/dlp.py,sha256=ZpfJvYE1_OTSorEYBphqTOutnHSb5TqOXh0y_mUCTJs,4393
232
- dtlpy-1.116.6.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
229
+ dtlpy-1.117.6.data/scripts/dlp,sha256=-F0vSCWuSOOtgERAtsPMPyMmzitjhB7Yeftg_PDlDjw,10
230
+ dtlpy-1.117.6.data/scripts/dlp.bat,sha256=QOvx8Dlx5dUbCTMpwbhOcAIXL1IWmgVRSboQqDhIn3A,37
231
+ dtlpy-1.117.6.data/scripts/dlp.py,sha256=ZpfJvYE1_OTSorEYBphqTOutnHSb5TqOXh0y_mUCTJs,4393
232
+ dtlpy-1.117.6.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
233
233
  tests/features/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
234
234
  tests/features/environment.py,sha256=FjOLwKvQx44TNvIidvoQaTXxvoEaSFsXQ9xkqkskkdo,18933
235
- dtlpy-1.116.6.dist-info/METADATA,sha256=a4Z5q1gGBk5e-PnmYqRC6FfPNynDl1N8LqbMm0Tmawo,5835
236
- dtlpy-1.116.6.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
237
- dtlpy-1.116.6.dist-info/entry_points.txt,sha256=C4PyKthCs_no88HU39eioO68oei64STYXC2ooGZTc4Y,43
238
- dtlpy-1.116.6.dist-info/top_level.txt,sha256=ZWuLmQGUOtWAdgTf4Fbx884w1o0vBYq9dEc1zLv9Mig,12
239
- dtlpy-1.116.6.dist-info/RECORD,,
235
+ dtlpy-1.117.6.dist-info/METADATA,sha256=d7p2fvvyIiNWCqPQP7AYTQEBw78AgqxXGr67f7G5o68,5835
236
+ dtlpy-1.117.6.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
237
+ dtlpy-1.117.6.dist-info/entry_points.txt,sha256=C4PyKthCs_no88HU39eioO68oei64STYXC2ooGZTc4Y,43
238
+ dtlpy-1.117.6.dist-info/top_level.txt,sha256=ZWuLmQGUOtWAdgTf4Fbx884w1o0vBYq9dEc1zLv9Mig,12
239
+ dtlpy-1.117.6.dist-info/RECORD,,