label-studio-sdk 1.0.7__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of label-studio-sdk might be problematic.

Files changed (206)
  1. label_studio_sdk/__init__.py +26 -7
  2. label_studio_sdk/_extensions/label_studio_tools/core/label_config.py +13 -4
  3. label_studio_sdk/_extensions/label_studio_tools/core/utils/io.py +16 -4
  4. label_studio_sdk/_extensions/label_studio_tools/core/utils/json_schema.py +86 -0
  5. label_studio_sdk/_extensions/pager_ext.py +8 -0
  6. label_studio_sdk/_legacy/schema/label_config_schema.json +42 -11
  7. label_studio_sdk/actions/client.py +91 -40
  8. label_studio_sdk/actions/types/actions_create_request_filters.py +14 -24
  9. label_studio_sdk/actions/types/actions_create_request_filters_items_item.py +16 -26
  10. label_studio_sdk/actions/types/actions_create_request_filters_items_item_value.py +3 -1
  11. label_studio_sdk/actions/types/actions_create_request_selected_items.py +1 -2
  12. label_studio_sdk/actions/types/actions_create_request_selected_items_excluded.py +15 -25
  13. label_studio_sdk/actions/types/actions_create_request_selected_items_included.py +15 -25
  14. label_studio_sdk/annotations/__init__.py +2 -2
  15. label_studio_sdk/annotations/client.py +278 -104
  16. label_studio_sdk/annotations/types/__init__.py +2 -1
  17. label_studio_sdk/annotations/types/annotations_create_bulk_request_selected_items.py +34 -0
  18. label_studio_sdk/annotations/types/annotations_create_bulk_response_item.py +11 -21
  19. label_studio_sdk/base_client.py +46 -27
  20. label_studio_sdk/client.py +1 -0
  21. label_studio_sdk/comments/client.py +190 -44
  22. label_studio_sdk/converter/converter.py +66 -18
  23. label_studio_sdk/converter/imports/yolo.py +1 -1
  24. label_studio_sdk/converter/utils.py +3 -2
  25. label_studio_sdk/core/__init__.py +21 -4
  26. label_studio_sdk/core/client_wrapper.py +9 -10
  27. label_studio_sdk/core/file.py +37 -8
  28. label_studio_sdk/core/http_client.py +52 -28
  29. label_studio_sdk/core/jsonable_encoder.py +33 -31
  30. label_studio_sdk/core/pagination.py +5 -4
  31. label_studio_sdk/core/pydantic_utilities.py +272 -4
  32. label_studio_sdk/core/query_encoder.py +38 -13
  33. label_studio_sdk/core/request_options.py +3 -0
  34. label_studio_sdk/core/serialization.py +272 -0
  35. label_studio_sdk/errors/bad_request_error.py +2 -3
  36. label_studio_sdk/export_storage/azure/client.py +228 -58
  37. label_studio_sdk/export_storage/azure/types/azure_create_response.py +19 -29
  38. label_studio_sdk/export_storage/azure/types/azure_update_response.py +19 -29
  39. label_studio_sdk/export_storage/client.py +48 -18
  40. label_studio_sdk/export_storage/gcs/client.py +228 -58
  41. label_studio_sdk/export_storage/gcs/types/gcs_create_response.py +19 -29
  42. label_studio_sdk/export_storage/gcs/types/gcs_update_response.py +19 -29
  43. label_studio_sdk/export_storage/local/client.py +222 -56
  44. label_studio_sdk/export_storage/local/types/local_create_response.py +17 -27
  45. label_studio_sdk/export_storage/local/types/local_update_response.py +17 -27
  46. label_studio_sdk/export_storage/redis/client.py +228 -58
  47. label_studio_sdk/export_storage/redis/types/redis_create_response.py +20 -30
  48. label_studio_sdk/export_storage/redis/types/redis_update_response.py +20 -30
  49. label_studio_sdk/export_storage/s3/client.py +228 -58
  50. label_studio_sdk/export_storage/s3/types/s3create_response.py +27 -35
  51. label_studio_sdk/export_storage/s3/types/s3update_response.py +27 -35
  52. label_studio_sdk/export_storage/s3s/client.py +187 -43
  53. label_studio_sdk/export_storage/types/export_storage_list_types_response_item.py +11 -21
  54. label_studio_sdk/files/client.py +172 -56
  55. label_studio_sdk/import_storage/azure/client.py +223 -53
  56. label_studio_sdk/import_storage/azure/types/azure_create_response.py +22 -32
  57. label_studio_sdk/import_storage/azure/types/azure_update_response.py +22 -32
  58. label_studio_sdk/import_storage/client.py +48 -18
  59. label_studio_sdk/import_storage/gcs/client.py +223 -53
  60. label_studio_sdk/import_storage/gcs/types/gcs_create_response.py +22 -32
  61. label_studio_sdk/import_storage/gcs/types/gcs_update_response.py +22 -32
  62. label_studio_sdk/import_storage/local/client.py +223 -53
  63. label_studio_sdk/import_storage/local/types/local_create_response.py +17 -27
  64. label_studio_sdk/import_storage/local/types/local_update_response.py +17 -27
  65. label_studio_sdk/import_storage/redis/client.py +223 -53
  66. label_studio_sdk/import_storage/redis/types/redis_create_response.py +20 -30
  67. label_studio_sdk/import_storage/redis/types/redis_update_response.py +20 -30
  68. label_studio_sdk/import_storage/s3/client.py +223 -53
  69. label_studio_sdk/import_storage/s3/types/s3create_response.py +31 -39
  70. label_studio_sdk/import_storage/s3/types/s3update_response.py +31 -39
  71. label_studio_sdk/import_storage/s3s/client.py +222 -52
  72. label_studio_sdk/import_storage/types/import_storage_list_types_response_item.py +11 -21
  73. label_studio_sdk/label_interface/control_tags.py +205 -10
  74. label_studio_sdk/label_interface/interface.py +80 -6
  75. label_studio_sdk/label_interface/region.py +1 -10
  76. label_studio_sdk/ml/client.py +280 -78
  77. label_studio_sdk/ml/types/ml_create_response.py +21 -31
  78. label_studio_sdk/ml/types/ml_update_response.py +21 -31
  79. label_studio_sdk/model_providers/client.py +656 -21
  80. label_studio_sdk/predictions/client.py +247 -101
  81. label_studio_sdk/projects/__init__.py +3 -0
  82. label_studio_sdk/projects/client.py +309 -115
  83. label_studio_sdk/projects/client_ext.py +16 -0
  84. label_studio_sdk/projects/exports/__init__.py +3 -0
  85. label_studio_sdk/projects/exports/client.py +447 -296
  86. label_studio_sdk/projects/exports/client_ext.py +134 -0
  87. label_studio_sdk/projects/exports/types/__init__.py +6 -0
  88. label_studio_sdk/projects/exports/types/exports_convert_response.py +24 -0
  89. label_studio_sdk/projects/exports/types/exports_list_formats_response_item.py +44 -0
  90. label_studio_sdk/projects/types/projects_create_response.py +29 -34
  91. label_studio_sdk/projects/types/projects_import_tasks_response.py +19 -29
  92. label_studio_sdk/projects/types/projects_list_response.py +11 -21
  93. label_studio_sdk/projects/types/projects_update_response.py +24 -34
  94. label_studio_sdk/prompts/client.py +309 -92
  95. label_studio_sdk/prompts/indicators/client.py +67 -23
  96. label_studio_sdk/prompts/runs/client.py +95 -40
  97. label_studio_sdk/prompts/types/prompts_batch_failed_predictions_request_failed_predictions_item.py +14 -24
  98. label_studio_sdk/prompts/types/prompts_batch_failed_predictions_response.py +11 -21
  99. label_studio_sdk/prompts/types/prompts_batch_predictions_request_results_item.py +26 -29
  100. label_studio_sdk/prompts/types/prompts_batch_predictions_response.py +11 -21
  101. label_studio_sdk/prompts/versions/client.py +389 -75
  102. label_studio_sdk/tasks/client.py +263 -90
  103. label_studio_sdk/tasks/types/tasks_list_response.py +15 -25
  104. label_studio_sdk/types/__init__.py +16 -6
  105. label_studio_sdk/types/annotation.py +29 -38
  106. label_studio_sdk/types/annotation_filter_options.py +14 -24
  107. label_studio_sdk/types/annotations_dm_field.py +30 -39
  108. label_studio_sdk/types/azure_blob_export_storage.py +28 -37
  109. label_studio_sdk/types/azure_blob_import_storage.py +28 -37
  110. label_studio_sdk/types/base_task.py +30 -39
  111. label_studio_sdk/types/base_task_updated_by.py +3 -1
  112. label_studio_sdk/types/base_user.py +14 -21
  113. label_studio_sdk/types/comment.py +12 -21
  114. label_studio_sdk/types/comment_created_by.py +1 -1
  115. label_studio_sdk/types/converted_format.py +12 -22
  116. label_studio_sdk/types/data_manager_task_serializer.py +31 -40
  117. label_studio_sdk/types/data_manager_task_serializer_annotators_item.py +1 -1
  118. label_studio_sdk/types/data_manager_task_serializer_drafts_item.py +13 -22
  119. label_studio_sdk/types/data_manager_task_serializer_predictions_item.py +15 -24
  120. label_studio_sdk/types/export.py +17 -26
  121. label_studio_sdk/types/export_format.py +25 -0
  122. label_studio_sdk/types/export_snapshot.py +45 -0
  123. label_studio_sdk/types/export_snapshot_status.py +5 -0
  124. label_studio_sdk/types/file_upload.py +11 -21
  125. label_studio_sdk/types/filter.py +16 -26
  126. label_studio_sdk/types/filter_group.py +12 -22
  127. label_studio_sdk/types/gcs_export_storage.py +28 -37
  128. label_studio_sdk/types/gcs_import_storage.py +28 -37
  129. label_studio_sdk/types/inference_run.py +14 -23
  130. label_studio_sdk/types/inference_run_cost_estimate.py +47 -0
  131. label_studio_sdk/types/inference_run_created_by.py +1 -1
  132. label_studio_sdk/types/inference_run_organization.py +1 -1
  133. label_studio_sdk/types/key_indicator_value.py +12 -22
  134. label_studio_sdk/types/key_indicators.py +0 -1
  135. label_studio_sdk/types/key_indicators_item.py +15 -25
  136. label_studio_sdk/types/key_indicators_item_additional_kpis_item.py +13 -23
  137. label_studio_sdk/types/key_indicators_item_extra_kpis_item.py +13 -23
  138. label_studio_sdk/types/local_files_export_storage.py +25 -34
  139. label_studio_sdk/types/local_files_import_storage.py +24 -33
  140. label_studio_sdk/types/ml_backend.py +23 -32
  141. label_studio_sdk/types/model_provider_connection.py +47 -26
  142. label_studio_sdk/types/model_provider_connection_budget_reset_period.py +5 -0
  143. label_studio_sdk/types/model_provider_connection_created_by.py +1 -1
  144. label_studio_sdk/types/model_provider_connection_organization.py +1 -1
  145. label_studio_sdk/types/model_provider_connection_provider.py +3 -1
  146. label_studio_sdk/types/prediction.py +21 -30
  147. label_studio_sdk/types/project.py +48 -55
  148. label_studio_sdk/types/project_import.py +21 -30
  149. label_studio_sdk/types/project_label_config.py +12 -22
  150. label_studio_sdk/types/prompt.py +24 -32
  151. label_studio_sdk/types/prompt_associated_projects_item.py +6 -0
  152. label_studio_sdk/types/prompt_associated_projects_item_id.py +20 -0
  153. label_studio_sdk/types/prompt_created_by.py +1 -1
  154. label_studio_sdk/types/prompt_organization.py +1 -1
  155. label_studio_sdk/types/prompt_version.py +13 -22
  156. label_studio_sdk/types/prompt_version_created_by.py +1 -1
  157. label_studio_sdk/types/prompt_version_organization.py +1 -1
  158. label_studio_sdk/types/prompt_version_provider.py +3 -1
  159. label_studio_sdk/types/redis_export_storage.py +29 -38
  160. label_studio_sdk/types/redis_import_storage.py +28 -37
  161. label_studio_sdk/types/refined_prompt_response.py +19 -29
  162. label_studio_sdk/types/s3export_storage.py +36 -43
  163. label_studio_sdk/types/s3import_storage.py +37 -44
  164. label_studio_sdk/types/s3s_export_storage.py +26 -33
  165. label_studio_sdk/types/s3s_import_storage.py +35 -42
  166. label_studio_sdk/types/serialization_option.py +12 -22
  167. label_studio_sdk/types/serialization_options.py +18 -28
  168. label_studio_sdk/types/task.py +46 -48
  169. label_studio_sdk/types/task_annotators_item.py +1 -1
  170. label_studio_sdk/types/task_comment_authors_item.py +5 -0
  171. label_studio_sdk/types/task_filter_options.py +15 -25
  172. label_studio_sdk/types/user_simple.py +11 -21
  173. label_studio_sdk/types/view.py +16 -26
  174. label_studio_sdk/types/webhook.py +19 -28
  175. label_studio_sdk/types/webhook_serializer_for_update.py +19 -28
  176. label_studio_sdk/types/workspace.py +22 -31
  177. label_studio_sdk/users/client.py +257 -63
  178. label_studio_sdk/users/types/users_get_token_response.py +12 -22
  179. label_studio_sdk/users/types/users_reset_token_response.py +12 -22
  180. label_studio_sdk/version.py +0 -1
  181. label_studio_sdk/versions/__init__.py +5 -0
  182. label_studio_sdk/versions/client.py +112 -0
  183. label_studio_sdk/versions/types/__init__.py +6 -0
  184. label_studio_sdk/versions/types/versions_get_response.py +73 -0
  185. label_studio_sdk/versions/types/versions_get_response_edition.py +5 -0
  186. label_studio_sdk/views/client.py +219 -52
  187. label_studio_sdk/views/types/views_create_request_data.py +13 -23
  188. label_studio_sdk/views/types/views_create_request_data_filters.py +14 -24
  189. label_studio_sdk/views/types/views_create_request_data_filters_items_item.py +16 -26
  190. label_studio_sdk/views/types/views_create_request_data_filters_items_item_value.py +3 -1
  191. label_studio_sdk/views/types/views_update_request_data.py +13 -23
  192. label_studio_sdk/views/types/views_update_request_data_filters.py +14 -24
  193. label_studio_sdk/views/types/views_update_request_data_filters_items_item.py +16 -26
  194. label_studio_sdk/views/types/views_update_request_data_filters_items_item_value.py +3 -1
  195. label_studio_sdk/webhooks/client.py +191 -61
  196. label_studio_sdk/workspaces/client.py +164 -41
  197. label_studio_sdk/workspaces/members/client.py +109 -31
  198. label_studio_sdk/workspaces/members/types/members_create_response.py +12 -22
  199. label_studio_sdk/workspaces/members/types/members_list_response_item.py +12 -22
  200. {label_studio_sdk-1.0.7.dist-info → label_studio_sdk-1.0.10.dist-info}/METADATA +9 -5
  201. {label_studio_sdk-1.0.7.dist-info → label_studio_sdk-1.0.10.dist-info}/RECORD +203 -186
  202. {label_studio_sdk-1.0.7.dist-info → label_studio_sdk-1.0.10.dist-info}/WHEEL +1 -1
  203. label_studio_sdk/types/export_convert.py +0 -32
  204. label_studio_sdk/types/export_create.py +0 -54
  205. label_studio_sdk/types/export_create_status.py +0 -5
  206. {label_studio_sdk-1.0.7.dist-info → label_studio_sdk-1.0.10.dist-info}/LICENSE +0 -0
@@ -10,7 +10,6 @@ from copy import deepcopy
 from datetime import datetime
 from enum import Enum
 from glob import glob
-from operator import itemgetter
 from shutil import copy2
 from typing import Optional
 
@@ -34,6 +33,7 @@ from label_studio_sdk.converter.utils import (
     convert_annotation_to_yolo,
     convert_annotation_to_yolo_obb,
 )
+from label_studio_sdk._extensions.label_studio_tools.core.utils.io import get_local_path
 
 logger = logging.getLogger(__name__)
 
@@ -56,6 +56,9 @@ class Format(Enum):
     YOLO = 11
     YOLO_OBB = 12
     CSV_OLD = 13
+    YOLO_WITH_IMAGES = 14
+    COCO_WITH_IMAGES = 15
+    YOLO_OBB_WITH_IMAGES = 16
 
     def __str__(self):
         return self.name
@@ -107,6 +110,12 @@ class Converter(object):
             "link": "https://labelstud.io/guide/export.html#COCO",
             "tags": ["image segmentation", "object detection"],
         },
+        Format.COCO_WITH_IMAGES: {
+            "title": "COCO with Images",
+            "description": "COCO format with images downloaded.",
+            "link": "https://labelstud.io/guide/export.html#COCO",
+            "tags": ["image segmentation", "object detection"],
+        },
         Format.VOC: {
             "title": "Pascal VOC XML",
             "description": "Popular XML format used for object detection and polygon image segmentation tasks.",
@@ -120,6 +129,12 @@ class Converter(object):
             "link": "https://labelstud.io/guide/export.html#YOLO",
             "tags": ["image segmentation", "object detection"],
         },
+        Format.YOLO_WITH_IMAGES: {
+            "title": "YOLO with Images",
+            "description": "YOLO format with images downloaded.",
+            "link": "https://labelstud.io/guide/export.html#YOLO",
+            "tags": ["image segmentation", "object detection"],
+        },
         Format.YOLO_OBB: {
             "title": "YOLOv8 OBB",
             "description": "Popular TXT format is created for each image file. Each txt file contains annotations for "
@@ -128,6 +143,12 @@ class Converter(object):
             "link": "https://labelstud.io/guide/export.html#YOLO",
             "tags": ["image segmentation", "object detection"],
         },
+        Format.YOLO_OBB_WITH_IMAGES: {
+            "title": "YOLOv8 OBB with Images",
+            "description": "YOLOv8 OBB format with images downloaded.",
+            "link": "https://labelstud.io/guide/export.html#YOLO",
+            "tags": ["image segmentation", "object detection"],
+        },
         Format.BRUSH_TO_NUMPY: {
             "title": "Brush labels to NumPy",
             "description": "Export your brush labels as NumPy 2d arrays. Each label outputs as one image.",
@@ -159,6 +180,8 @@ class Converter(object):
         output_tags=None,
         upload_dir=None,
         download_resources=True,
+        access_token=None,
+        hostname=None,
     ):
         """Initialize Label Studio Converter for Exports
 
@@ -172,6 +195,8 @@ class Converter(object):
         self.upload_dir = upload_dir
         self.download_resources = download_resources
         self._schema = None
+        self.access_token = access_token
+        self.hostname = hostname
 
         if isinstance(config, dict):
             self._schema = config
@@ -217,21 +242,23 @@ class Converter(object):
             )
         elif format == Format.CONLL2003:
             self.convert_to_conll2003(input_data, output_data, is_dir=is_dir)
-        elif format == Format.COCO:
+        elif format in [Format.COCO, Format.COCO_WITH_IMAGES]:
             image_dir = kwargs.get("image_dir")
+            self.download_resources = format == Format.COCO_WITH_IMAGES
             self.convert_to_coco(
                 input_data, output_data, output_image_dir=image_dir, is_dir=is_dir
             )
-        elif format == Format.YOLO or format == Format.YOLO_OBB:
+        elif format in [Format.YOLO, Format.YOLO_OBB, Format.YOLO_OBB_WITH_IMAGES, Format.YOLO_WITH_IMAGES]:
             image_dir = kwargs.get("image_dir")
             label_dir = kwargs.get("label_dir")
+            self.download_resources = format in [Format.YOLO_WITH_IMAGES, Format.YOLO_OBB_WITH_IMAGES]
             self.convert_to_yolo(
                 input_data,
                 output_data,
                 output_image_dir=image_dir,
                 output_label_dir=label_dir,
                 is_dir=is_dir,
-                is_obb=(format == Format.YOLO_OBB),
+                is_obb=(format in [Format.YOLO_OBB, Format.YOLO_OBB_WITH_IMAGES]),
             )
         elif format == Format.VOC:
             image_dir = kwargs.get("image_dir")
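
For illustration, a minimal sketch of how the new *_WITH_IMAGES formats and the access_token/hostname parameters introduced above might be used. The labeling config, paths, token, and hostname are placeholders, and the Converter and convert() signatures are assumed to match the hunks shown here.

from label_studio_sdk.converter.converter import Converter, Format

converter = Converter(
    config="<View>...</View>",            # labeling config XML (or a parsed schema dict)
    project_dir=None,
    access_token="YOUR_API_TOKEN",        # new: lets the converter download task media
    hostname="https://app.example.com",   # new: Label Studio base URL for those downloads
)
# YOLO_WITH_IMAGES behaves like YOLO but also downloads the referenced images.
converter.convert(
    "export.json",
    "output_dir",
    format=Format.YOLO_WITH_IMAGES,
    is_dir=False,
)
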
@@ -282,12 +309,16 @@ class Converter(object):
         for name, info in self._schema.items():
             if output_tags is not None and name not in output_tags:
                 continue
-            data_keys |= set(map(itemgetter("value"), info["inputs"]))
+            for input_tag in info["inputs"]:
+                for value_key_name in ["value", "valueList"]:
+                    if value_key_name in input_tag:
+                        data_keys.add(input_tag[value_key_name])
             output_tag_names.append(name)
 
         return list(data_keys), output_tag_names
 
     def _get_supported_formats(self):
+        is_mig = False
         if len(self._data_keys) > 1:
             return [
                 Format.JSON.name,
@@ -300,6 +331,8 @@ class Converter(object):
         for info in self._schema.values():
             output_tag_types.add(info["type"])
             for input_tag in info["inputs"]:
+                if input_tag.get("valueList"):
+                    is_mig = True
                 if input_tag["type"] == "Text" and input_tag.get("valueType") == "url":
                     logger.error('valueType="url" are not supported for text inputs')
                     continue
@@ -308,7 +341,7 @@ class Converter(object):
         all_formats = [f.name for f in Format]
         if not ("Text" in input_tag_types and "Labels" in output_tag_types):
             all_formats.remove(Format.CONLL2003.name)
-        if not (
+        if is_mig or not (
            "Image" in input_tag_types
            and (
                "RectangleLabels" in output_tag_types
@@ -317,7 +350,7 @@ class Converter(object):
            )
        ):
            all_formats.remove(Format.VOC.name)
-        if not (
+        if is_mig or not (
            "Image" in input_tag_types
            and (
                "RectangleLabels" in output_tag_types
@@ -329,7 +362,9 @@ class Converter(object):
            and "Labels" in output_tag_types
        ):
            all_formats.remove(Format.COCO.name)
+            all_formats.remove(Format.COCO_WITH_IMAGES.name)
            all_formats.remove(Format.YOLO.name)
+            all_formats.remove(Format.YOLO_WITH_IMAGES.name)
        if not (
            "Image" in input_tag_types
            and (
@@ -346,8 +381,9 @@ class Converter(object):
            and "TextArea" in output_tag_types
        ):
            all_formats.remove(Format.ASR_MANIFEST.name)
-        if 'Video' in input_tag_types and 'TimelineLabels' in output_tag_types:
+        if is_mig or ('Video' in input_tag_types and 'TimelineLabels' in output_tag_types):
            all_formats.remove(Format.YOLO_OBB.name)
+            all_formats.remove(Format.YOLO_OBB_WITH_IMAGES.name)
 
        return all_formats
 
@@ -588,20 +624,25 @@ class Converter(object):
        )
        for item_idx, item in enumerate(item_iterator):
            image_path = item["input"][data_key]
+            task_id = item["id"]
            image_id = len(images)
            width = None
            height = None
            # download all images of the dataset, including the ones without annotations
            if not os.path.exists(image_path):
                try:
-                    image_path = download(
-                        image_path,
-                        output_image_dir,
+                    image_path = get_local_path(
+                        url=image_path,
+                        hostname=self.hostname,
                        project_dir=self.project_dir,
-                        return_relative_path=True,
-                        upload_dir=self.upload_dir,
+                        image_dir=self.upload_dir,
+                        cache_dir=output_image_dir,
                        download_resources=self.download_resources,
+                        access_token=self.access_token,
+                        task_id=task_id,
                    )
+                    # make path relative to output_image_dir
+                    image_path = os.path.relpath(image_path, output_dir)
                except:
                    logger.info(
                        "Unable to download {image_path}. The image of {item} will be skipped".format(
@@ -796,19 +837,24 @@ class Converter(object):
        image_paths = [image_paths] if isinstance(image_paths, str) else image_paths
        # download image(s)
        image_path = None
+        task_id = item["id"]
        # TODO: for multi-page annotation, this code won't produce correct relationships between page and annotated shapes
        # fixing the issue in RND-84
        for image_path in reversed(image_paths):
            if not os.path.exists(image_path):
                try:
-                    image_path = download(
-                        image_path,
-                        output_image_dir,
+                    image_path = get_local_path(
+                        url=image_path,
+                        hostname=self.hostname,
                        project_dir=self.project_dir,
-                        return_relative_path=True,
-                        upload_dir=self.upload_dir,
+                        image_dir=self.upload_dir,
+                        cache_dir=output_image_dir,
                        download_resources=self.download_resources,
+                        access_token=self.access_token,
+                        task_id=task_id,
                    )
+                    # make path relative to output_image_dir
+                    image_path = os.path.relpath(image_path, output_dir)
                except:
                    logger.info(
                        "Unable to download {image_path}. The item {item} will be skipped".format(
@@ -915,6 +961,8 @@ class Converter(object):
                annotations.append([category_id, x, y, w, h])
 
            elif "polygonlabels" in label or "polygon" in label:
+                if not ('points' in label):
+                    continue
                points_abs = [(x / 100, y / 100) for x, y in label["points"]]
                annotations.append(
                    [category_id]
@@ -218,7 +218,7 @@ def add_parser(subparsers):
        "--image-ext",
        dest="image_ext",
        help="image extension to search: .jpeg or .jpg, .png",
-        default=".jpg",
+        default=".jpg,jpeg,.png",
    )
    yolo.add_argument(
        "--image-dims",
@@ -21,6 +21,7 @@ from lxml import etree
 from nltk.tokenize.treebank import TreebankWordTokenizer
 
 from label_studio_sdk._extensions.label_studio_tools.core.utils.params import get_env
+from label_studio_sdk._extensions.label_studio_tools.core.utils.io import safe_build_path
 
 logger = logging.getLogger(__name__)
 
@@ -148,7 +149,7 @@ def download(
    if is_uploaded_file:
        upload_dir = _get_upload_dir(project_dir, upload_dir)
        filename = urllib.parse.unquote(url.replace("/data/upload/", ""))
-        filepath = os.path.join(upload_dir, filename)
+        filepath = safe_build_path(upload_dir, filename)
        logger.debug(
            f"Copy {filepath} to {output_dir}".format(
                filepath=filepath, output_dir=output_dir
@@ -165,7 +166,7 @@ def download(
    if is_local_file:
        filename, dir_path = url.split("/data/", 1)[-1].split("?d=")
        dir_path = str(urllib.parse.unquote(dir_path))
-        filepath = os.path.join(LOCAL_FILES_DOCUMENT_ROOT, dir_path)
+        filepath = safe_build_path(LOCAL_FILES_DOCUMENT_ROOT, dir_path)
        if not os.path.exists(filepath):
            raise FileNotFoundError(filepath)
        if download_resources:
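
For context, swapping os.path.join for safe_build_path here reads like a path-traversal guard for uploaded and local file paths. The actual safe_build_path implementation is not shown in this diff; the snippet below is only an illustrative sketch of what such a guard typically does, under that assumption.

import os

def safe_join_sketch(base_dir: str, user_path: str) -> str:
    # Illustrative only: resolve the joined path and reject anything that
    # escapes base_dir (e.g. via "../" segments).
    base = os.path.realpath(base_dir)
    candidate = os.path.realpath(os.path.join(base, user_path))
    if os.path.commonpath([base, candidate]) != base:
        raise ValueError(f"path escapes base directory: {user_path}")
    return candidate
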
@@ -3,14 +3,23 @@
 from .api_error import ApiError
 from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper
 from .datetime_utils import serialize_datetime
-from .file import File, convert_file_dict_to_httpx_tuples
+from .file import File, convert_file_dict_to_httpx_tuples, with_content_type
 from .http_client import AsyncHttpClient, HttpClient
 from .jsonable_encoder import jsonable_encoder
 from .pagination import AsyncPager, SyncPager
-from .pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .pydantic_utilities import (
+    IS_PYDANTIC_V2,
+    UniversalBaseModel,
+    UniversalRootModel,
+    parse_obj_as,
+    universal_field_validator,
+    universal_root_validator,
+    update_forward_refs,
+)
 from .query_encoder import encode_query
 from .remove_none_from_dict import remove_none_from_dict
 from .request_options import RequestOptions
+from .serialization import FieldMetadata, convert_and_respect_annotation_metadata
 
 __all__ = [
     "ApiError",
@@ -18,16 +27,24 @@ __all__ = [
     "AsyncHttpClient",
     "AsyncPager",
     "BaseClientWrapper",
+    "FieldMetadata",
     "File",
     "HttpClient",
+    "IS_PYDANTIC_V2",
     "RequestOptions",
     "SyncClientWrapper",
     "SyncPager",
+    "UniversalBaseModel",
+    "UniversalRootModel",
+    "convert_and_respect_annotation_metadata",
     "convert_file_dict_to_httpx_tuples",
-    "deep_union_pydantic_dicts",
     "encode_query",
     "jsonable_encoder",
-    "pydantic_v1",
+    "parse_obj_as",
     "remove_none_from_dict",
     "serialize_datetime",
+    "universal_field_validator",
+    "universal_root_validator",
+    "update_forward_refs",
+    "with_content_type",
 ]
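
The core package now re-exports the pydantic compatibility layer (UniversalBaseModel, IS_PYDANTIC_V2, parse_obj_as, and friends) instead of the old pydantic_v1 shim. A small sketch, assuming UniversalBaseModel behaves like a regular pydantic BaseModel on both pydantic v1 and v2:

from label_studio_sdk.core import IS_PYDANTIC_V2, UniversalBaseModel

class ExampleRecord(UniversalBaseModel):
    # hypothetical model, used only to show the base class in action
    id: int
    name: str

record = ExampleRecord(id=1, name="task")
print(IS_PYDANTIC_V2, record)
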
@@ -1,10 +1,9 @@
 # This file was auto-generated by Fern from our API Definition.
 
 import typing
-
 import httpx
-
-from .http_client import AsyncHttpClient, HttpClient
+from .http_client import HttpClient
+from .http_client import AsyncHttpClient
 
 
 class BaseClientWrapper:
@@ -17,7 +16,7 @@ class BaseClientWrapper:
        headers: typing.Dict[str, str] = {
            "X-Fern-Language": "Python",
            "X-Fern-SDK-Name": "label-studio-sdk",
-            "X-Fern-SDK-Version": "1.0.6",
+            "X-Fern-SDK-Version": "1.0.9",
        }
        headers["Authorization"] = f"Token {self.api_key}"
        return headers
@@ -36,9 +35,9 @@ class SyncClientWrapper(BaseClientWrapper):
        super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
        self.httpx_client = HttpClient(
            httpx_client=httpx_client,
-            base_headers=self.get_headers(),
-            base_timeout=self.get_timeout(),
-            base_url=self.get_base_url(),
+            base_headers=self.get_headers,
+            base_timeout=self.get_timeout,
+            base_url=self.get_base_url,
        )
 
 
@@ -49,7 +48,7 @@ class AsyncClientWrapper(BaseClientWrapper):
        super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
        self.httpx_client = AsyncHttpClient(
            httpx_client=httpx_client,
-            base_headers=self.get_headers(),
-            base_timeout=self.get_timeout(),
-            base_url=self.get_base_url(),
+            base_headers=self.get_headers,
+            base_timeout=self.get_timeout,
+            base_url=self.get_base_url,
        )
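
The client wrappers now pass the bound methods themselves (get_headers, get_timeout, get_base_url) rather than their values, so the HTTP clients re-evaluate headers, timeout, and base URL on every request. A minimal sketch of that constructor contract, assuming only the signature shown in the http_client hunks further below; the token and URL are placeholders:

import httpx
from label_studio_sdk.core.http_client import HttpClient

client = HttpClient(
    httpx_client=httpx.Client(),
    base_timeout=lambda: 30.0,  # zero-argument callables, evaluated per request
    base_headers=lambda: {"Authorization": "Token YOUR_API_TOKEN"},
    base_url=lambda: "https://app.example.com",
)
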
@@ -1,25 +1,30 @@
 # This file was auto-generated by Fern from our API Definition.
 
-import typing
+from typing import IO, Dict, List, Mapping, Optional, Tuple, Union, cast
 
 # File typing inspired by the flexibility of types within the httpx library
 # https://github.com/encode/httpx/blob/master/httpx/_types.py
-FileContent = typing.Union[typing.IO[bytes], bytes, str]
-File = typing.Union[
+FileContent = Union[IO[bytes], bytes, str]
+File = Union[
     # file (or bytes)
     FileContent,
     # (filename, file (or bytes))
-    typing.Tuple[typing.Optional[str], FileContent],
+    Tuple[Optional[str], FileContent],
     # (filename, file (or bytes), content_type)
-    typing.Tuple[typing.Optional[str], FileContent, typing.Optional[str]],
+    Tuple[Optional[str], FileContent, Optional[str]],
     # (filename, file (or bytes), content_type, headers)
-    typing.Tuple[typing.Optional[str], FileContent, typing.Optional[str], typing.Mapping[str, str]],
+    Tuple[
+        Optional[str],
+        FileContent,
+        Optional[str],
+        Mapping[str, str],
+    ],
 ]
 
 
 def convert_file_dict_to_httpx_tuples(
-    d: typing.Dict[str, typing.Union[File, typing.List[File]]]
-) -> typing.List[typing.Tuple[str, File]]:
+    d: Dict[str, Union[File, List[File]]],
+) -> List[Tuple[str, File]]:
     """
     The format we use is a list of tuples, where the first element is the
     name of the file and the second is the file object. Typically HTTPX wants
@@ -36,3 +41,27 @@ def convert_file_dict_to_httpx_tuples(
        else:
            httpx_tuples.append((key, file_like))
    return httpx_tuples
+
+
+def with_content_type(*, file: File, default_content_type: str) -> File:
+    """
+    This function resolves to the file's content type, if provided, and defaults
+    to the default_content_type value if not.
+    """
+    if isinstance(file, tuple):
+        if len(file) == 2:
+            filename, content = cast(Tuple[Optional[str], FileContent], file)  # type: ignore
+            return (filename, content, default_content_type)
+        elif len(file) == 3:
+            filename, content, file_content_type = cast(Tuple[Optional[str], FileContent, Optional[str]], file)  # type: ignore
+            out_content_type = file_content_type or default_content_type
+            return (filename, content, out_content_type)
+        elif len(file) == 4:
+            filename, content, file_content_type, headers = cast(  # type: ignore
+                Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], file
+            )
+            out_content_type = file_content_type or default_content_type
+            return (filename, content, out_content_type, headers)
+        else:
+            raise ValueError(f"Unexpected tuple length: {len(file)}")
+    return (None, file, default_content_type)
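
A quick usage sketch of the new with_content_type helper added above; the filenames and payloads are placeholders:

from label_studio_sdk.core.file import with_content_type

# a (filename, content) tuple picks up the default content type...
two = with_content_type(file=("tasks.json", b"[]"), default_content_type="application/json")
assert two == ("tasks.json", b"[]", "application/json")

# ...while an explicit content type in a 3-tuple is preserved
three = with_content_type(file=("tasks.csv", b"a,b", "text/csv"), default_content_type="application/json")
assert three == ("tasks.csv", b"a,b", "text/csv")
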
@@ -90,7 +90,8 @@ def _should_retry(response: httpx.Response) -> bool:
 
 
 def remove_omit_from_dict(
-    original: typing.Dict[str, typing.Optional[typing.Any]], omit: typing.Optional[typing.Any]
+    original: typing.Dict[str, typing.Optional[typing.Any]],
+    omit: typing.Optional[typing.Any],
 ) -> typing.Dict[str, typing.Any]:
     if omit is None:
         return original
@@ -108,7 +109,7 @@ def maybe_filter_request_body(
 ) -> typing.Optional[typing.Any]:
     if data is None:
         return (
-            jsonable_encoder(request_options.get("additional_body_parameters", {}))
+            jsonable_encoder(request_options.get("additional_body_parameters", {})) or {}
             if request_options is not None
             else None
         )
@@ -118,7 +119,7 @@ def maybe_filter_request_body(
        data_content = {
            **(jsonable_encoder(remove_omit_from_dict(data, omit))),  # type: ignore
            **(
-                jsonable_encoder(request_options.get("additional_body_parameters", {}))
+                jsonable_encoder(request_options.get("additional_body_parameters", {})) or {}
                if request_options is not None
                else {}
            ),
@@ -142,7 +143,8 @@ def get_request_body(
    # If both data and json are None, we send json data in the event extra properties are specified
    json_body = maybe_filter_request_body(json, request_options, omit)
 
-    return json_body, data_body
+    # If you have an empty JSON body, you should just send None
+    return (json_body if json_body != {} else None), data_body if data_body != {} else None
 
 
 class HttpClient:
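
The return statement above changes behaviour slightly: an empty JSON or form body is now collapsed to None instead of being sent as {}. A tiny sketch, assuming get_request_body is importable from label_studio_sdk.core.http_client as in these hunks:

from label_studio_sdk.core.http_client import get_request_body

json_body, data_body = get_request_body(json=None, data=None, request_options=None, omit=None)
# with no payload and no extra body parameters, both resolve to None rather than {}
assert json_body is None and data_body is None
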
@@ -150,9 +152,9 @@ class HttpClient:
         self,
         *,
         httpx_client: httpx.Client,
-        base_timeout: typing.Optional[float],
-        base_headers: typing.Dict[str, str],
-        base_url: typing.Optional[str] = None,
+        base_timeout: typing.Callable[[], typing.Optional[float]],
+        base_headers: typing.Callable[[], typing.Dict[str, str]],
+        base_url: typing.Optional[typing.Callable[[], str]] = None,
     ):
         self.base_url = base_url
         self.base_timeout = base_timeout
@@ -160,7 +162,10 @@ class HttpClient:
        self.httpx_client = httpx_client
 
    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
-        base_url = self.base_url if maybe_base_url is None else maybe_base_url
+        base_url = maybe_base_url
+        if self.base_url is not None and base_url is None:
+            base_url = self.base_url()
+
        if base_url is None:
            raise ValueError("A base_url is required to make this request, please provide one and try again.")
        return base_url
@@ -185,7 +190,7 @@ class HttpClient:
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else self.base_timeout
+            else self.base_timeout()
        )
 
        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -196,9 +201,9 @@ class HttpClient:
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
-                        **self.base_headers,
+                        **self.base_headers(),
                        **(headers if headers is not None else {}),
-                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                        **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                    }
                )
            ),
@@ -209,7 +214,7 @@ class HttpClient:
                    {
                        **(params if params is not None else {}),
                        **(
-                            request_options.get("additional_query_parameters", {})
+                            request_options.get("additional_query_parameters", {}) or {}
                            if request_options is not None
                            else {}
                        ),
@@ -222,7 +227,11 @@ class HttpClient:
            json=json_body,
            data=data_body,
            content=content,
-            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+            files=(
+                convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+                if (files is not None and files is not omit)
+                else None
+            ),
            timeout=timeout,
        )
 
@@ -267,7 +276,7 @@ class HttpClient:
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else self.base_timeout
+            else self.base_timeout()
        )
 
        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -278,7 +287,7 @@ class HttpClient:
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
-                        **self.base_headers,
+                        **self.base_headers(),
                        **(headers if headers is not None else {}),
                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
                    }
@@ -304,7 +313,11 @@ class HttpClient:
            json=json_body,
            data=data_body,
            content=content,
-            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+            files=(
+                convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+                if (files is not None and files is not omit)
+                else None
+            ),
            timeout=timeout,
        ) as stream:
            yield stream
@@ -315,9 +328,9 @@ class AsyncHttpClient:
        self,
        *,
        httpx_client: httpx.AsyncClient,
-        base_timeout: typing.Optional[float],
-        base_headers: typing.Dict[str, str],
-        base_url: typing.Optional[str] = None,
+        base_timeout: typing.Callable[[], typing.Optional[float]],
+        base_headers: typing.Callable[[], typing.Dict[str, str]],
+        base_url: typing.Optional[typing.Callable[[], str]] = None,
    ):
        self.base_url = base_url
        self.base_timeout = base_timeout
@@ -325,7 +338,10 @@ class AsyncHttpClient:
        self.httpx_client = httpx_client
 
    def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str:
-        base_url = self.base_url if maybe_base_url is None else maybe_base_url
+        base_url = maybe_base_url
+        if self.base_url is not None and base_url is None:
+            base_url = self.base_url()
+
        if base_url is None:
            raise ValueError("A base_url is required to make this request, please provide one and try again.")
        return base_url
@@ -350,7 +366,7 @@ class AsyncHttpClient:
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else self.base_timeout
+            else self.base_timeout()
        )
 
        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -362,9 +378,9 @@ class AsyncHttpClient:
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
-                        **self.base_headers,
+                        **self.base_headers(),
                        **(headers if headers is not None else {}),
-                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                        **(request_options.get("additional_headers", {}) or {} if request_options is not None else {}),
                    }
                )
            ),
@@ -375,7 +391,7 @@ class AsyncHttpClient:
                    {
                        **(params if params is not None else {}),
                        **(
-                            request_options.get("additional_query_parameters", {})
+                            request_options.get("additional_query_parameters", {}) or {}
                            if request_options is not None
                            else {}
                        ),
@@ -388,7 +404,11 @@ class AsyncHttpClient:
            json=json_body,
            data=data_body,
            content=content,
-            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+            files=(
+                convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+                if files is not None
+                else None
+            ),
            timeout=timeout,
        )
 
@@ -432,7 +452,7 @@ class AsyncHttpClient:
        timeout = (
            request_options.get("timeout_in_seconds")
            if request_options is not None and request_options.get("timeout_in_seconds") is not None
-            else self.base_timeout
+            else self.base_timeout()
        )
 
        json_body, data_body = get_request_body(json=json, data=data, request_options=request_options, omit=omit)
@@ -443,7 +463,7 @@ class AsyncHttpClient:
            headers=jsonable_encoder(
                remove_none_from_dict(
                    {
-                        **self.base_headers,
+                        **self.base_headers(),
                        **(headers if headers is not None else {}),
                        **(request_options.get("additional_headers", {}) if request_options is not None else {}),
                    }
@@ -469,7 +489,11 @@ class AsyncHttpClient:
            json=json_body,
            data=data_body,
            content=content,
-            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+            files=(
+                convert_file_dict_to_httpx_tuples(remove_omit_from_dict(remove_none_from_dict(files), omit))
+                if files is not None
+                else None
+            ),
            timeout=timeout,
        ) as stream:
            yield stream