label-studio-sdk 2.0.8__py3-none-any.whl → 2.0.10__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Files changed (75)
  1. label_studio_sdk/__init__.py +36 -16
  2. label_studio_sdk/base_client.py +0 -4
  3. label_studio_sdk/core/client_wrapper.py +1 -1
  4. label_studio_sdk/export_storage/__init__.py +13 -2
  5. label_studio_sdk/export_storage/client.py +4 -0
  6. label_studio_sdk/export_storage/databricks/client.py +1406 -0
  7. label_studio_sdk/import_storage/__init__.py +13 -2
  8. label_studio_sdk/import_storage/client.py +4 -0
  9. label_studio_sdk/import_storage/databricks/__init__.py +2 -0
  10. label_studio_sdk/import_storage/databricks/client.py +1466 -0
  11. label_studio_sdk/import_storage/gcswif/client.py +30 -0
  12. label_studio_sdk/organizations/invites/client.py +31 -6
  13. label_studio_sdk/projects/__init__.py +0 -2
  14. label_studio_sdk/projects/client.py +186 -32
  15. label_studio_sdk/projects/client_ext.py +20 -8
  16. label_studio_sdk/projects/types/__init__.py +0 -2
  17. label_studio_sdk/projects/types/lse_project_create_request_sampling.py +2 -2
  18. label_studio_sdk/projects/types/patched_lse_project_update_request_sampling.py +2 -2
  19. label_studio_sdk/prompts/client.py +340 -1
  20. label_studio_sdk/prompts/runs/client.py +127 -0
  21. label_studio_sdk/tasks/client.py +7 -2
  22. label_studio_sdk/types/__init__.py +36 -12
  23. label_studio_sdk/types/all_roles_project_list.py +10 -10
  24. label_studio_sdk/types/all_roles_project_list_sampling.py +2 -2
  25. label_studio_sdk/types/azure_blob_import_storage.py +5 -0
  26. label_studio_sdk/types/cancel_model_run_response.py +19 -0
  27. label_studio_sdk/types/configurable_permission_option.py +2 -2
  28. label_studio_sdk/types/databricks_export_storage.py +113 -0
  29. label_studio_sdk/types/databricks_export_storage_request.py +107 -0
  30. label_studio_sdk/types/databricks_import_storage.py +123 -0
  31. label_studio_sdk/types/databricks_import_storage_request.py +117 -0
  32. label_studio_sdk/types/default165enum.py +5 -0
  33. label_studio_sdk/types/gcs_import_storage.py +5 -0
  34. label_studio_sdk/types/gcswif_import_storage.py +5 -0
  35. label_studio_sdk/types/gcswif_import_storage_request.py +5 -0
  36. label_studio_sdk/types/local_files_import_storage.py +5 -0
  37. label_studio_sdk/types/lse_project_counts.py +8 -8
  38. label_studio_sdk/types/lse_project_create_sampling.py +2 -2
  39. label_studio_sdk/types/{project.py → lse_project_response.py} +44 -31
  40. label_studio_sdk/types/lse_project_response_sampling.py +7 -0
  41. label_studio_sdk/types/{project_skip_queue.py → lse_project_response_skip_queue.py} +1 -1
  42. label_studio_sdk/types/lse_project_sampling.py +2 -2
  43. label_studio_sdk/types/lse_project_update_sampling.py +2 -2
  44. label_studio_sdk/types/lse_task.py +6 -0
  45. label_studio_sdk/types/lse_task_serializer_for_reviewers.py +6 -0
  46. label_studio_sdk/types/lse_user.py +1 -0
  47. label_studio_sdk/types/lse_user_api.py +1 -0
  48. label_studio_sdk/types/options165enum.py +5 -0
  49. label_studio_sdk/types/organization_permission.py +7 -4
  50. label_studio_sdk/types/paginated_project_member.py +1 -0
  51. label_studio_sdk/types/paginated_project_subset_tasks_response_list.py +23 -0
  52. label_studio_sdk/types/project_subset_item.py +21 -0
  53. label_studio_sdk/types/project_subset_task_item.py +24 -0
  54. label_studio_sdk/types/project_subset_tasks_response.py +27 -0
  55. label_studio_sdk/types/review_settings.py +14 -0
  56. label_studio_sdk/types/review_settings_request.py +14 -0
  57. label_studio_sdk/types/review_settings_request_sampling.py +8 -0
  58. label_studio_sdk/types/review_settings_sampling.py +8 -0
  59. label_studio_sdk/types/review_settings_sampling_enum.py +5 -0
  60. label_studio_sdk/types/{sampling_enum.py → sampling_de5enum.py} +1 -1
  61. label_studio_sdk/types/who_am_i_user.py +1 -0
  62. label_studio_sdk/workspaces/client.py +60 -0
  63. label_studio_sdk/workspaces/members/bulk/client.py +24 -0
  64. label_studio_sdk/workspaces/members/client.py +36 -0
  65. label_studio_sdk/workspaces/members/paginated/client.py +12 -0
  66. {label_studio_sdk-2.0.8.dist-info → label_studio_sdk-2.0.10.dist-info}/METADATA +41 -90
  67. {label_studio_sdk-2.0.8.dist-info → label_studio_sdk-2.0.10.dist-info}/RECORD +70 -57
  68. label_studio_sdk/blueprints/client.py +0 -272
  69. label_studio_sdk/projects/types/projects_list_request_filter.py +0 -5
  70. label_studio_sdk/types/blueprint.py +0 -41
  71. label_studio_sdk/types/configurable_permission_option_default.py +0 -7
  72. label_studio_sdk/types/project_sampling.py +0 -7
  73. /label_studio_sdk/{blueprints → export_storage/databricks}/__init__.py +0 -0
  74. {label_studio_sdk-2.0.8.dist-info → label_studio_sdk-2.0.10.dist-info}/LICENSE +0 -0
  75. {label_studio_sdk-2.0.8.dist-info → label_studio_sdk-2.0.10.dist-info}/WHEEL +0 -0
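
Taken together, the changes above add Databricks import/export storage clients, project-subset endpoints on the Prompts client, a cancel endpoint for prompt inference runs, and new review-settings types. The hunks below show the prompt-related additions in detail; here is a minimal sketch of how the new calls fit together, assuming a reachable Label Studio instance and placeholder IDs:

from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# New in 2.0.10: list the subsets available for a project, with task counts
subsets = client.prompts.subsets(project_pk=1)

# New in 2.0.10: page through the tasks (with predictions) in one subset;
# "HasGT" is the subset name the docstring calls out, used here as a placeholder
tasks_page = client.prompts.subset_tasks(
    project_pk=1,
    project_subset="HasGT",
    page=1,
    page_size=50,
)

# New in 2.0.10: cancel an in-flight inference run for a prompt version
client.prompts.runs.cancel(prompt_id=1, version_id=1, inference_run_id=1)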
label_studio_sdk/prompts/client.py
@@ -11,6 +11,10 @@ from ..core.unchecked_base_model import construct_type
 from json.decoder import JSONDecodeError
 from ..core.api_error import ApiError
 from ..types.batch_predictions import BatchPredictions
+from ..types.paginated_project_subset_tasks_response_list import PaginatedProjectSubsetTasksResponseList
+from ..core.jsonable_encoder import jsonable_encoder
+from ..errors.bad_request_error import BadRequestError
+from ..types.project_subset_item import ProjectSubsetItem
 from ..types.model_interface_serializer_get import ModelInterfaceSerializerGet
 from ..types.user_simple_request import UserSimpleRequest
 from ..types.skill_name_enum import SkillNameEnum
@@ -18,7 +22,6 @@ from ..types.model_interface import ModelInterface
 from ..core.serialization import convert_and_respect_annotation_metadata
 from .types.prompts_compatible_projects_request_project_type import PromptsCompatibleProjectsRequestProjectType
 from ..types.paginated_all_roles_project_list_list import PaginatedAllRolesProjectListList
-from ..core.jsonable_encoder import jsonable_encoder
 from ..core.client_wrapper import AsyncClientWrapper
 from .indicators.client import AsyncIndicatorsClient
 from .versions.client import AsyncVersionsClient
@@ -183,6 +186,166 @@ class PromptsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def subset_tasks(
+        self,
+        project_pk: int,
+        *,
+        include_total: typing.Optional[bool] = None,
+        model_run: typing.Optional[int] = None,
+        ordering: typing.Optional[str] = None,
+        page: typing.Optional[int] = None,
+        page_size: typing.Optional[int] = None,
+        parent_model: typing.Optional[int] = None,
+        project_subset: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PaginatedProjectSubsetTasksResponseList:
+        """
+
+        Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided.
+
+
+        Parameters
+        ----------
+        project_pk : int
+
+        include_total : typing.Optional[bool]
+            If true (default), includes task_count in response; if false, omits it.
+
+        model_run : typing.Optional[int]
+            A unique ID of a ModelRun
+
+        ordering : typing.Optional[str]
+            Which field to use when ordering the results.
+
+        page : typing.Optional[int]
+            A page number within the paginated result set.
+
+        page_size : typing.Optional[int]
+            Number of results to return per page.
+
+        parent_model : typing.Optional[int]
+            The ID of the parent model (ModelInterface) for this Inference Run
+
+        project_subset : typing.Optional[str]
+            The project subset to retrieve tasks for
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PaginatedProjectSubsetTasksResponseList
+
+
+        Examples
+        --------
+        from label_studio_sdk import LabelStudio
+
+        client = LabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+        client.prompts.subset_tasks(
+            project_pk=1,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"api/projects/{jsonable_encoder(project_pk)}/subset-tasks",
+            method="GET",
+            params={
+                "include_total": include_total,
+                "model_run": model_run,
+                "ordering": ordering,
+                "page": page,
+                "page_size": page_size,
+                "parent_model": parent_model,
+                "project_subset": project_subset,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    PaginatedProjectSubsetTasksResponseList,
+                    construct_type(
+                        type_=PaginatedProjectSubsetTasksResponseList,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        construct_type(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def subsets(
+        self,
+        project_pk: int,
+        *,
+        ordering: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.List[ProjectSubsetItem]:
+        """
+
+        Provides list of available subsets for a project along with count of tasks in each subset
+
+
+        Parameters
+        ----------
+        project_pk : int
+
+        ordering : typing.Optional[str]
+            Which field to use when ordering the results.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        typing.List[ProjectSubsetItem]
+
+
+        Examples
+        --------
+        from label_studio_sdk import LabelStudio
+
+        client = LabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+        client.prompts.subsets(
+            project_pk=1,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"api/projects/{jsonable_encoder(project_pk)}/subsets",
+            method="GET",
+            params={
+                "ordering": ordering,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    typing.List[ProjectSubsetItem],
+                    construct_type(
+                        type_=typing.List[ProjectSubsetItem],  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def list(
         self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
     ) -> typing.List[ModelInterfaceSerializerGet]:
@@ -740,6 +903,182 @@ class AsyncPromptsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def subset_tasks(
+        self,
+        project_pk: int,
+        *,
+        include_total: typing.Optional[bool] = None,
+        model_run: typing.Optional[int] = None,
+        ordering: typing.Optional[str] = None,
+        page: typing.Optional[int] = None,
+        page_size: typing.Optional[int] = None,
+        parent_model: typing.Optional[int] = None,
+        project_subset: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PaginatedProjectSubsetTasksResponseList:
+        """
+
+        Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided.
+
+
+        Parameters
+        ----------
+        project_pk : int
+
+        include_total : typing.Optional[bool]
+            If true (default), includes task_count in response; if false, omits it.
+
+        model_run : typing.Optional[int]
+            A unique ID of a ModelRun
+
+        ordering : typing.Optional[str]
+            Which field to use when ordering the results.
+
+        page : typing.Optional[int]
+            A page number within the paginated result set.
+
+        page_size : typing.Optional[int]
+            Number of results to return per page.
+
+        parent_model : typing.Optional[int]
+            The ID of the parent model (ModelInterface) for this Inference Run
+
+        project_subset : typing.Optional[str]
+            The project subset to retrieve tasks for
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PaginatedProjectSubsetTasksResponseList
+
+
+        Examples
+        --------
+        import asyncio
+
+        from label_studio_sdk import AsyncLabelStudio
+
+        client = AsyncLabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.subset_tasks(
+                project_pk=1,
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"api/projects/{jsonable_encoder(project_pk)}/subset-tasks",
+            method="GET",
+            params={
+                "include_total": include_total,
+                "model_run": model_run,
+                "ordering": ordering,
+                "page": page,
+                "page_size": page_size,
+                "parent_model": parent_model,
+                "project_subset": project_subset,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    PaginatedProjectSubsetTasksResponseList,
+                    construct_type(
+                        type_=PaginatedProjectSubsetTasksResponseList,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        construct_type(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def subsets(
+        self,
+        project_pk: int,
+        *,
+        ordering: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.List[ProjectSubsetItem]:
+        """
+
+        Provides list of available subsets for a project along with count of tasks in each subset
+
+
+        Parameters
+        ----------
+        project_pk : int
+
+        ordering : typing.Optional[str]
+            Which field to use when ordering the results.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        typing.List[ProjectSubsetItem]
+
+
+        Examples
+        --------
+        import asyncio
+
+        from label_studio_sdk import AsyncLabelStudio
+
+        client = AsyncLabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.subsets(
+                project_pk=1,
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"api/projects/{jsonable_encoder(project_pk)}/subsets",
+            method="GET",
+            params={
+                "ordering": ordering,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    typing.List[ProjectSubsetItem],
+                    construct_type(
+                        type_=typing.List[ProjectSubsetItem],  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def list(
         self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
     ) -> typing.List[ModelInterfaceSerializerGet]:
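
The async client mirrors the sync methods above, so a typical flow is to discover subsets first and then page through one of them, optionally narrowing to a single model run. A short sketch under those assumptions (IDs, subset name, and page size are placeholders):

import asyncio

from label_studio_sdk import AsyncLabelStudio

client = AsyncLabelStudio(api_key="YOUR_API_KEY")


async def main() -> None:
    # Discover which subsets the project exposes and how many tasks each holds
    subsets = await client.prompts.subsets(project_pk=1)
    print(subsets)

    # Fetch the first page of tasks for one subset, limited to a single model run
    page = await client.prompts.subset_tasks(
        project_pk=1,
        project_subset="HasGT",  # placeholder subset name
        model_run=1,             # placeholder ModelRun ID
        page=1,
        page_size=50,
        include_total=True,      # ask for task_count in the response
    )
    print(page)


asyncio.run(main())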
label_studio_sdk/prompts/runs/client.py
@@ -11,6 +11,7 @@ from json.decoder import JSONDecodeError
 from ...core.api_error import ApiError
 import datetime as dt
 from ...types.project_subset_enum import ProjectSubsetEnum
+from ...types.cancel_model_run_response import CancelModelRunResponse
 from ...core.client_wrapper import AsyncClientWrapper
 
 # this is used as the default value for optional parameters
@@ -193,6 +194,65 @@ class RunsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def cancel(
+        self,
+        inference_run_id: int,
+        prompt_id: int,
+        version_id: int,
+        *,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CancelModelRunResponse:
+        """
+        Cancel the inference run for the given api
+
+        Parameters
+        ----------
+        inference_run_id : int
+
+        prompt_id : int
+
+        version_id : int
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CancelModelRunResponse
+
+
+        Examples
+        --------
+        from label_studio_sdk import LabelStudio
+
+        client = LabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+        client.prompts.runs.cancel(
+            inference_run_id=1,
+            prompt_id=1,
+            version_id=1,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"api/prompts/{jsonable_encoder(prompt_id)}/versions/{jsonable_encoder(version_id)}/inference-runs/{jsonable_encoder(inference_run_id)}/cancel",
+            method="POST",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    CancelModelRunResponse,
+                    construct_type(
+                        type_=CancelModelRunResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncRunsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -385,3 +445,70 @@ class AsyncRunsClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def cancel(
+        self,
+        inference_run_id: int,
+        prompt_id: int,
+        version_id: int,
+        *,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> CancelModelRunResponse:
+        """
+        Cancel the inference run for the given api
+
+        Parameters
+        ----------
+        inference_run_id : int
+
+        prompt_id : int
+
+        version_id : int
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        CancelModelRunResponse
+
+
+        Examples
+        --------
+        import asyncio
+
+        from label_studio_sdk import AsyncLabelStudio
+
+        client = AsyncLabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.runs.cancel(
+                inference_run_id=1,
+                prompt_id=1,
+                version_id=1,
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"api/prompts/{jsonable_encoder(prompt_id)}/versions/{jsonable_encoder(version_id)}/inference-runs/{jsonable_encoder(inference_run_id)}/cancel",
+            method="POST",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    CancelModelRunResponse,
+                    construct_type(
+                        type_=CancelModelRunResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
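
Both cancel variants return a CancelModelRunResponse rather than the run object, and non-2xx responses surface as ApiError, so a caller usually just fires the request and catches that exception. A minimal sketch, assuming the three IDs are placeholders and that ApiError exposes the status_code and body it is raised with:

from label_studio_sdk import LabelStudio
from label_studio_sdk.core.api_error import ApiError

client = LabelStudio(api_key="YOUR_API_KEY")

try:
    # Stop an in-flight inference run for a specific prompt version
    result = client.prompts.runs.cancel(
        prompt_id=1,
        version_id=1,
        inference_run_id=1,
    )
except ApiError as exc:
    # The client re-raises non-2xx responses with the raw body attached
    print(exc.status_code, exc.body)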
label_studio_sdk/tasks/client.py
@@ -372,7 +372,8 @@ class TasksClient:
             api_key="YOUR_API_KEY",
         )
         client.tasks.create(
-            data={"key": "value"},
+            data={"image": "https://example.com/image.jpg", "text": "Hello, world!"},
+            project=1,
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -1203,7 +1204,11 @@ class AsyncTasksClient:
 
         async def main() -> None:
             await client.tasks.create(
-                data={"key": "value"},
+                data={
+                    "image": "https://example.com/image.jpg",
+                    "text": "Hello, world!",
+                },
+                project=1,
             )
 
 
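
The updated create examples above pass a project ID along with the task data; the data keys are placeholders and must match the data variables referenced by the target project's labeling configuration. The sync form of the new example:

from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Create one task in project 1; "image" and "text" are placeholder data keys
task = client.tasks.create(
    data={"image": "https://example.com/image.jpg", "text": "Hello, world!"},
    project=1,
)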