label-studio-sdk 1.0.19__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of label-studio-sdk might be problematic; consult the registry's advisory page for more details.

Files changed (410):
  1. label_studio_sdk/__init__.py +325 -180
  2. label_studio_sdk/actions/__init__.py +4 -0
  3. label_studio_sdk/actions/client.py +46 -10
  4. label_studio_sdk/actions/types/__init__.py +4 -0
  5. label_studio_sdk/actions/types/actions_create_request_filters.py +2 -2
  6. label_studio_sdk/actions/types/actions_create_request_filters_items_item.py +2 -2
  7. label_studio_sdk/actions/types/actions_create_request_id.py +7 -7
  8. label_studio_sdk/actions/types/actions_create_request_selected_items_excluded.py +2 -2
  9. label_studio_sdk/actions/types/actions_create_request_selected_items_included.py +2 -2
  10. label_studio_sdk/actions/types/actions_list_response_item.py +25 -0
  11. label_studio_sdk/actions/types/actions_list_response_item_dialog.py +22 -0
  12. label_studio_sdk/annotations/__init__.py +2 -2
  13. label_studio_sdk/annotations/client.py +379 -243
  14. label_studio_sdk/annotations/types/__init__.py +4 -2
  15. label_studio_sdk/annotations/types/annotation_bulk_serializer_with_selected_items_request_last_action.py +7 -0
  16. label_studio_sdk/annotations/types/annotations_create_bulk_response_item.py +8 -5
  17. label_studio_sdk/base_client.py +28 -24
  18. label_studio_sdk/comments/client.py +378 -140
  19. label_studio_sdk/converter/README.md +207 -0
  20. label_studio_sdk/converter/imports/coco.py +132 -23
  21. label_studio_sdk/core/__init__.py +4 -0
  22. label_studio_sdk/core/unchecked_base_model.py +305 -0
  23. label_studio_sdk/environment.py +1 -1
  24. label_studio_sdk/errors/__init__.py +10 -1
  25. label_studio_sdk/errors/forbidden_error.py +9 -0
  26. label_studio_sdk/errors/method_not_allowed_error.py +9 -0
  27. label_studio_sdk/export_storage/__init__.py +1 -24
  28. label_studio_sdk/export_storage/azure/__init__.py +0 -3
  29. label_studio_sdk/export_storage/azure/client.py +231 -273
  30. label_studio_sdk/export_storage/client.py +5 -5
  31. label_studio_sdk/export_storage/gcs/__init__.py +0 -3
  32. label_studio_sdk/export_storage/gcs/client.py +231 -273
  33. label_studio_sdk/export_storage/local/__init__.py +0 -3
  34. label_studio_sdk/export_storage/local/client.py +211 -253
  35. label_studio_sdk/export_storage/redis/__init__.py +0 -3
  36. label_studio_sdk/export_storage/redis/client.py +239 -281
  37. label_studio_sdk/export_storage/s3/__init__.py +0 -3
  38. label_studio_sdk/export_storage/s3/client.py +254 -296
  39. label_studio_sdk/export_storage/s3s/client.py +694 -210
  40. label_studio_sdk/export_storage/types/export_storage_list_types_response_item.py +2 -2
  41. label_studio_sdk/files/client.py +52 -71
  42. label_studio_sdk/import_storage/__init__.py +1 -24
  43. label_studio_sdk/import_storage/azure/__init__.py +0 -3
  44. label_studio_sdk/import_storage/azure/client.py +249 -299
  45. label_studio_sdk/import_storage/client.py +5 -5
  46. label_studio_sdk/import_storage/gcs/__init__.py +0 -3
  47. label_studio_sdk/import_storage/gcs/client.py +249 -299
  48. label_studio_sdk/import_storage/local/__init__.py +0 -3
  49. label_studio_sdk/import_storage/local/client.py +211 -257
  50. label_studio_sdk/import_storage/redis/__init__.py +0 -3
  51. label_studio_sdk/import_storage/redis/client.py +239 -285
  52. label_studio_sdk/import_storage/s3/__init__.py +0 -3
  53. label_studio_sdk/import_storage/s3/client.py +274 -324
  54. label_studio_sdk/import_storage/s3s/client.py +728 -434
  55. label_studio_sdk/import_storage/types/import_storage_list_types_response_item.py +2 -2
  56. label_studio_sdk/jwt_settings/client.py +56 -58
  57. label_studio_sdk/label_interface/control_tags.py +48 -8
  58. label_studio_sdk/label_interface/interface.py +261 -56
  59. label_studio_sdk/ml/__init__.py +2 -16
  60. label_studio_sdk/ml/client.py +196 -179
  61. label_studio_sdk/ml/types/__init__.py +2 -12
  62. label_studio_sdk/ml/types/ml_list_model_versions_response.py +20 -0
  63. label_studio_sdk/model_providers/__init__.py +3 -0
  64. label_studio_sdk/model_providers/client.py +280 -228
  65. label_studio_sdk/model_providers/types/__init__.py +5 -0
  66. label_studio_sdk/{prompts/types/prompts_batch_predictions_response.py → model_providers/types/model_providers_list_model_provider_choices_response.py} +3 -3
  67. label_studio_sdk/organizations/__init__.py +5 -0
  68. label_studio_sdk/organizations/client.py +331 -0
  69. label_studio_sdk/organizations/members/__init__.py +2 -0
  70. label_studio_sdk/organizations/members/client.py +290 -0
  71. label_studio_sdk/predictions/client.py +29 -77
  72. label_studio_sdk/projects/__init__.py +18 -9
  73. label_studio_sdk/projects/client.py +905 -414
  74. label_studio_sdk/projects/exports/__init__.py +2 -2
  75. label_studio_sdk/projects/exports/client.py +336 -396
  76. label_studio_sdk/projects/exports/client_ext.py +30 -30
  77. label_studio_sdk/projects/exports/types/__init__.py +1 -2
  78. label_studio_sdk/projects/exports/types/exports_convert_response.py +5 -9
  79. label_studio_sdk/projects/pauses/client.py +114 -105
  80. label_studio_sdk/projects/stats/__init__.py +5 -0
  81. label_studio_sdk/projects/stats/client.py +175 -0
  82. label_studio_sdk/projects/stats/types/__init__.py +8 -0
  83. label_studio_sdk/projects/stats/types/stats_iaa_response.py +44 -0
  84. label_studio_sdk/projects/stats/types/stats_iaa_response_common_tasks.py +7 -0
  85. label_studio_sdk/projects/stats/types/stats_iaa_response_iaa.py +5 -0
  86. label_studio_sdk/{types/base_task_file_upload.py → projects/stats/types/stats_iaa_response_std.py} +1 -1
  87. label_studio_sdk/projects/types/__init__.py +10 -6
  88. label_studio_sdk/projects/types/lse_project_create_request_sampling.py +7 -0
  89. label_studio_sdk/projects/types/lse_project_create_request_skip_queue.py +7 -0
  90. label_studio_sdk/projects/types/patched_lse_project_update_request_sampling.py +7 -0
  91. label_studio_sdk/projects/types/patched_lse_project_update_request_skip_queue.py +7 -0
  92. label_studio_sdk/{prompts/types/prompts_batch_failed_predictions_response.py → projects/types/projects_duplicate_response.py} +8 -5
  93. label_studio_sdk/projects/types/projects_import_tasks_response.py +2 -2
  94. label_studio_sdk/projects/types/projects_list_request_filter.py +1 -1
  95. label_studio_sdk/prompts/__init__.py +4 -10
  96. label_studio_sdk/prompts/client.py +511 -442
  97. label_studio_sdk/prompts/indicators/__init__.py +3 -0
  98. label_studio_sdk/prompts/indicators/client.py +47 -49
  99. label_studio_sdk/prompts/indicators/types/__init__.py +5 -0
  100. label_studio_sdk/{types/key_indicator_value.py → prompts/indicators/types/indicators_list_response_item.py} +3 -3
  101. label_studio_sdk/prompts/runs/client.py +113 -135
  102. label_studio_sdk/prompts/types/__init__.py +2 -12
  103. label_studio_sdk/prompts/types/prompts_compatible_projects_request_project_type.py +7 -0
  104. label_studio_sdk/prompts/versions/client.py +372 -312
  105. label_studio_sdk/tasks/__init__.py +2 -2
  106. label_studio_sdk/tasks/client.py +514 -213
  107. label_studio_sdk/tasks/types/__init__.py +1 -2
  108. label_studio_sdk/tokens/client.py +160 -152
  109. label_studio_sdk/tokens/client_ext.py +3 -3
  110. label_studio_sdk/types/__init__.py +258 -142
  111. label_studio_sdk/{webhooks/types/webhooks_update_request_actions_item.py → types/actions_enum.py} +4 -1
  112. label_studio_sdk/types/all_roles_project_list.py +197 -0
  113. label_studio_sdk/types/all_roles_project_list_sampling.py +7 -0
  114. label_studio_sdk/types/all_roles_project_list_skip_queue.py +7 -0
  115. label_studio_sdk/types/annotated_enum.py +5 -0
  116. label_studio_sdk/types/annotation.py +24 -10
  117. label_studio_sdk/types/annotation_last_action.py +3 -15
  118. label_studio_sdk/types/{annotations_dm_field.py → annotation_request.py} +21 -30
  119. label_studio_sdk/types/annotation_request_last_action.py +7 -0
  120. label_studio_sdk/types/assignment_settings.py +31 -0
  121. label_studio_sdk/types/assignment_settings_label_stream_task_distribution.py +7 -0
  122. label_studio_sdk/types/assignment_settings_request.py +32 -0
  123. label_studio_sdk/types/assignment_settings_request_label_stream_task_distribution.py +7 -0
  124. label_studio_sdk/types/{key_indicators_item_additional_kpis_item.py → associated_project.py} +9 -6
  125. label_studio_sdk/types/auth_method_enum.py +5 -0
  126. label_studio_sdk/types/azure_blob_export_storage.py +8 -12
  127. label_studio_sdk/types/azure_blob_import_storage.py +8 -12
  128. label_studio_sdk/types/{prompt_associated_projects_item_id.py → batch_failed_predictions.py} +4 -4
  129. label_studio_sdk/types/{access_token_response.py → batch_predictions.py} +6 -8
  130. label_studio_sdk/types/blank_enum.py +5 -0
  131. label_studio_sdk/types/{key_indicators_item_extra_kpis_item.py → blueprint_list.py} +12 -6
  132. label_studio_sdk/types/budget_reset_period_enum.py +5 -0
  133. label_studio_sdk/types/child_filter.py +44 -0
  134. label_studio_sdk/types/comment.py +39 -14
  135. label_studio_sdk/types/comment_request.py +32 -0
  136. label_studio_sdk/types/comment_serializer_with_expanded_user.py +53 -0
  137. label_studio_sdk/types/converted_format.py +5 -5
  138. label_studio_sdk/types/{api_token_response.py → converted_format_request.py} +8 -15
  139. label_studio_sdk/types/custom_scripts_editable_by_enum.py +5 -0
  140. label_studio_sdk/types/default_role_enum.py +5 -0
  141. label_studio_sdk/types/edition_enum.py +5 -0
  142. label_studio_sdk/types/export.py +7 -7
  143. label_studio_sdk/types/file_upload.py +5 -5
  144. label_studio_sdk/types/filter.py +9 -6
  145. label_studio_sdk/types/filter_group.py +3 -3
  146. label_studio_sdk/types/finished_enum.py +5 -0
  147. label_studio_sdk/types/gcs_export_storage.py +8 -12
  148. label_studio_sdk/types/gcs_import_storage.py +8 -12
  149. label_studio_sdk/types/{rotate_token_response.py → hotkeys.py} +5 -8
  150. label_studio_sdk/types/{base_task.py → import_api_request.py} +11 -34
  151. label_studio_sdk/types/inference_run_cost_estimate.py +2 -2
  152. label_studio_sdk/types/label_stream_task_distribution_enum.py +5 -0
  153. label_studio_sdk/types/{annotations_dm_field_last_action.py → last_action_enum.py} +1 -1
  154. label_studio_sdk/types/local_files_export_storage.py +8 -12
  155. label_studio_sdk/types/local_files_import_storage.py +8 -12
  156. label_studio_sdk/types/{annotation_filter_options.py → lse_annotation_filter_options.py} +12 -2
  157. label_studio_sdk/types/lse_annotation_filter_options_request.py +42 -0
  158. label_studio_sdk/types/lse_annotation_filter_options_request_reviewed.py +7 -0
  159. label_studio_sdk/types/lse_annotation_filter_options_reviewed.py +7 -0
  160. label_studio_sdk/types/{export_snapshot.py → lse_export_create.py} +11 -11
  161. label_studio_sdk/types/lse_fields.py +49 -0
  162. label_studio_sdk/types/lse_fields_onboarding_state.py +8 -0
  163. label_studio_sdk/types/lse_fields_trial_role.py +8 -0
  164. label_studio_sdk/types/lse_key_indicator_value.py +35 -0
  165. label_studio_sdk/types/lse_organization.py +57 -0
  166. label_studio_sdk/types/lse_organization_custom_scripts_editable_by.py +7 -0
  167. label_studio_sdk/types/lse_project_create.py +196 -0
  168. label_studio_sdk/types/lse_project_create_sampling.py +7 -0
  169. label_studio_sdk/types/lse_project_create_skip_queue.py +7 -0
  170. label_studio_sdk/types/lse_project_update.py +215 -0
  171. label_studio_sdk/types/lse_project_update_sampling.py +7 -0
  172. label_studio_sdk/types/lse_project_update_skip_queue.py +7 -0
  173. label_studio_sdk/types/lse_s3export_storage.py +134 -0
  174. label_studio_sdk/{import_storage/s3/types/s3create_response.py → types/lse_s3export_storage_request.py} +47 -21
  175. label_studio_sdk/{import_storage/s3/types/s3update_response.py → types/lse_s3import_storage.py} +60 -21
  176. label_studio_sdk/types/{s3s_import_storage.py → lse_s3import_storage_request.py} +32 -21
  177. label_studio_sdk/types/lse_task.py +117 -0
  178. label_studio_sdk/types/{data_manager_task_serializer_drafts_item.py → lse_task_drafts_item.py} +2 -2
  179. label_studio_sdk/types/lse_task_filter_options.py +63 -0
  180. label_studio_sdk/types/lse_task_filter_options_annotated.py +7 -0
  181. label_studio_sdk/types/lse_task_filter_options_finished.py +7 -0
  182. label_studio_sdk/types/lse_task_filter_options_request.py +63 -0
  183. label_studio_sdk/types/lse_task_filter_options_request_annotated.py +7 -0
  184. label_studio_sdk/types/lse_task_filter_options_request_finished.py +7 -0
  185. label_studio_sdk/types/lse_task_filter_options_request_reviewed.py +7 -0
  186. label_studio_sdk/types/lse_task_filter_options_request_skipped.py +7 -0
  187. label_studio_sdk/types/lse_task_filter_options_reviewed.py +7 -0
  188. label_studio_sdk/types/lse_task_filter_options_skipped.py +7 -0
  189. label_studio_sdk/types/{data_manager_task_serializer_predictions_item.py → lse_task_predictions_item.py} +4 -5
  190. label_studio_sdk/types/lse_task_serializer_for_annotators.py +54 -0
  191. label_studio_sdk/types/lse_task_serializer_for_annotators_drafts_item.py +22 -0
  192. label_studio_sdk/types/lse_task_serializer_for_annotators_predictions_item.py +28 -0
  193. label_studio_sdk/types/lse_task_serializer_for_reviewers.py +117 -0
  194. label_studio_sdk/types/lse_task_serializer_for_reviewers_drafts_item.py +22 -0
  195. label_studio_sdk/types/lse_task_serializer_for_reviewers_predictions_item.py +28 -0
  196. label_studio_sdk/types/lse_user.py +49 -0
  197. label_studio_sdk/types/{base_user.py → lse_user_api.py} +17 -6
  198. label_studio_sdk/types/lseapi_token_create.py +21 -0
  199. label_studio_sdk/types/lseapi_token_list.py +21 -0
  200. label_studio_sdk/types/lsejwt_settings.py +32 -0
  201. label_studio_sdk/types/maybe_expanded_comment.py +7 -0
  202. label_studio_sdk/types/ml_backend.py +16 -17
  203. label_studio_sdk/types/mode_enum.py +5 -0
  204. label_studio_sdk/types/model_interface.py +44 -0
  205. label_studio_sdk/types/model_interface_request.py +40 -0
  206. label_studio_sdk/types/model_interface_serializer_get.py +45 -0
  207. label_studio_sdk/types/model_provider_connection.py +48 -17
  208. label_studio_sdk/types/model_provider_connection_budget_reset_period.py +3 -1
  209. label_studio_sdk/types/model_provider_connection_request.py +71 -0
  210. label_studio_sdk/types/model_run.py +40 -0
  211. label_studio_sdk/types/{inference_run_status.py → model_run_status_enum.py} +1 -1
  212. label_studio_sdk/types/null_enum.py +3 -0
  213. label_studio_sdk/types/onboarding_state_enum.py +7 -0
  214. label_studio_sdk/types/organization_billing.py +20 -0
  215. label_studio_sdk/types/organization_id.py +28 -0
  216. label_studio_sdk/types/organization_invite.py +20 -0
  217. label_studio_sdk/types/organization_member.py +37 -0
  218. label_studio_sdk/types/organization_membership.py +24 -0
  219. label_studio_sdk/{projects/types/projects_list_response.py → types/paginated_all_roles_project_list_list.py} +5 -5
  220. label_studio_sdk/types/{jwt_settings_response.py → paginated_role_based_task_list.py} +11 -9
  221. label_studio_sdk/types/pause.py +55 -14
  222. label_studio_sdk/types/pause_request.py +41 -0
  223. label_studio_sdk/types/prediction.py +7 -11
  224. label_studio_sdk/types/prediction_request.py +56 -0
  225. label_studio_sdk/types/project.py +32 -39
  226. label_studio_sdk/types/project_import.py +12 -13
  227. label_studio_sdk/types/project_label_config.py +2 -2
  228. label_studio_sdk/types/project_label_config_request.py +22 -0
  229. label_studio_sdk/types/project_sampling.py +3 -3
  230. label_studio_sdk/types/project_skip_queue.py +3 -1
  231. label_studio_sdk/types/project_subset_enum.py +5 -0
  232. label_studio_sdk/types/{prompt_version_provider.py → provider_enum.py} +1 -1
  233. label_studio_sdk/types/reason_enum.py +7 -0
  234. label_studio_sdk/types/redis_export_storage.py +8 -12
  235. label_studio_sdk/types/redis_import_storage.py +8 -12
  236. label_studio_sdk/types/refined_prompt_response.py +5 -6
  237. label_studio_sdk/types/requeue_rejected_tasks_mode_enum.py +5 -0
  238. label_studio_sdk/types/review_criteria_enum.py +5 -0
  239. label_studio_sdk/types/review_settings.py +80 -0
  240. label_studio_sdk/types/review_settings_request.py +80 -0
  241. label_studio_sdk/types/review_settings_request_requeue_rejected_tasks_mode.py +8 -0
  242. label_studio_sdk/types/review_settings_request_review_criteria.py +7 -0
  243. label_studio_sdk/types/review_settings_requeue_rejected_tasks_mode.py +8 -0
  244. label_studio_sdk/types/review_settings_review_criteria.py +7 -0
  245. label_studio_sdk/types/reviewed_enum.py +5 -0
  246. label_studio_sdk/types/role_based_task.py +8 -0
  247. label_studio_sdk/types/s3export_storage.py +8 -12
  248. label_studio_sdk/types/s3import_storage.py +8 -12
  249. label_studio_sdk/types/sampling_enum.py +7 -0
  250. label_studio_sdk/types/scope_enum.py +5 -0
  251. label_studio_sdk/types/selected_items_request.py +23 -0
  252. label_studio_sdk/types/serialization_option.py +2 -6
  253. label_studio_sdk/types/serialization_option_request.py +22 -0
  254. label_studio_sdk/types/serialization_options.py +17 -5
  255. label_studio_sdk/types/serialization_options_request.py +47 -0
  256. label_studio_sdk/types/skill_name_enum.py +5 -0
  257. label_studio_sdk/types/skip_queue_enum.py +5 -0
  258. label_studio_sdk/types/skipped_enum.py +5 -0
  259. label_studio_sdk/types/state_enum.py +5 -0
  260. label_studio_sdk/types/status7bf_enum.py +5 -0
  261. label_studio_sdk/types/{azure_blob_import_storage_status.py → status_c5a_enum.py} +2 -2
  262. label_studio_sdk/types/third_party_model_version.py +65 -0
  263. label_studio_sdk/types/third_party_model_version_request.py +54 -0
  264. label_studio_sdk/types/token_refresh_response.py +19 -0
  265. label_studio_sdk/types/token_rotate_response.py +19 -0
  266. label_studio_sdk/types/trial_role_enum.py +16 -0
  267. label_studio_sdk/types/user_simple.py +8 -5
  268. label_studio_sdk/types/user_simple_request.py +28 -0
  269. label_studio_sdk/types/version_response.py +49 -0
  270. label_studio_sdk/types/view.py +8 -15
  271. label_studio_sdk/types/webhook.py +9 -13
  272. label_studio_sdk/types/webhook_serializer_for_update.py +15 -13
  273. label_studio_sdk/types/workspace.py +14 -34
  274. label_studio_sdk/types/workspace_member_create.py +27 -0
  275. label_studio_sdk/types/workspace_member_list.py +24 -0
  276. label_studio_sdk/users/client.py +604 -87
  277. label_studio_sdk/users/types/users_get_token_response.py +4 -11
  278. label_studio_sdk/users/types/users_reset_token_response.py +4 -11
  279. label_studio_sdk/versions/__init__.py +0 -3
  280. label_studio_sdk/versions/client.py +14 -14
  281. label_studio_sdk/views/client.py +227 -141
  282. label_studio_sdk/views/types/views_create_request_data.py +2 -2
  283. label_studio_sdk/views/types/views_create_request_data_filters.py +2 -2
  284. label_studio_sdk/views/types/views_create_request_data_filters_items_item.py +2 -2
  285. label_studio_sdk/views/types/views_update_request_data.py +2 -2
  286. label_studio_sdk/views/types/views_update_request_data_filters.py +2 -2
  287. label_studio_sdk/views/types/views_update_request_data_filters_items_item.py +2 -2
  288. label_studio_sdk/webhooks/__init__.py +36 -2
  289. label_studio_sdk/webhooks/client.py +173 -367
  290. label_studio_sdk/webhooks/types/__init__.py +34 -2
  291. label_studio_sdk/webhooks/types/webhooks_info_response.py +80 -0
  292. label_studio_sdk/webhooks/types/webhooks_info_response_annotation_created.py +24 -0
  293. label_studio_sdk/webhooks/types/webhooks_info_response_annotation_updated.py +24 -0
  294. label_studio_sdk/webhooks/types/webhooks_info_response_annotations_created.py +24 -0
  295. label_studio_sdk/webhooks/types/webhooks_info_response_annotations_deleted.py +24 -0
  296. label_studio_sdk/webhooks/types/webhooks_info_response_label_link_created.py +24 -0
  297. label_studio_sdk/webhooks/types/webhooks_info_response_label_link_deleted.py +24 -0
  298. label_studio_sdk/webhooks/types/webhooks_info_response_label_link_updated.py +24 -0
  299. label_studio_sdk/webhooks/types/webhooks_info_response_project_created.py +24 -0
  300. label_studio_sdk/webhooks/types/webhooks_info_response_project_deleted.py +24 -0
  301. label_studio_sdk/webhooks/types/webhooks_info_response_project_updated.py +24 -0
  302. label_studio_sdk/webhooks/types/webhooks_info_response_review_created.py +24 -0
  303. label_studio_sdk/webhooks/types/webhooks_info_response_review_updated.py +24 -0
  304. label_studio_sdk/webhooks/types/webhooks_info_response_reviews_deleted.py +24 -0
  305. label_studio_sdk/webhooks/types/webhooks_info_response_tasks_created.py +24 -0
  306. label_studio_sdk/webhooks/types/webhooks_info_response_tasks_deleted.py +24 -0
  307. label_studio_sdk/workspaces/__init__.py +1 -2
  308. label_studio_sdk/workspaces/client.py +97 -117
  309. label_studio_sdk/workspaces/members/__init__.py +0 -3
  310. label_studio_sdk/workspaces/members/client.py +65 -81
  311. {label_studio_sdk-1.0.19.dist-info → label_studio_sdk-2.0.0.dist-info}/METADATA +1 -1
  312. label_studio_sdk-2.0.0.dist-info/RECORD +424 -0
  313. {label_studio_sdk-1.0.19.dist-info → label_studio_sdk-2.0.0.dist-info}/WHEEL +1 -1
  314. label_studio_sdk/annotations/types/annotations_create_bulk_request_selected_items.py +0 -34
  315. label_studio_sdk/export_storage/azure/types/__init__.py +0 -6
  316. label_studio_sdk/export_storage/azure/types/azure_create_response.py +0 -57
  317. label_studio_sdk/export_storage/azure/types/azure_update_response.py +0 -57
  318. label_studio_sdk/export_storage/gcs/types/__init__.py +0 -6
  319. label_studio_sdk/export_storage/gcs/types/gcs_create_response.py +0 -57
  320. label_studio_sdk/export_storage/gcs/types/gcs_update_response.py +0 -57
  321. label_studio_sdk/export_storage/local/types/__init__.py +0 -6
  322. label_studio_sdk/export_storage/local/types/local_create_response.py +0 -47
  323. label_studio_sdk/export_storage/local/types/local_update_response.py +0 -47
  324. label_studio_sdk/export_storage/redis/types/__init__.py +0 -6
  325. label_studio_sdk/export_storage/redis/types/redis_create_response.py +0 -62
  326. label_studio_sdk/export_storage/redis/types/redis_update_response.py +0 -62
  327. label_studio_sdk/export_storage/s3/types/__init__.py +0 -6
  328. label_studio_sdk/export_storage/s3/types/s3create_response.py +0 -81
  329. label_studio_sdk/export_storage/s3/types/s3update_response.py +0 -81
  330. label_studio_sdk/import_storage/azure/types/__init__.py +0 -6
  331. label_studio_sdk/import_storage/azure/types/azure_create_response.py +0 -72
  332. label_studio_sdk/import_storage/azure/types/azure_update_response.py +0 -72
  333. label_studio_sdk/import_storage/gcs/types/__init__.py +0 -6
  334. label_studio_sdk/import_storage/gcs/types/gcs_create_response.py +0 -72
  335. label_studio_sdk/import_storage/gcs/types/gcs_update_response.py +0 -72
  336. label_studio_sdk/import_storage/local/types/__init__.py +0 -6
  337. label_studio_sdk/import_storage/local/types/local_create_response.py +0 -47
  338. label_studio_sdk/import_storage/local/types/local_update_response.py +0 -47
  339. label_studio_sdk/import_storage/redis/types/__init__.py +0 -6
  340. label_studio_sdk/import_storage/redis/types/redis_create_response.py +0 -62
  341. label_studio_sdk/import_storage/redis/types/redis_update_response.py +0 -62
  342. label_studio_sdk/import_storage/s3/types/__init__.py +0 -6
  343. label_studio_sdk/ml/types/ml_create_response.py +0 -68
  344. label_studio_sdk/ml/types/ml_create_response_auth_method.py +0 -5
  345. label_studio_sdk/ml/types/ml_update_response.py +0 -68
  346. label_studio_sdk/ml/types/ml_update_response_auth_method.py +0 -5
  347. label_studio_sdk/projects/exports/types/exports_list_formats_response_item.py +0 -44
  348. label_studio_sdk/projects/types/projects_create_response.py +0 -91
  349. label_studio_sdk/projects/types/projects_update_response.py +0 -96
  350. label_studio_sdk/prompts/types/prompts_batch_failed_predictions_request_failed_predictions_item.py +0 -32
  351. label_studio_sdk/prompts/types/prompts_batch_predictions_request_results_item.py +0 -59
  352. label_studio_sdk/tasks/types/tasks_list_response.py +0 -38
  353. label_studio_sdk/types/annotation_completed_by.py +0 -6
  354. label_studio_sdk/types/azure_blob_export_storage_status.py +0 -7
  355. label_studio_sdk/types/base_task_updated_by.py +0 -7
  356. label_studio_sdk/types/comment_created_by.py +0 -5
  357. label_studio_sdk/types/converted_format_status.py +0 -5
  358. label_studio_sdk/types/data_manager_task_serializer.py +0 -118
  359. label_studio_sdk/types/data_manager_task_serializer_annotators_item.py +0 -5
  360. label_studio_sdk/types/data_manager_task_serializer_comment_authors_item.py +0 -5
  361. label_studio_sdk/types/data_manager_task_serializer_predictions_item_model_run.py +0 -5
  362. label_studio_sdk/types/export_format.py +0 -25
  363. label_studio_sdk/types/export_snapshot_status.py +0 -5
  364. label_studio_sdk/types/export_status.py +0 -5
  365. label_studio_sdk/types/gcs_export_storage_status.py +0 -7
  366. label_studio_sdk/types/gcs_import_storage_status.py +0 -7
  367. label_studio_sdk/types/inference_run.py +0 -34
  368. label_studio_sdk/types/inference_run_created_by.py +0 -5
  369. label_studio_sdk/types/inference_run_organization.py +0 -5
  370. label_studio_sdk/types/inference_run_project_subset.py +0 -5
  371. label_studio_sdk/types/key_indicators.py +0 -6
  372. label_studio_sdk/types/key_indicators_item.py +0 -41
  373. label_studio_sdk/types/local_files_export_storage_status.py +0 -7
  374. label_studio_sdk/types/local_files_import_storage_status.py +0 -7
  375. label_studio_sdk/types/ml_backend_auth_method.py +0 -5
  376. label_studio_sdk/types/ml_backend_state.py +0 -5
  377. label_studio_sdk/types/model_provider_connection_created_by.py +0 -5
  378. label_studio_sdk/types/model_provider_connection_organization.py +0 -5
  379. label_studio_sdk/types/model_provider_connection_provider.py +0 -7
  380. label_studio_sdk/types/model_provider_connection_scope.py +0 -5
  381. label_studio_sdk/types/pause_paused_by.py +0 -5
  382. label_studio_sdk/types/project_import_status.py +0 -5
  383. label_studio_sdk/types/prompt.py +0 -71
  384. label_studio_sdk/types/prompt_associated_projects_item.py +0 -6
  385. label_studio_sdk/types/prompt_created_by.py +0 -5
  386. label_studio_sdk/types/prompt_organization.py +0 -5
  387. label_studio_sdk/types/prompt_version.py +0 -32
  388. label_studio_sdk/types/prompt_version_created_by.py +0 -5
  389. label_studio_sdk/types/prompt_version_organization.py +0 -5
  390. label_studio_sdk/types/redis_export_storage_status.py +0 -7
  391. label_studio_sdk/types/redis_import_storage_status.py +0 -7
  392. label_studio_sdk/types/refined_prompt_response_refinement_status.py +0 -7
  393. label_studio_sdk/types/s3export_storage_status.py +0 -7
  394. label_studio_sdk/types/s3import_storage_status.py +0 -7
  395. label_studio_sdk/types/s3s_export_storage.py +0 -73
  396. label_studio_sdk/types/s3s_import_storage_status.py +0 -7
  397. label_studio_sdk/types/task.py +0 -156
  398. label_studio_sdk/types/task_annotators_item.py +0 -5
  399. label_studio_sdk/types/task_comment_authors_item.py +0 -5
  400. label_studio_sdk/types/task_filter_options.py +0 -39
  401. label_studio_sdk/types/webhook_actions_item.py +0 -21
  402. label_studio_sdk/types/webhook_serializer_for_update_actions_item.py +0 -21
  403. label_studio_sdk/versions/types/__init__.py +0 -6
  404. label_studio_sdk/versions/types/versions_get_response.py +0 -73
  405. label_studio_sdk/versions/types/versions_get_response_edition.py +0 -5
  406. label_studio_sdk/workspaces/members/types/__init__.py +0 -6
  407. label_studio_sdk/workspaces/members/types/members_create_response.py +0 -22
  408. label_studio_sdk/workspaces/members/types/members_list_response_item.py +0 -22
  409. label_studio_sdk-1.0.19.dist-info/RECORD +0 -374
  410. {label_studio_sdk-1.0.19.dist-info → label_studio_sdk-2.0.0.dist-info}/LICENSE +0 -0
@@ -2,30 +2,27 @@
2
2
 
3
3
  import typing
4
4
  from ..core.client_wrapper import SyncClientWrapper
5
+ from .indicators.client import IndicatorsClient
5
6
  from .versions.client import VersionsClient
6
7
  from .runs.client import RunsClient
7
- from .indicators.client import IndicatorsClient
8
8
  from ..core.request_options import RequestOptions
9
- from ..types.prompt import Prompt
10
- from ..core.pydantic_utilities import parse_obj_as
9
+ from ..types.batch_failed_predictions import BatchFailedPredictions
10
+ from ..core.unchecked_base_model import construct_type
11
11
  from json.decoder import JSONDecodeError
12
12
  from ..core.api_error import ApiError
13
- from ..types.prompt_created_by import PromptCreatedBy
14
- import datetime as dt
15
- from ..types.prompt_organization import PromptOrganization
16
- from ..types.prompt_associated_projects_item import PromptAssociatedProjectsItem
13
+ from ..types.batch_predictions import BatchPredictions
14
+ from ..types.model_interface_serializer_get import ModelInterfaceSerializerGet
15
+ from ..types.user_simple_request import UserSimpleRequest
16
+ from ..types.skill_name_enum import SkillNameEnum
17
+ from ..types.model_interface import ModelInterface
17
18
  from ..core.serialization import convert_and_respect_annotation_metadata
18
19
  from ..core.jsonable_encoder import jsonable_encoder
19
- from .types.prompts_batch_predictions_request_results_item import PromptsBatchPredictionsRequestResultsItem
20
- from .types.prompts_batch_predictions_response import PromptsBatchPredictionsResponse
21
- from .types.prompts_batch_failed_predictions_request_failed_predictions_item import (
22
- PromptsBatchFailedPredictionsRequestFailedPredictionsItem,
23
- )
24
- from .types.prompts_batch_failed_predictions_response import PromptsBatchFailedPredictionsResponse
20
+ from .types.prompts_compatible_projects_request_project_type import PromptsCompatibleProjectsRequestProjectType
21
+ from ..types.paginated_all_roles_project_list_list import PaginatedAllRolesProjectListList
25
22
  from ..core.client_wrapper import AsyncClientWrapper
23
+ from .indicators.client import AsyncIndicatorsClient
26
24
  from .versions.client import AsyncVersionsClient
27
25
  from .runs.client import AsyncRunsClient
28
- from .indicators.client import AsyncIndicatorsClient
29
26
 
30
27
  # this is used as the default value for optional parameters
31
28
  OMIT = typing.cast(typing.Any, ...)
@@ -34,22 +31,175 @@ OMIT = typing.cast(typing.Any, ...)
34
31
  class PromptsClient:
35
32
  def __init__(self, *, client_wrapper: SyncClientWrapper):
36
33
  self._client_wrapper = client_wrapper
34
+ self.indicators = IndicatorsClient(client_wrapper=self._client_wrapper)
37
35
  self.versions = VersionsClient(client_wrapper=self._client_wrapper)
38
36
  self.runs = RunsClient(client_wrapper=self._client_wrapper)
39
- self.indicators = IndicatorsClient(client_wrapper=self._client_wrapper)
40
37
 
41
- def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Prompt]:
38
+ def batch_failed_predictions(
39
+ self,
40
+ *,
41
+ failed_predictions: typing.Sequence[typing.Optional[typing.Any]],
42
+ modelrun_id: int,
43
+ num_failed_predictions: typing.Optional[int] = None,
44
+ job_id: typing.Optional[str] = OMIT,
45
+ request_options: typing.Optional[RequestOptions] = None,
46
+ ) -> BatchFailedPredictions:
47
+ """
48
+ Create a new batch of failed predictions.
49
+
50
+ Parameters
51
+ ----------
52
+ failed_predictions : typing.Sequence[typing.Optional[typing.Any]]
53
+
54
+ modelrun_id : int
55
+
56
+ num_failed_predictions : typing.Optional[int]
57
+ Number of failed predictions being sent (for telemetry only, has no effect)
58
+
59
+ job_id : typing.Optional[str]
60
+
61
+ request_options : typing.Optional[RequestOptions]
62
+ Request-specific configuration.
63
+
64
+ Returns
65
+ -------
66
+ BatchFailedPredictions
67
+
68
+
69
+ Examples
70
+ --------
71
+ from label_studio_sdk import LabelStudio
72
+
73
+ client = LabelStudio(
74
+ api_key="YOUR_API_KEY",
75
+ )
76
+ client.prompts.batch_failed_predictions(
77
+ failed_predictions=[],
78
+ modelrun_id=1,
79
+ )
80
+ """
81
+ _response = self._client_wrapper.httpx_client.request(
82
+ "api/model-run/batch-failed-predictions",
83
+ method="POST",
84
+ params={
85
+ "num_failed_predictions": num_failed_predictions,
86
+ },
87
+ json={
88
+ "job_id": job_id,
89
+ "failed_predictions": failed_predictions,
90
+ "modelrun_id": modelrun_id,
91
+ },
92
+ headers={
93
+ "content-type": "application/json",
94
+ },
95
+ request_options=request_options,
96
+ omit=OMIT,
97
+ )
98
+ try:
99
+ if 200 <= _response.status_code < 300:
100
+ return typing.cast(
101
+ BatchFailedPredictions,
102
+ construct_type(
103
+ type_=BatchFailedPredictions, # type: ignore
104
+ object_=_response.json(),
105
+ ),
106
+ )
107
+ _response_json = _response.json()
108
+ except JSONDecodeError:
109
+ raise ApiError(status_code=_response.status_code, body=_response.text)
110
+ raise ApiError(status_code=_response.status_code, body=_response_json)
111
+
112
+ def batch_predictions(
113
+ self,
114
+ *,
115
+ results: typing.Sequence[typing.Optional[typing.Any]],
116
+ modelrun_id: int,
117
+ num_predictions: typing.Optional[int] = None,
118
+ job_id: typing.Optional[str] = OMIT,
119
+ request_options: typing.Optional[RequestOptions] = None,
120
+ ) -> BatchPredictions:
121
+ """
122
+ Create a new batch prediction.
123
+
124
+ Parameters
125
+ ----------
126
+ results : typing.Sequence[typing.Optional[typing.Any]]
127
+
128
+ modelrun_id : int
129
+
130
+ num_predictions : typing.Optional[int]
131
+ Number of predictions being sent (for telemetry only, has no effect)
132
+
133
+ job_id : typing.Optional[str]
134
+
135
+ request_options : typing.Optional[RequestOptions]
136
+ Request-specific configuration.
137
+
138
+ Returns
139
+ -------
140
+ BatchPredictions
141
+
142
+
143
+ Examples
144
+ --------
145
+ from label_studio_sdk import LabelStudio
146
+
147
+ client = LabelStudio(
148
+ api_key="YOUR_API_KEY",
149
+ )
150
+ client.prompts.batch_predictions(
151
+ results=[],
152
+ modelrun_id=1,
153
+ )
154
+ """
155
+ _response = self._client_wrapper.httpx_client.request(
156
+ "api/model-run/batch-predictions",
157
+ method="POST",
158
+ params={
159
+ "num_predictions": num_predictions,
160
+ },
161
+ json={
162
+ "job_id": job_id,
163
+ "results": results,
164
+ "modelrun_id": modelrun_id,
165
+ },
166
+ headers={
167
+ "content-type": "application/json",
168
+ },
169
+ request_options=request_options,
170
+ omit=OMIT,
171
+ )
172
+ try:
173
+ if 200 <= _response.status_code < 300:
174
+ return typing.cast(
175
+ BatchPredictions,
176
+ construct_type(
177
+ type_=BatchPredictions, # type: ignore
178
+ object_=_response.json(),
179
+ ),
180
+ )
181
+ _response_json = _response.json()
182
+ except JSONDecodeError:
183
+ raise ApiError(status_code=_response.status_code, body=_response.text)
184
+ raise ApiError(status_code=_response.status_code, body=_response_json)
185
+
186
+ def list(
187
+ self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
188
+ ) -> typing.List[ModelInterfaceSerializerGet]:
42
189
  """
43
- Get a list of prompts.
190
+ List all prompts.
44
191
 
45
192
  Parameters
46
193
  ----------
194
+ ordering : typing.Optional[str]
195
+ Which field to use when ordering the results.
196
+
47
197
  request_options : typing.Optional[RequestOptions]
48
198
  Request-specific configuration.
49
199
 
50
200
  Returns
51
201
  -------
52
- typing.List[Prompt]
202
+ typing.List[ModelInterfaceSerializerGet]
53
203
 
54
204
 
55
205
  Examples
@@ -64,14 +214,17 @@ class PromptsClient:
64
214
  _response = self._client_wrapper.httpx_client.request(
65
215
  "api/prompts/",
66
216
  method="GET",
217
+ params={
218
+ "ordering": ordering,
219
+ },
67
220
  request_options=request_options,
68
221
  )
69
222
  try:
70
223
  if 200 <= _response.status_code < 300:
71
224
  return typing.cast(
72
- typing.List[Prompt],
73
- parse_obj_as(
74
- type_=typing.List[Prompt], # type: ignore
225
+ typing.List[ModelInterfaceSerializerGet],
226
+ construct_type(
227
+ type_=typing.List[ModelInterfaceSerializerGet], # type: ignore
75
228
  object_=_response.json(),
76
229
  ),
77
230
  )
@@ -84,58 +237,45 @@ class PromptsClient:
84
237
  self,
85
238
  *,
86
239
  title: str,
87
- input_fields: typing.Sequence[str],
88
- output_classes: typing.Sequence[str],
240
+ created_by: typing.Optional[UserSimpleRequest] = OMIT,
241
+ skill_name: typing.Optional[SkillNameEnum] = OMIT,
89
242
  description: typing.Optional[str] = OMIT,
90
- created_by: typing.Optional[PromptCreatedBy] = OMIT,
91
- created_at: typing.Optional[dt.datetime] = OMIT,
92
- updated_at: typing.Optional[dt.datetime] = OMIT,
93
- organization: typing.Optional[PromptOrganization] = OMIT,
94
- associated_projects: typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]] = OMIT,
95
- skill_name: typing.Optional[str] = OMIT,
243
+ input_fields: typing.Optional[typing.Optional[typing.Any]] = OMIT,
244
+ output_classes: typing.Optional[typing.Optional[typing.Any]] = OMIT,
245
+ organization: typing.Optional[int] = OMIT,
246
+ associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
96
247
  request_options: typing.Optional[RequestOptions] = None,
97
- ) -> Prompt:
248
+ ) -> ModelInterface:
98
249
  """
99
250
  Create a new prompt.
100
251
 
101
252
  Parameters
102
253
  ----------
103
254
  title : str
104
- Title of the prompt
255
+ Model name
105
256
 
106
- input_fields : typing.Sequence[str]
107
- List of input fields
257
+ created_by : typing.Optional[UserSimpleRequest]
258
+ User who created Dataset
108
259
 
109
- output_classes : typing.Sequence[str]
110
- List of output classes
260
+ skill_name : typing.Optional[SkillNameEnum]
111
261
 
112
262
  description : typing.Optional[str]
113
- Description of the prompt
263
+ Model description
114
264
 
115
- created_by : typing.Optional[PromptCreatedBy]
116
- User ID of the creator of the prompt
265
+ input_fields : typing.Optional[typing.Optional[typing.Any]]
117
266
 
118
- created_at : typing.Optional[dt.datetime]
119
- Date and time the prompt was created
267
+ output_classes : typing.Optional[typing.Optional[typing.Any]]
120
268
 
121
- updated_at : typing.Optional[dt.datetime]
122
- Date and time the prompt was last updated
269
+ organization : typing.Optional[int]
123
270
 
124
- organization : typing.Optional[PromptOrganization]
125
- Organization ID of the prompt
126
-
127
- associated_projects : typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]]
128
- List of associated projects IDs or objects
129
-
130
- skill_name : typing.Optional[str]
131
- Name of the skill
271
+ associated_projects : typing.Optional[typing.Sequence[int]]
132
272
 
133
273
  request_options : typing.Optional[RequestOptions]
134
274
  Request-specific configuration.
135
275
 
136
276
  Returns
137
277
  -------
138
- Prompt
278
+ ModelInterface
139
279
 
140
280
 
141
281
  Examples
@@ -147,32 +287,22 @@ class PromptsClient:
147
287
  )
148
288
  client.prompts.create(
149
289
  title="title",
150
- input_fields=["input_fields"],
151
- output_classes=["output_classes"],
152
290
  )
153
291
  """
154
292
  _response = self._client_wrapper.httpx_client.request(
155
293
  "api/prompts/",
156
294
  method="POST",
157
295
  json={
158
- "title": title,
159
- "description": description,
160
296
  "created_by": convert_and_respect_annotation_metadata(
161
- object_=created_by, annotation=PromptCreatedBy, direction="write"
162
- ),
163
- "created_at": created_at,
164
- "updated_at": updated_at,
165
- "organization": convert_and_respect_annotation_metadata(
166
- object_=organization, annotation=PromptOrganization, direction="write"
297
+ object_=created_by, annotation=UserSimpleRequest, direction="write"
167
298
  ),
299
+ "skill_name": skill_name,
300
+ "title": title,
301
+ "description": description,
168
302
  "input_fields": input_fields,
169
303
  "output_classes": output_classes,
170
- "associated_projects": convert_and_respect_annotation_metadata(
171
- object_=associated_projects,
172
- annotation=typing.Sequence[PromptAssociatedProjectsItem],
173
- direction="write",
174
- ),
175
- "skill_name": skill_name,
304
+ "organization": organization,
305
+ "associated_projects": associated_projects,
176
306
  },
177
307
  request_options=request_options,
178
308
  omit=OMIT,
@@ -180,9 +310,9 @@ class PromptsClient:
180
310
  try:
181
311
  if 200 <= _response.status_code < 300:
182
312
  return typing.cast(
183
- Prompt,
184
- parse_obj_as(
185
- type_=Prompt, # type: ignore
313
+ ModelInterface,
314
+ construct_type(
315
+ type_=ModelInterface, # type: ignore
186
316
  object_=_response.json(),
187
317
  ),
188
318
  )
@@ -191,21 +321,20 @@ class PromptsClient:
191
321
  raise ApiError(status_code=_response.status_code, body=_response.text)
192
322
  raise ApiError(status_code=_response.status_code, body=_response_json)
193
323
 
194
- def get(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> Prompt:
324
+ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> ModelInterfaceSerializerGet:
195
325
  """
196
- Get a prompt by ID.
326
+ Retrieve a specific prompt.
197
327
 
198
328
  Parameters
199
329
  ----------
200
- id : int
201
- Prompt ID
330
+ id : str
202
331
 
203
332
  request_options : typing.Optional[RequestOptions]
204
333
  Request-specific configuration.
205
334
 
206
335
  Returns
207
336
  -------
208
- Prompt
337
+ ModelInterfaceSerializerGet
209
338
 
210
339
 
211
340
  Examples
@@ -216,20 +345,20 @@ class PromptsClient:
216
345
  api_key="YOUR_API_KEY",
217
346
  )
218
347
  client.prompts.get(
219
- id=1,
348
+ id="id",
220
349
  )
221
350
  """
222
351
  _response = self._client_wrapper.httpx_client.request(
223
- f"api/prompts/{jsonable_encoder(id)}",
352
+ f"api/prompts/{jsonable_encoder(id)}/",
224
353
  method="GET",
225
354
  request_options=request_options,
226
355
  )
227
356
  try:
228
357
  if 200 <= _response.status_code < 300:
229
358
  return typing.cast(
230
- Prompt,
231
- parse_obj_as(
232
- type_=Prompt, # type: ignore
359
+ ModelInterfaceSerializerGet,
360
+ construct_type(
361
+ type_=ModelInterfaceSerializerGet, # type: ignore
233
362
  object_=_response.json(),
234
363
  ),
235
364
  )
@@ -238,14 +367,13 @@ class PromptsClient:
238
367
  raise ApiError(status_code=_response.status_code, body=_response.text)
239
368
  raise ApiError(status_code=_response.status_code, body=_response_json)
240
369
 
241
- def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> None:
370
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
242
371
  """
243
- Delete a prompt by ID.
372
+ Delete a prompt by ID
244
373
 
245
374
  Parameters
246
375
  ----------
247
- id : int
248
- Prompt ID
376
+ id : str
249
377
 
250
378
  request_options : typing.Optional[RequestOptions]
251
379
  Request-specific configuration.
@@ -262,11 +390,11 @@ class PromptsClient:
262
390
  api_key="YOUR_API_KEY",
263
391
  )
264
392
  client.prompts.delete(
265
- id=1,
393
+ id="id",
266
394
  )
267
395
  """
268
396
  _response = self._client_wrapper.httpx_client.request(
269
- f"api/prompts/{jsonable_encoder(id)}",
397
+ f"api/prompts/{jsonable_encoder(id)}/",
270
398
  method="DELETE",
271
399
  request_options=request_options,
272
400
  )
@@ -280,64 +408,50 @@ class PromptsClient:
280
408
 
281
409
  def update(
282
410
  self,
283
- id: int,
411
+ id: str,
284
412
  *,
285
- title: str,
286
- input_fields: typing.Sequence[str],
287
- output_classes: typing.Sequence[str],
413
+ created_by: typing.Optional[UserSimpleRequest] = OMIT,
414
+ skill_name: typing.Optional[SkillNameEnum] = OMIT,
415
+ title: typing.Optional[str] = OMIT,
288
416
  description: typing.Optional[str] = OMIT,
289
- created_by: typing.Optional[PromptCreatedBy] = OMIT,
290
- created_at: typing.Optional[dt.datetime] = OMIT,
291
- updated_at: typing.Optional[dt.datetime] = OMIT,
292
- organization: typing.Optional[PromptOrganization] = OMIT,
293
- associated_projects: typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]] = OMIT,
294
- skill_name: typing.Optional[str] = OMIT,
417
+ input_fields: typing.Optional[typing.Optional[typing.Any]] = OMIT,
418
+ output_classes: typing.Optional[typing.Optional[typing.Any]] = OMIT,
419
+ organization: typing.Optional[int] = OMIT,
420
+ associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
295
421
  request_options: typing.Optional[RequestOptions] = None,
296
- ) -> Prompt:
422
+ ) -> ModelInterface:
297
423
  """
298
- Update a prompt by ID.
424
+ Update a specific prompt by ID.
299
425
 
300
426
  Parameters
301
427
  ----------
302
- id : int
303
- Prompt ID
428
+ id : str
304
429
 
305
- title : str
306
- Title of the prompt
430
+ created_by : typing.Optional[UserSimpleRequest]
431
+ User who created Dataset
307
432
 
308
- input_fields : typing.Sequence[str]
309
- List of input fields
433
+ skill_name : typing.Optional[SkillNameEnum]
310
434
 
311
- output_classes : typing.Sequence[str]
312
- List of output classes
435
+ title : typing.Optional[str]
436
+ Model name
313
437
 
314
438
  description : typing.Optional[str]
315
- Description of the prompt
316
-
317
- created_by : typing.Optional[PromptCreatedBy]
318
- User ID of the creator of the prompt
319
-
320
- created_at : typing.Optional[dt.datetime]
321
- Date and time the prompt was created
439
+ Model description
322
440
 
323
- updated_at : typing.Optional[dt.datetime]
324
- Date and time the prompt was last updated
441
+ input_fields : typing.Optional[typing.Optional[typing.Any]]
325
442
 
326
- organization : typing.Optional[PromptOrganization]
327
- Organization ID of the prompt
443
+ output_classes : typing.Optional[typing.Optional[typing.Any]]
328
444
 
329
- associated_projects : typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]]
330
- List of associated projects IDs or objects
445
+ organization : typing.Optional[int]
331
446
 
332
- skill_name : typing.Optional[str]
333
- Name of the skill
447
+ associated_projects : typing.Optional[typing.Sequence[int]]
334
448
 
335
449
  request_options : typing.Optional[RequestOptions]
336
450
  Request-specific configuration.
337
451
 
338
452
  Returns
339
453
  -------
340
- Prompt
454
+ ModelInterface
341
455
 
342
456
 
343
457
  Examples
@@ -348,34 +462,26 @@ class PromptsClient:
348
462
  api_key="YOUR_API_KEY",
349
463
  )
350
464
  client.prompts.update(
351
- id=1,
352
- title="title",
353
- input_fields=["input_fields"],
354
- output_classes=["output_classes"],
465
+ id="id",
355
466
  )
356
467
  """
357
468
  _response = self._client_wrapper.httpx_client.request(
358
- f"api/prompts/{jsonable_encoder(id)}",
469
+ f"api/prompts/{jsonable_encoder(id)}/",
359
470
  method="PATCH",
360
471
  json={
361
- "title": title,
362
- "description": description,
363
472
  "created_by": convert_and_respect_annotation_metadata(
364
- object_=created_by, annotation=PromptCreatedBy, direction="write"
365
- ),
366
- "created_at": created_at,
367
- "updated_at": updated_at,
368
- "organization": convert_and_respect_annotation_metadata(
369
- object_=organization, annotation=PromptOrganization, direction="write"
473
+ object_=created_by, annotation=UserSimpleRequest, direction="write"
370
474
  ),
475
+ "skill_name": skill_name,
476
+ "title": title,
477
+ "description": description,
371
478
  "input_fields": input_fields,
372
479
  "output_classes": output_classes,
373
- "associated_projects": convert_and_respect_annotation_metadata(
374
- object_=associated_projects,
375
- annotation=typing.Sequence[PromptAssociatedProjectsItem],
376
- direction="write",
377
- ),
378
- "skill_name": skill_name,
480
+ "organization": organization,
481
+ "associated_projects": associated_projects,
482
+ },
483
+ headers={
484
+ "content-type": "application/json",
379
485
  },
380
486
  request_options=request_options,
381
487
  omit=OMIT,
@@ -383,9 +489,9 @@ class PromptsClient:
383
489
  try:
384
490
  if 200 <= _response.status_code < 300:
385
491
  return typing.cast(
386
- Prompt,
387
- parse_obj_as(
388
- type_=Prompt, # type: ignore
492
+ ModelInterface,
493
+ construct_type(
494
+ type_=ModelInterface, # type: ignore
389
495
  object_=_response.json(),
390
496
  ),
391
497
  )
@@ -394,33 +500,38 @@ class PromptsClient:
394
500
  raise ApiError(status_code=_response.status_code, body=_response.text)
395
501
  raise ApiError(status_code=_response.status_code, body=_response_json)
396
502
 
397
- def batch_predictions(
503
+ def compatible_projects(
398
504
  self,
399
505
  *,
400
- num_predictions: typing.Optional[int] = None,
401
- modelrun_id: typing.Optional[int] = OMIT,
402
- results: typing.Optional[typing.Sequence[PromptsBatchPredictionsRequestResultsItem]] = OMIT,
506
+ ordering: typing.Optional[str] = None,
507
+ page: typing.Optional[int] = None,
508
+ page_size: typing.Optional[int] = None,
509
+ project_type: typing.Optional[PromptsCompatibleProjectsRequestProjectType] = None,
403
510
  request_options: typing.Optional[RequestOptions] = None,
404
- ) -> PromptsBatchPredictionsResponse:
511
+ ) -> PaginatedAllRolesProjectListList:
405
512
  """
406
- Create a new batch prediction.
513
+ Retrieve a list of compatible project for prompt.
407
514
 
408
515
  Parameters
409
516
  ----------
410
- num_predictions : typing.Optional[int]
411
- Number of predictions being sent
517
+ ordering : typing.Optional[str]
518
+ Which field to use when ordering the results.
519
+
520
+ page : typing.Optional[int]
521
+ A page number within the paginated result set.
412
522
 
413
- modelrun_id : typing.Optional[int]
414
- Model Run ID to associate the prediction with
523
+ page_size : typing.Optional[int]
524
+ Number of results to return per page.
415
525
 
416
- results : typing.Optional[typing.Sequence[PromptsBatchPredictionsRequestResultsItem]]
526
+ project_type : typing.Optional[PromptsCompatibleProjectsRequestProjectType]
527
+ Skill to filter by
417
528
 
418
529
  request_options : typing.Optional[RequestOptions]
419
530
  Request-specific configuration.
420
531
 
421
532
  Returns
422
533
  -------
423
- PromptsBatchPredictionsResponse
534
+ PaginatedAllRolesProjectListList
424
535
 
425
536
 
426
537
  Examples
@@ -430,21 +541,102 @@ class PromptsClient:
430
541
  client = LabelStudio(
431
542
  api_key="YOUR_API_KEY",
432
543
  )
433
- client.prompts.batch_predictions()
544
+ client.prompts.compatible_projects()
434
545
  """
435
546
  _response = self._client_wrapper.httpx_client.request(
436
- "api/model-run/batch-predictions",
547
+ "api/prompts/compatible-projects",
548
+ method="GET",
549
+ params={
550
+ "ordering": ordering,
551
+ "page": page,
552
+ "page_size": page_size,
553
+ "project_type": project_type,
554
+ },
555
+ request_options=request_options,
556
+ )
557
+ try:
558
+ if 200 <= _response.status_code < 300:
559
+ return typing.cast(
560
+ PaginatedAllRolesProjectListList,
561
+ construct_type(
562
+ type_=PaginatedAllRolesProjectListList, # type: ignore
563
+ object_=_response.json(),
564
+ ),
565
+ )
566
+ _response_json = _response.json()
567
+ except JSONDecodeError:
568
+ raise ApiError(status_code=_response.status_code, body=_response.text)
569
+ raise ApiError(status_code=_response.status_code, body=_response_json)
570
+
571
+
572
+ class AsyncPromptsClient:
573
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
574
+ self._client_wrapper = client_wrapper
575
+ self.indicators = AsyncIndicatorsClient(client_wrapper=self._client_wrapper)
576
+ self.versions = AsyncVersionsClient(client_wrapper=self._client_wrapper)
577
+ self.runs = AsyncRunsClient(client_wrapper=self._client_wrapper)
578
+
579
+ async def batch_failed_predictions(
580
+ self,
581
+ *,
582
+ failed_predictions: typing.Sequence[typing.Optional[typing.Any]],
583
+ modelrun_id: int,
584
+ num_failed_predictions: typing.Optional[int] = None,
585
+ job_id: typing.Optional[str] = OMIT,
586
+ request_options: typing.Optional[RequestOptions] = None,
587
+ ) -> BatchFailedPredictions:
588
+ """
589
+ Create a new batch of failed predictions.
590
+
591
+ Parameters
592
+ ----------
593
+ failed_predictions : typing.Sequence[typing.Optional[typing.Any]]
594
+
595
+ modelrun_id : int
596
+
597
+ num_failed_predictions : typing.Optional[int]
598
+ Number of failed predictions being sent (for telemetry only, has no effect)
599
+
600
+ job_id : typing.Optional[str]
601
+
602
+ request_options : typing.Optional[RequestOptions]
603
+ Request-specific configuration.
604
+
605
+ Returns
606
+ -------
607
+ BatchFailedPredictions
608
+
609
+
610
+ Examples
611
+ --------
612
+ import asyncio
613
+
614
+ from label_studio_sdk import AsyncLabelStudio
615
+
616
+ client = AsyncLabelStudio(
617
+ api_key="YOUR_API_KEY",
618
+ )
619
+
620
+
621
+ async def main() -> None:
622
+ await client.prompts.batch_failed_predictions(
623
+ failed_predictions=[],
624
+ modelrun_id=1,
625
+ )
626
+
627
+
628
+ asyncio.run(main())
629
+ """
630
+ _response = await self._client_wrapper.httpx_client.request(
631
+ "api/model-run/batch-failed-predictions",
437
632
  method="POST",
438
633
  params={
439
- "num_predictions": num_predictions,
634
+ "num_failed_predictions": num_failed_predictions,
440
635
  },
441
636
  json={
637
+ "job_id": job_id,
638
+ "failed_predictions": failed_predictions,
442
639
  "modelrun_id": modelrun_id,
443
- "results": convert_and_respect_annotation_metadata(
444
- object_=results,
445
- annotation=typing.Sequence[PromptsBatchPredictionsRequestResultsItem],
446
- direction="write",
447
- ),
448
640
  },
449
641
  headers={
450
642
  "content-type": "application/json",
@@ -455,9 +647,9 @@ class PromptsClient:
455
647
  try:
456
648
  if 200 <= _response.status_code < 300:
457
649
  return typing.cast(
458
- PromptsBatchPredictionsResponse,
459
- parse_obj_as(
460
- type_=PromptsBatchPredictionsResponse, # type: ignore
650
+ BatchFailedPredictions,
651
+ construct_type(
652
+ type_=BatchFailedPredictions, # type: ignore
461
653
  object_=_response.json(),
462
654
  ),
463
655
  )
@@ -466,59 +658,67 @@ class PromptsClient:
466
658
  raise ApiError(status_code=_response.status_code, body=_response.text)
467
659
  raise ApiError(status_code=_response.status_code, body=_response_json)
468
660
 
469
- def batch_failed_predictions(
661
+ async def batch_predictions(
470
662
  self,
471
663
  *,
472
- num_failed_predictions: typing.Optional[int] = None,
473
- modelrun_id: typing.Optional[int] = OMIT,
474
- failed_predictions: typing.Optional[
475
- typing.Sequence[PromptsBatchFailedPredictionsRequestFailedPredictionsItem]
476
- ] = OMIT,
664
+ results: typing.Sequence[typing.Optional[typing.Any]],
665
+ modelrun_id: int,
666
+ num_predictions: typing.Optional[int] = None,
667
+ job_id: typing.Optional[str] = OMIT,
477
668
  request_options: typing.Optional[RequestOptions] = None,
478
- ) -> PromptsBatchFailedPredictionsResponse:
669
+ ) -> BatchPredictions:
479
670
  """
480
- Create a new batch of failed predictions.
671
+ Create a new batch prediction.
481
672
 
482
673
  Parameters
483
674
  ----------
484
- num_failed_predictions : typing.Optional[int]
485
- Number of failed predictions being sent
675
+ results : typing.Sequence[typing.Optional[typing.Any]]
676
+
677
+ modelrun_id : int
486
678
 
487
- modelrun_id : typing.Optional[int]
488
- Model Run ID where the failed predictions came from
679
+ num_predictions : typing.Optional[int]
680
+ Number of predictions being sent (for telemetry only, has no effect)
489
681
 
490
- failed_predictions : typing.Optional[typing.Sequence[PromptsBatchFailedPredictionsRequestFailedPredictionsItem]]
682
+ job_id : typing.Optional[str]
491
683
 
492
684
  request_options : typing.Optional[RequestOptions]
493
685
  Request-specific configuration.
494
686
 
495
687
  Returns
496
688
  -------
497
- PromptsBatchFailedPredictionsResponse
689
+ BatchPredictions
498
690
 
499
691
 
500
692
  Examples
501
693
  --------
502
- from label_studio_sdk import LabelStudio
694
+ import asyncio
503
695
 
504
- client = LabelStudio(
696
+ from label_studio_sdk import AsyncLabelStudio
697
+
698
+ client = AsyncLabelStudio(
505
699
  api_key="YOUR_API_KEY",
506
700
  )
507
- client.prompts.batch_failed_predictions()
701
+
702
+
703
+ async def main() -> None:
704
+ await client.prompts.batch_predictions(
705
+ results=[],
706
+ modelrun_id=1,
707
+ )
708
+
709
+
710
+ asyncio.run(main())
508
711
  """
509
- _response = self._client_wrapper.httpx_client.request(
510
- "api/model-run/batch-failed-predictions",
712
+ _response = await self._client_wrapper.httpx_client.request(
713
+ "api/model-run/batch-predictions",
511
714
  method="POST",
512
715
  params={
513
- "num_failed_predictions": num_failed_predictions,
716
+ "num_predictions": num_predictions,
514
717
  },
515
718
  json={
719
+ "job_id": job_id,
720
+ "results": results,
516
721
  "modelrun_id": modelrun_id,
517
- "failed_predictions": convert_and_respect_annotation_metadata(
518
- object_=failed_predictions,
519
- annotation=typing.Sequence[PromptsBatchFailedPredictionsRequestFailedPredictionsItem],
520
- direction="write",
521
- ),
522
722
  },
523
723
  headers={
524
724
  "content-type": "application/json",
@@ -529,9 +729,9 @@ class PromptsClient:
529
729
  try:
530
730
  if 200 <= _response.status_code < 300:
531
731
  return typing.cast(
532
- PromptsBatchFailedPredictionsResponse,
533
- parse_obj_as(
534
- type_=PromptsBatchFailedPredictionsResponse, # type: ignore
732
+ BatchPredictions,
733
+ construct_type(
734
+ type_=BatchPredictions, # type: ignore
535
735
  object_=_response.json(),
536
736
  ),
537
737
  )
@@ -540,26 +740,23 @@ class PromptsClient:
540
740
  raise ApiError(status_code=_response.status_code, body=_response.text)
541
741
  raise ApiError(status_code=_response.status_code, body=_response_json)
542
742
 
543
-
544
- class AsyncPromptsClient:
545
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
546
- self._client_wrapper = client_wrapper
547
- self.versions = AsyncVersionsClient(client_wrapper=self._client_wrapper)
548
- self.runs = AsyncRunsClient(client_wrapper=self._client_wrapper)
549
- self.indicators = AsyncIndicatorsClient(client_wrapper=self._client_wrapper)
550
-
551
- async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Prompt]:
743
+ async def list(
744
+ self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
745
+ ) -> typing.List[ModelInterfaceSerializerGet]:
552
746
  """
553
- Get a list of prompts.
747
+ List all prompts.
554
748
 
555
749
  Parameters
556
750
  ----------
751
+ ordering : typing.Optional[str]
752
+ Which field to use when ordering the results.
753
+
557
754
  request_options : typing.Optional[RequestOptions]
558
755
  Request-specific configuration.
559
756
 
560
757
  Returns
561
758
  -------
562
- typing.List[Prompt]
759
+ typing.List[ModelInterfaceSerializerGet]
563
760
 
564
761
 
565
762
  Examples
@@ -582,14 +779,17 @@ class AsyncPromptsClient:
582
779
  _response = await self._client_wrapper.httpx_client.request(
583
780
  "api/prompts/",
584
781
  method="GET",
782
+ params={
783
+ "ordering": ordering,
784
+ },
585
785
  request_options=request_options,
586
786
  )
587
787
  try:
588
788
  if 200 <= _response.status_code < 300:
589
789
  return typing.cast(
590
- typing.List[Prompt],
591
- parse_obj_as(
592
- type_=typing.List[Prompt], # type: ignore
790
+ typing.List[ModelInterfaceSerializerGet],
791
+ construct_type(
792
+ type_=typing.List[ModelInterfaceSerializerGet], # type: ignore
593
793
  object_=_response.json(),
594
794
  ),
595
795
  )
@@ -602,58 +802,45 @@ class AsyncPromptsClient:
602
802
  self,
603
803
  *,
604
804
  title: str,
605
- input_fields: typing.Sequence[str],
606
- output_classes: typing.Sequence[str],
805
+ created_by: typing.Optional[UserSimpleRequest] = OMIT,
806
+ skill_name: typing.Optional[SkillNameEnum] = OMIT,
607
807
  description: typing.Optional[str] = OMIT,
608
- created_by: typing.Optional[PromptCreatedBy] = OMIT,
609
- created_at: typing.Optional[dt.datetime] = OMIT,
610
- updated_at: typing.Optional[dt.datetime] = OMIT,
611
- organization: typing.Optional[PromptOrganization] = OMIT,
612
- associated_projects: typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]] = OMIT,
613
- skill_name: typing.Optional[str] = OMIT,
808
+ input_fields: typing.Optional[typing.Optional[typing.Any]] = OMIT,
809
+ output_classes: typing.Optional[typing.Optional[typing.Any]] = OMIT,
810
+ organization: typing.Optional[int] = OMIT,
811
+ associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
614
812
  request_options: typing.Optional[RequestOptions] = None,
615
- ) -> Prompt:
813
+ ) -> ModelInterface:
616
814
  """
617
815
  Create a new prompt.
618
816
 
619
817
  Parameters
620
818
  ----------
621
819
  title : str
622
- Title of the prompt
820
+ Model name
623
821
 
624
- input_fields : typing.Sequence[str]
625
- List of input fields
822
+ created_by : typing.Optional[UserSimpleRequest]
823
+ User who created Dataset
626
824
 
627
- output_classes : typing.Sequence[str]
628
- List of output classes
825
+ skill_name : typing.Optional[SkillNameEnum]
629
826
 
630
827
  description : typing.Optional[str]
631
- Description of the prompt
632
-
633
- created_by : typing.Optional[PromptCreatedBy]
634
- User ID of the creator of the prompt
635
-
636
- created_at : typing.Optional[dt.datetime]
637
- Date and time the prompt was created
828
+ Model description
638
829
 
639
- updated_at : typing.Optional[dt.datetime]
640
- Date and time the prompt was last updated
830
+ input_fields : typing.Optional[typing.Optional[typing.Any]]
641
831
 
642
- organization : typing.Optional[PromptOrganization]
643
- Organization ID of the prompt
832
+ output_classes : typing.Optional[typing.Optional[typing.Any]]
644
833
 
645
- associated_projects : typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]]
646
- List of associated projects IDs or objects
834
+ organization : typing.Optional[int]
647
835
 
648
- skill_name : typing.Optional[str]
649
- Name of the skill
836
+ associated_projects : typing.Optional[typing.Sequence[int]]
650
837
 
651
838
  request_options : typing.Optional[RequestOptions]
652
839
  Request-specific configuration.
653
840
 
654
841
  Returns
655
842
  -------
656
- Prompt
843
+ ModelInterface
657
844
 
658
845
 
659
846
  Examples
@@ -670,8 +857,6 @@ class AsyncPromptsClient:
670
857
  async def main() -> None:
671
858
  await client.prompts.create(
672
859
  title="title",
673
- input_fields=["input_fields"],
674
- output_classes=["output_classes"],
675
860
  )
676
861
 
677
862
 
@@ -681,24 +866,16 @@ class AsyncPromptsClient:
681
866
  "api/prompts/",
682
867
  method="POST",
683
868
  json={
684
- "title": title,
685
- "description": description,
686
869
  "created_by": convert_and_respect_annotation_metadata(
687
- object_=created_by, annotation=PromptCreatedBy, direction="write"
688
- ),
689
- "created_at": created_at,
690
- "updated_at": updated_at,
691
- "organization": convert_and_respect_annotation_metadata(
692
- object_=organization, annotation=PromptOrganization, direction="write"
870
+ object_=created_by, annotation=UserSimpleRequest, direction="write"
693
871
  ),
872
+ "skill_name": skill_name,
873
+ "title": title,
874
+ "description": description,
694
875
  "input_fields": input_fields,
695
876
  "output_classes": output_classes,
696
- "associated_projects": convert_and_respect_annotation_metadata(
697
- object_=associated_projects,
698
- annotation=typing.Sequence[PromptAssociatedProjectsItem],
699
- direction="write",
700
- ),
701
- "skill_name": skill_name,
877
+ "organization": organization,
878
+ "associated_projects": associated_projects,
702
879
  },
703
880
  request_options=request_options,
704
881
  omit=OMIT,
@@ -706,9 +883,9 @@ class AsyncPromptsClient:
706
883
  try:
707
884
  if 200 <= _response.status_code < 300:
708
885
  return typing.cast(
709
- Prompt,
710
- parse_obj_as(
711
- type_=Prompt, # type: ignore
886
+ ModelInterface,
887
+ construct_type(
888
+ type_=ModelInterface, # type: ignore
712
889
  object_=_response.json(),
713
890
  ),
714
891
  )
@@ -717,21 +894,22 @@ class AsyncPromptsClient:
717
894
  raise ApiError(status_code=_response.status_code, body=_response.text)
718
895
  raise ApiError(status_code=_response.status_code, body=_response_json)
719
896
 
720
- async def get(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> Prompt:
897
+ async def get(
898
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
899
+ ) -> ModelInterfaceSerializerGet:
721
900
  """
722
- Get a prompt by ID.
901
+ Retrieve a specific prompt.
723
902
 
724
903
  Parameters
725
904
  ----------
726
- id : int
727
- Prompt ID
905
+ id : str
728
906
 
729
907
  request_options : typing.Optional[RequestOptions]
730
908
  Request-specific configuration.
731
909
 
732
910
  Returns
733
911
  -------
734
- Prompt
912
+ ModelInterfaceSerializerGet
735
913
 
736
914
 
737
915
  Examples
@@ -747,23 +925,23 @@ class AsyncPromptsClient:
747
925
 
748
926
  async def main() -> None:
749
927
  await client.prompts.get(
750
- id=1,
928
+ id="id",
751
929
  )
752
930
 
753
931
 
754
932
  asyncio.run(main())
755
933
  """
756
934
  _response = await self._client_wrapper.httpx_client.request(
757
- f"api/prompts/{jsonable_encoder(id)}",
935
+ f"api/prompts/{jsonable_encoder(id)}/",
758
936
  method="GET",
759
937
  request_options=request_options,
760
938
  )
761
939
  try:
762
940
  if 200 <= _response.status_code < 300:
763
941
  return typing.cast(
764
- Prompt,
765
- parse_obj_as(
766
- type_=Prompt, # type: ignore
942
+ ModelInterfaceSerializerGet,
943
+ construct_type(
944
+ type_=ModelInterfaceSerializerGet, # type: ignore
767
945
  object_=_response.json(),
768
946
  ),
769
947
  )
@@ -772,14 +950,13 @@ class AsyncPromptsClient:
772
950
  raise ApiError(status_code=_response.status_code, body=_response.text)
773
951
  raise ApiError(status_code=_response.status_code, body=_response_json)
774
952
 
775
- async def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] = None) -> None:
953
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
776
954
  """
777
- Delete a prompt by ID.
955
+ Delete a prompt by ID
778
956
 
779
957
  Parameters
780
958
  ----------
781
- id : int
782
- Prompt ID
959
+ id : str
783
960
 
784
961
  request_options : typing.Optional[RequestOptions]
785
962
  Request-specific configuration.
@@ -801,14 +978,14 @@ class AsyncPromptsClient:
801
978
 
802
979
  async def main() -> None:
803
980
  await client.prompts.delete(
804
- id=1,
981
+ id="id",
805
982
  )
806
983
 
807
984
 
808
985
  asyncio.run(main())
809
986
  """
810
987
  _response = await self._client_wrapper.httpx_client.request(
811
- f"api/prompts/{jsonable_encoder(id)}",
988
+ f"api/prompts/{jsonable_encoder(id)}/",
812
989
  method="DELETE",
813
990
  request_options=request_options,
814
991
  )
@@ -822,64 +999,50 @@ class AsyncPromptsClient:
822
999
 
823
1000
  async def update(
824
1001
  self,
825
- id: int,
1002
+ id: str,
826
1003
  *,
827
- title: str,
828
- input_fields: typing.Sequence[str],
829
- output_classes: typing.Sequence[str],
1004
+ created_by: typing.Optional[UserSimpleRequest] = OMIT,
1005
+ skill_name: typing.Optional[SkillNameEnum] = OMIT,
1006
+ title: typing.Optional[str] = OMIT,
830
1007
  description: typing.Optional[str] = OMIT,
831
- created_by: typing.Optional[PromptCreatedBy] = OMIT,
832
- created_at: typing.Optional[dt.datetime] = OMIT,
833
- updated_at: typing.Optional[dt.datetime] = OMIT,
834
- organization: typing.Optional[PromptOrganization] = OMIT,
835
- associated_projects: typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]] = OMIT,
836
- skill_name: typing.Optional[str] = OMIT,
1008
+ input_fields: typing.Optional[typing.Optional[typing.Any]] = OMIT,
1009
+ output_classes: typing.Optional[typing.Optional[typing.Any]] = OMIT,
1010
+ organization: typing.Optional[int] = OMIT,
1011
+ associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
837
1012
  request_options: typing.Optional[RequestOptions] = None,
838
- ) -> Prompt:
1013
+ ) -> ModelInterface:
839
1014
  """
840
- Update a prompt by ID.
1015
+ Update a specific prompt by ID.
841
1016
 
842
1017
  Parameters
843
1018
  ----------
844
- id : int
845
- Prompt ID
1019
+ id : str
846
1020
 
847
- title : str
848
- Title of the prompt
1021
+ created_by : typing.Optional[UserSimpleRequest]
1022
+ User who created Dataset
849
1023
 
850
- input_fields : typing.Sequence[str]
851
- List of input fields
1024
+ skill_name : typing.Optional[SkillNameEnum]
852
1025
 
853
- output_classes : typing.Sequence[str]
854
- List of output classes
1026
+ title : typing.Optional[str]
1027
+ Model name
855
1028
 
856
1029
  description : typing.Optional[str]
857
- Description of the prompt
858
-
859
- created_by : typing.Optional[PromptCreatedBy]
860
- User ID of the creator of the prompt
1030
+ Model description
861
1031
 
862
- created_at : typing.Optional[dt.datetime]
863
- Date and time the prompt was created
1032
+ input_fields : typing.Optional[typing.Optional[typing.Any]]
864
1033
 
865
- updated_at : typing.Optional[dt.datetime]
866
- Date and time the prompt was last updated
1034
+ output_classes : typing.Optional[typing.Optional[typing.Any]]
867
1035
 
868
- organization : typing.Optional[PromptOrganization]
869
- Organization ID of the prompt
1036
+ organization : typing.Optional[int]
870
1037
 
871
- associated_projects : typing.Optional[typing.Sequence[PromptAssociatedProjectsItem]]
872
- List of associated projects IDs or objects
873
-
874
- skill_name : typing.Optional[str]
875
- Name of the skill
1038
+ associated_projects : typing.Optional[typing.Sequence[int]]
876
1039
 
877
1040
  request_options : typing.Optional[RequestOptions]
878
1041
  Request-specific configuration.
879
1042
 
880
1043
  Returns
881
1044
  -------
882
- Prompt
1045
+ ModelInterface
883
1046
 
884
1047
 
885
1048
  Examples
@@ -895,114 +1058,26 @@ class AsyncPromptsClient:
895
1058
 
896
1059
  async def main() -> None:
897
1060
  await client.prompts.update(
898
- id=1,
899
- title="title",
900
- input_fields=["input_fields"],
901
- output_classes=["output_classes"],
1061
+ id="id",
902
1062
  )
903
1063
 
904
1064
 
905
1065
  asyncio.run(main())
906
1066
  """
907
1067
  _response = await self._client_wrapper.httpx_client.request(
908
- f"api/prompts/{jsonable_encoder(id)}",
1068
+ f"api/prompts/{jsonable_encoder(id)}/",
909
1069
  method="PATCH",
910
1070
  json={
911
- "title": title,
912
- "description": description,
913
1071
  "created_by": convert_and_respect_annotation_metadata(
914
- object_=created_by, annotation=PromptCreatedBy, direction="write"
915
- ),
916
- "created_at": created_at,
917
- "updated_at": updated_at,
918
- "organization": convert_and_respect_annotation_metadata(
919
- object_=organization, annotation=PromptOrganization, direction="write"
1072
+ object_=created_by, annotation=UserSimpleRequest, direction="write"
920
1073
  ),
1074
+ "skill_name": skill_name,
1075
+ "title": title,
1076
+ "description": description,
921
1077
  "input_fields": input_fields,
922
1078
  "output_classes": output_classes,
923
- "associated_projects": convert_and_respect_annotation_metadata(
924
- object_=associated_projects,
925
- annotation=typing.Sequence[PromptAssociatedProjectsItem],
926
- direction="write",
927
- ),
928
- "skill_name": skill_name,
929
- },
930
- request_options=request_options,
931
- omit=OMIT,
932
- )
933
- try:
934
- if 200 <= _response.status_code < 300:
935
- return typing.cast(
936
- Prompt,
937
- parse_obj_as(
938
- type_=Prompt, # type: ignore
939
- object_=_response.json(),
940
- ),
941
- )
942
- _response_json = _response.json()
943
- except JSONDecodeError:
944
- raise ApiError(status_code=_response.status_code, body=_response.text)
945
- raise ApiError(status_code=_response.status_code, body=_response_json)
946
-
947
- async def batch_predictions(
948
- self,
949
- *,
950
- num_predictions: typing.Optional[int] = None,
951
- modelrun_id: typing.Optional[int] = OMIT,
952
- results: typing.Optional[typing.Sequence[PromptsBatchPredictionsRequestResultsItem]] = OMIT,
953
- request_options: typing.Optional[RequestOptions] = None,
954
- ) -> PromptsBatchPredictionsResponse:
955
- """
956
- Create a new batch prediction.
957
-
958
- Parameters
959
- ----------
960
- num_predictions : typing.Optional[int]
961
- Number of predictions being sent
962
-
963
- modelrun_id : typing.Optional[int]
964
- Model Run ID to associate the prediction with
965
-
966
- results : typing.Optional[typing.Sequence[PromptsBatchPredictionsRequestResultsItem]]
967
-
968
- request_options : typing.Optional[RequestOptions]
969
- Request-specific configuration.
970
-
971
- Returns
972
- -------
973
- PromptsBatchPredictionsResponse
974
-
975
-
976
- Examples
977
- --------
978
- import asyncio
979
-
980
- from label_studio_sdk import AsyncLabelStudio
981
-
982
- client = AsyncLabelStudio(
983
- api_key="YOUR_API_KEY",
984
- )
985
-
986
-
987
- async def main() -> None:
988
- await client.prompts.batch_predictions()
989
-
990
-
991
- asyncio.run(main())
992
- """
993
- _response = await self._client_wrapper.httpx_client.request(
994
- "api/model-run/batch-predictions",
995
- method="POST",
996
- params={
997
- "num_predictions": num_predictions,
998
- },
999
- json={
1000
- "modelrun_id": modelrun_id,
1001
- "results": convert_and_respect_annotation_metadata(
1002
- object_=results,
1003
- annotation=typing.Sequence[PromptsBatchPredictionsRequestResultsItem],
1004
- direction="write",
1005
- ),
1079
+ "organization": organization,
1080
+ "associated_projects": associated_projects,
1006
1081
  },
1007
1082
  headers={
1008
1083
  "content-type": "application/json",
@@ -1013,9 +1088,9 @@ class AsyncPromptsClient:
1013
1088
  try:
1014
1089
  if 200 <= _response.status_code < 300:
1015
1090
  return typing.cast(
1016
- PromptsBatchPredictionsResponse,
1017
- parse_obj_as(
1018
- type_=PromptsBatchPredictionsResponse, # type: ignore
1091
+ ModelInterface,
1092
+ construct_type(
1093
+ type_=ModelInterface, # type: ignore
1019
1094
  object_=_response.json(),
1020
1095
  ),
1021
1096
  )
@@ -1024,35 +1099,38 @@ class AsyncPromptsClient:
1024
1099
  raise ApiError(status_code=_response.status_code, body=_response.text)
1025
1100
  raise ApiError(status_code=_response.status_code, body=_response_json)
1026
1101
 
1027
- async def batch_failed_predictions(
1102
+ async def compatible_projects(
1028
1103
  self,
1029
1104
  *,
1030
- num_failed_predictions: typing.Optional[int] = None,
1031
- modelrun_id: typing.Optional[int] = OMIT,
1032
- failed_predictions: typing.Optional[
1033
- typing.Sequence[PromptsBatchFailedPredictionsRequestFailedPredictionsItem]
1034
- ] = OMIT,
1105
+ ordering: typing.Optional[str] = None,
1106
+ page: typing.Optional[int] = None,
1107
+ page_size: typing.Optional[int] = None,
1108
+ project_type: typing.Optional[PromptsCompatibleProjectsRequestProjectType] = None,
1035
1109
  request_options: typing.Optional[RequestOptions] = None,
1036
- ) -> PromptsBatchFailedPredictionsResponse:
1110
+ ) -> PaginatedAllRolesProjectListList:
1037
1111
  """
1038
- Create a new batch of failed predictions.
1112
+ Retrieve a list of compatible project for prompt.
1039
1113
 
1040
1114
  Parameters
1041
1115
  ----------
1042
- num_failed_predictions : typing.Optional[int]
1043
- Number of failed predictions being sent
1116
+ ordering : typing.Optional[str]
1117
+ Which field to use when ordering the results.
1044
1118
 
1045
- modelrun_id : typing.Optional[int]
1046
- Model Run ID where the failed predictions came from
1119
+ page : typing.Optional[int]
1120
+ A page number within the paginated result set.
1047
1121
 
1048
- failed_predictions : typing.Optional[typing.Sequence[PromptsBatchFailedPredictionsRequestFailedPredictionsItem]]
1122
+ page_size : typing.Optional[int]
1123
+ Number of results to return per page.
1124
+
1125
+ project_type : typing.Optional[PromptsCompatibleProjectsRequestProjectType]
1126
+ Skill to filter by
1049
1127
 
1050
1128
  request_options : typing.Optional[RequestOptions]
1051
1129
  Request-specific configuration.
1052
1130
 
1053
1131
  Returns
1054
1132
  -------
1055
- PromptsBatchFailedPredictionsResponse
1133
+ PaginatedAllRolesProjectListList
1056
1134
 
1057
1135
 
1058
1136
  Examples
@@ -1067,37 +1145,28 @@ class AsyncPromptsClient:
1067
1145
 
1068
1146
 
1069
1147
  async def main() -> None:
1070
- await client.prompts.batch_failed_predictions()
1148
+ await client.prompts.compatible_projects()
1071
1149
 
1072
1150
 
1073
1151
  asyncio.run(main())
1074
1152
  """
1075
1153
  _response = await self._client_wrapper.httpx_client.request(
1076
- "api/model-run/batch-failed-predictions",
1077
- method="POST",
1154
+ "api/prompts/compatible-projects",
1155
+ method="GET",
1078
1156
  params={
1079
- "num_failed_predictions": num_failed_predictions,
1080
- },
1081
- json={
1082
- "modelrun_id": modelrun_id,
1083
- "failed_predictions": convert_and_respect_annotation_metadata(
1084
- object_=failed_predictions,
1085
- annotation=typing.Sequence[PromptsBatchFailedPredictionsRequestFailedPredictionsItem],
1086
- direction="write",
1087
- ),
1088
- },
1089
- headers={
1090
- "content-type": "application/json",
1157
+ "ordering": ordering,
1158
+ "page": page,
1159
+ "page_size": page_size,
1160
+ "project_type": project_type,
1091
1161
  },
1092
1162
  request_options=request_options,
1093
- omit=OMIT,
1094
1163
  )
1095
1164
  try:
1096
1165
  if 200 <= _response.status_code < 300:
1097
1166
  return typing.cast(
1098
- PromptsBatchFailedPredictionsResponse,
1099
- parse_obj_as(
1100
- type_=PromptsBatchFailedPredictionsResponse, # type: ignore
1167
+ PaginatedAllRolesProjectListList,
1168
+ construct_type(
1169
+ type_=PaginatedAllRolesProjectListList, # type: ignore
1101
1170
  object_=_response.json(),
1102
1171
  ),
1103
1172
  )