label-studio-sdk 1.0.5__py3-none-any.whl → 1.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. label_studio_sdk/__init__.py +70 -0
  2. label_studio_sdk/_extensions/eval/categorical.py +83 -0
  3. label_studio_sdk/_extensions/label_studio_tools/core/utils/io.py +35 -17
  4. label_studio_sdk/annotations/__init__.py +3 -0
  5. label_studio_sdk/annotations/client.py +109 -0
  6. label_studio_sdk/annotations/types/__init__.py +5 -0
  7. label_studio_sdk/annotations/types/annotations_create_bulk_response_item.py +29 -0
  8. label_studio_sdk/base_client.py +9 -0
  9. label_studio_sdk/comments/__init__.py +2 -0
  10. label_studio_sdk/comments/client.py +512 -0
  11. label_studio_sdk/converter/converter.py +2 -0
  12. label_studio_sdk/converter/imports/coco.py +14 -13
  13. label_studio_sdk/converter/utils.py +72 -3
  14. label_studio_sdk/core/client_wrapper.py +1 -1
  15. label_studio_sdk/files/client.py +26 -16
  16. label_studio_sdk/label_interface/interface.py +38 -5
  17. label_studio_sdk/model_providers/__init__.py +2 -0
  18. label_studio_sdk/model_providers/client.py +190 -0
  19. label_studio_sdk/projects/client.py +32 -16
  20. label_studio_sdk/projects/exports/client.py +133 -40
  21. label_studio_sdk/prompts/__init__.py +21 -0
  22. label_studio_sdk/prompts/client.py +862 -0
  23. label_studio_sdk/prompts/indicators/__init__.py +2 -0
  24. label_studio_sdk/prompts/indicators/client.py +194 -0
  25. label_studio_sdk/prompts/runs/__init__.py +5 -0
  26. label_studio_sdk/prompts/runs/client.py +354 -0
  27. label_studio_sdk/prompts/runs/types/__init__.py +5 -0
  28. label_studio_sdk/prompts/runs/types/runs_list_request_project_subset.py +5 -0
  29. label_studio_sdk/prompts/types/__init__.py +15 -0
  30. label_studio_sdk/prompts/types/prompts_batch_failed_predictions_request_failed_predictions_item.py +42 -0
  31. label_studio_sdk/prompts/types/prompts_batch_failed_predictions_response.py +29 -0
  32. label_studio_sdk/prompts/types/prompts_batch_predictions_request_results_item.py +62 -0
  33. label_studio_sdk/prompts/types/prompts_batch_predictions_response.py +29 -0
  34. label_studio_sdk/prompts/versions/__init__.py +2 -0
  35. label_studio_sdk/prompts/versions/client.py +921 -0
  36. label_studio_sdk/types/__init__.py +52 -0
  37. label_studio_sdk/types/comment.py +39 -0
  38. label_studio_sdk/types/comment_created_by.py +5 -0
  39. label_studio_sdk/types/inference_run.py +43 -0
  40. label_studio_sdk/types/inference_run_created_by.py +5 -0
  41. label_studio_sdk/types/inference_run_organization.py +5 -0
  42. label_studio_sdk/types/inference_run_project_subset.py +5 -0
  43. label_studio_sdk/types/inference_run_status.py +7 -0
  44. label_studio_sdk/types/key_indicator_value.py +30 -0
  45. label_studio_sdk/types/key_indicators.py +7 -0
  46. label_studio_sdk/types/key_indicators_item.py +51 -0
  47. label_studio_sdk/types/key_indicators_item_additional_kpis_item.py +37 -0
  48. label_studio_sdk/types/key_indicators_item_extra_kpis_item.py +37 -0
  49. label_studio_sdk/types/model_provider_connection.py +41 -0
  50. label_studio_sdk/types/model_provider_connection_created_by.py +5 -0
  51. label_studio_sdk/types/model_provider_connection_organization.py +5 -0
  52. label_studio_sdk/types/model_provider_connection_provider.py +5 -0
  53. label_studio_sdk/types/model_provider_connection_scope.py +5 -0
  54. label_studio_sdk/types/prompt.py +79 -0
  55. label_studio_sdk/types/prompt_created_by.py +5 -0
  56. label_studio_sdk/types/prompt_organization.py +5 -0
  57. label_studio_sdk/types/prompt_version.py +41 -0
  58. label_studio_sdk/types/prompt_version_created_by.py +5 -0
  59. label_studio_sdk/types/prompt_version_organization.py +5 -0
  60. label_studio_sdk/types/prompt_version_provider.py +5 -0
  61. label_studio_sdk/types/refined_prompt_response.py +64 -0
  62. label_studio_sdk/types/refined_prompt_response_refinement_status.py +7 -0
  63. label_studio_sdk/webhooks/client.py +245 -36
  64. label_studio_sdk/workspaces/client.py +20 -20
  65. label_studio_sdk-1.0.7.dist-info/LICENSE +201 -0
  66. {label_studio_sdk-1.0.5.dist-info → label_studio_sdk-1.0.7.dist-info}/METADATA +17 -3
  67. {label_studio_sdk-1.0.5.dist-info → label_studio_sdk-1.0.7.dist-info}/RECORD +68 -19
  68. {label_studio_sdk-1.0.5.dist-info → label_studio_sdk-1.0.7.dist-info}/WHEEL +1 -1
@@ -0,0 +1,2 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
@@ -0,0 +1,194 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ from json.decoder import JSONDecodeError
5
+
6
+ from ...core.api_error import ApiError
7
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
8
+ from ...core.jsonable_encoder import jsonable_encoder
9
+ from ...core.pydantic_utilities import pydantic_v1
10
+ from ...core.request_options import RequestOptions
11
+ from ...types.key_indicator_value import KeyIndicatorValue
12
+ from ...types.key_indicators import KeyIndicators
13
+
14
+
15
class IndicatorsClient:
    """Synchronous access to the Prompt dashboard key-indicator endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Shared HTTP layer (base URL, auth headers, underlying httpx client).
        self._client_wrapper = client_wrapper

    def list(self, pk: int, *, request_options: typing.Optional[RequestOptions] = None) -> KeyIndicators:
        """
        Get key indicators for the Prompt dashboard.

        Parameters
        ----------
        pk : int
            Inference run ID

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        KeyIndicators


        Examples
        --------
        from label_studio_sdk.client import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.prompts.indicators.list(
            pk=1,
        )
        """
        url = f"api/inference-runs/{jsonable_encoder(pk)}/indicators"
        response = self._client_wrapper.httpx_client.request(url, method="GET", request_options=request_options)
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(KeyIndicators, response.json())  # type: ignore
            error_body = response.json()
        except JSONDecodeError:
            # Payload was not JSON; surface the raw response text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    def get(
        self, indicator_key: str, pk: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> KeyIndicatorValue:
        """
        Get a specific key indicator for the Prompt dashboard.

        Parameters
        ----------
        indicator_key : str
            Key of the indicator

        pk : int
            Inference run ID

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        KeyIndicatorValue


        Examples
        --------
        from label_studio_sdk.client import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.prompts.indicators.get(
            indicator_key="indicator_key",
            pk=1,
        )
        """
        url = f"api/inference-runs/{jsonable_encoder(pk)}/indicators/{jsonable_encoder(indicator_key)}"
        response = self._client_wrapper.httpx_client.request(url, method="GET", request_options=request_options)
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(KeyIndicatorValue, response.json())  # type: ignore
            error_body = response.json()
        except JSONDecodeError:
            # Payload was not JSON; surface the raw response text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
104
+
105
+
106
class AsyncIndicatorsClient:
    """Asynchronous access to the Prompt dashboard key-indicator endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Shared HTTP layer (base URL, auth headers, underlying httpx client).
        self._client_wrapper = client_wrapper

    async def list(self, pk: int, *, request_options: typing.Optional[RequestOptions] = None) -> KeyIndicators:
        """
        Get key indicators for the Prompt dashboard.

        Parameters
        ----------
        pk : int
            Inference run ID

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        KeyIndicators


        Examples
        --------
        from label_studio_sdk.client import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )
        await client.prompts.indicators.list(
            pk=1,
        )
        """
        url = f"api/inference-runs/{jsonable_encoder(pk)}/indicators"
        response = await self._client_wrapper.httpx_client.request(
            url, method="GET", request_options=request_options
        )
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(KeyIndicators, response.json())  # type: ignore
            error_body = response.json()
        except JSONDecodeError:
            # Payload was not JSON; surface the raw response text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def get(
        self, indicator_key: str, pk: int, *, request_options: typing.Optional[RequestOptions] = None
    ) -> KeyIndicatorValue:
        """
        Get a specific key indicator for the Prompt dashboard.

        Parameters
        ----------
        indicator_key : str
            Key of the indicator

        pk : int
            Inference run ID

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        KeyIndicatorValue


        Examples
        --------
        from label_studio_sdk.client import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )
        await client.prompts.indicators.get(
            indicator_key="indicator_key",
            pk=1,
        )
        """
        url = f"api/inference-runs/{jsonable_encoder(pk)}/indicators/{jsonable_encoder(indicator_key)}"
        response = await self._client_wrapper.httpx_client.request(
            url, method="GET", request_options=request_options
        )
        try:
            if 200 <= response.status_code < 300:
                return pydantic_v1.parse_obj_as(KeyIndicatorValue, response.json())  # type: ignore
            error_body = response.json()
        except JSONDecodeError:
            # Payload was not JSON; surface the raw response text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from .types import RunsListRequestProjectSubset
4
+
5
+ __all__ = ["RunsListRequestProjectSubset"]
@@ -0,0 +1,354 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+ from json.decoder import JSONDecodeError
6
+
7
+ from ...core.api_error import ApiError
8
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
+ from ...core.jsonable_encoder import jsonable_encoder
10
+ from ...core.pydantic_utilities import pydantic_v1
11
+ from ...core.request_options import RequestOptions
12
+ from ...types.inference_run import InferenceRun
13
+ from ...types.inference_run_created_by import InferenceRunCreatedBy
14
+ from ...types.inference_run_organization import InferenceRunOrganization
15
+ from ...types.inference_run_project_subset import InferenceRunProjectSubset
16
+ from ...types.inference_run_status import InferenceRunStatus
17
+ from .types.runs_list_request_project_subset import RunsListRequestProjectSubset
18
+
19
+ # this is used as the default value for optional parameters
20
+ OMIT = typing.cast(typing.Any, ...)
21
+
22
+
23
class RunsClient:
    """Synchronous client for prompt-version inference-run endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def list(
        self,
        id: int,
        version_id: int,
        *,
        project: int,
        project_subset: RunsListRequestProjectSubset,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> InferenceRun:
        """
        Get information (status, metadata, etc) about an existing inference run

        Parameters
        ----------
        id : int
            Prompt ID

        version_id : int
            Prompt Version ID

        project : int
            The ID of the project that this Inference Run makes predictions on

        project_subset : RunsListRequestProjectSubset
            Defines which tasks are operated on (e.g. HasGT will only operate on tasks with a ground truth annotation, but All will operate on all records)

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        InferenceRun
            Success

        Examples
        --------
        from label_studio_sdk.client import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.prompts.runs.list(
            id=1,
            version_id=1,
            project=1,
            project_subset="All",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
            method="GET",
            params={"project": project, "project_subset": project_subset},
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(InferenceRun, _response.json())  # type: ignore
            _response_json = _response.json()
        except JSONDecodeError:
            # Error payload was not JSON; report the raw response text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def create(
        self,
        id: int,
        version_id: int,
        *,
        project: int,
        project_subset: InferenceRunProjectSubset,
        organization: typing.Optional[InferenceRunOrganization] = OMIT,
        model_version: typing.Optional[str] = OMIT,
        created_by: typing.Optional[InferenceRunCreatedBy] = OMIT,
        status: typing.Optional[InferenceRunStatus] = OMIT,
        job_id: typing.Optional[str] = OMIT,
        created_at: typing.Optional[dt.datetime] = OMIT,
        triggered_at: typing.Optional[dt.datetime] = OMIT,
        predictions_updated_at: typing.Optional[dt.datetime] = OMIT,
        completed_at: typing.Optional[dt.datetime] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> InferenceRun:
        """
        Run a prompt inference.

        Parameters
        ----------
        id : int
            Prompt ID

        version_id : int
            Prompt Version ID

        project : int

        project_subset : InferenceRunProjectSubset

        organization : typing.Optional[InferenceRunOrganization]

        model_version : typing.Optional[str]

        created_by : typing.Optional[InferenceRunCreatedBy]

        status : typing.Optional[InferenceRunStatus]

        job_id : typing.Optional[str]

        created_at : typing.Optional[dt.datetime]

        triggered_at : typing.Optional[dt.datetime]

        predictions_updated_at : typing.Optional[dt.datetime]

        completed_at : typing.Optional[dt.datetime]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        InferenceRun


        Examples
        --------
        from label_studio_sdk.client import LabelStudio

        client = LabelStudio(
            api_key="YOUR_API_KEY",
        )
        client.prompts.runs.create(
            id=1,
            version_id=1,
            project=1,
            project_subset="All",
        )
        """
        # Fields left at the OMIT sentinel are dropped from the request body
        # (omit=OMIT below), so the server applies its own defaults.
        _response = self._client_wrapper.httpx_client.request(
            f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
            method="POST",
            json={
                "organization": organization,
                "project": project,
                "model_version": model_version,
                "created_by": created_by,
                "project_subset": project_subset,
                "status": status,
                "job_id": job_id,
                "created_at": created_at,
                "triggered_at": triggered_at,
                "predictions_updated_at": predictions_updated_at,
                "completed_at": completed_at,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(InferenceRun, _response.json())  # type: ignore
            _response_json = _response.json()
        except JSONDecodeError:
            # Error payload was not JSON; report the raw response text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
188
+
189
+
190
class AsyncRunsClient:
    """Asynchronous client for prompt-version inference-run endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def list(
        self,
        id: int,
        version_id: int,
        *,
        project: int,
        project_subset: RunsListRequestProjectSubset,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> InferenceRun:
        """
        Get information (status, metadata, etc) about an existing inference run

        Parameters
        ----------
        id : int
            Prompt ID

        version_id : int
            Prompt Version ID

        project : int
            The ID of the project that this Inference Run makes predictions on

        project_subset : RunsListRequestProjectSubset
            Defines which tasks are operated on (e.g. HasGT will only operate on tasks with a ground truth annotation, but All will operate on all records)

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        InferenceRun
            Success

        Examples
        --------
        from label_studio_sdk.client import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )
        await client.prompts.runs.list(
            id=1,
            version_id=1,
            project=1,
            project_subset="All",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
            method="GET",
            params={"project": project, "project_subset": project_subset},
            request_options=request_options,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(InferenceRun, _response.json())  # type: ignore
            _response_json = _response.json()
        except JSONDecodeError:
            # Error payload was not JSON; report the raw response text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def create(
        self,
        id: int,
        version_id: int,
        *,
        project: int,
        project_subset: InferenceRunProjectSubset,
        organization: typing.Optional[InferenceRunOrganization] = OMIT,
        model_version: typing.Optional[str] = OMIT,
        created_by: typing.Optional[InferenceRunCreatedBy] = OMIT,
        status: typing.Optional[InferenceRunStatus] = OMIT,
        job_id: typing.Optional[str] = OMIT,
        created_at: typing.Optional[dt.datetime] = OMIT,
        triggered_at: typing.Optional[dt.datetime] = OMIT,
        predictions_updated_at: typing.Optional[dt.datetime] = OMIT,
        completed_at: typing.Optional[dt.datetime] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> InferenceRun:
        """
        Run a prompt inference.

        Parameters
        ----------
        id : int
            Prompt ID

        version_id : int
            Prompt Version ID

        project : int

        project_subset : InferenceRunProjectSubset

        organization : typing.Optional[InferenceRunOrganization]

        model_version : typing.Optional[str]

        created_by : typing.Optional[InferenceRunCreatedBy]

        status : typing.Optional[InferenceRunStatus]

        job_id : typing.Optional[str]

        created_at : typing.Optional[dt.datetime]

        triggered_at : typing.Optional[dt.datetime]

        predictions_updated_at : typing.Optional[dt.datetime]

        completed_at : typing.Optional[dt.datetime]

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        InferenceRun


        Examples
        --------
        from label_studio_sdk.client import AsyncLabelStudio

        client = AsyncLabelStudio(
            api_key="YOUR_API_KEY",
        )
        await client.prompts.runs.create(
            id=1,
            version_id=1,
            project=1,
            project_subset="All",
        )
        """
        # Fields left at the OMIT sentinel are dropped from the request body
        # (omit=OMIT below), so the server applies its own defaults.
        _response = await self._client_wrapper.httpx_client.request(
            f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
            method="POST",
            json={
                "organization": organization,
                "project": project,
                "model_version": model_version,
                "created_by": created_by,
                "project_subset": project_subset,
                "status": status,
                "job_id": job_id,
                "created_at": created_at,
                "triggered_at": triggered_at,
                "predictions_updated_at": predictions_updated_at,
                "completed_at": completed_at,
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return pydantic_v1.parse_obj_as(InferenceRun, _response.json())  # type: ignore
            _response_json = _response.json()
        except JSONDecodeError:
            # Error payload was not JSON; report the raw response text.
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from .runs_list_request_project_subset import RunsListRequestProjectSubset
4
+
5
+ __all__ = ["RunsListRequestProjectSubset"]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
# Allowed values of the `project_subset` query parameter for listing inference
# runs. The union with typing.Any keeps the alias forward-compatible with enum
# values added to the API after this SDK version was generated.
RunsListRequestProjectSubset = typing.Union[typing.Literal["All", "HasGT", "Sample"], typing.Any]
@@ -0,0 +1,15 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from .prompts_batch_failed_predictions_request_failed_predictions_item import (
4
+ PromptsBatchFailedPredictionsRequestFailedPredictionsItem,
5
+ )
6
+ from .prompts_batch_failed_predictions_response import PromptsBatchFailedPredictionsResponse
7
+ from .prompts_batch_predictions_request_results_item import PromptsBatchPredictionsRequestResultsItem
8
+ from .prompts_batch_predictions_response import PromptsBatchPredictionsResponse
9
+
10
+ __all__ = [
11
+ "PromptsBatchFailedPredictionsRequestFailedPredictionsItem",
12
+ "PromptsBatchFailedPredictionsResponse",
13
+ "PromptsBatchPredictionsRequestResultsItem",
14
+ "PromptsBatchPredictionsResponse",
15
+ ]
@@ -0,0 +1,42 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ...core.datetime_utils import serialize_datetime
7
+ from ...core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
8
+
9
+
10
class PromptsBatchFailedPredictionsRequestFailedPredictionsItem(pydantic_v1.BaseModel):
    """One failed-prediction record in a batch failed-predictions request."""

    task_id: typing.Optional[int] = pydantic_v1.Field(default=None)
    """
    Task ID to associate the prediction with
    """

    error_type: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Type of error (e.g. "Timeout", "Rate Limit", etc)
    """

    message: typing.Optional[str] = pydantic_v1.Field(default=None)
    """
    Error message details
    """

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize using field aliases and omitting fields never set by the caller.
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # NOTE(review): merges an exclude_unset serialization with an exclude_none
        # one via deep_union_pydantic_dicts — presumably so both unset and
        # None-valued optional fields are dropped consistently; confirm against
        # core.pydantic_utilities.
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Immutable and hashable instances; allow extra server-sent fields so
        # newer API responses still parse.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,29 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ...core.datetime_utils import serialize_datetime
7
+ from ...core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
8
+
9
+
10
class PromptsBatchFailedPredictionsResponse(pydantic_v1.BaseModel):
    """Response body returned by the batch failed-predictions endpoint."""

    # Optional human-readable detail message from the server.
    detail: typing.Optional[str] = None

    def json(self, **kwargs: typing.Any) -> str:
        # Serialize using field aliases and omitting fields never set by the caller.
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # NOTE(review): merges an exclude_unset serialization with an exclude_none
        # one via deep_union_pydantic_dicts — presumably so both unset and
        # None-valued optional fields are dropped consistently; confirm against
        # core.pydantic_utilities.
        kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}

        return deep_union_pydantic_dicts(
            super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
        )

    class Config:
        # Immutable and hashable instances; allow extra server-sent fields so
        # newer API responses still parse.
        frozen = True
        smart_union = True
        extra = pydantic_v1.Extra.allow
        json_encoders = {dt.datetime: serialize_datetime}