label-studio-sdk 2.0.6__py3-none-any.whl → 2.0.7__py3-none-any.whl
This diff shows the changes between publicly released versions of this package as they appear in a supported public registry, and is provided for informational purposes only.
Potentially problematic release.
This version of label-studio-sdk might be problematic.
- label_studio_sdk/__init__.py +18 -0
- label_studio_sdk/base_client.py +4 -0
- label_studio_sdk/core/client_wrapper.py +1 -1
- label_studio_sdk/label_interface/control_tags.py +38 -0
- label_studio_sdk/label_interface/data_examples.json +10 -0
- label_studio_sdk/label_interface/interface.py +13 -0
- label_studio_sdk/label_interface/object_tags.py +9 -0
- label_studio_sdk/ml/client.py +124 -0
- label_studio_sdk/organizations/__init__.py +3 -2
- label_studio_sdk/organizations/client.py +536 -1
- label_studio_sdk/organizations/invites/__init__.py +2 -0
- label_studio_sdk/organizations/invites/client.py +368 -0
- label_studio_sdk/organizations/types/__init__.py +5 -0
- label_studio_sdk/organizations/types/patched_default_role_request_custom_scripts_editable_by.py +7 -0
- label_studio_sdk/project_templates/__init__.py +2 -0
- label_studio_sdk/project_templates/client.py +909 -0
- label_studio_sdk/types/__init__.py +14 -0
- label_studio_sdk/types/default_role.py +75 -0
- label_studio_sdk/types/default_role_custom_scripts_editable_by.py +7 -0
- label_studio_sdk/types/lse_project.py +223 -0
- label_studio_sdk/types/lse_project_sampling.py +7 -0
- label_studio_sdk/types/lse_project_skip_queue.py +7 -0
- label_studio_sdk/types/lse_task.py +1 -1
- label_studio_sdk/types/lse_task_serializer_for_reviewers.py +1 -1
- label_studio_sdk/types/project_template.py +41 -0
- label_studio_sdk/types/project_template_request.py +38 -0
- {label_studio_sdk-2.0.6.dist-info → label_studio_sdk-2.0.7.dist-info}/METADATA +1 -1
- {label_studio_sdk-2.0.6.dist-info → label_studio_sdk-2.0.7.dist-info}/RECORD +30 -17
- {label_studio_sdk-2.0.6.dist-info → label_studio_sdk-2.0.7.dist-info}/LICENSE +0 -0
- {label_studio_sdk-2.0.6.dist-info → label_studio_sdk-2.0.7.dist-info}/WHEEL +0 -0
label_studio_sdk/__init__.py
CHANGED

@@ -46,6 +46,8 @@ from .types import (
     ConvertedFormatRequest,
     CountLimit,
     CustomScriptsEditableByEnum,
+    DefaultRole,
+    DefaultRoleCustomScriptsEditableBy,
     DefaultRoleEnum,
     EditionEnum,
     Export,
@@ -78,9 +80,12 @@ from .types import (
     LseOrganization,
     LseOrganizationCustomScriptsEditableBy,
     LseOrganizationMemberList,
+    LseProject,
     LseProjectCreate,
     LseProjectCreateSampling,
     LseProjectCreateSkipQueue,
+    LseProjectSampling,
+    LseProjectSkipQueue,
     LseProjectUpdate,
     LseProjectUpdateSampling,
     LseProjectUpdateSkipQueue,
@@ -154,6 +159,8 @@ from .types import (
     ProjectSampling,
     ProjectSkipQueue,
     ProjectSubsetEnum,
+    ProjectTemplate,
+    ProjectTemplateRequest,
     PromptsStatusEnum,
     ProviderEnum,
     ReasonEnum,
@@ -238,6 +245,7 @@ from . import (
     model_providers,
     organizations,
     predictions,
+    project_templates,
     projects,
     prompts,
     session_policy,
@@ -275,6 +283,7 @@ from .export_storage import ExportStorageListTypesResponseItem
 from .import_storage import ImportStorageListTypesResponseItem
 from .ml import MlCreateRequestAuthMethod, MlListModelVersionsResponse, MlUpdateRequestAuthMethod
 from .model_providers import ModelProvidersListModelProviderChoicesResponse
+from .organizations import PatchedDefaultRoleRequestCustomScriptsEditableBy
 from .projects import (
     LseProjectCreateRequestSampling,
     LseProjectCreateRequestSkipQueue,
@@ -391,6 +400,8 @@ __all__ = [
     "ConvertedFormatRequest",
     "CountLimit",
     "CustomScriptsEditableByEnum",
+    "DefaultRole",
+    "DefaultRoleCustomScriptsEditableBy",
     "DefaultRoleEnum",
     "EditionEnum",
     "Export",
@@ -429,11 +440,14 @@ __all__ = [
     "LseOrganization",
     "LseOrganizationCustomScriptsEditableBy",
     "LseOrganizationMemberList",
+    "LseProject",
     "LseProjectCreate",
     "LseProjectCreateRequestSampling",
     "LseProjectCreateRequestSkipQueue",
     "LseProjectCreateSampling",
     "LseProjectCreateSkipQueue",
+    "LseProjectSampling",
+    "LseProjectSkipQueue",
     "LseProjectUpdate",
     "LseProjectUpdateSampling",
     "LseProjectUpdateSkipQueue",
@@ -498,6 +512,7 @@ __all__ = [
     "PaginatedPaginatedProjectMemberList",
     "PaginatedProjectMember",
     "PaginatedRoleBasedTaskList",
+    "PatchedDefaultRoleRequestCustomScriptsEditableBy",
     "PatchedLseProjectUpdateRequestSampling",
     "PatchedLseProjectUpdateRequestSkipQueue",
     "Pause",
@@ -515,6 +530,8 @@ __all__ = [
     "ProjectSampling",
     "ProjectSkipQueue",
     "ProjectSubsetEnum",
+    "ProjectTemplate",
+    "ProjectTemplateRequest",
     "ProjectsDuplicateResponse",
     "ProjectsImportTasksResponse",
     "ProjectsListRequestFilter",
@@ -630,6 +647,7 @@ __all__ = [
     "model_providers",
     "organizations",
     "predictions",
+    "project_templates",
     "projects",
     "prompts",
     "session_policy",
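Taken together, the hunks above re-export the new generated types and the new `project_templates` sub-module from the package root. A minimal import sketch (the names come straight from the updated `__all__`; their field definitions live in the new files under `label_studio_sdk/types/`, which this diff only lists):

```python
# New names available at the package root in 2.0.7, per the updated __all__ above.
from label_studio_sdk import (
    DefaultRole,
    DefaultRoleCustomScriptsEditableBy,
    LseProject,
    PatchedDefaultRoleRequestCustomScriptsEditableBy,
    ProjectTemplate,
    ProjectTemplateRequest,
    project_templates,  # new sub-module, also exposed as a client attribute (see base_client.py below)
)
```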
label_studio_sdk/base_client.py
CHANGED

@@ -23,6 +23,7 @@ from .ml.client import MlClient
 from .model_providers.client import ModelProvidersClient
 from .prompts.client import PromptsClient
 from .predictions.client import PredictionsClient
+from .project_templates.client import ProjectTemplatesClient
 from .projects.client import ProjectsClient
 from .tasks.client import TasksClient
 from .session_policy.client import SessionPolicyClient
@@ -51,6 +52,7 @@ from .ml.client import AsyncMlClient
 from .model_providers.client import AsyncModelProvidersClient
 from .prompts.client import AsyncPromptsClient
 from .predictions.client import AsyncPredictionsClient
+from .project_templates.client import AsyncProjectTemplatesClient
 from .projects.client import AsyncProjectsClient
 from .tasks.client import AsyncTasksClient
 from .session_policy.client import AsyncSessionPolicyClient
@@ -142,6 +144,7 @@ class LabelStudioBase:
         self.model_providers = ModelProvidersClient(client_wrapper=self._client_wrapper)
         self.prompts = PromptsClient(client_wrapper=self._client_wrapper)
         self.predictions = PredictionsClient(client_wrapper=self._client_wrapper)
+        self.project_templates = ProjectTemplatesClient(client_wrapper=self._client_wrapper)
         self.projects = ProjectsClient(client_wrapper=self._client_wrapper)
         self.tasks = TasksClient(client_wrapper=self._client_wrapper)
         self.session_policy = SessionPolicyClient(client_wrapper=self._client_wrapper)
@@ -233,6 +236,7 @@ class AsyncLabelStudioBase:
         self.model_providers = AsyncModelProvidersClient(client_wrapper=self._client_wrapper)
         self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper)
         self.predictions = AsyncPredictionsClient(client_wrapper=self._client_wrapper)
+        self.project_templates = AsyncProjectTemplatesClient(client_wrapper=self._client_wrapper)
         self.projects = AsyncProjectsClient(client_wrapper=self._client_wrapper)
         self.tasks = AsyncTasksClient(client_wrapper=self._client_wrapper)
         self.session_policy = AsyncSessionPolicyClient(client_wrapper=self._client_wrapper)
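Both client classes now attach the generated project-templates client as an attribute. A minimal usage sketch, reusing the constructor call shown in the ml/client.py docstrings below (the individual methods of `ProjectTemplatesClient` live in the new `project_templates/client.py`, which this diff only lists):

```python
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# New in 2.0.7: the project-templates sub-client is wired up in LabelStudioBase.__init__,
# so it is reachable directly from the main client. Its endpoint methods are defined in
# label_studio_sdk/project_templates/client.py (not shown in this diff).
templates_client = client.project_templates
```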
label_studio_sdk/label_interface/control_tags.py
CHANGED

@@ -39,6 +39,7 @@ _TAG_TO_CLASS = {
     "taxonomy": "TaxonomyTag",
     "textarea": "TextAreaTag",
     "timeserieslabels": "TimeSeriesLabelsTag",
+    "chatmessage": "ChatMessageTag",
 }
 
 
@@ -950,6 +951,43 @@ class RatingTag(ControlTag):
     }
 
 
+class ChatMessageContent(BaseModel):
+    role: str
+    content: str
+    createdAt: Optional[int] = None
+
+
+class ChatMessageValue(BaseModel):
+    chatmessage: ChatMessageContent
+
+
+class ChatMessageTag(ControlTag):
+    """Control tag for chat messages targeting a `<Chat>` object.
+
+    This tag is a hybrid where `from_name == to_name` and `type == 'chatmessage'`.
+    """
+    tag: str = "ChatMessage"
+    _value_class: Type[ChatMessageValue] = ChatMessageValue
+
+    def to_json_schema(self):
+        return {
+            "type": "object",
+            "required": ["chatmessage"],
+            "properties": {
+                "chatmessage": {
+                    "type": "object",
+                    "required": ["role", "content"],
+                    "properties": {
+                        "role": {"type": "string"},
+                        "content": {"type": "string"},
+                        "createdAt": {"type": "number"}
+                    }
+                }
+            },
+            "description": f"Chat message for {self.to_name[0]}"
+        }
+
+
 class RelationsTag(ControlTag):
     """ """
     tag: str = "Relations"
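A small sketch of exercising the new control directly, mirroring the construction used by interface.py further down (the name "dialog" is an arbitrary example, not something defined in this diff):

```python
from label_studio_sdk.label_interface.control_tags import ChatMessageTag

# Build the control the same way interface.py auto-creates it for a <Chat name="dialog"> object.
tag = ChatMessageTag(
    tag="ChatMessage",
    name="dialog",
    to_name=["dialog"],
    attr={"name": "dialog", "toName": "dialog"},
)

# to_json_schema() describes the region value; a conforming payload looks like
# {"chatmessage": {"role": "user", "content": "Hi there!", "createdAt": 1710000000000}}.
schema = tag.to_json_schema()
print(schema["properties"]["chatmessage"]["required"])  # ['role', 'content']
```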
label_studio_sdk/label_interface/data_examples.json
CHANGED

@@ -10,6 +10,12 @@
   "Header": "Task header",
   "Paragraphs": [{"author": "Alice", "text": "Hi, Bob."}, {"author": "Bob", "text": "Hello, Alice!"}, {"author": "Alice", "text": "What's up?"}, {"author": "Bob", "text": "Good. Ciao!"}, {"author": "Alice", "text": "Bye, Bob."}],
   "ParagraphsUrl": "<HOSTNAME>/samples/paragraphs.json?",
+  "Chat": [
+    {"role": "user", "content": "Hi there!", "createdAt": 1710000000000},
+    {"role": "assistant", "content": "Hello! How can I help you today?", "createdAt": 1710000005000},
+    {"role": "user", "content": "Can you summarize our onboarding checklist?", "createdAt": 1710000010000},
+    {"role": "assistant", "content": "Sure. It includes account setup, roles, labeling instructions, GT tasks, overlap and review.", "createdAt": 1710000015000}
+  ],
   "Table": {"Card number": 18799210, "First name": "Max", "Last name": "Nobel"},
   "$videoHack": "<video src='<HOSTNAME>/static/samples/opossum_snow.mp4' width=100% controls>",
   "Video": "<HOSTNAME>/static/samples/opossum_snow.mp4",
@@ -85,6 +91,10 @@
   "AudioPlus": "<HOSTNAME>/static/samples/game.wav",
   "Header": "Task header",
   "Paragraphs": [{"author": "Alice", "text": "Hi, Bob."}, {"author": "Bob", "text": "Hello, Alice!"}, {"author": "Alice", "text": "What's up?"}, {"author": "Bob", "text": "Good. Ciao!"}, {"author": "Alice", "text": "Bye, Bob."}],
+  "Chat": [
+    {"role": "user", "content": "Hello!", "createdAt": 1710000000000},
+    {"role": "assistant", "content": "Hi! What can I do for you?", "createdAt": 1710000004000}
+  ],
   "Table": {"Card number": 18799210, "First name": "Max", "Last name": "Nobel"},
   "$videoHack": "<video src='static/samples/opossum_snow.mp4' width=100% controls>",
   "Video": "<HOSTNAME>/static/samples/opossum_snow.mp4",
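The new "Chat" entries define the sample data shape used for chat-style tasks. A hedged sketch of importing a task with that shape through the SDK (the data key "dialog", and the assumption that it must match the value the project's `<Chat>` tag points at, are illustrative and not taken from this diff):

```python
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# A task whose data follows the "Chat" example shape added above.
# "dialog" is a hypothetical data key; project=1 is an arbitrary project id.
client.tasks.create(
    project=1,
    data={
        "dialog": [
            {"role": "user", "content": "Hello!", "createdAt": 1710000000000},
            {"role": "assistant", "content": "Hi! What can I do for you?", "createdAt": 1710000004000},
        ]
    },
)
```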
label_studio_sdk/label_interface/interface.py
CHANGED

@@ -27,6 +27,7 @@ from label_studio_sdk._legacy.exceptions import (
 
 from .base import LabelStudioTag
 from .control_tags import (
+    ChatMessageTag,
     ControlTag,
     ChoicesTag,
     LabelsTag,
@@ -623,6 +624,18 @@ class LabelInterface:
             if lb:
                 labels[lb.parent_name][lb.value] = lb
 
+        # Special handling: auto-create ChatMessage control for each Chat object
+        chat_object_names = [name for name, obj in objects.items() if getattr(obj, 'tag', '').lower() == 'chat']
+        for name in chat_object_names:
+            if name not in controls:
+                controls[name] = ChatMessageTag(
+                    tag='ChatMessage',
+                    name=name,
+                    to_name=[name],
+                    attr={"name": name, "toName": name}
+                )
+
+
         return controls, objects, labels, xml_tree
 
     @classmethod
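A sketch of what the auto-creation above means in practice, assuming a config that contains only a `<Chat>` object and no explicit control (the `value` attribute on `<Chat>` is an assumption; only the tag name comes from this diff):

```python
from label_studio_sdk.label_interface import LabelInterface

# Hypothetical config with a bare <Chat> object and no control tag.
config = """
<View>
  <Chat name="dialog" value="$Chat"/>
</View>
"""

li = LabelInterface(config)
# Per the block added above, parsing this config registers a ChatMessageTag control named
# "dialog" whose to_name points back at the same <Chat> object, so no explicit
# <ChatMessage> tag is required in the config.
```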
label_studio_sdk/label_interface/object_tags.py
CHANGED

@@ -20,6 +20,7 @@ _TAG_TO_CLASS = {
     "list": "ListTag",
     "paragraphs": "ParagraphsTag",
     "timeseries": "TimeSeriesTag",
+    "chat": "ChatTag",
 }
 
 _DATA_EXAMPLES = None
@@ -306,3 +307,11 @@ class TimeSeriesTag(ObjectTag):
         else:
             # data is JSON
             return generate_time_series_json(time_column, value_columns, time_format)
+
+class ChatTag(ObjectTag):
+    """ """
+    tag: str = "Chat"
+
+    def _generate_example(self, examples, only_urls=False):
+        """ """
+        return examples.get("Chat")
label_studio_sdk/ml/client.py
CHANGED

@@ -457,6 +457,64 @@ class MlClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def predict_all_tasks(
+        self,
+        id: int,
+        *,
+        batch_size: typing.Optional[int] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> None:
+        """
+        Note: not available in the community edition of Label Studio.
+
+        Create predictions for all tasks using a specific ML backend so that you can set up an active learning strategy based on the confidence or uncertainty scores associated with the predictions. Creating predictions requires a Label Studio ML backend set up and configured for your project.
+
+        See [Set up machine learning](https://labelstud.io/guide/ml.html) for more details about a Label Studio ML backend.
+
+        Reference the ML backend ID in the path of this API call. Get the ML backend ID by [listing the ML backends for a project](https://labelstud.io/api/#operation/api_ml_list).
+
+        Parameters
+        ----------
+        id : int
+            A unique integer value identifying this ML backend.
+
+        batch_size : typing.Optional[int]
+            Computed number of tasks without predictions that the ML backend needs to predict.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        from label_studio_sdk import LabelStudio
+
+        client = LabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+        client.ml.predict_all_tasks(
+            id=1,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"api/ml/{jsonable_encoder(id)}/predict",
+            method="POST",
+            params={
+                "batch_size": batch_size,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def train(
         self,
         id: int,
@@ -1064,6 +1122,72 @@ class AsyncMlClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def predict_all_tasks(
+        self,
+        id: int,
+        *,
+        batch_size: typing.Optional[int] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> None:
+        """
+        Note: not available in the community edition of Label Studio.
+
+        Create predictions for all tasks using a specific ML backend so that you can set up an active learning strategy based on the confidence or uncertainty scores associated with the predictions. Creating predictions requires a Label Studio ML backend set up and configured for your project.
+
+        See [Set up machine learning](https://labelstud.io/guide/ml.html) for more details about a Label Studio ML backend.
+
+        Reference the ML backend ID in the path of this API call. Get the ML backend ID by [listing the ML backends for a project](https://labelstud.io/api/#operation/api_ml_list).
+
+        Parameters
+        ----------
+        id : int
+            A unique integer value identifying this ML backend.
+
+        batch_size : typing.Optional[int]
+            Computed number of tasks without predictions that the ML backend needs to predict.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        import asyncio
+
+        from label_studio_sdk import AsyncLabelStudio
+
+        client = AsyncLabelStudio(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.ml.predict_all_tasks(
+                id=1,
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"api/ml/{jsonable_encoder(id)}/predict",
+            method="POST",
+            params={
+                "batch_size": batch_size,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def train(
         self,
         id: int,
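The docstring examples above omit the optional `batch_size` query parameter; a short sketch that includes it (the ids and batch size are arbitrary values):

```python
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Ask ML backend 1 to create predictions for tasks that lack them, in batches of 100.
# Per the docstring above, this endpoint is not available in the community edition.
client.ml.predict_all_tasks(id=1, batch_size=100)
```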
label_studio_sdk/organizations/__init__.py
CHANGED

@@ -1,5 +1,6 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from . import members
+from .types import PatchedDefaultRoleRequestCustomScriptsEditableBy
+from . import invites, members
 
-__all__ = ["members"]
+__all__ = ["PatchedDefaultRoleRequestCustomScriptsEditableBy", "invites", "members"]