together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +101 -63
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +108 -75
- together/lib/cli/api/evals.py +588 -0
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
- together/lib/cli/api/models.py +140 -0
- together/{cli → lib/cli}/api/utils.py +6 -7
- together/{cli → lib/cli}/cli.py +16 -24
- together/{constants.py → lib/constants.py} +17 -12
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +455 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/lib/utils/files.py +628 -0
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -33
- together/resources/audio/__init__.py +72 -21
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -122
- together/resources/audio/transcriptions.py +282 -0
- together/resources/audio/translations.py +256 -0
- together/resources/audio/voices.py +135 -0
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +598 -395
- together/resources/evals.py +452 -0
- together/resources/files.py +398 -121
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +256 -108
- together/resources/jobs.py +214 -0
- together/resources/models.py +238 -90
- together/resources/rerank.py +190 -92
- together/resources/videos.py +374 -0
- together/types/__init__.py +65 -109
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_job.py +57 -0
- together-2.0.0a8.dist-info/METADATA +680 -0
- together-2.0.0a8.dist-info/RECORD +164 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
- together-2.0.0a8.dist-info/entry_points.txt +2 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -729
- together/cli/api/chat.py +0 -276
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/models.py +0 -55
- together/client.py +0 -176
- together/error.py +0 -194
- together/filemanager.py +0 -389
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -136
- together/resources/code_interpreter.py +0 -82
- together/resources/finetune.py +0 -1064
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -110
- together/types/batch.py +0 -53
- together/types/chat_completions.py +0 -197
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -66
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/files.py +0 -90
- together/types/finetune.py +0 -398
- together/types/images.py +0 -44
- together/types/models.py +0 -45
- together/types/rerank.py +0 -43
- together/utils/api_helpers.py +0 -124
- together/utils/files.py +0 -425
- together/version.py +0 -6
- together-1.5.17.dist-info/METADATA +0 -525
- together-1.5.17.dist-info/RECORD +0 -69
- together-1.5.17.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
from typing_extensions import Literal
|
|
5
|
+
|
|
6
|
+
from .._models import BaseModel
|
|
7
|
+
from .autoscaling import Autoscaling
|
|
8
|
+
|
|
9
|
+
__all__ = ["DedicatedEndpoint"]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class DedicatedEndpoint(BaseModel):
    # Full representation of a dedicated inference endpoint as returned by the
    # endpoints API. Generated from the OpenAPI spec; field docstrings are kept
    # verbatim because pydantic may surface them as schema descriptions.
    id: str
    """Unique identifier for the endpoint"""

    autoscaling: Autoscaling
    """Configuration for automatic scaling of the endpoint"""

    created_at: datetime
    """Timestamp when the endpoint was created"""

    display_name: str
    """Human-readable name for the endpoint"""

    hardware: str
    """The hardware configuration used for this endpoint"""

    model: str
    """The model deployed on this endpoint"""

    name: str
    """System name for the endpoint"""

    object: Literal["endpoint"]
    """The type of object"""

    owner: str
    """The owner of this endpoint"""

    state: Literal["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"]
    """Current state of the endpoint"""

    type: Literal["dedicated"]
    """The type of endpoint"""
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
from typing_extensions import Literal
|
|
5
|
+
|
|
6
|
+
from .._models import BaseModel
|
|
7
|
+
|
|
8
|
+
__all__ = ["Embedding", "Data"]
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class Data(BaseModel):
    # A single embedding vector together with its position in the request batch.
    embedding: List[float]

    index: int

    object: Literal["embedding"]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class Embedding(BaseModel):
    # Response envelope for an embeddings request: one Data entry per input,
    # plus the model name and a "list" object tag.
    data: List[Data]

    model: str

    object: Literal["list"]
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Union
|
|
6
|
+
from typing_extensions import Literal, Required, TypedDict
|
|
7
|
+
|
|
8
|
+
from .._types import SequenceNotStr
|
|
9
|
+
|
|
10
|
+
__all__ = ["EmbeddingCreateParams"]
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class EmbeddingCreateParams(TypedDict, total=False):
|
|
14
|
+
input: Required[Union[str, SequenceNotStr[str]]]
|
|
15
|
+
"""A string providing the text for the model to embed."""
|
|
16
|
+
|
|
17
|
+
model: Required[
|
|
18
|
+
Union[
|
|
19
|
+
Literal[
|
|
20
|
+
"WhereIsAI/UAE-Large-V1",
|
|
21
|
+
"BAAI/bge-large-en-v1.5",
|
|
22
|
+
"BAAI/bge-base-en-v1.5",
|
|
23
|
+
"togethercomputer/m2-bert-80M-8k-retrieval",
|
|
24
|
+
],
|
|
25
|
+
str,
|
|
26
|
+
]
|
|
27
|
+
]
|
|
28
|
+
"""The name of the embedding model to use.
|
|
29
|
+
|
|
30
|
+
[See all of Together AI's embedding models](https://docs.together.ai/docs/serverless-models#embedding-models)
|
|
31
|
+
"""
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Optional
|
|
6
|
+
from typing_extensions import Literal, Required, TypedDict
|
|
7
|
+
|
|
8
|
+
from .autoscaling_param import AutoscalingParam
|
|
9
|
+
|
|
10
|
+
__all__ = ["EndpointCreateParams"]
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class EndpointCreateParams(TypedDict, total=False):
|
|
14
|
+
autoscaling: Required[AutoscalingParam]
|
|
15
|
+
"""Configuration for automatic scaling of the endpoint"""
|
|
16
|
+
|
|
17
|
+
hardware: Required[str]
|
|
18
|
+
"""The hardware configuration to use for this endpoint"""
|
|
19
|
+
|
|
20
|
+
model: Required[str]
|
|
21
|
+
"""The model to deploy on this endpoint"""
|
|
22
|
+
|
|
23
|
+
availability_zone: str
|
|
24
|
+
"""Create the endpoint in a specified availability zone (e.g., us-central-4b)"""
|
|
25
|
+
|
|
26
|
+
disable_prompt_cache: bool
|
|
27
|
+
"""Whether to disable the prompt cache for this endpoint"""
|
|
28
|
+
|
|
29
|
+
disable_speculative_decoding: bool
|
|
30
|
+
"""Whether to disable speculative decoding for this endpoint"""
|
|
31
|
+
|
|
32
|
+
display_name: str
|
|
33
|
+
"""A human-readable name for the endpoint"""
|
|
34
|
+
|
|
35
|
+
inactive_timeout: Optional[int]
|
|
36
|
+
"""
|
|
37
|
+
The number of minutes of inactivity after which the endpoint will be
|
|
38
|
+
automatically stopped. Set to null, omit or set to 0 to disable automatic
|
|
39
|
+
timeout.
|
|
40
|
+
"""
|
|
41
|
+
|
|
42
|
+
state: Literal["STARTED", "STOPPED"]
|
|
43
|
+
"""The desired state of the endpoint"""
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
from .._models import BaseModel
|
|
6
|
+
|
|
7
|
+
__all__ = ["EndpointListAvzonesResponse"]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class EndpointListAvzonesResponse(BaseModel):
    # Response listing the availability zones endpoints may be created in.
    avzones: List[str]
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing_extensions import Literal, TypedDict
|
|
6
|
+
|
|
7
|
+
__all__ = ["EndpointListParams"]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class EndpointListParams(TypedDict, total=False):
    # Query parameters for listing endpoints; every filter is optional.
    mine: bool
    """If true, return only endpoints owned by the caller"""

    type: Literal["dedicated", "serverless"]
    """Filter endpoints by type"""

    usage_type: Literal["on-demand", "reserved"]
    """Filter endpoints by usage type"""
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing_extensions import Literal
|
|
6
|
+
|
|
7
|
+
from .._models import BaseModel
|
|
8
|
+
|
|
9
|
+
__all__ = ["EndpointListResponse", "Data"]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Data(BaseModel):
    # Summary of one endpoint in a list response. Unlike DedicatedEndpoint,
    # `type` here may also be "serverless" and no autoscaling/hardware details
    # are included.
    id: str
    """Unique identifier for the endpoint"""

    created_at: datetime
    """Timestamp when the endpoint was created"""

    model: str
    """The model deployed on this endpoint"""

    name: str
    """System name for the endpoint"""

    object: Literal["endpoint"]
    """The type of object"""

    owner: str
    """The owner of this endpoint"""

    state: Literal["PENDING", "STARTING", "STARTED", "STOPPING", "STOPPED", "ERROR"]
    """Current state of the endpoint"""

    type: Literal["serverless", "dedicated"]
    """The type of endpoint"""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class EndpointListResponse(BaseModel):
    # List envelope: endpoint summaries plus the "list" object tag.
    data: List[Data]

    object: Literal["list"]
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Optional
|
|
6
|
+
from typing_extensions import Literal, TypedDict
|
|
7
|
+
|
|
8
|
+
from .autoscaling_param import AutoscalingParam
|
|
9
|
+
|
|
10
|
+
__all__ = ["EndpointUpdateParams"]
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class EndpointUpdateParams(TypedDict, total=False):
    # Request body for updating an endpoint; every field is optional and only
    # supplied fields are changed.
    autoscaling: AutoscalingParam
    """New autoscaling configuration for the endpoint"""

    display_name: str
    """A human-readable name for the endpoint"""

    inactive_timeout: Optional[int]
    """
    The number of minutes of inactivity after which the endpoint will be
    automatically stopped. Set to 0 to disable automatic timeout.
    """

    state: Literal["STARTED", "STOPPED"]
    """The desired state of the endpoint"""
|
|
@@ -0,0 +1,263 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Union
|
|
6
|
+
from typing_extensions import Literal, Required, TypeAlias, TypedDict
|
|
7
|
+
|
|
8
|
+
from .._types import SequenceNotStr
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"EvalCreateParams",
|
|
12
|
+
"Parameters",
|
|
13
|
+
"ParametersEvaluationClassifyParameters",
|
|
14
|
+
"ParametersEvaluationClassifyParametersJudge",
|
|
15
|
+
"ParametersEvaluationClassifyParametersModelToEvaluate",
|
|
16
|
+
"ParametersEvaluationClassifyParametersModelToEvaluateEvaluationModelRequest",
|
|
17
|
+
"ParametersEvaluationScoreParameters",
|
|
18
|
+
"ParametersEvaluationScoreParametersJudge",
|
|
19
|
+
"ParametersEvaluationScoreParametersModelToEvaluate",
|
|
20
|
+
"ParametersEvaluationScoreParametersModelToEvaluateEvaluationModelRequest",
|
|
21
|
+
"ParametersEvaluationCompareParameters",
|
|
22
|
+
"ParametersEvaluationCompareParametersJudge",
|
|
23
|
+
"ParametersEvaluationCompareParametersModelA",
|
|
24
|
+
"ParametersEvaluationCompareParametersModelAEvaluationModelRequest",
|
|
25
|
+
"ParametersEvaluationCompareParametersModelB",
|
|
26
|
+
"ParametersEvaluationCompareParametersModelBEvaluationModelRequest",
|
|
27
|
+
]
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class EvalCreateParams(TypedDict, total=False):
|
|
31
|
+
parameters: Required[Parameters]
|
|
32
|
+
"""Type-specific parameters for the evaluation"""
|
|
33
|
+
|
|
34
|
+
type: Required[Literal["classify", "score", "compare"]]
|
|
35
|
+
"""The type of evaluation to perform"""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ParametersEvaluationClassifyParametersJudge(TypedDict, total=False):
|
|
39
|
+
model: Required[str]
|
|
40
|
+
"""Name of the judge model"""
|
|
41
|
+
|
|
42
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
43
|
+
"""Source of the judge model."""
|
|
44
|
+
|
|
45
|
+
system_template: Required[str]
|
|
46
|
+
"""System prompt template for the judge"""
|
|
47
|
+
|
|
48
|
+
external_api_token: str
|
|
49
|
+
"""Bearer/API token for external judge models."""
|
|
50
|
+
|
|
51
|
+
external_base_url: str
|
|
52
|
+
"""Base URL for external judge models. Must be OpenAI-compatible base URL."""
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class ParametersEvaluationClassifyParametersModelToEvaluateEvaluationModelRequest(TypedDict, total=False):
|
|
56
|
+
input_template: Required[str]
|
|
57
|
+
"""Input prompt template"""
|
|
58
|
+
|
|
59
|
+
max_tokens: Required[int]
|
|
60
|
+
"""Maximum number of tokens to generate"""
|
|
61
|
+
|
|
62
|
+
model: Required[str]
|
|
63
|
+
"""Name of the model to evaluate"""
|
|
64
|
+
|
|
65
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
66
|
+
"""Source of the model."""
|
|
67
|
+
|
|
68
|
+
system_template: Required[str]
|
|
69
|
+
"""System prompt template"""
|
|
70
|
+
|
|
71
|
+
temperature: Required[float]
|
|
72
|
+
"""Sampling temperature"""
|
|
73
|
+
|
|
74
|
+
external_api_token: str
|
|
75
|
+
"""Bearer/API token for external models."""
|
|
76
|
+
|
|
77
|
+
external_base_url: str
|
|
78
|
+
"""Base URL for external models. Must be OpenAI-compatible base URL"""
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
ParametersEvaluationClassifyParametersModelToEvaluate: TypeAlias = Union[
|
|
82
|
+
str, ParametersEvaluationClassifyParametersModelToEvaluateEvaluationModelRequest
|
|
83
|
+
]
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
class ParametersEvaluationClassifyParameters(TypedDict, total=False):
|
|
87
|
+
input_data_file_path: Required[str]
|
|
88
|
+
"""Data file ID"""
|
|
89
|
+
|
|
90
|
+
judge: Required[ParametersEvaluationClassifyParametersJudge]
|
|
91
|
+
|
|
92
|
+
labels: Required[SequenceNotStr[str]]
|
|
93
|
+
"""List of possible classification labels"""
|
|
94
|
+
|
|
95
|
+
pass_labels: Required[SequenceNotStr[str]]
|
|
96
|
+
"""List of labels that are considered passing"""
|
|
97
|
+
|
|
98
|
+
model_to_evaluate: ParametersEvaluationClassifyParametersModelToEvaluate
|
|
99
|
+
"""Field name in the input data"""
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class ParametersEvaluationScoreParametersJudge(TypedDict, total=False):
|
|
103
|
+
model: Required[str]
|
|
104
|
+
"""Name of the judge model"""
|
|
105
|
+
|
|
106
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
107
|
+
"""Source of the judge model."""
|
|
108
|
+
|
|
109
|
+
system_template: Required[str]
|
|
110
|
+
"""System prompt template for the judge"""
|
|
111
|
+
|
|
112
|
+
external_api_token: str
|
|
113
|
+
"""Bearer/API token for external judge models."""
|
|
114
|
+
|
|
115
|
+
external_base_url: str
|
|
116
|
+
"""Base URL for external judge models. Must be OpenAI-compatible base URL."""
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class ParametersEvaluationScoreParametersModelToEvaluateEvaluationModelRequest(TypedDict, total=False):
|
|
120
|
+
input_template: Required[str]
|
|
121
|
+
"""Input prompt template"""
|
|
122
|
+
|
|
123
|
+
max_tokens: Required[int]
|
|
124
|
+
"""Maximum number of tokens to generate"""
|
|
125
|
+
|
|
126
|
+
model: Required[str]
|
|
127
|
+
"""Name of the model to evaluate"""
|
|
128
|
+
|
|
129
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
130
|
+
"""Source of the model."""
|
|
131
|
+
|
|
132
|
+
system_template: Required[str]
|
|
133
|
+
"""System prompt template"""
|
|
134
|
+
|
|
135
|
+
temperature: Required[float]
|
|
136
|
+
"""Sampling temperature"""
|
|
137
|
+
|
|
138
|
+
external_api_token: str
|
|
139
|
+
"""Bearer/API token for external models."""
|
|
140
|
+
|
|
141
|
+
external_base_url: str
|
|
142
|
+
"""Base URL for external models. Must be OpenAI-compatible base URL"""
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
ParametersEvaluationScoreParametersModelToEvaluate: TypeAlias = Union[
|
|
146
|
+
str, ParametersEvaluationScoreParametersModelToEvaluateEvaluationModelRequest
|
|
147
|
+
]
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
class ParametersEvaluationScoreParameters(TypedDict, total=False):
|
|
151
|
+
input_data_file_path: Required[str]
|
|
152
|
+
"""Data file ID"""
|
|
153
|
+
|
|
154
|
+
judge: Required[ParametersEvaluationScoreParametersJudge]
|
|
155
|
+
|
|
156
|
+
max_score: Required[float]
|
|
157
|
+
"""Maximum possible score"""
|
|
158
|
+
|
|
159
|
+
min_score: Required[float]
|
|
160
|
+
"""Minimum possible score"""
|
|
161
|
+
|
|
162
|
+
pass_threshold: Required[float]
|
|
163
|
+
"""Score threshold for passing"""
|
|
164
|
+
|
|
165
|
+
model_to_evaluate: ParametersEvaluationScoreParametersModelToEvaluate
|
|
166
|
+
"""Field name in the input data"""
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
class ParametersEvaluationCompareParametersJudge(TypedDict, total=False):
|
|
170
|
+
model: Required[str]
|
|
171
|
+
"""Name of the judge model"""
|
|
172
|
+
|
|
173
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
174
|
+
"""Source of the judge model."""
|
|
175
|
+
|
|
176
|
+
system_template: Required[str]
|
|
177
|
+
"""System prompt template for the judge"""
|
|
178
|
+
|
|
179
|
+
external_api_token: str
|
|
180
|
+
"""Bearer/API token for external judge models."""
|
|
181
|
+
|
|
182
|
+
external_base_url: str
|
|
183
|
+
"""Base URL for external judge models. Must be OpenAI-compatible base URL."""
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
class ParametersEvaluationCompareParametersModelAEvaluationModelRequest(TypedDict, total=False):
|
|
187
|
+
input_template: Required[str]
|
|
188
|
+
"""Input prompt template"""
|
|
189
|
+
|
|
190
|
+
max_tokens: Required[int]
|
|
191
|
+
"""Maximum number of tokens to generate"""
|
|
192
|
+
|
|
193
|
+
model: Required[str]
|
|
194
|
+
"""Name of the model to evaluate"""
|
|
195
|
+
|
|
196
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
197
|
+
"""Source of the model."""
|
|
198
|
+
|
|
199
|
+
system_template: Required[str]
|
|
200
|
+
"""System prompt template"""
|
|
201
|
+
|
|
202
|
+
temperature: Required[float]
|
|
203
|
+
"""Sampling temperature"""
|
|
204
|
+
|
|
205
|
+
external_api_token: str
|
|
206
|
+
"""Bearer/API token for external models."""
|
|
207
|
+
|
|
208
|
+
external_base_url: str
|
|
209
|
+
"""Base URL for external models. Must be OpenAI-compatible base URL"""
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
ParametersEvaluationCompareParametersModelA: TypeAlias = Union[
|
|
213
|
+
str, ParametersEvaluationCompareParametersModelAEvaluationModelRequest
|
|
214
|
+
]
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class ParametersEvaluationCompareParametersModelBEvaluationModelRequest(TypedDict, total=False):
|
|
218
|
+
input_template: Required[str]
|
|
219
|
+
"""Input prompt template"""
|
|
220
|
+
|
|
221
|
+
max_tokens: Required[int]
|
|
222
|
+
"""Maximum number of tokens to generate"""
|
|
223
|
+
|
|
224
|
+
model: Required[str]
|
|
225
|
+
"""Name of the model to evaluate"""
|
|
226
|
+
|
|
227
|
+
model_source: Required[Literal["serverless", "dedicated", "external"]]
|
|
228
|
+
"""Source of the model."""
|
|
229
|
+
|
|
230
|
+
system_template: Required[str]
|
|
231
|
+
"""System prompt template"""
|
|
232
|
+
|
|
233
|
+
temperature: Required[float]
|
|
234
|
+
"""Sampling temperature"""
|
|
235
|
+
|
|
236
|
+
external_api_token: str
|
|
237
|
+
"""Bearer/API token for external models."""
|
|
238
|
+
|
|
239
|
+
external_base_url: str
|
|
240
|
+
"""Base URL for external models. Must be OpenAI-compatible base URL"""
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
ParametersEvaluationCompareParametersModelB: TypeAlias = Union[
|
|
244
|
+
str, ParametersEvaluationCompareParametersModelBEvaluationModelRequest
|
|
245
|
+
]
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
class ParametersEvaluationCompareParameters(TypedDict, total=False):
|
|
249
|
+
input_data_file_path: Required[str]
|
|
250
|
+
"""Data file name"""
|
|
251
|
+
|
|
252
|
+
judge: Required[ParametersEvaluationCompareParametersJudge]
|
|
253
|
+
|
|
254
|
+
model_a: ParametersEvaluationCompareParametersModelA
|
|
255
|
+
"""Field name in the input data"""
|
|
256
|
+
|
|
257
|
+
model_b: ParametersEvaluationCompareParametersModelB
|
|
258
|
+
"""Field name in the input data"""
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
Parameters: TypeAlias = Union[
|
|
262
|
+
ParametersEvaluationClassifyParameters, ParametersEvaluationScoreParameters, ParametersEvaluationCompareParameters
|
|
263
|
+
]
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
from typing_extensions import Literal
|
|
5
|
+
|
|
6
|
+
from .._models import BaseModel
|
|
7
|
+
|
|
8
|
+
__all__ = ["EvalCreateResponse"]
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class EvalCreateResponse(BaseModel):
    # Acknowledgement returned when an evaluation job is created.
    status: Optional[Literal["pending"]] = None
    """Initial status of the job"""

    workflow_id: Optional[str] = None
    """The ID of the created evaluation job"""
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing_extensions import Annotated, TypedDict
|
|
6
|
+
|
|
7
|
+
from .._utils import PropertyInfo
|
|
8
|
+
|
|
9
|
+
__all__ = ["EvalListParams"]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class EvalListParams(TypedDict, total=False):
    # Query parameters for listing evaluation jobs; all fields optional.
    limit: int

    status: str

    user_id: Annotated[str, PropertyInfo(alias="userId")]
    """Admin users can specify a user ID to filter jobs.

    Pass empty string to get all jobs.
    """
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
from typing_extensions import TypeAlias
|
|
5
|
+
|
|
6
|
+
from .evaluation_job import EvaluationJob
|
|
7
|
+
|
|
8
|
+
__all__ = ["EvalListResponse"]
|
|
9
|
+
|
|
10
|
+
# The list endpoint returns a bare JSON array of evaluation jobs (no
# envelope object), hence a type alias rather than a model class.
EvalListResponse: TypeAlias = List[EvaluationJob]
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import Union, Optional
|
|
4
|
+
from typing_extensions import Literal, TypeAlias
|
|
5
|
+
|
|
6
|
+
from pydantic import Field as FieldInfo
|
|
7
|
+
|
|
8
|
+
from .._models import BaseModel
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"EvalStatusResponse",
|
|
12
|
+
"Results",
|
|
13
|
+
"ResultsEvaluationClassifyResults",
|
|
14
|
+
"ResultsEvaluationScoreResults",
|
|
15
|
+
"ResultsEvaluationScoreResultsAggregatedScores",
|
|
16
|
+
"ResultsEvaluationCompareResults",
|
|
17
|
+
]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ResultsEvaluationClassifyResults(BaseModel):
    # Aggregate results for a "classify" evaluation job.
    generation_fail_count: Optional[float] = None
    """Number of failed generations."""

    invalid_label_count: Optional[float] = None
    """Number of invalid labels"""

    judge_fail_count: Optional[float] = None
    """Number of failed judge generations"""

    label_counts: Optional[str] = None
    """JSON string representing label counts"""

    pass_percentage: Optional[float] = None
    """Percentage of pass labels."""

    result_file_id: Optional[str] = None
    """Data File ID"""


class ResultsEvaluationScoreResultsAggregatedScores(BaseModel):
    # Summary statistics over all per-sample scores.
    mean_score: Optional[float] = None

    pass_percentage: Optional[float] = None

    std_score: Optional[float] = None


class ResultsEvaluationScoreResults(BaseModel):
    # Aggregate results for a "score" evaluation job.
    aggregated_scores: Optional[ResultsEvaluationScoreResultsAggregatedScores] = None

    failed_samples: Optional[float] = None
    """number of failed samples generated from model"""

    generation_fail_count: Optional[float] = None
    """Number of failed generations."""

    invalid_score_count: Optional[float] = None
    """number of invalid scores generated from model"""

    judge_fail_count: Optional[float] = None
    """Number of failed judge generations"""

    result_file_id: Optional[str] = None
    """Data File ID"""


class ResultsEvaluationCompareResults(BaseModel):
    # Aggregate results for a "compare" evaluation job. The wire format uses
    # capitalized keys ("A_wins", "B_wins", "Ties"); FieldInfo aliases map
    # them onto snake_case attributes.
    a_wins: Optional[int] = FieldInfo(alias="A_wins", default=None)
    """Number of times model A won"""

    b_wins: Optional[int] = FieldInfo(alias="B_wins", default=None)
    """Number of times model B won"""

    generation_fail_count: Optional[float] = None
    """Number of failed generations."""

    judge_fail_count: Optional[float] = None
    """Number of failed judge generations"""

    num_samples: Optional[int] = None
    """Total number of samples compared"""

    result_file_id: Optional[str] = None
    """Data File ID"""

    ties: Optional[int] = FieldInfo(alias="Ties", default=None)
    """Number of ties"""


# Which member applies is determined by the job's evaluation type.
Results: TypeAlias = Union[
    ResultsEvaluationClassifyResults, ResultsEvaluationScoreResults, ResultsEvaluationCompareResults
]


class EvalStatusResponse(BaseModel):
    # Polling response for an evaluation job: current status plus, once
    # available, the type-specific results union.
    results: Optional[Results] = None
    """The results of the evaluation job"""

    status: Optional[Literal["completed", "error", "user_error", "running", "queued", "pending"]] = None
    """The status of the evaluation job"""
|