together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +101 -63
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +108 -75
- together/lib/cli/api/evals.py +588 -0
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
- together/lib/cli/api/models.py +140 -0
- together/{cli → lib/cli}/api/utils.py +6 -7
- together/{cli → lib/cli}/cli.py +16 -24
- together/{constants.py → lib/constants.py} +17 -12
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +455 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/lib/utils/files.py +628 -0
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -33
- together/resources/audio/__init__.py +72 -21
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -122
- together/resources/audio/transcriptions.py +282 -0
- together/resources/audio/translations.py +256 -0
- together/resources/audio/voices.py +135 -0
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +598 -395
- together/resources/evals.py +452 -0
- together/resources/files.py +398 -121
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +256 -108
- together/resources/jobs.py +214 -0
- together/resources/models.py +238 -90
- together/resources/rerank.py +190 -92
- together/resources/videos.py +374 -0
- together/types/__init__.py +65 -109
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_job.py +57 -0
- together-2.0.0a8.dist-info/METADATA +680 -0
- together-2.0.0a8.dist-info/RECORD +164 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
- together-2.0.0a8.dist-info/entry_points.txt +2 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -729
- together/cli/api/chat.py +0 -276
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/models.py +0 -55
- together/client.py +0 -176
- together/error.py +0 -194
- together/filemanager.py +0 -389
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -136
- together/resources/code_interpreter.py +0 -82
- together/resources/finetune.py +0 -1064
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -110
- together/types/batch.py +0 -53
- together/types/chat_completions.py +0 -197
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -66
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/files.py +0 -90
- together/types/finetune.py +0 -398
- together/types/images.py +0 -44
- together/types/models.py +0 -45
- together/types/rerank.py +0 -43
- together/utils/api_helpers.py +0 -124
- together/utils/files.py +0 -425
- together/version.py +0 -6
- together-1.5.17.dist-info/METADATA +0 -525
- together-1.5.17.dist-info/RECORD +0 -69
- together-1.5.17.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
together/types/audio/transcription_create_params.py
@@ -0,0 +1,54 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["TranscriptionCreateParams"]
+
+
+class TranscriptionCreateParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """Audio file to transcribe"""
+
+    diarize: bool
+    """Whether to enable speaker diarization.
+
+    When enabled, you will get the speaker id for each word in the transcription. In
+    the response, in the words array, you will get the speaker id for each word. In
+    addition, we also return the speaker_segments array which contains the speaker
+    id for each speaker segment along with the start and end time of the segment
+    along with all the words in the segment.
+
+    For eg - ... "speaker_segments": [ "speaker_id": "SPEAKER_00", "start": 0,
+    "end": 30.02, "words": [ { "id": 0, "word": "Tijana", "start": 0, "end": 11.475,
+    "speaker_id": "SPEAKER_00" }, ...
+    """
+
+    language: str
+    """Optional ISO 639-1 language code.
+
+    If `auto` is provided, language is auto-detected.
+    """
+
+    model: Literal["openai/whisper-large-v3"]
+    """Model to use for transcription"""
+
+    prompt: str
+    """Optional text to bias decoding."""
+
+    response_format: Literal["json", "verbose_json"]
+    """The format of the response"""
+
+    temperature: float
+    """Sampling temperature between 0.0 and 1.0"""
+
+    timestamp_granularities: Union[Literal["segment", "word"], List[Literal["segment", "word"]]]
+    """Controls level of timestamp detail in verbose_json.
+
+    Only used when response_format is verbose_json. Can be a single granularity or
+    an array to get multiple levels.
+    """
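The params type above is a plain TypedDict, so a request payload is just a dict that satisfies it. A minimal sketch, assuming the audio comes from a local file object (the file path, and how the dict is eventually passed to the client, are not shown in this diff):

```python
# Illustrative sketch only - the file path is hypothetical and the call site
# that consumes this dict is outside this excerpt.
from together.types.audio.transcription_create_params import TranscriptionCreateParams

params: TranscriptionCreateParams = {
    "file": open("meeting.mp3", "rb"),  # Required[FileTypes]
    "model": "openai/whisper-large-v3",
    "language": "auto",  # or an ISO 639-1 code
    "response_format": "verbose_json",
    "timestamp_granularities": ["segment", "word"],
    "diarize": True,
}
```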
together/types/audio/transcription_create_response.py
@@ -0,0 +1,111 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+
+__all__ = [
+    "TranscriptionCreateResponse",
+    "AudioTranscriptionJsonResponse",
+    "AudioTranscriptionVerboseJsonResponse",
+    "AudioTranscriptionVerboseJsonResponseSegment",
+    "AudioTranscriptionVerboseJsonResponseSpeakerSegment",
+    "AudioTranscriptionVerboseJsonResponseSpeakerSegmentWord",
+    "AudioTranscriptionVerboseJsonResponseWord",
+]
+
+
+class AudioTranscriptionJsonResponse(BaseModel):
+    text: str
+    """The transcribed text"""
+
+
+class AudioTranscriptionVerboseJsonResponseSegment(BaseModel):
+    id: int
+    """Unique identifier for the segment"""
+
+    end: float
+    """End time of the segment in seconds"""
+
+    start: float
+    """Start time of the segment in seconds"""
+
+    text: str
+    """The text content of the segment"""
+
+
+class AudioTranscriptionVerboseJsonResponseSpeakerSegmentWord(BaseModel):
+    end: float
+    """End time of the word in seconds"""
+
+    start: float
+    """Start time of the word in seconds"""
+
+    word: str
+    """The word"""
+
+    speaker_id: Optional[str] = None
+    """The speaker id for the word (only when diarize is enabled)"""
+
+
+class AudioTranscriptionVerboseJsonResponseSpeakerSegment(BaseModel):
+    id: int
+    """Unique identifier for the speaker segment"""
+
+    end: float
+    """End time of the speaker segment in seconds"""
+
+    speaker_id: str
+    """The speaker identifier"""
+
+    start: float
+    """Start time of the speaker segment in seconds"""
+
+    text: str
+    """The full text spoken by this speaker in this segment"""
+
+    words: List[AudioTranscriptionVerboseJsonResponseSpeakerSegmentWord]
+    """Array of words spoken by this speaker in this segment"""
+
+
+class AudioTranscriptionVerboseJsonResponseWord(BaseModel):
+    end: float
+    """End time of the word in seconds"""
+
+    start: float
+    """Start time of the word in seconds"""
+
+    word: str
+    """The word"""
+
+    speaker_id: Optional[str] = None
+    """The speaker id for the word (only when diarize is enabled)"""
+
+
+class AudioTranscriptionVerboseJsonResponse(BaseModel):
+    duration: float
+    """The duration of the audio in seconds"""
+
+    language: str
+    """The language of the audio"""
+
+    segments: List[AudioTranscriptionVerboseJsonResponseSegment]
+    """Array of transcription segments"""
+
+    task: Literal["transcribe", "translate"]
+    """The task performed"""
+
+    text: str
+    """The transcribed text"""
+
+    speaker_segments: Optional[List[AudioTranscriptionVerboseJsonResponseSpeakerSegment]] = None
+    """Array of transcription speaker segments (only when diarize is enabled)"""
+
+    words: Optional[List[AudioTranscriptionVerboseJsonResponseWord]] = None
+    """
+    Array of transcription words (only when timestamp_granularities includes 'word')
+    """
+
+
+TranscriptionCreateResponse: TypeAlias = Union[AudioTranscriptionJsonResponse, AudioTranscriptionVerboseJsonResponse]
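Since `TranscriptionCreateResponse` is a union of the plain and verbose shapes, callers narrow it before reading segments or speaker data. A minimal sketch, assuming `response` is whatever a transcription call returned:

```python
# Illustrative sketch only - `response` stands in for a value of the union type.
from together.types.audio.transcription_create_response import (
    AudioTranscriptionVerboseJsonResponse,
    TranscriptionCreateResponse,
)


def speakers_or_text(response: TranscriptionCreateResponse) -> str:
    if isinstance(response, AudioTranscriptionVerboseJsonResponse):
        # speaker_segments is only populated when diarize was enabled
        lines = [
            f"{seg.speaker_id} [{seg.start:.2f}-{seg.end:.2f}]: {seg.text}"
            for seg in (response.speaker_segments or [])
        ]
        return "\n".join(lines) or response.text
    return response.text  # the plain JSON shape only carries the text
```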
together/types/audio/translation_create_params.py
@@ -0,0 +1,40 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Union
+from typing_extensions import Literal, Required, TypedDict
+
+from ..._types import FileTypes
+
+__all__ = ["TranslationCreateParams"]
+
+
+class TranslationCreateParams(TypedDict, total=False):
+    file: Required[FileTypes]
+    """Audio file to translate"""
+
+    language: str
+    """Target output language.
+
+    Optional ISO 639-1 language code. If omitted, language is set to English.
+    """
+
+    model: Literal["openai/whisper-large-v3"]
+    """Model to use for translation"""
+
+    prompt: str
+    """Optional text to bias decoding."""
+
+    response_format: Literal["json", "verbose_json"]
+    """The format of the response"""
+
+    temperature: float
+    """Sampling temperature between 0.0 and 1.0"""
+
+    timestamp_granularities: Union[Literal["segment", "word"], List[Literal["segment", "word"]]]
+    """Controls level of timestamp detail in verbose_json.
+
+    Only used when response_format is verbose_json. Can be a single granularity or
+    an array to get multiple levels.
+    """
together/types/audio/translation_create_response.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from ..._models import BaseModel
+
+__all__ = [
+    "TranslationCreateResponse",
+    "AudioTranslationJsonResponse",
+    "AudioTranslationVerboseJsonResponse",
+    "AudioTranslationVerboseJsonResponseSegment",
+    "AudioTranslationVerboseJsonResponseWord",
+]
+
+
+class AudioTranslationJsonResponse(BaseModel):
+    text: str
+    """The translated text"""
+
+
+class AudioTranslationVerboseJsonResponseSegment(BaseModel):
+    id: int
+    """Unique identifier for the segment"""
+
+    end: float
+    """End time of the segment in seconds"""
+
+    start: float
+    """Start time of the segment in seconds"""
+
+    text: str
+    """The text content of the segment"""
+
+
+class AudioTranslationVerboseJsonResponseWord(BaseModel):
+    end: float
+    """End time of the word in seconds"""
+
+    start: float
+    """Start time of the word in seconds"""
+
+    word: str
+    """The word"""
+
+    speaker_id: Optional[str] = None
+    """The speaker id for the word (only when diarize is enabled)"""
+
+
+class AudioTranslationVerboseJsonResponse(BaseModel):
+    duration: float
+    """The duration of the audio in seconds"""
+
+    language: str
+    """The target language of the translation"""
+
+    segments: List[AudioTranslationVerboseJsonResponseSegment]
+    """Array of translation segments"""
+
+    task: Literal["transcribe", "translate"]
+    """The task performed"""
+
+    text: str
+    """The translated text"""
+
+    words: Optional[List[AudioTranslationVerboseJsonResponseWord]] = None
+    """Array of translation words (only when timestamp_granularities includes 'word')"""
+
+
+TranslationCreateResponse: TypeAlias = Union[AudioTranslationJsonResponse, AudioTranslationVerboseJsonResponse]
together/types/audio/voice_list_response.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["VoiceListResponse", "Data", "DataVoice"]
+
+
+class DataVoice(BaseModel):
+    id: str
+
+    name: str
+
+
+class Data(BaseModel):
+    model: str
+
+    voices: List[DataVoice]
+
+
+class VoiceListResponse(BaseModel):
+    data: List[Data]
together/types/audio_speech_stream_chunk.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["AudioSpeechStreamChunk"]
+
+
+class AudioSpeechStreamChunk(BaseModel):
+    b64: str
+    """base64 encoded audio stream"""
+
+    model: str
+
+    object: Literal["audio.tts.chunk"]
together/types/autoscaling.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .._models import BaseModel
+
+__all__ = ["Autoscaling"]
+
+
+class Autoscaling(BaseModel):
+    max_replicas: int
+    """The maximum number of replicas to scale up to under load"""
+
+    min_replicas: int
+    """The minimum number of replicas to maintain, even when there is no load"""
together/types/autoscaling_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["AutoscalingParam"]
+
+
+class AutoscalingParam(TypedDict, total=False):
+    max_replicas: Required[int]
+    """The maximum number of replicas to scale up to under load"""
+
+    min_replicas: Required[int]
+    """The minimum number of replicas to maintain, even when there is no load"""
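`AutoscalingParam` is the request-side mirror of the `Autoscaling` model, and both keys are required. A minimal sketch of the dict shape (the endpoint create/update call that would consume it is outside this excerpt):

```python
# Illustrative sketch only - where this dict is ultimately passed is an assumption.
from together.types.autoscaling_param import AutoscalingParam

autoscaling: AutoscalingParam = {
    "min_replicas": 1,  # keep one replica warm even with no traffic
    "max_replicas": 4,  # allow scaling up to four replicas under load
}
```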
together/types/batch_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["BatchCreateParams"]
+
+
+class BatchCreateParams(TypedDict, total=False):
+    endpoint: Required[str]
+    """The endpoint to use for batch processing"""
+
+    input_file_id: Required[str]
+    """ID of the uploaded input file containing batch requests"""
+
+    completion_window: str
+    """Time window for batch completion (optional)"""
+
+    model_id: str
+    """Model to use for processing batch requests"""
+
+    priority: int
+    """Priority for batch processing (optional)"""
together/types/batch_create_response.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+from .batch_job import BatchJob
+
+__all__ = ["BatchCreateResponse"]
+
+
+class BatchCreateResponse(BaseModel):
+    job: Optional[BatchJob] = None
+
+    warning: Optional[str] = None
together/types/batch_job.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = ["BatchJob"]
+
+
+class BatchJob(BaseModel):
+    id: Optional[str] = None
+
+    completed_at: Optional[datetime] = None
+
+    created_at: Optional[datetime] = None
+
+    endpoint: Optional[str] = None
+
+    error: Optional[str] = None
+
+    error_file_id: Optional[str] = None
+
+    file_size_bytes: Optional[int] = None
+    """Size of input file in bytes"""
+
+    input_file_id: Optional[str] = None
+
+    job_deadline: Optional[datetime] = None
+
+    x_model_id: Optional[str] = FieldInfo(alias="model_id", default=None)
+    """Model used for processing requests"""
+
+    output_file_id: Optional[str] = None
+
+    progress: Optional[float] = None
+    """Completion progress (0.0 to 100)"""
+
+    status: Optional[Literal["VALIDATING", "IN_PROGRESS", "COMPLETED", "FAILED", "EXPIRED", "CANCELLED"]] = None
+    """Current status of the batch job"""
+
+    user_id: Optional[str] = None
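Every field on `BatchJob` is optional, and the wire field `model_id` is surfaced on the model as `x_model_id` through a pydantic alias. A minimal sketch with hypothetical values:

```python
# Illustrative sketch only - the id and model slug below are hypothetical.
from together.types.batch_job import BatchJob

job = BatchJob(
    id="batch-123",
    status="IN_PROGRESS",
    progress=42.0,
    model_id="example-org/example-model",  # accepted via the alias, read back as x_model_id
)
print(job.x_model_id, job.status, job.progress)
```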
together/types/batch_list_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .batch_job import BatchJob
+
+__all__ = ["BatchListResponse"]
+
+BatchListResponse: TypeAlias = List[BatchJob]
together/types/chat/__init__.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .chat_completion import ChatCompletion as ChatCompletion
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .chat_completion_usage import ChatCompletionUsage as ChatCompletionUsage
+from .chat_completion_warning import ChatCompletionWarning as ChatCompletionWarning
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .chat_completion_structured_message_text_param import (
+    ChatCompletionStructuredMessageTextParam as ChatCompletionStructuredMessageTextParam,
+)
+from .chat_completion_structured_message_image_url_param import (
+    ChatCompletionStructuredMessageImageURLParam as ChatCompletionStructuredMessageImageURLParam,
+)
+from .chat_completion_structured_message_video_url_param import (
+    ChatCompletionStructuredMessageVideoURLParam as ChatCompletionStructuredMessageVideoURLParam,
+)
together/types/chat/chat_completion.py
@@ -0,0 +1,60 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..log_probs import LogProbs
+from ..tool_choice import ToolChoice
+from .chat_completion_usage import ChatCompletionUsage
+from .chat_completion_warning import ChatCompletionWarning
+
+__all__ = ["ChatCompletion", "Choice", "ChoiceMessage", "ChoiceMessageFunctionCall"]
+
+
+class ChoiceMessageFunctionCall(BaseModel):
+    arguments: str
+
+    name: str
+
+
+class ChoiceMessage(BaseModel):
+    content: Optional[str] = None
+
+    role: Literal["assistant"]
+
+    function_call: Optional[ChoiceMessageFunctionCall] = None
+
+    reasoning: Optional[str] = None
+
+    tool_calls: Optional[List[ToolChoice]] = None
+
+
+class Choice(BaseModel):
+    finish_reason: Optional[Literal["stop", "eos", "length", "tool_calls", "function_call"]] = None
+
+    index: Optional[int] = None
+
+    logprobs: Optional[LogProbs] = None
+
+    message: Optional[ChoiceMessage] = None
+
+    seed: Optional[int] = None
+
+    text: Optional[str] = None
+
+
+class ChatCompletion(BaseModel):
+    id: str
+
+    choices: List[Choice]
+
+    created: int
+
+    model: str
+
+    object: Literal["chat.completion"]
+
+    usage: Optional[ChatCompletionUsage] = None
+
+    warnings: Optional[List[ChatCompletionWarning]] = None
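On the response side, `choices` is where the assistant message lives, and everything inside a `Choice` is optional, so lookups need guarding. A minimal sketch, assuming `completion` is a parsed `ChatCompletion`:

```python
# Illustrative sketch only - `completion` stands in for a parsed response object.
from typing import Optional

from together.types.chat.chat_completion import ChatCompletion


def first_message_text(completion: ChatCompletion) -> Optional[str]:
    if not completion.choices:
        return None
    choice = completion.choices[0]
    # message and its content may both be None on a Choice
    return choice.message.content if choice.message else None
```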
together/types/chat/chat_completion_chunk.py
@@ -0,0 +1,61 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..tool_choice import ToolChoice
+from .chat_completion_usage import ChatCompletionUsage
+from .chat_completion_warning import ChatCompletionWarning
+
+__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceDeltaFunctionCall"]
+
+
+class ChoiceDeltaFunctionCall(BaseModel):
+    arguments: str
+
+    name: str
+
+
+class ChoiceDelta(BaseModel):
+    role: Literal["system", "user", "assistant", "function", "tool"]
+
+    content: Optional[str] = None
+
+    function_call: Optional[ChoiceDeltaFunctionCall] = None
+
+    reasoning: Optional[str] = None
+
+    token_id: Optional[int] = None
+
+    tool_calls: Optional[List[ToolChoice]] = None
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+
+    finish_reason: Optional[Literal["stop", "eos", "length", "tool_calls", "function_call"]] = None
+
+    index: int
+
+    logprobs: Optional[float] = None
+
+    seed: Optional[int] = None
+
+
+class ChatCompletionChunk(BaseModel):
+    id: str
+
+    choices: List[Choice]
+
+    created: int
+
+    model: str
+
+    object: Literal["chat.completion.chunk"]
+
+    system_fingerprint: Optional[str] = None
+
+    usage: Optional[ChatCompletionUsage] = None
+
+    warnings: Optional[List[ChatCompletionWarning]] = None
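For streaming, each chunk carries a `delta` rather than a full message, so clients concatenate `delta.content` across chunks. A minimal sketch, assuming `chunks` is an iterable of parsed `ChatCompletionChunk` objects (how the stream is obtained is outside this excerpt):

```python
# Illustrative sketch only - `chunks` stands in for a streaming response.
from typing import Iterable

from together.types.chat.chat_completion_chunk import ChatCompletionChunk


def collect_text(chunks: Iterable[ChatCompletionChunk]) -> str:
    parts = []
    for chunk in chunks:
        for choice in chunk.choices:
            if choice.delta.content:  # delta is required; its content may be None
                parts.append(choice.delta.content)
    return "".join(parts)
```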
together/types/chat/chat_completion_structured_message_image_url_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionStructuredMessageImageURLParam", "ImageURL"]
+
+
+class ImageURL(TypedDict, total=False):
+    url: Required[str]
+    """The URL of the image"""
+
+
+class ChatCompletionStructuredMessageImageURLParam(TypedDict, total=False):
+    image_url: ImageURL
+
+    type: Literal["image_url"]
together/types/chat/chat_completion_structured_message_text_param.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionStructuredMessageTextParam"]
+
+
+class ChatCompletionStructuredMessageTextParam(TypedDict, total=False):
+    text: Required[str]
+
+    type: Required[Literal["text"]]
together/types/chat/chat_completion_structured_message_video_url_param.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionStructuredMessageVideoURLParam", "VideoURL"]
+
+
+class VideoURL(TypedDict, total=False):
+    url: Required[str]
+    """The URL of the video"""
+
+
+class ChatCompletionStructuredMessageVideoURLParam(TypedDict, total=False):
+    type: Required[Literal["video_url"]]
+
+    video_url: Required[VideoURL]
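The three structured-message param types above are the building blocks for multimodal message content; the message shape that holds such a list is defined in `completion_create_params.py`, which is not shown in this excerpt. A minimal sketch with a hypothetical image URL:

```python
# Illustrative sketch only - the image URL is hypothetical, and attaching this
# list to a chat message is defined in completion_create_params.py (not shown).
from together.types.chat.chat_completion_structured_message_text_param import (
    ChatCompletionStructuredMessageTextParam,
)
from together.types.chat.chat_completion_structured_message_image_url_param import (
    ChatCompletionStructuredMessageImageURLParam,
)

text_part: ChatCompletionStructuredMessageTextParam = {
    "type": "text",
    "text": "What is shown in this image?",
}
image_part: ChatCompletionStructuredMessageImageURLParam = {
    "type": "image_url",
    "image_url": {"url": "https://example.com/cat.png"},
}
content = [text_part, image_part]
```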
together/types/chat/chat_completion_usage.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionUsage"]
+
+
+class ChatCompletionUsage(BaseModel):
+    completion_tokens: int
+
+    prompt_tokens: int
+
+    total_tokens: int