together 1.5.34__py3-none-any.whl → 2.0.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +101 -114
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +65 -81
- together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
- together/{cli → lib/cli}/api/models.py +34 -27
- together/lib/cli/api/utils.py +50 -0
- together/{cli → lib/cli}/cli.py +16 -26
- together/{constants.py → lib/constants.py} +11 -24
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +397 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/{utils → lib/utils}/files.py +90 -288
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -39
- together/resources/audio/__init__.py +72 -48
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -128
- together/resources/audio/transcriptions.py +247 -261
- together/resources/audio/translations.py +221 -241
- together/resources/audio/voices.py +111 -41
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +589 -477
- together/resources/evals.py +452 -0
- together/resources/files.py +397 -129
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +258 -104
- together/resources/jobs.py +214 -0
- together/resources/models.py +223 -193
- together/resources/rerank.py +190 -92
- together/resources/videos.py +286 -214
- together/types/__init__.py +66 -167
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_create_response.py +10 -0
- together/types/video_job.py +57 -0
- together-2.0.0a6.dist-info/METADATA +729 -0
- together-2.0.0a6.dist-info/RECORD +165 -0
- {together-1.5.34.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
- together-2.0.0a6.dist-info/entry_points.txt +2 -0
- {together-1.5.34.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -770
- together/cli/api/chat.py +0 -298
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/utils.py +0 -139
- together/client.py +0 -186
- together/error.py +0 -194
- together/filemanager.py +0 -635
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -165
- together/resources/code_interpreter.py +0 -82
- together/resources/evaluation.py +0 -808
- together/resources/finetune.py +0 -1388
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -311
- together/types/batch.py +0 -54
- together/types/chat_completions.py +0 -210
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -67
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/evaluation.py +0 -93
- together/types/files.py +0 -93
- together/types/finetune.py +0 -464
- together/types/images.py +0 -42
- together/types/models.py +0 -96
- together/types/rerank.py +0 -43
- together/types/videos.py +0 -69
- together/utils/api_helpers.py +0 -124
- together/version.py +0 -6
- together-1.5.34.dist-info/METADATA +0 -583
- together-1.5.34.dist-info/RECORD +0 -77
- together-1.5.34.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
together/types/__init__.py
CHANGED
|
@@ -1,169 +1,68 @@
|
|
|
1
|
-
from
|
|
2
|
-
from together.types.audio_speech import (
|
|
3
|
-
AudioLanguage,
|
|
4
|
-
AudioResponseEncoding,
|
|
5
|
-
AudioResponseFormat,
|
|
6
|
-
AudioSpeechRequest,
|
|
7
|
-
AudioSpeechStreamChunk,
|
|
8
|
-
AudioSpeechStreamEvent,
|
|
9
|
-
AudioSpeechStreamResponse,
|
|
10
|
-
AudioTimestampGranularities,
|
|
11
|
-
AudioTranscriptionRequest,
|
|
12
|
-
AudioTranscriptionResponse,
|
|
13
|
-
AudioTranscriptionResponseFormat,
|
|
14
|
-
AudioTranscriptionVerboseResponse,
|
|
15
|
-
AudioTranslationRequest,
|
|
16
|
-
AudioTranslationResponse,
|
|
17
|
-
AudioTranslationVerboseResponse,
|
|
18
|
-
ModelVoices,
|
|
19
|
-
VoiceListResponse,
|
|
20
|
-
)
|
|
21
|
-
from together.types.batch import BatchEndpoint, BatchJob, BatchJobStatus
|
|
22
|
-
from together.types.chat_completions import (
|
|
23
|
-
ChatCompletionChunk,
|
|
24
|
-
ChatCompletionRequest,
|
|
25
|
-
ChatCompletionResponse,
|
|
26
|
-
)
|
|
27
|
-
from together.types.common import TogetherRequest
|
|
28
|
-
from together.types.completions import (
|
|
29
|
-
CompletionChunk,
|
|
30
|
-
CompletionRequest,
|
|
31
|
-
CompletionResponse,
|
|
32
|
-
)
|
|
33
|
-
from together.types.embeddings import EmbeddingRequest, EmbeddingResponse
|
|
34
|
-
from together.types.endpoints import Autoscaling, DedicatedEndpoint, ListEndpoint
|
|
35
|
-
from together.types.evaluation import (
|
|
36
|
-
ClassifyParameters,
|
|
37
|
-
CompareParameters,
|
|
38
|
-
EvaluationCreateResponse,
|
|
39
|
-
EvaluationJob,
|
|
40
|
-
EvaluationRequest,
|
|
41
|
-
EvaluationStatus,
|
|
42
|
-
EvaluationStatusResponse,
|
|
43
|
-
EvaluationType,
|
|
44
|
-
JudgeModelConfig,
|
|
45
|
-
ModelRequest,
|
|
46
|
-
ScoreParameters,
|
|
47
|
-
)
|
|
48
|
-
from together.types.files import (
|
|
49
|
-
FileDeleteResponse,
|
|
50
|
-
FileList,
|
|
51
|
-
FileObject,
|
|
52
|
-
FilePurpose,
|
|
53
|
-
FileRequest,
|
|
54
|
-
FileResponse,
|
|
55
|
-
FileType,
|
|
56
|
-
)
|
|
57
|
-
from together.types.finetune import (
|
|
58
|
-
CosineLRScheduler,
|
|
59
|
-
CosineLRSchedulerArgs,
|
|
60
|
-
FinetuneCheckpoint,
|
|
61
|
-
FinetuneDeleteResponse,
|
|
62
|
-
FinetuneDownloadResult,
|
|
63
|
-
FinetuneList,
|
|
64
|
-
FinetuneListEvents,
|
|
65
|
-
FinetuneLRScheduler,
|
|
66
|
-
FinetuneMultimodalParams,
|
|
67
|
-
FinetunePriceEstimationRequest,
|
|
68
|
-
FinetunePriceEstimationResponse,
|
|
69
|
-
FinetuneRequest,
|
|
70
|
-
FinetuneResponse,
|
|
71
|
-
FinetuneTrainingLimits,
|
|
72
|
-
FullTrainingType,
|
|
73
|
-
LinearLRScheduler,
|
|
74
|
-
LinearLRSchedulerArgs,
|
|
75
|
-
LoRATrainingType,
|
|
76
|
-
TrainingMethodDPO,
|
|
77
|
-
TrainingMethodSFT,
|
|
78
|
-
TrainingType,
|
|
79
|
-
)
|
|
80
|
-
from together.types.images import ImageRequest, ImageResponse
|
|
81
|
-
from together.types.models import ModelObject, ModelUploadRequest, ModelUploadResponse
|
|
82
|
-
from together.types.rerank import RerankRequest, RerankResponse
|
|
83
|
-
from together.types.videos import CreateVideoBody, CreateVideoResponse, VideoJob
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

# NOTE: the redundant-looking ``X as X`` form is deliberate — it marks each name
# as an explicit public re-export for type checkers (PEP 484 / mypy's
# ``--no-implicit-reexport``), so ``from together.types import BatchJob`` type-checks.
from .batch_job import BatchJob as BatchJob
from .embedding import Embedding as Embedding
from .file_list import FileList as FileList
from .file_type import FileType as FileType
from .log_probs import LogProbs as LogProbs
from .video_job import VideoJob as VideoJob
from .completion import Completion as Completion
from .image_file import ImageFile as ImageFile
from .autoscaling import Autoscaling as Autoscaling
from .tool_choice import ToolChoice as ToolChoice
from .tools_param import ToolsParam as ToolsParam
from .file_purpose import FilePurpose as FilePurpose
from .model_object import ModelObject as ModelObject
from .file_response import FileResponse as FileResponse
from .evaluation_job import EvaluationJob as EvaluationJob
from .finetune_event import FinetuneEvent as FinetuneEvent
from .image_data_b64 import ImageDataB64 as ImageDataB64
from .image_data_url import ImageDataURL as ImageDataURL
from .completion_chunk import CompletionChunk as CompletionChunk
from .eval_list_params import EvalListParams as EvalListParams
from .execute_response import ExecuteResponse as ExecuteResponse
from .autoscaling_param import AutoscalingParam as AutoscalingParam
from .finetune_response import FinetuneResponse as FinetuneResponse
from .job_list_response import JobListResponse as JobListResponse
from .tool_choice_param import ToolChoiceParam as ToolChoiceParam
from .dedicated_endpoint import DedicatedEndpoint as DedicatedEndpoint
from .eval_create_params import EvalCreateParams as EvalCreateParams
from .eval_list_response import EvalListResponse as EvalListResponse
from .batch_create_params import BatchCreateParams as BatchCreateParams
from .batch_list_response import BatchListResponse as BatchListResponse
from .finetune_event_type import FinetuneEventType as FinetuneEventType
from .model_list_response import ModelListResponse as ModelListResponse
from .model_upload_params import ModelUploadParams as ModelUploadParams
from .video_create_params import VideoCreateParams as VideoCreateParams
from .endpoint_list_params import EndpointListParams as EndpointListParams
from .eval_create_response import EvalCreateResponse as EvalCreateResponse
from .eval_status_response import EvalStatusResponse as EvalStatusResponse
from .file_delete_response import FileDeleteResponse as FileDeleteResponse
from .hardware_list_params import HardwareListParams as HardwareListParams
from .rerank_create_params import RerankCreateParams as RerankCreateParams
from .batch_create_response import BatchCreateResponse as BatchCreateResponse
from .image_generate_params import ImageGenerateParams as ImageGenerateParams
from .job_retrieve_response import JobRetrieveResponse as JobRetrieveResponse
from .model_upload_response import ModelUploadResponse as ModelUploadResponse
from .video_create_response import VideoCreateResponse as VideoCreateResponse
from .endpoint_create_params import EndpointCreateParams as EndpointCreateParams
from .endpoint_list_response import EndpointListResponse as EndpointListResponse
from .endpoint_update_params import EndpointUpdateParams as EndpointUpdateParams
from .hardware_list_response import HardwareListResponse as HardwareListResponse
from .rerank_create_response import RerankCreateResponse as RerankCreateResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .completion_create_params import CompletionCreateParams as CompletionCreateParams
from .audio_speech_stream_chunk import AudioSpeechStreamChunk as AudioSpeechStreamChunk
from .fine_tuning_delete_params import FineTuningDeleteParams as FineTuningDeleteParams
from .fine_tuning_list_response import FineTuningListResponse as FineTuningListResponse
from .fine_tuning_content_params import FineTuningContentParams as FineTuningContentParams
from .fine_tuning_cancel_response import FineTuningCancelResponse as FineTuningCancelResponse
from .fine_tuning_delete_response import FineTuningDeleteResponse as FineTuningDeleteResponse
from .endpoint_list_avzones_response import EndpointListAvzonesResponse as EndpointListAvzonesResponse
from .code_interpreter_execute_params import CodeInterpreterExecuteParams as CodeInterpreterExecuteParams
from .fine_tuning_list_events_response import FineTuningListEventsResponse as FineTuningListEventsResponse
from .fine_tuning_list_checkpoints_response import (
    FineTuningListCheckpointsResponse as FineTuningListCheckpointsResponse,
)
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

# ``X as X`` marks each name as an explicit public re-export (PEP 484 /
# mypy ``--no-implicit-reexport``) from the ``together.types.audio`` subpackage.
from .voice_list_response import VoiceListResponse as VoiceListResponse
from .speech_create_params import SpeechCreateParams as SpeechCreateParams
from .translation_create_params import TranslationCreateParams as TranslationCreateParams
from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams
from .translation_create_response import TranslationCreateResponse as TranslationCreateResponse
from .transcription_create_response import TranscriptionCreateResponse as TranscriptionCreateResponse
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union
from typing_extensions import Literal, Required, TypedDict

# "SpeechCreateParams" (the streaming/non-streaming union defined at the bottom of
# this module) is included in __all__ so the module's public alias is star-importable
# and recognized as an explicit export — consistent with the sibling generated modules
# (e.g. transcription_create_response lists its "TranscriptionCreateResponse" alias).
__all__ = [
    "SpeechCreateParamsBase",
    "SpeechCreateParamsNonStreaming",
    "SpeechCreateParamsStreaming",
    "SpeechCreateParams",
]


class SpeechCreateParamsBase(TypedDict, total=False):
    # total=False: every key is optional unless wrapped in Required[...].
    input: Required[str]
    """Input text to generate the audio for"""

    model: Required[Union[Literal["cartesia/sonic", "hexgrad/Kokoro-82M", "canopylabs/orpheus-3b-0.1-ft"], str]]
    """The name of the model to query.

    [See all of Together AI's chat models](https://docs.together.ai/docs/serverless-models#audio-models)
    The current supported tts models are: - cartesia/sonic - hexgrad/Kokoro-82M -
    canopylabs/orpheus-3b-0.1-ft
    """

    voice: Required[str]
    """The voice to use for generating the audio.

    The voices supported are different for each model. For eg - for
    canopylabs/orpheus-3b-0.1-ft, one of the voices supported is tara, for
    hexgrad/Kokoro-82M, one of the voices supported is af_alloy and for
    cartesia/sonic, one of the voices supported is "friendly sidekick".

    You can view the voices supported for each model using the /v1/voices endpoint
    sending the model name as the query parameter.
    [View all supported voices here](https://docs.together.ai/docs/text-to-speech#voices-available).
    """

    language: Literal["en", "de", "fr", "es", "hi", "it", "ja", "ko", "nl", "pl", "pt", "ru", "sv", "tr", "zh"]
    """Language of input text."""

    response_encoding: Literal["pcm_f32le", "pcm_s16le", "pcm_mulaw", "pcm_alaw"]
    """Audio encoding of response"""

    response_format: Literal["mp3", "wav", "raw"]
    """The format of audio output.

    Supported formats are mp3, wav, raw if streaming is false. If streaming is true,
    the only supported format is raw.
    """

    sample_rate: int
    """Sampling rate to use for the output audio.

    The default sampling rate for canopylabs/orpheus-3b-0.1-ft and
    hexgrad/Kokoro-82M is 24000 and for cartesia/sonic is 44100.
    """


class SpeechCreateParamsNonStreaming(SpeechCreateParamsBase, total=False):
    # stream may be omitted or explicitly False for the non-streaming variant.
    stream: Literal[False]
    """
    If true, output is streamed for several characters at a time instead of waiting
    for the full response. The stream terminates with `data: [DONE]`. If false,
    return the encoded audio as octet stream
    """


class SpeechCreateParamsStreaming(SpeechCreateParamsBase):
    # stream=True is what discriminates the streaming variant, so it is Required.
    stream: Required[Literal[True]]
    """
    If true, output is streamed for several characters at a time instead of waiting
    for the full response. The stream terminates with `data: [DONE]`. If false,
    return the encoded audio as octet stream
    """


SpeechCreateParams = Union[SpeechCreateParamsNonStreaming, SpeechCreateParamsStreaming]
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union
from typing_extensions import Literal, Required, TypedDict

from ..._types import FileTypes

__all__ = ["TranscriptionCreateParams"]


class TranscriptionCreateParams(TypedDict, total=False):
    # Request body for POST audio transcription. total=False: every key is
    # optional unless wrapped in Required[...].
    file: Required[FileTypes]
    """Audio file to transcribe"""

    diarize: bool
    """Whether to enable speaker diarization.

    When enabled, you will get the speaker id for each word in the transcription. In
    the response, in the words array, you will get the speaker id for each word. In
    addition, we also return the speaker_segments array which contains the speaker
    id for each speaker segment along with the start and end time of the segment
    along with all the words in the segment.

    For eg - ... "speaker_segments": [ "speaker_id": "SPEAKER_00", "start": 0,
    "end": 30.02, "words": [ { "id": 0, "word": "Tijana", "start": 0, "end": 11.475,
    "speaker_id": "SPEAKER_00" }, ...
    """

    language: str
    """Optional ISO 639-1 language code.

    If `auto` is provided, language is auto-detected.
    """

    model: Literal["openai/whisper-large-v3"]
    """Model to use for transcription"""

    prompt: str
    """Optional text to bias decoding."""

    response_format: Literal["json", "verbose_json"]
    """The format of the response"""

    temperature: float
    """Sampling temperature between 0.0 and 1.0"""

    # Accepts either one granularity or a list of granularities; only meaningful
    # together with response_format="verbose_json".
    timestamp_granularities: Union[Literal["segment", "word"], List[Literal["segment", "word"]]]
    """Controls level of timestamp detail in verbose_json.

    Only used when response_format is verbose_json. Can be a single granularity or
    an array to get multiple levels.
    """
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias

from ..._models import BaseModel

__all__ = [
    "TranscriptionCreateResponse",
    "AudioTranscriptionJsonResponse",
    "AudioTranscriptionVerboseJsonResponse",
    "AudioTranscriptionVerboseJsonResponseSegment",
    "AudioTranscriptionVerboseJsonResponseSpeakerSegment",
    "AudioTranscriptionVerboseJsonResponseSpeakerSegmentWord",
    "AudioTranscriptionVerboseJsonResponseWord",
]


class AudioTranscriptionJsonResponse(BaseModel):
    # Minimal response shape: just the transcript text.
    text: str
    """The transcribed text"""


class AudioTranscriptionVerboseJsonResponseSegment(BaseModel):
    # One contiguous timestamped chunk of the transcript.
    id: int
    """Unique identifier for the segment"""

    end: float
    """End time of the segment in seconds"""

    start: float
    """Start time of the segment in seconds"""

    text: str
    """The text content of the segment"""


class AudioTranscriptionVerboseJsonResponseSpeakerSegmentWord(BaseModel):
    end: float
    """End time of the word in seconds"""

    start: float
    """Start time of the word in seconds"""

    word: str
    """The word"""

    speaker_id: Optional[str] = None
    """The speaker id for the word (only when diarize is enabled)"""


class AudioTranscriptionVerboseJsonResponseSpeakerSegment(BaseModel):
    # Diarization output: a span of speech attributed to a single speaker.
    id: int
    """Unique identifier for the speaker segment"""

    end: float
    """End time of the speaker segment in seconds"""

    speaker_id: str
    """The speaker identifier"""

    start: float
    """Start time of the speaker segment in seconds"""

    text: str
    """The full text spoken by this speaker in this segment"""

    words: List[AudioTranscriptionVerboseJsonResponseSpeakerSegmentWord]
    """Array of words spoken by this speaker in this segment"""


class AudioTranscriptionVerboseJsonResponseWord(BaseModel):
    end: float
    """End time of the word in seconds"""

    start: float
    """Start time of the word in seconds"""

    word: str
    """The word"""

    speaker_id: Optional[str] = None
    """The speaker id for the word (only when diarize is enabled)"""


class AudioTranscriptionVerboseJsonResponse(BaseModel):
    # Rich response shape with timing metadata; speaker_segments/words are
    # populated only for certain request options (see field docs below).
    duration: float
    """The duration of the audio in seconds"""

    language: str
    """The language of the audio"""

    segments: List[AudioTranscriptionVerboseJsonResponseSegment]
    """Array of transcription segments"""

    task: Literal["transcribe", "translate"]
    """The task performed"""

    text: str
    """The transcribed text"""

    speaker_segments: Optional[List[AudioTranscriptionVerboseJsonResponseSpeakerSegment]] = None
    """Array of transcription speaker segments (only when diarize is enabled)"""

    words: Optional[List[AudioTranscriptionVerboseJsonResponseWord]] = None
    """
    Array of transcription words (only when timestamp_granularities includes 'word')
    """


# NOTE(review): which variant the API returns presumably tracks the request's
# response_format ("json" vs "verbose_json") — confirm against the endpoint docs.
TranscriptionCreateResponse: TypeAlias = Union[AudioTranscriptionJsonResponse, AudioTranscriptionVerboseJsonResponse]
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import List, Union
from typing_extensions import Literal, Required, TypedDict

from ..._types import FileTypes

__all__ = ["TranslationCreateParams"]


class TranslationCreateParams(TypedDict, total=False):
    # Request body for POST audio translation. total=False: every key is
    # optional unless wrapped in Required[...].
    file: Required[FileTypes]
    """Audio file to translate"""

    language: str
    """Target output language.

    Optional ISO 639-1 language code. If omitted, language is set to English.
    """

    model: Literal["openai/whisper-large-v3"]
    """Model to use for translation"""

    prompt: str
    """Optional text to bias decoding."""

    response_format: Literal["json", "verbose_json"]
    """The format of the response"""

    temperature: float
    """Sampling temperature between 0.0 and 1.0"""

    # Accepts either one granularity or a list of granularities; only meaningful
    # together with response_format="verbose_json".
    timestamp_granularities: Union[Literal["segment", "word"], List[Literal["segment", "word"]]]
    """Controls level of timestamp detail in verbose_json.

    Only used when response_format is verbose_json. Can be a single granularity or
    an array to get multiple levels.
    """
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import List, Union, Optional
|
|
4
|
+
from typing_extensions import Literal, TypeAlias
|
|
5
|
+
|
|
6
|
+
from ..._models import BaseModel
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"TranslationCreateResponse",
|
|
10
|
+
"AudioTranslationJsonResponse",
|
|
11
|
+
"AudioTranslationVerboseJsonResponse",
|
|
12
|
+
"AudioTranslationVerboseJsonResponseSegment",
|
|
13
|
+
"AudioTranslationVerboseJsonResponseWord",
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class AudioTranslationJsonResponse(BaseModel):
    """Minimal translation response containing only the translated text."""

    text: str
    """The translated text"""
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class AudioTranslationVerboseJsonResponseSegment(BaseModel):
    """One timestamped segment of a verbose translation response."""

    id: int
    """Unique identifier for the segment"""

    end: float
    """End time of the segment in seconds"""

    start: float
    """Start time of the segment in seconds"""

    text: str
    """The text content of the segment"""
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class AudioTranslationVerboseJsonResponseWord(BaseModel):
    """A single word with per-word timing in a verbose translation response."""

    end: float
    """End time of the word in seconds"""

    start: float
    """Start time of the word in seconds"""

    word: str
    """The word"""

    # Optional with a None default: the server only populates this field
    # when speaker diarization is requested.
    speaker_id: Optional[str] = None
    """The speaker id for the word (only when diarize is enabled)"""
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class AudioTranslationVerboseJsonResponse(BaseModel):
    """Detailed translation response with timing metadata, returned for the
    ``verbose_json`` response format."""

    duration: float
    """The duration of the audio in seconds"""

    language: str
    """The target language of the translation"""

    segments: List[AudioTranslationVerboseJsonResponseSegment]
    """Array of translation segments"""

    task: Literal["transcribe", "translate"]
    """The task performed"""

    text: str
    """The translated text"""

    # Optional with a None default: only present when word-level timestamps
    # were requested via timestamp_granularities.
    words: Optional[List[AudioTranslationVerboseJsonResponseWord]] = None
    """Array of translation words (only when timestamp_granularities includes 'word')"""
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
# The translation endpoint returns one of two shapes: the minimal JSON
# response or the verbose response with segment/word timing metadata.
TranslationCreateResponse: TypeAlias = Union[AudioTranslationJsonResponse, AudioTranslationVerboseJsonResponse]
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
from ..._models import BaseModel
|
|
6
|
+
|
|
7
|
+
__all__ = ["VoiceListResponse", "Data", "DataVoice"]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class DataVoice(BaseModel):
    """A single voice entry in a voice listing."""

    # Identifier of the voice.
    id: str

    # Name of the voice.
    name: str
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class Data(BaseModel):
    """Voices grouped under the model that provides them."""

    # Model identifier these voices belong to.
    model: str

    # Voices available for this model.
    voices: List[DataVoice]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class VoiceListResponse(BaseModel):
    """Top-level response for listing voices: one entry per model."""

    data: List[Data]
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from typing_extensions import Literal
|
|
4
|
+
|
|
5
|
+
from .._models import BaseModel
|
|
6
|
+
|
|
7
|
+
__all__ = ["AudioSpeechStreamChunk"]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AudioSpeechStreamChunk(BaseModel):
    """One chunk of a streamed text-to-speech (audio speech) response."""

    b64: str
    """base64 encoded audio stream"""

    # Model identifier associated with this chunk.
    model: str

    # Event-type discriminator; always "audio.tts.chunk" for this model.
    object: Literal["audio.tts.chunk"]
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from .._models import BaseModel
|
|
4
|
+
|
|
5
|
+
__all__ = ["Autoscaling"]
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Autoscaling(BaseModel):
    """Replica autoscaling bounds for an endpoint (response model)."""

    max_replicas: int
    """The maximum number of replicas to scale up to under load"""

    min_replicas: int
    """The minimum number of replicas to maintain, even when there is no load"""
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing_extensions import Required, TypedDict
|
|
6
|
+
|
|
7
|
+
__all__ = ["AutoscalingParam"]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AutoscalingParam(TypedDict, total=False):
    """Replica autoscaling bounds for an endpoint (request parameter shape).

    Despite ``total=False``, both keys are marked ``Required`` and so must be
    supplied by the caller.
    """

    max_replicas: Required[int]
    """The maximum number of replicas to scale up to under load"""

    min_replicas: Required[int]
    """The minimum number of replicas to maintain, even when there is no load"""
|