together 1.5.25__py3-none-any.whl → 1.5.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/cli/api/evaluation.py +118 -18
- together/cli/api/finetune.py +27 -0
- together/cli/api/models.py +79 -1
- together/client.py +4 -0
- together/constants.py +14 -2
- together/filemanager.py +230 -5
- together/resources/__init__.py +3 -0
- together/resources/audio/transcriptions.py +16 -4
- together/resources/endpoints.py +4 -4
- together/resources/evaluation.py +98 -14
- together/resources/files.py +12 -3
- together/resources/finetune.py +63 -0
- together/resources/models.py +118 -0
- together/resources/videos.py +303 -0
- together/types/__init__.py +13 -1
- together/types/audio_speech.py +13 -0
- together/types/evaluation.py +9 -3
- together/types/files.py +1 -1
- together/types/finetune.py +5 -0
- together/types/models.py +50 -1
- together/types/videos.py +69 -0
- together/utils/files.py +1 -1
- {together-1.5.25.dist-info → together-1.5.27.dist-info}/METADATA +6 -4
- {together-1.5.25.dist-info → together-1.5.27.dist-info}/RECORD +27 -25
- {together-1.5.25.dist-info → together-1.5.27.dist-info}/WHEEL +1 -1
- {together-1.5.25.dist-info → together-1.5.27.dist-info}/entry_points.txt +0 -0
- {together-1.5.25.dist-info → together-1.5.27.dist-info/licenses}/LICENSE +0 -0
together/resources/finetune.py
CHANGED
|
@@ -13,6 +13,7 @@ from together.types import (
|
|
|
13
13
|
CosineLRScheduler,
|
|
14
14
|
CosineLRSchedulerArgs,
|
|
15
15
|
FinetuneCheckpoint,
|
|
16
|
+
FinetuneDeleteResponse,
|
|
16
17
|
FinetuneDownloadResult,
|
|
17
18
|
FinetuneList,
|
|
18
19
|
FinetuneListEvents,
|
|
@@ -570,6 +571,37 @@ class FineTuning:
|
|
|
570
571
|
|
|
571
572
|
return FinetuneResponse(**response.data)
|
|
572
573
|
|
|
574
|
+
def delete(self, id: str, force: bool = False) -> FinetuneDeleteResponse:
|
|
575
|
+
"""
|
|
576
|
+
Method to delete a fine-tuning job
|
|
577
|
+
|
|
578
|
+
Args:
|
|
579
|
+
id (str): Fine-tune ID to delete. A string that starts with `ft-`.
|
|
580
|
+
force (bool, optional): Force deletion. Defaults to False.
|
|
581
|
+
|
|
582
|
+
Returns:
|
|
583
|
+
FinetuneDeleteResponse: Object containing deletion confirmation message.
|
|
584
|
+
"""
|
|
585
|
+
|
|
586
|
+
requestor = api_requestor.APIRequestor(
|
|
587
|
+
client=self._client,
|
|
588
|
+
)
|
|
589
|
+
|
|
590
|
+
params = {"force": str(force).lower()}
|
|
591
|
+
|
|
592
|
+
response, _, _ = requestor.request(
|
|
593
|
+
options=TogetherRequest(
|
|
594
|
+
method="DELETE",
|
|
595
|
+
url=f"fine-tunes/{id}",
|
|
596
|
+
params=params,
|
|
597
|
+
),
|
|
598
|
+
stream=False,
|
|
599
|
+
)
|
|
600
|
+
|
|
601
|
+
assert isinstance(response, TogetherResponse)
|
|
602
|
+
|
|
603
|
+
return FinetuneDeleteResponse(**response.data)
|
|
604
|
+
|
|
573
605
|
def list_events(self, id: str) -> FinetuneListEvents:
|
|
574
606
|
"""
|
|
575
607
|
Lists events of a fine-tune job
|
|
@@ -1007,6 +1039,37 @@ class AsyncFineTuning:
|
|
|
1007
1039
|
|
|
1008
1040
|
return FinetuneResponse(**response.data)
|
|
1009
1041
|
|
|
1042
|
+
async def delete(self, id: str, force: bool = False) -> FinetuneDeleteResponse:
|
|
1043
|
+
"""
|
|
1044
|
+
Async method to delete a fine-tuning job
|
|
1045
|
+
|
|
1046
|
+
Args:
|
|
1047
|
+
id (str): Fine-tune ID to delete. A string that starts with `ft-`.
|
|
1048
|
+
force (bool, optional): Force deletion. Defaults to False.
|
|
1049
|
+
|
|
1050
|
+
Returns:
|
|
1051
|
+
FinetuneDeleteResponse: Object containing deletion confirmation message.
|
|
1052
|
+
"""
|
|
1053
|
+
|
|
1054
|
+
requestor = api_requestor.APIRequestor(
|
|
1055
|
+
client=self._client,
|
|
1056
|
+
)
|
|
1057
|
+
|
|
1058
|
+
params = {"force": str(force).lower()}
|
|
1059
|
+
|
|
1060
|
+
response, _, _ = await requestor.arequest(
|
|
1061
|
+
options=TogetherRequest(
|
|
1062
|
+
method="DELETE",
|
|
1063
|
+
url=f"fine-tunes/{id}",
|
|
1064
|
+
params=params,
|
|
1065
|
+
),
|
|
1066
|
+
stream=False,
|
|
1067
|
+
)
|
|
1068
|
+
|
|
1069
|
+
assert isinstance(response, TogetherResponse)
|
|
1070
|
+
|
|
1071
|
+
return FinetuneDeleteResponse(**response.data)
|
|
1072
|
+
|
|
1010
1073
|
async def list_events(self, id: str) -> FinetuneListEvents:
|
|
1011
1074
|
"""
|
|
1012
1075
|
List fine-tuning events
|
together/resources/models.py
CHANGED
|
@@ -6,6 +6,8 @@ from together.abstract import api_requestor
|
|
|
6
6
|
from together.together_response import TogetherResponse
|
|
7
7
|
from together.types import (
|
|
8
8
|
ModelObject,
|
|
9
|
+
ModelUploadRequest,
|
|
10
|
+
ModelUploadResponse,
|
|
9
11
|
TogetherClient,
|
|
10
12
|
TogetherRequest,
|
|
11
13
|
)
|
|
@@ -85,6 +87,64 @@ class Models(ModelsBase):
|
|
|
85
87
|
|
|
86
88
|
return models
|
|
87
89
|
|
|
90
|
+
def upload(
|
|
91
|
+
self,
|
|
92
|
+
*,
|
|
93
|
+
model_name: str,
|
|
94
|
+
model_source: str,
|
|
95
|
+
model_type: str = "model",
|
|
96
|
+
hf_token: str | None = None,
|
|
97
|
+
description: str | None = None,
|
|
98
|
+
base_model: str | None = None,
|
|
99
|
+
lora_model: str | None = None,
|
|
100
|
+
) -> ModelUploadResponse:
|
|
101
|
+
"""
|
|
102
|
+
Upload a custom model or adapter from Hugging Face or S3.
|
|
103
|
+
|
|
104
|
+
Args:
|
|
105
|
+
model_name (str): The name to give to your uploaded model
|
|
106
|
+
model_source (str): The source location of the model (Hugging Face repo or S3 path)
|
|
107
|
+
model_type (str, optional): Whether the model is a full model or an adapter. Defaults to "model".
|
|
108
|
+
hf_token (str, optional): Hugging Face token (if uploading from Hugging Face)
|
|
109
|
+
description (str, optional): A description of your model
|
|
110
|
+
base_model (str, optional): The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type "adapter".
|
|
111
|
+
lora_model (str, optional): The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type "adapter".
|
|
112
|
+
|
|
113
|
+
Returns:
|
|
114
|
+
ModelUploadResponse: Object containing upload job information
|
|
115
|
+
"""
|
|
116
|
+
requestor = api_requestor.APIRequestor(
|
|
117
|
+
client=self._client,
|
|
118
|
+
)
|
|
119
|
+
|
|
120
|
+
data = {
|
|
121
|
+
"model_name": model_name,
|
|
122
|
+
"model_source": model_source,
|
|
123
|
+
"model_type": model_type,
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if hf_token is not None:
|
|
127
|
+
data["hf_token"] = hf_token
|
|
128
|
+
if description is not None:
|
|
129
|
+
data["description"] = description
|
|
130
|
+
if base_model is not None:
|
|
131
|
+
data["base_model"] = base_model
|
|
132
|
+
if lora_model is not None:
|
|
133
|
+
data["lora_model"] = lora_model
|
|
134
|
+
|
|
135
|
+
response, _, _ = requestor.request(
|
|
136
|
+
options=TogetherRequest(
|
|
137
|
+
method="POST",
|
|
138
|
+
url="models",
|
|
139
|
+
params=data,
|
|
140
|
+
),
|
|
141
|
+
stream=False,
|
|
142
|
+
)
|
|
143
|
+
|
|
144
|
+
assert isinstance(response, TogetherResponse)
|
|
145
|
+
|
|
146
|
+
return ModelUploadResponse.from_api_response(response.data)
|
|
147
|
+
|
|
88
148
|
|
|
89
149
|
class AsyncModels(ModelsBase):
|
|
90
150
|
async def list(
|
|
@@ -132,3 +192,61 @@ class AsyncModels(ModelsBase):
|
|
|
132
192
|
models.sort(key=lambda x: x.id.lower())
|
|
133
193
|
|
|
134
194
|
return models
|
|
195
|
+
|
|
196
|
+
async def upload(
|
|
197
|
+
self,
|
|
198
|
+
*,
|
|
199
|
+
model_name: str,
|
|
200
|
+
model_source: str,
|
|
201
|
+
model_type: str = "model",
|
|
202
|
+
hf_token: str | None = None,
|
|
203
|
+
description: str | None = None,
|
|
204
|
+
base_model: str | None = None,
|
|
205
|
+
lora_model: str | None = None,
|
|
206
|
+
) -> ModelUploadResponse:
|
|
207
|
+
"""
|
|
208
|
+
Upload a custom model or adapter from Hugging Face or S3.
|
|
209
|
+
|
|
210
|
+
Args:
|
|
211
|
+
model_name (str): The name to give to your uploaded model
|
|
212
|
+
model_source (str): The source location of the model (Hugging Face repo or S3 path)
|
|
213
|
+
model_type (str, optional): Whether the model is a full model or an adapter. Defaults to "model".
|
|
214
|
+
hf_token (str, optional): Hugging Face token (if uploading from Hugging Face)
|
|
215
|
+
description (str, optional): A description of your model
|
|
216
|
+
base_model (str, optional): The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type "adapter".
|
|
217
|
+
lora_model (str, optional): The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type "adapter".
|
|
218
|
+
|
|
219
|
+
Returns:
|
|
220
|
+
ModelUploadResponse: Object containing upload job information
|
|
221
|
+
"""
|
|
222
|
+
requestor = api_requestor.APIRequestor(
|
|
223
|
+
client=self._client,
|
|
224
|
+
)
|
|
225
|
+
|
|
226
|
+
data = {
|
|
227
|
+
"model_name": model_name,
|
|
228
|
+
"model_source": model_source,
|
|
229
|
+
"model_type": model_type,
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
if hf_token is not None:
|
|
233
|
+
data["hf_token"] = hf_token
|
|
234
|
+
if description is not None:
|
|
235
|
+
data["description"] = description
|
|
236
|
+
if base_model is not None:
|
|
237
|
+
data["base_model"] = base_model
|
|
238
|
+
if lora_model is not None:
|
|
239
|
+
data["lora_model"] = lora_model
|
|
240
|
+
|
|
241
|
+
response, _, _ = await requestor.arequest(
|
|
242
|
+
options=TogetherRequest(
|
|
243
|
+
method="POST",
|
|
244
|
+
url="models",
|
|
245
|
+
params=data,
|
|
246
|
+
),
|
|
247
|
+
stream=False,
|
|
248
|
+
)
|
|
249
|
+
|
|
250
|
+
assert isinstance(response, TogetherResponse)
|
|
251
|
+
|
|
252
|
+
return ModelUploadResponse.from_api_response(response.data)
|
|
@@ -0,0 +1,303 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
from together.abstract import api_requestor
|
|
7
|
+
from together.together_response import TogetherResponse
|
|
8
|
+
from together.types import (
|
|
9
|
+
TogetherClient,
|
|
10
|
+
TogetherRequest,
|
|
11
|
+
)
|
|
12
|
+
from together.types.videos import (
|
|
13
|
+
CreateVideoResponse,
|
|
14
|
+
CreateVideoBody,
|
|
15
|
+
VideoJob,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
if sys.version_info >= (3, 8):
|
|
19
|
+
from typing import Literal
|
|
20
|
+
else:
|
|
21
|
+
from typing_extensions import Literal
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class Videos:
    """Synchronous client for the Together video generation endpoints."""

    def __init__(self, client: TogetherClient) -> None:
        self._client = client

    def create(
        self,
        *,
        model: str,
        prompt: str | None = None,
        height: int | None = None,
        width: int | None = None,
        seconds: str | None = None,
        fps: int | None = None,
        steps: int | None = None,
        seed: int | None = None,
        guidance_scale: float | None = None,
        output_format: Literal["MP4", "WEBM"] | None = None,
        output_quality: int | None = None,
        negative_prompt: str | None = None,
        frame_images: List[Dict[str, Any]] | None = None,
        reference_images: List[str] | None = None,
        **kwargs: Any,
    ) -> CreateVideoResponse:
        """
        Submit a video generation job for the given model.

        Args:
            model (str): The model to use for video generation.
            prompt (str, optional): Positive prompt describing the desired video.
            height (int, optional): Output video height in pixels.
            width (int, optional): Output video width in pixels.
            seconds (str, optional): Length of the generated video in seconds
                (min 1, max 10).
            fps (int, optional): Frames per second (min 15, max 60). Defaults to 24.
            steps (int, optional): Number of denoising steps (min 10, max 50);
                more steps typically yield higher quality at the cost of latency.
                Defaults to 20.
            seed (int, optional): Seed for deterministic generation; a random seed
                is used when omitted. When multiple videos are requested with the
                same seed, the seed is incremented by 1 for each video.
            guidance_scale (float, optional): How closely generation follows the
                prompt; 6.0-10.0 is recommended for most video models, and values
                above 12 may cause over-guidance artifacts. Defaults to 8.
            output_format (Literal["MP4", "WEBM"], optional): Output container
                format. Defaults to "MP4".
            output_quality (int, optional): Compression quality. Defaults to 20.
            negative_prompt (str, optional): What the video should avoid,
                mirroring `prompt`. Defaults to None.
            frame_images (List[Dict[str, Any]], optional): Keyframe-style guide
                images: one image sets the starting frame, two set start and end,
                and with more than two the frame must be specified per image.
            reference_images (List[str], optional): Reference images that condition
                the style, composition, or characteristics of the generation.

        Returns:
            CreateVideoResponse: Object containing the video generation job id.
        """

        requestor = api_requestor.APIRequestor(client=self._client)

        # Serialize through the request model; exclude_none keeps the payload minimal.
        body = CreateVideoBody(
            prompt=prompt,
            model=model,
            height=height,
            width=width,
            seconds=seconds,
            fps=fps,
            steps=steps,
            seed=seed,
            guidance_scale=guidance_scale,
            output_format=output_format,
            output_quality=output_quality,
            negative_prompt=negative_prompt,
            frame_images=frame_images,
            reference_images=reference_images,
            **kwargs,
        ).model_dump(exclude_none=True)

        # Video endpoints live under the v2 API root, hence the relative "../v2" path.
        response, _, _ = requestor.request(
            options=TogetherRequest(
                method="POST",
                url="../v2/videos",
                params=body,
            ),
            stream=False,
        )

        assert isinstance(response, TogetherResponse)

        return CreateVideoResponse(**response.data)

    def retrieve(self, id: str) -> VideoJob:
        """
        Fetch the current state of a video creation job.

        Args:
            id (str): The ID of the video creation job to retrieve.

        Returns:
            VideoJob: Object containing the current status and details of the job.
        """

        requestor = api_requestor.APIRequestor(client=self._client)

        response, _, _ = requestor.request(
            options=TogetherRequest(
                method="GET",
                url=f"../v2/videos/{id}",
            ),
            stream=False,
        )

        assert isinstance(response, TogetherResponse)

        return VideoJob(**response.data)
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
class AsyncVideos:
    """Asynchronous client for the Together video generation endpoints."""

    def __init__(self, client: TogetherClient) -> None:
        self._client = client

    async def create(
        self,
        *,
        model: str,
        # prompt made optional (was required) for consistency with the sync
        # Videos.create; keyword-only, so existing callers are unaffected.
        prompt: str | None = None,
        height: int | None = None,
        width: int | None = None,
        # NOTE(review): the sync Videos.create annotates seconds as `str` while
        # this method used `float` — confirm the intended API type.
        seconds: float | None = None,
        fps: int | None = None,
        steps: int | None = None,
        seed: int | None = None,
        guidance_scale: float | None = None,
        output_format: Literal["MP4", "WEBM"] | None = None,
        output_quality: int | None = None,
        negative_prompt: str | None = None,
        frame_images: List[Dict[str, Any]] | None = None,
        reference_images: List[str] | None = None,
        **kwargs: Any,
    ) -> CreateVideoResponse:
        """
        Async method to submit a video generation job for the given model.

        Args:
            model (str): The model to use for video generation.
            prompt (str, optional): Positive prompt describing the desired video.
            height (int, optional): Output video height in pixels.
            width (int, optional): Output video width in pixels.
            seconds (float, optional): Length of the generated video in seconds
                (min 1, max 10).
            fps (int, optional): Frames per second (min 15, max 60). Defaults to 24.
            steps (int, optional): Number of denoising steps (min 10, max 50);
                more steps typically yield higher quality at the cost of latency.
                Defaults to 20.
            seed (int, optional): Seed for deterministic generation; a random seed
                is used when omitted. When multiple videos are requested with the
                same seed, the seed is incremented by 1 for each video.
            guidance_scale (float, optional): How closely generation follows the
                prompt; 6.0-10.0 is recommended for most video models, and values
                above 12 may cause over-guidance artifacts. Defaults to 8.
            output_format (Literal["MP4", "WEBM"], optional): Output container
                format. Defaults to "MP4".
            output_quality (int, optional): Compression quality. Defaults to 20.
            negative_prompt (str, optional): What the video should avoid,
                mirroring `prompt`. Defaults to None.
            frame_images (List[Dict[str, Any]], optional): Keyframe-style guide
                images: one image sets the starting frame, two set start and end,
                and with more than two the frame must be specified per image.
            reference_images (List[str], optional): Reference images that condition
                the style, composition, or characteristics of the generation.

        Returns:
            CreateVideoResponse: Object containing the video creation job id.
        """

        requestor = api_requestor.APIRequestor(client=self._client)

        # Serialize through the request model; exclude_none keeps the payload minimal.
        body = CreateVideoBody(
            prompt=prompt,
            model=model,
            height=height,
            width=width,
            seconds=seconds,
            fps=fps,
            steps=steps,
            seed=seed,
            guidance_scale=guidance_scale,
            output_format=output_format,
            output_quality=output_quality,
            negative_prompt=negative_prompt,
            frame_images=frame_images,
            reference_images=reference_images,
            **kwargs,
        ).model_dump(exclude_none=True)

        # Video endpoints live under the v2 API root, hence the relative "../v2" path.
        response, _, _ = await requestor.arequest(
            options=TogetherRequest(
                method="POST",
                url="../v2/videos",
                params=body,
            ),
            stream=False,
        )

        assert isinstance(response, TogetherResponse)

        return CreateVideoResponse(**response.data)

    async def retrieve(self, id: str) -> VideoJob:
        """
        Async method to fetch the current state of a video creation job.

        Args:
            id (str): The ID of the video creation job to retrieve.

        Returns:
            VideoJob: Object containing the current status and details of the job.
        """

        requestor = api_requestor.APIRequestor(client=self._client)

        response, _, _ = await requestor.arequest(
            options=TogetherRequest(
                method="GET",
                url=f"../v2/videos/{id}",
            ),
            stream=False,
        )

        assert isinstance(response, TogetherResponse)

        return VideoJob(**response.data)
|
together/types/__init__.py
CHANGED
|
@@ -52,13 +52,14 @@ from together.types.finetune import (
|
|
|
52
52
|
FinetuneListEvents,
|
|
53
53
|
FinetuneRequest,
|
|
54
54
|
FinetuneResponse,
|
|
55
|
+
FinetuneDeleteResponse,
|
|
55
56
|
FinetuneTrainingLimits,
|
|
56
57
|
FullTrainingType,
|
|
57
58
|
LoRATrainingType,
|
|
58
59
|
TrainingType,
|
|
59
60
|
)
|
|
60
61
|
from together.types.images import ImageRequest, ImageResponse
|
|
61
|
-
from together.types.models import ModelObject
|
|
62
|
+
from together.types.models import ModelObject, ModelUploadRequest, ModelUploadResponse
|
|
62
63
|
from together.types.rerank import RerankRequest, RerankResponse
|
|
63
64
|
from together.types.batch import BatchJob, BatchJobStatus, BatchEndpoint
|
|
64
65
|
from together.types.evaluation import (
|
|
@@ -74,6 +75,11 @@ from together.types.evaluation import (
|
|
|
74
75
|
EvaluationJob,
|
|
75
76
|
EvaluationStatusResponse,
|
|
76
77
|
)
|
|
78
|
+
from together.types.videos import (
|
|
79
|
+
CreateVideoBody,
|
|
80
|
+
CreateVideoResponse,
|
|
81
|
+
VideoJob,
|
|
82
|
+
)
|
|
77
83
|
|
|
78
84
|
|
|
79
85
|
__all__ = [
|
|
@@ -92,6 +98,7 @@ __all__ = [
|
|
|
92
98
|
"FinetuneResponse",
|
|
93
99
|
"FinetuneList",
|
|
94
100
|
"FinetuneListEvents",
|
|
101
|
+
"FinetuneDeleteResponse",
|
|
95
102
|
"FinetuneDownloadResult",
|
|
96
103
|
"FinetuneLRScheduler",
|
|
97
104
|
"LinearLRScheduler",
|
|
@@ -108,6 +115,8 @@ __all__ = [
|
|
|
108
115
|
"ImageRequest",
|
|
109
116
|
"ImageResponse",
|
|
110
117
|
"ModelObject",
|
|
118
|
+
"ModelUploadRequest",
|
|
119
|
+
"ModelUploadResponse",
|
|
111
120
|
"TrainingType",
|
|
112
121
|
"FullTrainingType",
|
|
113
122
|
"LoRATrainingType",
|
|
@@ -148,4 +157,7 @@ __all__ = [
|
|
|
148
157
|
"EvaluationCreateResponse",
|
|
149
158
|
"EvaluationJob",
|
|
150
159
|
"EvaluationStatusResponse",
|
|
160
|
+
"CreateVideoBody",
|
|
161
|
+
"CreateVideoResponse",
|
|
162
|
+
"VideoJob",
|
|
151
163
|
]
|
together/types/audio_speech.py
CHANGED
|
@@ -158,6 +158,17 @@ class AudioTranscriptionWord(BaseModel):
|
|
|
158
158
|
word: str
|
|
159
159
|
start: float
|
|
160
160
|
end: float
|
|
161
|
+
id: Optional[int] = None
|
|
162
|
+
speaker_id: Optional[str] = None
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
class AudioSpeakerSegment(BaseModel):
    """A diarized span of a transcription attributed to a single speaker."""

    # segment identifier
    id: int
    # identifier of the speaker for this segment
    speaker_id: str
    # segment start offset — presumably in seconds, matching
    # AudioTranscriptionWord.start; confirm against the API docs
    start: float
    # segment end offset — presumably in seconds; confirm against the API docs
    end: float
    # transcribed text for this segment
    text: str
    # word-level timing details within this segment
    words: List[AudioTranscriptionWord]
|
|
161
172
|
|
|
162
173
|
|
|
163
174
|
class AudioTranscriptionResponse(BaseModel):
|
|
@@ -165,11 +176,13 @@ class AudioTranscriptionResponse(BaseModel):
|
|
|
165
176
|
|
|
166
177
|
|
|
167
178
|
class AudioTranscriptionVerboseResponse(BaseModel):
|
|
179
|
+
id: Optional[str] = None
|
|
168
180
|
language: Optional[str] = None
|
|
169
181
|
duration: Optional[float] = None
|
|
170
182
|
text: str
|
|
171
183
|
segments: Optional[List[AudioTranscriptionSegment]] = None
|
|
172
184
|
words: Optional[List[AudioTranscriptionWord]] = None
|
|
185
|
+
speaker_segments: Optional[List[AudioSpeakerSegment]] = None
|
|
173
186
|
|
|
174
187
|
|
|
175
188
|
class AudioTranslationResponse(BaseModel):
|
together/types/evaluation.py
CHANGED
|
@@ -2,7 +2,7 @@ from __future__ import annotations
|
|
|
2
2
|
|
|
3
3
|
from datetime import datetime
|
|
4
4
|
from enum import Enum
|
|
5
|
-
from typing import Any, Dict, List, Optional, Union
|
|
5
|
+
from typing import Any, Dict, List, Literal, Optional, Union
|
|
6
6
|
|
|
7
7
|
from pydantic import BaseModel, Field
|
|
8
8
|
|
|
@@ -23,16 +23,22 @@ class EvaluationStatus(str, Enum):
|
|
|
23
23
|
|
|
24
24
|
|
|
25
25
|
class JudgeModelConfig(BaseModel):
|
|
26
|
-
|
|
26
|
+
model: str
|
|
27
|
+
model_source: Literal["serverless", "dedicated", "external"]
|
|
27
28
|
system_template: str
|
|
29
|
+
external_api_token: Optional[str] = None
|
|
30
|
+
external_base_url: Optional[str] = None
|
|
28
31
|
|
|
29
32
|
|
|
30
33
|
class ModelRequest(BaseModel):
|
|
31
|
-
|
|
34
|
+
model: str
|
|
35
|
+
model_source: Literal["serverless", "dedicated", "external"]
|
|
32
36
|
max_tokens: int
|
|
33
37
|
temperature: float
|
|
34
38
|
system_template: str
|
|
35
39
|
input_template: str
|
|
40
|
+
external_api_token: Optional[str] = None
|
|
41
|
+
external_base_url: Optional[str] = None
|
|
36
42
|
|
|
37
43
|
|
|
38
44
|
class ClassifyParameters(BaseModel):
|
together/types/files.py
CHANGED
together/types/finetune.py
CHANGED
|
@@ -322,6 +322,11 @@ class FinetuneListEvents(BaseModel):
|
|
|
322
322
|
data: List[FinetuneEvent] | None = None
|
|
323
323
|
|
|
324
324
|
|
|
325
|
+
class FinetuneDeleteResponse(BaseModel):
    """Response payload returned after deleting a fine-tuning job."""

    # deletion confirmation message returned by the API
    message: str
|
|
328
|
+
|
|
329
|
+
|
|
325
330
|
class FinetuneDownloadResult(BaseModel):
|
|
326
331
|
# object type
|
|
327
332
|
object: Literal["local"] | None = None
|