together 1.5.35__py3-none-any.whl → 2.0.0a7__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
- together/__init__.py +101 -114
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +66 -84
- together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
- together/{cli → lib/cli}/api/models.py +34 -27
- together/lib/cli/api/utils.py +50 -0
- together/{cli → lib/cli}/cli.py +16 -26
- together/{constants.py → lib/constants.py} +11 -24
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +397 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/{utils → lib/utils}/files.py +90 -288
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -39
- together/resources/audio/__init__.py +72 -48
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -128
- together/resources/audio/transcriptions.py +247 -261
- together/resources/audio/translations.py +221 -241
- together/resources/audio/voices.py +111 -41
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +589 -490
- together/resources/evals.py +452 -0
- together/resources/files.py +397 -129
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +258 -104
- together/resources/jobs.py +214 -0
- together/resources/models.py +223 -193
- together/resources/rerank.py +190 -92
- together/resources/videos.py +286 -214
- together/types/__init__.py +66 -167
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_create_response.py +10 -0
- together/types/video_job.py +57 -0
- together-2.0.0a7.dist-info/METADATA +730 -0
- together-2.0.0a7.dist-info/RECORD +165 -0
- {together-1.5.35.dist-info → together-2.0.0a7.dist-info}/WHEEL +1 -1
- together-2.0.0a7.dist-info/entry_points.txt +2 -0
- {together-1.5.35.dist-info → together-2.0.0a7.dist-info}/licenses/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -770
- together/cli/api/chat.py +0 -298
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/utils.py +0 -139
- together/client.py +0 -186
- together/error.py +0 -194
- together/filemanager.py +0 -635
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -165
- together/resources/code_interpreter.py +0 -82
- together/resources/evaluation.py +0 -808
- together/resources/finetune.py +0 -1388
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -311
- together/types/batch.py +0 -54
- together/types/chat_completions.py +0 -210
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -67
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/evaluation.py +0 -93
- together/types/files.py +0 -93
- together/types/finetune.py +0 -465
- together/types/images.py +0 -42
- together/types/models.py +0 -96
- together/types/rerank.py +0 -43
- together/types/videos.py +0 -69
- together/utils/api_helpers.py +0 -124
- together/version.py +0 -6
- together-1.5.35.dist-info/METADATA +0 -583
- together-1.5.35.dist-info/RECORD +0 -77
- together-1.5.35.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
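Two patterns dominate the listing above: the hand-written 1.x helpers move under a new `together.lib` namespace (`together/cli → together/lib/cli`, `together/utils → together/lib/utils`), while the `together/resources` and `together/types` trees are regenerated with many new per-endpoint modules. As a rough orientation aid, a minimal compatibility sketch in Python, assuming module paths track the file renames one-to-one; `check_file` is a helper from the 1.x module, and whether 2.0.0a7 re-exports it at the new path is an assumption based on the rename line above, not verified.

# Hypothetical compatibility shim, assuming module paths follow the file
# renames one-to-one (together/utils/files.py -> together/lib/utils/files.py).
# check_file exists in the 1.x module; its presence in 2.0.0a7 is an assumption.
try:
    from together.lib.utils.files import check_file  # 2.0.0a7 layout (assumed)
except ImportError:
    from together.utils.files import check_file  # 1.5.35 layout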
together/cli/api/chat.py
DELETED
@@ -1,298 +0,0 @@

from __future__ import annotations

import cmd
import json
from typing import Any, Dict, List, Tuple

import click

from together import Together
from together.types.chat_completions import (
    ChatCompletionChoicesChunk,
    ChatCompletionChunk,
    ChatCompletionResponse,
)


class ChatShell(cmd.Cmd):
    intro = "Type /exit to exit, /help, or /? to list commands.\n"
    prompt = ">>> "

    def __init__(
        self,
        client: Together,
        model: str,
        max_tokens: int | None = None,
        stop: List[str] | None = None,
        temperature: float | None = None,
        top_p: float | None = None,
        top_k: int | None = None,
        repetition_penalty: float | None = None,
        presence_penalty: float | None = None,
        frequency_penalty: float | None = None,
        min_p: float | None = None,
        safety_model: str | None = None,
        system_message: str | None = None,
    ) -> None:
        super().__init__()
        self.client = client
        self.model = model
        self.max_tokens = max_tokens
        self.stop = stop
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.repetition_penalty = repetition_penalty
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.min_p = min_p
        self.safety_model = safety_model
        self.system_message = system_message

        self.messages = (
            [{"role": "system", "content": self.system_message}]
            if self.system_message
            else []
        )

    def precmd(self, line: str) -> str:
        if line.startswith("/"):
            return line[1:]
        else:
            return "say " + line

    def do_say(self, arg: str) -> None:
        self.messages.append({"role": "user", "content": arg})

        output = ""

        for chunk in self.client.chat.completions.create(
            messages=self.messages,
            model=self.model,
            max_tokens=self.max_tokens,
            stop=self.stop,
            temperature=self.temperature,
            top_p=self.top_p,
            top_k=self.top_k,
            repetition_penalty=self.repetition_penalty,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            min_p=self.min_p,
            safety_model=self.safety_model,
            stream=True,
        ):
            # assertions for type checking
            assert isinstance(chunk, ChatCompletionChunk)
            assert chunk.choices
            assert chunk.choices[0].delta

            token = chunk.choices[0].delta.content

            click.echo(token, nl=False)

            output += token or ""

        click.echo("\n")

        self.messages.append({"role": "assistant", "content": output})

    def do_reset(self, arg: str) -> None:
        self.messages = (
            [{"role": "system", "content": self.system_message}]
            if self.system_message
            else []
        )

    def do_exit(self, arg: str) -> bool:
        return True


@click.command(name="chat.interactive")
@click.pass_context
@click.option("--model", type=str, required=True, help="Model name")
@click.option("--max-tokens", type=int, help="Max tokens to generate")
@click.option(
    "--stop", type=str, multiple=True, help="List of strings to stop generation"
)
@click.option("--temperature", type=float, help="Sampling temperature")
@click.option("--top-p", type=int, help="Top p sampling")
@click.option("--top-k", type=float, help="Top k sampling")
@click.option("--repetition-penalty", type=float, help="Repetition penalty")
@click.option("--presence-penalty", type=float, help="Presence penalty")
@click.option("--frequency-penalty", type=float, help="Frequency penalty")
@click.option("--min-p", type=float, help="Minimum p")
@click.option("--safety-model", type=str, help="Moderation model")
@click.option("--system-message", type=str, help="System message to use for the chat")
def interactive(
    ctx: click.Context,
    model: str,
    max_tokens: int | None = None,
    stop: List[str] | None = None,
    temperature: float | None = None,
    top_p: float | None = None,
    top_k: int | None = None,
    repetition_penalty: float | None = None,
    presence_penalty: float | None = None,
    frequency_penalty: float | None = None,
    min_p: float | None = None,
    safety_model: str | None = None,
    system_message: str | None = None,
) -> None:
    """Interactive chat shell"""
    client: Together = ctx.obj

    ChatShell(
        client=client,
        model=model,
        max_tokens=max_tokens,
        stop=stop,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        min_p=min_p,
        safety_model=safety_model,
        system_message=system_message,
    ).cmdloop()


@click.command(name="chat.completions")
@click.pass_context
@click.option(
    "--message",
    type=(str, str),
    multiple=True,
    required=True,
    help="Message to generate chat completions from",
)
@click.option("--model", type=str, required=True, help="Model name")
@click.option("--max-tokens", type=int, help="Max tokens to generate")
@click.option(
    "--stop", type=str, multiple=True, help="List of strings to stop generation"
)
@click.option("--temperature", type=float, help="Sampling temperature")
@click.option("--top-p", type=int, help="Top p sampling")
@click.option("--top-k", type=float, help="Top k sampling")
@click.option("--repetition-penalty", type=float, help="Repetition penalty")
@click.option("--presence-penalty", type=float, help="Presence penalty sampling method")
@click.option(
    "--frequency-penalty", type=float, help="Frequency penalty sampling method"
)
@click.option("--min-p", type=float, help="Min p sampling")
@click.option(
    "--audio-url",
    type=str,
    multiple=True,
    help="Audio URL to attach to the last user message",
)
@click.option("--no-stream", is_flag=True, help="Disable streaming")
@click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.")
@click.option("--echo", is_flag=True, help="Echo prompt. Only works with --raw.")
@click.option("--n", type=int, help="Number of output generations")
@click.option("--safety-model", type=str, help="Moderation model")
@click.option("--raw", is_flag=True, help="Output raw JSON")
def chat(
    ctx: click.Context,
    message: List[Tuple[str, str]],
    model: str,
    max_tokens: int | None = None,
    stop: List[str] | None = None,
    temperature: float | None = None,
    top_p: float | None = None,
    top_k: int | None = None,
    repetition_penalty: float | None = None,
    presence_penalty: float | None = None,
    frequency_penalty: float | None = None,
    min_p: float | None = None,
    audio_url: List[str] | None = None,
    no_stream: bool = False,
    logprobs: int | None = None,
    echo: bool | None = None,
    n: int | None = None,
    safety_model: str | None = None,
    raw: bool = False,
) -> None:
    """Generate chat completions from messages"""
    client: Together = ctx.obj

    messages: List[Dict[str, Any]] = [
        {"role": msg[0], "content": msg[1]} for msg in message
    ]

    if audio_url and messages:
        last_msg = messages[-1]
        if last_msg["role"] == "user":
            # Convert content to list if it is string
            if isinstance(last_msg["content"], str):
                last_msg["content"] = [{"type": "text", "text": last_msg["content"]}]

            # Append audio URLs
            for url in audio_url:
                last_msg["content"].append(
                    {"type": "audio_url", "audio_url": {"url": url}}
                )

    response = client.chat.completions.create(
        model=model,
        messages=messages,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        max_tokens=max_tokens,
        stop=stop,
        repetition_penalty=repetition_penalty,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        min_p=min_p,
        stream=not no_stream,
        logprobs=logprobs,
        echo=echo,
        n=n,
        safety_model=safety_model,
    )

    if not no_stream:
        for chunk in response:
            # assertions for type checking
            assert isinstance(chunk, ChatCompletionChunk)
            assert chunk.choices

            if raw:
                click.echo(f"{json.dumps(chunk.model_dump(exclude_none=True))}")
                continue

            should_print_header = len(chunk.choices) > 1
            for stream_choice in sorted(chunk.choices, key=lambda c: c.index):  # type: ignore
                assert isinstance(stream_choice, ChatCompletionChoicesChunk)
                assert stream_choice.delta

                if should_print_header:
                    click.echo(f"\n===== Completion {stream_choice.index} =====\n")
                click.echo(f"{stream_choice.delta.content}", nl=False)

                if should_print_header:
                    click.echo("\n")

        # new line after stream ends
        click.echo("\n")
    else:
        # assertions for type checking
        assert isinstance(response, ChatCompletionResponse)
        assert isinstance(response.choices, list)

        if raw:
            click.echo(
                f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}"
            )
            return

        should_print_header = len(response.choices) > 1
        for i, choice in enumerate(response.choices):
            if should_print_header:
                click.echo(f"===== Completion {i} =====")
            click.echo(choice.message.content)  # type: ignore

            if should_print_header:
                click.echo("\n")
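For context on what the deleted file above provided: the `ChatShell` was normally launched through the `chat.interactive` click command, but nothing in it required the CLI. A minimal sketch of driving it directly, assuming together 1.5.x is installed, `TOGETHER_API_KEY` is set in the environment, and a real model name is substituted for the placeholder:

# Sketch: driving the removed ChatShell directly (together 1.5.x only).
from together import Together
from together.cli.api.chat import ChatShell

ChatShell(
    client=Together(),  # picks up TOGETHER_API_KEY from the environment
    model="<chat-model-name>",  # placeholder, not a recommendation
    system_message="You are a helpful assistant.",
).cmdloop()  # /exit quits, /reset clears history; plain input becomes a user message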
together/cli/api/completions.py
DELETED
@@ -1,119 +0,0 @@

from __future__ import annotations

import json
from typing import List

import click

from together import Together
from together.types import CompletionChunk
from together.types.completions import CompletionChoicesChunk, CompletionResponse


@click.command()
@click.pass_context
@click.argument("prompt", type=str, required=True)
@click.option("--model", type=str, required=True, help="Model name")
@click.option("--max-tokens", type=int, help="Max tokens to generate")
@click.option(
    "--stop", type=str, multiple=True, help="List of strings to stop generation"
)
@click.option("--temperature", type=float, help="Sampling temperature")
@click.option("--top-p", type=int, help="Top p sampling")
@click.option("--top-k", type=float, help="Top k sampling")
@click.option("--repetition-penalty", type=float, help="Repetition penalty")
@click.option("--presence-penalty", type=float, help="Presence penalty")
@click.option("--frequency-penalty", type=float, help="Frequency penalty")
@click.option("--min-p", type=float, help="Minimum p")
@click.option("--no-stream", is_flag=True, help="Disable streaming")
@click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.")
@click.option("--echo", is_flag=True, help="Echo prompt. Only works with --raw.")
@click.option("--n", type=int, help="Number of output generations")
@click.option("--safety-model", type=str, help="Moderation model")
@click.option("--raw", is_flag=True, help="Return raw JSON response")
def completions(
    ctx: click.Context,
    prompt: str,
    model: str,
    max_tokens: int | None = 512,
    stop: List[str] | None = None,
    temperature: float | None = None,
    top_p: float | None = None,
    top_k: int | None = None,
    repetition_penalty: float | None = None,
    presence_penalty: float | None = None,
    frequency_penalty: float | None = None,
    min_p: float | None = None,
    no_stream: bool = False,
    logprobs: int | None = None,
    echo: bool | None = None,
    n: int | None = None,
    safety_model: str | None = None,
    raw: bool = False,
) -> None:
    """Generate text completions"""
    client: Together = ctx.obj

    response = client.completions.create(
        model=model,
        prompt=prompt,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        max_tokens=max_tokens,
        stop=stop,
        repetition_penalty=repetition_penalty,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        min_p=min_p,
        stream=not no_stream,
        logprobs=logprobs,
        echo=echo,
        n=n,
        safety_model=safety_model,
    )

    if not no_stream:
        for chunk in response:
            # assertions for type checking
            assert isinstance(chunk, CompletionChunk)
            assert chunk.choices

            if raw:
                click.echo(f"{json.dumps(chunk.model_dump(exclude_none=True))}")
                continue

            should_print_header = len(chunk.choices) > 1
            for stream_choice in sorted(chunk.choices, key=lambda c: c.index):  # type: ignore
                # assertions for type checking
                assert isinstance(stream_choice, CompletionChoicesChunk)
                assert stream_choice.delta

                if should_print_header:
                    click.echo(f"\n===== Completion {stream_choice.index} =====\n")
                click.echo(f"{stream_choice.delta.content}", nl=False)

                if should_print_header:
                    click.echo("\n")

        # new line after stream ends
        click.echo("\n")
    else:
        # assertions for type checking
        assert isinstance(response, CompletionResponse)
        assert isinstance(response.choices, list)

        if raw:
            click.echo(
                f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}"
            )
            return

        should_print_header = len(response.choices) > 1
        for i, choice in enumerate(response.choices):
            if should_print_header:
                click.echo(f"===== Completion {i} =====")
            click.echo(choice.text)

            if should_print_header or not choice.text.endswith("\n"):
                click.echo("\n")
together/cli/api/images.py
DELETED
@@ -1,93 +0,0 @@

import base64
import pathlib
import requests

import click
from PIL import Image

from together import Together
from together.types import ImageResponse
from together.types.images import ImageChoicesData


@click.group()
@click.pass_context
def images(ctx: click.Context) -> None:
    """Images generations API commands"""
    pass


@images.command()
@click.pass_context
@click.argument("prompt", type=str, required=True)
@click.option("--model", type=str, required=True, help="Model name")
@click.option("--steps", type=int, default=20, help="Number of steps to run generation")
@click.option("--seed", type=int, default=None, help="Random seed")
@click.option("--n", type=int, default=1, help="Number of images to generate")
@click.option("--height", type=int, default=1024, help="Image height")
@click.option("--width", type=int, default=1024, help="Image width")
@click.option("--negative-prompt", type=str, default=None, help="Negative prompt")
@click.option(
    "--output",
    type=click.Path(exists=True, file_okay=False, resolve_path=True),
    required=False,
    default=pathlib.Path("."),
    help="Output directory",
)
@click.option("--prefix", type=str, required=False, default="image-")
@click.option("--no-show", is_flag=True, help="Do not open images in viewer")
def generate(
    ctx: click.Context,
    prompt: str,
    model: str,
    steps: int,
    seed: int,
    n: int,
    height: int,
    width: int,
    negative_prompt: str,
    output: pathlib.Path,
    prefix: str,
    no_show: bool,
) -> None:
    """Generate image"""

    client: Together = ctx.obj

    response = client.images.generate(
        prompt=prompt,
        model=model,
        steps=steps,
        seed=seed,
        n=n,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
    )

    assert isinstance(response, ImageResponse)
    assert isinstance(response.data, list)

    for i, choice in enumerate(response.data):
        assert isinstance(choice, ImageChoicesData)

        data = None
        if choice.b64_json:
            data = base64.b64decode(choice.b64_json)
        elif choice.url:
            data = requests.get(choice.url).content

        if not data:
            click.echo(f"Image [{i + 1}/{len(response.data)}] is empty")
            continue

        with open(f"{output}/{prefix}{choice.index}.png", "wb") as f:
            f.write(data)

        click.echo(
            f"Image [{i + 1}/{len(response.data)}] saved to {output}/{prefix}{choice.index}.png"
        )

        if not no_show:
            image = Image.open(f"{output}/{prefix}{choice.index}.png")
            image.show()
together/cli/api/utils.py
DELETED
@@ -1,139 +0,0 @@

from __future__ import annotations

import math
import re
from gettext import gettext as _
from typing import Literal
from datetime import datetime

import click

from together.types.finetune import FinetuneResponse, COMPLETED_STATUSES

_PROGRESS_BAR_WIDTH = 40


class AutoIntParamType(click.ParamType):
    name = "integer_or_max"
    _number_class = int

    def convert(
        self, value: str, param: click.Parameter | None, ctx: click.Context | None
    ) -> int | Literal["max"] | None:
        if value == "max":
            return "max"
        try:
            return int(value)
        except ValueError:
            self.fail(
                _("{value!r} is not a valid {number_type}.").format(
                    value=value, number_type=self.name
                ),
                param,
                ctx,
            )


class BooleanWithAutoParamType(click.ParamType):
    name = "boolean_or_auto"

    def convert(
        self, value: str, param: click.Parameter | None, ctx: click.Context | None
    ) -> bool | Literal["auto"] | None:
        if value == "auto":
            return "auto"
        try:
            return bool(value)
        except ValueError:
            self.fail(
                _("{value!r} is not a valid {type}.").format(
                    value=value, type=self.name
                ),
                param,
                ctx,
            )


INT_WITH_MAX = AutoIntParamType()
BOOL_WITH_AUTO = BooleanWithAutoParamType()


def _human_readable_time(timedelta: float) -> str:
    """Convert a timedelta to a compact human-readable string
    Examples:
        00:00:10 -> 10s
        01:23:45 -> 1h 23min 45s
        1 Month 23 days 04:56:07 -> 1month 23d 4h 56min 7s
    Args:
        timedelta (float): The timedelta in seconds to convert.
    Returns:
        A string representing the timedelta in a human-readable format.
    """
    units = [
        (30 * 24 * 60 * 60, "month"),  # 30 days
        (24 * 60 * 60, "d"),
        (60 * 60, "h"),
        (60, "min"),
        (1, "s"),
    ]

    total_seconds = int(timedelta)
    parts = []

    for unit_seconds, unit_name in units:
        if total_seconds >= unit_seconds:
            value = total_seconds // unit_seconds
            total_seconds %= unit_seconds
            parts.append(f"{value}{unit_name}")

    return " ".join(parts) if parts else "0s"


def generate_progress_bar(
    finetune_job: FinetuneResponse, current_time: datetime, use_rich: bool = False
) -> str:
    """Generate a progress bar for a finetune job.
    Args:
        finetune_job: The finetune job to generate a progress bar for.
        current_time: The current time.
        use_rich: Whether to use rich formatting.
    Returns:
        A string representing the progress bar.
    """
    progress = "Progress: [bold red]unavailable[/bold red]"
    if finetune_job.status in COMPLETED_STATUSES:
        progress = "Progress: [bold green]completed[/bold green]"
    elif finetune_job.started_at is not None:
        # Replace 'Z' with '+00:00' for Python 3.10 compatibility
        started_at_str = finetune_job.started_at.replace("Z", "+00:00")
        started_at = datetime.fromisoformat(started_at_str).astimezone()

        if finetune_job.progress is not None:
            if current_time < started_at:
                return progress

            if not finetune_job.progress.estimate_available:
                return progress

            if finetune_job.progress.seconds_remaining <= 0:
                return progress

            elapsed_time = (current_time - started_at).total_seconds()
            ratio_filled = min(
                elapsed_time / finetune_job.progress.seconds_remaining, 1.0
            )
            percentage = ratio_filled * 100
            filled = math.ceil(ratio_filled * _PROGRESS_BAR_WIDTH)
            bar = "█" * filled + "░" * (_PROGRESS_BAR_WIDTH - filled)
            time_left = "N/A"
            if finetune_job.progress.seconds_remaining > elapsed_time:
                time_left = _human_readable_time(
                    finetune_job.progress.seconds_remaining - elapsed_time
                )
            time_text = f"{time_left} left"
            progress = f"Progress: {bar} [bold]{percentage:>3.0f}%[/bold] [yellow]{time_text}[/yellow]"

    if use_rich:
        return progress

    return re.sub(r"\[/?[^\]]+\]", "", progress)
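The `_human_readable_time` docstring above doubles as a small spec; those exact cases can be checked directly against the 1.5.35 module (10 s, then 1:23:45 = 5025 s, then 30 + 23 days plus 04:56:07):

# Sanity check of _human_readable_time against its own docstring examples
# (import path as it existed in together 1.5.35).
from together.cli.api.utils import _human_readable_time

assert _human_readable_time(10) == "10s"                                 # 00:00:10
assert _human_readable_time(1 * 3600 + 23 * 60 + 45) == "1h 23min 45s"  # 01:23:45
assert _human_readable_time(
    (30 + 23) * 86400 + 4 * 3600 + 56 * 60 + 7
) == "1month 23d 4h 56min 7s"                                           # 1 month 23 days 04:56:07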