together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- together/__init__.py +101 -63
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +108 -75
- together/lib/cli/api/evals.py +588 -0
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
- together/lib/cli/api/models.py +140 -0
- together/{cli → lib/cli}/api/utils.py +6 -7
- together/{cli → lib/cli}/cli.py +16 -24
- together/{constants.py → lib/constants.py} +17 -12
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +455 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/lib/utils/files.py +628 -0
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -33
- together/resources/audio/__init__.py +72 -21
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -122
- together/resources/audio/transcriptions.py +282 -0
- together/resources/audio/translations.py +256 -0
- together/resources/audio/voices.py +135 -0
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +598 -395
- together/resources/evals.py +452 -0
- together/resources/files.py +398 -121
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +256 -108
- together/resources/jobs.py +214 -0
- together/resources/models.py +238 -90
- together/resources/rerank.py +190 -92
- together/resources/videos.py +374 -0
- together/types/__init__.py +65 -109
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_job.py +57 -0
- together-2.0.0a8.dist-info/METADATA +680 -0
- together-2.0.0a8.dist-info/RECORD +164 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
- together-2.0.0a8.dist-info/entry_points.txt +2 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -729
- together/cli/api/chat.py +0 -276
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/models.py +0 -55
- together/client.py +0 -176
- together/error.py +0 -194
- together/filemanager.py +0 -389
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -136
- together/resources/code_interpreter.py +0 -82
- together/resources/finetune.py +0 -1064
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -110
- together/types/batch.py +0 -53
- together/types/chat_completions.py +0 -197
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -66
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/files.py +0 -90
- together/types/finetune.py +0 -398
- together/types/images.py +0 -44
- together/types/models.py +0 -45
- together/types/rerank.py +0 -43
- together/utils/api_helpers.py +0 -124
- together/utils/files.py +0 -425
- together/version.py +0 -6
- together-1.5.17.dist-info/METADATA +0 -525
- together-1.5.17.dist-info/RECORD +0 -69
- together-1.5.17.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
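The rename entries above imply that shared helpers moved from top-level modules into the `together.lib` package. A minimal sketch of what that move means for imports, assuming the new paths are importable exactly as laid out in this manifest; the old-style imports are inferred from the 1.5.17 layout and shown only for contrast:

# Hypothetical import migration implied by the {utils -> lib/utils} and
# {constants.py -> lib/constants.py} moves above; the public surface of
# 2.0.0a8 is not documented in this manifest, so treat these as assumptions.

# 1.5.17 (old layout, inferred):
#   from together.utils import tools
#   from together import constants

# 2.0.0a8 (new layout, per the file moves):
from together.lib.utils import tools
from together.lib import constants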
together/cli/api/chat.py
DELETED
@@ -1,276 +0,0 @@
-from __future__ import annotations
-
-import cmd
-import json
-from typing import List, Tuple
-
-import click
-
-from together import Together
-from together.types.chat_completions import (
-    ChatCompletionChoicesChunk,
-    ChatCompletionChunk,
-    ChatCompletionResponse,
-)
-
-
-class ChatShell(cmd.Cmd):
-    intro = "Type /exit to exit, /help, or /? to list commands.\n"
-    prompt = ">>> "
-
-    def __init__(
-        self,
-        client: Together,
-        model: str,
-        max_tokens: int | None = None,
-        stop: List[str] | None = None,
-        temperature: float | None = None,
-        top_p: float | None = None,
-        top_k: int | None = None,
-        repetition_penalty: float | None = None,
-        presence_penalty: float | None = None,
-        frequency_penalty: float | None = None,
-        min_p: float | None = None,
-        safety_model: str | None = None,
-        system_message: str | None = None,
-    ) -> None:
-        super().__init__()
-        self.client = client
-        self.model = model
-        self.max_tokens = max_tokens
-        self.stop = stop
-        self.temperature = temperature
-        self.top_p = top_p
-        self.top_k = top_k
-        self.repetition_penalty = repetition_penalty
-        self.presence_penalty = presence_penalty
-        self.frequency_penalty = frequency_penalty
-        self.min_p = min_p
-        self.safety_model = safety_model
-        self.system_message = system_message
-
-        self.messages = (
-            [{"role": "system", "content": self.system_message}]
-            if self.system_message
-            else []
-        )
-
-    def precmd(self, line: str) -> str:
-        if line.startswith("/"):
-            return line[1:]
-        else:
-            return "say " + line
-
-    def do_say(self, arg: str) -> None:
-        self.messages.append({"role": "user", "content": arg})
-
-        output = ""
-
-        for chunk in self.client.chat.completions.create(
-            messages=self.messages,
-            model=self.model,
-            max_tokens=self.max_tokens,
-            stop=self.stop,
-            temperature=self.temperature,
-            top_p=self.top_p,
-            top_k=self.top_k,
-            repetition_penalty=self.repetition_penalty,
-            presence_penalty=self.presence_penalty,
-            frequency_penalty=self.frequency_penalty,
-            min_p=self.min_p,
-            safety_model=self.safety_model,
-            stream=True,
-        ):
-            # assertions for type checking
-            assert isinstance(chunk, ChatCompletionChunk)
-            assert chunk.choices
-            assert chunk.choices[0].delta
-
-            token = chunk.choices[0].delta.content
-
-            click.echo(token, nl=False)
-
-            output += token or ""
-
-        click.echo("\n")
-
-        self.messages.append({"role": "assistant", "content": output})
-
-    def do_reset(self, arg: str) -> None:
-        self.messages = (
-            [{"role": "system", "content": self.system_message}]
-            if self.system_message
-            else []
-        )
-
-    def do_exit(self, arg: str) -> bool:
-        return True
-
-
-@click.command(name="chat.interactive")
-@click.pass_context
-@click.option("--model", type=str, required=True, help="Model name")
-@click.option("--max-tokens", type=int, help="Max tokens to generate")
-@click.option(
-    "--stop", type=str, multiple=True, help="List of strings to stop generation"
-)
-@click.option("--temperature", type=float, help="Sampling temperature")
-@click.option("--top-p", type=int, help="Top p sampling")
-@click.option("--top-k", type=float, help="Top k sampling")
-@click.option("--repetition-penalty", type=float, help="Repetition penalty")
-@click.option("--presence-penalty", type=float, help="Presence penalty")
-@click.option("--frequency-penalty", type=float, help="Frequency penalty")
-@click.option("--min-p", type=float, help="Minimum p")
-@click.option("--safety-model", type=str, help="Moderation model")
-@click.option("--system-message", type=str, help="System message to use for the chat")
-def interactive(
-    ctx: click.Context,
-    model: str,
-    max_tokens: int | None = None,
-    stop: List[str] | None = None,
-    temperature: float | None = None,
-    top_p: float | None = None,
-    top_k: int | None = None,
-    repetition_penalty: float | None = None,
-    presence_penalty: float | None = None,
-    frequency_penalty: float | None = None,
-    min_p: float | None = None,
-    safety_model: str | None = None,
-    system_message: str | None = None,
-) -> None:
-    """Interactive chat shell"""
-    client: Together = ctx.obj
-
-    ChatShell(
-        client=client,
-        model=model,
-        max_tokens=max_tokens,
-        stop=stop,
-        temperature=temperature,
-        top_p=top_p,
-        top_k=top_k,
-        repetition_penalty=repetition_penalty,
-        presence_penalty=presence_penalty,
-        frequency_penalty=frequency_penalty,
-        min_p=min_p,
-        safety_model=safety_model,
-        system_message=system_message,
-    ).cmdloop()
-
-
-@click.command(name="chat.completions")
-@click.pass_context
-@click.option(
-    "--message",
-    type=(str, str),
-    multiple=True,
-    required=True,
-    help="Message to generate chat completions from",
-)
-@click.option("--model", type=str, required=True, help="Model name")
-@click.option("--max-tokens", type=int, help="Max tokens to generate")
-@click.option(
-    "--stop", type=str, multiple=True, help="List of strings to stop generation"
-)
-@click.option("--temperature", type=float, help="Sampling temperature")
-@click.option("--top-p", type=int, help="Top p sampling")
-@click.option("--top-k", type=float, help="Top k sampling")
-@click.option("--repetition-penalty", type=float, help="Repetition penalty")
-@click.option("--presence-penalty", type=float, help="Presence penalty sampling method")
-@click.option(
-    "--frequency-penalty", type=float, help="Frequency penalty sampling method"
-)
-@click.option("--min-p", type=float, help="Min p sampling")
-@click.option("--no-stream", is_flag=True, help="Disable streaming")
-@click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.")
-@click.option("--echo", is_flag=True, help="Echo prompt. Only works with --raw.")
-@click.option("--n", type=int, help="Number of output generations")
-@click.option("--safety-model", type=str, help="Moderation model")
-@click.option("--raw", is_flag=True, help="Output raw JSON")
-def chat(
-    ctx: click.Context,
-    message: List[Tuple[str, str]],
-    model: str,
-    max_tokens: int | None = None,
-    stop: List[str] | None = None,
-    temperature: float | None = None,
-    top_p: float | None = None,
-    top_k: int | None = None,
-    repetition_penalty: float | None = None,
-    presence_penalty: float | None = None,
-    frequency_penalty: float | None = None,
-    min_p: float | None = None,
-    no_stream: bool = False,
-    logprobs: int | None = None,
-    echo: bool | None = None,
-    n: int | None = None,
-    safety_model: str | None = None,
-    raw: bool = False,
-) -> None:
-    """Generate chat completions from messages"""
-    client: Together = ctx.obj
-
-    messages = [{"role": msg[0], "content": msg[1]} for msg in message]
-
-    response = client.chat.completions.create(
-        model=model,
-        messages=messages,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-        max_tokens=max_tokens,
-        stop=stop,
-        repetition_penalty=repetition_penalty,
-        presence_penalty=presence_penalty,
-        frequency_penalty=frequency_penalty,
-        min_p=min_p,
-        stream=not no_stream,
-        logprobs=logprobs,
-        echo=echo,
-        n=n,
-        safety_model=safety_model,
-    )
-
-    if not no_stream:
-        for chunk in response:
-            # assertions for type checking
-            assert isinstance(chunk, ChatCompletionChunk)
-            assert chunk.choices
-
-            if raw:
-                click.echo(f"{json.dumps(chunk.model_dump(exclude_none=True))}")
-                continue
-
-            should_print_header = len(chunk.choices) > 1
-            for stream_choice in sorted(chunk.choices, key=lambda c: c.index):  # type: ignore
-                assert isinstance(stream_choice, ChatCompletionChoicesChunk)
-                assert stream_choice.delta
-
-                if should_print_header:
-                    click.echo(f"\n===== Completion {stream_choice.index} =====\n")
-                click.echo(f"{stream_choice.delta.content}", nl=False)
-
-            if should_print_header:
-                click.echo("\n")
-
-        # new line after stream ends
-        click.echo("\n")
-    else:
-        # assertions for type checking
-        assert isinstance(response, ChatCompletionResponse)
-        assert isinstance(response.choices, list)
-
-        if raw:
-            click.echo(
-                f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}"
-            )
-            return
-
-        should_print_header = len(response.choices) > 1
-        for i, choice in enumerate(response.choices):
-            if should_print_header:
-                click.echo(f"===== Completion {i} =====")
-            click.echo(choice.message.content)  # type: ignore
-
-            if should_print_header:
-                click.echo("\n")
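For reference, the deleted ChatShell.do_say() above reduces to a streaming loop over the chat completions API. A minimal sketch built only from the client call visible in the diff, with the model name left as a placeholder and TOGETHER_API_KEY assumed to be set in the environment:

from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

messages = [{"role": "user", "content": "Say hello."}]
output = ""
for chunk in client.chat.completions.create(
    messages=messages,
    model="<model-name>",  # placeholder, not a recommendation
    stream=True,
):
    # same access path the deleted code asserted on
    token = chunk.choices[0].delta.content
    print(token or "", end="", flush=True)
    output += token or ""

# The shell kept multi-turn context by appending the assistant reply:
messages.append({"role": "assistant", "content": output})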
together/cli/api/completions.py
DELETED
@@ -1,119 +0,0 @@
-from __future__ import annotations
-
-import json
-from typing import List
-
-import click
-
-from together import Together
-from together.types import CompletionChunk
-from together.types.completions import CompletionChoicesChunk, CompletionResponse
-
-
-@click.command()
-@click.pass_context
-@click.argument("prompt", type=str, required=True)
-@click.option("--model", type=str, required=True, help="Model name")
-@click.option("--max-tokens", type=int, help="Max tokens to generate")
-@click.option(
-    "--stop", type=str, multiple=True, help="List of strings to stop generation"
-)
-@click.option("--temperature", type=float, help="Sampling temperature")
-@click.option("--top-p", type=int, help="Top p sampling")
-@click.option("--top-k", type=float, help="Top k sampling")
-@click.option("--repetition-penalty", type=float, help="Repetition penalty")
-@click.option("--presence-penalty", type=float, help="Presence penalty")
-@click.option("--frequency-penalty", type=float, help="Frequency penalty")
-@click.option("--min-p", type=float, help="Minimum p")
-@click.option("--no-stream", is_flag=True, help="Disable streaming")
-@click.option("--logprobs", type=int, help="Return logprobs. Only works with --raw.")
-@click.option("--echo", is_flag=True, help="Echo prompt. Only works with --raw.")
-@click.option("--n", type=int, help="Number of output generations")
-@click.option("--safety-model", type=str, help="Moderation model")
-@click.option("--raw", is_flag=True, help="Return raw JSON response")
-def completions(
-    ctx: click.Context,
-    prompt: str,
-    model: str,
-    max_tokens: int | None = 512,
-    stop: List[str] | None = None,
-    temperature: float | None = None,
-    top_p: float | None = None,
-    top_k: int | None = None,
-    repetition_penalty: float | None = None,
-    presence_penalty: float | None = None,
-    frequency_penalty: float | None = None,
-    min_p: float | None = None,
-    no_stream: bool = False,
-    logprobs: int | None = None,
-    echo: bool | None = None,
-    n: int | None = None,
-    safety_model: str | None = None,
-    raw: bool = False,
-) -> None:
-    """Generate text completions"""
-    client: Together = ctx.obj
-
-    response = client.completions.create(
-        model=model,
-        prompt=prompt,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-        max_tokens=max_tokens,
-        stop=stop,
-        repetition_penalty=repetition_penalty,
-        presence_penalty=presence_penalty,
-        frequency_penalty=frequency_penalty,
-        min_p=min_p,
-        stream=not no_stream,
-        logprobs=logprobs,
-        echo=echo,
-        n=n,
-        safety_model=safety_model,
-    )
-
-    if not no_stream:
-        for chunk in response:
-            # assertions for type checking
-            assert isinstance(chunk, CompletionChunk)
-            assert chunk.choices
-
-            if raw:
-                click.echo(f"{json.dumps(chunk.model_dump(exclude_none=True))}")
-                continue
-
-            should_print_header = len(chunk.choices) > 1
-            for stream_choice in sorted(chunk.choices, key=lambda c: c.index):  # type: ignore
-                # assertions for type checking
-                assert isinstance(stream_choice, CompletionChoicesChunk)
-                assert stream_choice.delta
-
-                if should_print_header:
-                    click.echo(f"\n===== Completion {stream_choice.index} =====\n")
-                click.echo(f"{stream_choice.delta.content}", nl=False)
-
-            if should_print_header:
-                click.echo("\n")
-
-        # new line after stream ends
-        click.echo("\n")
-    else:
-        # assertions for type checking
-        assert isinstance(response, CompletionResponse)
-        assert isinstance(response.choices, list)
-
-        if raw:
-            click.echo(
-                f"{json.dumps(response.model_dump(exclude_none=True), indent=4)}"
-            )
-            return
-
-        should_print_header = len(response.choices) > 1
-        for i, choice in enumerate(response.choices):
-            if should_print_header:
-                click.echo(f"===== Completion {i} =====")
-            click.echo(choice.text)
-
-            if should_print_header or not choice.text.endswith("\n"):
-                click.echo("\n")
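The deleted command's non-streaming branch (--no-stream) boils down to the call below; a sketch assembled only from arguments that appear in the diff, with the model name as a placeholder:

from together import Together

client = Together()
response = client.completions.create(
    model="<model-name>",   # placeholder
    prompt="Once upon a time",
    max_tokens=512,         # the command's default, per the signature above
    stream=False,
)
print(response.choices[0].text)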
together/cli/api/images.py
DELETED
@@ -1,93 +0,0 @@
-import base64
-import pathlib
-import requests
-
-import click
-from PIL import Image
-
-from together import Together
-from together.types import ImageResponse
-from together.types.images import ImageChoicesData
-
-
-@click.group()
-@click.pass_context
-def images(ctx: click.Context) -> None:
-    """Images generations API commands"""
-    pass
-
-
-@images.command()
-@click.pass_context
-@click.argument("prompt", type=str, required=True)
-@click.option("--model", type=str, required=True, help="Model name")
-@click.option("--steps", type=int, default=20, help="Number of steps to run generation")
-@click.option("--seed", type=int, default=None, help="Random seed")
-@click.option("--n", type=int, default=1, help="Number of images to generate")
-@click.option("--height", type=int, default=1024, help="Image height")
-@click.option("--width", type=int, default=1024, help="Image width")
-@click.option("--negative-prompt", type=str, default=None, help="Negative prompt")
-@click.option(
-    "--output",
-    type=click.Path(exists=True, file_okay=False, resolve_path=True),
-    required=False,
-    default=pathlib.Path("."),
-    help="Output directory",
-)
-@click.option("--prefix", type=str, required=False, default="image-")
-@click.option("--no-show", is_flag=True, help="Do not open images in viewer")
-def generate(
-    ctx: click.Context,
-    prompt: str,
-    model: str,
-    steps: int,
-    seed: int,
-    n: int,
-    height: int,
-    width: int,
-    negative_prompt: str,
-    output: pathlib.Path,
-    prefix: str,
-    no_show: bool,
-) -> None:
-    """Generate image"""
-
-    client: Together = ctx.obj
-
-    response = client.images.generate(
-        prompt=prompt,
-        model=model,
-        steps=steps,
-        seed=seed,
-        n=n,
-        height=height,
-        width=width,
-        negative_prompt=negative_prompt,
-    )
-
-    assert isinstance(response, ImageResponse)
-    assert isinstance(response.data, list)
-
-    for i, choice in enumerate(response.data):
-        assert isinstance(choice, ImageChoicesData)
-
-        data = None
-        if choice.b64_json:
-            data = base64.b64decode(choice.b64_json)
-        elif choice.url:
-            data = requests.get(choice.url).content
-
-        if not data:
-            click.echo(f"Image [{i + 1}/{len(response.data)}] is empty")
-            continue
-
-        with open(f"{output}/{prefix}{choice.index}.png", "wb") as f:
-            f.write(data)
-
-        click.echo(
-            f"Image [{i + 1}/{len(response.data)}] saved to {output}/{prefix}{choice.index}.png"
-        )
-
-        if not no_show:
-            image = Image.open(f"{output}/{prefix}{choice.index}.png")
-            image.show()
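The core of the deleted generate command is its decode-and-save loop: prefer the inline base64 payload, fall back to fetching the URL. A sketch of that logic against the 1.5.x response shape shown in the diff; prompt and model are placeholders, and the 2.0.0a8 response types listed in the manifest (e.g. image_data_b64.py, image_data_url.py) may shape this differently:

import base64

import requests
from together import Together

client = Together()
response = client.images.generate(
    prompt="<prompt>",     # placeholder
    model="<model-name>",  # placeholder
)

for i, choice in enumerate(response.data):
    data = None
    if choice.b64_json:    # inline base64 payload, preferred
        data = base64.b64decode(choice.b64_json)
    elif choice.url:       # hosted image fallback
        data = requests.get(choice.url).content
    if data:
        with open(f"image-{i}.png", "wb") as f:
            f.write(data)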
together/cli/api/models.py
DELETED
@@ -1,55 +0,0 @@
-import json as json_lib
-
-import click
-from tabulate import tabulate
-
-from together import Together
-from together.types.models import ModelObject
-
-
-@click.group()
-@click.pass_context
-def models(ctx: click.Context) -> None:
-    """Models API commands"""
-    pass
-
-
-@models.command()
-@click.option(
-    "--type",
-    type=click.Choice(["dedicated"]),
-    help="Filter models by type (dedicated: models that can be deployed as dedicated endpoints)",
-)
-@click.option(
-    "--json",
-    is_flag=True,
-    help="Output in JSON format",
-)
-@click.pass_context
-def list(ctx: click.Context, type: str | None, json: bool) -> None:
-    """List models"""
-    client: Together = ctx.obj
-
-    response = client.models.list(dedicated=(type == "dedicated"))
-
-    display_list = []
-
-    model: ModelObject
-    for model in response:
-        display_list.append(
-            {
-                "ID": model.id,
-                "Name": model.display_name,
-                "Organization": model.organization,
-                "Type": model.type,
-                "Context Length": model.context_length,
-                "License": model.license,
-                "Input per 1M token": model.pricing.input,
-                "Output per 1M token": model.pricing.output,
-            }
-        )
-
-    if json:
-        click.echo(json_lib.dumps(display_list, indent=2))
-    else:
-        click.echo(tabulate(display_list, headers="keys", tablefmt="plain"))
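Finally, the deleted models list command was a thin wrapper over client.models.list() plus tabulate. A trimmed sketch of the same idea, using a subset of the fields the diff reads off each model; whether 2.0.0a8 exposes the identical method signature is an assumption, since only the resource file names appear in this manifest:

from tabulate import tabulate

from together import Together

client = Together()
rows = [
    {
        "ID": model.id,
        "Organization": model.organization,
        "Context Length": model.context_length,
    }
    for model in client.models.list()
]
print(tabulate(rows, headers="keys", tablefmt="plain"))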