together 1.4.1__tar.gz → 1.4.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {together-1.4.1 → together-1.4.4}/PKG-INFO +93 -23
  2. {together-1.4.1 → together-1.4.4}/README.md +92 -22
  3. {together-1.4.1 → together-1.4.4}/pyproject.toml +1 -1
  4. {together-1.4.1 → together-1.4.4}/src/together/cli/api/finetune.py +67 -5
  5. {together-1.4.1 → together-1.4.4}/src/together/constants.py +6 -0
  6. {together-1.4.1 → together-1.4.4}/src/together/legacy/finetune.py +1 -1
  7. {together-1.4.1 → together-1.4.4}/src/together/resources/finetune.py +173 -15
  8. {together-1.4.1 → together-1.4.4}/src/together/types/__init__.py +6 -0
  9. {together-1.4.1 → together-1.4.4}/src/together/types/chat_completions.py +6 -0
  10. {together-1.4.1 → together-1.4.4}/src/together/types/endpoints.py +3 -3
  11. {together-1.4.1 → together-1.4.4}/src/together/types/finetune.py +45 -0
  12. {together-1.4.1 → together-1.4.4}/src/together/utils/__init__.py +4 -0
  13. {together-1.4.1 → together-1.4.4}/src/together/utils/files.py +139 -66
  14. {together-1.4.1 → together-1.4.4}/src/together/utils/tools.py +53 -2
  15. {together-1.4.1 → together-1.4.4}/LICENSE +0 -0
  16. {together-1.4.1 → together-1.4.4}/src/together/__init__.py +0 -0
  17. {together-1.4.1 → together-1.4.4}/src/together/abstract/__init__.py +0 -0
  18. {together-1.4.1 → together-1.4.4}/src/together/abstract/api_requestor.py +0 -0
  19. {together-1.4.1 → together-1.4.4}/src/together/cli/__init__.py +0 -0
  20. {together-1.4.1 → together-1.4.4}/src/together/cli/api/__init__.py +0 -0
  21. {together-1.4.1 → together-1.4.4}/src/together/cli/api/chat.py +0 -0
  22. {together-1.4.1 → together-1.4.4}/src/together/cli/api/completions.py +0 -0
  23. {together-1.4.1 → together-1.4.4}/src/together/cli/api/endpoints.py +0 -0
  24. {together-1.4.1 → together-1.4.4}/src/together/cli/api/files.py +0 -0
  25. {together-1.4.1 → together-1.4.4}/src/together/cli/api/images.py +0 -0
  26. {together-1.4.1 → together-1.4.4}/src/together/cli/api/models.py +0 -0
  27. {together-1.4.1 → together-1.4.4}/src/together/cli/api/utils.py +0 -0
  28. {together-1.4.1 → together-1.4.4}/src/together/cli/cli.py +0 -0
  29. {together-1.4.1 → together-1.4.4}/src/together/client.py +0 -0
  30. {together-1.4.1 → together-1.4.4}/src/together/error.py +0 -0
  31. {together-1.4.1 → together-1.4.4}/src/together/filemanager.py +0 -0
  32. {together-1.4.1 → together-1.4.4}/src/together/legacy/__init__.py +0 -0
  33. {together-1.4.1 → together-1.4.4}/src/together/legacy/base.py +0 -0
  34. {together-1.4.1 → together-1.4.4}/src/together/legacy/complete.py +0 -0
  35. {together-1.4.1 → together-1.4.4}/src/together/legacy/embeddings.py +0 -0
  36. {together-1.4.1 → together-1.4.4}/src/together/legacy/files.py +0 -0
  37. {together-1.4.1 → together-1.4.4}/src/together/legacy/images.py +0 -0
  38. {together-1.4.1 → together-1.4.4}/src/together/legacy/models.py +0 -0
  39. {together-1.4.1 → together-1.4.4}/src/together/resources/__init__.py +0 -0
  40. {together-1.4.1 → together-1.4.4}/src/together/resources/audio/__init__.py +0 -0
  41. {together-1.4.1 → together-1.4.4}/src/together/resources/audio/speech.py +0 -0
  42. {together-1.4.1 → together-1.4.4}/src/together/resources/chat/__init__.py +0 -0
  43. {together-1.4.1 → together-1.4.4}/src/together/resources/chat/completions.py +0 -0
  44. {together-1.4.1 → together-1.4.4}/src/together/resources/completions.py +0 -0
  45. {together-1.4.1 → together-1.4.4}/src/together/resources/embeddings.py +0 -0
  46. {together-1.4.1 → together-1.4.4}/src/together/resources/endpoints.py +0 -0
  47. {together-1.4.1 → together-1.4.4}/src/together/resources/files.py +0 -0
  48. {together-1.4.1 → together-1.4.4}/src/together/resources/images.py +0 -0
  49. {together-1.4.1 → together-1.4.4}/src/together/resources/models.py +0 -0
  50. {together-1.4.1 → together-1.4.4}/src/together/resources/rerank.py +0 -0
  51. {together-1.4.1 → together-1.4.4}/src/together/together_response.py +0 -0
  52. {together-1.4.1 → together-1.4.4}/src/together/types/abstract.py +0 -0
  53. {together-1.4.1 → together-1.4.4}/src/together/types/audio_speech.py +0 -0
  54. {together-1.4.1 → together-1.4.4}/src/together/types/common.py +0 -0
  55. {together-1.4.1 → together-1.4.4}/src/together/types/completions.py +0 -0
  56. {together-1.4.1 → together-1.4.4}/src/together/types/embeddings.py +0 -0
  57. {together-1.4.1 → together-1.4.4}/src/together/types/error.py +0 -0
  58. {together-1.4.1 → together-1.4.4}/src/together/types/files.py +0 -0
  59. {together-1.4.1 → together-1.4.4}/src/together/types/images.py +0 -0
  60. {together-1.4.1 → together-1.4.4}/src/together/types/models.py +0 -0
  61. {together-1.4.1 → together-1.4.4}/src/together/types/rerank.py +0 -0
  62. {together-1.4.1 → together-1.4.4}/src/together/utils/_log.py +0 -0
  63. {together-1.4.1 → together-1.4.4}/src/together/utils/api_helpers.py +0 -0
  64. {together-1.4.1 → together-1.4.4}/src/together/version.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: together
3
- Version: 1.4.1
3
+ Version: 1.4.4
4
4
  Summary: Python client for Together's Cloud Platform!
5
5
  License: Apache-2.0
6
6
  Author: Together AI
@@ -87,25 +87,101 @@ This repo contains both a Python Library and a CLI. We'll demonstrate how to use
87
87
  ### Chat Completions
88
88
 
89
89
  ```python
90
- import os
91
90
  from together import Together
92
91
 
93
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
92
+ client = Together()
94
93
 
94
+ # Simple text message
95
95
  response = client.chat.completions.create(
96
96
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
97
97
  messages=[{"role": "user", "content": "tell me about new york"}],
98
98
  )
99
99
  print(response.choices[0].message.content)
100
+
101
+ # Multi-modal message with text and image
102
+ response = client.chat.completions.create(
103
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
104
+ messages=[{
105
+ "role": "user",
106
+ "content": [
107
+ {
108
+ "type": "text",
109
+ "text": "What's in this image?"
110
+ },
111
+ {
112
+ "type": "image_url",
113
+ "image_url": {
114
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
115
+ }
116
+ }
117
+ ]
118
+ }]
119
+ )
120
+ print(response.choices[0].message.content)
121
+
122
+ # Multi-modal message with multiple images
123
+ response = client.chat.completions.create(
124
+ model="Qwen/Qwen2.5-VL-72B-Instruct",
125
+ messages=[{
126
+ "role": "user",
127
+ "content": [
128
+ {
129
+ "type": "text",
130
+ "text": "Compare these two images."
131
+ },
132
+ {
133
+ "type": "image_url",
134
+ "image_url": {
135
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
136
+ }
137
+ },
138
+ {
139
+ "type": "image_url",
140
+ "image_url": {
141
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/slack.png"
142
+ }
143
+ }
144
+ ]
145
+ }]
146
+ )
147
+ print(response.choices[0].message.content)
148
+
149
+ # Multi-modal message with text and video
150
+ response = client.chat.completions.create(
151
+ model="Qwen/Qwen2.5-VL-72B-Instruct",
152
+ messages=[{
153
+ "role": "user",
154
+ "content": [
155
+ {
156
+ "type": "text",
157
+ "text": "What's happening in this video?"
158
+ },
159
+ {
160
+ "type": "video_url",
161
+ "video_url": {
162
+ "url": "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
163
+ }
164
+ }
165
+ ]
166
+ }]
167
+ )
168
+ print(response.choices[0].message.content)
100
169
  ```
101
170
 
171
+ The chat completions API supports three types of content:
172
+ - Plain text messages using the `content` field directly
173
+ - Multi-modal messages with images using `type: "image_url"`
174
+ - Multi-modal messages with videos using `type: "video_url"`
175
+
176
+ When using multi-modal content, the `content` field becomes an array of content objects, each with its own type and corresponding data.
177
+
102
178
  #### Streaming
103
179
 
104
180
  ```python
105
181
  import os
106
182
  from together import Together
107
183
 
108
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
184
+ client = Together()
109
185
  stream = client.chat.completions.create(
110
186
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
111
187
  messages=[{"role": "user", "content": "tell me about new york"}],
@@ -119,17 +195,17 @@ for chunk in stream:
119
195
  #### Async usage
120
196
 
121
197
  ```python
122
- import os, asyncio
198
+ import asyncio
123
199
  from together import AsyncTogether
124
200
 
125
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
201
+ async_client = AsyncTogether()
126
202
  messages = [
127
203
  "What are the top things to do in San Francisco?",
128
204
  "What country is Paris in?",
129
205
  ]
130
206
 
131
207
  async def async_chat_completion(messages):
132
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
208
+ async_client = AsyncTogether()
133
209
  tasks = [
134
210
  async_client.chat.completions.create(
135
211
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -150,10 +226,9 @@ asyncio.run(async_chat_completion(messages))
150
226
  Completions are for code and language models shown [here](https://docs.together.ai/docs/inference-models). Below, a code model example is shown.
151
227
 
152
228
  ```python
153
- import os
154
229
  from together import Together
155
230
 
156
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
231
+ client = Together()
157
232
 
158
233
  response = client.completions.create(
159
234
  model="codellama/CodeLlama-34b-Python-hf",
@@ -166,10 +241,9 @@ print(response.choices[0].text)
166
241
  #### Streaming
167
242
 
168
243
  ```python
169
- import os
170
244
  from together import Together
171
245
 
172
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
246
+ client = Together()
173
247
  stream = client.completions.create(
174
248
  model="codellama/CodeLlama-34b-Python-hf",
175
249
  prompt="Write a Next.js component with TailwindCSS for a header component.",
@@ -183,10 +257,10 @@ for chunk in stream:
183
257
  #### Async usage
184
258
 
185
259
  ```python
186
- import os, asyncio
260
+ import asyncio
187
261
  from together import AsyncTogether
188
262
 
189
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
263
+ async_client = AsyncTogether()
190
264
  prompts = [
191
265
  "Write a Next.js component with TailwindCSS for a header component.",
192
266
  "Write a python function for the fibonacci sequence",
@@ -211,10 +285,9 @@ asyncio.run(async_chat_completion(prompts))
211
285
  ### Image generation
212
286
 
213
287
  ```python
214
- import os
215
288
  from together import Together
216
289
 
217
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
290
+ client = Together()
218
291
 
219
292
  response = client.images.generate(
220
293
  prompt="space robots",
@@ -231,7 +304,7 @@ print(response.data[0].b64_json)
231
304
  from typing import List
232
305
  from together import Together
233
306
 
234
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
307
+ client = Together()
235
308
 
236
309
  def get_embeddings(texts: List[str], model: str) -> List[List[float]]:
237
310
  texts = [text.replace("\n", " ") for text in texts]
@@ -250,7 +323,7 @@ print(embeddings)
250
323
  from typing import List
251
324
  from together import Together
252
325
 
253
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
326
+ client = Together()
254
327
 
255
328
  def get_reranked_documents(query: str, documents: List[str], model: str, top_n: int = 3) -> List[str]:
256
329
  outputs = client.rerank.create(model=model, query=query, documents=documents, top_n=top_n)
@@ -272,10 +345,9 @@ Read more about Reranking [here](https://docs.together.ai/docs/rerank-overview).
272
345
  The files API is used for fine-tuning and allows developers to upload data to fine-tune on. It also has several methods to list all files, retrieve files, and delete files. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
273
346
 
274
347
  ```python
275
- import os
276
348
  from together import Together
277
349
 
278
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
350
+ client = Together()
279
351
 
280
352
  client.files.upload(file="somedata.jsonl") # uploads a file
281
353
  client.files.list() # lists all uploaded files
@@ -289,10 +361,9 @@ client.files.delete(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # deletes a
289
361
  The finetune API is used for fine-tuning and allows developers to create finetuning jobs. It also has several methods to list all jobs, retrieve statuses and get checkpoints. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
290
362
 
291
363
  ```python
292
- import os
293
364
  from together import Together
294
365
 
295
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
366
+ client = Together()
296
367
 
297
368
  client.fine_tuning.create(
298
369
  training_file = 'file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
@@ -316,10 +387,9 @@ client.fine_tuning.download(id="ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b") # down
316
387
  This lists all the models that Together supports.
317
388
 
318
389
  ```python
319
- import os
320
390
  from together import Together
321
391
 
322
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
392
+ client = Together()
323
393
 
324
394
  models = client.models.list()
325
395
 
@@ -52,25 +52,101 @@ This repo contains both a Python Library and a CLI. We'll demonstrate how to use
52
52
  ### Chat Completions
53
53
 
54
54
  ```python
55
- import os
56
55
  from together import Together
57
56
 
58
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
57
+ client = Together()
59
58
 
59
+ # Simple text message
60
60
  response = client.chat.completions.create(
61
61
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
62
62
  messages=[{"role": "user", "content": "tell me about new york"}],
63
63
  )
64
64
  print(response.choices[0].message.content)
65
+
66
+ # Multi-modal message with text and image
67
+ response = client.chat.completions.create(
68
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
69
+ messages=[{
70
+ "role": "user",
71
+ "content": [
72
+ {
73
+ "type": "text",
74
+ "text": "What's in this image?"
75
+ },
76
+ {
77
+ "type": "image_url",
78
+ "image_url": {
79
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
80
+ }
81
+ }
82
+ ]
83
+ }]
84
+ )
85
+ print(response.choices[0].message.content)
86
+
87
+ # Multi-modal message with multiple images
88
+ response = client.chat.completions.create(
89
+ model="Qwen/Qwen2.5-VL-72B-Instruct",
90
+ messages=[{
91
+ "role": "user",
92
+ "content": [
93
+ {
94
+ "type": "text",
95
+ "text": "Compare these two images."
96
+ },
97
+ {
98
+ "type": "image_url",
99
+ "image_url": {
100
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
101
+ }
102
+ },
103
+ {
104
+ "type": "image_url",
105
+ "image_url": {
106
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/slack.png"
107
+ }
108
+ }
109
+ ]
110
+ }]
111
+ )
112
+ print(response.choices[0].message.content)
113
+
114
+ # Multi-modal message with text and video
115
+ response = client.chat.completions.create(
116
+ model="Qwen/Qwen2.5-VL-72B-Instruct",
117
+ messages=[{
118
+ "role": "user",
119
+ "content": [
120
+ {
121
+ "type": "text",
122
+ "text": "What's happening in this video?"
123
+ },
124
+ {
125
+ "type": "video_url",
126
+ "video_url": {
127
+ "url": "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
128
+ }
129
+ }
130
+ ]
131
+ }]
132
+ )
133
+ print(response.choices[0].message.content)
65
134
  ```
66
135
 
136
+ The chat completions API supports three types of content:
137
+ - Plain text messages using the `content` field directly
138
+ - Multi-modal messages with images using `type: "image_url"`
139
+ - Multi-modal messages with videos using `type: "video_url"`
140
+
141
+ When using multi-modal content, the `content` field becomes an array of content objects, each with its own type and corresponding data.
142
+
67
143
  #### Streaming
68
144
 
69
145
  ```python
70
146
  import os
71
147
  from together import Together
72
148
 
73
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
149
+ client = Together()
74
150
  stream = client.chat.completions.create(
75
151
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
76
152
  messages=[{"role": "user", "content": "tell me about new york"}],
@@ -84,17 +160,17 @@ for chunk in stream:
84
160
  #### Async usage
85
161
 
86
162
  ```python
87
- import os, asyncio
163
+ import asyncio
88
164
  from together import AsyncTogether
89
165
 
90
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
166
+ async_client = AsyncTogether()
91
167
  messages = [
92
168
  "What are the top things to do in San Francisco?",
93
169
  "What country is Paris in?",
94
170
  ]
95
171
 
96
172
  async def async_chat_completion(messages):
97
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
173
+ async_client = AsyncTogether()
98
174
  tasks = [
99
175
  async_client.chat.completions.create(
100
176
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -115,10 +191,9 @@ asyncio.run(async_chat_completion(messages))
115
191
  Completions are for code and language models shown [here](https://docs.together.ai/docs/inference-models). Below, a code model example is shown.
116
192
 
117
193
  ```python
118
- import os
119
194
  from together import Together
120
195
 
121
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
196
+ client = Together()
122
197
 
123
198
  response = client.completions.create(
124
199
  model="codellama/CodeLlama-34b-Python-hf",
@@ -131,10 +206,9 @@ print(response.choices[0].text)
131
206
  #### Streaming
132
207
 
133
208
  ```python
134
- import os
135
209
  from together import Together
136
210
 
137
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
211
+ client = Together()
138
212
  stream = client.completions.create(
139
213
  model="codellama/CodeLlama-34b-Python-hf",
140
214
  prompt="Write a Next.js component with TailwindCSS for a header component.",
@@ -148,10 +222,10 @@ for chunk in stream:
148
222
  #### Async usage
149
223
 
150
224
  ```python
151
- import os, asyncio
225
+ import asyncio
152
226
  from together import AsyncTogether
153
227
 
154
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
228
+ async_client = AsyncTogether()
155
229
  prompts = [
156
230
  "Write a Next.js component with TailwindCSS for a header component.",
157
231
  "Write a python function for the fibonacci sequence",
@@ -176,10 +250,9 @@ asyncio.run(async_chat_completion(prompts))
176
250
  ### Image generation
177
251
 
178
252
  ```python
179
- import os
180
253
  from together import Together
181
254
 
182
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
255
+ client = Together()
183
256
 
184
257
  response = client.images.generate(
185
258
  prompt="space robots",
@@ -196,7 +269,7 @@ print(response.data[0].b64_json)
196
269
  from typing import List
197
270
  from together import Together
198
271
 
199
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
272
+ client = Together()
200
273
 
201
274
  def get_embeddings(texts: List[str], model: str) -> List[List[float]]:
202
275
  texts = [text.replace("\n", " ") for text in texts]
@@ -215,7 +288,7 @@ print(embeddings)
215
288
  from typing import List
216
289
  from together import Together
217
290
 
218
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
291
+ client = Together()
219
292
 
220
293
  def get_reranked_documents(query: str, documents: List[str], model: str, top_n: int = 3) -> List[str]:
221
294
  outputs = client.rerank.create(model=model, query=query, documents=documents, top_n=top_n)
@@ -237,10 +310,9 @@ Read more about Reranking [here](https://docs.together.ai/docs/rerank-overview).
237
310
  The files API is used for fine-tuning and allows developers to upload data to fine-tune on. It also has several methods to list all files, retrieve files, and delete files. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
238
311
 
239
312
  ```python
240
- import os
241
313
  from together import Together
242
314
 
243
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
315
+ client = Together()
244
316
 
245
317
  client.files.upload(file="somedata.jsonl") # uploads a file
246
318
  client.files.list() # lists all uploaded files
@@ -254,10 +326,9 @@ client.files.delete(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # deletes a
254
326
  The finetune API is used for fine-tuning and allows developers to create finetuning jobs. It also has several methods to list all jobs, retrieve statuses and get checkpoints. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
255
327
 
256
328
  ```python
257
- import os
258
329
  from together import Together
259
330
 
260
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
331
+ client = Together()
261
332
 
262
333
  client.fine_tuning.create(
263
334
  training_file = 'file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
@@ -281,10 +352,9 @@ client.fine_tuning.download(id="ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b") # down
281
352
  This lists all the models that Together supports.
282
353
 
283
354
  ```python
284
- import os
285
355
  from together import Together
286
356
 
287
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
357
+ client = Together()
288
358
 
289
359
  models = client.models.list()
290
360
 
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
12
12
 
13
13
  [tool.poetry]
14
14
  name = "together"
15
- version = "1.4.1"
15
+ version = "1.4.4"
16
16
  authors = [
17
17
  "Together AI <support@together.ai>"
18
18
  ]
@@ -1,9 +1,10 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  import json
4
- from datetime import datetime
4
+ from datetime import datetime, timezone
5
5
  from textwrap import wrap
6
6
  from typing import Any, Literal
7
+ import re
7
8
 
8
9
  import click
9
10
  from click.core import ParameterSource # type: ignore[attr-defined]
@@ -17,8 +18,13 @@ from together.utils import (
17
18
  log_warn,
18
19
  log_warn_once,
19
20
  parse_timestamp,
21
+ format_timestamp,
22
+ )
23
+ from together.types.finetune import (
24
+ DownloadCheckpointType,
25
+ FinetuneTrainingLimits,
26
+ FinetuneEventType,
20
27
  )
21
- from together.types.finetune import DownloadCheckpointType, FinetuneTrainingLimits
22
28
 
23
29
 
24
30
  _CONFIRMATION_MESSAGE = (
@@ -104,6 +110,18 @@ def fine_tuning(ctx: click.Context) -> None:
104
110
  default="all-linear",
105
111
  help="Trainable modules for LoRA adapters. For example, 'all-linear', 'q_proj,v_proj'",
106
112
  )
113
+ @click.option(
114
+ "--training-method",
115
+ type=click.Choice(["sft", "dpo"]),
116
+ default="sft",
117
+ help="Training method to use. Options: sft (supervised fine-tuning), dpo (Direct Preference Optimization)",
118
+ )
119
+ @click.option(
120
+ "--dpo-beta",
121
+ type=float,
122
+ default=0.1,
123
+ help="Beta parameter for DPO training (only used when '--training-method' is 'dpo')",
124
+ )
107
125
  @click.option(
108
126
  "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
109
127
  )
@@ -126,6 +144,14 @@ def fine_tuning(ctx: click.Context) -> None:
126
144
  help="Whether to mask the user messages in conversational data or prompts in instruction data. "
127
145
  "`auto` will automatically determine whether to mask the inputs based on the data format.",
128
146
  )
147
+ @click.option(
148
+ "--from-checkpoint",
149
+ type=str,
150
+ default=None,
151
+ help="The checkpoint identifier to continue training from a previous fine-tuning job. "
152
+ "The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}. "
153
+ "The step value is optional, without it the final checkpoint will be used.",
154
+ )
129
155
  def create(
130
156
  ctx: click.Context,
131
157
  training_file: str,
@@ -152,6 +178,9 @@ def create(
152
178
  wandb_name: str,
153
179
  confirm: bool,
154
180
  train_on_inputs: bool | Literal["auto"],
181
+ training_method: str,
182
+ dpo_beta: float,
183
+ from_checkpoint: str,
155
184
  ) -> None:
156
185
  """Start fine-tuning"""
157
186
  client: Together = ctx.obj
@@ -180,6 +209,9 @@ def create(
180
209
  wandb_project_name=wandb_project_name,
181
210
  wandb_name=wandb_name,
182
211
  train_on_inputs=train_on_inputs,
212
+ training_method=training_method,
213
+ dpo_beta=dpo_beta,
214
+ from_checkpoint=from_checkpoint,
183
215
  )
184
216
 
185
217
  model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
@@ -261,7 +293,9 @@ def list(ctx: click.Context) -> None:
261
293
 
262
294
  response.data = response.data or []
263
295
 
264
- response.data.sort(key=lambda x: parse_timestamp(x.created_at or ""))
296
+ # Use a default datetime for None values to make sure the key function always returns a comparable value
297
+ epoch_start = datetime.fromtimestamp(0, tz=timezone.utc)
298
+ response.data.sort(key=lambda x: parse_timestamp(x.created_at or "") or epoch_start)
265
299
 
266
300
  display_list = []
267
301
  for i in response.data:
@@ -344,6 +378,34 @@ def list_events(ctx: click.Context, fine_tune_id: str) -> None:
344
378
  click.echo(table)
345
379
 
346
380
 
381
+ @fine_tuning.command()
382
+ @click.pass_context
383
+ @click.argument("fine_tune_id", type=str, required=True)
384
+ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
385
+ """List available checkpoints for a fine-tuning job"""
386
+ client: Together = ctx.obj
387
+
388
+ checkpoints = client.fine_tuning.list_checkpoints(fine_tune_id)
389
+
390
+ display_list = []
391
+ for checkpoint in checkpoints:
392
+ display_list.append(
393
+ {
394
+ "Type": checkpoint.type,
395
+ "Timestamp": format_timestamp(checkpoint.timestamp),
396
+ "Name": checkpoint.name,
397
+ }
398
+ )
399
+
400
+ if display_list:
401
+ click.echo(f"Job {fine_tune_id} contains the following checkpoints:")
402
+ table = tabulate(display_list, headers="keys", tablefmt="grid")
403
+ click.echo(table)
404
+ click.echo("\nTo download a checkpoint, use `together fine-tuning download`")
405
+ else:
406
+ click.echo(f"No checkpoints found for job {fine_tune_id}")
407
+
408
+
347
409
  @fine_tuning.command()
348
410
  @click.pass_context
349
411
  @click.argument("fine_tune_id", type=str, required=True)
@@ -358,7 +420,7 @@ def list_events(ctx: click.Context, fine_tune_id: str) -> None:
358
420
  "--checkpoint-step",
359
421
  type=int,
360
422
  required=False,
361
- default=-1,
423
+ default=None,
362
424
  help="Download fine-tuning checkpoint. Defaults to latest.",
363
425
  )
364
426
  @click.option(
@@ -372,7 +434,7 @@ def download(
372
434
  ctx: click.Context,
373
435
  fine_tune_id: str,
374
436
  output_dir: str,
375
- checkpoint_step: int,
437
+ checkpoint_step: int | None,
376
438
  checkpoint_type: DownloadCheckpointType,
377
439
  ) -> None:
378
440
  """Download fine-tuning checkpoint"""
@@ -39,12 +39,18 @@ class DatasetFormat(enum.Enum):
39
39
  GENERAL = "general"
40
40
  CONVERSATION = "conversation"
41
41
  INSTRUCTION = "instruction"
42
+ PREFERENCE_OPENAI = "preference_openai"
42
43
 
43
44
 
44
45
  JSONL_REQUIRED_COLUMNS_MAP = {
45
46
  DatasetFormat.GENERAL: ["text"],
46
47
  DatasetFormat.CONVERSATION: ["messages"],
47
48
  DatasetFormat.INSTRUCTION: ["prompt", "completion"],
49
+ DatasetFormat.PREFERENCE_OPENAI: [
50
+ "input",
51
+ "preferred_output",
52
+ "non_preferred_output",
53
+ ],
48
54
  }
49
55
  REQUIRED_COLUMNS_MESSAGE = ["role", "content"]
50
56
  POSSIBLE_ROLES_CONVERSATION = ["system", "user", "assistant"]
@@ -161,7 +161,7 @@ class Finetune:
161
161
  cls,
162
162
  fine_tune_id: str,
163
163
  output: str | None = None,
164
- step: int = -1,
164
+ step: int | None = None,
165
165
  ) -> Dict[str, Any]:
166
166
  """Legacy finetuning download function."""
167
167