together 1.2.10__tar.gz → 1.2.12__tar.gz

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (57)
  1. {together-1.2.10 → together-1.2.12}/PKG-INFO +1 -1
  2. {together-1.2.10 → together-1.2.12}/pyproject.toml +2 -2
  3. {together-1.2.10 → together-1.2.12}/src/together/legacy/complete.py +4 -4
  4. {together-1.2.10 → together-1.2.12}/src/together/legacy/embeddings.py +1 -1
  5. {together-1.2.10 → together-1.2.12}/src/together/legacy/images.py +1 -1
  6. {together-1.2.10 → together-1.2.12}/src/together/resources/chat/completions.py +10 -0
  7. {together-1.2.10 → together-1.2.12}/src/together/resources/completions.py +11 -1
  8. {together-1.2.10 → together-1.2.12}/src/together/resources/embeddings.py +5 -1
  9. {together-1.2.10 → together-1.2.12}/src/together/resources/images.py +6 -0
  10. {together-1.2.10 → together-1.2.12}/src/together/resources/rerank.py +4 -0
  11. {together-1.2.10 → together-1.2.12}/src/together/types/chat_completions.py +3 -0
  12. {together-1.2.10 → together-1.2.12}/src/together/types/completions.py +4 -1
  13. {together-1.2.10 → together-1.2.12}/LICENSE +0 -0
  14. {together-1.2.10 → together-1.2.12}/README.md +0 -0
  15. {together-1.2.10 → together-1.2.12}/src/together/__init__.py +0 -0
  16. {together-1.2.10 → together-1.2.12}/src/together/abstract/__init__.py +0 -0
  17. {together-1.2.10 → together-1.2.12}/src/together/abstract/api_requestor.py +0 -0
  18. {together-1.2.10 → together-1.2.12}/src/together/cli/__init__.py +0 -0
  19. {together-1.2.10 → together-1.2.12}/src/together/cli/api/__init__.py +0 -0
  20. {together-1.2.10 → together-1.2.12}/src/together/cli/api/chat.py +0 -0
  21. {together-1.2.10 → together-1.2.12}/src/together/cli/api/completions.py +0 -0
  22. {together-1.2.10 → together-1.2.12}/src/together/cli/api/files.py +0 -0
  23. {together-1.2.10 → together-1.2.12}/src/together/cli/api/finetune.py +0 -0
  24. {together-1.2.10 → together-1.2.12}/src/together/cli/api/images.py +0 -0
  25. {together-1.2.10 → together-1.2.12}/src/together/cli/api/models.py +0 -0
  26. {together-1.2.10 → together-1.2.12}/src/together/cli/cli.py +0 -0
  27. {together-1.2.10 → together-1.2.12}/src/together/client.py +0 -0
  28. {together-1.2.10 → together-1.2.12}/src/together/constants.py +0 -0
  29. {together-1.2.10 → together-1.2.12}/src/together/error.py +0 -0
  30. {together-1.2.10 → together-1.2.12}/src/together/filemanager.py +0 -0
  31. {together-1.2.10 → together-1.2.12}/src/together/legacy/__init__.py +0 -0
  32. {together-1.2.10 → together-1.2.12}/src/together/legacy/base.py +0 -0
  33. {together-1.2.10 → together-1.2.12}/src/together/legacy/files.py +0 -0
  34. {together-1.2.10 → together-1.2.12}/src/together/legacy/finetune.py +0 -0
  35. {together-1.2.10 → together-1.2.12}/src/together/legacy/models.py +0 -0
  36. {together-1.2.10 → together-1.2.12}/src/together/resources/__init__.py +0 -0
  37. {together-1.2.10 → together-1.2.12}/src/together/resources/chat/__init__.py +0 -0
  38. {together-1.2.10 → together-1.2.12}/src/together/resources/files.py +0 -0
  39. {together-1.2.10 → together-1.2.12}/src/together/resources/finetune.py +0 -0
  40. {together-1.2.10 → together-1.2.12}/src/together/resources/models.py +0 -0
  41. {together-1.2.10 → together-1.2.12}/src/together/together_response.py +0 -0
  42. {together-1.2.10 → together-1.2.12}/src/together/types/__init__.py +0 -0
  43. {together-1.2.10 → together-1.2.12}/src/together/types/abstract.py +0 -0
  44. {together-1.2.10 → together-1.2.12}/src/together/types/common.py +0 -0
  45. {together-1.2.10 → together-1.2.12}/src/together/types/embeddings.py +0 -0
  46. {together-1.2.10 → together-1.2.12}/src/together/types/error.py +0 -0
  47. {together-1.2.10 → together-1.2.12}/src/together/types/files.py +0 -0
  48. {together-1.2.10 → together-1.2.12}/src/together/types/finetune.py +0 -0
  49. {together-1.2.10 → together-1.2.12}/src/together/types/images.py +0 -0
  50. {together-1.2.10 → together-1.2.12}/src/together/types/models.py +0 -0
  51. {together-1.2.10 → together-1.2.12}/src/together/types/rerank.py +0 -0
  52. {together-1.2.10 → together-1.2.12}/src/together/utils/__init__.py +0 -0
  53. {together-1.2.10 → together-1.2.12}/src/together/utils/_log.py +0 -0
  54. {together-1.2.10 → together-1.2.12}/src/together/utils/api_helpers.py +0 -0
  55. {together-1.2.10 → together-1.2.12}/src/together/utils/files.py +0 -0
  56. {together-1.2.10 → together-1.2.12}/src/together/utils/tools.py +0 -0
  57. {together-1.2.10 → together-1.2.12}/src/together/version.py +0 -0
{together-1.2.10 → together-1.2.12}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: together
- Version: 1.2.10
+ Version: 1.2.12
  Summary: Python client for Together's Cloud Platform!
  Home-page: https://github.com/togethercomputer/together-python
  License: Apache-2.0
{together-1.2.10 → together-1.2.12}/pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"

  [tool.poetry]
  name = "together"
- version = "1.2.10"
+ version = "1.2.12"
  authors = [
  "Together AI <support@together.ai>"
  ]
@@ -55,7 +55,7 @@ types-tqdm = "^4.65.0.0"
  types-tabulate = "^0.9.0.3"
  pre-commit = "3.5.0"
  types-requests = "^2.31.0.20240218"
- pyarrow-stubs = "^10.0.1.7"
+ pyarrow-stubs = ">=10.0.1.7,<20240831.0.0.0"
  mypy = "^1.9.0"

  [tool.poetry.group.tests]
{together-1.2.10 → together-1.2.12}/src/together/legacy/complete.py
@@ -14,7 +14,7 @@ class Complete:
  def create(
  cls,
  prompt: str,
- **kwargs,
+ **kwargs: Any,
  ) -> Dict[str, Any]:
  """Legacy completion function."""

@@ -36,7 +36,7 @@ class Complete:
  def create_streaming(
  cls,
  prompt: str,
- **kwargs,
+ **kwargs: Any,
  ) -> Iterator[Dict[str, Any]]:
  """Legacy streaming completion function."""

@@ -59,7 +59,7 @@ class Completion:
  def create(
  cls,
  prompt: str,
- **kwargs,
+ **kwargs: Any,
  ) -> CompletionResponse | Iterator[CompletionChunk]:
  """Completion function."""

@@ -79,7 +79,7 @@ class AsyncComplete:
  async def create(
  cls,
  prompt: str,
- **kwargs,
+ **kwargs: Any,
  ) -> CompletionResponse | AsyncGenerator[CompletionChunk, None]:
  """Async completion function."""

{together-1.2.10 → together-1.2.12}/src/together/legacy/embeddings.py
@@ -11,7 +11,7 @@ class Embeddings:
  def create(
  cls,
  input: str,
- **kwargs,
+ **kwargs: Any,
  ) -> Dict[str, Any]:
  """Legacy embeddings function."""

{together-1.2.10 → together-1.2.12}/src/together/legacy/images.py
@@ -11,7 +11,7 @@ class Image:
  def create(
  cls,
  prompt: str,
- **kwargs,
+ **kwargs: Any,
  ) -> Dict[str, Any]:
  """Legacy image function."""

{together-1.2.10 → together-1.2.12}/src/together/resources/chat/completions.py
@@ -32,6 +32,7 @@ class ChatCompletions:
  frequency_penalty: float | None = None,
  min_p: float | None = None,
  logit_bias: Dict[str, float] | None = None,
+ seed: int | None = None,
  stream: bool = False,
  logprobs: int | None = None,
  echo: bool | None = None,
@@ -40,6 +41,7 @@ class ChatCompletions:
  response_format: Dict[str, str | Dict[str, Any]] | None = None,
  tools: Dict[str, str | Dict[str, Any]] | None = None,
  tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
+ **kwargs: Any,
  ) -> ChatCompletionResponse | Iterator[ChatCompletionChunk]:
  """
  Method to generate completions based on a given prompt using a specified model.
@@ -78,6 +80,7 @@ class ChatCompletions:
  logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
  Defaults to None.
+ seed (int, optional): A seed value to use for reproducibility.
  stream (bool, optional): Flag indicating whether to stream the generated completions.
  Defaults to False.
  logprobs (int, optional): Number of top-k logprobs to return
@@ -123,6 +126,7 @@ class ChatCompletions:
  frequency_penalty=frequency_penalty,
  min_p=min_p,
  logit_bias=logit_bias,
+ seed=seed,
  stream=stream,
  logprobs=logprobs,
  echo=echo,
@@ -131,6 +135,7 @@ class ChatCompletions:
  response_format=response_format,
  tools=tools,
  tool_choice=tool_choice,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = requestor.request(
@@ -169,6 +174,7 @@ class AsyncChatCompletions:
  frequency_penalty: float | None = None,
  min_p: float | None = None,
  logit_bias: Dict[str, float] | None = None,
+ seed: int | None = None,
  stream: bool = False,
  logprobs: int | None = None,
  echo: bool | None = None,
@@ -177,6 +183,7 @@ class AsyncChatCompletions:
  response_format: Dict[str, Any] | None = None,
  tools: Dict[str, str | Dict[str, str | Dict[str, Any]]] | None = None,
  tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
+ **kwargs: Any,
  ) -> AsyncGenerator[ChatCompletionChunk, None] | ChatCompletionResponse:
  """
  Async method to generate completions based on a given prompt using a specified model.
@@ -215,6 +222,7 @@ class AsyncChatCompletions:
  logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
  Defaults to None.
+ seed (int, optional): A seed value to use for reproducibility.
  stream (bool, optional): Flag indicating whether to stream the generated completions.
  Defaults to False.
  logprobs (int, optional): Number of top-k logprobs to return
@@ -260,6 +268,7 @@ class AsyncChatCompletions:
  frequency_penalty=frequency_penalty,
  min_p=min_p,
  logit_bias=logit_bias,
+ seed=seed,
  stream=stream,
  logprobs=logprobs,
  echo=echo,
@@ -268,6 +277,7 @@ class AsyncChatCompletions:
  response_format=response_format,
  tools=tools,
  tool_choice=tool_choice,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = await requestor.arequest(
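The upshot of the hunks above: ChatCompletions.create (and its async counterpart) now accepts a seed argument that is forwarded into ChatCompletionRequest, and any additional keyword arguments are passed through via **kwargs. A minimal usage sketch follows; the client entry point, environment variable, and model name are illustrative assumptions, not part of this diff.

from together import Together

# Sketch only: assumes TOGETHER_API_KEY is set in the environment and that
# the model name below is available; both are assumptions for illustration.
client = Together()

response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
    messages=[{"role": "user", "content": "Say hello."}],
    seed=42,  # added in this release: fixes the sampling seed for reproducibility
)
print(response.choices[0].message.content)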
{together-1.2.10 → together-1.2.12}/src/together/resources/completions.py
@@ -1,6 +1,6 @@
  from __future__ import annotations

- from typing import AsyncGenerator, Dict, Iterator, List
+ from typing import AsyncGenerator, Dict, Iterator, List, Any

  from together.abstract import api_requestor
  from together.together_response import TogetherResponse
@@ -32,11 +32,13 @@ class Completions:
  frequency_penalty: float | None = None,
  min_p: float | None = None,
  logit_bias: Dict[str, float] | None = None,
+ seed: int | None = None,
  stream: bool = False,
  logprobs: int | None = None,
  echo: bool | None = None,
  n: int | None = None,
  safety_model: str | None = None,
+ **kwargs: Any,
  ) -> CompletionResponse | Iterator[CompletionChunk]:
  """
  Method to generate completions based on a given prompt using a specified model.
@@ -74,6 +76,7 @@ class Completions:
  logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
  Defaults to None.
+ seed (int, optional): Seed value for reproducibility.
  stream (bool, optional): Flag indicating whether to stream the generated completions.
  Defaults to False.
  logprobs (int, optional): Number of top-k logprobs to return
@@ -106,6 +109,7 @@ class Completions:
  repetition_penalty=repetition_penalty,
  presence_penalty=presence_penalty,
  frequency_penalty=frequency_penalty,
+ seed=seed,
  min_p=min_p,
  logit_bias=logit_bias,
  stream=stream,
@@ -113,6 +117,7 @@ class Completions:
  echo=echo,
  n=n,
  safety_model=safety_model,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = requestor.request(
@@ -151,11 +156,13 @@ class AsyncCompletions:
  frequency_penalty: float | None = None,
  min_p: float | None = None,
  logit_bias: Dict[str, float] | None = None,
+ seed: int | None = None,
  stream: bool = False,
  logprobs: int | None = None,
  echo: bool | None = None,
  n: int | None = None,
  safety_model: str | None = None,
+ **kwargs: Any,
  ) -> AsyncGenerator[CompletionChunk, None] | CompletionResponse:
  """
  Async method to generate completions based on a given prompt using a specified model.
@@ -193,6 +200,7 @@ class AsyncCompletions:
  logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
  Defaults to None.
+ seed (int, optional): Seed value for reproducibility.
  stream (bool, optional): Flag indicating whether to stream the generated completions.
  Defaults to False.
  logprobs (int, optional): Number of top-k logprobs to return
@@ -227,11 +235,13 @@ class AsyncCompletions:
  frequency_penalty=frequency_penalty,
  min_p=min_p,
  logit_bias=logit_bias,
+ seed=seed,
  stream=stream,
  logprobs=logprobs,
  echo=echo,
  n=n,
  safety_model=safety_model,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = await requestor.arequest(
{together-1.2.10 → together-1.2.12}/src/together/resources/embeddings.py
@@ -1,6 +1,6 @@
  from __future__ import annotations

- from typing import List
+ from typing import List, Any

  from together.abstract import api_requestor
  from together.together_response import TogetherResponse
@@ -21,6 +21,7 @@ class Embeddings:
  *,
  input: str | List[str],
  model: str,
+ **kwargs: Any,
  ) -> EmbeddingResponse:
  """
  Method to generate completions based on a given prompt using a specified model.
@@ -40,6 +41,7 @@ class Embeddings:
  parameter_payload = EmbeddingRequest(
  input=input,
  model=model,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = requestor.request(
@@ -65,6 +67,7 @@ class AsyncEmbeddings:
  *,
  input: str | List[str],
  model: str,
+ **kwargs: Any,
  ) -> EmbeddingResponse:
  """
  Async method to generate completions based on a given prompt using a specified model.
@@ -84,6 +87,7 @@ class AsyncEmbeddings:
  parameter_payload = EmbeddingRequest(
  input=input,
  model=model,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = await requestor.arequest(
{together-1.2.10 → together-1.2.12}/src/together/resources/images.py
@@ -1,5 +1,7 @@
  from __future__ import annotations

+ from typing import Any
+
  from together.abstract import api_requestor
  from together.together_response import TogetherResponse
  from together.types import (
@@ -25,6 +27,7 @@ class Images:
  height: int | None = 1024,
  width: int | None = 1024,
  negative_prompt: str | None = None,
+ **kwargs: Any,
  ) -> ImageResponse:
  """
  Method to generate images based on a given prompt using a specified model.
@@ -67,6 +70,7 @@ class Images:
  height=height,
  width=width,
  negative_prompt=negative_prompt,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = requestor.request(
@@ -98,6 +102,7 @@ class AsyncImages:
  height: int | None = 1024,
  width: int | None = 1024,
  negative_prompt: str | None = None,
+ **kwargs: Any,
  ) -> ImageResponse:
  """
  Async method to generate images based on a given prompt using a specified model.
@@ -140,6 +145,7 @@ class AsyncImages:
  height=height,
  width=width,
  negative_prompt=negative_prompt,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = await requestor.arequest(
{together-1.2.10 → together-1.2.12}/src/together/resources/rerank.py
@@ -25,6 +25,7 @@ class Rerank:
  top_n: int | None = None,
  return_documents: bool = False,
  rank_fields: List[str] | None = None,
+ **kwargs: Any,
  ) -> RerankResponse:
  """
  Method to generate completions based on a given prompt using a specified model.
@@ -52,6 +53,7 @@ class Rerank:
  top_n=top_n,
  return_documents=return_documents,
  rank_fields=rank_fields,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = requestor.request(
@@ -81,6 +83,7 @@ class AsyncRerank:
  top_n: int | None = None,
  return_documents: bool = False,
  rank_fields: List[str] | None = None,
+ **kwargs: Any,
  ) -> RerankResponse:
  """
  Async method to generate completions based on a given prompt using a specified model.
@@ -108,6 +111,7 @@ class AsyncRerank:
  top_n=top_n,
  return_documents=return_documents,
  rank_fields=rank_fields,
+ **kwargs,
  ).model_dump(exclude_none=True)

  response, _, _ = await requestor.arequest(
{together-1.2.10 → together-1.2.12}/src/together/types/chat_completions.py
@@ -96,6 +96,7 @@ class ChatCompletionRequest(BaseModel):
  frequency_penalty: float | None = None
  min_p: float | None = None
  logit_bias: Dict[str, float] | None = None
+ seed: int | None = None
  # stream SSE token chunks
  stream: bool = False
  # return logprobs
@@ -126,6 +127,7 @@ class ChatCompletionRequest(BaseModel):
  class ChatCompletionChoicesData(BaseModel):
  index: int | None = None
  logprobs: LogprobsPart | None = None
+ seed: int | None = None
  finish_reason: FinishReason | None = None
  message: ChatCompletionMessage | None = None

@@ -150,6 +152,7 @@ class ChatCompletionResponse(BaseModel):
  class ChatCompletionChoicesChunk(BaseModel):
  index: int | None = None
  logprobs: float | None = None
+ seed: int | None = None
  finish_reason: FinishReason | None = None
  delta: DeltaContent | None = None

{together-1.2.10 → together-1.2.12}/src/together/types/completions.py
@@ -35,6 +35,7 @@ class CompletionRequest(BaseModel):
  frequency_penalty: float | None = None
  min_p: float | None = None
  logit_bias: Dict[str, float] | None = None
+ seed: int | None = None
  # stream SSE token chunks
  stream: bool = False
  # return logprobs
@@ -61,13 +62,15 @@ class CompletionRequest(BaseModel):
  class CompletionChoicesData(BaseModel):
  index: int
  logprobs: LogprobsPart | None = None
- finish_reason: FinishReason | None = None
+ seed: int | None = None
+ finish_reason: FinishReason
  text: str


  class CompletionChoicesChunk(BaseModel):
  index: int | None = None
  logprobs: float | None = None
+ seed: int | None = None
  finish_reason: FinishReason | None = None
  delta: DeltaContent | None = None

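On the response side, the type changes add an optional seed field to the choice models (ChatCompletionChoicesData, ChatCompletionChoicesChunk, CompletionChoicesData, CompletionChoicesChunk), and CompletionChoicesData.finish_reason becomes required rather than optional. A hedged sketch of reading the new field back, assuming the response object's choices attribute holds the choice models shown above:

# Sketch only: assumes `response` came from client.chat.completions.create(...)
# and that response.choices contains ChatCompletionChoicesData instances.
choice = response.choices[0]
print(choice.finish_reason)  # FinishReason value, or None for chat choices
print(choice.seed)           # new optional field; None when not returned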