together 1.5.35__py3-none-any.whl → 2.0.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +66 -84
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -490
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a6.dist-info/METADATA +729 -0
  159. together-2.0.0a6.dist-info/RECORD +165 -0
  160. {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
  161. together-2.0.0a6.dist-info/entry_points.txt +2 -0
  162. {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -465
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.35.dist-info/METADATA +0 -583
  204. together-1.5.35.dist-info/RECORD +0 -77
  205. together-1.5.35.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
together/resources/models.py
@@ -1,252 +1,282 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
  from __future__ import annotations

- from typing import List
+ from typing_extensions import Literal
+
+ import httpx

- from together.abstract import api_requestor
- from together.together_response import TogetherResponse
- from together.types import (
-     ModelObject,
-     ModelUploadRequest,
-     ModelUploadResponse,
-     TogetherClient,
-     TogetherRequest,
+ from ..types import model_upload_params
+ from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+ from .._utils import maybe_transform, async_maybe_transform
+ from .._compat import cached_property
+ from .._resource import SyncAPIResource, AsyncAPIResource
+ from .._response import (
+     to_raw_response_wrapper,
+     to_streamed_response_wrapper,
+     async_to_raw_response_wrapper,
+     async_to_streamed_response_wrapper,
  )
+ from .._base_client import make_request_options
+ from ..types.model_list_response import ModelListResponse
+ from ..types.model_upload_response import ModelUploadResponse

+ __all__ = ["ModelsResource", "AsyncModelsResource"]

- class ModelsBase:
-     def __init__(self, client: TogetherClient) -> None:
-         self._client = client

-     def _filter_dedicated_models(
-         self, models: List[ModelObject], dedicated_response: TogetherResponse
-     ) -> List[ModelObject]:
+ class ModelsResource(SyncAPIResource):
+     @cached_property
+     def with_raw_response(self) -> ModelsResourceWithRawResponse:
          """
-         Filter models based on dedicated model response.
+         This property can be used as a prefix for any HTTP method call to return
+         the raw response object instead of the parsed content.

-         Args:
-             models (List[ModelObject]): List of all models
-             dedicated_response (TogetherResponse): Response from autoscale models endpoint
-
-         Returns:
-             List[ModelObject]: Filtered list of models
+         For more information, see https://www.github.com/togethercomputer/together-py#accessing-raw-response-data-eg-headers
          """
-         assert isinstance(dedicated_response.data, list)
-
-         # Create a set of dedicated model names for efficient lookup
-         dedicated_model_names = {model["name"] for model in dedicated_response.data}
+         return ModelsResourceWithRawResponse(self)

-         # Filter models to only include those in dedicated_model_names
-         # Note: The model.id from ModelObject matches the name field in the autoscale response
-         return [model for model in models if model.id in dedicated_model_names]
-
-
- class Models(ModelsBase):
-     def list(
-         self,
-         dedicated: bool = False,
-     ) -> List[ModelObject]:
+     @cached_property
+     def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
          """
-         Method to return list of models on the API
+         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-         Args:
-             dedicated (bool, optional): If True, returns only dedicated models. Defaults to False.
-
-         Returns:
-             List[ModelObject]: List of model objects
+         For more information, see https://www.github.com/togethercomputer/together-py#with_streaming_response
          """
-         requestor = api_requestor.APIRequestor(
-             client=self._client,
-         )
+         return ModelsResourceWithStreamingResponse(self)

-         response, _, _ = requestor.request(
-             options=TogetherRequest(
-                 method="GET",
-                 url="models",
+     def list(
+         self,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> ModelListResponse:
+         """Lists all of Together's open-source models"""
+         return self._get(
+             "/models",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
              ),
-             stream=False,
+             cast_to=ModelListResponse,
          )

-         assert isinstance(response, TogetherResponse)
-         assert isinstance(response.data, list)
-
-         models = [ModelObject(**model) for model in response.data]
-
-         if dedicated:
-             # Get dedicated models
-             dedicated_response, _, _ = requestor.request(
-                 options=TogetherRequest(
-                     method="GET",
-                     url="autoscale/models",
-                 ),
-                 stream=False,
-             )
-
-             models = self._filter_dedicated_models(models, dedicated_response)
-
-         models.sort(key=lambda x: x.id.lower())
-
-         return models
-
      def upload(
          self,
          *,
          model_name: str,
          model_source: str,
-         model_type: str = "model",
-         hf_token: str | None = None,
-         description: str | None = None,
-         base_model: str | None = None,
-         lora_model: str | None = None,
+         base_model: str | Omit = omit,
+         description: str | Omit = omit,
+         hf_token: str | Omit = omit,
+         lora_model: str | Omit = omit,
+         model_type: Literal["model", "adapter"] | Omit = omit,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
      ) -> ModelUploadResponse:
          """
-         Upload a custom model or adapter from Hugging Face or S3.
+         Upload a custom model or adapter from Hugging Face or S3

          Args:
-             model_name (str): The name to give to your uploaded model
-             model_source (str): The source location of the model (Hugging Face repo or S3 path)
-             model_type (str, optional): Whether the model is a full model or an adapter. Defaults to "model".
-             hf_token (str, optional): Hugging Face token (if uploading from Hugging Face)
-             description (str, optional): A description of your model
-             base_model (str, optional): The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type "adapter".
-             lora_model (str, optional): The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type "adapter".
-
-         Returns:
-             ModelUploadResponse: Object containing upload job information
-         """
-         requestor = api_requestor.APIRequestor(
-             client=self._client,
-         )
+           model_name: The name to give to your uploaded model

-         data = {
-             "model_name": model_name,
-             "model_source": model_source,
-             "model_type": model_type,
-         }
-
-         if hf_token is not None:
-             data["hf_token"] = hf_token
-         if description is not None:
-             data["description"] = description
-         if base_model is not None:
-             data["base_model"] = base_model
-         if lora_model is not None:
-             data["lora_model"] = lora_model
-
-         response, _, _ = requestor.request(
-             options=TogetherRequest(
-                 method="POST",
-                 url="models",
-                 params=data,
-             ),
-             stream=False,
-         )
+           model_source: The source location of the model (Hugging Face repo or S3 path)

-         assert isinstance(response, TogetherResponse)
+           base_model: The base model to use for an adapter if setting it to run against a serverless
+               pool. Only used for model_type `adapter`.

-         return ModelUploadResponse.from_api_response(response.data)
+           description: A description of your model

+           hf_token: Hugging Face token (if uploading from Hugging Face)

- class AsyncModels(ModelsBase):
-     async def list(
-         self,
-         dedicated: bool = False,
-     ) -> List[ModelObject]:
-         """
-         Async method to return list of models on API
+           lora_model: The lora pool to use for an adapter if setting it to run against, say, a
+               dedicated pool. Only used for model_type `adapter`.

-         Args:
-             dedicated (bool, optional): If True, returns only dedicated models. Defaults to False.
+           model_type: Whether the model is a full model or an adapter

-         Returns:
-             List[ModelObject]: List of model objects
-         """
-         requestor = api_requestor.APIRequestor(
-             client=self._client,
-         )
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request

-         response, _, _ = await requestor.arequest(
-             options=TogetherRequest(
-                 method="GET",
-                 url="models",
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         return self._post(
+             "/models",
+             body=maybe_transform(
+                 {
+                     "model_name": model_name,
+                     "model_source": model_source,
+                     "base_model": base_model,
+                     "description": description,
+                     "hf_token": hf_token,
+                     "lora_model": lora_model,
+                     "model_type": model_type,
+                 },
+                 model_upload_params.ModelUploadParams,
+             ),
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
              ),
-             stream=False,
+             cast_to=ModelUploadResponse,
          )

-         assert isinstance(response, TogetherResponse)
-         assert isinstance(response.data, list)

-         models = [ModelObject(**model) for model in response.data]
+ class AsyncModelsResource(AsyncAPIResource):
+     @cached_property
+     def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
+         """
+         This property can be used as a prefix for any HTTP method call to return
+         the raw response object instead of the parsed content.

-         if dedicated:
-             # Get dedicated models
-             dedicated_response, _, _ = await requestor.arequest(
-                 options=TogetherRequest(
-                     method="GET",
-                     url="autoscale/models",
-                 ),
-                 stream=False,
-             )
+         For more information, see https://www.github.com/togethercomputer/together-py#accessing-raw-response-data-eg-headers
+         """
+         return AsyncModelsResourceWithRawResponse(self)

-             models = self._filter_dedicated_models(models, dedicated_response)
+     @cached_property
+     def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
+         """
+         An alternative to `.with_raw_response` that doesn't eagerly read the response body.

-         models.sort(key=lambda x: x.id.lower())
+         For more information, see https://www.github.com/togethercomputer/together-py#with_streaming_response
+         """
+         return AsyncModelsResourceWithStreamingResponse(self)

-         return models
+     async def list(
+         self,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> ModelListResponse:
+         """Lists all of Together's open-source models"""
+         return await self._get(
+             "/models",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=ModelListResponse,
+         )

      async def upload(
          self,
          *,
          model_name: str,
          model_source: str,
-         model_type: str = "model",
-         hf_token: str | None = None,
-         description: str | None = None,
-         base_model: str | None = None,
-         lora_model: str | None = None,
+         base_model: str | Omit = omit,
+         description: str | Omit = omit,
+         hf_token: str | Omit = omit,
+         lora_model: str | Omit = omit,
+         model_type: Literal["model", "adapter"] | Omit = omit,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
      ) -> ModelUploadResponse:
          """
-         Upload a custom model or adapter from Hugging Face or S3.
+         Upload a custom model or adapter from Hugging Face or S3

          Args:
-             model_name (str): The name to give to your uploaded model
-             model_source (str): The source location of the model (Hugging Face repo or S3 path)
-             model_type (str, optional): Whether the model is a full model or an adapter. Defaults to "model".
-             hf_token (str, optional): Hugging Face token (if uploading from Hugging Face)
-             description (str, optional): A description of your model
-             base_model (str, optional): The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type "adapter".
-             lora_model (str, optional): The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type "adapter".
-
-         Returns:
-             ModelUploadResponse: Object containing upload job information
+           model_name: The name to give to your uploaded model
+
+           model_source: The source location of the model (Hugging Face repo or S3 path)
+
+           base_model: The base model to use for an adapter if setting it to run against a serverless
+               pool. Only used for model_type `adapter`.
+
+           description: A description of your model
+
+           hf_token: Hugging Face token (if uploading from Hugging Face)
+
+           lora_model: The lora pool to use for an adapter if setting it to run against, say, a
+               dedicated pool. Only used for model_type `adapter`.
+
+           model_type: Whether the model is a full model or an adapter
+
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
          """
-         requestor = api_requestor.APIRequestor(
-             client=self._client,
+         return await self._post(
+             "/models",
+             body=await async_maybe_transform(
+                 {
+                     "model_name": model_name,
+                     "model_source": model_source,
+                     "base_model": base_model,
+                     "description": description,
+                     "hf_token": hf_token,
+                     "lora_model": lora_model,
+                     "model_type": model_type,
+                 },
+                 model_upload_params.ModelUploadParams,
+             ),
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=ModelUploadResponse,
          )

-         data = {
-             "model_name": model_name,
-             "model_source": model_source,
-             "model_type": model_type,
-         }
-
-         if hf_token is not None:
-             data["hf_token"] = hf_token
-         if description is not None:
-             data["description"] = description
-         if base_model is not None:
-             data["base_model"] = base_model
-         if lora_model is not None:
-             data["lora_model"] = lora_model
-
-         response, _, _ = await requestor.arequest(
-             options=TogetherRequest(
-                 method="POST",
-                 url="models",
-                 params=data,
-             ),
-             stream=False,
+
+ class ModelsResourceWithRawResponse:
+     def __init__(self, models: ModelsResource) -> None:
+         self._models = models
+
+         self.list = to_raw_response_wrapper(
+             models.list,
+         )
+         self.upload = to_raw_response_wrapper(
+             models.upload,
+         )
+
+
+ class AsyncModelsResourceWithRawResponse:
+     def __init__(self, models: AsyncModelsResource) -> None:
+         self._models = models
+
+         self.list = async_to_raw_response_wrapper(
+             models.list,
          )
+         self.upload = async_to_raw_response_wrapper(
+             models.upload,
+         )
+
+
+ class ModelsResourceWithStreamingResponse:
+     def __init__(self, models: ModelsResource) -> None:
+         self._models = models

-         assert isinstance(response, TogetherResponse)
+         self.list = to_streamed_response_wrapper(
+             models.list,
+         )
+         self.upload = to_streamed_response_wrapper(
+             models.upload,
+         )

-         return ModelUploadResponse.from_api_response(response.data)
+
+ class AsyncModelsResourceWithStreamingResponse:
+     def __init__(self, models: AsyncModelsResource) -> None:
+         self._models = models
+
+         self.list = async_to_streamed_response_wrapper(
+             models.list,
+         )
+         self.upload = async_to_streamed_response_wrapper(
+             models.upload,
+         )
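
The diff above replaces the hand-written Models/AsyncModels classes with Stainless-generated ModelsResource/AsyncModelsResource classes. The following is a minimal usage sketch of the 2.0.0a6 surface, not taken from the diff itself: it assumes the generated client is exposed as together.Together (per together/_client.py in the file list), that it reads TOGETHER_API_KEY from the environment, and that the raw-response wrappers behave like other Stainless-generated clients; treat the entry point and accessor names as illustrative.

    # Sketch only: entry-point name, env-var handling, and raw-response helpers
    # are assumptions based on the Stainless client pattern, not confirmed by this diff.
    from together import Together

    client = Together()  # assumed to read TOGETHER_API_KEY from the environment

    # Resource-style call corresponding to ModelsResource.list() above (GET /models)
    models = client.models.list()

    # Raw-response access via the generated wrapper classes shown in the diff
    raw = client.models.with_raw_response.list()
    print(raw.headers)   # underlying HTTP response headers (assumed accessor)
    print(raw.parse())   # parsed ModelListResponse (assumed accessor)

    # Upload an adapter; keyword arguments mirror model_upload_params in the diff
    job = client.models.upload(
        model_name="my-org/my-adapter",          # hypothetical names
        model_source="hf-org/hf-adapter-repo",
        model_type="adapter",
        base_model="meta-llama/Llama-3-8b-hf",   # hypothetical base model id
    )
    print(job)  # ModelUploadResponse

Note that the 1.5.x dedicated=True filter on Models.list() has no direct equivalent in the generated list() signature shown above.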