together 1.5.35__py3-none-any.whl → 2.0.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +66 -84
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -490
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a6.dist-info/METADATA +729 -0
  159. together-2.0.0a6.dist-info/RECORD +165 -0
  160. {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
  161. together-2.0.0a6.dist-info/entry_points.txt +2 -0
  162. {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -465
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.35.dist-info/METADATA +0 -583
  204. together-1.5.35.dist-info/RECORD +0 -77
  205. together-1.5.35.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
together/resources/fine_tuning.py (new file)
@@ -0,0 +1,1033 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing_extensions import Literal
+
+ import httpx
+ from rich import print as rprint
+
+ from ..types import fine_tuning_delete_params, fine_tuning_content_params
+ from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+ from .._utils import maybe_transform, async_maybe_transform
+ from .._compat import cached_property
+ from .._resource import SyncAPIResource, AsyncAPIResource
+ from .._response import (
+     BinaryAPIResponse,
+     AsyncBinaryAPIResponse,
+     StreamedBinaryAPIResponse,
+     AsyncStreamedBinaryAPIResponse,
+     to_raw_response_wrapper,
+     to_streamed_response_wrapper,
+     async_to_raw_response_wrapper,
+     to_custom_raw_response_wrapper,
+     async_to_streamed_response_wrapper,
+     to_custom_streamed_response_wrapper,
+     async_to_custom_raw_response_wrapper,
+     async_to_custom_streamed_response_wrapper,
+ )
+ from .._base_client import make_request_options
+ from ..lib.types.fine_tuning import FinetuneResponse as FinetuneResponseLib, FinetuneTrainingLimits
+ from ..types.finetune_response import FinetuneResponse
+ from ..lib.resources.fine_tuning import get_model_limits, async_get_model_limits, create_finetune_request
+ from ..types.fine_tuning_list_response import FineTuningListResponse
+ from ..types.fine_tuning_cancel_response import FineTuningCancelResponse
+ from ..types.fine_tuning_delete_response import FineTuningDeleteResponse
+ from ..types.fine_tuning_list_events_response import FineTuningListEventsResponse
+ from ..types.fine_tuning_list_checkpoints_response import FineTuningListCheckpointsResponse
+
+ __all__ = ["FineTuningResource", "AsyncFineTuningResource"]
+
+
+ class FineTuningResource(SyncAPIResource):
+     @cached_property
+     def with_raw_response(self) -> FineTuningResourceWithRawResponse:
+         """
+         This property can be used as a prefix for any HTTP method call to return
+         the raw response object instead of the parsed content.
+
+         For more information, see https://www.github.com/togethercomputer/together-py#accessing-raw-response-data-eg-headers
+         """
+         return FineTuningResourceWithRawResponse(self)
+
+     @cached_property
+     def with_streaming_response(self) -> FineTuningResourceWithStreamingResponse:
+         """
+         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+         For more information, see https://www.github.com/togethercomputer/together-py#with_streaming_response
+         """
+         return FineTuningResourceWithStreamingResponse(self)
+
+     def create(
+         self,
+         *,
+         training_file: str,
+         model: str | None = None,
+         n_epochs: int = 1,
+         validation_file: str | None = "",
+         n_evals: int | None = 0,
+         n_checkpoints: int | None = 1,
+         batch_size: int | Literal["max"] = "max",
+         learning_rate: float | None = 0.00001,
+         lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
+         min_lr_ratio: float = 0.0,
+         scheduler_num_cycles: float = 0.5,
+         warmup_ratio: float = 0.0,
+         max_grad_norm: float = 1.0,
+         weight_decay: float = 0.0,
+         lora: bool = True,
+         lora_r: int | None = None,
+         lora_dropout: float | None = 0,
+         lora_alpha: float | None = None,
+         lora_trainable_modules: str | None = "all-linear",
+         suffix: str | None = None,
+         wandb_api_key: str | None = None,
+         wandb_base_url: str | None = None,
+         wandb_project_name: str | None = None,
+         wandb_name: str | None = None,
+         verbose: bool = False,
+         model_limits: FinetuneTrainingLimits | None = None,
+         train_on_inputs: bool | Literal["auto"] | None = None,
+         training_method: str = "sft",
+         dpo_beta: float | None = None,
+         dpo_normalize_logratios_by_length: bool = False,
+         rpo_alpha: float | None = None,
+         simpo_gamma: float | None = None,
+         from_checkpoint: str | None = None,
+         from_hf_model: str | None = None,
+         hf_model_revision: str | None = None,
+         hf_api_token: str | None = None,
+         hf_output_repo_name: str | None = None,
+     ) -> FinetuneResponseLib:
+         """
+         Method to initiate a fine-tuning job
+
+         Args:
+             training_file (str): File-ID of a file uploaded to the Together API
+             model (str, optional): Name of the base model to run fine-tune job on
+             n_epochs (int, optional): Number of epochs for fine-tuning. Defaults to 1.
+             validation_file (str, optional): File ID of a file uploaded to the Together API for validation.
+             n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
+             n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
+                 Defaults to 1.
+             batch_size (int or "max"): Batch size for fine-tuning. Defaults to "max".
+             learning_rate (float, optional): Learning rate multiplier to use for training.
+                 Defaults to 0.00001.
+             lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "cosine".
+             min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
+                 the learning rate scheduler. Defaults to 0.0.
+             scheduler_num_cycles (float, optional): Number or fraction of cycles for the cosine learning rate scheduler. Defaults to 0.5.
+             warmup_ratio (float, optional): Warmup ratio for the learning rate scheduler.
+             max_grad_norm (float, optional): Max gradient norm. Defaults to 1.0, set to 0 to disable.
+             weight_decay (float, optional): Weight decay. Defaults to 0.0.
+             lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
+             lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
+             lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
+             lora_alpha (float, optional): Alpha for LoRA adapters. Defaults to 8.
+             lora_trainable_modules (str, optional): Trainable modules for LoRA adapters. Defaults to "all-linear".
+             suffix (str, optional): Up to 40 character suffix that will be added to your fine-tuned model name.
+                 Defaults to None.
+             wandb_api_key (str, optional): API key for Weights & Biases integration.
+                 Defaults to None.
+             wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                 Defaults to None.
+             wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                 Defaults to None.
+             wandb_name (str, optional): Run name for Weights & Biases integration.
+                 Defaults to None.
+             verbose (bool, optional): Whether to print the job parameters before submitting a request.
+                 Defaults to False.
+             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters of the model in fine-tuning.
+                 Defaults to None.
+             train_on_inputs (bool or "auto", optional): Whether to mask the user messages in conversational data or prompts in instruction data.
+                 "auto" will automatically determine whether to mask the inputs based on the data format.
+                 For datasets with the "text" field (general format), inputs will not be masked.
+                 For datasets with the "messages" field (conversational format) or "prompt" and "completion" fields
+                 (Instruction format), inputs will be masked.
+                 Defaults to None, or "auto" if training_method is "sft" (set in create_finetune_request).
+             training_method (str, optional): Training method. Defaults to "sft".
+                 Supported methods: "sft", "dpo".
+             dpo_beta (float, optional): DPO beta parameter. Defaults to None.
+             dpo_normalize_logratios_by_length (bool): Whether or not to normalize log-ratios by sample length. Defaults to False.
+             rpo_alpha (float, optional): RPO alpha parameter of DPO training to include NLL in the loss. Defaults to None.
+             simpo_gamma (float, optional): SimPO gamma parameter. Defaults to None.
+             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
+                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
+                 The step value is optional; without it the final checkpoint will be used.
+             from_hf_model (str, optional): The Hugging Face Hub repo to start training from.
+                 Should be as close as possible to the base model (specified by the `model` argument) in terms of architecture and size.
+             hf_model_revision (str, optional): The revision of the Hugging Face Hub model to continue training from. Defaults to None.
+                 Example: hf_model_revision=None (defaults to the latest revision in `main`) or
+                 hf_model_revision="607a30d783dfa663caf39e06633721c8d4cfcd7e" (specific commit).
+             hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
+             hf_output_repo_name (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.
+
+         Returns:
+             FinetuneResponse: Object containing information about fine-tuning job.
+         """
+
+         if model_limits is None:
+             model_name = None
+             # mypy doesn't understand that model or from_checkpoint is not None
+             if model is not None:
+                 model_name = model
+             elif from_checkpoint is not None:
+                 model_name = from_checkpoint.split(":")[0]
+             else:
+                 # this branch is unreachable, but mypy doesn't know that
+                 pass
+             model_limits = get_model_limits(self._client, str(model_name))
+
+         finetune_request = create_finetune_request(
+             model_limits=model_limits,
+             training_file=training_file,
+             model=model,
+             n_epochs=n_epochs,
+             validation_file=validation_file,
+             n_evals=n_evals,
+             n_checkpoints=n_checkpoints,
+             batch_size=batch_size,
+             learning_rate=learning_rate,
+             lr_scheduler_type=lr_scheduler_type,
+             min_lr_ratio=min_lr_ratio,
+             scheduler_num_cycles=scheduler_num_cycles,
+             warmup_ratio=warmup_ratio,
+             max_grad_norm=max_grad_norm,
+             weight_decay=weight_decay,
+             lora=lora,
+             lora_r=lora_r,
+             lora_dropout=lora_dropout,
+             lora_alpha=lora_alpha,
+             lora_trainable_modules=lora_trainable_modules,
+             suffix=suffix,
+             wandb_api_key=wandb_api_key,
+             wandb_base_url=wandb_base_url,
+             wandb_project_name=wandb_project_name,
+             wandb_name=wandb_name,
+             train_on_inputs=train_on_inputs,
+             training_method=training_method,
+             dpo_beta=dpo_beta,
+             dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+             rpo_alpha=rpo_alpha,
+             simpo_gamma=simpo_gamma,
+             from_checkpoint=from_checkpoint,
+             from_hf_model=from_hf_model,
+             hf_model_revision=hf_model_revision,
+             hf_api_token=hf_api_token,
+             hf_output_repo_name=hf_output_repo_name,
+         )
+
+         if verbose:
+             rprint(
+                 "Submitting a fine-tuning job with the following parameters:",
+                 finetune_request,
+             )
+         parameter_payload = finetune_request.model_dump(exclude_none=True)
+
+         return self._client.post(
+             "/fine-tunes",
+             body=parameter_payload,
+             cast_to=FinetuneResponseLib,
+         )
+
+     def retrieve(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FinetuneResponse:
+         """
+         List the metadata for a single fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return self._get(
+             f"/fine-tunes/{id}",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FinetuneResponse,
+         )
+
+     def list(
+         self,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningListResponse:
+         """List the metadata for all fine-tuning jobs.
+
+         Returns a list of
+         FinetuneResponseTruncated objects.
+         """
+         return self._get(
+             "/fine-tunes",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningListResponse,
+         )
+
+     def delete(
+         self,
+         id: str,
+         *,
+         force: bool,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningDeleteResponse:
+         """
+         Delete a fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return self._delete(
+             f"/fine-tunes/{id}",
+             options=make_request_options(
+                 extra_headers=extra_headers,
+                 extra_query=extra_query,
+                 extra_body=extra_body,
+                 timeout=timeout,
+                 query=maybe_transform({"force": force}, fine_tuning_delete_params.FineTuningDeleteParams),
+             ),
+             cast_to=FineTuningDeleteResponse,
+         )
+
+     def cancel(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningCancelResponse:
+         """Cancel a currently running fine-tuning job.
+
+         Returns a FinetuneResponseTruncated
+         object.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return self._post(
+             f"/fine-tunes/{id}/cancel",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningCancelResponse,
+         )
+
+     def content(
+         self,
+         *,
+         ft_id: str,
+         checkpoint: Literal["merged", "adapter", "model_output_path"] | Omit = omit,
+         checkpoint_step: int | Omit = omit,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> BinaryAPIResponse:
+         """
+         Download a compressed fine-tuned model or checkpoint.
+
+         Args:
+           ft_id: Fine-tune ID to download. A string that starts with `ft-`.
+
+           checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is
+               required if the checkpoint_step is not set.
+
+           checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if
+               set.
+
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
+         return self._get(
+             "/finetune/download",
+             options=make_request_options(
+                 extra_headers=extra_headers,
+                 extra_query=extra_query,
+                 extra_body=extra_body,
+                 timeout=timeout,
+                 query=maybe_transform(
+                     {
+                         "ft_id": ft_id,
+                         "checkpoint": checkpoint,
+                         "checkpoint_step": checkpoint_step,
+                     },
+                     fine_tuning_content_params.FineTuningContentParams,
+                 ),
+             ),
+             cast_to=BinaryAPIResponse,
+         )
+
+     def list_checkpoints(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningListCheckpointsResponse:
+         """
+         List the checkpoints for a single fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return self._get(
+             f"/fine-tunes/{id}/checkpoints",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningListCheckpointsResponse,
+         )
+
+     def list_events(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningListEventsResponse:
+         """
+         List the events for a single fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return self._get(
+             f"/fine-tunes/{id}/events",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningListEventsResponse,
+         )
+
+
+ class AsyncFineTuningResource(AsyncAPIResource):
+     @cached_property
+     def with_raw_response(self) -> AsyncFineTuningResourceWithRawResponse:
+         """
+         This property can be used as a prefix for any HTTP method call to return
+         the raw response object instead of the parsed content.
+
+         For more information, see https://www.github.com/togethercomputer/together-py#accessing-raw-response-data-eg-headers
+         """
+         return AsyncFineTuningResourceWithRawResponse(self)
+
+     @cached_property
+     def with_streaming_response(self) -> AsyncFineTuningResourceWithStreamingResponse:
+         """
+         An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+         For more information, see https://www.github.com/togethercomputer/together-py#with_streaming_response
+         """
+         return AsyncFineTuningResourceWithStreamingResponse(self)
+
+     async def create(
+         self,
+         *,
+         training_file: str,
+         model: str | None = None,
+         n_epochs: int = 1,
+         validation_file: str | None = "",
+         n_evals: int | None = 0,
+         n_checkpoints: int | None = 1,
+         batch_size: int | Literal["max"] = "max",
+         learning_rate: float | None = 0.00001,
+         lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
+         min_lr_ratio: float = 0.0,
+         scheduler_num_cycles: float = 0.5,
+         warmup_ratio: float = 0.0,
+         max_grad_norm: float = 1.0,
+         weight_decay: float = 0.0,
+         lora: bool = True,
+         lora_r: int | None = None,
+         lora_dropout: float | None = 0,
+         lora_alpha: float | None = None,
+         lora_trainable_modules: str | None = "all-linear",
+         suffix: str | None = None,
+         wandb_api_key: str | None = None,
+         wandb_base_url: str | None = None,
+         wandb_project_name: str | None = None,
+         wandb_name: str | None = None,
+         verbose: bool = False,
+         model_limits: FinetuneTrainingLimits | None = None,
+         train_on_inputs: bool | Literal["auto"] | None = None,
+         training_method: str = "sft",
+         dpo_beta: float | None = None,
+         dpo_normalize_logratios_by_length: bool = False,
+         rpo_alpha: float | None = None,
+         simpo_gamma: float | None = None,
+         from_checkpoint: str | None = None,
+         from_hf_model: str | None = None,
+         hf_model_revision: str | None = None,
+         hf_api_token: str | None = None,
+         hf_output_repo_name: str | None = None,
+     ) -> FinetuneResponse:
+         """
+         Method to initiate a fine-tuning job
+
+         Args:
+             training_file (str): File-ID of a file uploaded to the Together API
+             model (str, optional): Name of the base model to run fine-tune job on
+             n_epochs (int, optional): Number of epochs for fine-tuning. Defaults to 1.
+             validation_file (str, optional): File ID of a file uploaded to the Together API for validation.
+             n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
+             n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
+                 Defaults to 1.
+             batch_size (int or "max"): Batch size for fine-tuning. Defaults to "max".
+             learning_rate (float, optional): Learning rate multiplier to use for training.
+                 Defaults to 0.00001.
+             lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "cosine".
+             min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
+                 the learning rate scheduler. Defaults to 0.0.
+             scheduler_num_cycles (float, optional): Number or fraction of cycles for the cosine learning rate scheduler. Defaults to 0.5.
+             warmup_ratio (float, optional): Warmup ratio for the learning rate scheduler.
+             max_grad_norm (float, optional): Max gradient norm. Defaults to 1.0, set to 0 to disable.
+             weight_decay (float, optional): Weight decay. Defaults to 0.0.
+             lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
+             lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
+             lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
+             lora_alpha (float, optional): Alpha for LoRA adapters. Defaults to 8.
+             lora_trainable_modules (str, optional): Trainable modules for LoRA adapters. Defaults to "all-linear".
+             suffix (str, optional): Up to 40 character suffix that will be added to your fine-tuned model name.
+                 Defaults to None.
+             wandb_api_key (str, optional): API key for Weights & Biases integration.
+                 Defaults to None.
+             wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                 Defaults to None.
+             wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                 Defaults to None.
+             wandb_name (str, optional): Run name for Weights & Biases integration.
+                 Defaults to None.
+             verbose (bool, optional): Whether to print the job parameters before submitting a request.
+                 Defaults to False.
+             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters of the model in fine-tuning.
+                 Defaults to None.
+             train_on_inputs (bool or "auto", optional): Whether to mask the user messages in conversational data or prompts in instruction data.
+                 "auto" will automatically determine whether to mask the inputs based on the data format.
+                 For datasets with the "text" field (general format), inputs will not be masked.
+                 For datasets with the "messages" field (conversational format) or "prompt" and "completion" fields
+                 (Instruction format), inputs will be masked.
+                 Defaults to None, or "auto" if training_method is "sft" (set in create_finetune_request).
+             training_method (str, optional): Training method. Defaults to "sft".
+                 Supported methods: "sft", "dpo".
+             dpo_beta (float, optional): DPO beta parameter. Defaults to None.
+             dpo_normalize_logratios_by_length (bool): Whether or not to normalize log-ratios by sample length. Defaults to False.
+             rpo_alpha (float, optional): RPO alpha parameter of DPO training to include NLL in the loss. Defaults to None.
+             simpo_gamma (float, optional): SimPO gamma parameter. Defaults to None.
+             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
+                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
+                 The step value is optional; without it the final checkpoint will be used.
+             from_hf_model (str, optional): The Hugging Face Hub repo to start training from.
+                 Should be as close as possible to the base model (specified by the `model` argument) in terms of architecture and size.
+             hf_model_revision (str, optional): The revision of the Hugging Face Hub model to continue training from. Defaults to None.
+                 Example: hf_model_revision=None (defaults to the latest revision in `main`) or
+                 hf_model_revision="607a30d783dfa663caf39e06633721c8d4cfcd7e" (specific commit).
+             hf_api_token (str, optional): API key for the Hugging Face Hub. Defaults to None.
+             hf_output_repo_name (str, optional): HF repo to upload the fine-tuned model to. Defaults to None.
+
+         Returns:
+             FinetuneResponse: Object containing information about fine-tuning job.
+         """
+
+         if model_limits is None:
+             model_name = None
+             # mypy doesn't understand that model or from_checkpoint is not None
+             if model is not None:
+                 model_name = model
+             elif from_checkpoint is not None:
+                 model_name = from_checkpoint.split(":")[0]
+             else:
+                 # this branch is unreachable, but mypy doesn't know that
+                 pass
+             model_limits = await async_get_model_limits(self._client, str(model_name))
+
+         finetune_request = create_finetune_request(
+             model_limits=model_limits,
+             training_file=training_file,
+             model=model,
+             n_epochs=n_epochs,
+             validation_file=validation_file,
+             n_evals=n_evals,
+             n_checkpoints=n_checkpoints,
+             batch_size=batch_size,
+             learning_rate=learning_rate,
+             lr_scheduler_type=lr_scheduler_type,
+             min_lr_ratio=min_lr_ratio,
+             scheduler_num_cycles=scheduler_num_cycles,
+             warmup_ratio=warmup_ratio,
+             max_grad_norm=max_grad_norm,
+             weight_decay=weight_decay,
+             lora=lora,
+             lora_r=lora_r,
+             lora_dropout=lora_dropout,
+             lora_alpha=lora_alpha,
+             lora_trainable_modules=lora_trainable_modules,
+             suffix=suffix,
+             wandb_api_key=wandb_api_key,
+             wandb_base_url=wandb_base_url,
+             wandb_project_name=wandb_project_name,
+             wandb_name=wandb_name,
+             train_on_inputs=train_on_inputs,
+             training_method=training_method,
+             dpo_beta=dpo_beta,
+             dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+             rpo_alpha=rpo_alpha,
+             simpo_gamma=simpo_gamma,
+             from_checkpoint=from_checkpoint,
+             from_hf_model=from_hf_model,
+             hf_model_revision=hf_model_revision,
+             hf_api_token=hf_api_token,
+             hf_output_repo_name=hf_output_repo_name,
+         )
+
+         if verbose:
+             rprint(
+                 "Submitting a fine-tuning job with the following parameters:",
+                 finetune_request,
+             )
+         parameter_payload = finetune_request.model_dump(exclude_none=True)
+
+         return await self._client.post(
+             "/fine-tunes",
+             body=parameter_payload,
+             cast_to=FinetuneResponse,
+         )
+
+     async def retrieve(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FinetuneResponse:
+         """
+         List the metadata for a single fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return await self._get(
+             f"/fine-tunes/{id}",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FinetuneResponse,
+         )
+
+     async def list(
+         self,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningListResponse:
+         """List the metadata for all fine-tuning jobs.
+
+         Returns a list of
+         FinetuneResponseTruncated objects.
+         """
+         return await self._get(
+             "/fine-tunes",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningListResponse,
+         )
+
+     async def delete(
+         self,
+         id: str,
+         *,
+         force: bool,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningDeleteResponse:
+         """
+         Delete a fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return await self._delete(
+             f"/fine-tunes/{id}",
+             options=make_request_options(
+                 extra_headers=extra_headers,
+                 extra_query=extra_query,
+                 extra_body=extra_body,
+                 timeout=timeout,
+                 query=await async_maybe_transform({"force": force}, fine_tuning_delete_params.FineTuningDeleteParams),
+             ),
+             cast_to=FineTuningDeleteResponse,
+         )
+
+     async def cancel(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningCancelResponse:
+         """Cancel a currently running fine-tuning job.
+
+         Returns a FinetuneResponseTruncated
+         object.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return await self._post(
+             f"/fine-tunes/{id}/cancel",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningCancelResponse,
+         )
+
+     async def content(
+         self,
+         *,
+         ft_id: str,
+         checkpoint: Literal["merged", "adapter", "model_output_path"] | Omit = omit,
+         checkpoint_step: int | Omit = omit,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> AsyncBinaryAPIResponse:
+         """
+         Download a compressed fine-tuned model or checkpoint.
+
+         Args:
+           ft_id: Fine-tune ID to download. A string that starts with `ft-`.
+
+           checkpoint: Specifies checkpoint type to download - `merged` vs `adapter`. This field is
+               required if the checkpoint_step is not set.
+
+           checkpoint_step: Specifies step number for checkpoint to download. Ignores `checkpoint` value if
+               set.
+
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
+         return await self._get(
+             "/finetune/download",
+             options=make_request_options(
+                 extra_headers=extra_headers,
+                 extra_query=extra_query,
+                 extra_body=extra_body,
+                 timeout=timeout,
+                 query=await async_maybe_transform(
+                     {
+                         "ft_id": ft_id,
+                         "checkpoint": checkpoint,
+                         "checkpoint_step": checkpoint_step,
+                     },
+                     fine_tuning_content_params.FineTuningContentParams,
+                 ),
+             ),
+             cast_to=AsyncBinaryAPIResponse,
+         )
+
+     async def list_checkpoints(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningListCheckpointsResponse:
+         """
+         List the checkpoints for a single fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return await self._get(
+             f"/fine-tunes/{id}/checkpoints",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningListCheckpointsResponse,
+         )
+
+     async def list_events(
+         self,
+         id: str,
+         *,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = not_given,
+     ) -> FineTuningListEventsResponse:
+         """
+         List the events for a single fine-tuning job.
+
+         Args:
+           extra_headers: Send extra headers
+
+           extra_query: Add additional query parameters to the request
+
+           extra_body: Add additional JSON properties to the request
+
+           timeout: Override the client-level default timeout for this request, in seconds
+         """
+         if not id:
+             raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+         return await self._get(
+             f"/fine-tunes/{id}/events",
+             options=make_request_options(
+                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+             ),
+             cast_to=FineTuningListEventsResponse,
+         )
+
+
+ class FineTuningResourceWithRawResponse:
+     def __init__(self, fine_tuning: FineTuningResource) -> None:
+         self._fine_tuning = fine_tuning
+
+         self.retrieve = to_raw_response_wrapper(
+             fine_tuning.retrieve,
+         )
+         self.list = to_raw_response_wrapper(
+             fine_tuning.list,
+         )
+         self.delete = to_raw_response_wrapper(
+             fine_tuning.delete,
+         )
+         self.cancel = to_raw_response_wrapper(
+             fine_tuning.cancel,
+         )
+         self.content = to_custom_raw_response_wrapper(
+             fine_tuning.content,
+             BinaryAPIResponse,
+         )
+         self.list_checkpoints = to_raw_response_wrapper(
+             fine_tuning.list_checkpoints,
+         )
+         self.list_events = to_raw_response_wrapper(
+             fine_tuning.list_events,
+         )
+
+
+ class AsyncFineTuningResourceWithRawResponse:
+     def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
+         self._fine_tuning = fine_tuning
+
+         self.retrieve = async_to_raw_response_wrapper(
+             fine_tuning.retrieve,
+         )
+         self.list = async_to_raw_response_wrapper(
+             fine_tuning.list,
+         )
+         self.delete = async_to_raw_response_wrapper(
+             fine_tuning.delete,
+         )
+         self.cancel = async_to_raw_response_wrapper(
+             fine_tuning.cancel,
+         )
+         self.content = async_to_custom_raw_response_wrapper(
+             fine_tuning.content,
+             AsyncBinaryAPIResponse,
+         )
+         self.list_checkpoints = async_to_raw_response_wrapper(
+             fine_tuning.list_checkpoints,
+         )
+         self.list_events = async_to_raw_response_wrapper(
+             fine_tuning.list_events,
+         )
+
+
+ class FineTuningResourceWithStreamingResponse:
+     def __init__(self, fine_tuning: FineTuningResource) -> None:
+         self._fine_tuning = fine_tuning
+
+         self.retrieve = to_streamed_response_wrapper(
+             fine_tuning.retrieve,
+         )
+         self.list = to_streamed_response_wrapper(
+             fine_tuning.list,
+         )
+         self.delete = to_streamed_response_wrapper(
+             fine_tuning.delete,
+         )
+         self.cancel = to_streamed_response_wrapper(
+             fine_tuning.cancel,
+         )
+         self.content = to_custom_streamed_response_wrapper(
+             fine_tuning.content,
+             StreamedBinaryAPIResponse,
+         )
+         self.list_checkpoints = to_streamed_response_wrapper(
+             fine_tuning.list_checkpoints,
+         )
+         self.list_events = to_streamed_response_wrapper(
+             fine_tuning.list_events,
+         )
+
+
+ class AsyncFineTuningResourceWithStreamingResponse:
+     def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
+         self._fine_tuning = fine_tuning
+
+         self.retrieve = async_to_streamed_response_wrapper(
+             fine_tuning.retrieve,
+         )
+         self.list = async_to_streamed_response_wrapper(
+             fine_tuning.list,
+         )
+         self.delete = async_to_streamed_response_wrapper(
+             fine_tuning.delete,
+         )
+         self.cancel = async_to_streamed_response_wrapper(
+             fine_tuning.cancel,
+         )
+         self.content = async_to_custom_streamed_response_wrapper(
+             fine_tuning.content,
+             AsyncStreamedBinaryAPIResponse,
+         )
+         self.list_checkpoints = async_to_streamed_response_wrapper(
+             fine_tuning.list_checkpoints,
+         )
+         self.list_events = async_to_streamed_response_wrapper(
+             fine_tuning.list_events,
+         )
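
For orientation, a minimal usage sketch of the fine-tuning resource added above. The names here are assumptions inferred from this diff, not verified against the released 2.0.0a6 client: the client class is assumed to still be exported as `Together`, the resource attribute is assumed to be `fine_tuning`, and the `id` field on the response is assumed to match the 1.x FinetuneResponse shape.

# Hypothetical sketch based on the generated resource above; attribute and field
# names are assumptions, not confirmed by this diff.
import os
from together import Together  # assumed to remain the exported client class

client = Together(api_key=os.environ["TOGETHER_API_KEY"])

# Submit a LoRA fine-tuning job (wraps create_finetune_request and POST /fine-tunes)
job = client.fine_tuning.create(
    training_file="file-xxxxxxxx",      # File-ID of an uploaded training file
    model="organization/base-model",    # placeholder base model name
    n_epochs=1,
    lora=True,
)

# Inspect the job and its event log (GET /fine-tunes/{id} and /fine-tunes/{id}/events)
status = client.fine_tuning.retrieve(job.id)
events = client.fine_tuning.list_events(job.id)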