together 1.5.35__py3-none-any.whl → 2.0.0a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +66 -84
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -490
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a7.dist-info/METADATA +730 -0
  159. together-2.0.0a7.dist-info/RECORD +165 -0
  160. {together-1.5.35.dist-info → together-2.0.0a7.dist-info}/WHEEL +1 -1
  161. together-2.0.0a7.dist-info/entry_points.txt +2 -0
  162. {together-1.5.35.dist-info → together-2.0.0a7.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -465
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.35.dist-info/METADATA +0 -583
  204. together-1.5.35.dist-info/RECORD +0 -77
  205. together-1.5.35.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
together/types/file_purpose.py
@@ -0,0 +1,9 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing_extensions import Literal, TypeAlias
+
+ __all__ = ["FilePurpose"]
+
+ FilePurpose: TypeAlias = Literal[
+     "fine-tune", "eval", "eval-sample", "eval-output", "eval-summary", "batch-generated", "batch-api"
+ ]
together/types/file_response.py
@@ -0,0 +1,31 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .file_type import FileType
+ from .file_purpose import FilePurpose
+
+ __all__ = ["FileResponse"]
+
+
+ class FileResponse(BaseModel):
+     id: str
+
+     bytes: int
+
+     created_at: int
+
+     filename: str
+
+     file_type: FileType = FieldInfo(alias="FileType")
+     """The type of the file"""
+
+     line_count: int = FieldInfo(alias="LineCount")
+
+     object: str
+
+     processed: bool = FieldInfo(alias="Processed")
+
+     purpose: FilePurpose
+     """The purpose of the file"""
together/types/file_type.py
@@ -0,0 +1,7 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing_extensions import Literal, TypeAlias
+
+ __all__ = ["FileType"]
+
+ FileType: TypeAlias = Literal["csv", "jsonl", "parquet"]
together/types/fine_tuning_cancel_response.py
@@ -0,0 +1,194 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Union, Optional
+ from datetime import datetime
+ from typing_extensions import Literal, TypeAlias
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .finetune_event import FinetuneEvent
+
+ __all__ = [
+     "FineTuningCancelResponse",
+     "LrScheduler",
+     "LrSchedulerLrSchedulerArgs",
+     "LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs",
+     "LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs",
+     "TrainingMethod",
+     "TrainingMethodTrainingMethodSft",
+     "TrainingMethodTrainingMethodDpo",
+     "TrainingType",
+     "TrainingTypeFullTrainingType",
+     "TrainingTypeLoRaTrainingType",
+ ]
+
+
+ class LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs(BaseModel):
+     min_lr_ratio: Optional[float] = None
+     """The ratio of the final learning rate to the peak learning rate"""
+
+
+ class LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs(BaseModel):
+     min_lr_ratio: float
+     """The ratio of the final learning rate to the peak learning rate"""
+
+     num_cycles: float
+     """Number or fraction of cycles for the cosine learning rate scheduler"""
+
+
+ LrSchedulerLrSchedulerArgs: TypeAlias = Union[
+     LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs, LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs
+ ]
+
+
+ class LrScheduler(BaseModel):
+     lr_scheduler_type: Literal["linear", "cosine"]
+
+     lr_scheduler_args: Optional[LrSchedulerLrSchedulerArgs] = None
+
+
+ class TrainingMethodTrainingMethodSft(BaseModel):
+     method: Literal["sft"]
+
+     train_on_inputs: Union[bool, Literal["auto"]]
+     """
+     Whether to mask the user messages in conversational data or prompts in
+     instruction data.
+     """
+
+
+ class TrainingMethodTrainingMethodDpo(BaseModel):
+     method: Literal["dpo"]
+
+     dpo_beta: Optional[float] = None
+
+     dpo_normalize_logratios_by_length: Optional[bool] = None
+
+     dpo_reference_free: Optional[bool] = None
+
+     rpo_alpha: Optional[float] = None
+
+     simpo_gamma: Optional[float] = None
+
+
+ TrainingMethod: TypeAlias = Union[TrainingMethodTrainingMethodSft, TrainingMethodTrainingMethodDpo]
+
+
+ class TrainingTypeFullTrainingType(BaseModel):
+     type: Literal["Full"]
+
+
+ class TrainingTypeLoRaTrainingType(BaseModel):
+     lora_alpha: int
+
+     lora_r: int
+
+     type: Literal["Lora"]
+
+     lora_dropout: Optional[float] = None
+
+     lora_trainable_modules: Optional[str] = None
+
+
+ TrainingType: TypeAlias = Union[TrainingTypeFullTrainingType, TrainingTypeLoRaTrainingType]
+
+
+ class FineTuningCancelResponse(BaseModel):
+     id: str
+     """Unique identifier for the fine-tune job"""
+
+     created_at: datetime
+     """Creation timestamp of the fine-tune job"""
+
+     status: Literal[
+         "pending",
+         "queued",
+         "running",
+         "compressing",
+         "uploading",
+         "cancel_requested",
+         "cancelled",
+         "error",
+         "completed",
+     ]
+
+     updated_at: datetime
+     """Last update timestamp of the fine-tune job"""
+
+     batch_size: Optional[int] = None
+     """Batch size used for training"""
+
+     events: Optional[List[FinetuneEvent]] = None
+     """Events related to this fine-tune job"""
+
+     from_checkpoint: Optional[str] = None
+     """Checkpoint used to continue training"""
+
+     from_hf_model: Optional[str] = None
+     """Hugging Face Hub repo to start training from"""
+
+     hf_model_revision: Optional[str] = None
+     """The revision of the Hugging Face Hub model to continue training from"""
+
+     learning_rate: Optional[float] = None
+     """Learning rate used for training"""
+
+     lr_scheduler: Optional[LrScheduler] = None
+     """Learning rate scheduler configuration"""
+
+     max_grad_norm: Optional[float] = None
+     """Maximum gradient norm for clipping"""
+
+     model: Optional[str] = None
+     """Base model used for fine-tuning"""
+
+     x_model_output_name: Optional[str] = FieldInfo(alias="model_output_name", default=None)
+
+     n_checkpoints: Optional[int] = None
+     """Number of checkpoints saved during training"""
+
+     n_epochs: Optional[int] = None
+     """Number of training epochs"""
+
+     n_evals: Optional[int] = None
+     """Number of evaluations during training"""
+
+     owner_address: Optional[str] = None
+     """Owner address information"""
+
+     suffix: Optional[str] = None
+     """Suffix added to the fine-tuned model name"""
+
+     token_count: Optional[int] = None
+     """Count of tokens processed"""
+
+     total_price: Optional[int] = None
+     """Total price for the fine-tuning job"""
+
+     training_file: Optional[str] = None
+     """File-ID of the training file"""
+
+     training_method: Optional[TrainingMethod] = None
+     """Method of training used"""
+
+     training_type: Optional[TrainingType] = None
+     """Type of training used (full or LoRA)"""
+
+     user_id: Optional[str] = None
+     """Identifier for the user who created the job"""
+
+     validation_file: Optional[str] = None
+     """File-ID of the validation file"""
+
+     wandb_name: Optional[str] = None
+     """Weights & Biases run name"""
+
+     wandb_project_name: Optional[str] = None
+     """Weights & Biases project name"""
+
+     warmup_ratio: Optional[float] = None
+     """Ratio of warmup steps"""
+
+     weight_decay: Optional[float] = None
+     """Weight decay value used"""
together/types/fine_tuning_content_params.py
@@ -0,0 +1,24 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing_extensions import Literal, Required, TypedDict
+
+ __all__ = ["FineTuningContentParams"]
+
+
+ class FineTuningContentParams(TypedDict, total=False):
+     ft_id: Required[str]
+     """Fine-tune ID to download. A string that starts with `ft-`."""
+
+     checkpoint: Literal["merged", "adapter", "model_output_path"]
+     """Specifies checkpoint type to download - `merged` vs `adapter`.
+
+     This field is required if the checkpoint_step is not set.
+     """
+
+     checkpoint_step: int
+     """Specifies step number for checkpoint to download.
+
+     Ignores `checkpoint` value if set.
+     """
together/types/fine_tuning_delete_params.py
@@ -0,0 +1,11 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing_extensions import Required, TypedDict
+
+ __all__ = ["FineTuningDeleteParams"]
+
+
+ class FineTuningDeleteParams(TypedDict, total=False):
+     force: Required[bool]
together/types/fine_tuning_delete_response.py
@@ -0,0 +1,12 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import Optional
+
+ from .._models import BaseModel
+
+ __all__ = ["FineTuningDeleteResponse"]
+
+
+ class FineTuningDeleteResponse(BaseModel):
+     message: Optional[str] = None
+     """Message indicating the result of the deletion"""
together/types/fine_tuning_list_checkpoints_response.py
@@ -0,0 +1,21 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List
+
+ from .._models import BaseModel
+
+ __all__ = ["FineTuningListCheckpointsResponse", "Data"]
+
+
+ class Data(BaseModel):
+     checkpoint_type: str
+
+     created_at: str
+
+     path: str
+
+     step: int
+
+
+ class FineTuningListCheckpointsResponse(BaseModel):
+     data: List[Data]
together/types/fine_tuning_list_events_response.py
@@ -0,0 +1,12 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List
+
+ from .._models import BaseModel
+ from .finetune_event import FinetuneEvent
+
+ __all__ = ["FineTuningListEventsResponse"]
+
+
+ class FineTuningListEventsResponse(BaseModel):
+     data: List[FinetuneEvent]
together/types/fine_tuning_list_response.py
@@ -0,0 +1,199 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Union, Optional
+ from datetime import datetime
+ from typing_extensions import Literal, TypeAlias
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .finetune_event import FinetuneEvent
+
+ __all__ = [
+     "FineTuningListResponse",
+     "Data",
+     "DataLrScheduler",
+     "DataLrSchedulerLrSchedulerArgs",
+     "DataLrSchedulerLrSchedulerArgsLinearLrSchedulerArgs",
+     "DataLrSchedulerLrSchedulerArgsCosineLrSchedulerArgs",
+     "DataTrainingMethod",
+     "DataTrainingMethodTrainingMethodSft",
+     "DataTrainingMethodTrainingMethodDpo",
+     "DataTrainingType",
+     "DataTrainingTypeFullTrainingType",
+     "DataTrainingTypeLoRaTrainingType",
+ ]
+
+
+ class DataLrSchedulerLrSchedulerArgsLinearLrSchedulerArgs(BaseModel):
+     min_lr_ratio: Optional[float] = None
+     """The ratio of the final learning rate to the peak learning rate"""
+
+
+ class DataLrSchedulerLrSchedulerArgsCosineLrSchedulerArgs(BaseModel):
+     min_lr_ratio: float
+     """The ratio of the final learning rate to the peak learning rate"""
+
+     num_cycles: float
+     """Number or fraction of cycles for the cosine learning rate scheduler"""
+
+
+ DataLrSchedulerLrSchedulerArgs: TypeAlias = Union[
+     DataLrSchedulerLrSchedulerArgsLinearLrSchedulerArgs, DataLrSchedulerLrSchedulerArgsCosineLrSchedulerArgs
+ ]
+
+
+ class DataLrScheduler(BaseModel):
+     lr_scheduler_type: Literal["linear", "cosine"]
+
+     lr_scheduler_args: Optional[DataLrSchedulerLrSchedulerArgs] = None
+
+
+ class DataTrainingMethodTrainingMethodSft(BaseModel):
+     method: Literal["sft"]
+
+     train_on_inputs: Union[bool, Literal["auto"]]
+     """
+     Whether to mask the user messages in conversational data or prompts in
+     instruction data.
+     """
+
+
+ class DataTrainingMethodTrainingMethodDpo(BaseModel):
+     method: Literal["dpo"]
+
+     dpo_beta: Optional[float] = None
+
+     dpo_normalize_logratios_by_length: Optional[bool] = None
+
+     dpo_reference_free: Optional[bool] = None
+
+     rpo_alpha: Optional[float] = None
+
+     simpo_gamma: Optional[float] = None
+
+
+ DataTrainingMethod: TypeAlias = Union[DataTrainingMethodTrainingMethodSft, DataTrainingMethodTrainingMethodDpo]
+
+
+ class DataTrainingTypeFullTrainingType(BaseModel):
+     type: Literal["Full"]
+
+
+ class DataTrainingTypeLoRaTrainingType(BaseModel):
+     lora_alpha: int
+
+     lora_r: int
+
+     type: Literal["Lora"]
+
+     lora_dropout: Optional[float] = None
+
+     lora_trainable_modules: Optional[str] = None
+
+
+ DataTrainingType: TypeAlias = Union[DataTrainingTypeFullTrainingType, DataTrainingTypeLoRaTrainingType]
+
+
+ class Data(BaseModel):
+     id: str
+     """Unique identifier for the fine-tune job"""
+
+     created_at: datetime
+     """Creation timestamp of the fine-tune job"""
+
+     status: Literal[
+         "pending",
+         "queued",
+         "running",
+         "compressing",
+         "uploading",
+         "cancel_requested",
+         "cancelled",
+         "error",
+         "completed",
+     ]
+
+     updated_at: datetime
+     """Last update timestamp of the fine-tune job"""
+
+     batch_size: Optional[int] = None
+     """Batch size used for training"""
+
+     events: Optional[List[FinetuneEvent]] = None
+     """Events related to this fine-tune job"""
+
+     from_checkpoint: Optional[str] = None
+     """Checkpoint used to continue training"""
+
+     from_hf_model: Optional[str] = None
+     """Hugging Face Hub repo to start training from"""
+
+     hf_model_revision: Optional[str] = None
+     """The revision of the Hugging Face Hub model to continue training from"""
+
+     learning_rate: Optional[float] = None
+     """Learning rate used for training"""
+
+     lr_scheduler: Optional[DataLrScheduler] = None
+     """Learning rate scheduler configuration"""
+
+     max_grad_norm: Optional[float] = None
+     """Maximum gradient norm for clipping"""
+
+     model: Optional[str] = None
+     """Base model used for fine-tuning"""
+
+     x_model_output_name: Optional[str] = FieldInfo(alias="model_output_name", default=None)
+
+     n_checkpoints: Optional[int] = None
+     """Number of checkpoints saved during training"""
+
+     n_epochs: Optional[int] = None
+     """Number of training epochs"""
+
+     n_evals: Optional[int] = None
+     """Number of evaluations during training"""
+
+     owner_address: Optional[str] = None
+     """Owner address information"""
+
+     suffix: Optional[str] = None
+     """Suffix added to the fine-tuned model name"""
+
+     token_count: Optional[int] = None
+     """Count of tokens processed"""
+
+     total_price: Optional[int] = None
+     """Total price for the fine-tuning job"""
+
+     training_file: Optional[str] = None
+     """File-ID of the training file"""
+
+     training_method: Optional[DataTrainingMethod] = None
+     """Method of training used"""
+
+     training_type: Optional[DataTrainingType] = None
+     """Type of training used (full or LoRA)"""
+
+     user_id: Optional[str] = None
+     """Identifier for the user who created the job"""
+
+     validation_file: Optional[str] = None
+     """File-ID of the validation file"""
+
+     wandb_name: Optional[str] = None
+     """Weights & Biases run name"""
+
+     wandb_project_name: Optional[str] = None
+     """Weights & Biases project name"""
+
+     warmup_ratio: Optional[float] = None
+     """Ratio of warmup steps"""
+
+     weight_decay: Optional[float] = None
+     """Weight decay value used"""
+
+
+ class FineTuningListResponse(BaseModel):
+     data: List[Data]
together/types/finetune_event.py
@@ -0,0 +1,41 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import Optional
+ from typing_extensions import Literal
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .finetune_event_type import FinetuneEventType
+
+ __all__ = ["FinetuneEvent"]
+
+
+ class FinetuneEvent(BaseModel):
+     checkpoint_path: str
+
+     created_at: str
+
+     hash: str
+
+     message: str
+
+     x_model_path: str = FieldInfo(alias="model_path")
+
+     object: Literal["fine-tune-event"]
+
+     param_count: int
+
+     step: int
+
+     token_count: int
+
+     total_steps: int
+
+     training_offset: int
+
+     type: FinetuneEventType
+
+     wandb_url: str
+
+     level: Optional[Literal["info", "warning", "error", "legacy_info", "legacy_iwarning", "legacy_ierror"]] = None
together/types/finetune_event_type.py
@@ -0,0 +1,33 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing_extensions import Literal, TypeAlias
+
+ __all__ = ["FinetuneEventType"]
+
+ FinetuneEventType: TypeAlias = Literal[
+     "job_pending",
+     "job_start",
+     "job_stopped",
+     "model_downloading",
+     "model_download_complete",
+     "training_data_downloading",
+     "training_data_download_complete",
+     "validation_data_downloading",
+     "validation_data_download_complete",
+     "wandb_init",
+     "training_start",
+     "checkpoint_save",
+     "billing_limit",
+     "epoch_complete",
+     "training_complete",
+     "model_compressing",
+     "model_compression_complete",
+     "model_uploading",
+     "model_upload_complete",
+     "job_complete",
+     "job_error",
+     "cancel_requested",
+     "job_restarted",
+     "refund",
+     "warning",
+ ]
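Since event types arrive as plain strings, a common consumer pattern is scanning a job's event stream for a terminal state. A small sketch; treating exactly these three types as terminal is an assumption, not something the spec above states:

```python
from typing import Iterable, Optional

# Assumed terminal states drawn from the FinetuneEventType literal above.
TERMINAL_TYPES = {"job_complete", "job_error", "job_stopped"}

def first_terminal_type(event_types: Iterable[str]) -> Optional[str]:
    """Return the first terminal event type seen, or None if the job is still running."""
    for event_type in event_types:
        if event_type in TERMINAL_TYPES:
            return event_type
    return None

print(first_terminal_type(["job_pending", "job_start", "job_complete"]))  # job_complete
```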