together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. together/__init__.py +101 -63
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +108 -75
  30. together/lib/cli/api/evals.py +588 -0
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
  33. together/lib/cli/api/models.py +140 -0
  34. together/{cli → lib/cli}/api/utils.py +6 -7
  35. together/{cli → lib/cli}/cli.py +16 -24
  36. together/{constants.py → lib/constants.py} +17 -12
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +455 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/lib/utils/files.py +628 -0
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -33
  50. together/resources/audio/__init__.py +72 -21
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -122
  53. together/resources/audio/transcriptions.py +282 -0
  54. together/resources/audio/translations.py +256 -0
  55. together/resources/audio/voices.py +135 -0
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +598 -395
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +398 -121
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +256 -108
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +238 -90
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +374 -0
  75. together/types/__init__.py +65 -109
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_job.py +57 -0
  157. together-2.0.0a8.dist-info/METADATA +680 -0
  158. together-2.0.0a8.dist-info/RECORD +164 -0
  159. {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
  160. together-2.0.0a8.dist-info/entry_points.txt +2 -0
  161. {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
  162. together/abstract/api_requestor.py +0 -729
  163. together/cli/api/chat.py +0 -276
  164. together/cli/api/completions.py +0 -119
  165. together/cli/api/images.py +0 -93
  166. together/cli/api/models.py +0 -55
  167. together/client.py +0 -176
  168. together/error.py +0 -194
  169. together/filemanager.py +0 -389
  170. together/legacy/__init__.py +0 -0
  171. together/legacy/base.py +0 -27
  172. together/legacy/complete.py +0 -93
  173. together/legacy/embeddings.py +0 -27
  174. together/legacy/files.py +0 -146
  175. together/legacy/finetune.py +0 -177
  176. together/legacy/images.py +0 -27
  177. together/legacy/models.py +0 -44
  178. together/resources/batch.py +0 -136
  179. together/resources/code_interpreter.py +0 -82
  180. together/resources/finetune.py +0 -1064
  181. together/together_response.py +0 -50
  182. together/types/abstract.py +0 -26
  183. together/types/audio_speech.py +0 -110
  184. together/types/batch.py +0 -53
  185. together/types/chat_completions.py +0 -197
  186. together/types/code_interpreter.py +0 -57
  187. together/types/common.py +0 -66
  188. together/types/completions.py +0 -107
  189. together/types/embeddings.py +0 -35
  190. together/types/endpoints.py +0 -123
  191. together/types/error.py +0 -16
  192. together/types/files.py +0 -90
  193. together/types/finetune.py +0 -398
  194. together/types/images.py +0 -44
  195. together/types/models.py +0 -45
  196. together/types/rerank.py +0 -43
  197. together/utils/api_helpers.py +0 -124
  198. together/utils/files.py +0 -425
  199. together/version.py +0 -6
  200. together-1.5.17.dist-info/METADATA +0 -525
  201. together-1.5.17.dist-info/RECORD +0 -69
  202. together-1.5.17.dist-info/entry_points.txt +0 -3
  203. /together/{abstract → lib/cli}/__init__.py +0 -0
  204. /together/{cli → lib/cli/api}/__init__.py +0 -0
  205. /together/{cli/api/__init__.py → py.typed} +0 -0
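The remainder of the diff reproduces a selection of the newly added files. As the list above shows, the hand-written client and legacy modules (together/client.py, together/abstract/, together/legacy/) are replaced by a Stainless-generated layout (together/_client.py, together/_base_client.py, together/resources/). As a rough sketch of what using the generated 2.0.0a8 client is expected to look like — the Together export and the method path below are assumptions inferred from the resource modules listed above, not something this diff confirms:

# Hypothetical usage of the 2.0.0a8 client; names inferred from the
# resource modules in this diff (together/resources/chat/completions.py)
# and not verified against the released package.
from together import Together

client = Together()  # assumed to read TOGETHER_API_KEY from the environment

response = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)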
together/types/fine_tuning_list_response.py
@@ -0,0 +1,199 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Union, Optional
+ from datetime import datetime
+ from typing_extensions import Literal, TypeAlias
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .finetune_event import FinetuneEvent
+
+ __all__ = [
+     "FineTuningListResponse",
+     "Data",
+     "DataLrScheduler",
+     "DataLrSchedulerLrSchedulerArgs",
+     "DataLrSchedulerLrSchedulerArgsLinearLrSchedulerArgs",
+     "DataLrSchedulerLrSchedulerArgsCosineLrSchedulerArgs",
+     "DataTrainingMethod",
+     "DataTrainingMethodTrainingMethodSft",
+     "DataTrainingMethodTrainingMethodDpo",
+     "DataTrainingType",
+     "DataTrainingTypeFullTrainingType",
+     "DataTrainingTypeLoRaTrainingType",
+ ]
+
+
+ class DataLrSchedulerLrSchedulerArgsLinearLrSchedulerArgs(BaseModel):
+     min_lr_ratio: Optional[float] = None
+     """The ratio of the final learning rate to the peak learning rate"""
+
+
+ class DataLrSchedulerLrSchedulerArgsCosineLrSchedulerArgs(BaseModel):
+     min_lr_ratio: float
+     """The ratio of the final learning rate to the peak learning rate"""
+
+     num_cycles: float
+     """Number or fraction of cycles for the cosine learning rate scheduler"""
+
+
+ DataLrSchedulerLrSchedulerArgs: TypeAlias = Union[
+     DataLrSchedulerLrSchedulerArgsLinearLrSchedulerArgs, DataLrSchedulerLrSchedulerArgsCosineLrSchedulerArgs
+ ]
+
+
+ class DataLrScheduler(BaseModel):
+     lr_scheduler_type: Literal["linear", "cosine"]
+
+     lr_scheduler_args: Optional[DataLrSchedulerLrSchedulerArgs] = None
+
+
+ class DataTrainingMethodTrainingMethodSft(BaseModel):
+     method: Literal["sft"]
+
+     train_on_inputs: Union[bool, Literal["auto"]]
+     """
+     Whether to mask the user messages in conversational data or prompts in
+     instruction data.
+     """
+
+
+ class DataTrainingMethodTrainingMethodDpo(BaseModel):
+     method: Literal["dpo"]
+
+     dpo_beta: Optional[float] = None
+
+     dpo_normalize_logratios_by_length: Optional[bool] = None
+
+     dpo_reference_free: Optional[bool] = None
+
+     rpo_alpha: Optional[float] = None
+
+     simpo_gamma: Optional[float] = None
+
+
+ DataTrainingMethod: TypeAlias = Union[DataTrainingMethodTrainingMethodSft, DataTrainingMethodTrainingMethodDpo]
+
+
+ class DataTrainingTypeFullTrainingType(BaseModel):
+     type: Literal["Full"]
+
+
+ class DataTrainingTypeLoRaTrainingType(BaseModel):
+     lora_alpha: int
+
+     lora_r: int
+
+     type: Literal["Lora"]
+
+     lora_dropout: Optional[float] = None
+
+     lora_trainable_modules: Optional[str] = None
+
+
+ DataTrainingType: TypeAlias = Union[DataTrainingTypeFullTrainingType, DataTrainingTypeLoRaTrainingType]
+
+
+ class Data(BaseModel):
+     id: str
+     """Unique identifier for the fine-tune job"""
+
+     created_at: datetime
+     """Creation timestamp of the fine-tune job"""
+
+     status: Literal[
+         "pending",
+         "queued",
+         "running",
+         "compressing",
+         "uploading",
+         "cancel_requested",
+         "cancelled",
+         "error",
+         "completed",
+     ]
+
+     updated_at: datetime
+     """Last update timestamp of the fine-tune job"""
+
+     batch_size: Optional[int] = None
+     """Batch size used for training"""
+
+     events: Optional[List[FinetuneEvent]] = None
+     """Events related to this fine-tune job"""
+
+     from_checkpoint: Optional[str] = None
+     """Checkpoint used to continue training"""
+
+     from_hf_model: Optional[str] = None
+     """Hugging Face Hub repo to start training from"""
+
+     hf_model_revision: Optional[str] = None
+     """The revision of the Hugging Face Hub model to continue training from"""
+
+     learning_rate: Optional[float] = None
+     """Learning rate used for training"""
+
+     lr_scheduler: Optional[DataLrScheduler] = None
+     """Learning rate scheduler configuration"""
+
+     max_grad_norm: Optional[float] = None
+     """Maximum gradient norm for clipping"""
+
+     model: Optional[str] = None
+     """Base model used for fine-tuning"""
+
+     x_model_output_name: Optional[str] = FieldInfo(alias="model_output_name", default=None)
+
+     n_checkpoints: Optional[int] = None
+     """Number of checkpoints saved during training"""
+
+     n_epochs: Optional[int] = None
+     """Number of training epochs"""
+
+     n_evals: Optional[int] = None
+     """Number of evaluations during training"""
+
+     owner_address: Optional[str] = None
+     """Owner address information"""
+
+     suffix: Optional[str] = None
+     """Suffix added to the fine-tuned model name"""
+
+     token_count: Optional[int] = None
+     """Count of tokens processed"""
+
+     total_price: Optional[int] = None
+     """Total price for the fine-tuning job"""
+
+     training_file: Optional[str] = None
+     """File-ID of the training file"""
+
+     training_method: Optional[DataTrainingMethod] = None
+     """Method of training used"""
+
+     training_type: Optional[DataTrainingType] = None
+     """Type of training used (full or LoRA)"""
+
+     user_id: Optional[str] = None
+     """Identifier for the user who created the job"""
+
+     validation_file: Optional[str] = None
+     """File-ID of the validation file"""
+
+     wandb_name: Optional[str] = None
+     """Weights & Biases run name"""
+
+     wandb_project_name: Optional[str] = None
+     """Weights & Biases project name"""
+
+     warmup_ratio: Optional[float] = None
+     """Ratio of warmup steps"""
+
+     weight_decay: Optional[float] = None
+     """Weight decay value used"""
+
+
+ class FineTuningListResponse(BaseModel):
+     data: List[Data]
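Because these classes extend the SDK's pydantic-based BaseModel (together/_models.py), a raw JSON payload can be validated into typed objects. A minimal illustration with an invented payload, assuming a pydantic v2 runtime:

# Illustrative only: the payload values are made up.
from together.types.fine_tuning_list_response import FineTuningListResponse

payload = {
    "data": [
        {
            "id": "ft-0000",
            "status": "completed",
            "created_at": "2025-01-01T00:00:00Z",
            "updated_at": "2025-01-01T01:00:00Z",
            "training_type": {"type": "Lora", "lora_r": 8, "lora_alpha": 16},
        }
    ]
}

jobs = FineTuningListResponse.model_validate(payload)  # pydantic v2 API
for job in jobs.data:
    print(job.id, job.status, job.training_type)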
together/types/finetune_event.py
@@ -0,0 +1,41 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import Optional
+ from typing_extensions import Literal
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .finetune_event_type import FinetuneEventType
+
+ __all__ = ["FinetuneEvent"]
+
+
+ class FinetuneEvent(BaseModel):
+     checkpoint_path: str
+
+     created_at: str
+
+     hash: str
+
+     message: str
+
+     x_model_path: str = FieldInfo(alias="model_path")
+
+     object: Literal["fine-tune-event"]
+
+     param_count: int
+
+     step: int
+
+     token_count: int
+
+     total_steps: int
+
+     training_offset: int
+
+     type: FinetuneEventType
+
+     wandb_url: str
+
+     level: Optional[Literal["info", "warning", "error", "legacy_info", "legacy_iwarning", "legacy_ierror"]] = None
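FinetuneEvent is the event record embedded in the job responses via their events field. A small illustrative helper for pulling error messages out of a job:

# Illustrative: `job` is any object with an `events: Optional[List[FinetuneEvent]]`
# field, e.g. the Data or FinetuneResponse models in this diff.
def error_messages(job) -> list:
    return [
        event.message
        for event in (job.events or [])
        if event.type == "job_error" or event.level in ("error", "legacy_ierror")
    ]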
together/types/finetune_event_type.py
@@ -0,0 +1,33 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing_extensions import Literal, TypeAlias
+
+ __all__ = ["FinetuneEventType"]
+
+ FinetuneEventType: TypeAlias = Literal[
+     "job_pending",
+     "job_start",
+     "job_stopped",
+     "model_downloading",
+     "model_download_complete",
+     "training_data_downloading",
+     "training_data_download_complete",
+     "validation_data_downloading",
+     "validation_data_download_complete",
+     "wandb_init",
+     "training_start",
+     "checkpoint_save",
+     "billing_limit",
+     "epoch_complete",
+     "training_complete",
+     "model_compressing",
+     "model_compression_complete",
+     "model_uploading",
+     "model_upload_complete",
+     "job_complete",
+     "job_error",
+     "cancel_requested",
+     "job_restarted",
+     "refund",
+     "warning",
+ ]
together/types/finetune_response.py
@@ -0,0 +1,177 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Union, Optional
+ from typing_extensions import Literal, TypeAlias
+
+ from pydantic import Field as FieldInfo
+
+ from .._models import BaseModel
+ from .finetune_event import FinetuneEvent
+
+ __all__ = [
+     "FinetuneResponse",
+     "LrScheduler",
+     "LrSchedulerLrSchedulerArgs",
+     "LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs",
+     "LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs",
+     "TrainingMethod",
+     "TrainingMethodTrainingMethodSft",
+     "TrainingMethodTrainingMethodDpo",
+     "TrainingType",
+     "TrainingTypeFullTrainingType",
+     "TrainingTypeLoRaTrainingType",
+ ]
+
+
+ class LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs(BaseModel):
+     min_lr_ratio: Optional[float] = None
+     """The ratio of the final learning rate to the peak learning rate"""
+
+
+ class LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs(BaseModel):
+     min_lr_ratio: float
+     """The ratio of the final learning rate to the peak learning rate"""
+
+     num_cycles: float
+     """Number or fraction of cycles for the cosine learning rate scheduler"""
+
+
+ LrSchedulerLrSchedulerArgs: TypeAlias = Union[
+     LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs, LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs
+ ]
+
+
+ class LrScheduler(BaseModel):
+     lr_scheduler_type: Literal["linear", "cosine"]
+
+     lr_scheduler_args: Optional[LrSchedulerLrSchedulerArgs] = None
+
+
+ class TrainingMethodTrainingMethodSft(BaseModel):
+     method: Literal["sft"]
+
+     train_on_inputs: Union[bool, Literal["auto"]]
+     """
+     Whether to mask the user messages in conversational data or prompts in
+     instruction data.
+     """
+
+
+ class TrainingMethodTrainingMethodDpo(BaseModel):
+     method: Literal["dpo"]
+
+     dpo_beta: Optional[float] = None
+
+     dpo_normalize_logratios_by_length: Optional[bool] = None
+
+     dpo_reference_free: Optional[bool] = None
+
+     rpo_alpha: Optional[float] = None
+
+     simpo_gamma: Optional[float] = None
+
+
+ TrainingMethod: TypeAlias = Union[TrainingMethodTrainingMethodSft, TrainingMethodTrainingMethodDpo]
+
+
+ class TrainingTypeFullTrainingType(BaseModel):
+     type: Literal["Full"]
+
+
+ class TrainingTypeLoRaTrainingType(BaseModel):
+     lora_alpha: int
+
+     lora_r: int
+
+     type: Literal["Lora"]
+
+     lora_dropout: Optional[float] = None
+
+     lora_trainable_modules: Optional[str] = None
+
+
+ TrainingType: TypeAlias = Union[TrainingTypeFullTrainingType, TrainingTypeLoRaTrainingType]
+
+
+ class FinetuneResponse(BaseModel):
+     id: str
+
+     status: Literal[
+         "pending",
+         "queued",
+         "running",
+         "compressing",
+         "uploading",
+         "cancel_requested",
+         "cancelled",
+         "error",
+         "completed",
+     ]
+
+     batch_size: Union[int, Literal["max"], None] = None
+
+     created_at: Optional[str] = None
+
+     epochs_completed: Optional[int] = None
+
+     eval_steps: Optional[int] = None
+
+     events: Optional[List[FinetuneEvent]] = None
+
+     from_checkpoint: Optional[str] = None
+
+     from_hf_model: Optional[str] = None
+
+     hf_model_revision: Optional[str] = None
+
+     job_id: Optional[str] = None
+
+     learning_rate: Optional[float] = None
+
+     lr_scheduler: Optional[LrScheduler] = None
+
+     max_grad_norm: Optional[float] = None
+
+     model: Optional[str] = None
+
+     x_model_output_name: Optional[str] = FieldInfo(alias="model_output_name", default=None)
+
+     x_model_output_path: Optional[str] = FieldInfo(alias="model_output_path", default=None)
+
+     n_checkpoints: Optional[int] = None
+
+     n_epochs: Optional[int] = None
+
+     n_evals: Optional[int] = None
+
+     param_count: Optional[int] = None
+
+     queue_depth: Optional[int] = None
+
+     token_count: Optional[int] = None
+
+     total_price: Optional[int] = None
+
+     train_on_inputs: Union[bool, Literal["auto"], None] = None
+
+     training_file: Optional[str] = None
+
+     training_method: Optional[TrainingMethod] = None
+
+     training_type: Optional[TrainingType] = None
+
+     trainingfile_numlines: Optional[int] = None
+
+     trainingfile_size: Optional[int] = None
+
+     updated_at: Optional[str] = None
+
+     validation_file: Optional[str] = None
+
+     wandb_project_name: Optional[str] = None
+
+     wandb_url: Optional[str] = None
+
+     warmup_ratio: Optional[float] = None
+
+     weight_decay: Optional[float] = None
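TrainingType and TrainingMethod are plain (non-discriminated) unions here, so consumers narrow them with isinstance checks. An illustrative sketch:

# Illustrative: branch on the training-type union of a FinetuneResponse.
from together.types.finetune_response import (
    FinetuneResponse,
    TrainingTypeLoRaTrainingType,
)

def describe_training(ft: FinetuneResponse) -> str:
    if isinstance(ft.training_type, TrainingTypeLoRaTrainingType):
        return f"LoRA (r={ft.training_type.lora_r}, alpha={ft.training_type.lora_alpha})"
    if ft.training_type is not None:
        return "Full fine-tuning"
    return "unknown"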
together/types/hardware_list_params.py
@@ -0,0 +1,16 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing_extensions import TypedDict
+
+ __all__ = ["HardwareListParams"]
+
+
+ class HardwareListParams(TypedDict, total=False):
+     model: str
+     """Filter hardware configurations by model compatibility.
+
+     When provided, the response includes availability status for each compatible
+     configuration.
+     """
together/types/hardware_list_response.py
@@ -0,0 +1,58 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Optional
+ from datetime import datetime
+ from typing_extensions import Literal
+
+ from .._models import BaseModel
+
+ __all__ = ["HardwareListResponse", "Data", "DataPricing", "DataSpecs", "DataAvailability"]
+
+
+ class DataPricing(BaseModel):
+     cents_per_minute: float
+     """Cost per minute of endpoint uptime in cents"""
+
+
+ class DataSpecs(BaseModel):
+     gpu_count: int
+     """Number of GPUs in this configuration"""
+
+     gpu_link: str
+     """The GPU interconnect technology"""
+
+     gpu_memory: float
+     """Amount of GPU memory in GB"""
+
+     gpu_type: str
+     """The type/model of GPU"""
+
+
+ class DataAvailability(BaseModel):
+     status: Literal["available", "unavailable", "insufficient"]
+     """The availability status of the hardware configuration"""
+
+
+ class Data(BaseModel):
+     id: str
+     """Unique identifier for the hardware configuration"""
+
+     object: Literal["hardware"]
+
+     pricing: DataPricing
+     """Pricing details for using an endpoint"""
+
+     specs: DataSpecs
+     """Detailed specifications of a hardware configuration"""
+
+     updated_at: datetime
+     """Timestamp of when the hardware status was last updated"""
+
+     availability: Optional[DataAvailability] = None
+     """Indicates the current availability status of a hardware configuration"""
+
+
+ class HardwareListResponse(BaseModel):
+     data: List[Data]
+
+     object: Literal["list"]
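HardwareListParams and HardwareListResponse back the new hardware resource (together/resources/hardware.py). A sketch of listing availability for a model, assuming the generated client exposes this as client.hardware.list (method name inferred from the resource module, not confirmed by the diff):

# Assumed method name based on together/resources/hardware.py; not verified.
from together import Together

client = Together()
page = client.hardware.list(model="meta-llama/Llama-3.3-70B-Instruct-Turbo")
for hw in page.data:
    status = hw.availability.status if hw.availability else "unknown"
    print(hw.id, hw.specs.gpu_type, hw.specs.gpu_count, status)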
together/types/image_data_b64.py
@@ -0,0 +1,15 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing_extensions import Literal
+
+ from .._models import BaseModel
+
+ __all__ = ["ImageDataB64"]
+
+
+ class ImageDataB64(BaseModel):
+     b64_json: str
+
+     index: int
+
+     type: Literal["b64_json"]
together/types/image_data_url.py
@@ -0,0 +1,15 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing_extensions import Literal
+
+ from .._models import BaseModel
+
+ __all__ = ["ImageDataURL"]
+
+
+ class ImageDataURL(BaseModel):
+     index: int
+
+     type: Literal["url"]
+
+     url: str
together/types/image_file.py
@@ -0,0 +1,23 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from typing import List, Union
+ from typing_extensions import Literal, Annotated, TypeAlias
+
+ from .._utils import PropertyInfo
+ from .._models import BaseModel
+ from .image_data_b64 import ImageDataB64
+ from .image_data_url import ImageDataURL
+
+ __all__ = ["ImageFile", "Data"]
+
+ Data: TypeAlias = Annotated[Union[ImageDataB64, ImageDataURL], PropertyInfo(discriminator="type")]
+
+
+ class ImageFile(BaseModel):
+     id: str
+
+     data: List[Data]
+
+     model: str
+
+     object: Literal["list"]
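ImageFile.data is a discriminated union: PropertyInfo(discriminator="type") selects ImageDataB64 or ImageDataURL by the type field, so consumers narrow members with isinstance. An illustrative handler (the save logic is made up):

# Illustrative: handle both members of the ImageFile.data union.
import base64

from together.types.image_file import ImageFile
from together.types.image_data_b64 import ImageDataB64

def save_images(result: ImageFile) -> None:
    for item in result.data:
        if isinstance(item, ImageDataB64):
            with open(f"image_{item.index}.png", "wb") as f:
                f.write(base64.b64decode(item.b64_json))
        else:  # ImageDataURL
            print(f"image {item.index} available at {item.url}")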
together/types/image_generate_params.py
@@ -0,0 +1,85 @@
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+ from __future__ import annotations
+
+ from typing import Union, Iterable
+ from typing_extensions import Literal, Required, TypedDict
+
+ __all__ = ["ImageGenerateParams", "ImageLora"]
+
+
+ class ImageGenerateParams(TypedDict, total=False):
+     model: Required[
+         Union[
+             Literal[
+                 "black-forest-labs/FLUX.1-schnell-Free",
+                 "black-forest-labs/FLUX.1-schnell",
+                 "black-forest-labs/FLUX.1.1-pro",
+             ],
+             str,
+         ]
+     ]
+     """The model to use for image generation.
+
+     [See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models)
+     """
+
+     prompt: Required[str]
+     """A description of the desired images. Maximum length varies by model."""
+
+     disable_safety_checker: bool
+     """If true, disables the safety checker for image generation."""
+
+     guidance_scale: float
+     """Adjusts the alignment of the generated image with the input prompt.
+
+     Higher values (e.g., 8-10) make the output more faithful to the prompt, while
+     lower values (e.g., 1-5) encourage more creative freedom.
+     """
+
+     height: int
+     """Height of the image to generate in number of pixels."""
+
+     image_loras: Iterable[ImageLora]
+     """
+     An array of objects that define LoRAs (Low-Rank Adaptations) to influence the
+     generated image.
+     """
+
+     image_url: str
+     """URL of an image to use for image models that support it."""
+
+     n: int
+     """Number of image results to generate."""
+
+     negative_prompt: str
+     """The prompt or prompts not to guide the image generation."""
+
+     output_format: Literal["jpeg", "png"]
+     """The format of the image response.
+
+     Can be either be `jpeg` or `png`. Defaults to `jpeg`.
+     """
+
+     response_format: Literal["base64", "url"]
+     """Format of the image response. Can be either a base64 string or a URL."""
+
+     seed: int
+     """Seed used for generation. Can be used to reproduce image generations."""
+
+     steps: int
+     """Number of generation steps."""
+
+     width: int
+     """Width of the image to generate in number of pixels."""
+
+
+ class ImageLora(TypedDict, total=False):
+     path: Required[str]
+     """The URL of the LoRA to apply (e.g.
+
+     https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA).
+     """
+
+     scale: Required[float]
+     """The strength of the LoRA's influence. Most LoRA's recommend a value of 1."""