together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. together/__init__.py +101 -63
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +108 -75
  30. together/lib/cli/api/evals.py +588 -0
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
  33. together/lib/cli/api/models.py +140 -0
  34. together/{cli → lib/cli}/api/utils.py +6 -7
  35. together/{cli → lib/cli}/cli.py +16 -24
  36. together/{constants.py → lib/constants.py} +17 -12
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +455 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/lib/utils/files.py +628 -0
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -33
  50. together/resources/audio/__init__.py +72 -21
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -122
  53. together/resources/audio/transcriptions.py +282 -0
  54. together/resources/audio/translations.py +256 -0
  55. together/resources/audio/voices.py +135 -0
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +598 -395
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +398 -121
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +256 -108
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +238 -90
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +374 -0
  75. together/types/__init__.py +65 -109
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_job.py +57 -0
  157. together-2.0.0a8.dist-info/METADATA +680 -0
  158. together-2.0.0a8.dist-info/RECORD +164 -0
  159. {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
  160. together-2.0.0a8.dist-info/entry_points.txt +2 -0
  161. {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
  162. together/abstract/api_requestor.py +0 -729
  163. together/cli/api/chat.py +0 -276
  164. together/cli/api/completions.py +0 -119
  165. together/cli/api/images.py +0 -93
  166. together/cli/api/models.py +0 -55
  167. together/client.py +0 -176
  168. together/error.py +0 -194
  169. together/filemanager.py +0 -389
  170. together/legacy/__init__.py +0 -0
  171. together/legacy/base.py +0 -27
  172. together/legacy/complete.py +0 -93
  173. together/legacy/embeddings.py +0 -27
  174. together/legacy/files.py +0 -146
  175. together/legacy/finetune.py +0 -177
  176. together/legacy/images.py +0 -27
  177. together/legacy/models.py +0 -44
  178. together/resources/batch.py +0 -136
  179. together/resources/code_interpreter.py +0 -82
  180. together/resources/finetune.py +0 -1064
  181. together/together_response.py +0 -50
  182. together/types/abstract.py +0 -26
  183. together/types/audio_speech.py +0 -110
  184. together/types/batch.py +0 -53
  185. together/types/chat_completions.py +0 -197
  186. together/types/code_interpreter.py +0 -57
  187. together/types/common.py +0 -66
  188. together/types/completions.py +0 -107
  189. together/types/embeddings.py +0 -35
  190. together/types/endpoints.py +0 -123
  191. together/types/error.py +0 -16
  192. together/types/files.py +0 -90
  193. together/types/finetune.py +0 -398
  194. together/types/images.py +0 -44
  195. together/types/models.py +0 -45
  196. together/types/rerank.py +0 -43
  197. together/utils/api_helpers.py +0 -124
  198. together/utils/files.py +0 -425
  199. together/version.py +0 -6
  200. together-1.5.17.dist-info/METADATA +0 -525
  201. together-1.5.17.dist-info/RECORD +0 -69
  202. together-1.5.17.dist-info/entry_points.txt +0 -3
  203. /together/{abstract → lib/cli}/__init__.py +0 -0
  204. /together/{cli → lib/cli/api}/__init__.py +0 -0
  205. /together/{cli/api/__init__.py → py.typed} +0 -0
together/lib/resources/fine_tuning.py
@@ -0,0 +1,280 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Literal
+
+from rich import print as rprint
+
+from together.lib.utils import log_warn_once
+
+if TYPE_CHECKING:
+    from together import Together, AsyncTogether
+from together.lib.types.fine_tuning import (
+    TrainingType,
+    FinetuneRequest,
+    FullTrainingType,
+    LoRATrainingType,
+    CosineLRScheduler,
+    LinearLRScheduler,
+    TrainingMethodDPO,
+    TrainingMethodSFT,
+    FinetuneLRScheduler,
+    CosineLRSchedulerArgs,
+    LinearLRSchedulerArgs,
+    FinetuneTrainingLimits,
+)
+
+AVAILABLE_TRAINING_METHODS = {
+    "sft",
+    "dpo",
+}
+
+
+def create_finetune_request(
+    model_limits: FinetuneTrainingLimits,
+    training_file: str,
+    model: str | None = None,
+    n_epochs: int = 1,
+    validation_file: str | None = "",
+    n_evals: int | None = 0,
+    n_checkpoints: int | None = 1,
+    batch_size: int | Literal["max"] = "max",
+    learning_rate: float | None = 0.00001,
+    lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
+    min_lr_ratio: float | None = 0.0,
+    scheduler_num_cycles: float = 0.5,
+    warmup_ratio: float | None = None,
+    max_grad_norm: float = 1.0,
+    weight_decay: float | None = 0.0,
+    lora: bool = False,
+    lora_r: int | None = None,
+    lora_dropout: float | None = 0,
+    lora_alpha: float | None = None,
+    lora_trainable_modules: str | None = "all-linear",
+    suffix: str | None = None,
+    wandb_api_key: str | None = None,
+    wandb_base_url: str | None = None,
+    wandb_project_name: str | None = None,
+    wandb_name: str | None = None,
+    train_on_inputs: bool | Literal["auto"] | None = None,
+    training_method: str = "sft",
+    dpo_beta: float | None = None,
+    dpo_normalize_logratios_by_length: bool = False,
+    rpo_alpha: float | None = None,
+    simpo_gamma: float | None = None,
+    from_checkpoint: str | None = None,
+    from_hf_model: str | None = None,
+    hf_model_revision: str | None = None,
+    hf_api_token: str | None = None,
+    hf_output_repo_name: str | None = None,
+) -> FinetuneRequest:
+    if model is not None and from_checkpoint is not None:
+        raise ValueError("You must specify either a model or a checkpoint to start a job from, not both")
+
+    if model is None and from_checkpoint is None:
+        raise ValueError("You must specify either a model or a checkpoint")
+
+    if from_checkpoint is not None and from_hf_model is not None:
+        raise ValueError(
+            "You must specify either a Hugging Face Hub model or a previous checkpoint from "
+            "Together to start a job from, not both"
+        )
+
+    if from_hf_model is not None and model is None:
+        raise ValueError("You must specify the base model to fine-tune a model from the Hugging Face Hub")
+
+    model_or_checkpoint = model or from_checkpoint
+
+    if warmup_ratio is None:
+        warmup_ratio = 0.0
+
+    training_type: TrainingType = FullTrainingType()
+    if lora:
+        if model_limits.lora_training is None:
+            raise ValueError(f"LoRA adapters are not supported for the selected model ({model_or_checkpoint}).")
+
+        if lora_dropout is not None:
+            if not 0 <= lora_dropout < 1.0:
+                raise ValueError("LoRA dropout must be in [0, 1) range.")
+
+        lora_r = lora_r if lora_r is not None else model_limits.lora_training.max_rank
+        lora_alpha = lora_alpha if lora_alpha is not None else lora_r * 2
+        training_type = LoRATrainingType(
+            lora_r=lora_r,
+            lora_alpha=int(lora_alpha),
+            lora_dropout=lora_dropout or 0.0,
+            lora_trainable_modules=lora_trainable_modules or "all-linear",
+        )
+
+        max_batch_size = model_limits.lora_training.max_batch_size
+        min_batch_size = model_limits.lora_training.min_batch_size
+        max_batch_size_dpo = model_limits.lora_training.max_batch_size_dpo
+    else:
+        if model_limits.full_training is None:
+            raise ValueError(f"Full training is not supported for the selected model ({model_or_checkpoint}).")
+
+        max_batch_size = model_limits.full_training.max_batch_size
+        min_batch_size = model_limits.full_training.min_batch_size
+        max_batch_size_dpo = model_limits.full_training.max_batch_size_dpo
+
+    if batch_size != "max":
+        if training_method == "sft":
+            if batch_size > max_batch_size:
+                raise ValueError(
+                    f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size}."
+                )
+        elif training_method == "dpo":
+            if batch_size > max_batch_size_dpo:
+                raise ValueError(
+                    f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size_dpo}."
+                )
+
+        if batch_size < min_batch_size:
+            raise ValueError(
+                f"Requested batch size of {batch_size} is lower that the minimum allowed value of {min_batch_size}."
+            )
+
+    if warmup_ratio > 1 or warmup_ratio < 0:
+        raise ValueError(f"Warmup ratio should be between 0 and 1 (got {warmup_ratio})")
+
+    if min_lr_ratio is not None and (min_lr_ratio > 1 or min_lr_ratio < 0):
+        raise ValueError(f"Min learning rate ratio should be between 0 and 1 (got {min_lr_ratio})")
+
+    if max_grad_norm < 0:
+        raise ValueError(f"Max gradient norm should be non-negative (got {max_grad_norm})")
+
+    if weight_decay is not None and (weight_decay < 0):
+        raise ValueError(f"Weight decay should be non-negative (got {weight_decay})")
+
+    if training_method not in AVAILABLE_TRAINING_METHODS:
+        raise ValueError(f"training_method must be one of {', '.join(AVAILABLE_TRAINING_METHODS)}")
+
+    if train_on_inputs is not None and training_method != "sft":
+        raise ValueError("train_on_inputs is only supported for SFT training")
+
+    if dpo_beta is not None and training_method != "dpo":
+        raise ValueError("dpo_beta is only supported for DPO training")
+    if dpo_normalize_logratios_by_length and training_method != "dpo":
+        raise ValueError("dpo_normalize_logratios_by_length=True is only supported for DPO training")
+    if rpo_alpha is not None:
+        if training_method != "dpo":
+            raise ValueError("rpo_alpha is only supported for DPO training")
+        if not rpo_alpha >= 0.0:
+            raise ValueError(f"rpo_alpha should be non-negative (got {rpo_alpha})")
+
+    if simpo_gamma is not None:
+        if training_method != "dpo":
+            raise ValueError("simpo_gamma is only supported for DPO training")
+        if not simpo_gamma >= 0.0:
+            raise ValueError(f"simpo_gamma should be non-negative (got {simpo_gamma})")
+
+    lr_scheduler: FinetuneLRScheduler
+    if lr_scheduler_type == "cosine":
+        if scheduler_num_cycles <= 0.0:
+            raise ValueError(f"Number of cycles should be greater than 0 (got {scheduler_num_cycles})")
+
+        lr_scheduler = CosineLRScheduler(
+            lr_scheduler_args=CosineLRSchedulerArgs(min_lr_ratio=min_lr_ratio, num_cycles=scheduler_num_cycles),
+        )
+    else:
+        lr_scheduler = LinearLRScheduler(
+            lr_scheduler_args=LinearLRSchedulerArgs(min_lr_ratio=min_lr_ratio),
+        )
+
+    training_method_cls: TrainingMethodSFT | TrainingMethodDPO
+    if training_method == "sft":
+        if train_on_inputs is None:
+            log_warn_once("train_on_inputs is not set for SFT training, it will be set to 'auto'")
+            train_on_inputs = "auto"
+        training_method_cls = TrainingMethodSFT(train_on_inputs=train_on_inputs)
+    elif training_method == "dpo":
+        if simpo_gamma is not None and simpo_gamma > 0:
+            dpo_reference_free = True
+            dpo_normalize_logratios_by_length = True
+            rprint(
+                f"Parameter simpo_gamma was set to {simpo_gamma}. "
+                "SimPO training detected. Reference logits will not be used "
+                "and length normalization of log-probabilities will be enabled."
+            )
+        else:
+            dpo_reference_free = False
+
+        training_method_cls = TrainingMethodDPO(
+            dpo_beta=dpo_beta,
+            dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+            dpo_reference_free=dpo_reference_free,
+            rpo_alpha=rpo_alpha,
+            simpo_gamma=simpo_gamma,
+        )
+
+    finetune_request = FinetuneRequest(
+        model=model,
+        training_file=training_file,
+        validation_file=validation_file,
+        n_epochs=n_epochs,
+        n_evals=n_evals,
+        n_checkpoints=n_checkpoints,
+        batch_size=batch_size,
+        learning_rate=learning_rate or 0.00001,
+        lr_scheduler=lr_scheduler,
+        warmup_ratio=warmup_ratio,
+        max_grad_norm=max_grad_norm,
+        weight_decay=weight_decay or 0.0,
+        training_type=training_type,
+        suffix=suffix,
+        wandb_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
+        training_method=training_method_cls,  # pyright: ignore[reportPossiblyUnboundVariable]
+        from_checkpoint=from_checkpoint,
+        from_hf_model=from_hf_model,
+        hf_model_revision=hf_model_revision,
+        hf_api_token=hf_api_token,
+        hf_output_repo_name=hf_output_repo_name,
+    )
+
+    return finetune_request
+
+
+def get_model_limits(client: Together, model: str) -> FinetuneTrainingLimits:
+    """
+    Requests training limits for a specific model
+
+    Args:
+        model_name (str): Name of the model to get limits for
+
+    Returns:
+        FinetuneTrainingLimits: Object containing training limits for the model
+    """
+
+    response = client.get(
+        "/fine-tunes/models/limits",
+        cast_to=FinetuneTrainingLimits,
+        options={
+            "params": {"model_name": model},
+        },
+    )
+
+    return response
+
+
+async def async_get_model_limits(client: AsyncTogether, model: str) -> FinetuneTrainingLimits:
+    """
+    Requests training limits for a specific model
+
+    Args:
+        model_name (str): Name of the model to get limits for
+
+    Returns:
+        FinetuneTrainingLimits: Object containing training limits for the model
+    """
+
+    response = await client.get(
+        "/fine-tunes/models/limits",
+        cast_to=FinetuneTrainingLimits,
+        options={
+            "params": {"model_name": model},
+        },
+    )
+
+    return response
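For orientation, here is a minimal sketch of how the new lib helpers above could be wired together, assuming a configured Together client (e.g. with TOGETHER_API_KEY set in the environment). The model name and file ID below are placeholders for illustration, not values taken from this diff.

    from together import Together
    from together.lib.resources.fine_tuning import create_finetune_request, get_model_limits

    client = Together()  # assumes TOGETHER_API_KEY is set in the environment

    # Placeholder model and file identifiers, for illustration only.
    limits = get_model_limits(client, "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference")
    request = create_finetune_request(
        model_limits=limits,
        training_file="file-abc123",
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
        n_epochs=3,
        lora=True,
    )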
together/lib/resources/models.py
@@ -0,0 +1,35 @@
+from typing import List
+from typing_extensions import TypeAlias
+
+from together import Together
+from together._models import BaseModel
+from together.types.model_list_response import ModelListResponse
+
+
+class DedicatedModel(BaseModel):
+    name: str
+    id: str
+
+
+ModelList: TypeAlias = List[DedicatedModel]
+
+
+def filter_by_dedicated_models(client: Together, models: ModelListResponse) -> ModelListResponse:
+    """
+    Filter models based on dedicated model response.
+
+    Args:
+        models (List[BaseModel]): List of all models
+        dedicated_response (APIResponse): Response from autoscale models endpoint
+
+    Returns:
+        List[BaseModel]: Filtered list of models
+    """
+    dedicated_models = client.get("/autoscale/models", cast_to=ModelList)
+
+    # Create a set of dedicated model names for efficient lookup
+    dedicated_model_names = {model.name for model in dedicated_models}
+
+    # Filter models to only include those in dedicated_model_names
+    # Note: The model.id from ModelObject matches the name field in the autoscale response
+    return [model for model in models if model.id in dedicated_model_names]
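Similarly, a short usage sketch for filter_by_dedicated_models; the client.models.list() call is an assumption about the generated resource API (not shown in this hunk), used only to obtain a ModelListResponse to filter.

    from together import Together
    from together.lib.resources.models import filter_by_dedicated_models

    client = Together()
    all_models = client.models.list()  # assumed to return a ModelListResponse
    dedicated_only = filter_by_dedicated_models(client, all_models)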
together/lib/types/__init__.py
@@ -0,0 +1,13 @@
+from .error import (
+    DownloadError,
+    FileTypeError,
+)
+from .fine_tuning import (
+    FinetuneTrainingLimits,
+)
+
+__all__ = [
+    "FinetuneTrainingLimits",
+    "DownloadError",
+    "FileTypeError",
+]
together/lib/types/error.py
@@ -0,0 +1,9 @@
+from ..._exceptions import TogetherError
+
+
+class DownloadError(TogetherError):
+    pass
+
+
+class FileTypeError(TogetherError):
+    pass