together-1.5.24-py3-none-any.whl → together-1.5.26-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/resources/evaluation.py CHANGED
@@ -27,9 +27,11 @@ class Evaluation:
     def create(
         self,
         type: str,
-        judge_model_name: str,
+        judge_model: str,
+        judge_model_source: str,
         judge_system_template: str,
         input_data_file_path: str,
+        judge_external_api_token: Optional[str] = None,
         # Classify-specific parameters
         labels: Optional[List[str]] = None,
         pass_labels: Optional[List[str]] = None,
@@ -48,9 +50,11 @@ class Evaluation:
 
         Args:
             type: The type of evaluation ("classify", "score", or "compare")
-            judge_model_name: Name of the judge model
+            judge_model: Name or URL of the judge model
+            judge_model_source: Source of the judge model ("serverless", "dedicated", or "external")
             judge_system_template: System template for the judge
             input_data_file_path: Path to input data file
+            judge_external_api_token: Optional external API token for the judge model
             labels: List of classification labels (required for classify)
             pass_labels: List of labels considered as passing (required for classify)
             min_score: Minimum score value (required for score)
@@ -67,10 +71,17 @@ class Evaluation:
             client=self._client,
         )
 
+        if judge_model_source == "external" and not judge_external_api_token:
+            raise ValueError(
+                "judge_external_api_token is required when judge_model_source is 'external'"
+            )
+
         # Build judge config
         judge_config = JudgeModelConfig(
-            model_name=judge_model_name,
+            model=judge_model,
+            model_source=judge_model_source,
             system_template=judge_system_template,
+            external_api_token=judge_external_api_token,
         )
         parameters: Union[ClassifyParameters, ScoreParameters, CompareParameters]
         # Build parameters based on type
@@ -112,7 +123,8 @@ class Evaluation:
             elif isinstance(model_to_evaluate, dict):
                 # Validate that all required fields are present for model config
                 required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                     "max_tokens",
                     "temperature",
                     "system_template",
@@ -128,6 +140,12 @@ class Evaluation:
                         f"All model config parameters are required when using detailed configuration. "
                         f"Missing: {', '.join(missing_fields)}"
                     )
+                if model_to_evaluate.get(
+                    "model_source"
+                ) == "external" and not model_to_evaluate.get("external_api_token"):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_to_evaluate"
+                    )
                 parameters.model_to_evaluate = ModelRequest(**model_to_evaluate)
 
         elif type == "score":
@@ -163,7 +181,8 @@ class Evaluation:
             elif isinstance(model_to_evaluate, dict):
                 # Validate that all required fields are present for model config
                 required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                     "max_tokens",
                     "temperature",
                     "system_template",
@@ -179,6 +198,12 @@ class Evaluation:
                         f"All model config parameters are required when using detailed configuration. "
                         f"Missing: {', '.join(missing_fields)}"
                     )
+                if model_to_evaluate.get(
+                    "model_source"
+                ) == "external" and not model_to_evaluate.get("external_api_token"):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_to_evaluate"
+                    )
                 parameters.model_to_evaluate = ModelRequest(**model_to_evaluate)
 
         elif type == "compare":
@@ -223,7 +248,8 @@ class Evaluation:
            elif isinstance(model_a, dict):
                # Validate that all required fields are present for model config
                required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                    "max_tokens",
                    "temperature",
                    "system_template",
@@ -237,6 +263,12 @@ class Evaluation:
                        f"All model config parameters are required for model_a when using detailed configuration. "
                        f"Missing: {', '.join(missing_fields)}"
                    )
+                if model_a.get("model_source") == "external" and not model_a.get(
+                    "external_api_token"
+                ):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_a"
+                    )
                parameters.model_a = ModelRequest(**model_a)
 
            # Handle model_b
@@ -245,7 +277,8 @@ class Evaluation:
            elif isinstance(model_b, dict):
                # Validate that all required fields are present for model config
                required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                    "max_tokens",
                    "temperature",
                    "system_template",
@@ -259,6 +292,12 @@ class Evaluation:
                        f"All model config parameters are required for model_b when using detailed configuration. "
                        f"Missing: {', '.join(missing_fields)}"
                    )
+                if model_b.get("model_source") == "external" and not model_b.get(
+                    "external_api_token"
+                ):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_b"
+                    )
                parameters.model_b = ModelRequest(**model_b)
 
        else:
@@ -379,9 +418,11 @@ class AsyncEvaluation:
     async def create(
         self,
         type: str,
-        judge_model_name: str,
+        judge_model: str,
+        judge_model_source: str,
         judge_system_template: str,
         input_data_file_path: str,
+        judge_external_api_token: Optional[str] = None,
         # Classify-specific parameters
         labels: Optional[List[str]] = None,
         pass_labels: Optional[List[str]] = None,
@@ -400,9 +441,11 @@ class AsyncEvaluation:
 
         Args:
             type: The type of evaluation ("classify", "score", or "compare")
-            judge_model_name: Name of the judge model
+            judge_model: Name or URL of the judge model
+            judge_model_source: Source of the judge model ("serverless", "dedicated", or "external")
             judge_system_template: System template for the judge
             input_data_file_path: Path to input data file
+            judge_external_api_token: Optional external API token for the judge model
             labels: List of classification labels (required for classify)
             pass_labels: List of labels considered as passing (required for classify)
             min_score: Minimum score value (required for score)
@@ -419,10 +462,17 @@ class AsyncEvaluation:
             client=self._client,
         )
 
+        if judge_model_source == "external" and not judge_external_api_token:
+            raise ValueError(
+                "judge_external_api_token is required when judge_model_source is 'external'"
+            )
+
         # Build judge config
         judge_config = JudgeModelConfig(
-            model_name=judge_model_name,
+            model=judge_model,
+            model_source=judge_model_source,
             system_template=judge_system_template,
+            external_api_token=judge_external_api_token,
         )
         parameters: Union[ClassifyParameters, ScoreParameters, CompareParameters]
         # Build parameters based on type
@@ -464,7 +514,8 @@ class AsyncEvaluation:
             elif isinstance(model_to_evaluate, dict):
                 # Validate that all required fields are present for model config
                 required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                     "max_tokens",
                     "temperature",
                     "system_template",
@@ -480,6 +531,12 @@ class AsyncEvaluation:
                         f"All model config parameters are required when using detailed configuration. "
                         f"Missing: {', '.join(missing_fields)}"
                     )
+                if model_to_evaluate.get(
+                    "model_source"
+                ) == "external" and not model_to_evaluate.get("external_api_token"):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_to_evaluate"
+                    )
                 parameters.model_to_evaluate = ModelRequest(**model_to_evaluate)
 
         elif type == "score":
@@ -515,7 +572,8 @@ class AsyncEvaluation:
             elif isinstance(model_to_evaluate, dict):
                 # Validate that all required fields are present for model config
                 required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                     "max_tokens",
                     "temperature",
                     "system_template",
@@ -531,6 +589,12 @@ class AsyncEvaluation:
                         f"All model config parameters are required when using detailed configuration. "
                         f"Missing: {', '.join(missing_fields)}"
                     )
+                if model_to_evaluate.get(
+                    "model_source"
+                ) == "external" and not model_to_evaluate.get("external_api_token"):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_to_evaluate"
+                    )
                 parameters.model_to_evaluate = ModelRequest(**model_to_evaluate)
 
         elif type == "compare":
@@ -575,7 +639,8 @@ class AsyncEvaluation:
            elif isinstance(model_a, dict):
                # Validate that all required fields are present for model config
                required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                    "max_tokens",
                    "temperature",
                    "system_template",
@@ -589,6 +654,12 @@ class AsyncEvaluation:
                        f"All model config parameters are required for model_a when using detailed configuration. "
                        f"Missing: {', '.join(missing_fields)}"
                    )
+                if model_a.get("model_source") == "external" and not model_a.get(
+                    "external_api_token"
+                ):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_a"
+                    )
                parameters.model_a = ModelRequest(**model_a)
 
            # Handle model_b
@@ -597,7 +668,8 @@ class AsyncEvaluation:
            elif isinstance(model_b, dict):
                # Validate that all required fields are present for model config
                required_fields = [
-                    "model_name",
+                    "model",
+                    "model_source",
                    "max_tokens",
                    "temperature",
                    "system_template",
@@ -611,6 +683,12 @@ class AsyncEvaluation:
                        f"All model config parameters are required for model_b when using detailed configuration. "
                        f"Missing: {', '.join(missing_fields)}"
                    )
+                if model_b.get("model_source") == "external" and not model_b.get(
+                    "external_api_token"
+                ):
+                    raise ValueError(
+                        "external_api_token is required when model_source is 'external' for model_b"
+                    )
                parameters.model_b = ModelRequest(**model_b)
 
        else:
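
In 1.5.26 the judge is described by two fields instead of one: judge_model (a model name, or a URL when the judge runs outside Together) and judge_model_source, with judge_external_api_token required only for the "external" source. A minimal usage sketch; the parameter names come from the diff above, while the client entry point client.evaluation.create, the endpoint URL, and the token are assumptions or placeholders:

from together import Together

client = Together()  # reads TOGETHER_API_KEY from the environment

# An "external" judge must bring its own token; omitting
# judge_external_api_token here would raise the new ValueError.
evaluation = client.evaluation.create(
    type="classify",
    judge_model="https://api.example.com/v1",  # placeholder external endpoint
    judge_model_source="external",
    judge_external_api_token="sk-placeholder",
    judge_system_template="Classify the answer as correct or incorrect.",
    input_data_file_path="eval_inputs.jsonl",
    labels=["correct", "incorrect"],
    pass_labels=["correct"],
)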
together/resources/files.py CHANGED
@@ -1,11 +1,13 @@
 from __future__ import annotations
 
+import os
 from pathlib import Path
 from pprint import pformat
 
 from together.abstract import api_requestor
+from together.constants import MULTIPART_THRESHOLD_GB, NUM_BYTES_IN_GB
 from together.error import FileTypeError
-from together.filemanager import DownloadManager, UploadManager
+from together.filemanager import DownloadManager, UploadManager, MultipartUploadManager
 from together.together_response import TogetherResponse
 from together.types import (
     FileDeleteResponse,
@@ -30,7 +32,6 @@ class Files:
         purpose: FilePurpose | str = FilePurpose.FineTune,
         check: bool = True,
     ) -> FileResponse:
-        upload_manager = UploadManager(self._client)
 
         if check and purpose == FilePurpose.FineTune:
             report_dict = check_file(file)
@@ -47,7 +48,15 @@ class Files:
 
         assert isinstance(purpose, FilePurpose)
 
-        return upload_manager.upload("files", file, purpose=purpose, redirect=True)
+        file_size = os.stat(file).st_size
+        file_size_gb = file_size / NUM_BYTES_IN_GB
+
+        if file_size_gb > MULTIPART_THRESHOLD_GB:
+            multipart_manager = MultipartUploadManager(self._client)
+            return multipart_manager.upload("files", file, purpose)
+        else:
+            upload_manager = UploadManager(self._client)
+            return upload_manager.upload("files", file, purpose=purpose, redirect=True)
 
     def list(self) -> FileList:
         requestor = api_requestor.APIRequestor(
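
The caller's side of uploads is unchanged; only the routing inside upload is new. A sketch of what the branch above means in practice, assuming the usual client.files.upload entry point (the exact value of MULTIPART_THRESHOLD_GB lives in together.constants and is not shown in this diff):

from together import Together

client = Together()

# Same call as in 1.5.24. Files at or below the threshold still go through
# UploadManager; anything larger is now sent via MultipartUploadManager,
# with no change required at the call site.
resp = client.files.upload(file="train.jsonl", check=True)
print(resp.id)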
together/resources/finetune.py CHANGED
@@ -13,6 +13,7 @@ from together.types import (
     CosineLRScheduler,
     CosineLRSchedulerArgs,
     FinetuneCheckpoint,
+    FinetuneDeleteResponse,
     FinetuneDownloadResult,
     FinetuneList,
     FinetuneListEvents,
@@ -570,6 +571,37 @@ class FineTuning:
 
         return FinetuneResponse(**response.data)
 
+    def delete(self, id: str, force: bool = False) -> FinetuneDeleteResponse:
+        """
+        Method to delete a fine-tuning job
+
+        Args:
+            id (str): Fine-tune ID to delete. A string that starts with `ft-`.
+            force (bool, optional): Force deletion. Defaults to False.
+
+        Returns:
+            FinetuneDeleteResponse: Object containing deletion confirmation message.
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        params = {"force": str(force).lower()}
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="DELETE",
+                url=f"fine-tunes/{id}",
+                params=params,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+
+        return FinetuneDeleteResponse(**response.data)
+
     def list_events(self, id: str) -> FinetuneListEvents:
         """
         Lists events of a fine-tune job
@@ -1007,6 +1039,37 @@ class AsyncFineTuning:
 
         return FinetuneResponse(**response.data)
 
+    async def delete(self, id: str, force: bool = False) -> FinetuneDeleteResponse:
+        """
+        Async method to delete a fine-tuning job
+
+        Args:
+            id (str): Fine-tune ID to delete. A string that starts with `ft-`.
+            force (bool, optional): Force deletion. Defaults to False.
+
+        Returns:
+            FinetuneDeleteResponse: Object containing deletion confirmation message.
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        params = {"force": str(force).lower()}
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="DELETE",
+                url=f"fine-tunes/{id}",
+                params=params,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+
+        return FinetuneDeleteResponse(**response.data)
+
     async def list_events(self, id: str) -> FinetuneListEvents:
         """
         List fine-tuning events
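
Both classes gain the same delete signature, so jobs can be removed from sync or async code. A hedged usage sketch; client.fine_tuning is the assumed attribute for the FineTuning resource above, and the job ID is a placeholder:

from together import Together

client = Together()

# force=True is serialized as the query parameter ?force=true
# on DELETE fine-tunes/{id}
result = client.fine_tuning.delete("ft-...", force=True)  # placeholder ID
print(result.message)  # FinetuneDeleteResponse carries only a message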
together/resources/models.py CHANGED
@@ -6,6 +6,8 @@ from together.abstract import api_requestor
 from together.together_response import TogetherResponse
 from together.types import (
     ModelObject,
+    ModelUploadRequest,
+    ModelUploadResponse,
     TogetherClient,
     TogetherRequest,
 )
@@ -85,6 +87,64 @@ class Models(ModelsBase):
 
         return models
 
+    def upload(
+        self,
+        *,
+        model_name: str,
+        model_source: str,
+        model_type: str = "model",
+        hf_token: str | None = None,
+        description: str | None = None,
+        base_model: str | None = None,
+        lora_model: str | None = None,
+    ) -> ModelUploadResponse:
+        """
+        Upload a custom model or adapter from Hugging Face or S3.
+
+        Args:
+            model_name (str): The name to give to your uploaded model
+            model_source (str): The source location of the model (Hugging Face repo or S3 path)
+            model_type (str, optional): Whether the model is a full model or an adapter. Defaults to "model".
+            hf_token (str, optional): Hugging Face token (if uploading from Hugging Face)
+            description (str, optional): A description of your model
+            base_model (str, optional): The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type "adapter".
+            lora_model (str, optional): The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type "adapter".
+
+        Returns:
+            ModelUploadResponse: Object containing upload job information
+        """
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        data = {
+            "model_name": model_name,
+            "model_source": model_source,
+            "model_type": model_type,
+        }
+
+        if hf_token is not None:
+            data["hf_token"] = hf_token
+        if description is not None:
+            data["description"] = description
+        if base_model is not None:
+            data["base_model"] = base_model
+        if lora_model is not None:
+            data["lora_model"] = lora_model
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="POST",
+                url="models",
+                params=data,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+
+        return ModelUploadResponse.from_api_response(response.data)
+
 
 class AsyncModels(ModelsBase):
     async def list(
@@ -132,3 +192,61 @@ class AsyncModels(ModelsBase):
     models.sort(key=lambda x: x.id.lower())
 
     return models
+
+    async def upload(
+        self,
+        *,
+        model_name: str,
+        model_source: str,
+        model_type: str = "model",
+        hf_token: str | None = None,
+        description: str | None = None,
+        base_model: str | None = None,
+        lora_model: str | None = None,
+    ) -> ModelUploadResponse:
+        """
+        Upload a custom model or adapter from Hugging Face or S3.
+
+        Args:
+            model_name (str): The name to give to your uploaded model
+            model_source (str): The source location of the model (Hugging Face repo or S3 path)
+            model_type (str, optional): Whether the model is a full model or an adapter. Defaults to "model".
+            hf_token (str, optional): Hugging Face token (if uploading from Hugging Face)
+            description (str, optional): A description of your model
+            base_model (str, optional): The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type "adapter".
+            lora_model (str, optional): The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type "adapter".
+
+        Returns:
+            ModelUploadResponse: Object containing upload job information
+        """
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        data = {
+            "model_name": model_name,
+            "model_source": model_source,
+            "model_type": model_type,
+        }
+
+        if hf_token is not None:
+            data["hf_token"] = hf_token
+        if description is not None:
+            data["description"] = description
+        if base_model is not None:
+            data["base_model"] = base_model
+        if lora_model is not None:
+            data["lora_model"] = lora_model
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="POST",
+                url="models",
+                params=data,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+
+        return ModelUploadResponse.from_api_response(response.data)
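
A sketch of the new upload call, assuming the resource is exposed as client.models; the model names and token are placeholders. Despite the name, nothing is transferred from local disk: the endpoint registers a model that already lives on Hugging Face or S3.

from together import Together

client = Together()

job = client.models.upload(
    model_name="my-finetuned-model",  # placeholder
    model_source="my-org/my-model",   # HF repo or an S3 path
    model_type="model",
    hf_token="hf_placeholder",        # only needed for private HF repos
    description="Example upload",
)
print(job.job_id, job.message)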
together/types/__init__.py CHANGED
@@ -52,13 +52,14 @@ from together.types.finetune import (
     FinetuneListEvents,
     FinetuneRequest,
     FinetuneResponse,
+    FinetuneDeleteResponse,
     FinetuneTrainingLimits,
     FullTrainingType,
     LoRATrainingType,
     TrainingType,
 )
 from together.types.images import ImageRequest, ImageResponse
-from together.types.models import ModelObject
+from together.types.models import ModelObject, ModelUploadRequest, ModelUploadResponse
 from together.types.rerank import RerankRequest, RerankResponse
 from together.types.batch import BatchJob, BatchJobStatus, BatchEndpoint
 from together.types.evaluation import (
@@ -92,6 +93,7 @@ __all__ = [
     "FinetuneResponse",
     "FinetuneList",
     "FinetuneListEvents",
+    "FinetuneDeleteResponse",
     "FinetuneDownloadResult",
     "FinetuneLRScheduler",
     "LinearLRScheduler",
@@ -108,6 +110,8 @@ __all__ = [
     "ImageRequest",
     "ImageResponse",
     "ModelObject",
+    "ModelUploadRequest",
+    "ModelUploadResponse",
     "TrainingType",
     "FullTrainingType",
     "LoRATrainingType",
together/types/batch.py CHANGED
@@ -20,6 +20,7 @@ class BatchJobStatus(str, Enum):
     FAILED = "FAILED"
     EXPIRED = "EXPIRED"
     CANCELLED = "CANCELLED"
+    CANCELING = "CANCELING"
 
 
 class BatchEndpoint(str, Enum):
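
CANCELING marks a cancellation that has been requested but not yet completed, distinct from the terminal CANCELLED. A small sketch of how polling code might treat it; the COMPLETED member is an assumption based on the usual batch lifecycle, since this hunk only shows the tail of the enum:

from together.types import BatchJobStatus

TERMINAL = {
    BatchJobStatus.COMPLETED,  # assumed member, not visible in this hunk
    BatchJobStatus.FAILED,
    BatchJobStatus.EXPIRED,
    BatchJobStatus.CANCELLED,
}

def should_keep_polling(status: BatchJobStatus) -> bool:
    # CANCELING is deliberately non-terminal: the job is still winding down
    return status not in TERMINAL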
together/types/evaluation.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from datetime import datetime
 from enum import Enum
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Union
 
 from pydantic import BaseModel, Field
 
@@ -23,16 +23,20 @@ class EvaluationStatus(str, Enum):
 
 
 class JudgeModelConfig(BaseModel):
-    model_name: str
+    model: str
+    model_source: Literal["serverless", "dedicated", "external"]
     system_template: str
+    external_api_token: Optional[str] = None
 
 
 class ModelRequest(BaseModel):
-    model_name: str
+    model: str
+    model_source: Literal["serverless", "dedicated", "external"]
     max_tokens: int
     temperature: float
     system_template: str
     input_template: str
+    external_api_token: Optional[str] = None
 
 
 class ClassifyParameters(BaseModel):
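
Because model_source is a Literal, pydantic now rejects unknown sources at validation time. Constructing the updated models directly, with placeholder values throughout:

from together.types.evaluation import JudgeModelConfig, ModelRequest

judge = JudgeModelConfig(
    model="placeholder-judge-model",  # a name for serverless/dedicated, a URL for external
    model_source="serverless",        # anything outside the three literals fails validation
    system_template="You are an impartial judge.",
)

candidate = ModelRequest(
    model="https://api.example.com/v1",  # placeholder external endpoint
    model_source="external",
    max_tokens=256,
    temperature=0.0,
    system_template="Answer the question.",
    input_template="{input}",
    external_api_token="sk-placeholder",  # optional at the type level; create() enforces it
)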
together/types/files.py CHANGED
@@ -52,7 +52,7 @@ class FileResponse(BaseModel):
     """
 
     id: str
-    object: Literal[ObjectType.File]
+    object: str
     # created timestamp
     created_at: int | None = None
     type: FileType | None = None
together/types/finetune.py CHANGED
@@ -322,6 +322,11 @@ class FinetuneListEvents(BaseModel):
     data: List[FinetuneEvent] | None = None
 
 
+class FinetuneDeleteResponse(BaseModel):
+    # delete message
+    message: str
+
+
 class FinetuneDownloadResult(BaseModel):
     # object type
     object: Literal["local"] | None = None
together/types/models.py CHANGED
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from enum import Enum
-from typing import Literal
+from typing import Any, Dict, Literal, Optional
 
 from together.types.abstract import BaseModel
 from together.types.common import ObjectType
@@ -44,3 +44,52 @@ class ModelObject(BaseModel):
     license: str | None = None
     context_length: int | None = None
     pricing: PricingObject
+
+
+class ModelUploadRequest(BaseModel):
+    model_name: str
+    model_source: str
+    model_type: Literal["model", "adapter"] = "model"
+    hf_token: Optional[str] = None
+    description: Optional[str] = None
+    base_model: Optional[str] = None
+    lora_model: Optional[str] = None
+
+
+class ModelUploadResponse(BaseModel):
+    job_id: Optional[str] = None
+    model_name: Optional[str] = None
+    model_id: Optional[str] = None
+    model_source: Optional[str] = None
+    message: str
+
+    @classmethod
+    def from_api_response(cls, response_data: Dict[str, Any]) -> "ModelUploadResponse":
+        """Create ModelUploadResponse from API response, handling both flat and nested structures"""
+        # Start with the base response
+        result: Dict[str, Any] = {"message": response_data.get("message", "")}
+
+        # Check if we have nested data
+        if "data" in response_data and response_data["data"] is not None:
+            # Use nested data values
+            nested_data = response_data["data"]
+            result.update(
+                {
+                    "job_id": nested_data.get("job_id"),
+                    "model_name": nested_data.get("model_name"),
+                    "model_id": nested_data.get("model_id"),
+                    "model_source": nested_data.get("model_source"),
+                }
+            )
+        else:
+            # Use top-level values
+            result.update(
+                {
+                    "job_id": response_data.get("job_id"),
+                    "model_name": response_data.get("model_name"),
+                    "model_id": response_data.get("model_id"),
+                    "model_source": response_data.get("model_source"),
+                }
+            )
+
+        return cls(**result)
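
from_api_response exists because the backend may return the job fields either at the top level or wrapped in a "data" object. Both payloads below are made-up examples shaped to match the code above:

from together.types.models import ModelUploadResponse

flat = {"message": "processing", "job_id": "job-1", "model_name": "my-model"}
nested = {"message": "processing", "data": {"job_id": "job-1", "model_name": "my-model"}}

a = ModelUploadResponse.from_api_response(flat)
b = ModelUploadResponse.from_api_response(nested)
assert a.job_id == b.job_id == "job-1"
assert a.model_id is None  # fields absent from the payload simply stay None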