together 1.5.20__py3-none-any.whl → 1.5.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -89,18 +89,10 @@ def create_finetune_request(
 
     model_or_checkpoint = model or from_checkpoint
 
-    if batch_size == "max":
-        log_warn_once(
-            "Starting from together>=1.3.0, "
-            "the default batch size is set to the maximum allowed value for each model."
-        )
     if warmup_ratio is None:
         warmup_ratio = 0.0
 
     training_type: TrainingType = FullTrainingType()
-    max_batch_size: int = 0
-    max_batch_size_dpo: int = 0
-    min_batch_size: int = 0
     if lora:
         if model_limits.lora_training is None:
             raise ValueError(
@@ -133,28 +125,23 @@ def create_finetune_request(
         min_batch_size = model_limits.full_training.min_batch_size
         max_batch_size_dpo = model_limits.full_training.max_batch_size_dpo
 
-    if batch_size == "max":
-        if training_method == "dpo":
-            batch_size = max_batch_size_dpo
-        else:
-            batch_size = max_batch_size
+    if batch_size != "max":
+        if training_method == "sft":
+            if batch_size > max_batch_size:
+                raise ValueError(
+                    f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size}."
+                )
+        elif training_method == "dpo":
+            if batch_size > max_batch_size_dpo:
+                raise ValueError(
+                    f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size_dpo}."
+                )
 
-    if training_method == "sft":
-        if batch_size > max_batch_size:
-            raise ValueError(
-                f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size}."
-            )
-    elif training_method == "dpo":
-        if batch_size > max_batch_size_dpo:
+        if batch_size < min_batch_size:
             raise ValueError(
-                f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size_dpo}."
+                f"Requested batch size of {batch_size} is lower that the minimum allowed value of {min_batch_size}."
             )
 
-    if batch_size < min_batch_size:
-        raise ValueError(
-            f"Requested batch size of {batch_size} is lower that the minimum allowed value of {min_batch_size}."
-        )
-
     if warmup_ratio > 1 or warmup_ratio < 0:
         raise ValueError(f"Warmup ratio should be between 0 and 1 (got {warmup_ratio})")
 
@@ -7,6 +7,14 @@ from together.types.audio_speech import (
     AudioSpeechStreamChunk,
     AudioSpeechStreamEvent,
     AudioSpeechStreamResponse,
+    AudioTranscriptionRequest,
+    AudioTranslationRequest,
+    AudioTranscriptionResponse,
+    AudioTranscriptionVerboseResponse,
+    AudioTranslationResponse,
+    AudioTranslationVerboseResponse,
+    AudioTranscriptionResponseFormat,
+    AudioTimestampGranularities,
 )
 from together.types.chat_completions import (
     ChatCompletionChunk,
@@ -53,6 +61,19 @@ from together.types.images import ImageRequest, ImageResponse
 from together.types.models import ModelObject
 from together.types.rerank import RerankRequest, RerankResponse
 from together.types.batch import BatchJob, BatchJobStatus, BatchEndpoint
+from together.types.evaluation import (
+    EvaluationType,
+    EvaluationStatus,
+    JudgeModelConfig,
+    ModelRequest,
+    ClassifyParameters,
+    ScoreParameters,
+    CompareParameters,
+    EvaluationRequest,
+    EvaluationCreateResponse,
+    EvaluationJob,
+    EvaluationStatusResponse,
+)
 
 
 __all__ = [
@@ -102,10 +123,29 @@ __all__ = [
     "AudioSpeechStreamChunk",
     "AudioSpeechStreamEvent",
     "AudioSpeechStreamResponse",
+    "AudioTranscriptionRequest",
+    "AudioTranslationRequest",
+    "AudioTranscriptionResponse",
+    "AudioTranscriptionVerboseResponse",
+    "AudioTranslationResponse",
+    "AudioTranslationVerboseResponse",
+    "AudioTranscriptionResponseFormat",
+    "AudioTimestampGranularities",
     "DedicatedEndpoint",
     "ListEndpoint",
     "Autoscaling",
     "BatchJob",
     "BatchJobStatus",
     "BatchEndpoint",
+    "EvaluationType",
+    "EvaluationStatus",
+    "JudgeModelConfig",
+    "ModelRequest",
+    "ClassifyParameters",
+    "ScoreParameters",
+    "CompareParameters",
+    "EvaluationRequest",
+    "EvaluationCreateResponse",
+    "EvaluationJob",
+    "EvaluationStatusResponse",
 ]
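With both hunks applied, the new audio and evaluation types are re-exported from the package's public namespace. Assuming together>=1.5.23 is installed, they import directly from together.types rather than their defining submodules:

```python
from together.types import (
    AudioTranscriptionRequest,
    AudioTranscriptionResponseFormat,
    EvaluationRequest,
    EvaluationStatus,
)

# The enums are plain str Enums, so their values round-trip as strings.
print(AudioTranscriptionResponseFormat.VERBOSE_JSON.value)  # "verbose_json"
print(EvaluationStatus.COMPLETED.value)                     # "completed"
```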
@@ -1,13 +1,12 @@
 from __future__ import annotations
 
+import base64
 from enum import Enum
-from typing import Iterator
-import threading
+from typing import BinaryIO, Iterator, List, Optional, Union
 
 from pydantic import BaseModel, ConfigDict
 
 from together.together_response import TogetherResponse
-import base64
 
 
 class AudioResponseFormat(str, Enum):
@@ -79,23 +78,19 @@ class AudioSpeechStreamEventResponse(BaseModel):
 
 
 class AudioSpeechStreamResponse(BaseModel):
-
     response: TogetherResponse | Iterator[TogetherResponse]
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     def stream_to_file(self, file_path: str) -> None:
-
         if isinstance(self.response, TogetherResponse):
             # save response to file
             with open(file_path, "wb") as f:
                 f.write(self.response.data)
 
         elif isinstance(self.response, Iterator):
-
             with open(file_path, "wb") as f:
                 for chunk in self.response:
-
                     # Try to parse as stream chunk
                     stream_event_response = AudioSpeechStreamEventResponse(
                         response={"data": chunk.data}
@@ -108,3 +103,83 @@ class AudioSpeechStreamResponse(BaseModel):
                     audio = base64.b64decode(stream_event_response.response.data.b64)
 
                     f.write(audio)
+
+
+class AudioTranscriptionResponseFormat(str, Enum):
+    JSON = "json"
+    VERBOSE_JSON = "verbose_json"
+
+
+class AudioTimestampGranularities(str, Enum):
+    SEGMENT = "segment"
+    WORD = "word"
+
+
+class AudioTranscriptionRequest(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    file: Union[str, BinaryIO]
+    model: str = "openai/whisper-large-v3"
+    language: Optional[str] = None
+    prompt: Optional[str] = None
+    response_format: AudioTranscriptionResponseFormat = (
+        AudioTranscriptionResponseFormat.JSON
+    )
+    temperature: float = 0.0
+    timestamp_granularities: Optional[AudioTimestampGranularities] = (
+        AudioTimestampGranularities.SEGMENT
+    )
+
+
+class AudioTranslationRequest(BaseModel):
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    file: Union[str, BinaryIO]
+    model: str = "openai/whisper-large-v3"
+    language: Optional[str] = None
+    prompt: Optional[str] = None
+    response_format: AudioTranscriptionResponseFormat = (
+        AudioTranscriptionResponseFormat.JSON
+    )
+    temperature: float = 0.0
+    timestamp_granularities: Optional[AudioTimestampGranularities] = (
+        AudioTimestampGranularities.SEGMENT
+    )
+
+
+class AudioTranscriptionSegment(BaseModel):
+    id: int
+    start: float
+    end: float
+    text: str
+
+
+class AudioTranscriptionWord(BaseModel):
+    word: str
+    start: float
+    end: float
+
+
+class AudioTranscriptionResponse(BaseModel):
+    text: str
+
+
+class AudioTranscriptionVerboseResponse(BaseModel):
+    language: Optional[str] = None
+    duration: Optional[float] = None
+    text: str
+    segments: Optional[List[AudioTranscriptionSegment]] = None
+    words: Optional[List[AudioTranscriptionWord]] = None
+
+
+class AudioTranslationResponse(BaseModel):
+    text: str
+
+
+class AudioTranslationVerboseResponse(BaseModel):
+    task: Optional[str] = None
+    language: Optional[str] = None
+    duration: Optional[float] = None
+    text: str
+    segments: Optional[List[AudioTranscriptionSegment]] = None
+    words: Optional[List[AudioTranscriptionWord]] = None
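These pydantic models define the request and response shapes for the new transcription and translation endpoints. A short sketch of how they fit together, using an illustrative file name and payload (not values prescribed by the SDK):

```python
from together.types.audio_speech import (
    AudioTimestampGranularities,
    AudioTranscriptionRequest,
    AudioTranscriptionVerboseResponse,
)

# Build a request model; unset fields keep the defaults shown in the diff
# (whisper-large-v3, json response format, segment-level timestamps).
req = AudioTranscriptionRequest(
    file="meeting.wav",  # a path string or an open binary file object
    language="en",
    timestamp_granularities=AudioTimestampGranularities.WORD,
)

# Parse a hypothetical verbose_json payload into the verbose response model.
payload = {
    "language": "en",
    "duration": 1.5,
    "text": "hello world",
    "words": [{"word": "hello", "start": 0.0, "end": 0.6}],
}
resp = AudioTranscriptionVerboseResponse.model_validate(payload)
print(resp.words[0].word, resp.duration)  # hello 1.5
```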
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+from datetime import datetime
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from pydantic import BaseModel, Field
+
+
+class EvaluationType(str, Enum):
+    CLASSIFY = "classify"
+    SCORE = "score"
+    COMPARE = "compare"
+
+
+class EvaluationStatus(str, Enum):
+    PENDING = "pending"
+    QUEUED = "queued"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    ERROR = "error"
+    USER_ERROR = "user_error"
+
+
+class JudgeModelConfig(BaseModel):
+    model_name: str
+    system_template: str
+
+
+class ModelRequest(BaseModel):
+    model_name: str
+    max_tokens: int
+    temperature: float
+    system_template: str
+    input_template: str
+
+
+class ClassifyParameters(BaseModel):
+    judge: JudgeModelConfig
+    labels: List[str]
+    pass_labels: List[str]
+    model_to_evaluate: Optional[Union[str, ModelRequest]] = None
+    input_data_file_path: str
+
+
+class ScoreParameters(BaseModel):
+    judge: JudgeModelConfig
+    min_score: float
+    max_score: float
+    pass_threshold: float
+    model_to_evaluate: Optional[Union[str, ModelRequest]] = None
+    input_data_file_path: str
+
+
+class CompareParameters(BaseModel):
+    judge: JudgeModelConfig
+    model_a: Optional[Union[str, ModelRequest]] = None
+    model_b: Optional[Union[str, ModelRequest]] = None
+    input_data_file_path: str
+
+
+class EvaluationRequest(BaseModel):
+    type: EvaluationType
+    parameters: Union[ClassifyParameters, ScoreParameters, CompareParameters]
+
+
+class EvaluationCreateResponse(BaseModel):
+    workflow_id: str
+    status: EvaluationStatus
+
+
+class EvaluationJob(BaseModel):
+    workflow_id: str = Field(alias="workflow_id")
+    type: Optional[EvaluationType] = None
+    status: EvaluationStatus
+    results: Optional[Dict[str, Any]] = None
+    parameters: Optional[Dict[str, Any]] = None
+    created_at: Optional[datetime] = None
+    updated_at: Optional[datetime] = None
+
+    class Config:
+        populate_by_name = True
+
+
+class EvaluationStatusResponse(BaseModel):
+    status: EvaluationStatus
+    results: Optional[Dict[str, Any]] = None
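This new module models the evaluation workflow end to end: an EvaluationRequest pairs an EvaluationType with matching parameters, and status polling returns EvaluationStatusResponse. A sketch of assembling a classify-type request; the judge model name, templates, and file ID are placeholders, not values prescribed by the SDK:

```python
from together.types.evaluation import (
    ClassifyParameters,
    EvaluationRequest,
    EvaluationStatus,
    EvaluationStatusResponse,
    EvaluationType,
    JudgeModelConfig,
)

request = EvaluationRequest(
    type=EvaluationType.CLASSIFY,
    parameters=ClassifyParameters(
        judge=JudgeModelConfig(
            model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",  # placeholder judge
            system_template="Label the answer as helpful or unhelpful.",
        ),
        labels=["helpful", "unhelpful"],
        pass_labels=["helpful"],
        input_data_file_path="file-abc123",  # placeholder file ID
    ),
)

# A status payload as the API might return it, coerced into the model.
status = EvaluationStatusResponse.model_validate(
    {"status": "completed", "results": {"pass_rate": 0.82}}
)
print(request.type.value, status.status is EvaluationStatus.COMPLETED)
```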
together/types/files.py CHANGED
@@ -14,11 +14,13 @@ from together.types.common import (
 class FilePurpose(str, Enum):
     FineTune = "fine-tune"
     BatchAPI = "batch-api"
+    Eval = "eval"
 
 
 class FileType(str, Enum):
     jsonl = "jsonl"
     parquet = "parquet"
+    csv = "csv"
 
 
 class FileRequest(BaseModel):
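The widened enums round-trip from their new string values, which is how API payloads deserialize into them. A quick check, plus a hedged guess at usage with the existing files upload entry point (the upload signature itself is not part of this diff):

```python
from together.types.files import FilePurpose, FileType

# str Enums construct directly from their wire values.
assert FilePurpose("eval") is FilePurpose.Eval
assert FileType("csv") is FileType.csv

# Hypothetical usage, assuming the existing upload entry point
# accepts the new purpose:
# client.files.upload(file="eval_data.csv", purpose=FilePurpose.Eval)
```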
@@ -195,7 +195,7 @@ class FinetuneRequest(BaseModel):
     # number of evaluation loops to run
     n_evals: int | None = None
     # training batch size
-    batch_size: int | None = None
+    batch_size: int | Literal["max"] | None = None
     # up to 40 character suffix for output model name
     suffix: str | None = None
     # weights & biases api key
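Widening the annotation lets the literal string "max" travel through the request model instead of being resolved client-side, matching the create_finetune_request change above. A stand-in model mirroring just this field shows what the new annotation accepts; the real FinetuneRequest carries many more fields:

```python
from typing import Literal, Union

from pydantic import BaseModel


class BatchSizeDemo(BaseModel):
    # Mirror of the widened field only (illustrative, not the SDK class).
    batch_size: Union[int, Literal["max"], None] = None


print(BatchSizeDemo(batch_size="max").batch_size)  # "max" now validates
print(BatchSizeDemo(batch_size=32).batch_size)     # integers still accepted
print(BatchSizeDemo().batch_size)                  # default remains None
```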