together-1.5.35-py3-none-any.whl → together-2.0.0a7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +66 -84
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -490
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a7.dist-info/METADATA +730 -0
  159. together-2.0.0a7.dist-info/RECORD +165 -0
  160. {together-1.5.35.dist-info → together-2.0.0a7.dist-info}/WHEEL +1 -1
  161. together-2.0.0a7.dist-info/entry_points.txt +2 -0
  162. {together-1.5.35.dist-info → together-2.0.0a7.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -465
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.35.dist-info/METADATA +0 -583
  204. together-1.5.35.dist-info/RECORD +0 -77
  205. together-1.5.35.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
--- /dev/null
+++ together/types/eval_create_params.py
@@ -0,0 +1,263 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = [
+    "EvalCreateParams",
+    "Parameters",
+    "ParametersEvaluationClassifyParameters",
+    "ParametersEvaluationClassifyParametersJudge",
+    "ParametersEvaluationClassifyParametersModelToEvaluate",
+    "ParametersEvaluationClassifyParametersModelToEvaluateEvaluationModelRequest",
+    "ParametersEvaluationScoreParameters",
+    "ParametersEvaluationScoreParametersJudge",
+    "ParametersEvaluationScoreParametersModelToEvaluate",
+    "ParametersEvaluationScoreParametersModelToEvaluateEvaluationModelRequest",
+    "ParametersEvaluationCompareParameters",
+    "ParametersEvaluationCompareParametersJudge",
+    "ParametersEvaluationCompareParametersModelA",
+    "ParametersEvaluationCompareParametersModelAEvaluationModelRequest",
+    "ParametersEvaluationCompareParametersModelB",
+    "ParametersEvaluationCompareParametersModelBEvaluationModelRequest",
+]
+
+
+class EvalCreateParams(TypedDict, total=False):
+    parameters: Required[Parameters]
+    """Type-specific parameters for the evaluation"""
+
+    type: Required[Literal["classify", "score", "compare"]]
+    """The type of evaluation to perform"""
+
+
+class ParametersEvaluationClassifyParametersJudge(TypedDict, total=False):
+    model: Required[str]
+    """Name of the judge model"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the judge model."""
+
+    system_template: Required[str]
+    """System prompt template for the judge"""
+
+    external_api_token: str
+    """Bearer/API token for external judge models."""
+
+    external_base_url: str
+    """Base URL for external judge models. Must be OpenAI-compatible base URL."""
+
+
+class ParametersEvaluationClassifyParametersModelToEvaluateEvaluationModelRequest(TypedDict, total=False):
+    input_template: Required[str]
+    """Input prompt template"""
+
+    max_tokens: Required[int]
+    """Maximum number of tokens to generate"""
+
+    model: Required[str]
+    """Name of the model to evaluate"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the model."""
+
+    system_template: Required[str]
+    """System prompt template"""
+
+    temperature: Required[float]
+    """Sampling temperature"""
+
+    external_api_token: str
+    """Bearer/API token for external models."""
+
+    external_base_url: str
+    """Base URL for external models. Must be OpenAI-compatible base URL"""
+
+
+ParametersEvaluationClassifyParametersModelToEvaluate: TypeAlias = Union[
+    str, ParametersEvaluationClassifyParametersModelToEvaluateEvaluationModelRequest
+]
+
+
+class ParametersEvaluationClassifyParameters(TypedDict, total=False):
+    input_data_file_path: Required[str]
+    """Data file ID"""
+
+    judge: Required[ParametersEvaluationClassifyParametersJudge]
+
+    labels: Required[SequenceNotStr[str]]
+    """List of possible classification labels"""
+
+    pass_labels: Required[SequenceNotStr[str]]
+    """List of labels that are considered passing"""
+
+    model_to_evaluate: ParametersEvaluationClassifyParametersModelToEvaluate
+    """Field name in the input data"""
+
+
+class ParametersEvaluationScoreParametersJudge(TypedDict, total=False):
+    model: Required[str]
+    """Name of the judge model"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the judge model."""
+
+    system_template: Required[str]
+    """System prompt template for the judge"""
+
+    external_api_token: str
+    """Bearer/API token for external judge models."""
+
+    external_base_url: str
+    """Base URL for external judge models. Must be OpenAI-compatible base URL."""
+
+
+class ParametersEvaluationScoreParametersModelToEvaluateEvaluationModelRequest(TypedDict, total=False):
+    input_template: Required[str]
+    """Input prompt template"""
+
+    max_tokens: Required[int]
+    """Maximum number of tokens to generate"""
+
+    model: Required[str]
+    """Name of the model to evaluate"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the model."""
+
+    system_template: Required[str]
+    """System prompt template"""
+
+    temperature: Required[float]
+    """Sampling temperature"""
+
+    external_api_token: str
+    """Bearer/API token for external models."""
+
+    external_base_url: str
+    """Base URL for external models. Must be OpenAI-compatible base URL"""
+
+
+ParametersEvaluationScoreParametersModelToEvaluate: TypeAlias = Union[
+    str, ParametersEvaluationScoreParametersModelToEvaluateEvaluationModelRequest
+]
+
+
+class ParametersEvaluationScoreParameters(TypedDict, total=False):
+    input_data_file_path: Required[str]
+    """Data file ID"""
+
+    judge: Required[ParametersEvaluationScoreParametersJudge]
+
+    max_score: Required[float]
+    """Maximum possible score"""
+
+    min_score: Required[float]
+    """Minimum possible score"""
+
+    pass_threshold: Required[float]
+    """Score threshold for passing"""
+
+    model_to_evaluate: ParametersEvaluationScoreParametersModelToEvaluate
+    """Field name in the input data"""
+
+
+class ParametersEvaluationCompareParametersJudge(TypedDict, total=False):
+    model: Required[str]
+    """Name of the judge model"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the judge model."""
+
+    system_template: Required[str]
+    """System prompt template for the judge"""
+
+    external_api_token: str
+    """Bearer/API token for external judge models."""
+
+    external_base_url: str
+    """Base URL for external judge models. Must be OpenAI-compatible base URL."""
+
+
+class ParametersEvaluationCompareParametersModelAEvaluationModelRequest(TypedDict, total=False):
+    input_template: Required[str]
+    """Input prompt template"""
+
+    max_tokens: Required[int]
+    """Maximum number of tokens to generate"""
+
+    model: Required[str]
+    """Name of the model to evaluate"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the model."""
+
+    system_template: Required[str]
+    """System prompt template"""
+
+    temperature: Required[float]
+    """Sampling temperature"""
+
+    external_api_token: str
+    """Bearer/API token for external models."""
+
+    external_base_url: str
+    """Base URL for external models. Must be OpenAI-compatible base URL"""
+
+
+ParametersEvaluationCompareParametersModelA: TypeAlias = Union[
+    str, ParametersEvaluationCompareParametersModelAEvaluationModelRequest
+]
+
+
+class ParametersEvaluationCompareParametersModelBEvaluationModelRequest(TypedDict, total=False):
+    input_template: Required[str]
+    """Input prompt template"""
+
+    max_tokens: Required[int]
+    """Maximum number of tokens to generate"""
+
+    model: Required[str]
+    """Name of the model to evaluate"""
+
+    model_source: Required[Literal["serverless", "dedicated", "external"]]
+    """Source of the model."""
+
+    system_template: Required[str]
+    """System prompt template"""
+
+    temperature: Required[float]
+    """Sampling temperature"""
+
+    external_api_token: str
+    """Bearer/API token for external models."""
+
+    external_base_url: str
+    """Base URL for external models. Must be OpenAI-compatible base URL"""
+
+
+ParametersEvaluationCompareParametersModelB: TypeAlias = Union[
+    str, ParametersEvaluationCompareParametersModelBEvaluationModelRequest
+]
+
+
+class ParametersEvaluationCompareParameters(TypedDict, total=False):
+    input_data_file_path: Required[str]
+    """Data file name"""
+
+    judge: Required[ParametersEvaluationCompareParametersJudge]
+
+    model_a: ParametersEvaluationCompareParametersModelA
+    """Field name in the input data"""
+
+    model_b: ParametersEvaluationCompareParametersModelB
+    """Field name in the input data"""
+
+
+Parameters: TypeAlias = Union[
+    ParametersEvaluationClassifyParameters, ParametersEvaluationScoreParameters, ParametersEvaluationCompareParameters
+]
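Since the request params above are TypedDicts, a request body is just a plain dict that type-checks against EvalCreateParams. A minimal sketch, not part of the diff — the file ID and model names are hypothetical placeholders:

    from together.types.eval_create_params import EvalCreateParams

    params: EvalCreateParams = {
        "type": "classify",
        "parameters": {
            "input_data_file_path": "file-abc123",  # hypothetical data file ID
            "labels": ["spam", "ham"],
            "pass_labels": ["ham"],
            "judge": {
                "model": "example-org/judge-model",  # hypothetical judge model
                "model_source": "serverless",
                "system_template": "Classify each sample as spam or ham.",
            },
        },
    }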
--- /dev/null
+++ together/types/eval_create_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["EvalCreateResponse"]
+
+
+class EvalCreateResponse(BaseModel):
+    status: Optional[Literal["pending"]] = None
+    """Initial status of the job"""
+
+    workflow_id: Optional[str] = None
+    """The ID of the created evaluation job"""
--- /dev/null
+++ together/types/eval_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from .._utils import PropertyInfo
+
+__all__ = ["EvalListParams"]
+
+
+class EvalListParams(TypedDict, total=False):
+    limit: int
+
+    status: str
+
+    user_id: Annotated[str, PropertyInfo(alias="userId")]
+    """Admin users can specify a user ID to filter jobs.
+
+    Pass empty string to get all jobs.
+    """
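Note the PropertyInfo(alias="userId") annotation: callers write the snake_case key and the generated client serializes it to the camelCase query parameter. A minimal sketch, not part of the diff:

    from together.types.eval_list_params import EvalListParams

    # "user_id" is sent on the wire as "userId"; per the docstring above, an
    # empty string asks for all jobs (admin only).
    params: EvalListParams = {"limit": 10, "status": "completed", "user_id": ""}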
--- /dev/null
+++ together/types/eval_list_response.py
@@ -0,0 +1,10 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import TypeAlias
+
+from .evaluation_job import EvaluationJob
+
+__all__ = ["EvalListResponse"]
+
+EvalListResponse: TypeAlias = List[EvaluationJob]
--- /dev/null
+++ together/types/eval_status_response.py
@@ -0,0 +1,100 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal, TypeAlias
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = [
+    "EvalStatusResponse",
+    "Results",
+    "ResultsEvaluationClassifyResults",
+    "ResultsEvaluationScoreResults",
+    "ResultsEvaluationScoreResultsAggregatedScores",
+    "ResultsEvaluationCompareResults",
+]
+
+
+class ResultsEvaluationClassifyResults(BaseModel):
+    generation_fail_count: Optional[float] = None
+    """Number of failed generations."""
+
+    invalid_label_count: Optional[float] = None
+    """Number of invalid labels"""
+
+    judge_fail_count: Optional[float] = None
+    """Number of failed judge generations"""
+
+    label_counts: Optional[str] = None
+    """JSON string representing label counts"""
+
+    pass_percentage: Optional[float] = None
+    """Percentage of pass labels."""
+
+    result_file_id: Optional[str] = None
+    """Data File ID"""
+
+
+class ResultsEvaluationScoreResultsAggregatedScores(BaseModel):
+    mean_score: Optional[float] = None
+
+    pass_percentage: Optional[float] = None
+
+    std_score: Optional[float] = None
+
+
+class ResultsEvaluationScoreResults(BaseModel):
+    aggregated_scores: Optional[ResultsEvaluationScoreResultsAggregatedScores] = None
+
+    failed_samples: Optional[float] = None
+    """number of failed samples generated from model"""
+
+    generation_fail_count: Optional[float] = None
+    """Number of failed generations."""
+
+    invalid_score_count: Optional[float] = None
+    """number of invalid scores generated from model"""
+
+    judge_fail_count: Optional[float] = None
+    """Number of failed judge generations"""
+
+    result_file_id: Optional[str] = None
+    """Data File ID"""
+
+
+class ResultsEvaluationCompareResults(BaseModel):
+    a_wins: Optional[int] = FieldInfo(alias="A_wins", default=None)
+    """Number of times model A won"""
+
+    b_wins: Optional[int] = FieldInfo(alias="B_wins", default=None)
+    """Number of times model B won"""
+
+    generation_fail_count: Optional[float] = None
+    """Number of failed generations."""
+
+    judge_fail_count: Optional[float] = None
+    """Number of failed judge generations"""
+
+    num_samples: Optional[int] = None
+    """Total number of samples compared"""
+
+    result_file_id: Optional[str] = None
+    """Data File ID"""
+
+    ties: Optional[int] = FieldInfo(alias="Ties", default=None)
+    """Number of ties"""
+
+
+Results: TypeAlias = Union[
+    ResultsEvaluationClassifyResults, ResultsEvaluationScoreResults, ResultsEvaluationCompareResults
+]
+
+
+class EvalStatusResponse(BaseModel):
+    results: Optional[Results] = None
+    """The results of the evaluation job"""
+
+    status: Optional[Literal["completed", "error", "user_error", "running", "queued", "pending"]] = None
+    """The status of the evaluation job"""
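The Results alias is an untagged union, so consumers narrow it with isinstance checks against the concrete pydantic models. A minimal sketch, not part of the diff:

    from together.types.eval_status_response import (
        EvalStatusResponse,
        ResultsEvaluationClassifyResults,
        ResultsEvaluationCompareResults,
        ResultsEvaluationScoreResults,
    )

    def summarize(resp: EvalStatusResponse) -> str:
        r = resp.results
        if isinstance(r, ResultsEvaluationClassifyResults):
            return f"classify pass rate: {r.pass_percentage}"
        if isinstance(r, ResultsEvaluationScoreResults):
            agg = r.aggregated_scores
            return f"mean score: {agg.mean_score if agg else None}"
        if isinstance(r, ResultsEvaluationCompareResults):
            return f"A wins: {r.a_wins}, B wins: {r.b_wins}, ties: {r.ties}"
        return f"no results yet (status: {resp.status})"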
--- /dev/null
+++ together/types/evaluation_job.py
@@ -0,0 +1,139 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from datetime import datetime
+from typing_extensions import Literal, TypeAlias
+
+from pydantic import Field as FieldInfo
+
+from .._models import BaseModel
+
+__all__ = [
+    "EvaluationJob",
+    "Results",
+    "ResultsEvaluationClassifyResults",
+    "ResultsEvaluationScoreResults",
+    "ResultsEvaluationScoreResultsAggregatedScores",
+    "ResultsEvaluationCompareResults",
+    "ResultsError",
+    "StatusUpdate",
+]
+
+
+class ResultsEvaluationClassifyResults(BaseModel):
+    generation_fail_count: Optional[float] = None
+    """Number of failed generations."""
+
+    invalid_label_count: Optional[float] = None
+    """Number of invalid labels"""
+
+    judge_fail_count: Optional[float] = None
+    """Number of failed judge generations"""
+
+    label_counts: Optional[str] = None
+    """JSON string representing label counts"""
+
+    pass_percentage: Optional[float] = None
+    """Percentage of pass labels."""
+
+    result_file_id: Optional[str] = None
+    """Data File ID"""
+
+
+class ResultsEvaluationScoreResultsAggregatedScores(BaseModel):
+    mean_score: Optional[float] = None
+
+    pass_percentage: Optional[float] = None
+
+    std_score: Optional[float] = None
+
+
+class ResultsEvaluationScoreResults(BaseModel):
+    aggregated_scores: Optional[ResultsEvaluationScoreResultsAggregatedScores] = None
+
+    failed_samples: Optional[float] = None
+    """number of failed samples generated from model"""
+
+    generation_fail_count: Optional[float] = None
+    """Number of failed generations."""
+
+    invalid_score_count: Optional[float] = None
+    """number of invalid scores generated from model"""
+
+    judge_fail_count: Optional[float] = None
+    """Number of failed judge generations"""
+
+    result_file_id: Optional[str] = None
+    """Data File ID"""
+
+
+class ResultsEvaluationCompareResults(BaseModel):
+    a_wins: Optional[int] = FieldInfo(alias="A_wins", default=None)
+    """Number of times model A won"""
+
+    b_wins: Optional[int] = FieldInfo(alias="B_wins", default=None)
+    """Number of times model B won"""
+
+    generation_fail_count: Optional[float] = None
+    """Number of failed generations."""
+
+    judge_fail_count: Optional[float] = None
+    """Number of failed judge generations"""
+
+    num_samples: Optional[int] = None
+    """Total number of samples compared"""
+
+    result_file_id: Optional[str] = None
+    """Data File ID"""
+
+    ties: Optional[int] = FieldInfo(alias="Ties", default=None)
+    """Number of ties"""
+
+
+class ResultsError(BaseModel):
+    error: Optional[str] = None
+
+
+Results: TypeAlias = Union[
+    ResultsEvaluationClassifyResults, ResultsEvaluationScoreResults, ResultsEvaluationCompareResults, ResultsError, None
+]
+
+
+class StatusUpdate(BaseModel):
+    message: Optional[str] = None
+    """Additional message for this update"""
+
+    status: Optional[str] = None
+    """The status at this update"""
+
+    timestamp: Optional[datetime] = None
+    """When this update occurred"""
+
+
+class EvaluationJob(BaseModel):
+    created_at: Optional[datetime] = None
+    """When the job was created"""
+
+    owner_id: Optional[str] = None
+    """ID of the job owner (admin only)"""
+
+    parameters: Optional[Dict[str, object]] = None
+    """The parameters used for this evaluation"""
+
+    results: Optional[Results] = None
+    """Results of the evaluation (when completed)"""
+
+    status: Optional[Literal["pending", "queued", "running", "completed", "error", "user_error"]] = None
+    """Current status of the job"""
+
+    status_updates: Optional[List[StatusUpdate]] = None
+    """History of status updates (admin only)"""
+
+    type: Optional[Literal["classify", "score", "compare"]] = None
+    """The type of evaluation"""
+
+    updated_at: Optional[datetime] = None
+    """When the job was last updated"""
+
+    workflow_id: Optional[str] = None
+    """The evaluation job ID"""
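EvaluationJob mirrors the same Results union and adds job metadata, including a status-update history. A minimal sketch, not part of the diff, rendering that history from an already-fetched job:

    from together.types.evaluation_job import EvaluationJob

    def print_history(job: EvaluationJob) -> None:
        print(f"{job.workflow_id}: {job.status} ({job.type})")
        for update in job.status_updates or []:
            print(f"  {update.timestamp} {update.status}: {update.message}")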
--- /dev/null
+++ together/types/execute_response.py
@@ -0,0 +1,108 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from pydantic import Field as FieldInfo
+
+from .._utils import PropertyInfo
+from .._models import BaseModel
+
+__all__ = [
+    "ExecuteResponse",
+    "SuccessfulExecution",
+    "SuccessfulExecutionData",
+    "SuccessfulExecutionDataOutput",
+    "SuccessfulExecutionDataOutputStreamOutput",
+    "SuccessfulExecutionDataOutputError",
+    "SuccessfulExecutionDataOutputDisplayorExecuteOutput",
+    "SuccessfulExecutionDataOutputDisplayorExecuteOutputData",
+    "FailedExecution",
+]
+
+
+class SuccessfulExecutionDataOutputStreamOutput(BaseModel):
+    data: str
+
+    type: Literal["stdout", "stderr"]
+
+
+class SuccessfulExecutionDataOutputError(BaseModel):
+    data: str
+
+    type: Literal["error"]
+
+
+class SuccessfulExecutionDataOutputDisplayorExecuteOutputData(BaseModel):
+    application_geo_json: Optional[Dict[str, object]] = FieldInfo(alias="application/geo+json", default=None)
+
+    application_javascript: Optional[str] = FieldInfo(alias="application/javascript", default=None)
+
+    application_json: Optional[Dict[str, object]] = FieldInfo(alias="application/json", default=None)
+
+    application_pdf: Optional[str] = FieldInfo(alias="application/pdf", default=None)
+
+    application_vnd_vega_v5_json: Optional[Dict[str, object]] = FieldInfo(
+        alias="application/vnd.vega.v5+json", default=None
+    )
+
+    application_vnd_vegalite_v4_json: Optional[Dict[str, object]] = FieldInfo(
+        alias="application/vnd.vegalite.v4+json", default=None
+    )
+
+    image_gif: Optional[str] = FieldInfo(alias="image/gif", default=None)
+
+    image_jpeg: Optional[str] = FieldInfo(alias="image/jpeg", default=None)
+
+    image_png: Optional[str] = FieldInfo(alias="image/png", default=None)
+
+    image_svg_xml: Optional[str] = FieldInfo(alias="image/svg+xml", default=None)
+
+    text_html: Optional[str] = FieldInfo(alias="text/html", default=None)
+
+    text_latex: Optional[str] = FieldInfo(alias="text/latex", default=None)
+
+    text_markdown: Optional[str] = FieldInfo(alias="text/markdown", default=None)
+
+    text_plain: Optional[str] = FieldInfo(alias="text/plain", default=None)
+
+
+class SuccessfulExecutionDataOutputDisplayorExecuteOutput(BaseModel):
+    data: SuccessfulExecutionDataOutputDisplayorExecuteOutputData
+
+    type: Literal["display_data", "execute_result"]
+
+
+SuccessfulExecutionDataOutput: TypeAlias = Annotated[
+    Union[
+        SuccessfulExecutionDataOutputStreamOutput,
+        SuccessfulExecutionDataOutputError,
+        SuccessfulExecutionDataOutputDisplayorExecuteOutput,
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class SuccessfulExecutionData(BaseModel):
+    outputs: List[SuccessfulExecutionDataOutput]
+
+    session_id: str
+    """Identifier of the current session. Used to make follow-up calls."""
+
+    status: Optional[Literal["success"]] = None
+    """Status of the execution. Currently only supports success."""
+
+
+class SuccessfulExecution(BaseModel):
+    data: SuccessfulExecutionData
+
+    errors: None = None
+
+
+class FailedExecution(BaseModel):
+    data: None = None
+
+    errors: List[Union[str, Dict[str, object]]]
+
+
+ExecuteResponse: TypeAlias = Union[SuccessfulExecution, FailedExecution]
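ExecuteResponse distinguishes success from failure by which of data/errors is populated, and each output in a successful run carries a "type" discriminator. A minimal sketch, not part of the diff:

    from together.types.execute_response import ExecuteResponse, SuccessfulExecution

    def print_outputs(resp: ExecuteResponse) -> None:
        if isinstance(resp, SuccessfulExecution):
            print("session:", resp.data.session_id)  # reusable for follow-up calls
            for out in resp.data.outputs:
                if out.type in ("stdout", "stderr", "error"):
                    print(out.type, out.data)  # plain-text payload
                else:  # "display_data" or "execute_result": rich MIME bundle
                    print(out.type, out.data.text_plain)
        else:  # FailedExecution
            print("execution failed:", resp.errors)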
--- /dev/null
+++ together/types/file_delete_response.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from .._models import BaseModel
+
+__all__ = ["FileDeleteResponse"]
+
+
+class FileDeleteResponse(BaseModel):
+    id: Optional[str] = None
+
+    deleted: Optional[bool] = None
--- /dev/null
+++ together/types/file_list.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from .._models import BaseModel
+from .file_response import FileResponse
+
+__all__ = ["FileList"]
+
+
+class FileList(BaseModel):
+    data: List[FileResponse]