together 1.5.34__py3-none-any.whl → 2.0.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +65 -81
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -477
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a6.dist-info/METADATA +729 -0
  159. together-2.0.0a6.dist-info/RECORD +165 -0
  160. {together-1.5.34.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
  161. together-2.0.0a6.dist-info/entry_points.txt +2 -0
  162. {together-1.5.34.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -464
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.34.dist-info/METADATA +0 -583
  204. together-1.5.34.dist-info/RECORD +0 -77
  205. together-1.5.34.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union, Optional
from typing_extensions import Literal, TypeAlias

from pydantic import Field as FieldInfo

from .._models import BaseModel
from .finetune_event import FinetuneEvent

__all__ = [
    "FinetuneResponse",
    "LrScheduler",
    "LrSchedulerLrSchedulerArgs",
    "LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs",
    "LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs",
    "TrainingMethod",
    "TrainingMethodTrainingMethodSft",
    "TrainingMethodTrainingMethodDpo",
    "TrainingType",
    "TrainingTypeFullTrainingType",
    "TrainingTypeLoRaTrainingType",
]


class LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs(BaseModel):
    """Arguments specific to the ``linear`` learning-rate scheduler."""

    min_lr_ratio: Optional[float] = None
    """The ratio of the final learning rate to the peak learning rate"""


class LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs(BaseModel):
    """Arguments specific to the ``cosine`` learning-rate scheduler."""

    min_lr_ratio: float
    """The ratio of the final learning rate to the peak learning rate"""

    num_cycles: float
    """Number or fraction of cycles for the cosine learning rate scheduler"""


# Union of the per-scheduler argument shapes; which variant applies is
# indicated by ``LrScheduler.lr_scheduler_type``.
LrSchedulerLrSchedulerArgs: TypeAlias = Union[
    LrSchedulerLrSchedulerArgsLinearLrSchedulerArgs, LrSchedulerLrSchedulerArgsCosineLrSchedulerArgs
]


class LrScheduler(BaseModel):
    """Learning-rate scheduler configuration for a fine-tuning job."""

    lr_scheduler_type: Literal["linear", "cosine"]

    lr_scheduler_args: Optional[LrSchedulerLrSchedulerArgs] = None


class TrainingMethodTrainingMethodSft(BaseModel):
    """Supervised fine-tuning (SFT) training-method settings."""

    method: Literal["sft"]

    train_on_inputs: Union[bool, Literal["auto"]]
    """
    Whether to mask the user messages in conversational data or prompts in
    instruction data.
    """


class TrainingMethodTrainingMethodDpo(BaseModel):
    """Direct Preference Optimization (DPO) training-method settings."""

    method: Literal["dpo"]

    dpo_beta: Optional[float] = None

    dpo_normalize_logratios_by_length: Optional[bool] = None

    dpo_reference_free: Optional[bool] = None

    rpo_alpha: Optional[float] = None

    simpo_gamma: Optional[float] = None


# Discriminated by the ``method`` literal ("sft" vs "dpo").
TrainingMethod: TypeAlias = Union[TrainingMethodTrainingMethodSft, TrainingMethodTrainingMethodDpo]


class TrainingTypeFullTrainingType(BaseModel):
    """Marker for full-parameter fine-tuning."""

    type: Literal["Full"]


class TrainingTypeLoRaTrainingType(BaseModel):
    """LoRA (low-rank adaptation) fine-tuning hyperparameters."""

    lora_alpha: int

    lora_r: int

    type: Literal["Lora"]

    lora_dropout: Optional[float] = None

    lora_trainable_modules: Optional[str] = None


# Discriminated by the ``type`` literal ("Full" vs "Lora").
TrainingType: TypeAlias = Union[TrainingTypeFullTrainingType, TrainingTypeLoRaTrainingType]


class FinetuneResponse(BaseModel):
    """API representation of a fine-tuning job and its current state."""

    id: str

    # Lifecycle state of the job as reported by the API.
    status: Literal[
        "pending",
        "queued",
        "running",
        "compressing",
        "uploading",
        "cancel_requested",
        "cancelled",
        "error",
        "completed",
    ]

    # "max" requests the largest batch size the hardware supports.
    batch_size: Union[int, Literal["max"], None] = None

    created_at: Optional[str] = None

    epochs_completed: Optional[int] = None

    eval_steps: Optional[int] = None

    events: Optional[List[FinetuneEvent]] = None

    from_checkpoint: Optional[str] = None

    from_hf_model: Optional[str] = None

    hf_model_revision: Optional[str] = None

    job_id: Optional[str] = None

    learning_rate: Optional[float] = None

    lr_scheduler: Optional[LrScheduler] = None

    max_grad_norm: Optional[float] = None

    model: Optional[str] = None

    # Attribute is prefixed "x_" while the wire key stays "model_output_name" —
    # presumably to avoid pydantic v2's protected "model_" namespace; TODO confirm.
    x_model_output_name: Optional[str] = FieldInfo(alias="model_output_name", default=None)

    x_model_output_path: Optional[str] = FieldInfo(alias="model_output_path", default=None)

    n_checkpoints: Optional[int] = None

    n_epochs: Optional[int] = None

    n_evals: Optional[int] = None

    param_count: Optional[int] = None

    queue_depth: Optional[int] = None

    token_count: Optional[int] = None

    # NOTE(review): typed int per the spec; unit (e.g. cents) not stated here — verify.
    total_price: Optional[int] = None

    train_on_inputs: Union[bool, Literal["auto"], None] = None

    training_file: Optional[str] = None

    training_method: Optional[TrainingMethod] = None

    training_type: Optional[TrainingType] = None

    trainingfile_numlines: Optional[int] = None

    trainingfile_size: Optional[int] = None

    updated_at: Optional[str] = None

    validation_file: Optional[str] = None

    wandb_project_name: Optional[str] = None

    wandb_url: Optional[str] = None

    warmup_ratio: Optional[float] = None

    weight_decay: Optional[float] = None
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import TypedDict

__all__ = ["HardwareListParams"]


class HardwareListParams(TypedDict, total=False):
    """Query parameters for listing hardware configurations (all optional)."""

    model: str
    """Filter hardware configurations by model compatibility.

    When provided, the response includes availability status for each compatible
    configuration.
    """
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from datetime import datetime
from typing_extensions import Literal

from .._models import BaseModel

__all__ = ["HardwareListResponse", "Data", "DataPricing", "DataSpecs", "DataAvailability"]


class DataPricing(BaseModel):
    """Pricing details for a hardware configuration."""

    cents_per_minute: float
    """Cost per minute of endpoint uptime in cents"""


class DataSpecs(BaseModel):
    """Hardware specification of a single configuration."""

    gpu_count: int
    """Number of GPUs in this configuration"""

    gpu_link: str
    """The GPU interconnect technology"""

    gpu_memory: float
    """Amount of GPU memory in GB"""

    gpu_type: str
    """The type/model of GPU"""


class DataAvailability(BaseModel):
    """Availability indicator for a hardware configuration."""

    status: Literal["available", "unavailable", "insufficient"]
    """The availability status of the hardware configuration"""


class Data(BaseModel):
    """One hardware configuration entry in the list response."""

    id: str
    """Unique identifier for the hardware configuration"""

    object: Literal["hardware"]

    pricing: DataPricing
    """Pricing details for using an endpoint"""

    specs: DataSpecs
    """Detailed specifications of a hardware configuration"""

    updated_at: datetime
    """Timestamp of when the hardware status was last updated"""

    # Only populated when the request filtered by model compatibility —
    # presumably; see HardwareListParams. TODO confirm against the API docs.
    availability: Optional[DataAvailability] = None
    """Indicates the current availability status of a hardware configuration"""


class HardwareListResponse(BaseModel):
    """Envelope for the hardware list endpoint (``object == "list"``)."""

    data: List[Data]

    object: Literal["list"]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from .._models import BaseModel

__all__ = ["ImageDataB64"]


class ImageDataB64(BaseModel):
    """A generated image returned inline as base64-encoded JSON data."""

    b64_json: str

    # Position of this image within the batch of generated images.
    index: int

    # Discriminator distinguishing this variant from ImageDataURL.
    type: Literal["b64_json"]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from .._models import BaseModel

__all__ = ["ImageDataURL"]


class ImageDataURL(BaseModel):
    """A generated image returned by URL rather than inline data."""

    # Position of this image within the batch of generated images.
    index: int

    # Discriminator distinguishing this variant from ImageDataB64.
    type: Literal["url"]

    url: str
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union
from typing_extensions import Literal, Annotated, TypeAlias

from .._utils import PropertyInfo
from .._models import BaseModel
from .image_data_b64 import ImageDataB64
from .image_data_url import ImageDataURL

__all__ = ["ImageFile", "Data"]

# Tagged union of image payloads, discriminated on the "type" field
# ("b64_json" vs "url") so pydantic picks the right variant directly.
Data: TypeAlias = Annotated[Union[ImageDataB64, ImageDataURL], PropertyInfo(discriminator="type")]


class ImageFile(BaseModel):
    """Response envelope for an image-generation request."""

    id: str

    # One entry per generated image; each is either base64 data or a URL.
    data: List[Data]

    model: str

    object: Literal["list"]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Iterable
from typing_extensions import Literal, Required, TypedDict

__all__ = ["ImageGenerateParams", "ImageLora"]


class ImageGenerateParams(TypedDict, total=False):
    """Request body for image generation; only ``model`` and ``prompt`` are required."""

    # The Literal lists well-known model IDs for editor autocomplete, while the
    # Union with plain ``str`` still accepts any other model name.
    model: Required[
        Union[
            Literal[
                "black-forest-labs/FLUX.1-schnell-Free",
                "black-forest-labs/FLUX.1-schnell",
                "black-forest-labs/FLUX.1.1-pro",
            ],
            str,
        ]
    ]
    """The model to use for image generation.

    [See all of Together AI's image models](https://docs.together.ai/docs/serverless-models#image-models)
    """

    prompt: Required[str]
    """A description of the desired images. Maximum length varies by model."""

    disable_safety_checker: bool
    """If true, disables the safety checker for image generation."""

    guidance_scale: float
    """Adjusts the alignment of the generated image with the input prompt.

    Higher values (e.g., 8-10) make the output more faithful to the prompt, while
    lower values (e.g., 1-5) encourage more creative freedom.
    """

    height: int
    """Height of the image to generate in number of pixels."""

    image_loras: Iterable[ImageLora]
    """
    An array of objects that define LoRAs (Low-Rank Adaptations) to influence the
    generated image.
    """

    image_url: str
    """URL of an image to use for image models that support it."""

    n: int
    """Number of image results to generate."""

    negative_prompt: str
    """The prompt or prompts not to guide the image generation."""

    output_format: Literal["jpeg", "png"]
    """The format of the image response.

    Can be either be `jpeg` or `png`. Defaults to `jpeg`.
    """

    response_format: Literal["base64", "url"]
    """Format of the image response. Can be either a base64 string or a URL."""

    seed: int
    """Seed used for generation. Can be used to reproduce image generations."""

    steps: int
    """Number of generation steps."""

    width: int
    """Width of the image to generate in number of pixels."""


class ImageLora(TypedDict, total=False):
    """One LoRA adapter to apply during generation; both keys are required."""

    path: Required[str]
    """The URL of the LoRA to apply (e.g.

    https://huggingface.co/strangerzonehf/Flux-Midjourney-Mix2-LoRA).
    """

    scale: Required[float]
    """The strength of the LoRA's influence. Most LoRA's recommend a value of 1."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from datetime import datetime
from typing_extensions import Literal

from pydantic import Field as FieldInfo

from .._models import BaseModel

__all__ = ["JobListResponse", "Data", "DataArgs", "DataStatusUpdate"]


class DataArgs(BaseModel):
    """Arguments the job was created with."""

    description: Optional[str] = None

    # Wire keys are camelCase ("modelName"/"modelSource"); attributes carry an
    # "x_" prefix — presumably to sidestep pydantic's "model_" namespace; TODO confirm.
    x_model_name: Optional[str] = FieldInfo(alias="modelName", default=None)

    x_model_source: Optional[str] = FieldInfo(alias="modelSource", default=None)


class DataStatusUpdate(BaseModel):
    """One entry in a job's status history."""

    message: str

    status: str

    timestamp: datetime


class Data(BaseModel):
    """A single job record in the list response."""

    args: DataArgs

    created_at: datetime

    job_id: str

    status: Literal["Queued", "Running", "Complete", "Failed"]

    # Chronological status history for the job.
    status_updates: List[DataStatusUpdate]

    type: str

    updated_at: datetime


class JobListResponse(BaseModel):
    """Envelope for the job list endpoint."""

    data: List[Data]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from datetime import datetime
from typing_extensions import Literal

from pydantic import Field as FieldInfo

from .._models import BaseModel

__all__ = ["JobRetrieveResponse", "Args", "StatusUpdate"]


class Args(BaseModel):
    """Arguments the job was created with."""

    description: Optional[str] = None

    # Wire keys are camelCase ("modelName"/"modelSource"); attributes carry an
    # "x_" prefix — presumably to sidestep pydantic's "model_" namespace; TODO confirm.
    x_model_name: Optional[str] = FieldInfo(alias="modelName", default=None)

    x_model_source: Optional[str] = FieldInfo(alias="modelSource", default=None)


class StatusUpdate(BaseModel):
    """One entry in the job's status history."""

    message: str

    status: str

    timestamp: datetime


class JobRetrieveResponse(BaseModel):
    """A single job record returned by the retrieve endpoint."""

    args: Args

    created_at: datetime

    job_id: str

    status: Literal["Queued", "Running", "Complete", "Failed"]

    # Chronological status history for the job.
    status_updates: List[StatusUpdate]

    type: str

    updated_at: datetime
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional

from .._models import BaseModel

__all__ = ["LogProbs"]


class LogProbs(BaseModel):
    """Per-token log-probability information; the three lists are parallel."""

    # NOTE(review): token IDs are typed float per the OpenAPI spec, though
    # integers would normally be expected here — verify against the API.
    token_ids: Optional[List[float]] = None
    """List of token IDs corresponding to the logprobs"""

    token_logprobs: Optional[List[float]] = None
    """List of token log probabilities"""

    tokens: Optional[List[str]] = None
    """List of token strings"""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List
from typing_extensions import TypeAlias

from .model_object import ModelObject

__all__ = ["ModelListResponse"]

# The list-models endpoint returns a bare JSON array, so the response type is
# an alias rather than an envelope model.
ModelListResponse: TypeAlias = List[ModelObject]
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from .._models import BaseModel

__all__ = ["ModelObject", "Pricing"]


class Pricing(BaseModel):
    """Pricing figures for a model; units are not specified in this schema."""

    base: float

    finetune: float

    hourly: float

    input: float

    output: float


class ModelObject(BaseModel):
    """A model entry as returned by the models API."""

    id: str

    # Creation time as an integer — presumably a Unix timestamp; TODO confirm.
    created: int

    object: str

    type: Literal["chat", "language", "code", "image", "embedding", "moderation", "rerank"]

    context_length: Optional[int] = None

    display_name: Optional[str] = None

    license: Optional[str] = None

    link: Optional[str] = None

    organization: Optional[str] = None

    pricing: Optional[Pricing] = None
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing_extensions import Literal, Required, TypedDict

__all__ = ["ModelUploadParams"]


class ModelUploadParams(TypedDict, total=False):
    """Request body for uploading a model; ``model_name`` and ``model_source`` are required."""

    model_name: Required[str]
    """The name to give to your uploaded model"""

    model_source: Required[str]
    """The source location of the model (Hugging Face repo or S3 path)"""

    base_model: str
    """
    The base model to use for an adapter if setting it to run against a serverless
    pool. Only used for model_type `adapter`.
    """

    description: str
    """A description of your model"""

    hf_token: str
    """Hugging Face token (if uploading from Hugging Face)"""

    lora_model: str
    """
    The lora pool to use for an adapter if setting it to run against, say, a
    dedicated pool. Only used for model_type `adapter`.
    """

    model_type: Literal["model", "adapter"]
    """Whether the model is a full model or an adapter"""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from pydantic import Field as FieldInfo

from .._models import BaseModel

__all__ = ["ModelUploadResponse", "Data"]


class Data(BaseModel):
    """Identifiers for the upload job and the model being created."""

    job_id: str

    # Wire keys keep the raw "model_*" names; attributes carry an "x_" prefix —
    # presumably to avoid pydantic v2's protected "model_" namespace; TODO confirm.
    x_model_id: str = FieldInfo(alias="model_id")

    x_model_name: str = FieldInfo(alias="model_name")

    x_model_source: str = FieldInfo(alias="model_source")


class ModelUploadResponse(BaseModel):
    """Response returned after requesting a model upload."""

    data: Data

    message: str
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Dict, Union, Iterable
from typing_extensions import Literal, Required, TypedDict

from .._types import SequenceNotStr

__all__ = ["RerankCreateParams"]


class RerankCreateParams(TypedDict, total=False):
    """Request body for a rerank call; ``documents``, ``model`` and ``query`` are required."""

    # SequenceNotStr keeps a bare string from being accepted as a sequence of
    # one-character "documents".
    documents: Required[Union[Iterable[Dict[str, object]], SequenceNotStr[str]]]
    """List of documents, which can be either strings or objects."""

    # Literal offers the known model for autocomplete; the Union with ``str``
    # still accepts any other model name.
    model: Required[Union[Literal["Salesforce/Llama-Rank-v1"], str]]
    """The model to be used for the rerank request.

    [See all of Together AI's rerank models](https://docs.together.ai/docs/serverless-models#rerank-models)
    """

    query: Required[str]
    """The search query to be used for ranking."""

    rank_fields: SequenceNotStr[str]
    """List of keys in the JSON Object document to rank by.

    Defaults to use all supplied keys for ranking.
    """

    return_documents: bool
    """Whether to return supplied documents with the response."""

    top_n: int
    """The number of top results to return."""
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Optional
from typing_extensions import Literal

from .._models import BaseModel
from .chat.chat_completion_usage import ChatCompletionUsage

__all__ = ["RerankCreateResponse", "Result", "ResultDocument"]


class ResultDocument(BaseModel):
    """The (optionally returned) document text for a ranked result."""

    text: Optional[str] = None


class Result(BaseModel):
    """One ranked document."""

    document: ResultDocument

    # Index of this document in the request's ``documents`` list.
    index: int

    relevance_score: float


class RerankCreateResponse(BaseModel):
    """Response envelope for a rerank request."""

    model: str
    """The model to be used for the rerank request."""

    object: Literal["rerank"]
    """Object type"""

    results: List[Result]

    id: Optional[str] = None
    """Request ID"""

    # Token-usage accounting shares the chat-completion usage schema.
    usage: Optional[ChatCompletionUsage] = None