@ai-sdk/togetherai 0.2.14 → 1.0.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,75 +1,274 @@
  # @ai-sdk/togetherai

- ## 0.2.14
+ ## 1.0.0-alpha.1

  ### Patch Changes

- - Updated dependencies [d87b9d1]
- - @ai-sdk/provider-utils@2.2.8
- - @ai-sdk/openai-compatible@0.2.14
+ - Updated dependencies [3f2f00c]
+ - @ai-sdk/provider@2.0.0-alpha.1
+ - @ai-sdk/openai-compatible@1.0.0-alpha.1
+ - @ai-sdk/provider-utils@3.0.0-alpha.1

- ## 0.2.13
+ ## 1.0.0-canary.19

  ### Patch Changes

- - Updated dependencies [23571c9]
- - @ai-sdk/openai-compatible@0.2.13
+ - Updated dependencies [faf8446]
+ - @ai-sdk/provider-utils@3.0.0-canary.19
+ - @ai-sdk/openai-compatible@1.0.0-canary.19

- ## 0.2.12
+ ## 1.0.0-canary.18

  ### Patch Changes

- - Updated dependencies [13492fe]
- - @ai-sdk/openai-compatible@0.2.12
+ - Updated dependencies [40acf9b]
+ - @ai-sdk/provider-utils@3.0.0-canary.18
+ - @ai-sdk/openai-compatible@1.0.0-canary.18

- ## 0.2.11
+ ## 1.0.0-canary.17
+
+ ### Major Changes
+
+ - 516be5b: ### Move Image Model Settings into generate options
+
+ Image Models no longer have settings. Instead, `maxImagesPerCall` can be passed directly to `generateImage()`. All other image settings can be passed to `providerOptions[provider]`.
+
+ Before
+
+ ```js
+ await generateImage({
+ model: luma.image('photon-flash-1', {
+ maxImagesPerCall: 5,
+ pollIntervalMillis: 500,
+ }),
+ prompt,
+ n: 10,
+ });
+ ```
+
+ After
+
+ ```js
+ await generateImage({
+ model: luma.image('photon-flash-1'),
+ prompt,
+ n: 10,
+ maxImagesPerCall: 5,
+ providerOptions: {
+ luma: { pollIntervalMillis: 5 },
+ },
+ });
+ ```
+
+ Pull Request: https://github.com/vercel/ai/pull/6180
+
+ ### Patch Changes
+
+ - Updated dependencies [516be5b]
+ - Updated dependencies [ea7a7c9]
+ - @ai-sdk/openai-compatible@1.0.0-canary.17
+ - @ai-sdk/provider-utils@3.0.0-canary.17
+
+ ## 1.0.0-canary.16
+
+ ### Patch Changes
+
+ - Updated dependencies [87b828f]
+ - @ai-sdk/provider-utils@3.0.0-canary.16
+ - @ai-sdk/openai-compatible@1.0.0-canary.16
+
+ ## 1.0.0-canary.15
+
+ ### Patch Changes
+
+ - Updated dependencies [a571d6e]
+ - Updated dependencies [a8c8bd5]
+ - Updated dependencies [7979f7f]
+ - Updated dependencies [41fa418]
+ - @ai-sdk/provider-utils@3.0.0-canary.15
+ - @ai-sdk/provider@2.0.0-canary.14
+ - @ai-sdk/openai-compatible@1.0.0-canary.15
+
+ ## 1.0.0-canary.14
+
+ ### Patch Changes
+
+ - Updated dependencies [957b739]
+ - Updated dependencies [9bd5ab5]
+ - @ai-sdk/provider-utils@3.0.0-canary.14
+ - @ai-sdk/provider@2.0.0-canary.13
+ - @ai-sdk/openai-compatible@1.0.0-canary.14
+
+ ## 1.0.0-canary.13
+
+ ### Patch Changes
+
+ - d9209ca: fix (image-model): `specificationVersion: v1` -> `v2`
+ - Updated dependencies [7b3ae3f]
+ - Updated dependencies [d9209ca]
+ - Updated dependencies [0ff02bb]
+ - @ai-sdk/provider@2.0.0-canary.12
+ - @ai-sdk/openai-compatible@1.0.0-canary.13
+ - @ai-sdk/provider-utils@3.0.0-canary.13
+
+ ## 1.0.0-canary.12
+
+ ### Patch Changes
+
+ - Updated dependencies [9bf7291]
+ - Updated dependencies [4617fab]
+ - Updated dependencies [e030615]
+ - @ai-sdk/provider@2.0.0-canary.11
+ - @ai-sdk/openai-compatible@1.0.0-canary.12
+ - @ai-sdk/provider-utils@3.0.0-canary.12
+
+ ## 1.0.0-canary.11
+
+ ### Patch Changes
+
+ - 9301f86: refactor (image-model): rename `ImageModelV1` to `ImageModelV2`
+ - Updated dependencies [db72adc]
+ - Updated dependencies [42e32b0]
+ - Updated dependencies [66962ed]
+ - Updated dependencies [9301f86]
+ - Updated dependencies [a3f768e]
+ - @ai-sdk/openai-compatible@1.0.0-canary.11
+ - @ai-sdk/provider-utils@3.0.0-canary.11
+ - @ai-sdk/provider@2.0.0-canary.10
+
+ ## 1.0.0-canary.10
+
+ ### Patch Changes
+
+ - Updated dependencies [cf8280e]
+ - Updated dependencies [e86be6f]
+ - @ai-sdk/openai-compatible@1.0.0-canary.10
+ - @ai-sdk/provider@2.0.0-canary.9
+ - @ai-sdk/provider-utils@3.0.0-canary.10
+
+ ## 1.0.0-canary.9
+
+ ### Patch Changes
+
+ - Updated dependencies [95857aa]
+ - Updated dependencies [7ea4132]
+ - @ai-sdk/provider@2.0.0-canary.8
+ - @ai-sdk/openai-compatible@1.0.0-canary.9
+ - @ai-sdk/provider-utils@3.0.0-canary.9
+
+ ## 1.0.0-canary.8

  ### Patch Changes

- - Updated dependencies [b5c9cd4]
- - @ai-sdk/openai-compatible@0.2.11
+ - Updated dependencies [5d142ab]
+ - Updated dependencies [b6b43c7]
+ - Updated dependencies [b9a6121]
+ - Updated dependencies [8aa9e20]
+ - Updated dependencies [3795467]
+ - @ai-sdk/provider-utils@3.0.0-canary.8
+ - @ai-sdk/provider@2.0.0-canary.7
+ - @ai-sdk/openai-compatible@1.0.0-canary.8

- ## 0.2.10
+ ## 1.0.0-canary.7

  ### Patch Changes

- - Updated dependencies [beef951]
- - @ai-sdk/provider@1.1.3
- - @ai-sdk/openai-compatible@0.2.10
- - @ai-sdk/provider-utils@2.2.7
+ - fa49207: feat(providers/openai-compatible): convert to providerOptions
+ - 26735b5: chore(embedding-model): add v2 interface
+ - Updated dependencies [fa49207]
+ - Updated dependencies [26735b5]
+ - Updated dependencies [443d8ec]
+ - Updated dependencies [14c9410]
+ - Updated dependencies [d9c98f4]
+ - Updated dependencies [c4a2fec]
+ - Updated dependencies [0054544]
+ - Updated dependencies [9e9c809]
+ - Updated dependencies [32831c6]
+ - Updated dependencies [d0f9495]
+ - Updated dependencies [fd65bc6]
+ - Updated dependencies [393138b]
+ - Updated dependencies [7182d14]
+ - @ai-sdk/openai-compatible@1.0.0-canary.7
+ - @ai-sdk/provider@2.0.0-canary.6
+ - @ai-sdk/provider-utils@3.0.0-canary.7

- ## 0.2.9
+ ## 1.0.0-canary.6

  ### Patch Changes

- - Updated dependencies [1bbc698]
- - @ai-sdk/openai-compatible@0.2.9
+ - Updated dependencies [6db02c9]
+ - Updated dependencies [411e483]
+ - Updated dependencies [79457bd]
+ - Updated dependencies [ad80501]
+ - Updated dependencies [1766ede]
+ - Updated dependencies [f10304b]
+ - @ai-sdk/openai-compatible@1.0.0-canary.6
+ - @ai-sdk/provider@2.0.0-canary.5
+ - @ai-sdk/provider-utils@3.0.0-canary.6

- ## 0.2.8
+ ## 1.0.0-canary.5

  ### Patch Changes

- - Updated dependencies [013faa8]
- - @ai-sdk/provider@1.1.2
- - @ai-sdk/openai-compatible@0.2.8
- - @ai-sdk/provider-utils@2.2.6
+ - Updated dependencies [6f6bb89]
+ - @ai-sdk/provider@2.0.0-canary.4
+ - @ai-sdk/openai-compatible@1.0.0-canary.5
+ - @ai-sdk/provider-utils@3.0.0-canary.5

- ## 0.2.7
+ ## 1.0.0-canary.4

  ### Patch Changes

- - Updated dependencies [c21fa6d]
- - @ai-sdk/provider-utils@2.2.5
- - @ai-sdk/provider@1.1.1
- - @ai-sdk/openai-compatible@0.2.7
+ - Updated dependencies [d1a1aa1]
+ - @ai-sdk/provider@2.0.0-canary.3
+ - @ai-sdk/openai-compatible@1.0.0-canary.4
+ - @ai-sdk/provider-utils@3.0.0-canary.4
+
+ ## 1.0.0-canary.3
+
+ ### Patch Changes
+
+ - Updated dependencies [a166433]
+ - Updated dependencies [abf9a79]
+ - Updated dependencies [9f95b35]
+ - Updated dependencies [0a87932]
+ - Updated dependencies [6dc848c]
+ - @ai-sdk/provider-utils@3.0.0-canary.3
+ - @ai-sdk/provider@2.0.0-canary.2
+ - @ai-sdk/openai-compatible@1.0.0-canary.3
+
+ ## 1.0.0-canary.2
+
+ ### Patch Changes
+
+ - Updated dependencies [c57e248]
+ - Updated dependencies [33f4a6a]
+ - @ai-sdk/provider@2.0.0-canary.1
+ - @ai-sdk/openai-compatible@1.0.0-canary.2
+ - @ai-sdk/provider-utils@3.0.0-canary.2
+
+ ## 1.0.0-canary.1
+
+ ### Patch Changes
+
+ - Updated dependencies [060370c]
+ - Updated dependencies [0c0c0b3]
+ - Updated dependencies [63d791d]
+ - @ai-sdk/provider-utils@3.0.0-canary.1
+ - @ai-sdk/openai-compatible@1.0.0-canary.1
+
+ ## 1.0.0-canary.0
+
+ ### Major Changes

- ## 0.2.6
+ - d5f588f: AI SDK 5

  ### Patch Changes

- - Updated dependencies [2c19b9a]
- - @ai-sdk/provider-utils@2.2.4
- - @ai-sdk/openai-compatible@0.2.6
+ - Updated dependencies [d5f588f]
+ - @ai-sdk/provider-utils@3.0.0-canary.0
+ - @ai-sdk/openai-compatible@1.0.0-canary.0
+ - @ai-sdk/provider@2.0.0-canary.0

  ## 0.2.5

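The `1.0.0-canary.17` entry above is the breaking change that shows up most directly in this package's public API: `TogetherAIImageSettings` (and its `maxImagesPerCall` option) disappears from the type definitions below, and the limit moves to the `generateImage()` call itself. A minimal before/after sketch for Together AI, with the `experimental_generateImage` import taken from AI SDK 4.x (it may differ in the 1.0 alpha line) and `black-forest-labs/FLUX.1-schnell` used purely as an example model id:

```ts
// Sketch only — the `ai` import name follows AI SDK 4.x and is an assumption for the alpha line.
import { experimental_generateImage as generateImage } from 'ai';
import { togetherai } from '@ai-sdk/togetherai';

// Before (0.2.x): the per-call limit lived on the image model settings object.
// const model = togetherai.image('black-forest-labs/FLUX.1-schnell', { maxImagesPerCall: 4 });

// After (1.0.0-alpha.1): image models take no settings; pass the limit to generateImage() instead.
const { images } = await generateImage({
  model: togetherai.imageModel('black-forest-labs/FLUX.1-schnell'),
  prompt: 'A sunrise over snow-capped mountains',
  n: 8,
  maxImagesPerCall: 4, // the SDK splits the request into batches of at most 4 images
});
```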
package/dist/index.d.mts CHANGED
@@ -1,27 +1,14 @@
- import { ProviderV1, LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { OpenAICompatibleChatSettings, OpenAICompatibleEmbeddingSettings, OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
  export { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';

  type TogetherAIChatModelId = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo' | 'meta-llama/Llama-3.2-3B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite' | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite' | 'meta-llama/Llama-3-8b-chat-hf' | 'meta-llama/Llama-3-70b-chat-hf' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'microsoft/WizardLM-2-8x22B' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'databricks/dbrx-instruct' | 'deepseek-ai/deepseek-llm-67b-chat' | 'deepseek-ai/DeepSeek-V3' | 'google/gemma-2b-it' | 'Gryphe/MythoMax-L2-13b' | 'meta-llama/Llama-2-13b-chat-hf' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO' | 'Qwen/Qwen2.5-7B-Instruct-Turbo' | 'Qwen/Qwen2.5-72B-Instruct-Turbo' | 'Qwen/Qwen2-72B-Instruct' | 'upstage/SOLAR-10.7B-Instruct-v1.0' | (string & {});
- interface TogetherAIChatSettings extends OpenAICompatibleChatSettings {
- }

  type TogetherAIEmbeddingModelId = 'togethercomputer/m2-bert-80M-2k-retrieval' | 'togethercomputer/m2-bert-80M-32k-retrieval' | 'togethercomputer/m2-bert-80M-8k-retrieval' | 'WhereIsAI/UAE-Large-V1' | 'BAAI/bge-large-en-v1.5' | 'BAAI/bge-base-en-v1.5' | 'sentence-transformers/msmarco-bert-base-dot-v5' | 'bert-base-uncased' | (string & {});
- interface TogetherAIEmbeddingSettings extends OpenAICompatibleEmbeddingSettings {
- }

  type TogetherAICompletionModelId = 'meta-llama/Llama-2-70b-hf' | 'mistralai/Mistral-7B-v0.1' | 'mistralai/Mixtral-8x7B-v0.1' | 'Meta-Llama/Llama-Guard-7b' | 'codellama/CodeLlama-34b-Instruct-hf' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | (string & {});
- interface TogetherAICompletionSettings extends OpenAICompatibleCompletionSettings {
- }

  type TogetherAIImageModelId = 'stabilityai/stable-diffusion-xl-base-1.0' | 'black-forest-labs/FLUX.1-dev' | 'black-forest-labs/FLUX.1-dev-lora' | 'black-forest-labs/FLUX.1-schnell' | 'black-forest-labs/FLUX.1-canny' | 'black-forest-labs/FLUX.1-depth' | 'black-forest-labs/FLUX.1-redux' | 'black-forest-labs/FLUX.1.1-pro' | 'black-forest-labs/FLUX.1-pro' | 'black-forest-labs/FLUX.1-schnell-Free' | (string & {});
- interface TogetherAIImageSettings {
- /**
- Override the maximum number of images per call (default 1)
- */
- maxImagesPerCall?: number;
- }

  interface TogetherAIProviderSettings {
  /**
@@ -42,35 +29,36 @@ interface TogetherAIProviderSettings {
  */
  fetch?: FetchFunction;
  }
- interface TogetherAIProvider extends ProviderV1 {
+ interface TogetherAIProvider extends ProviderV2 {
  /**
  Creates a model for text generation.
  */
- (modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ (modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- chatModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- languageModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a completion model for text generation.
  */
- completionModel(modelId: TogetherAICompletionModelId, settings?: TogetherAICompletionSettings): LanguageModelV1;
+ completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;
  /**
  Creates a text embedding model for text generation.
  */
- textEmbeddingModel(modelId: TogetherAIEmbeddingModelId, settings?: TogetherAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
- Creates a model for image generation.
- */
- image(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ @deprecated Use `imageModel` instead.
+ */
+ image(modelId: TogetherAIImageModelId): ImageModelV2;
  /**
- Creates a model for image generation.
- */
- imageModel(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ */
+ imageModel(modelId: TogetherAIImageModelId): ImageModelV2;
  }
  declare function createTogetherAI(options?: TogetherAIProviderSettings): TogetherAIProvider;
  declare const togetherai: TogetherAIProvider;
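Net effect of the typing changes above: every factory on `TogetherAIProvider` now accepts only a model id (the `*Settings` parameter types are gone), the returned models implement the V2 specification interfaces, and `image()` is deprecated in favor of `imageModel()`. A small sketch of the new call shapes — the model ids come from the unions above, everything else is illustrative:

```ts
import { createTogetherAI } from '@ai-sdk/togetherai';

// Provider-level options (apiKey, baseURL, headers, fetch) are unchanged;
// the API key defaults to the TOGETHER_AI_API_KEY environment variable.
const provider = createTogetherAI({ baseURL: 'https://api.together.xyz/v1/' });

// 1.0.0-alpha.1: every factory takes a model id only — no second settings argument.
const chat = provider.chatModel('meta-llama/Llama-3.3-70B-Instruct-Turbo'); // LanguageModelV2
const completion = provider.completionModel('mistralai/Mistral-7B-v0.1'); // LanguageModelV2
const embedder = provider.textEmbeddingModel('BAAI/bge-base-en-v1.5'); // EmbeddingModelV2<string>
const image = provider.imageModel('black-forest-labs/FLUX.1-schnell'); // ImageModelV2
```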
package/dist/index.d.ts CHANGED
@@ -1,27 +1,14 @@
- import { ProviderV1, LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { OpenAICompatibleChatSettings, OpenAICompatibleEmbeddingSettings, OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
  export { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';

  type TogetherAIChatModelId = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo' | 'meta-llama/Llama-3.2-3B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite' | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite' | 'meta-llama/Llama-3-8b-chat-hf' | 'meta-llama/Llama-3-70b-chat-hf' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'microsoft/WizardLM-2-8x22B' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'databricks/dbrx-instruct' | 'deepseek-ai/deepseek-llm-67b-chat' | 'deepseek-ai/DeepSeek-V3' | 'google/gemma-2b-it' | 'Gryphe/MythoMax-L2-13b' | 'meta-llama/Llama-2-13b-chat-hf' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO' | 'Qwen/Qwen2.5-7B-Instruct-Turbo' | 'Qwen/Qwen2.5-72B-Instruct-Turbo' | 'Qwen/Qwen2-72B-Instruct' | 'upstage/SOLAR-10.7B-Instruct-v1.0' | (string & {});
- interface TogetherAIChatSettings extends OpenAICompatibleChatSettings {
- }

  type TogetherAIEmbeddingModelId = 'togethercomputer/m2-bert-80M-2k-retrieval' | 'togethercomputer/m2-bert-80M-32k-retrieval' | 'togethercomputer/m2-bert-80M-8k-retrieval' | 'WhereIsAI/UAE-Large-V1' | 'BAAI/bge-large-en-v1.5' | 'BAAI/bge-base-en-v1.5' | 'sentence-transformers/msmarco-bert-base-dot-v5' | 'bert-base-uncased' | (string & {});
- interface TogetherAIEmbeddingSettings extends OpenAICompatibleEmbeddingSettings {
- }

  type TogetherAICompletionModelId = 'meta-llama/Llama-2-70b-hf' | 'mistralai/Mistral-7B-v0.1' | 'mistralai/Mixtral-8x7B-v0.1' | 'Meta-Llama/Llama-Guard-7b' | 'codellama/CodeLlama-34b-Instruct-hf' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | (string & {});
- interface TogetherAICompletionSettings extends OpenAICompatibleCompletionSettings {
- }

  type TogetherAIImageModelId = 'stabilityai/stable-diffusion-xl-base-1.0' | 'black-forest-labs/FLUX.1-dev' | 'black-forest-labs/FLUX.1-dev-lora' | 'black-forest-labs/FLUX.1-schnell' | 'black-forest-labs/FLUX.1-canny' | 'black-forest-labs/FLUX.1-depth' | 'black-forest-labs/FLUX.1-redux' | 'black-forest-labs/FLUX.1.1-pro' | 'black-forest-labs/FLUX.1-pro' | 'black-forest-labs/FLUX.1-schnell-Free' | (string & {});
- interface TogetherAIImageSettings {
- /**
- Override the maximum number of images per call (default 1)
- */
- maxImagesPerCall?: number;
- }

  interface TogetherAIProviderSettings {
  /**
@@ -42,35 +29,36 @@ interface TogetherAIProviderSettings {
  */
  fetch?: FetchFunction;
  }
- interface TogetherAIProvider extends ProviderV1 {
+ interface TogetherAIProvider extends ProviderV2 {
  /**
  Creates a model for text generation.
  */
- (modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ (modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- chatModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- languageModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a completion model for text generation.
  */
- completionModel(modelId: TogetherAICompletionModelId, settings?: TogetherAICompletionSettings): LanguageModelV1;
+ completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;
  /**
  Creates a text embedding model for text generation.
  */
- textEmbeddingModel(modelId: TogetherAIEmbeddingModelId, settings?: TogetherAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
- Creates a model for image generation.
- */
- image(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ @deprecated Use `imageModel` instead.
+ */
+ image(modelId: TogetherAIImageModelId): ImageModelV2;
  /**
- Creates a model for image generation.
- */
- imageModel(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ */
+ imageModel(modelId: TogetherAIImageModelId): ImageModelV2;
  }
  declare function createTogetherAI(options?: TogetherAIProviderSettings): TogetherAIProvider;
  declare const togetherai: TogetherAIProvider;
package/dist/index.js CHANGED
@@ -33,19 +33,15 @@ var import_provider_utils2 = require("@ai-sdk/provider-utils");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  var import_zod = require("zod");
  var TogetherAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.maxImagesPerCall = 1;
  }
  get provider() {
  return this.config.provider;
  }
- get maxImagesPerCall() {
- var _a;
- return (_a = this.settings.maxImagesPerCall) != null ? _a : 1;
- }
  async doGenerate({
  prompt,
  n,
@@ -135,27 +131,25 @@ function createTogetherAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createChatModel = (modelId, settings = {}) => {
- return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId, settings, {
- ...getCommonModelConfig("chat"),
- defaultObjectGenerationMode: "tool"
- });
+ const createChatModel = (modelId) => {
+ return new import_openai_compatible.OpenAICompatibleChatLanguageModel(
+ modelId,
+ getCommonModelConfig("chat")
+ );
  };
- const createCompletionModel = (modelId, settings = {}) => new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(
+ const createCompletionModel = (modelId) => new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
  getCommonModelConfig("completion")
  );
- const createTextEmbeddingModel = (modelId, settings = {}) => new import_openai_compatible.OpenAICompatibleEmbeddingModel(
+ const createTextEmbeddingModel = (modelId) => new import_openai_compatible.OpenAICompatibleEmbeddingModel(
  modelId,
- settings,
  getCommonModelConfig("embedding")
  );
- const createImageModel = (modelId, settings = {}) => new TogetherAIImageModel(modelId, settings, {
+ const createImageModel = (modelId) => new TogetherAIImageModel(modelId, {
  ...getCommonModelConfig("image"),
  baseURL: baseURL != null ? baseURL : "https://api.together.xyz/v1/"
  });
- const provider = (modelId, settings) => createChatModel(modelId, settings);
+ const provider = (modelId) => createChatModel(modelId);
  provider.completionModel = createCompletionModel;
  provider.languageModel = createChatModel;
  provider.chatModel = createChatModel;
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["export { createTogetherAI, togetherai } from './togetherai-provider';\nexport type {\n TogetherAIProvider,\n TogetherAIProviderSettings,\n} from './togetherai-provider';\nexport type { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';\n","import {\n LanguageModelV1,\n EmbeddingModelV1,\n ProviderV1,\n ImageModelV1,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIChatModelId,\n TogetherAIChatSettings,\n} from './togetherai-chat-settings';\nimport {\n TogetherAIEmbeddingModelId,\n TogetherAIEmbeddingSettings,\n} from './togetherai-embedding-settings';\nimport {\n TogetherAICompletionModelId,\n TogetherAICompletionSettings,\n} from './togetherai-completion-settings';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV1 {\n /**\nCreates a model for text generation.\n*/\n (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(\n modelId: TogetherAICompletionModelId,\n settings?: TogetherAICompletionSettings,\n ): LanguageModelV1;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n settings?: TogetherAIEmbeddingSettings,\n ): EmbeddingModelV1<string>;\n\n /**\n Creates a model for image generation.\n */\n image(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n\n /**\n Creates a model for image generation.\n */\n imageModel(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (\n modelId: TogetherAIChatModelId,\n settings: TogetherAIChatSettings = {},\n ) => {\n return new OpenAICompatibleChatLanguageModel(modelId, settings, {\n ...getCommonModelConfig('chat'),\n defaultObjectGenerationMode: 'tool',\n });\n };\n\n const createCompletionModel = (\n modelId: TogetherAICompletionModelId,\n settings: TogetherAICompletionSettings = {},\n ) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n settings,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (\n modelId: TogetherAIEmbeddingModelId,\n settings: TogetherAIEmbeddingSettings = {},\n ) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n settings,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (\n modelId: TogetherAIImageModelId,\n settings: TogetherAIImageSettings = {},\n ) =>\n new TogetherAIImageModel(modelId, settings, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ) => createChatModel(modelId, settings);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV1, ImageModelV1CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV1 {\n readonly specificationVersion = 'v1';\n\n get provider(): string {\n return this.config.provider;\n }\n\n get maxImagesPerCall(): number {\n return this.settings.maxImagesPerCall ?? 1;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n readonly settings: TogetherAIImageSettings,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV1['doGenerate']>>\n > {\n const warnings: Array<ImageModelV1CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. 
Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACMA,+BAIO;AACP,IAAAA,yBAIO;;;ACdP,4BAMO;AAKP,iBAAkB;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAWxD,YACW,SACA,UACD,QACR;AAHS;AACA;AACD;AAbV,SAAS,uBAAuB;AAAA,EAc7B;AAAA,EAZH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAEA,IAAI,mBAA2B;AA/BjC;AAgCI,YAAO,UAAK,SAAS,qBAAd,YAAkC;AAAA,EAC3C;AAAA,EAQA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAnDJ;AAoDI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,UAAM,qCAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,aAAS,sCAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,2BAAuB,sDAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,+BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,aAAE,OAAO;AAAA,EAC7C,MAAM,aAAE;AAAA,IACN,aAAE,OAAO;AAAA,MACP,UAAU,aAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,aAAE,OAAO;AAAA,EACrC,OAAO,aAAE,OAAO;AAAA,IACd,SAAS,aAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;ADRM,SAAS,iBACd,UAAsC,CAAC,GACnB;AAlHtB;AAmHE,QAAM,cAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,cAAU,mCAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,
cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CACtB,SACA,WAAmC,CAAC,MACjC;AACH,WAAO,IAAI,2DAAkC,SAAS,UAAU;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,6BAA6B;AAAA,IAC/B,CAAC;AAAA,EACH;AAEA,QAAM,wBAAwB,CAC5B,SACA,WAAyC,CAAC,MAE1C,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAC/B,SACA,WAAwC,CAAC,MAEzC,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CACvB,SACA,WAAoC,CAAC,MAErC,IAAI,qBAAqB,SAAS,UAAU;AAAA,IAC1C,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CACf,SACA,aACG,gBAAgB,SAAS,QAAQ;AAEtC,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":["import_provider_utils"]}
+ {"version":3,"sources":["../src/index.ts","../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["export { createTogetherAI, togetherai } from './togetherai-provider';\nexport type {\n TogetherAIProvider,\n TogetherAIProviderSettings,\n} from './togetherai-provider';\nexport type { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';\n","import {\n LanguageModelV2,\n EmbeddingModelV2,\n ProviderV2,\n ImageModelV2,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIChatModelId } from './togetherai-chat-options';\nimport { TogetherAIEmbeddingModelId } from './togetherai-embedding-options';\nimport { TogetherAICompletionModelId } from './togetherai-completion-options';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV2 {\n /**\nCreates a model for text generation.\n*/\n (modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n ): EmbeddingModelV2<string>;\n\n /**\nCreates a model for image generation.\n@deprecated Use `imageModel` instead.\n*/\n image(modelId: TogetherAIImageModelId): ImageModelV2;\n\n /**\nCreates a model for image generation.\n*/\n imageModel(modelId: TogetherAIImageModelId): ImageModelV2;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId: TogetherAIChatModelId) => {\n return new OpenAICompatibleChatLanguageModel(\n modelId,\n getCommonModelConfig('chat'),\n );\n };\n\n const createCompletionModel = (modelId: TogetherAICompletionModelId) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (modelId: TogetherAIEmbeddingModelId) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (modelId: TogetherAIImageModelId) =>\n new TogetherAIImageModel(modelId, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (modelId: TogetherAIChatModelId) => createChatModel(modelId);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV2, ImageModelV2CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV2 {\n readonly specificationVersion = 'v2';\n readonly maxImagesPerCall = 1;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV2['doGenerate']>>\n > {\n const warnings: Array<ImageModelV2CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACMA,+BAIO;AACP,IAAAA,yBAIO;;;ACdP,4BAMO;AAEP,iBAAkB;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAQxD,YACW,SACD,QACR;AAFS;AACD;AATV,SAAS,uBAAuB;AAChC,SAAS,mBAAmB;AAAA,EASzB;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AA5CJ;AA6CI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,UAAM,qCAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,aAAS,sCAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,2BAAuB,sDAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,+BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,aAAE,OAAO;AAAA,EAC7C,MAAM,aAAE;AAAA,IACN,aAAE,OAAO;AAAA,MACP,UAAU,aAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,aAAE,OAAO;AAAA,EACrC,OAAO,aAAE,OAAO;AAAA,IACd,SAAS,aAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;AD/BM,SAAS,iBACd,UAAsC,CAAC,GACnB;AApFtB;AAqFE,QAAM,cAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,cAAU,mCAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAmC;AAC1D,WAAO,IAAI;AAAA,
MACT;AAAA,MACA,qBAAqB,MAAM;AAAA,IAC7B;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAC7B,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAAC,YAChC,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CAAC,YACxB,IAAI,qBAAqB,SAAS;AAAA,IAChC,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CAAC,YAAmC,gBAAgB,OAAO;AAE5E,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":["import_provider_utils"]}
package/dist/index.mjs CHANGED
@@ -18,19 +18,15 @@ import {
  } from "@ai-sdk/provider-utils";
  import { z } from "zod";
  var TogetherAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.maxImagesPerCall = 1;
  }
  get provider() {
  return this.config.provider;
  }
- get maxImagesPerCall() {
- var _a;
- return (_a = this.settings.maxImagesPerCall) != null ? _a : 1;
- }
  async doGenerate({
  prompt,
  n,
@@ -120,27 +116,25 @@ function createTogetherAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createChatModel = (modelId, settings = {}) => {
- return new OpenAICompatibleChatLanguageModel(modelId, settings, {
- ...getCommonModelConfig("chat"),
- defaultObjectGenerationMode: "tool"
- });
+ const createChatModel = (modelId) => {
+ return new OpenAICompatibleChatLanguageModel(
+ modelId,
+ getCommonModelConfig("chat")
+ );
  };
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+ const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
  getCommonModelConfig("completion")
  );
- const createTextEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+ const createTextEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(
  modelId,
- settings,
  getCommonModelConfig("embedding")
  );
- const createImageModel = (modelId, settings = {}) => new TogetherAIImageModel(modelId, settings, {
+ const createImageModel = (modelId) => new TogetherAIImageModel(modelId, {
  ...getCommonModelConfig("image"),
  baseURL: baseURL != null ? baseURL : "https://api.together.xyz/v1/"
  });
- const provider = (modelId, settings) => createChatModel(modelId, settings);
+ const provider = (modelId) => createChatModel(modelId);
  provider.completionModel = createCompletionModel;
  provider.languageModel = createChatModel;
  provider.chatModel = createChatModel;
@@ -1 +1 @@
- {"version":3,"sources":["../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["import {\n LanguageModelV1,\n EmbeddingModelV1,\n ProviderV1,\n ImageModelV1,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIChatModelId,\n TogetherAIChatSettings,\n} from './togetherai-chat-settings';\nimport {\n TogetherAIEmbeddingModelId,\n TogetherAIEmbeddingSettings,\n} from './togetherai-embedding-settings';\nimport {\n TogetherAICompletionModelId,\n TogetherAICompletionSettings,\n} from './togetherai-completion-settings';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV1 {\n /**\nCreates a model for text generation.\n*/\n (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(\n modelId: TogetherAICompletionModelId,\n settings?: TogetherAICompletionSettings,\n ): LanguageModelV1;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n settings?: TogetherAIEmbeddingSettings,\n ): EmbeddingModelV1<string>;\n\n /**\n Creates a model for image generation.\n */\n image(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n\n /**\n Creates a model for image generation.\n */\n imageModel(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (\n modelId: TogetherAIChatModelId,\n settings: TogetherAIChatSettings = {},\n ) => {\n return new OpenAICompatibleChatLanguageModel(modelId, settings, {\n ...getCommonModelConfig('chat'),\n defaultObjectGenerationMode: 'tool',\n });\n };\n\n const createCompletionModel = (\n modelId: TogetherAICompletionModelId,\n settings: TogetherAICompletionSettings = {},\n ) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n settings,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (\n modelId: TogetherAIEmbeddingModelId,\n settings: TogetherAIEmbeddingSettings = {},\n ) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n settings,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (\n modelId: TogetherAIImageModelId,\n settings: TogetherAIImageSettings = {},\n ) =>\n new TogetherAIImageModel(modelId, settings, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ) => createChatModel(modelId, settings);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV1, ImageModelV1CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV1 {\n readonly specificationVersion = 'v1';\n\n get provider(): string {\n return this.config.provider;\n }\n\n get maxImagesPerCall(): number {\n return this.settings.maxImagesPerCall ?? 1;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n readonly settings: TogetherAIImageSettings,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV1['doGenerate']>>\n > {\n const warnings: Array<ImageModelV1CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. 
Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";AAMA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,OACK;;;ACdP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,OACK;AAKP,SAAS,SAAS;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAWxD,YACW,SACA,UACD,QACR;AAHS;AACA;AACD;AAbV,SAAS,uBAAuB;AAAA,EAc7B;AAAA,EAZH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAEA,IAAI,mBAA2B;AA/BjC;AAgCI,YAAO,UAAK,SAAS,qBAAd,YAAkC;AAAA,EAC3C;AAAA,EAQA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAnDJ;AAoDI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,MAAM,cAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,SAAS,eAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,uBAAuB,+BAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,2BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,EAAE,OAAO;AAAA,EAC7C,MAAM,EAAE;AAAA,IACN,EAAE,OAAO;AAAA,MACP,UAAU,EAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,EAAE,OAAO;AAAA,EACrC,OAAO,EAAE,OAAO;AAAA,IACd,SAAS,EAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;ADRM,SAAS,iBACd,UAAsC,CAAC,GACnB;AAlHtB;AAmHE,QAAM,UAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,UAAU,WAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb
;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CACtB,SACA,WAAmC,CAAC,MACjC;AACH,WAAO,IAAI,kCAAkC,SAAS,UAAU;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,6BAA6B;AAAA,IAC/B,CAAC;AAAA,EACH;AAEA,QAAM,wBAAwB,CAC5B,SACA,WAAyC,CAAC,MAE1C,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAC/B,SACA,WAAwC,CAAC,MAEzC,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CACvB,SACA,WAAoC,CAAC,MAErC,IAAI,qBAAqB,SAAS,UAAU;AAAA,IAC1C,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CACf,SACA,aACG,gBAAgB,SAAS,QAAQ;AAEtC,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":[]}
1
+ {"version":3,"sources":["../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["import {\n LanguageModelV2,\n EmbeddingModelV2,\n ProviderV2,\n ImageModelV2,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIChatModelId } from './togetherai-chat-options';\nimport { TogetherAIEmbeddingModelId } from './togetherai-embedding-options';\nimport { TogetherAICompletionModelId } from './togetherai-completion-options';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV2 {\n /**\nCreates a model for text generation.\n*/\n (modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n ): EmbeddingModelV2<string>;\n\n /**\nCreates a model for image generation.\n@deprecated Use `imageModel` instead.\n*/\n image(modelId: TogetherAIImageModelId): ImageModelV2;\n\n /**\nCreates a model for image generation.\n*/\n imageModel(modelId: TogetherAIImageModelId): ImageModelV2;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId: TogetherAIChatModelId) => {\n return new OpenAICompatibleChatLanguageModel(\n modelId,\n getCommonModelConfig('chat'),\n );\n };\n\n const createCompletionModel = (modelId: TogetherAICompletionModelId) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (modelId: TogetherAIEmbeddingModelId) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (modelId: TogetherAIImageModelId) =>\n new TogetherAIImageModel(modelId, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (modelId: TogetherAIChatModelId) => createChatModel(modelId);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV2, ImageModelV2CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV2 {\n readonly specificationVersion = 'v2';\n readonly maxImagesPerCall = 1;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV2['doGenerate']>>\n > {\n const warnings: Array<ImageModelV2CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";AAMA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,OACK;;;ACdP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,OACK;AAEP,SAAS,SAAS;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAQxD,YACW,SACD,QACR;AAFS;AACD;AATV,SAAS,uBAAuB;AAChC,SAAS,mBAAmB;AAAA,EASzB;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AA5CJ;AA6CI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,MAAM,cAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,SAAS,eAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,uBAAuB,+BAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,2BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,EAAE,OAAO;AAAA,EAC7C,MAAM,EAAE;AAAA,IACN,EAAE,OAAO;AAAA,MACP,UAAU,EAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,EAAE,OAAO;AAAA,EACrC,OAAO,EAAE,OAAO;AAAA,IACd,SAAS,EAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;AD/BM,SAAS,iBACd,UAAsC,CAAC,GACnB;AApFtB;AAqFE,QAAM,UAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,UAAU,WAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA
,QAAM,kBAAkB,CAAC,YAAmC;AAC1D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,qBAAqB,MAAM;AAAA,IAC7B;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAC7B,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAAC,YAChC,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CAAC,YACxB,IAAI,qBAAqB,SAAS;AAAA,IAChC,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CAAC,YAAmC,gBAAgB,OAAO;AAE5E,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":[]}
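
For orientation, the rebuilt `dist` sourcemap above embeds the updated sources: `createTogetherAI()` now wires the `ImageModelV2`-based `TogetherAIImageModel` through `imageModel()` (with `image()` kept as a deprecated alias), loads the key from `TOGETHER_AI_API_KEY` when no `apiKey` is given, splits `size` strings of the form `'WIDTHxHEIGHT'` into `width`/`height`, and spreads `providerOptions.togetherai` verbatim into the request body. A minimal usage sketch follows; it assumes the `ai` package's `experimental_generateImage` helper (not part of this diff), and the model id and `steps` option are illustrative only.

```ts
// Usage sketch only: `experimental_generateImage` comes from the `ai` package,
// which is outside this diff; the model id and `steps` option are illustrative.
import { experimental_generateImage as generateImage } from 'ai';
import { createTogetherAI } from '@ai-sdk/togetherai';

const togetherai = createTogetherAI({
  // Omit `apiKey` to fall back to the TOGETHER_AI_API_KEY environment variable.
  apiKey: process.env.TOGETHER_AI_API_KEY,
});

const { images } = await generateImage({
  // `imageModel()` replaces the deprecated `image()` factory.
  model: togetherai.imageModel('black-forest-labs/FLUX.1-schnell'),
  prompt: 'A sunset over a mountain range',
  size: '1024x1024', // split into `width`/`height` for the Together AI request body
  providerOptions: {
    // Spread as-is into the request body (see togetherai-image-model.ts above).
    togetherai: { steps: 4 },
  },
});
```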
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/togetherai",
3
- "version": "0.2.14",
3
+ "version": "1.0.0-alpha.1",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -19,19 +19,19 @@
19
19
  }
20
20
  },
21
21
  "dependencies": {
22
- "@ai-sdk/openai-compatible": "0.2.14",
23
- "@ai-sdk/provider": "1.1.3",
24
- "@ai-sdk/provider-utils": "2.2.8"
22
+ "@ai-sdk/openai-compatible": "1.0.0-alpha.1",
23
+ "@ai-sdk/provider": "2.0.0-alpha.1",
24
+ "@ai-sdk/provider-utils": "3.0.0-alpha.1"
25
25
  },
26
26
  "devDependencies": {
27
27
  "@types/node": "20.17.24",
28
28
  "tsup": "^8",
29
- "typescript": "5.6.3",
30
- "zod": "3.23.8",
29
+ "typescript": "5.8.3",
30
+ "zod": "3.24.4",
31
31
  "@vercel/ai-tsconfig": "0.0.0"
32
32
  },
33
33
  "peerDependencies": {
34
- "zod": "^3.0.0"
34
+ "zod": "^3.24.0"
35
35
  },
36
36
  "engines": {
37
37
  "node": ">=18"
@@ -51,13 +51,15 @@
51
51
  "ai"
52
52
  ],
53
53
  "scripts": {
54
- "build": "tsup",
55
- "build:watch": "tsup --watch",
56
- "clean": "rm -rf dist",
54
+ "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
55
+ "build:watch": "pnpm clean && tsup --watch",
56
+ "clean": "rm -rf dist *.tsbuildinfo",
57
57
  "lint": "eslint \"./**/*.ts*\"",
58
- "type-check": "tsc --noEmit",
58
+ "type-check": "tsc --build",
59
59
  "prettier-check": "prettier --check \"./**/*.ts*\"",
60
60
  "test": "pnpm test:node && pnpm test:edge",
61
+ "test:update": "pnpm test:node -u",
62
+ "test:watch": "vitest --config vitest.node.config.js",
61
63
  "test:edge": "vitest --config vitest.edge.config.js --run",
62
64
  "test:node": "vitest --config vitest.node.config.js --run"
63
65
  }