@ai-sdk/togetherai 0.2.14 → 1.0.0-alpha.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,75 +1,347 @@
  # @ai-sdk/togetherai

- ## 0.2.14
+ ## 1.0.0-alpha.10

  ### Patch Changes

- - Updated dependencies [d87b9d1]
- - @ai-sdk/provider-utils@2.2.8
- - @ai-sdk/openai-compatible@0.2.14
+ - Updated dependencies [c4df419]
+ - @ai-sdk/provider@2.0.0-alpha.10
+ - @ai-sdk/openai-compatible@1.0.0-alpha.10
+ - @ai-sdk/provider-utils@3.0.0-alpha.10

- ## 0.2.13
+ ## 1.0.0-alpha.9

  ### Patch Changes

- - Updated dependencies [23571c9]
- - @ai-sdk/openai-compatible@0.2.13
+ - Updated dependencies [811dff3]
+ - @ai-sdk/provider@2.0.0-alpha.9
+ - @ai-sdk/openai-compatible@1.0.0-alpha.9
+ - @ai-sdk/provider-utils@3.0.0-alpha.9

- ## 0.2.12
+ ## 1.0.0-alpha.8

  ### Patch Changes

- - Updated dependencies [13492fe]
- - @ai-sdk/openai-compatible@0.2.12
+ - Updated dependencies [4fef487]
+ - Updated dependencies [9222aeb]
+ - @ai-sdk/provider-utils@3.0.0-alpha.8
+ - @ai-sdk/provider@2.0.0-alpha.8
+ - @ai-sdk/openai-compatible@1.0.0-alpha.8

- ## 0.2.11
+ ## 1.0.0-alpha.7

  ### Patch Changes

- - Updated dependencies [b5c9cd4]
- - @ai-sdk/openai-compatible@0.2.11
+ - Updated dependencies [5c56081]
+ - @ai-sdk/provider@2.0.0-alpha.7
+ - @ai-sdk/openai-compatible@1.0.0-alpha.7
+ - @ai-sdk/provider-utils@3.0.0-alpha.7

- ## 0.2.10
+ ## 1.0.0-alpha.6

  ### Patch Changes

- - Updated dependencies [beef951]
- - @ai-sdk/provider@1.1.3
- - @ai-sdk/openai-compatible@0.2.10
- - @ai-sdk/provider-utils@2.2.7
+ - Updated dependencies [0d2c085]
+ - @ai-sdk/provider@2.0.0-alpha.6
+ - @ai-sdk/openai-compatible@1.0.0-alpha.6
+ - @ai-sdk/provider-utils@3.0.0-alpha.6

- ## 0.2.9
+ ## 1.0.0-alpha.4

  ### Patch Changes

- - Updated dependencies [1bbc698]
- - @ai-sdk/openai-compatible@0.2.9
+ - Updated dependencies [dc714f3]
+ - @ai-sdk/provider@2.0.0-alpha.4
+ - @ai-sdk/openai-compatible@1.0.0-alpha.4
+ - @ai-sdk/provider-utils@3.0.0-alpha.4

- ## 0.2.8
+ ## 1.0.0-alpha.3

  ### Patch Changes

- - Updated dependencies [013faa8]
- - @ai-sdk/provider@1.1.2
- - @ai-sdk/openai-compatible@0.2.8
- - @ai-sdk/provider-utils@2.2.6
+ - Updated dependencies [6b98118]
+ - @ai-sdk/provider@2.0.0-alpha.3
+ - @ai-sdk/openai-compatible@1.0.0-alpha.3
+ - @ai-sdk/provider-utils@3.0.0-alpha.3

- ## 0.2.7
+ ## 1.0.0-alpha.2

  ### Patch Changes

- - Updated dependencies [c21fa6d]
- - @ai-sdk/provider-utils@2.2.5
- - @ai-sdk/provider@1.1.1
- - @ai-sdk/openai-compatible@0.2.7
+ - Updated dependencies [26535e0]
+ - @ai-sdk/provider@2.0.0-alpha.2
+ - @ai-sdk/openai-compatible@1.0.0-alpha.2
+ - @ai-sdk/provider-utils@3.0.0-alpha.2

- ## 0.2.6
+ ## 1.0.0-alpha.1

  ### Patch Changes

- - Updated dependencies [2c19b9a]
- - @ai-sdk/provider-utils@2.2.4
- - @ai-sdk/openai-compatible@0.2.6
+ - Updated dependencies [3f2f00c]
+ - @ai-sdk/provider@2.0.0-alpha.1
+ - @ai-sdk/openai-compatible@1.0.0-alpha.1
+ - @ai-sdk/provider-utils@3.0.0-alpha.1
+
+ ## 1.0.0-canary.19
+
+ ### Patch Changes
+
+ - Updated dependencies [faf8446]
+ - @ai-sdk/provider-utils@3.0.0-canary.19
+ - @ai-sdk/openai-compatible@1.0.0-canary.19
+
+ ## 1.0.0-canary.18
+
+ ### Patch Changes
+
+ - Updated dependencies [40acf9b]
+ - @ai-sdk/provider-utils@3.0.0-canary.18
+ - @ai-sdk/openai-compatible@1.0.0-canary.18
+
+ ## 1.0.0-canary.17
+
+ ### Major Changes
+
+ 516be5b: ### Move Image Model Settings into generate options
+
+ Image Models no longer have settings. Instead, `maxImagesPerCall` can be passed directly to `generateImage()`. All other image settings can be passed to `providerOptions[provider]`.
+
+ Before
+
+ ```js
+ await generateImage({
+ model: luma.image('photon-flash-1', {
+ maxImagesPerCall: 5,
+ pollIntervalMillis: 500,
+ }),
+ prompt,
+ n: 10,
+ });
+ ```
+
+ After
+
+ ```js
+ await generateImage({
+ model: luma.image('photon-flash-1'),
+ prompt,
+ n: 10,
+ maxImagesPerCall: 5,
+ providerOptions: {
+ luma: { pollIntervalMillis: 5 },
+ },
+ });
+ ```
+
+ Pull Request: https://github.com/vercel/ai/pull/6180
+
+ ### Patch Changes
+
+ - Updated dependencies [516be5b]
+ - Updated dependencies [ea7a7c9]
+ - @ai-sdk/openai-compatible@1.0.0-canary.17
+ - @ai-sdk/provider-utils@3.0.0-canary.17
+
+ ## 1.0.0-canary.16
+
+ ### Patch Changes
+
+ - Updated dependencies [87b828f]
+ - @ai-sdk/provider-utils@3.0.0-canary.16
+ - @ai-sdk/openai-compatible@1.0.0-canary.16
+
+ ## 1.0.0-canary.15
+
+ ### Patch Changes
+
+ - Updated dependencies [a571d6e]
+ - Updated dependencies [a8c8bd5]
+ - Updated dependencies [7979f7f]
+ - Updated dependencies [41fa418]
+ - @ai-sdk/provider-utils@3.0.0-canary.15
+ - @ai-sdk/provider@2.0.0-canary.14
+ - @ai-sdk/openai-compatible@1.0.0-canary.15
+
+ ## 1.0.0-canary.14
+
+ ### Patch Changes
+
+ - Updated dependencies [957b739]
+ - Updated dependencies [9bd5ab5]
+ - @ai-sdk/provider-utils@3.0.0-canary.14
+ - @ai-sdk/provider@2.0.0-canary.13
+ - @ai-sdk/openai-compatible@1.0.0-canary.14
+
+ ## 1.0.0-canary.13
+
+ ### Patch Changes
+
+ - d9209ca: fix (image-model): `specificationVersion: v1` -> `v2`
+ - Updated dependencies [7b3ae3f]
+ - Updated dependencies [d9209ca]
+ - Updated dependencies [0ff02bb]
+ - @ai-sdk/provider@2.0.0-canary.12
+ - @ai-sdk/openai-compatible@1.0.0-canary.13
+ - @ai-sdk/provider-utils@3.0.0-canary.13
+
+ ## 1.0.0-canary.12
+
+ ### Patch Changes
+
+ - Updated dependencies [9bf7291]
+ - Updated dependencies [4617fab]
+ - Updated dependencies [e030615]
+ - @ai-sdk/provider@2.0.0-canary.11
+ - @ai-sdk/openai-compatible@1.0.0-canary.12
+ - @ai-sdk/provider-utils@3.0.0-canary.12
+
+ ## 1.0.0-canary.11
+
+ ### Patch Changes
+
+ - 9301f86: refactor (image-model): rename `ImageModelV1` to `ImageModelV2`
+ - Updated dependencies [db72adc]
+ - Updated dependencies [42e32b0]
+ - Updated dependencies [66962ed]
+ - Updated dependencies [9301f86]
+ - Updated dependencies [a3f768e]
+ - @ai-sdk/openai-compatible@1.0.0-canary.11
+ - @ai-sdk/provider-utils@3.0.0-canary.11
+ - @ai-sdk/provider@2.0.0-canary.10
+
+ ## 1.0.0-canary.10
+
+ ### Patch Changes
+
+ - Updated dependencies [cf8280e]
+ - Updated dependencies [e86be6f]
+ - @ai-sdk/openai-compatible@1.0.0-canary.10
+ - @ai-sdk/provider@2.0.0-canary.9
+ - @ai-sdk/provider-utils@3.0.0-canary.10
+
+ ## 1.0.0-canary.9
+
+ ### Patch Changes
+
+ - Updated dependencies [95857aa]
+ - Updated dependencies [7ea4132]
+ - @ai-sdk/provider@2.0.0-canary.8
+ - @ai-sdk/openai-compatible@1.0.0-canary.9
+ - @ai-sdk/provider-utils@3.0.0-canary.9
+
+ ## 1.0.0-canary.8
+
+ ### Patch Changes
+
+ - Updated dependencies [5d142ab]
+ - Updated dependencies [b6b43c7]
+ - Updated dependencies [b9a6121]
+ - Updated dependencies [8aa9e20]
+ - Updated dependencies [3795467]
+ - @ai-sdk/provider-utils@3.0.0-canary.8
+ - @ai-sdk/provider@2.0.0-canary.7
+ - @ai-sdk/openai-compatible@1.0.0-canary.8
+
+ ## 1.0.0-canary.7
+
+ ### Patch Changes
+
+ - fa49207: feat(providers/openai-compatible): convert to providerOptions
+ - 26735b5: chore(embedding-model): add v2 interface
+ - Updated dependencies [fa49207]
+ - Updated dependencies [26735b5]
+ - Updated dependencies [443d8ec]
+ - Updated dependencies [14c9410]
+ - Updated dependencies [d9c98f4]
+ - Updated dependencies [c4a2fec]
+ - Updated dependencies [0054544]
+ - Updated dependencies [9e9c809]
+ - Updated dependencies [32831c6]
+ - Updated dependencies [d0f9495]
+ - Updated dependencies [fd65bc6]
+ - Updated dependencies [393138b]
+ - Updated dependencies [7182d14]
+ - @ai-sdk/openai-compatible@1.0.0-canary.7
+ - @ai-sdk/provider@2.0.0-canary.6
+ - @ai-sdk/provider-utils@3.0.0-canary.7
+
+ ## 1.0.0-canary.6
+
+ ### Patch Changes
+
+ - Updated dependencies [6db02c9]
+ - Updated dependencies [411e483]
+ - Updated dependencies [79457bd]
+ - Updated dependencies [ad80501]
+ - Updated dependencies [1766ede]
+ - Updated dependencies [f10304b]
+ - @ai-sdk/openai-compatible@1.0.0-canary.6
+ - @ai-sdk/provider@2.0.0-canary.5
+ - @ai-sdk/provider-utils@3.0.0-canary.6
+
+ ## 1.0.0-canary.5
+
+ ### Patch Changes
+
+ - Updated dependencies [6f6bb89]
+ - @ai-sdk/provider@2.0.0-canary.4
+ - @ai-sdk/openai-compatible@1.0.0-canary.5
+ - @ai-sdk/provider-utils@3.0.0-canary.5
+
+ ## 1.0.0-canary.4
+
+ ### Patch Changes
+
+ - Updated dependencies [d1a1aa1]
+ - @ai-sdk/provider@2.0.0-canary.3
+ - @ai-sdk/openai-compatible@1.0.0-canary.4
+ - @ai-sdk/provider-utils@3.0.0-canary.4
+
+ ## 1.0.0-canary.3
+
+ ### Patch Changes
+
+ - Updated dependencies [a166433]
+ - Updated dependencies [abf9a79]
+ - Updated dependencies [9f95b35]
+ - Updated dependencies [0a87932]
+ - Updated dependencies [6dc848c]
+ - @ai-sdk/provider-utils@3.0.0-canary.3
+ - @ai-sdk/provider@2.0.0-canary.2
+ - @ai-sdk/openai-compatible@1.0.0-canary.3
+
+ ## 1.0.0-canary.2
+
+ ### Patch Changes
+
+ - Updated dependencies [c57e248]
+ - Updated dependencies [33f4a6a]
+ - @ai-sdk/provider@2.0.0-canary.1
+ - @ai-sdk/openai-compatible@1.0.0-canary.2
+ - @ai-sdk/provider-utils@3.0.0-canary.2
+
+ ## 1.0.0-canary.1
+
+ ### Patch Changes
+
+ - Updated dependencies [060370c]
+ - Updated dependencies [0c0c0b3]
+ - Updated dependencies [63d791d]
+ - @ai-sdk/provider-utils@3.0.0-canary.1
+ - @ai-sdk/openai-compatible@1.0.0-canary.1
+
+ ## 1.0.0-canary.0
+
+ ### Major Changes
+
+ - d5f588f: AI SDK 5
+
+ ### Patch Changes
+
+ - Updated dependencies [d5f588f]
+ - @ai-sdk/provider-utils@3.0.0-canary.0
+ - @ai-sdk/openai-compatible@1.0.0-canary.0
+ - @ai-sdk/provider@2.0.0-canary.0

  ## 0.2.5

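The image-model change described under `1.0.0-canary.17` applies to this provider as well: `TogetherAIImageSettings` is removed (see the `index.d.ts` diff below) and the image model now reports a fixed `maxImagesPerCall` of 1. A rough migration sketch, assuming the `generateImage` helper referenced in the changelog entry; the model id comes from `TogetherAIImageModelId`, while the `steps` field is only a hypothetical example of a provider option that gets forwarded into the request body:

```ts
import { togetherai } from '@ai-sdk/togetherai';
// assumption: generateImage is the helper referenced in the changelog entry above;
// its import name may differ depending on the ai package version
import { experimental_generateImage as generateImage } from 'ai';

// 0.2.x (before): togetherai.image('black-forest-labs/FLUX.1-schnell', { maxImagesPerCall: 4 })
// 1.0.x (after): the factory takes only a model id; per-call options move to generateImage()
const { images } = await generateImage({
  model: togetherai.imageModel('black-forest-labs/FLUX.1-schnell'),
  prompt: 'a lighthouse at dusk',
  n: 4,
  maxImagesPerCall: 4,
  providerOptions: {
    togetherai: { steps: 4 }, // hypothetical field; provider options are spread into the request body
  },
});
```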
package/dist/index.d.mts CHANGED
@@ -1,27 +1,14 @@
- import { ProviderV1, LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { OpenAICompatibleChatSettings, OpenAICompatibleEmbeddingSettings, OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
  export { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';

  type TogetherAIChatModelId = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo' | 'meta-llama/Llama-3.2-3B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite' | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite' | 'meta-llama/Llama-3-8b-chat-hf' | 'meta-llama/Llama-3-70b-chat-hf' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'microsoft/WizardLM-2-8x22B' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'databricks/dbrx-instruct' | 'deepseek-ai/deepseek-llm-67b-chat' | 'deepseek-ai/DeepSeek-V3' | 'google/gemma-2b-it' | 'Gryphe/MythoMax-L2-13b' | 'meta-llama/Llama-2-13b-chat-hf' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO' | 'Qwen/Qwen2.5-7B-Instruct-Turbo' | 'Qwen/Qwen2.5-72B-Instruct-Turbo' | 'Qwen/Qwen2-72B-Instruct' | 'upstage/SOLAR-10.7B-Instruct-v1.0' | (string & {});
- interface TogetherAIChatSettings extends OpenAICompatibleChatSettings {
- }

  type TogetherAIEmbeddingModelId = 'togethercomputer/m2-bert-80M-2k-retrieval' | 'togethercomputer/m2-bert-80M-32k-retrieval' | 'togethercomputer/m2-bert-80M-8k-retrieval' | 'WhereIsAI/UAE-Large-V1' | 'BAAI/bge-large-en-v1.5' | 'BAAI/bge-base-en-v1.5' | 'sentence-transformers/msmarco-bert-base-dot-v5' | 'bert-base-uncased' | (string & {});
- interface TogetherAIEmbeddingSettings extends OpenAICompatibleEmbeddingSettings {
- }

  type TogetherAICompletionModelId = 'meta-llama/Llama-2-70b-hf' | 'mistralai/Mistral-7B-v0.1' | 'mistralai/Mixtral-8x7B-v0.1' | 'Meta-Llama/Llama-Guard-7b' | 'codellama/CodeLlama-34b-Instruct-hf' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | (string & {});
- interface TogetherAICompletionSettings extends OpenAICompatibleCompletionSettings {
- }

  type TogetherAIImageModelId = 'stabilityai/stable-diffusion-xl-base-1.0' | 'black-forest-labs/FLUX.1-dev' | 'black-forest-labs/FLUX.1-dev-lora' | 'black-forest-labs/FLUX.1-schnell' | 'black-forest-labs/FLUX.1-canny' | 'black-forest-labs/FLUX.1-depth' | 'black-forest-labs/FLUX.1-redux' | 'black-forest-labs/FLUX.1.1-pro' | 'black-forest-labs/FLUX.1-pro' | 'black-forest-labs/FLUX.1-schnell-Free' | (string & {});
- interface TogetherAIImageSettings {
- /**
- Override the maximum number of images per call (default 1)
- */
- maxImagesPerCall?: number;
- }

  interface TogetherAIProviderSettings {
  /**
@@ -42,35 +29,36 @@ interface TogetherAIProviderSettings {
  */
  fetch?: FetchFunction;
  }
- interface TogetherAIProvider extends ProviderV1 {
+ interface TogetherAIProvider extends ProviderV2 {
  /**
  Creates a model for text generation.
  */
- (modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ (modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- chatModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- languageModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a completion model for text generation.
  */
- completionModel(modelId: TogetherAICompletionModelId, settings?: TogetherAICompletionSettings): LanguageModelV1;
+ completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;
  /**
  Creates a text embedding model for text generation.
  */
- textEmbeddingModel(modelId: TogetherAIEmbeddingModelId, settings?: TogetherAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
- Creates a model for image generation.
- */
- image(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ @deprecated Use `imageModel` instead.
+ */
+ image(modelId: TogetherAIImageModelId): ImageModelV2;
  /**
- Creates a model for image generation.
- */
- imageModel(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ */
+ imageModel(modelId: TogetherAIImageModelId): ImageModelV2;
  }
  declare function createTogetherAI(options?: TogetherAIProviderSettings): TogetherAIProvider;
  declare const togetherai: TogetherAIProvider;
package/dist/index.d.ts CHANGED
@@ -1,27 +1,14 @@
- import { ProviderV1, LanguageModelV1, EmbeddingModelV1, ImageModelV1 } from '@ai-sdk/provider';
+ import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2 } from '@ai-sdk/provider';
  import { FetchFunction } from '@ai-sdk/provider-utils';
- import { OpenAICompatibleChatSettings, OpenAICompatibleEmbeddingSettings, OpenAICompatibleCompletionSettings } from '@ai-sdk/openai-compatible';
  export { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';

  type TogetherAIChatModelId = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-70B-Instruct-Turbo' | 'meta-llama/Llama-3.2-3B-Instruct-Turbo' | 'meta-llama/Meta-Llama-3-8B-Instruct-Lite' | 'meta-llama/Meta-Llama-3-70B-Instruct-Lite' | 'meta-llama/Llama-3-8b-chat-hf' | 'meta-llama/Llama-3-70b-chat-hf' | 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | 'Qwen/QwQ-32B-Preview' | 'microsoft/WizardLM-2-8x22B' | 'google/gemma-2-27b-it' | 'google/gemma-2-9b-it' | 'databricks/dbrx-instruct' | 'deepseek-ai/deepseek-llm-67b-chat' | 'deepseek-ai/DeepSeek-V3' | 'google/gemma-2b-it' | 'Gryphe/MythoMax-L2-13b' | 'meta-llama/Llama-2-13b-chat-hf' | 'mistralai/Mistral-7B-Instruct-v0.1' | 'mistralai/Mistral-7B-Instruct-v0.2' | 'mistralai/Mistral-7B-Instruct-v0.3' | 'mistralai/Mixtral-8x7B-Instruct-v0.1' | 'mistralai/Mixtral-8x22B-Instruct-v0.1' | 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO' | 'Qwen/Qwen2.5-7B-Instruct-Turbo' | 'Qwen/Qwen2.5-72B-Instruct-Turbo' | 'Qwen/Qwen2-72B-Instruct' | 'upstage/SOLAR-10.7B-Instruct-v1.0' | (string & {});
- interface TogetherAIChatSettings extends OpenAICompatibleChatSettings {
- }

  type TogetherAIEmbeddingModelId = 'togethercomputer/m2-bert-80M-2k-retrieval' | 'togethercomputer/m2-bert-80M-32k-retrieval' | 'togethercomputer/m2-bert-80M-8k-retrieval' | 'WhereIsAI/UAE-Large-V1' | 'BAAI/bge-large-en-v1.5' | 'BAAI/bge-base-en-v1.5' | 'sentence-transformers/msmarco-bert-base-dot-v5' | 'bert-base-uncased' | (string & {});
- interface TogetherAIEmbeddingSettings extends OpenAICompatibleEmbeddingSettings {
- }

  type TogetherAICompletionModelId = 'meta-llama/Llama-2-70b-hf' | 'mistralai/Mistral-7B-v0.1' | 'mistralai/Mixtral-8x7B-v0.1' | 'Meta-Llama/Llama-Guard-7b' | 'codellama/CodeLlama-34b-Instruct-hf' | 'Qwen/Qwen2.5-Coder-32B-Instruct' | (string & {});
- interface TogetherAICompletionSettings extends OpenAICompatibleCompletionSettings {
- }

  type TogetherAIImageModelId = 'stabilityai/stable-diffusion-xl-base-1.0' | 'black-forest-labs/FLUX.1-dev' | 'black-forest-labs/FLUX.1-dev-lora' | 'black-forest-labs/FLUX.1-schnell' | 'black-forest-labs/FLUX.1-canny' | 'black-forest-labs/FLUX.1-depth' | 'black-forest-labs/FLUX.1-redux' | 'black-forest-labs/FLUX.1.1-pro' | 'black-forest-labs/FLUX.1-pro' | 'black-forest-labs/FLUX.1-schnell-Free' | (string & {});
- interface TogetherAIImageSettings {
- /**
- Override the maximum number of images per call (default 1)
- */
- maxImagesPerCall?: number;
- }

  interface TogetherAIProviderSettings {
  /**
@@ -42,35 +29,36 @@ interface TogetherAIProviderSettings {
  */
  fetch?: FetchFunction;
  }
- interface TogetherAIProvider extends ProviderV1 {
+ interface TogetherAIProvider extends ProviderV2 {
  /**
  Creates a model for text generation.
  */
- (modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ (modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- chatModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a chat model for text generation.
  */
- languageModel(modelId: TogetherAIChatModelId, settings?: TogetherAIChatSettings): LanguageModelV1;
+ languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;
  /**
  Creates a completion model for text generation.
  */
- completionModel(modelId: TogetherAICompletionModelId, settings?: TogetherAICompletionSettings): LanguageModelV1;
+ completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;
  /**
  Creates a text embedding model for text generation.
  */
- textEmbeddingModel(modelId: TogetherAIEmbeddingModelId, settings?: TogetherAIEmbeddingSettings): EmbeddingModelV1<string>;
+ textEmbeddingModel(modelId: TogetherAIEmbeddingModelId): EmbeddingModelV2<string>;
  /**
- Creates a model for image generation.
- */
- image(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ @deprecated Use `imageModel` instead.
+ */
+ image(modelId: TogetherAIImageModelId): ImageModelV2;
  /**
- Creates a model for image generation.
- */
- imageModel(modelId: TogetherAIImageModelId, settings?: TogetherAIImageSettings): ImageModelV1;
+ Creates a model for image generation.
+ */
+ imageModel(modelId: TogetherAIImageModelId): ImageModelV2;
  }
  declare function createTogetherAI(options?: TogetherAIProviderSettings): TogetherAIProvider;
  declare const togetherai: TogetherAIProvider;
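Both declaration diffs above reduce every model factory to a single `modelId` argument. A minimal construction sketch based on these declarations (model ids are taken from the unions above; reading the key from `process.env` is illustrative, since the bundled source below shows the provider falling back to `TOGETHER_AI_API_KEY` on its own):

```ts
import { createTogetherAI } from '@ai-sdk/togetherai';

const together = createTogetherAI({
  apiKey: process.env.TOGETHER_AI_API_KEY, // optional; defaults to the TOGETHER_AI_API_KEY env var
  baseURL: 'https://api.together.xyz/v1/', // optional; this is the default
});

// Factories now take only a model id; the former `settings` parameter no longer exists.
const chat = together('meta-llama/Llama-3.3-70B-Instruct-Turbo'); // LanguageModelV2
const completion = together.completionModel('mistralai/Mistral-7B-v0.1'); // LanguageModelV2
const embedder = together.textEmbeddingModel('BAAI/bge-base-en-v1.5'); // EmbeddingModelV2<string>
const image = together.imageModel('black-forest-labs/FLUX.1-schnell'); // ImageModelV2; image() still works but is deprecated
```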
package/dist/index.js CHANGED
@@ -33,19 +33,15 @@ var import_provider_utils2 = require("@ai-sdk/provider-utils");
  var import_provider_utils = require("@ai-sdk/provider-utils");
  var import_zod = require("zod");
  var TogetherAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.maxImagesPerCall = 1;
  }
  get provider() {
  return this.config.provider;
  }
- get maxImagesPerCall() {
- var _a;
- return (_a = this.settings.maxImagesPerCall) != null ? _a : 1;
- }
  async doGenerate({
  prompt,
  n,
@@ -135,27 +131,25 @@ function createTogetherAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createChatModel = (modelId, settings = {}) => {
- return new import_openai_compatible.OpenAICompatibleChatLanguageModel(modelId, settings, {
- ...getCommonModelConfig("chat"),
- defaultObjectGenerationMode: "tool"
- });
+ const createChatModel = (modelId) => {
+ return new import_openai_compatible.OpenAICompatibleChatLanguageModel(
+ modelId,
+ getCommonModelConfig("chat")
+ );
  };
- const createCompletionModel = (modelId, settings = {}) => new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(
+ const createCompletionModel = (modelId) => new import_openai_compatible.OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
  getCommonModelConfig("completion")
  );
- const createTextEmbeddingModel = (modelId, settings = {}) => new import_openai_compatible.OpenAICompatibleEmbeddingModel(
+ const createTextEmbeddingModel = (modelId) => new import_openai_compatible.OpenAICompatibleEmbeddingModel(
  modelId,
- settings,
  getCommonModelConfig("embedding")
  );
- const createImageModel = (modelId, settings = {}) => new TogetherAIImageModel(modelId, settings, {
+ const createImageModel = (modelId) => new TogetherAIImageModel(modelId, {
  ...getCommonModelConfig("image"),
  baseURL: baseURL != null ? baseURL : "https://api.together.xyz/v1/"
  });
- const provider = (modelId, settings) => createChatModel(modelId, settings);
+ const provider = (modelId) => createChatModel(modelId);
  provider.completionModel = createCompletionModel;
  provider.languageModel = createChatModel;
  provider.chatModel = createChatModel;
package/dist/index.js.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/index.ts","../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["export { createTogetherAI, togetherai } from './togetherai-provider';\nexport type {\n TogetherAIProvider,\n TogetherAIProviderSettings,\n} from './togetherai-provider';\nexport type { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';\n","import {\n LanguageModelV1,\n EmbeddingModelV1,\n ProviderV1,\n ImageModelV1,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIChatModelId,\n TogetherAIChatSettings,\n} from './togetherai-chat-settings';\nimport {\n TogetherAIEmbeddingModelId,\n TogetherAIEmbeddingSettings,\n} from './togetherai-embedding-settings';\nimport {\n TogetherAICompletionModelId,\n TogetherAICompletionSettings,\n} from './togetherai-completion-settings';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV1 {\n /**\nCreates a model for text generation.\n*/\n (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(\n modelId: TogetherAICompletionModelId,\n settings?: TogetherAICompletionSettings,\n ): LanguageModelV1;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n settings?: TogetherAIEmbeddingSettings,\n ): EmbeddingModelV1<string>;\n\n /**\n Creates a model for image generation.\n */\n image(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n\n /**\n Creates a model for image generation.\n */\n imageModel(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (\n modelId: TogetherAIChatModelId,\n settings: TogetherAIChatSettings = {},\n ) => {\n return new OpenAICompatibleChatLanguageModel(modelId, settings, {\n ...getCommonModelConfig('chat'),\n defaultObjectGenerationMode: 'tool',\n });\n };\n\n const createCompletionModel = (\n modelId: TogetherAICompletionModelId,\n settings: TogetherAICompletionSettings = {},\n ) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n settings,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (\n modelId: TogetherAIEmbeddingModelId,\n settings: TogetherAIEmbeddingSettings = {},\n ) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n settings,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (\n modelId: TogetherAIImageModelId,\n settings: TogetherAIImageSettings = {},\n ) =>\n new TogetherAIImageModel(modelId, settings, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ) => createChatModel(modelId, settings);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV1, ImageModelV1CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV1 {\n readonly specificationVersion = 'v1';\n\n get provider(): string {\n return this.config.provider;\n }\n\n get maxImagesPerCall(): number {\n return this.settings.maxImagesPerCall ?? 1;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n readonly settings: TogetherAIImageSettings,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV1['doGenerate']>>\n > {\n const warnings: Array<ImageModelV1CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. 
Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACMA,+BAIO;AACP,IAAAA,yBAIO;;;ACdP,4BAMO;AAKP,iBAAkB;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAWxD,YACW,SACA,UACD,QACR;AAHS;AACA;AACD;AAbV,SAAS,uBAAuB;AAAA,EAc7B;AAAA,EAZH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAEA,IAAI,mBAA2B;AA/BjC;AAgCI,YAAO,UAAK,SAAS,qBAAd,YAAkC;AAAA,EAC3C;AAAA,EAQA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAnDJ;AAoDI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,UAAM,qCAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,aAAS,sCAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,2BAAuB,sDAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,+BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,aAAE,OAAO;AAAA,EAC7C,MAAM,aAAE;AAAA,IACN,aAAE,OAAO;AAAA,MACP,UAAU,aAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,aAAE,OAAO;AAAA,EACrC,OAAO,aAAE,OAAO;AAAA,IACd,SAAS,aAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;ADRM,SAAS,iBACd,UAAsC,CAAC,GACnB;AAlHtB;AAmHE,QAAM,cAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,cAAU,mCAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,
cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CACtB,SACA,WAAmC,CAAC,MACjC;AACH,WAAO,IAAI,2DAAkC,SAAS,UAAU;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,6BAA6B;AAAA,IAC/B,CAAC;AAAA,EACH;AAEA,QAAM,wBAAwB,CAC5B,SACA,WAAyC,CAAC,MAE1C,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAC/B,SACA,WAAwC,CAAC,MAEzC,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CACvB,SACA,WAAoC,CAAC,MAErC,IAAI,qBAAqB,SAAS,UAAU;AAAA,IAC1C,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CACf,SACA,aACG,gBAAgB,SAAS,QAAQ;AAEtC,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":["import_provider_utils"]}
+ {"version":3,"sources":["../src/index.ts","../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["export { createTogetherAI, togetherai } from './togetherai-provider';\nexport type {\n TogetherAIProvider,\n TogetherAIProviderSettings,\n} from './togetherai-provider';\nexport type { OpenAICompatibleErrorData as TogetherAIErrorData } from '@ai-sdk/openai-compatible';\n","import {\n LanguageModelV2,\n EmbeddingModelV2,\n ProviderV2,\n ImageModelV2,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIChatModelId } from './togetherai-chat-options';\nimport { TogetherAIEmbeddingModelId } from './togetherai-embedding-options';\nimport { TogetherAICompletionModelId } from './togetherai-completion-options';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV2 {\n /**\nCreates a model for text generation.\n*/\n (modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n ): EmbeddingModelV2<string>;\n\n /**\nCreates a model for image generation.\n@deprecated Use `imageModel` instead.\n*/\n image(modelId: TogetherAIImageModelId): ImageModelV2;\n\n /**\nCreates a model for image generation.\n*/\n imageModel(modelId: TogetherAIImageModelId): ImageModelV2;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId: TogetherAIChatModelId) => {\n return new OpenAICompatibleChatLanguageModel(\n modelId,\n getCommonModelConfig('chat'),\n );\n };\n\n const createCompletionModel = (modelId: TogetherAICompletionModelId) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (modelId: TogetherAIEmbeddingModelId) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (modelId: TogetherAIImageModelId) =>\n new TogetherAIImageModel(modelId, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (modelId: TogetherAIChatModelId) => createChatModel(modelId);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV2, ImageModelV2CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV2 {\n readonly specificationVersion = 'v2';\n readonly maxImagesPerCall = 1;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV2['doGenerate']>>\n > {\n const warnings: Array<ImageModelV2CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACMA,+BAIO;AACP,IAAAA,yBAIO;;;ACdP,4BAMO;AAEP,iBAAkB;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAQxD,YACW,SACD,QACR;AAFS;AACD;AATV,SAAS,uBAAuB;AAChC,SAAS,mBAAmB;AAAA,EASzB;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AA5CJ;AA6CI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,UAAM,qCAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,aAAS,sCAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,2BAAuB,sDAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,+BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,aAAE,OAAO;AAAA,EAC7C,MAAM,aAAE;AAAA,IACN,aAAE,OAAO;AAAA,MACP,UAAU,aAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,aAAE,OAAO;AAAA,EACrC,OAAO,aAAE,OAAO;AAAA,IACd,SAAS,aAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;AD/BM,SAAS,iBACd,UAAsC,CAAC,GACnB;AApFtB;AAqFE,QAAM,cAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,cAAU,mCAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CAAC,YAAmC;AAC1D,WAAO,IAAI;AAAA,
MACT;AAAA,MACA,qBAAqB,MAAM;AAAA,IAC7B;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAC7B,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAAC,YAChC,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CAAC,YACxB,IAAI,qBAAqB,SAAS;AAAA,IAChC,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CAAC,YAAmC,gBAAgB,OAAO;AAE5E,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":["import_provider_utils"]}
package/dist/index.mjs CHANGED
@@ -18,19 +18,15 @@ import {
  } from "@ai-sdk/provider-utils";
  import { z } from "zod";
  var TogetherAIImageModel = class {
- constructor(modelId, settings, config) {
+ constructor(modelId, config) {
  this.modelId = modelId;
- this.settings = settings;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
+ this.maxImagesPerCall = 1;
  }
  get provider() {
  return this.config.provider;
  }
- get maxImagesPerCall() {
- var _a;
- return (_a = this.settings.maxImagesPerCall) != null ? _a : 1;
- }
  async doGenerate({
  prompt,
  n,
@@ -120,27 +116,25 @@ function createTogetherAI(options = {}) {
  headers: getHeaders,
  fetch: options.fetch
  });
- const createChatModel = (modelId, settings = {}) => {
- return new OpenAICompatibleChatLanguageModel(modelId, settings, {
- ...getCommonModelConfig("chat"),
- defaultObjectGenerationMode: "tool"
- });
+ const createChatModel = (modelId) => {
+ return new OpenAICompatibleChatLanguageModel(
+ modelId,
+ getCommonModelConfig("chat")
+ );
  };
- const createCompletionModel = (modelId, settings = {}) => new OpenAICompatibleCompletionLanguageModel(
+ const createCompletionModel = (modelId) => new OpenAICompatibleCompletionLanguageModel(
  modelId,
- settings,
  getCommonModelConfig("completion")
  );
- const createTextEmbeddingModel = (modelId, settings = {}) => new OpenAICompatibleEmbeddingModel(
+ const createTextEmbeddingModel = (modelId) => new OpenAICompatibleEmbeddingModel(
  modelId,
- settings,
  getCommonModelConfig("embedding")
  );
- const createImageModel = (modelId, settings = {}) => new TogetherAIImageModel(modelId, settings, {
+ const createImageModel = (modelId) => new TogetherAIImageModel(modelId, {
  ...getCommonModelConfig("image"),
  baseURL: baseURL != null ? baseURL : "https://api.together.xyz/v1/"
  });
- const provider = (modelId, settings) => createChatModel(modelId, settings);
+ const provider = (modelId) => createChatModel(modelId);
  provider.completionModel = createCompletionModel;
  provider.languageModel = createChatModel;
  provider.chatModel = createChatModel;
@@ -1 +1 @@
- {"version":3,"sources":["../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["import {\n LanguageModelV1,\n EmbeddingModelV1,\n ProviderV1,\n ImageModelV1,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIChatModelId,\n TogetherAIChatSettings,\n} from './togetherai-chat-settings';\nimport {\n TogetherAIEmbeddingModelId,\n TogetherAIEmbeddingSettings,\n} from './togetherai-embedding-settings';\nimport {\n TogetherAICompletionModelId,\n TogetherAICompletionSettings,\n} from './togetherai-completion-settings';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV1 {\n /**\nCreates a model for text generation.\n*/\n (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ): LanguageModelV1;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(\n modelId: TogetherAICompletionModelId,\n settings?: TogetherAICompletionSettings,\n ): LanguageModelV1;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n settings?: TogetherAIEmbeddingSettings,\n ): EmbeddingModelV1<string>;\n\n /**\n Creates a model for image generation.\n */\n image(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n\n /**\n Creates a model for image generation.\n */\n imageModel(\n modelId: TogetherAIImageModelId,\n settings?: TogetherAIImageSettings,\n ): ImageModelV1;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (\n modelId: TogetherAIChatModelId,\n settings: TogetherAIChatSettings = {},\n ) => {\n return new OpenAICompatibleChatLanguageModel(modelId, settings, {\n ...getCommonModelConfig('chat'),\n defaultObjectGenerationMode: 'tool',\n });\n };\n\n const createCompletionModel = (\n modelId: TogetherAICompletionModelId,\n settings: TogetherAICompletionSettings = {},\n ) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n settings,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (\n modelId: TogetherAIEmbeddingModelId,\n settings: TogetherAIEmbeddingSettings = {},\n ) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n settings,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (\n modelId: TogetherAIImageModelId,\n settings: TogetherAIImageSettings = {},\n ) =>\n new TogetherAIImageModel(modelId, settings, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (\n modelId: TogetherAIChatModelId,\n settings?: TogetherAIChatSettings,\n ) => createChatModel(modelId, settings);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV1, ImageModelV1CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport {\n TogetherAIImageModelId,\n TogetherAIImageSettings,\n} from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV1 {\n readonly specificationVersion = 'v1';\n\n get provider(): string {\n return this.config.provider;\n }\n\n get maxImagesPerCall(): number {\n return this.settings.maxImagesPerCall ?? 1;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n readonly settings: TogetherAIImageSettings,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV1['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV1['doGenerate']>>\n > {\n const warnings: Array<ImageModelV1CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. 
Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";AAMA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,OACK;;;ACdP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,OACK;AAKP,SAAS,SAAS;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAWxD,YACW,SACA,UACD,QACR;AAHS;AACA;AACD;AAbV,SAAS,uBAAuB;AAAA,EAc7B;AAAA,EAZH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAEA,IAAI,mBAA2B;AA/BjC;AAgCI,YAAO,UAAK,SAAS,qBAAd,YAAkC;AAAA,EAC3C;AAAA,EAQA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AAnDJ;AAoDI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,MAAM,cAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,SAAS,eAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,uBAAuB,+BAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,2BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,EAAE,OAAO;AAAA,EAC7C,MAAM,EAAE;AAAA,IACN,EAAE,OAAO;AAAA,MACP,UAAU,EAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,EAAE,OAAO;AAAA,EACrC,OAAO,EAAE,OAAO;AAAA,IACd,SAAS,EAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;ADRM,SAAS,iBACd,UAAsC,CAAC,GACnB;AAlHtB;AAmHE,QAAM,UAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,UAAU,WAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb
;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA,QAAM,kBAAkB,CACtB,SACA,WAAmC,CAAC,MACjC;AACH,WAAO,IAAI,kCAAkC,SAAS,UAAU;AAAA,MAC9D,GAAG,qBAAqB,MAAM;AAAA,MAC9B,6BAA6B;AAAA,IAC/B,CAAC;AAAA,EACH;AAEA,QAAM,wBAAwB,CAC5B,SACA,WAAyC,CAAC,MAE1C,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAC/B,SACA,WAAwC,CAAC,MAEzC,IAAI;AAAA,IACF;AAAA,IACA;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CACvB,SACA,WAAoC,CAAC,MAErC,IAAI,qBAAqB,SAAS,UAAU;AAAA,IAC1C,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CACf,SACA,aACG,gBAAgB,SAAS,QAAQ;AAEtC,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":[]}
1
+ {"version":3,"sources":["../src/togetherai-provider.ts","../src/togetherai-image-model.ts"],"sourcesContent":["import {\n LanguageModelV2,\n EmbeddingModelV2,\n ProviderV2,\n ImageModelV2,\n} from '@ai-sdk/provider';\nimport {\n OpenAICompatibleChatLanguageModel,\n OpenAICompatibleCompletionLanguageModel,\n OpenAICompatibleEmbeddingModel,\n} from '@ai-sdk/openai-compatible';\nimport {\n FetchFunction,\n loadApiKey,\n withoutTrailingSlash,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIChatModelId } from './togetherai-chat-options';\nimport { TogetherAIEmbeddingModelId } from './togetherai-embedding-options';\nimport { TogetherAICompletionModelId } from './togetherai-completion-options';\nimport { TogetherAIImageModel } from './togetherai-image-model';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\n\nexport interface TogetherAIProviderSettings {\n /**\nTogetherAI API key.\n*/\n apiKey?: string;\n /**\nBase URL for the API calls.\n*/\n baseURL?: string;\n /**\nCustom headers to include in the requests.\n*/\n headers?: Record<string, string>;\n /**\nCustom fetch implementation. You can use it as a middleware to intercept requests,\nor to provide a custom fetch implementation for e.g. testing.\n*/\n fetch?: FetchFunction;\n}\n\nexport interface TogetherAIProvider extends ProviderV2 {\n /**\nCreates a model for text generation.\n*/\n (modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n chatModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a chat model for text generation.\n*/\n languageModel(modelId: TogetherAIChatModelId): LanguageModelV2;\n\n /**\nCreates a completion model for text generation.\n*/\n completionModel(modelId: TogetherAICompletionModelId): LanguageModelV2;\n\n /**\nCreates a text embedding model for text generation.\n*/\n textEmbeddingModel(\n modelId: TogetherAIEmbeddingModelId,\n ): EmbeddingModelV2<string>;\n\n /**\nCreates a model for image generation.\n@deprecated Use `imageModel` instead.\n*/\n image(modelId: TogetherAIImageModelId): ImageModelV2;\n\n /**\nCreates a model for image generation.\n*/\n imageModel(modelId: TogetherAIImageModelId): ImageModelV2;\n}\n\nexport function createTogetherAI(\n options: TogetherAIProviderSettings = {},\n): TogetherAIProvider {\n const baseURL = withoutTrailingSlash(\n options.baseURL ?? 
'https://api.together.xyz/v1/',\n );\n const getHeaders = () => ({\n Authorization: `Bearer ${loadApiKey({\n apiKey: options.apiKey,\n environmentVariableName: 'TOGETHER_AI_API_KEY',\n description: 'TogetherAI',\n })}`,\n ...options.headers,\n });\n\n interface CommonModelConfig {\n provider: string;\n url: ({ path }: { path: string }) => string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n }\n\n const getCommonModelConfig = (modelType: string): CommonModelConfig => ({\n provider: `togetherai.${modelType}`,\n url: ({ path }) => `${baseURL}${path}`,\n headers: getHeaders,\n fetch: options.fetch,\n });\n\n const createChatModel = (modelId: TogetherAIChatModelId) => {\n return new OpenAICompatibleChatLanguageModel(\n modelId,\n getCommonModelConfig('chat'),\n );\n };\n\n const createCompletionModel = (modelId: TogetherAICompletionModelId) =>\n new OpenAICompatibleCompletionLanguageModel(\n modelId,\n getCommonModelConfig('completion'),\n );\n\n const createTextEmbeddingModel = (modelId: TogetherAIEmbeddingModelId) =>\n new OpenAICompatibleEmbeddingModel(\n modelId,\n getCommonModelConfig('embedding'),\n );\n\n const createImageModel = (modelId: TogetherAIImageModelId) =>\n new TogetherAIImageModel(modelId, {\n ...getCommonModelConfig('image'),\n baseURL: baseURL ?? 'https://api.together.xyz/v1/',\n });\n\n const provider = (modelId: TogetherAIChatModelId) => createChatModel(modelId);\n\n provider.completionModel = createCompletionModel;\n provider.languageModel = createChatModel;\n provider.chatModel = createChatModel;\n provider.textEmbeddingModel = createTextEmbeddingModel;\n provider.image = createImageModel;\n provider.imageModel = createImageModel;\n\n return provider;\n}\n\nexport const togetherai = createTogetherAI();\n","import { ImageModelV2, ImageModelV2CallWarning } from '@ai-sdk/provider';\nimport {\n combineHeaders,\n createJsonResponseHandler,\n createJsonErrorResponseHandler,\n FetchFunction,\n postJsonToApi,\n} from '@ai-sdk/provider-utils';\nimport { TogetherAIImageModelId } from './togetherai-image-settings';\nimport { z } from 'zod';\n\ninterface TogetherAIImageModelConfig {\n provider: string;\n baseURL: string;\n headers: () => Record<string, string>;\n fetch?: FetchFunction;\n _internal?: {\n currentDate?: () => Date;\n };\n}\n\nexport class TogetherAIImageModel implements ImageModelV2 {\n readonly specificationVersion = 'v2';\n readonly maxImagesPerCall = 1;\n\n get provider(): string {\n return this.config.provider;\n }\n\n constructor(\n readonly modelId: TogetherAIImageModelId,\n private config: TogetherAIImageModelConfig,\n ) {}\n\n async doGenerate({\n prompt,\n n,\n size,\n seed,\n providerOptions,\n headers,\n abortSignal,\n }: Parameters<ImageModelV2['doGenerate']>[0]): Promise<\n Awaited<ReturnType<ImageModelV2['doGenerate']>>\n > {\n const warnings: Array<ImageModelV2CallWarning> = [];\n\n if (size != null) {\n warnings.push({\n type: 'unsupported-setting',\n setting: 'aspectRatio',\n details:\n 'This model does not support the `aspectRatio` option. Use `size` instead.',\n });\n }\n\n const currentDate = this.config._internal?.currentDate?.() ?? 
new Date();\n const splitSize = size?.split('x');\n // https://docs.together.ai/reference/post_images-generations\n const { value: response, responseHeaders } = await postJsonToApi({\n url: `${this.config.baseURL}/images/generations`,\n headers: combineHeaders(this.config.headers(), headers),\n body: {\n model: this.modelId,\n prompt,\n seed,\n n,\n ...(splitSize && {\n width: parseInt(splitSize[0]),\n height: parseInt(splitSize[1]),\n }),\n response_format: 'base64',\n ...(providerOptions.togetherai ?? {}),\n },\n failedResponseHandler: createJsonErrorResponseHandler({\n errorSchema: togetheraiErrorSchema,\n errorToMessage: data => data.error.message,\n }),\n successfulResponseHandler: createJsonResponseHandler(\n togetheraiImageResponseSchema,\n ),\n abortSignal,\n fetch: this.config.fetch,\n });\n\n return {\n images: response.data.map(item => item.b64_json),\n warnings,\n response: {\n timestamp: currentDate,\n modelId: this.modelId,\n headers: responseHeaders,\n },\n };\n }\n}\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiImageResponseSchema = z.object({\n data: z.array(\n z.object({\n b64_json: z.string(),\n }),\n ),\n});\n\n// limited version of the schema, focussed on what is needed for the implementation\n// this approach limits breakages when the API changes and increases efficiency\nconst togetheraiErrorSchema = z.object({\n error: z.object({\n message: z.string(),\n }),\n});\n"],"mappings":";AAMA;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP;AAAA,EAEE;AAAA,EACA;AAAA,OACK;;;ACdP;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,OACK;AAEP,SAAS,SAAS;AAYX,IAAM,uBAAN,MAAmD;AAAA,EAQxD,YACW,SACD,QACR;AAFS;AACD;AATV,SAAS,uBAAuB;AAChC,SAAS,mBAAmB;AAAA,EASzB;AAAA,EAPH,IAAI,WAAmB;AACrB,WAAO,KAAK,OAAO;AAAA,EACrB;AAAA,EAOA,MAAM,WAAW;AAAA,IACf;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,GAEE;AA5CJ;AA6CI,UAAM,WAA2C,CAAC;AAElD,QAAI,QAAQ,MAAM;AAChB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,SACE;AAAA,MACJ,CAAC;AAAA,IACH;AAEA,UAAM,eAAc,sBAAK,OAAO,cAAZ,mBAAuB,gBAAvB,4CAA0C,oBAAI,KAAK;AACvE,UAAM,YAAY,6BAAM,MAAM;AAE9B,UAAM,EAAE,OAAO,UAAU,gBAAgB,IAAI,MAAM,cAAc;AAAA,MAC/D,KAAK,GAAG,KAAK,OAAO,OAAO;AAAA,MAC3B,SAAS,eAAe,KAAK,OAAO,QAAQ,GAAG,OAAO;AAAA,MACtD,MAAM;AAAA,QACJ,OAAO,KAAK;AAAA,QACZ;AAAA,QACA;AAAA,QACA;AAAA,QACA,GAAI,aAAa;AAAA,UACf,OAAO,SAAS,UAAU,CAAC,CAAC;AAAA,UAC5B,QAAQ,SAAS,UAAU,CAAC,CAAC;AAAA,QAC/B;AAAA,QACA,iBAAiB;AAAA,QACjB,IAAI,qBAAgB,eAAhB,YAA8B,CAAC;AAAA,MACrC;AAAA,MACA,uBAAuB,+BAA+B;AAAA,QACpD,aAAa;AAAA,QACb,gBAAgB,UAAQ,KAAK,MAAM;AAAA,MACrC,CAAC;AAAA,MACD,2BAA2B;AAAA,QACzB;AAAA,MACF;AAAA,MACA;AAAA,MACA,OAAO,KAAK,OAAO;AAAA,IACrB,CAAC;AAED,WAAO;AAAA,MACL,QAAQ,SAAS,KAAK,IAAI,UAAQ,KAAK,QAAQ;AAAA,MAC/C;AAAA,MACA,UAAU;AAAA,QACR,WAAW;AAAA,QACX,SAAS,KAAK;AAAA,QACd,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF;AACF;AAIA,IAAM,gCAAgC,EAAE,OAAO;AAAA,EAC7C,MAAM,EAAE;AAAA,IACN,EAAE,OAAO;AAAA,MACP,UAAU,EAAE,OAAO;AAAA,IACrB,CAAC;AAAA,EACH;AACF,CAAC;AAID,IAAM,wBAAwB,EAAE,OAAO;AAAA,EACrC,OAAO,EAAE,OAAO;AAAA,IACd,SAAS,EAAE,OAAO;AAAA,EACpB,CAAC;AACH,CAAC;;;AD/BM,SAAS,iBACd,UAAsC,CAAC,GACnB;AApFtB;AAqFE,QAAM,UAAU;AAAA,KACd,aAAQ,YAAR,YAAmB;AAAA,EACrB;AACA,QAAM,aAAa,OAAO;AAAA,IACxB,eAAe,UAAU,WAAW;AAAA,MAClC,QAAQ,QAAQ;AAAA,MAChB,yBAAyB;AAAA,MACzB,aAAa;AAAA,IACf,CAAC,CAAC;AAAA,IACF,GAAG,QAAQ;AAAA,EACb;AASA,QAAM,uBAAuB,CAAC,eAA0C;AAAA,IACtE,UAAU,cAAc,SAAS;AAAA,IACjC,KAAK,CAAC,EAAE,KAAK,MAAM,GAAG,OAAO,GAAG,IAAI;AAAA,IACpC,SAAS;AAAA,IACT,OAAO,QAAQ;AAAA,EACjB;AAEA
,QAAM,kBAAkB,CAAC,YAAmC;AAC1D,WAAO,IAAI;AAAA,MACT;AAAA,MACA,qBAAqB,MAAM;AAAA,IAC7B;AAAA,EACF;AAEA,QAAM,wBAAwB,CAAC,YAC7B,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,YAAY;AAAA,EACnC;AAEF,QAAM,2BAA2B,CAAC,YAChC,IAAI;AAAA,IACF;AAAA,IACA,qBAAqB,WAAW;AAAA,EAClC;AAEF,QAAM,mBAAmB,CAAC,YACxB,IAAI,qBAAqB,SAAS;AAAA,IAChC,GAAG,qBAAqB,OAAO;AAAA,IAC/B,SAAS,4BAAW;AAAA,EACtB,CAAC;AAEH,QAAM,WAAW,CAAC,YAAmC,gBAAgB,OAAO;AAE5E,WAAS,kBAAkB;AAC3B,WAAS,gBAAgB;AACzB,WAAS,YAAY;AACrB,WAAS,qBAAqB;AAC9B,WAAS,QAAQ;AACjB,WAAS,aAAa;AAEtB,SAAO;AACT;AAEO,IAAM,aAAa,iBAAiB;","names":[]}
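For orientation, the rebuilt source map above embeds the provider implementation: `createTogetherAI` returns a provider that exposes `chatModel`, `languageModel`, `completionModel`, `textEmbeddingModel`, and `imageModel` (with `image` kept as a deprecated alias), and `TogetherAIImageModel` maps a `size` string such as `'1024x1024'` onto the API's `width`/`height` fields and requests `base64` output. A minimal usage sketch based only on that embedded source — the model IDs below are illustrative placeholders, not taken from this diff:

```ts
import { createTogetherAI } from '@ai-sdk/togetherai';

// Falls back to the TOGETHER_AI_API_KEY environment variable when apiKey is omitted.
const togetherai = createTogetherAI({
  apiKey: process.env.TOGETHER_AI_API_KEY,
});

// Chat, completion, and embedding models delegate to the OpenAI-compatible implementations.
const chat = togetherai.chatModel('meta-llama/Llama-3.3-70B-Instruct-Turbo'); // placeholder ID
const embedder = togetherai.textEmbeddingModel('togethercomputer/m2-bert-80M-8k-retrieval'); // placeholder ID

// Image generation goes through TogetherAIImageModel; `image()` still works but is deprecated.
const imageModel = togetherai.imageModel('black-forest-labs/FLUX.1-schnell'); // placeholder ID
```

Calling the provider directly (`togetherai('model-id')`) is equivalent to `chatModel`, and all models share the default base URL `https://api.together.xyz/v1/` unless `baseURL` is overridden in the settings.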
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/togetherai",
3
- "version": "0.2.14",
3
+ "version": "1.0.0-alpha.10",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -19,19 +19,19 @@
19
19
  }
20
20
  },
21
21
  "dependencies": {
22
- "@ai-sdk/openai-compatible": "0.2.14",
23
- "@ai-sdk/provider": "1.1.3",
24
- "@ai-sdk/provider-utils": "2.2.8"
22
+ "@ai-sdk/openai-compatible": "1.0.0-alpha.10",
23
+ "@ai-sdk/provider": "2.0.0-alpha.10",
24
+ "@ai-sdk/provider-utils": "3.0.0-alpha.10"
25
25
  },
26
26
  "devDependencies": {
27
27
  "@types/node": "20.17.24",
28
28
  "tsup": "^8",
29
- "typescript": "5.6.3",
30
- "zod": "3.23.8",
29
+ "typescript": "5.8.3",
30
+ "zod": "3.25.49",
31
31
  "@vercel/ai-tsconfig": "0.0.0"
32
32
  },
33
33
  "peerDependencies": {
34
- "zod": "^3.0.0"
34
+ "zod": "^3.25.49"
35
35
  },
36
36
  "engines": {
37
37
  "node": ">=18"
@@ -51,13 +51,15 @@
51
51
  "ai"
52
52
  ],
53
53
  "scripts": {
54
- "build": "tsup",
55
- "build:watch": "tsup --watch",
56
- "clean": "rm -rf dist",
54
+ "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
55
+ "build:watch": "pnpm clean && tsup --watch",
56
+ "clean": "rm -rf dist *.tsbuildinfo",
57
57
  "lint": "eslint \"./**/*.ts*\"",
58
- "type-check": "tsc --noEmit",
58
+ "type-check": "tsc --build",
59
59
  "prettier-check": "prettier --check \"./**/*.ts*\"",
60
60
  "test": "pnpm test:node && pnpm test:edge",
61
+ "test:update": "pnpm test:node -u",
62
+ "test:watch": "vitest --config vitest.node.config.js",
61
63
  "test:edge": "vitest --config vitest.edge.config.js --run",
62
64
  "test:node": "vitest --config vitest.node.config.js --run"
63
65
  }