@ai-sdk/cohere 2.0.8 → 2.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -0
- package/dist/index.d.mts +13 -2
- package/dist/index.d.ts +13 -2
- package/dist/index.js +208 -120
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +211 -122
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,20 @@
 # @ai-sdk/cohere
 
+## 2.0.10
+
+### Patch Changes
+
+- Updated dependencies [0294b58]
+  - @ai-sdk/provider-utils@3.0.9
+
+## 2.0.9
+
+### Patch Changes
+
+- 0816d3a: feat(provider/cohere): reasoning model support
+
+  Reasoning is now supported for all Cohere models that support it (`command-a-reasoning-08-2025` as of today). See https://docs.cohere.com/docs/reasoning
+
 ## 2.0.8
 
 ### Patch Changes
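As a usage sketch for the 2.0.9 reasoning feature (not part of the published diff): the model id and the `thinking` provider option below come from this release, while `generateText`, `providerOptions`, and `reasoningText` are assumed from a current AI SDK (`ai` v5) installation.

```ts
import { generateText } from 'ai';
import { cohere } from '@ai-sdk/cohere';

// Sketch only: the token budget value is an arbitrary example.
const result = await generateText({
  model: cohere('command-a-reasoning-08-2025'),
  prompt: 'How many liters are in a gallon?',
  providerOptions: {
    cohere: {
      thinking: { type: 'enabled', tokenBudget: 2048 },
    },
  },
});

console.log(result.reasoningText); // the model's thinking, if it emitted any
console.log(result.text);          // the final answer
```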
package/dist/index.d.mts
CHANGED
@@ -1,7 +1,18 @@
 import { ProviderV2, LanguageModelV2, EmbeddingModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
 
-type CohereChatModelId = 'command-a-03-2025' | 'command-r7b-12-2024' | 'command-r-plus-04-2024' | 'command-r-plus' | 'command-r-08-2024' | 'command-r-03-2024' | 'command-r' | 'command' | 'command-nightly' | 'command-light' | 'command-light-nightly' | (string & {});
+type CohereChatModelId = 'command-a-03-2025' | 'command-a-reasoning-08-2025' | 'command-r7b-12-2024' | 'command-r-plus-04-2024' | 'command-r-plus' | 'command-r-08-2024' | 'command-r-03-2024' | 'command-r' | 'command' | 'command-nightly' | 'command-light' | 'command-light-nightly' | (string & {});
+declare const cohereChatModelOptions: z.ZodObject<{
+    thinking: z.ZodOptional<z.ZodObject<{
+        type: z.ZodOptional<z.ZodEnum<{
+            enabled: "enabled";
+            disabled: "disabled";
+        }>>;
+        tokenBudget: z.ZodOptional<z.ZodNumber>;
+    }, z.core.$strip>>;
+}, z.core.$strip>;
+type CohereChatModelOptions = z.infer<typeof cohereChatModelOptions>;
 
 type CohereEmbeddingModelId = 'embed-english-v3.0' | 'embed-multilingual-v3.0' | 'embed-english-light-v3.0' | 'embed-multilingual-light-v3.0' | 'embed-english-v2.0' | 'embed-english-light-v2.0' | 'embed-multilingual-v2.0' | (string & {});
 
@@ -48,4 +59,4 @@ Default Cohere provider instance.
  */
 declare const cohere: CohereProvider;
 
-export { type CohereProvider, type CohereProviderSettings, cohere, createCohere };
+export { type CohereChatModelOptions, type CohereProvider, type CohereProviderSettings, cohere, createCohere };
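The newly exported `CohereChatModelOptions` type gives these provider options a statically checked shape. A minimal sketch (the shared constant below is hypothetical, not part of the package):

```ts
import type { CohereChatModelOptions } from '@ai-sdk/cohere';

// Hypothetical shared constant: declare the reasoning settings once and pass
// them later as `providerOptions: { cohere: cohereReasoningOptions }`.
const cohereReasoningOptions = {
  thinking: { type: 'enabled', tokenBudget: 4096 },
} satisfies CohereChatModelOptions;
```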
package/dist/index.d.ts
CHANGED
@@ -1,7 +1,18 @@
 import { ProviderV2, LanguageModelV2, EmbeddingModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
+import { z } from 'zod/v4';
 
-type CohereChatModelId = 'command-a-03-2025' | 'command-r7b-12-2024' | 'command-r-plus-04-2024' | 'command-r-plus' | 'command-r-08-2024' | 'command-r-03-2024' | 'command-r' | 'command' | 'command-nightly' | 'command-light' | 'command-light-nightly' | (string & {});
+type CohereChatModelId = 'command-a-03-2025' | 'command-a-reasoning-08-2025' | 'command-r7b-12-2024' | 'command-r-plus-04-2024' | 'command-r-plus' | 'command-r-08-2024' | 'command-r-03-2024' | 'command-r' | 'command' | 'command-nightly' | 'command-light' | 'command-light-nightly' | (string & {});
+declare const cohereChatModelOptions: z.ZodObject<{
+    thinking: z.ZodOptional<z.ZodObject<{
+        type: z.ZodOptional<z.ZodEnum<{
+            enabled: "enabled";
+            disabled: "disabled";
+        }>>;
+        tokenBudget: z.ZodOptional<z.ZodNumber>;
+    }, z.core.$strip>>;
+}, z.core.$strip>;
+type CohereChatModelOptions = z.infer<typeof cohereChatModelOptions>;
 
 type CohereEmbeddingModelId = 'embed-english-v3.0' | 'embed-multilingual-v3.0' | 'embed-english-light-v3.0' | 'embed-multilingual-light-v3.0' | 'embed-english-v2.0' | 'embed-english-light-v2.0' | 'embed-multilingual-v2.0' | (string & {});
 
@@ -48,4 +59,4 @@ Default Cohere provider instance.
  */
 declare const cohere: CohereProvider;
 
-export { type CohereProvider, type CohereProviderSettings, cohere, createCohere };
+export { type CohereChatModelOptions, type CohereProvider, type CohereProviderSettings, cohere, createCohere };
package/dist/index.js
CHANGED
@@ -31,13 +31,30 @@ var import_provider_utils4 = require("@ai-sdk/provider-utils");
 
 // src/cohere-chat-language-model.ts
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var
+var import_v43 = require("zod/v4");
+
+// src/cohere-chat-options.ts
+var import_v4 = require("zod/v4");
+var cohereChatModelOptions = import_v4.z.object({
+  /**
+   * Configuration for reasoning features (optional)
+   *
+   * Can be set to an object with the two properties `type` and `tokenBudget`. `type` can be set to `'enabled'` or `'disabled'` (defaults to `'enabled'`).
+   * `tokenBudget` is the maximum number of tokens the model can use for thinking, which must be set to a positive integer. The model will stop thinking if it reaches the thinking token budget and will proceed with the response
+   *
+   * @see https://docs.cohere.com/reference/chat#request.body.thinking
+   */
+  thinking: import_v4.z.object({
+    type: import_v4.z.enum(["enabled", "disabled"]).optional(),
+    tokenBudget: import_v4.z.number().optional()
+  }).optional()
+});
 
 // src/cohere-error.ts
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var
-var cohereErrorDataSchema =
-  message:
+var import_v42 = require("zod/v4");
+var cohereErrorDataSchema = import_v42.z.object({
+  message: import_v42.z.string()
 });
 var cohereFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
   errorSchema: cohereErrorDataSchema,
@@ -244,7 +261,7 @@ var CohereChatLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  getArgs({
+  async getArgs({
     prompt,
     maxOutputTokens,
     temperature,
@@ -256,8 +273,15 @@ var CohereChatLanguageModel = class {
     responseFormat,
     seed,
     tools,
-    toolChoice
+    toolChoice,
+    providerOptions
   }) {
+    var _a, _b;
+    const cohereOptions = (_a = await (0, import_provider_utils2.parseProviderOptions)({
+      provider: "cohere",
+      providerOptions,
+      schema: cohereChatModelOptions
+    })) != null ? _a : {};
     const {
       messages: chatPrompt,
       documents: cohereDocuments,
@@ -289,14 +313,21 @@ var CohereChatLanguageModel = class {
         tools: cohereTools,
         tool_choice: cohereToolChoice,
         // documents for RAG:
-        ...cohereDocuments.length > 0 && { documents: cohereDocuments }
+        ...cohereDocuments.length > 0 && { documents: cohereDocuments },
+        // reasoning
+        ...cohereOptions.thinking && {
+          thinking: {
+            type: (_b = cohereOptions.thinking.type) != null ? _b : "enabled",
+            token_budget: cohereOptions.thinking.tokenBudget
+          }
+        }
       },
       warnings: [...toolWarnings, ...promptWarnings]
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f
-    const { args, warnings } = this.getArgs(options);
+    var _a, _b, _c, _d, _e, _f;
+    const { args, warnings } = await this.getArgs(options);
    const {
       responseHeaders,
       value: response,
@@ -313,16 +344,23 @@ var CohereChatLanguageModel = class {
       fetch: this.config.fetch
     });
     const content = [];
-
-
+    for (const item of (_a = response.message.content) != null ? _a : []) {
+      if (item.type === "text" && item.text.length > 0) {
+        content.push({ type: "text", text: item.text });
+        continue;
+      }
+      if (item.type === "thinking" && item.thinking.length > 0) {
+        content.push({ type: "reasoning", text: item.thinking });
+        continue;
+      }
     }
-    for (const citation of (
+    for (const citation of (_b = response.message.citations) != null ? _b : []) {
       content.push({
         type: "source",
         sourceType: "document",
         id: this.config.generateId(),
         mediaType: "text/plain",
-        title: ((
+        title: ((_d = (_c = citation.sources[0]) == null ? void 0 : _c.document) == null ? void 0 : _d.title) || "Document",
         providerMetadata: {
           cohere: {
             start: citation.start,
@@ -334,7 +372,7 @@ var CohereChatLanguageModel = class {
         }
       });
     }
-    for (const toolCall of (
+    for (const toolCall of (_e = response.message.tool_calls) != null ? _e : []) {
       content.push({
         type: "tool-call",
         toolCallId: toolCall.id,
@@ -355,7 +393,7 @@ var CohereChatLanguageModel = class {
       request: { body: args },
       response: {
         // TODO timestamp, model id
-        id: (
+        id: (_f = response.generation_id) != null ? _f : void 0,
         headers: responseHeaders,
         body: rawResponse
       },
@@ -363,7 +401,7 @@ var CohereChatLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args, warnings } = this.getArgs(options);
+    const { args, warnings } = await this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils2.postJsonToApi)({
       url: `${this.config.baseURL}/chat`,
       headers: (0, import_provider_utils2.combineHeaders)(this.config.headers(), options.headers),
@@ -382,6 +420,7 @@ var CohereChatLanguageModel = class {
       totalTokens: void 0
     };
     let pendingToolCall = null;
+    let isActiveReasoning = false;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -402,6 +441,14 @@ var CohereChatLanguageModel = class {
            const type = value.type;
            switch (type) {
              case "content-start": {
+                if (value.delta.message.content.type === "thinking") {
+                  controller.enqueue({
+                    type: "reasoning-start",
+                    id: String(value.index)
+                  });
+                  isActiveReasoning = true;
+                  return;
+                }
                controller.enqueue({
                  type: "text-start",
                  id: String(value.index)
@@ -409,6 +456,14 @@ var CohereChatLanguageModel = class {
                return;
              }
              case "content-delta": {
+                if ("thinking" in value.delta.message.content) {
+                  controller.enqueue({
+                    type: "reasoning-delta",
+                    id: String(value.index),
+                    delta: value.delta.message.content.thinking
+                  });
+                  return;
+                }
                controller.enqueue({
                  type: "text-delta",
                  id: String(value.index),
@@ -417,6 +472,14 @@ var CohereChatLanguageModel = class {
                return;
              }
              case "content-end": {
+                if (isActiveReasoning) {
+                  controller.enqueue({
+                    type: "reasoning-end",
+                    id: String(value.index)
+                  });
+                  isActiveReasoning = false;
+                  return;
+                }
                controller.enqueue({
                  type: "text-end",
                  id: String(value.index)
@@ -512,120 +575,145 @@ var CohereChatLanguageModel = class {
     };
   }
 };
-var cohereChatResponseSchema =
-  generation_id:
-  message:
-    role:
-    content:
-
-
-
-
+var cohereChatResponseSchema = import_v43.z.object({
+  generation_id: import_v43.z.string().nullish(),
+  message: import_v43.z.object({
+    role: import_v43.z.string(),
+    content: import_v43.z.array(
+      import_v43.z.union([
+        import_v43.z.object({
+          type: import_v43.z.literal("text"),
+          text: import_v43.z.string()
+        }),
+        import_v43.z.object({
+          type: import_v43.z.literal("thinking"),
+          thinking: import_v43.z.string()
+        })
+      ])
     ).nullish(),
-    tool_plan:
-    tool_calls:
-
-      id:
-      type:
-      function:
-        name:
-        arguments:
+    tool_plan: import_v43.z.string().nullish(),
+    tool_calls: import_v43.z.array(
+      import_v43.z.object({
+        id: import_v43.z.string(),
+        type: import_v43.z.literal("function"),
+        function: import_v43.z.object({
+          name: import_v43.z.string(),
+          arguments: import_v43.z.string()
        })
      })
    ).nullish(),
-    citations:
-
-      start:
-      end:
-      text:
-      sources:
-
-        type:
-        id:
-        document:
-          id:
-          text:
-          title:
+    citations: import_v43.z.array(
+      import_v43.z.object({
+        start: import_v43.z.number(),
+        end: import_v43.z.number(),
+        text: import_v43.z.string(),
+        sources: import_v43.z.array(
+          import_v43.z.object({
+            type: import_v43.z.string().optional(),
+            id: import_v43.z.string().optional(),
+            document: import_v43.z.object({
+              id: import_v43.z.string().optional(),
+              text: import_v43.z.string(),
+              title: import_v43.z.string()
            })
          })
        ),
-        type:
+        type: import_v43.z.string().optional()
      })
    ).nullish()
  }),
-  finish_reason:
-  usage:
-    billed_units:
-      input_tokens:
-      output_tokens:
+  finish_reason: import_v43.z.string(),
+  usage: import_v43.z.object({
+    billed_units: import_v43.z.object({
+      input_tokens: import_v43.z.number(),
+      output_tokens: import_v43.z.number()
    }),
-    tokens:
-      input_tokens:
-      output_tokens:
+    tokens: import_v43.z.object({
+      input_tokens: import_v43.z.number(),
+      output_tokens: import_v43.z.number()
    })
  })
});
-var cohereChatChunkSchema =
-
-    type:
+var cohereChatChunkSchema = import_v43.z.discriminatedUnion("type", [
+  import_v43.z.object({
+    type: import_v43.z.literal("citation-start")
  }),
-
-    type:
+  import_v43.z.object({
+    type: import_v43.z.literal("citation-end")
  }),
-
-    type:
-    index:
+  import_v43.z.object({
+    type: import_v43.z.literal("content-start"),
+    index: import_v43.z.number(),
+    delta: import_v43.z.object({
+      message: import_v43.z.object({
+        content: import_v43.z.union([
+          import_v43.z.object({
+            type: import_v43.z.literal("text"),
+            text: import_v43.z.string()
+          }),
+          import_v43.z.object({
+            type: import_v43.z.literal("thinking"),
+            thinking: import_v43.z.string()
+          })
+        ])
+      })
+    })
  }),
-
-    type:
-    index:
-    delta:
-      message:
-        content:
-
-
+  import_v43.z.object({
+    type: import_v43.z.literal("content-delta"),
+    index: import_v43.z.number(),
+    delta: import_v43.z.object({
+      message: import_v43.z.object({
+        content: import_v43.z.union([
+          import_v43.z.object({
+            text: import_v43.z.string()
+          }),
+          import_v43.z.object({
+            thinking: import_v43.z.string()
+          })
+        ])
      })
    })
  }),
-
-    type:
-    index:
+  import_v43.z.object({
+    type: import_v43.z.literal("content-end"),
+    index: import_v43.z.number()
  }),
-
-    type:
-    id:
+  import_v43.z.object({
+    type: import_v43.z.literal("message-start"),
+    id: import_v43.z.string().nullish()
  }),
-
-    type:
-    delta:
-      finish_reason:
-      usage:
-        tokens:
-          input_tokens:
-          output_tokens:
+  import_v43.z.object({
+    type: import_v43.z.literal("message-end"),
+    delta: import_v43.z.object({
+      finish_reason: import_v43.z.string(),
+      usage: import_v43.z.object({
+        tokens: import_v43.z.object({
+          input_tokens: import_v43.z.number(),
+          output_tokens: import_v43.z.number()
        })
      })
    })
  }),
  // https://docs.cohere.com/v2/docs/streaming#tool-use-stream-events-for-tool-calling
-
-    type:
-    delta:
-      message:
-        tool_plan:
+  import_v43.z.object({
+    type: import_v43.z.literal("tool-plan-delta"),
+    delta: import_v43.z.object({
+      message: import_v43.z.object({
+        tool_plan: import_v43.z.string()
      })
    })
  }),
-
-    type:
-    delta:
-      message:
-        tool_calls:
-          id:
-          type:
-          function:
-            name:
-            arguments:
+  import_v43.z.object({
+    type: import_v43.z.literal("tool-call-start"),
+    delta: import_v43.z.object({
+      message: import_v43.z.object({
+        tool_calls: import_v43.z.object({
+          id: import_v43.z.string(),
+          type: import_v43.z.literal("function"),
+          function: import_v43.z.object({
+            name: import_v43.z.string(),
+            arguments: import_v43.z.string()
          })
        })
      })
@@ -634,31 +722,31 @@ var cohereChatChunkSchema = import_v42.z.discriminatedUnion("type", [
   // A single tool call's `arguments` stream in chunks and must be accumulated
   // in a string and so the full tool object info can only be parsed once we see
   // `tool-call-end`.
-
-    type:
-    delta:
-      message:
-        tool_calls:
-          function:
-            arguments:
+  import_v43.z.object({
+    type: import_v43.z.literal("tool-call-delta"),
+    delta: import_v43.z.object({
+      message: import_v43.z.object({
+        tool_calls: import_v43.z.object({
+          function: import_v43.z.object({
+            arguments: import_v43.z.string()
          })
        })
      })
    })
  }),
-
-    type:
+  import_v43.z.object({
+    type: import_v43.z.literal("tool-call-end")
  })
 ]);
 
 // src/cohere-embedding-model.ts
 var import_provider3 = require("@ai-sdk/provider");
 var import_provider_utils3 = require("@ai-sdk/provider-utils");
-var
+var import_v45 = require("zod/v4");
 
 // src/cohere-embedding-options.ts
-var
-var cohereEmbeddingOptions =
+var import_v44 = require("zod/v4");
+var cohereEmbeddingOptions = import_v44.z.object({
   /**
    * Specifies the type of input passed to the model. Default is `search_query`.
    *
@@ -667,7 +755,7 @@ var cohereEmbeddingOptions = import_v43.z.object({
    * - "classification": Used for embeddings passed through a text classifier.
    * - "clustering": Used for embeddings run through a clustering algorithm.
    */
-  inputType:
+  inputType: import_v44.z.enum(["search_document", "search_query", "classification", "clustering"]).optional(),
   /**
    * Specifies how the API will handle inputs longer than the maximum token length.
    * Default is `END`.
@@ -676,7 +764,7 @@ var cohereEmbeddingOptions = import_v43.z.object({
    * - "START": Will discard the start of the input until the remaining input is exactly the maximum input token length for the model.
    * - "END": Will discard the end of the input until the remaining input is exactly the maximum input token length for the model.
    */
-  truncate:
+  truncate: import_v44.z.enum(["NONE", "START", "END"]).optional()
 });
 
 // src/cohere-embedding-model.ts
@@ -742,13 +830,13 @@ var CohereEmbeddingModel = class {
     };
   }
 };
-var cohereTextEmbeddingResponseSchema =
-  embeddings:
-    float:
+var cohereTextEmbeddingResponseSchema = import_v45.z.object({
+  embeddings: import_v45.z.object({
+    float: import_v45.z.array(import_v45.z.array(import_v45.z.number()))
  }),
-  meta:
-    billed_units:
-      input_tokens:
+  meta: import_v45.z.object({
+    billed_units: import_v45.z.object({
+      input_tokens: import_v45.z.number()
    })
  })
});
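Taken together, the chat-model changes route Cohere's `thinking` content through the AI SDK's reasoning parts: in `doGenerate`, `thinking` items become `reasoning` content, and in `doStream`, `content-start`/`content-delta`/`content-end` events carrying `thinking` are re-emitted as `reasoning-start`/`reasoning-delta`/`reasoning-end`, while `text` payloads keep flowing as `text-*` parts. A rough illustration of that mapping, using an invented chunk payload in the shape accepted by `cohereChatChunkSchema`:

```ts
// Invented example of a Cohere streaming chunk (shape per cohereChatChunkSchema above).
const chunk = {
  type: 'content-delta' as const,
  index: 0,
  delta: { message: { content: { thinking: 'Comparing unit conversions...' } } },
};

// What the provider's TransformStream enqueues for it (see doStream above).
const emitted = {
  type: 'reasoning-delta' as const,
  id: String(chunk.index),
  delta: chunk.delta.message.content.thinking,
};

console.log(emitted); // { type: 'reasoning-delta', id: '0', delta: 'Comparing unit conversions...' }
```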