ai 3.2.33 → 3.2.35
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +529 -432
- package/dist/index.d.ts +529 -432
- package/dist/index.js +448 -440
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +439 -426
- package/dist/index.mjs.map +1 -1
- package/package.json +14 -10
- package/rsc/dist/index.d.ts +13 -0
- package/rsc/dist/rsc-server.d.mts +13 -0
- package/rsc/dist/rsc-server.mjs +35 -4
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.mjs
CHANGED
@@ -13,6 +13,157 @@ import {
 } from "@ai-sdk/ui-utils";
 import { generateId as generateIdImpl } from "@ai-sdk/provider-utils";
 
+// core/telemetry/get-base-telemetry-attributes.ts
+function getBaseTelemetryAttributes({
+  operationName,
+  model,
+  settings,
+  telemetry,
+  headers
+}) {
+  var _a;
+  return {
+    "ai.model.provider": model.provider,
+    "ai.model.id": model.modelId,
+    // settings:
+    ...Object.entries(settings).reduce((attributes, [key, value]) => {
+      attributes[`ai.settings.${key}`] = value;
+      return attributes;
+    }, {}),
+    // special telemetry information
+    "operation.name": operationName,
+    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+    // add metadata as attributes:
+    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+      (attributes, [key, value]) => {
+        attributes[`ai.telemetry.metadata.${key}`] = value;
+        return attributes;
+      },
+      {}
+    ),
+    // request headers
+    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+      if (value !== void 0) {
+        attributes[`ai.request.headers.${key}`] = value;
+      }
+      return attributes;
+    }, {})
+  };
+}
+
+// core/telemetry/get-tracer.ts
+import { trace } from "@opentelemetry/api";
+
+// core/telemetry/noop-tracer.ts
+var noopTracer = {
+  startSpan() {
+    return noopSpan;
+  },
+  startActiveSpan(name, arg1, arg2, arg3) {
+    if (typeof arg1 === "function") {
+      return arg1(noopSpan);
+    }
+    if (typeof arg2 === "function") {
+      return arg2(noopSpan);
+    }
+    if (typeof arg3 === "function") {
+      return arg3(noopSpan);
+    }
+  }
+};
+var noopSpan = {
+  spanContext() {
+    return noopSpanContext;
+  },
+  setAttribute() {
+    return this;
+  },
+  setAttributes() {
+    return this;
+  },
+  addEvent() {
+    return this;
+  },
+  addLink() {
+    return this;
+  },
+  addLinks() {
+    return this;
+  },
+  setStatus() {
+    return this;
+  },
+  updateName() {
+    return this;
+  },
+  end() {
+    return this;
+  },
+  isRecording() {
+    return false;
+  },
+  recordException() {
+    return this;
+  }
+};
+var noopSpanContext = {
+  traceId: "",
+  spanId: "",
+  traceFlags: 0
+};
+
+// core/telemetry/get-tracer.ts
+var testTracer = void 0;
+function getTracer({ isEnabled }) {
+  if (!isEnabled) {
+    return noopTracer;
+  }
+  if (testTracer) {
+    return testTracer;
+  }
+  return trace.getTracer("ai");
+}
+
+// core/telemetry/record-span.ts
+import { SpanStatusCode } from "@opentelemetry/api";
+function recordSpan({
+  name,
+  tracer,
+  attributes,
+  fn,
+  endWhenDone = true
+}) {
+  return tracer.startActiveSpan(name, { attributes }, async (span) => {
+    try {
+      const result = await fn(span);
+      if (endWhenDone) {
+        span.end();
+      }
+      return result;
+    } catch (error) {
+      try {
+        if (error instanceof Error) {
+          span.recordException({
+            name: error.name,
+            message: error.message,
+            stack: error.stack
+          });
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: error.message
+          });
+        } else {
+          span.setStatus({ code: SpanStatusCode.ERROR });
+        }
+      } finally {
+        span.end();
+      }
+      throw error;
+    }
+  });
+}
+
 // core/util/retry-with-exponential-backoff.ts
 import { APICallError, RetryError } from "@ai-sdk/provider";
 import { getErrorMessage, isAbortError } from "@ai-sdk/provider-utils";
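Note: the telemetry helpers in this hunk are internal to the bundle (they are not in the export list at the end of this diff), but their contract is fully visible above. A minimal sketch of the recordSpan pattern, assuming local copies of the getTracer/recordSpan functions shown in this hunk:

// With telemetry disabled, getTracer returns noopTracer and every span call is a no-op.
const tracer = getTracer({ isEnabled: false });

const value = await recordSpan({
  name: "my.operation",
  tracer,
  attributes: { "my.attr": "demo" },
  fn: async (span) => {
    span.setAttributes({ "my.result.size": 3 }); // no-op on the noop span
    return "done";
  }
});
// value === "done"; if fn throws, recordSpan records the exception,
// marks the span as errored, ends it, and rethrows.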
@@ -81,21 +232,72 @@ async function embed({
   value,
   maxRetries,
   abortSignal,
-  headers
+  headers,
+  experimental_telemetry: telemetry
 }) {
   var _a;
-  const
-
-
-
-
-
-
-
-
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.embed",
+    model,
+    telemetry,
+    headers,
+    settings: { maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.embed",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.value": JSON.stringify(value)
+    },
+    tracer,
+    fn: async (span) => {
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const { embedding, usage, rawResponse } = await retry(
+        () => (
+          // nested spans to align with the embedMany telemetry data:
+          recordSpan({
+            name: "ai.embed.doEmbed",
+            attributes: {
+              ...baseTelemetryAttributes,
+              // specific settings that only make sense on the outer level:
+              "ai.values": [JSON.stringify(value)]
+            },
+            tracer,
+            fn: async (doEmbedSpan) => {
+              var _a2;
+              const modelResponse = await model.doEmbed({
+                values: [value],
+                abortSignal,
+                headers
+              });
+              const embedding2 = modelResponse.embeddings[0];
+              const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+              doEmbedSpan.setAttributes({
+                "ai.embeddings": modelResponse.embeddings.map(
+                  (embedding3) => JSON.stringify(embedding3)
+                ),
+                "ai.usage.tokens": usage2.tokens
+              });
+              return {
+                embedding: embedding2,
+                usage: usage2,
+                rawResponse: modelResponse.rawResponse
+              };
+            }
+          })
+        )
+      );
+      span.setAttributes({
+        "ai.embedding": JSON.stringify(embedding),
+        "ai.usage.tokens": usage.tokens
+      });
+      return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+    }
   });
 }
-var EmbedResult = class {
+var DefaultEmbedResult = class {
   constructor(options) {
     this.value = options.value;
     this.embedding = options.embedding;
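Note: the hunk above is where embed gains the new experimental_telemetry option. A hedged usage sketch (the provider import and model id are illustrative, not taken from this diff):

import { embed } from "ai";
import { openai } from "@ai-sdk/openai"; // illustrative provider

const { embedding, usage } = await embed({
  model: openai.embedding("text-embedding-3-small"), // hypothetical model choice
  value: "sunny day at the beach",
  experimental_telemetry: {
    isEnabled: true,                  // read as telemetry.isEnabled above
    functionId: "my-embed-call",      // recorded as "resource.name" / "ai.telemetry.functionId"
    metadata: { userId: "user-123" }  // recorded as "ai.telemetry.metadata.userId"
  }
});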
@@ -131,7 +333,7 @@ async function embedMany({
     const modelResponse = await retry(
       () => model.doEmbed({ values, abortSignal, headers })
     );
-    return new EmbedManyResult({
+    return new DefaultEmbedManyResult({
       values,
       embeddings: modelResponse.embeddings,
       usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
@@ -147,9 +349,9 @@ async function embedMany({
     embeddings.push(...modelResponse.embeddings);
     tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
   }
-  return new EmbedManyResult({ values, embeddings, usage: { tokens } });
+  return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
 }
-var EmbedManyResult = class {
+var DefaultEmbedManyResult = class {
   constructor(options) {
     this.values = options.values;
     this.embeddings = options.embeddings;
@@ -427,6 +629,7 @@ function prepareCallSettings({
   topP,
   presencePenalty,
   frequencyPenalty,
+  stopSequences,
   seed,
   maxRetries
 }) {
@@ -513,6 +716,7 @@ function prepareCallSettings({
     topP,
     presencePenalty,
     frequencyPenalty,
+    stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
     seed,
     maxRetries: maxRetries != null ? maxRetries : 2
   };
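Note: these two prepareCallSettings hunks thread a new stopSequences call setting through, normalizing empty arrays to undefined. Assuming stopSequences is accepted wherever the other call settings are (which is what this plumbing implies), a call would look like:

import { generateText } from "ai";

const { text } = await generateText({
  model,                    // any language model instance (assumed in scope)
  prompt: "List three colors:",
  stopSequences: ["\n\n"]   // forwarded by prepareCallSettings; [] becomes undefined
});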
@@ -527,12 +731,6 @@ function calculateCompletionTokenUsage(usage) {
   };
 }
 
-// core/util/convert-zod-to-json-schema.ts
-import zodToJsonSchema from "zod-to-json-schema";
-function convertZodToJSONSchema(zodSchema) {
-  return zodToJsonSchema(zodSchema);
-}
-
 // core/util/prepare-response-headers.ts
 function prepareResponseHeaders(init, { contentType }) {
   var _a;
@@ -543,6 +741,41 @@ function prepareResponseHeaders(init, { contentType }) {
   return headers;
 }
 
+// core/util/schema.ts
+import { validatorSymbol } from "@ai-sdk/provider-utils";
+import zodToJsonSchema from "zod-to-json-schema";
+var schemaSymbol = Symbol("vercel.ai.schema");
+function jsonSchema(jsonSchema2, {
+  validate
+} = {}) {
+  return {
+    [schemaSymbol]: true,
+    _type: void 0,
+    // should never be used directly
+    [validatorSymbol]: true,
+    jsonSchema: jsonSchema2,
+    validate
+  };
+}
+function isSchema(value) {
+  return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
+}
+function asSchema(schema) {
+  return isSchema(schema) ? schema : zodSchema(schema);
+}
+function zodSchema(zodSchema2) {
+  return jsonSchema(
+    // we assume that zodToJsonSchema will return a valid JSONSchema7:
+    zodToJsonSchema(zodSchema2),
+    {
+      validate: (value) => {
+        const result = zodSchema2.safeParse(value);
+        return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+      }
+    }
+  );
+}
+
 // core/generate-object/inject-json-schema-into-system.ts
 var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
 var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
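Note: this new core/util/schema.ts module backs the jsonSchema export added at the end of this diff. Schemas may now be plain JSON Schema objects rather than Zod schemas; asSchema normalizes both forms. A sketch based only on the shapes shown above (the optional validate callback must return the { success, value } / { success, error } shape that zodSchema uses; the error value here is illustrative):

import { jsonSchema } from "ai";

const citySchema = jsonSchema(
  {
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"]
  },
  {
    // optional; mirrors the validate contract of zodSchema above
    validate: (value) =>
      typeof value === "object" && value !== null && "city" in value
        ? { success: true, value }
        : { success: false, error: new Error("expected { city }") }
  }
);
// Zod schemas continue to work: asSchema() wraps them via zodSchema().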
@@ -565,7 +798,7 @@ function injectJsonSchemaIntoSystem({
 // core/generate-object/generate-object.ts
 async function generateObject({
   model,
-  schema,
+  schema: inputSchema,
   mode,
   system,
   prompt,
@@ -573,133 +806,183 @@ async function generateObject({
   maxRetries,
   abortSignal,
   headers,
+  experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a, _b;
-  const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      abortSignal,
-      headers
-    });
-  });
-  if (generateResult.text === void 0) {
-    throw new NoObjectGeneratedError();
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.generateObject",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
+  const schema = asSchema(inputSchema);
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.generateObject",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages }),
+      "ai.settings.jsonSchema": JSON.stringify(schema.jsonSchema),
+      "ai.settings.mode": mode
+    },
+    tracer,
+    fn: async (span) => {
+      var _a2, _b;
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      if (mode === "auto" || mode == null) {
+        mode = model.defaultObjectGenerationMode;
       }
-  result
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      let result;
+      let finishReason;
+      let usage;
+      let warnings;
+      let rawResponse;
+      let logprobs;
+      switch (mode) {
+        case "json": {
+          const validatedPrompt = getValidatedPrompt({
+            system: injectJsonSchemaIntoSystem({
+              system,
+              schema: schema.jsonSchema
+            }),
+            prompt,
+            messages
+          });
+          const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+          const inputFormat = validatedPrompt.type;
+          const generateResult = await retry(
+            () => recordSpan({
+              name: "ai.generateObject.doGenerate",
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": inputFormat,
+                "ai.prompt.messages": JSON.stringify(promptMessages),
+                "ai.settings.mode": mode
+              },
+              tracer,
+              fn: async (span2) => {
+                const result2 = await model.doGenerate({
+                  mode: { type: "object-json" },
+                  ...prepareCallSettings(settings),
+                  inputFormat,
+                  prompt: promptMessages,
+                  abortSignal,
+                  headers
+                });
+                span2.setAttributes({
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.text": result2.text
+                });
+                return result2;
+              }
+            })
+          );
+          if (generateResult.text === void 0) {
+            throw new NoObjectGeneratedError();
+          }
+          result = generateResult.text;
+          finishReason = generateResult.finishReason;
+          usage = generateResult.usage;
+          warnings = generateResult.warnings;
+          rawResponse = generateResult.rawResponse;
+          logprobs = generateResult.logprobs;
+          break;
+        }
+        case "tool": {
+          const validatedPrompt = getValidatedPrompt({
+            system,
+            prompt,
+            messages
+          });
+          const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+          const inputFormat = validatedPrompt.type;
+          const generateResult = await retry(
+            () => recordSpan({
+              name: "ai.generateObject.doGenerate",
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": inputFormat,
+                "ai.prompt.messages": JSON.stringify(promptMessages),
+                "ai.settings.mode": mode
+              },
+              tracer,
+              fn: async (span2) => {
+                const result2 = await model.doGenerate({
+                  mode: {
+                    type: "object-tool",
+                    tool: {
+                      type: "function",
+                      name: "json",
+                      description: "Respond with a JSON object.",
+                      parameters: schema.jsonSchema
+                    }
+                  },
+                  ...prepareCallSettings(settings),
+                  inputFormat,
+                  prompt: promptMessages,
+                  abortSignal,
+                  headers
+                });
+                span2.setAttributes({
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.text": result2.text,
+                  "ai.result.toolCalls": JSON.stringify(result2.toolCalls)
+                });
+                return result2;
+              }
+            })
+          );
+          const functionArgs = (_b = (_a2 = generateResult.toolCalls) == null ? void 0 : _a2[0]) == null ? void 0 : _b.args;
+          if (functionArgs === void 0) {
+            throw new NoObjectGeneratedError();
+          }
+          result = functionArgs;
+          finishReason = generateResult.finishReason;
+          usage = generateResult.usage;
+          warnings = generateResult.warnings;
+          rawResponse = generateResult.rawResponse;
+          logprobs = generateResult.logprobs;
+          break;
+        }
+        case void 0: {
+          throw new Error(
+            "Model does not have a default object generation mode."
+          );
+        }
+        default: {
+          const _exhaustiveCheck = mode;
+          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+        }
       }
-
-
-
-      warnings = generateResult.warnings;
-      rawResponse = generateResult.rawResponse;
-      logprobs = generateResult.logprobs;
-      break;
-    }
-    case "tool": {
-      const validatedPrompt = getValidatedPrompt({
-        system,
-        prompt,
-        messages
-      });
-      const generateResult = await retry(
-        () => model.doGenerate({
-          mode: {
-            type: "object-tool",
-            tool: {
-              type: "function",
-              name: "json",
-              description: "Respond with a JSON object.",
-              parameters: jsonSchema
-            }
-          },
-          ...prepareCallSettings(settings),
-          inputFormat: validatedPrompt.type,
-          prompt: convertToLanguageModelPrompt(validatedPrompt),
-          abortSignal,
-          headers
-        })
-      );
-      const functionArgs = (_b = (_a = generateResult.toolCalls) == null ? void 0 : _a[0]) == null ? void 0 : _b.args;
-      if (functionArgs === void 0) {
-        throw new NoObjectGeneratedError();
+      const parseResult = safeParseJSON({ text: result, schema });
+      if (!parseResult.success) {
+        throw parseResult.error;
       }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+      span.setAttributes({
+        "ai.finishReason": finishReason,
+        "ai.usage.promptTokens": usage.promptTokens,
+        "ai.usage.completionTokens": usage.completionTokens,
+        "ai.result.object": JSON.stringify(parseResult.value)
+      });
+      return new DefaultGenerateObjectResult({
+        object: parseResult.value,
+        finishReason,
+        usage: calculateCompletionTokenUsage(usage),
+        warnings,
+        rawResponse,
+        logprobs
+      });
     }
-  }
-  const parseResult = safeParseJSON({ text: result, schema });
-  if (!parseResult.success) {
-    throw parseResult.error;
-  }
-  return new GenerateObjectResult({
-    object: parseResult.value,
-    finishReason,
-    usage: calculateCompletionTokenUsage(usage),
-    warnings,
-    rawResponse,
-    logprobs
   });
 }
-var GenerateObjectResult = class {
+var DefaultGenerateObjectResult = class {
   constructor(options) {
     this.object = options.object;
     this.finishReason = options.finishReason;
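Note: generateObject now takes the user-facing schema as inputSchema and normalizes it with asSchema, so it accepts both Zod schemas and jsonSchema(...) values, and it gains the same experimental_telemetry option as embed. A hedged sketch (the Zod schema and model are illustrative):

import { generateObject } from "ai";
import { z } from "zod";

const { object } = await generateObject({
  model,                                    // any language model instance (assumed in scope)
  schema: z.object({ city: z.string() }),   // or a jsonSchema(...) value
  prompt: "Name a city in France.",
  experimental_telemetry: { isEnabled: true, functionId: "city-picker" }
});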
@@ -708,10 +991,6 @@ var GenerateObjectResult = class {
     this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
-  /**
-  Converts the object to a JSON response.
-  The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
-   */
   toJsonResponse(init) {
     var _a;
     return new Response(JSON.stringify(this.object), {
@@ -789,7 +1068,7 @@ var DelayedPromise = class {
 // core/generate-object/stream-object.ts
 async function streamObject({
   model,
-  schema,
+  schema: inputSchema,
   mode,
   system,
   prompt,
@@ -801,7 +1080,7 @@ async function streamObject({
   ...settings
 }) {
   const retry = retryWithExponentialBackoff({ maxRetries });
-  const jsonSchema = convertZodToJSONSchema(schema);
+  const schema = asSchema(inputSchema);
   if (mode === "auto" || mode == null) {
     mode = model.defaultObjectGenerationMode;
   }
@@ -810,7 +1089,10 @@ async function streamObject({
   switch (mode) {
     case "json": {
       const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
+        system: injectJsonSchemaIntoSystem({
+          system,
+          schema: schema.jsonSchema
+        }),
         prompt,
         messages
       });
@@ -837,35 +1119,6 @@ async function streamObject({
       };
       break;
     }
-    case "grammar": {
-      const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-        prompt,
-        messages
-      });
-      callOptions = {
-        mode: { type: "object-grammar", schema: jsonSchema },
-        ...prepareCallSettings(settings),
-        inputFormat: validatedPrompt.type,
-        prompt: convertToLanguageModelPrompt(validatedPrompt),
-        abortSignal,
-        headers
-      };
-      transformer = {
-        transform: (chunk, controller) => {
-          switch (chunk.type) {
-            case "text-delta":
-              controller.enqueue(chunk.textDelta);
-              break;
-            case "finish":
-            case "error":
-              controller.enqueue(chunk);
-              break;
-          }
-        }
-      };
-      break;
-    }
     case "tool": {
       const validatedPrompt = getValidatedPrompt({
         system,
@@ -879,7 +1132,7 @@ async function streamObject({
             type: "function",
             name: "json",
             description: "Respond with a JSON object.",
-            parameters: jsonSchema
+            parameters: schema.jsonSchema
           }
         },
         ...prepareCallSettings(settings),
@@ -912,7 +1165,7 @@ async function streamObject({
     }
   }
   const result = await retry(() => model.doStream(callOptions));
-  return new StreamObjectResult({
+  return new DefaultStreamObjectResult({
     stream: result.stream.pipeThrough(new TransformStream(transformer)),
     warnings: result.warnings,
     rawResponse: result.rawResponse,
@@ -920,7 +1173,7 @@ async function streamObject({
     onFinish
   });
 }
-var StreamObjectResult = class {
+var DefaultStreamObjectResult = class {
   constructor({
     stream,
     warnings,
@@ -1016,18 +1269,9 @@ var StreamObjectResult = class {
       })
     );
   }
-  /**
-  The generated object (typed according to the schema). Resolved when the response is finished.
-   */
   get object() {
     return this.objectPromise.value;
   }
-  /**
-  Stream of partial objects. It gets more complete as the stream progresses.
-
-  Note that the partial object is not validated.
-  If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
-   */
   get partialObjectStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1049,10 +1293,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Text stream of the JSON representation of the generated object. It contains text chunks.
-  When the stream is finished, the object is valid JSON that can be parsed.
-   */
   get textStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1074,9 +1314,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Stream of different types of events, including partial objects, errors, and finish events.
-   */
   get fullStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1084,14 +1321,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Writes text delta output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each text delta as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeTextStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -1115,14 +1344,6 @@ var StreamObjectResult = class {
     };
     read();
   }
-  /**
-  Creates a simple text stream response.
-  The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
-  Each text delta is encoded as UTF-8 and sent as a separate chunk.
-  Non-text-delta events are ignored.
-
-  @param init Optional headers and status code.
-   */
   toTextStreamResponse(init) {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -1156,163 +1377,12 @@ function prepareToolsAndToolChoice({
       type: "function",
       name,
       description: tool2.description,
-      parameters: convertZodToJSONSchema(tool2.parameters)
+      parameters: asSchema(tool2.parameters).jsonSchema
     })),
     toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
   };
 }
 
-// core/telemetry/get-base-telemetry-attributes.ts
-function getBaseTelemetryAttributes({
-  operationName,
-  model,
-  settings,
-  telemetry,
-  headers
-}) {
-  var _a;
-  return {
-    "ai.model.provider": model.provider,
-    "ai.model.id": model.modelId,
-    // settings:
-    ...Object.entries(settings).reduce((attributes, [key, value]) => {
-      attributes[`ai.settings.${key}`] = value;
-      return attributes;
-    }, {}),
-    // special telemetry information
-    "operation.name": operationName,
-    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
-    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
-    // add metadata as attributes:
-    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
-      (attributes, [key, value]) => {
-        attributes[`ai.telemetry.metadata.${key}`] = value;
-        return attributes;
-      },
-      {}
-    ),
-    // request headers
-    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
-      if (value !== void 0) {
-        attributes[`ai.request.headers.${key}`] = value;
-      }
-      return attributes;
-    }, {})
-  };
-}
-
-// core/telemetry/get-tracer.ts
-import { trace } from "@opentelemetry/api";
-
-// core/telemetry/noop-tracer.ts
-var noopTracer = {
-  startSpan() {
-    return noopSpan;
-  },
-  startActiveSpan(name, arg1, arg2, arg3) {
-    if (typeof arg1 === "function") {
-      return arg1(noopSpan);
-    }
-    if (typeof arg2 === "function") {
-      return arg2(noopSpan);
-    }
-    if (typeof arg3 === "function") {
-      return arg3(noopSpan);
-    }
-  }
-};
-var noopSpan = {
-  spanContext() {
-    return noopSpanContext;
-  },
-  setAttribute() {
-    return this;
-  },
-  setAttributes() {
-    return this;
-  },
-  addEvent() {
-    return this;
-  },
-  addLink() {
-    return this;
-  },
-  addLinks() {
-    return this;
-  },
-  setStatus() {
-    return this;
-  },
-  updateName() {
-    return this;
-  },
-  end() {
-    return this;
-  },
-  isRecording() {
-    return false;
-  },
-  recordException() {
-    return this;
-  }
-};
-var noopSpanContext = {
-  traceId: "",
-  spanId: "",
-  traceFlags: 0
-};
-
-// core/telemetry/get-tracer.ts
-var testTracer = void 0;
-function getTracer({ isEnabled }) {
-  if (!isEnabled) {
-    return noopTracer;
-  }
-  if (testTracer) {
-    return testTracer;
-  }
-  return trace.getTracer("ai");
-}
-
-// core/telemetry/record-span.ts
-import { SpanStatusCode } from "@opentelemetry/api";
-function recordSpan({
-  name,
-  tracer,
-  attributes,
-  fn,
-  endWhenDone = true
-}) {
-  return tracer.startActiveSpan(name, { attributes }, async (span) => {
-    try {
-      const result = await fn(span);
-      if (endWhenDone) {
-        span.end();
-      }
-      return result;
-    } catch (error) {
-      try {
-        if (error instanceof Error) {
-          span.recordException({
-            name: error.name,
-            message: error.message,
-            stack: error.stack
-          });
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: error.message
-          });
-        } else {
-          span.setStatus({ code: SpanStatusCode.ERROR });
-        }
-      } finally {
-        span.end();
-      }
-      throw error;
-    }
-  });
-}
-
 // core/generate-text/tool-call.ts
 import {
   InvalidToolArgumentsError,
@@ -1336,7 +1406,7 @@ function parseToolCall({
   }
   const parseResult = safeParseJSON2({
     text: toolCall.args,
-    schema: tool2.parameters
+    schema: asSchema(tool2.parameters)
   });
   if (parseResult.success === false) {
     throw new InvalidToolArgumentsError({
@@ -1488,7 +1558,7 @@ async function generateText({
       "ai.result.text": currentModelResponse.text,
       "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
     });
-    return new GenerateTextResult({
+    return new DefaultGenerateTextResult({
       // Always return a string so that the caller doesn't have to check for undefined.
      // If they need to check if the model did not return any text,
      // they can check the length of the string:
@@ -1548,7 +1618,7 @@ async function executeTools({
     (result) => result != null
   );
 }
-var GenerateTextResult = class {
+var DefaultGenerateTextResult = class {
   constructor(options) {
     this.text = options.text;
     this.toolCalls = options.toolCalls;
@@ -1915,7 +1985,7 @@ async function streamText({
       }
     })
   );
-  return new StreamTextResult({
+  return new DefaultStreamTextResult({
     stream: runToolsTransformation({
       tools,
      generatorStream: stream,
@@ -1931,7 +2001,7 @@ async function streamText({
     }
   });
 }
-var StreamTextResult = class {
+var DefaultStreamTextResult = class {
   constructor({
     stream,
     warnings,
@@ -2069,11 +2139,6 @@ var StreamTextResult = class {
     this.originalStream = stream2;
     return stream1;
   }
-  /**
-  A text stream that returns only the generated text deltas. You can use it
-  as either an AsyncIterable or a ReadableStream. When an error occurs, the
-  stream will throw the error.
-   */
   get textStream() {
     return createAsyncIterableStream(this.teeStream(), {
       transform(chunk, controller) {
@@ -2087,12 +2152,6 @@ var StreamTextResult = class {
       }
     });
   }
-  /**
-  A stream with all events, including text deltas, tool calls, tool results, and
-  errors.
-  You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
-  stream will throw the error.
-   */
   get fullStream() {
     return createAsyncIterableStream(this.teeStream(), {
       transform(chunk, controller) {
@@ -2106,15 +2165,6 @@ var StreamTextResult = class {
       }
     });
   }
-  /**
-  Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
-  It can be used with the `useChat` and `useCompletion` hooks.
-
-  @param callbacks
-  Stream callbacks that will be called when the stream emits events.
-
-  @returns an `AIStream` object.
-   */
   toAIStream(callbacks = {}) {
     let aggregatedResponse = "";
     const callbackTransformer = new TransformStream({
@@ -2198,14 +2248,6 @@ var StreamTextResult = class {
     });
     return this.fullStream.pipeThrough(callbackTransformer).pipeThrough(streamPartsTransformer).pipeThrough(new TextEncoderStream());
   }
-  /**
-  Writes stream data output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each stream data part as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeAIStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2229,14 +2271,6 @@ var StreamTextResult = class {
     };
     read();
   }
-  /**
-  Writes text delta output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each text delta as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeTextStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2260,15 +2294,6 @@ var StreamTextResult = class {
     };
     read();
   }
-  /**
-  Converts the result to a streamed response object with a stream data part stream.
-  It can be used with the `useChat` and `useCompletion` hooks.
-
-  @param options An object with an init property (ResponseInit) and a data property.
-  You can also pass in a ResponseInit directly (deprecated).
-
-  @return A response object.
-   */
   toAIStreamResponse(options) {
     var _a;
     const init = options == null ? void 0 : "init" in options ? options.init : {
@@ -2286,13 +2311,6 @@ var StreamTextResult = class {
       })
     });
   }
-  /**
-  Creates a simple text stream response.
-  Each text delta is encoded as UTF-8 and sent as a separate chunk.
-  Non-text-delta events are ignored.
-
-  @param init Optional headers and status code.
-   */
   toTextStreamResponse(init) {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -3640,11 +3658,7 @@ export {
   AnthropicStream,
   AssistantResponse,
   CohereStream,
-  EmbedManyResult,
-  EmbedResult,
   EmptyResponseBodyError,
-  GenerateObjectResult,
-  GenerateTextResult,
   GoogleGenerativeAIStream,
   HuggingFaceStream,
   InkeepStream,
@@ -3668,8 +3682,6 @@ export {
   ReplicateStream,
   RetryError2 as RetryError,
   StreamData2 as StreamData,
-  StreamObjectResult,
-  StreamTextResult,
   StreamingTextResponse,
   ToolCallParseError,
   TypeValidationError,
@@ -3697,6 +3709,7 @@ export {
   generateId2 as generateId,
   generateObject,
   generateText,
+  jsonSchema,
   nanoid,
   parseComplexResponse,
   parseStreamPart,
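Note: the export hunks summarize the breaking surface of dist/index.mjs in this release: the concrete result classes (EmbedResult, EmbedManyResult, GenerateObjectResult, GenerateTextResult, StreamObjectResult, StreamTextResult) disappear from the runtime exports, and jsonSchema is added. Whether type-only equivalents remain in the .d.ts files is not visible in this file's diff, so treat value imports of those classes as broken. One workable migration sketch for code that only needed the type:

// Before (3.2.33): import { GenerateTextResult } from "ai";
// After: derive the result type from the function itself.
import { generateText } from "ai";

type GenerateTextResultLike = Awaited<ReturnType<typeof generateText>>;

function logResult(result: GenerateTextResultLike) {
  console.log(result.text);
}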