ai 3.2.33 → 3.2.35
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +529 -432
- package/dist/index.d.ts +529 -432
- package/dist/index.js +448 -440
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +439 -426
- package/dist/index.mjs.map +1 -1
- package/package.json +14 -10
- package/rsc/dist/index.d.ts +13 -0
- package/rsc/dist/rsc-server.d.mts +13 -0
- package/rsc/dist/rsc-server.mjs +35 -4
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.js
CHANGED
@@ -40,11 +40,7 @@ __export(streams_exports, {
   AnthropicStream: () => AnthropicStream,
   AssistantResponse: () => AssistantResponse,
   CohereStream: () => CohereStream,
-  EmbedManyResult: () => EmbedManyResult,
-  EmbedResult: () => EmbedResult,
   EmptyResponseBodyError: () => import_provider8.EmptyResponseBodyError,
-  GenerateObjectResult: () => GenerateObjectResult,
-  GenerateTextResult: () => GenerateTextResult,
   GoogleGenerativeAIStream: () => GoogleGenerativeAIStream,
   HuggingFaceStream: () => HuggingFaceStream,
   InkeepStream: () => InkeepStream,
@@ -68,8 +64,6 @@ __export(streams_exports, {
   ReplicateStream: () => ReplicateStream,
   RetryError: () => import_provider8.RetryError,
   StreamData: () => StreamData2,
-  StreamObjectResult: () => StreamObjectResult,
-  StreamTextResult: () => StreamTextResult,
   StreamingTextResponse: () => StreamingTextResponse,
   ToolCallParseError: () => import_provider8.ToolCallParseError,
   TypeValidationError: () => import_provider8.TypeValidationError,
@@ -97,6 +91,7 @@ __export(streams_exports, {
   generateId: () => generateId2,
   generateObject: () => generateObject,
   generateText: () => generateText,
+  jsonSchema: () => jsonSchema,
   nanoid: () => nanoid,
   parseComplexResponse: () => import_ui_utils6.parseComplexResponse,
   parseStreamPart: () => import_ui_utils6.parseStreamPart,
@@ -110,7 +105,158 @@ __export(streams_exports, {
 });
 module.exports = __toCommonJS(streams_exports);
 var import_ui_utils6 = require("@ai-sdk/ui-utils");
-var
+var import_provider_utils8 = require("@ai-sdk/provider-utils");
+
+// core/telemetry/get-base-telemetry-attributes.ts
+function getBaseTelemetryAttributes({
+  operationName,
+  model,
+  settings,
+  telemetry,
+  headers
+}) {
+  var _a;
+  return {
+    "ai.model.provider": model.provider,
+    "ai.model.id": model.modelId,
+    // settings:
+    ...Object.entries(settings).reduce((attributes, [key, value]) => {
+      attributes[`ai.settings.${key}`] = value;
+      return attributes;
+    }, {}),
+    // special telemetry information
+    "operation.name": operationName,
+    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
+    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
+    // add metadata as attributes:
+    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
+      (attributes, [key, value]) => {
+        attributes[`ai.telemetry.metadata.${key}`] = value;
+        return attributes;
+      },
+      {}
+    ),
+    // request headers
+    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
+      if (value !== void 0) {
+        attributes[`ai.request.headers.${key}`] = value;
+      }
+      return attributes;
+    }, {})
+  };
+}
+
+// core/telemetry/get-tracer.ts
+var import_api = require("@opentelemetry/api");
+
+// core/telemetry/noop-tracer.ts
+var noopTracer = {
+  startSpan() {
+    return noopSpan;
+  },
+  startActiveSpan(name, arg1, arg2, arg3) {
+    if (typeof arg1 === "function") {
+      return arg1(noopSpan);
+    }
+    if (typeof arg2 === "function") {
+      return arg2(noopSpan);
+    }
+    if (typeof arg3 === "function") {
+      return arg3(noopSpan);
+    }
+  }
+};
+var noopSpan = {
+  spanContext() {
+    return noopSpanContext;
+  },
+  setAttribute() {
+    return this;
+  },
+  setAttributes() {
+    return this;
+  },
+  addEvent() {
+    return this;
+  },
+  addLink() {
+    return this;
+  },
+  addLinks() {
+    return this;
+  },
+  setStatus() {
+    return this;
+  },
+  updateName() {
+    return this;
+  },
+  end() {
+    return this;
+  },
+  isRecording() {
+    return false;
+  },
+  recordException() {
+    return this;
+  }
+};
+var noopSpanContext = {
+  traceId: "",
+  spanId: "",
+  traceFlags: 0
+};
+
+// core/telemetry/get-tracer.ts
+var testTracer = void 0;
+function getTracer({ isEnabled }) {
+  if (!isEnabled) {
+    return noopTracer;
+  }
+  if (testTracer) {
+    return testTracer;
+  }
+  return import_api.trace.getTracer("ai");
+}
+
+// core/telemetry/record-span.ts
+var import_api2 = require("@opentelemetry/api");
+function recordSpan({
+  name,
+  tracer,
+  attributes,
+  fn,
+  endWhenDone = true
+}) {
+  return tracer.startActiveSpan(name, { attributes }, async (span) => {
+    try {
+      const result = await fn(span);
+      if (endWhenDone) {
+        span.end();
+      }
+      return result;
+    } catch (error) {
+      try {
+        if (error instanceof Error) {
+          span.recordException({
+            name: error.name,
+            message: error.message,
+            stack: error.stack
+          });
+          span.setStatus({
+            code: import_api2.SpanStatusCode.ERROR,
+            message: error.message
+          });
+        } else {
+          span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
+        }
+      } finally {
+        span.end();
+      }
+      throw error;
+    }
+  });
+}
 
 // core/util/retry-with-exponential-backoff.ts
 var import_provider = require("@ai-sdk/provider");
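
The telemetry helpers added above are internal to the bundle: getTracer({ isEnabled: false }) hands back the no-op tracer, so recordSpan still runs the wrapped function but emits no spans. A minimal sketch of the call pattern in TypeScript (internal API shown above, not a public export; the span name and attributes are illustrative):

    // recordSpan wraps an async fn in an active span: on success the span is
    // ended (unless endWhenDone is false); on error the exception is recorded,
    // the span status is set to ERROR, the span is ended, and the error rethrows.
    const tracer = getTracer({ isEnabled: false }); // disabled -> noopTracer
    const value = await recordSpan({
      name: "example.operation", // illustrative span name
      tracer,
      attributes: { "example.attribute": 1 }, // illustrative attribute
      fn: async (span) => {
        span.setAttributes({ "example.step": "done" }); // no-op while disabled
        return 42;
      },
    });
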
@@ -180,21 +326,72 @@ async function embed({
   value,
   maxRetries,
   abortSignal,
-  headers
+  headers,
+  experimental_telemetry: telemetry
 }) {
   var _a;
-  const
-
-
-
-
-
-
-
-
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.embed",
+    model,
+    telemetry,
+    headers,
+    settings: { maxRetries }
+  });
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.embed",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.value": JSON.stringify(value)
+    },
+    tracer,
+    fn: async (span) => {
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      const { embedding, usage, rawResponse } = await retry(
+        () => (
+          // nested spans to align with the embedMany telemetry data:
+          recordSpan({
+            name: "ai.embed.doEmbed",
+            attributes: {
+              ...baseTelemetryAttributes,
+              // specific settings that only make sense on the outer level:
+              "ai.values": [JSON.stringify(value)]
+            },
+            tracer,
+            fn: async (doEmbedSpan) => {
+              var _a2;
+              const modelResponse = await model.doEmbed({
+                values: [value],
+                abortSignal,
+                headers
+              });
+              const embedding2 = modelResponse.embeddings[0];
+              const usage2 = (_a2 = modelResponse.usage) != null ? _a2 : { tokens: NaN };
+              doEmbedSpan.setAttributes({
+                "ai.embeddings": modelResponse.embeddings.map(
+                  (embedding3) => JSON.stringify(embedding3)
+                ),
+                "ai.usage.tokens": usage2.tokens
+              });
+              return {
+                embedding: embedding2,
+                usage: usage2,
+                rawResponse: modelResponse.rawResponse
+              };
+            }
+          })
+        )
+      );
+      span.setAttributes({
+        "ai.embedding": JSON.stringify(embedding),
+        "ai.usage.tokens": usage.tokens
+      });
+      return new DefaultEmbedResult({ value, embedding, usage, rawResponse });
+    }
   });
 }
-var
+var DefaultEmbedResult = class {
   constructor(options) {
     this.value = options.value;
     this.embedding = options.embedding;
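
In practical terms, the hunk above adds an experimental_telemetry option to embed and wraps the call in ai.embed and ai.embed.doEmbed spans. A usage sketch in TypeScript (the provider import and model id are illustrative; tracing stays off unless isEnabled is true):

    import { embed } from "ai";
    import { openai } from "@ai-sdk/openai"; // illustrative provider

    const { embedding, usage } = await embed({
      model: openai.embedding("text-embedding-3-small"), // illustrative model
      value: "sunny day at the beach",
      // new in this release: opt-in OpenTelemetry tracing
      experimental_telemetry: {
        isEnabled: true,
        functionId: "embed-example", // recorded as ai.telemetry.functionId
      },
    });
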
@@ -230,7 +427,7 @@ async function embedMany({
   const modelResponse = await retry(
     () => model.doEmbed({ values, abortSignal, headers })
   );
-  return new
+  return new DefaultEmbedManyResult({
     values,
     embeddings: modelResponse.embeddings,
     usage: (_a = modelResponse.usage) != null ? _a : { tokens: NaN }
@@ -246,9 +443,9 @@ async function embedMany({
     embeddings.push(...modelResponse.embeddings);
     tokens += (_c = (_b = modelResponse.usage) == null ? void 0 : _b.tokens) != null ? _c : NaN;
   }
-  return new
+  return new DefaultEmbedManyResult({ values, embeddings, usage: { tokens } });
 }
-var
+var DefaultEmbedManyResult = class {
   constructor(options) {
     this.values = options.values;
     this.embeddings = options.embeddings;
@@ -258,7 +455,7 @@ var EmbedManyResult = class {
 
 // core/generate-object/generate-object.ts
 var import_provider5 = require("@ai-sdk/provider");
-var
+var import_provider_utils5 = require("@ai-sdk/provider-utils");
 
 // core/util/detect-image-mimetype.ts
 var mimeTypeSignatures = [
@@ -523,6 +720,7 @@ function prepareCallSettings({
   topP,
   presencePenalty,
   frequencyPenalty,
+  stopSequences,
   seed,
   maxRetries
 }) {
@@ -609,6 +807,7 @@ function prepareCallSettings({
     topP,
     presencePenalty,
     frequencyPenalty,
+    stopSequences: stopSequences != null && stopSequences.length > 0 ? stopSequences : void 0,
     seed,
     maxRetries: maxRetries != null ? maxRetries : 2
   };
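
The two prepareCallSettings hunks above thread a new stopSequences setting through to providers (empty arrays are normalized to undefined). Presumably it is supplied like any other call setting, e.g.:

    import { generateText } from "ai";

    const { text } = await generateText({
      model, // any LanguageModel instance (illustrative)
      prompt: "List three fruits:",
      stopSequences: ["\n\n"], // generation stops once this sequence appears
    });
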
@@ -623,12 +822,6 @@ function calculateCompletionTokenUsage(usage) {
   };
 }
 
-// core/util/convert-zod-to-json-schema.ts
-var import_zod_to_json_schema = __toESM(require("zod-to-json-schema"));
-function convertZodToJSONSchema(zodSchema) {
-  return (0, import_zod_to_json_schema.default)(zodSchema);
-}
-
 // core/util/prepare-response-headers.ts
 function prepareResponseHeaders(init, { contentType }) {
   var _a;
@@ -639,6 +832,41 @@ function prepareResponseHeaders(init, { contentType }) {
   return headers;
 }
 
+// core/util/schema.ts
+var import_provider_utils4 = require("@ai-sdk/provider-utils");
+var import_zod_to_json_schema = __toESM(require("zod-to-json-schema"));
+var schemaSymbol = Symbol("vercel.ai.schema");
+function jsonSchema(jsonSchema2, {
+  validate
+} = {}) {
+  return {
+    [schemaSymbol]: true,
+    _type: void 0,
+    // should never be used directly
+    [import_provider_utils4.validatorSymbol]: true,
+    jsonSchema: jsonSchema2,
+    validate
+  };
+}
+function isSchema(value) {
+  return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
+}
+function asSchema(schema) {
+  return isSchema(schema) ? schema : zodSchema(schema);
+}
+function zodSchema(zodSchema2) {
+  return jsonSchema(
+    // we assume that zodToJsonSchema will return a valid JSONSchema7:
+    (0, import_zod_to_json_schema.default)(zodSchema2),
+    {
+      validate: (value) => {
+        const result = zodSchema2.safeParse(value);
+        return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
+      }
+    }
+  );
+}
+
 // core/generate-object/inject-json-schema-into-system.ts
 var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
 var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
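
The new core/util/schema.ts module above replaces convertZodToJSONSchema: zod schemas are now wrapped via zodSchema/asSchema, and the newly exported jsonSchema helper accepts a raw JSON schema plus an optional validate function. A usage sketch (the schema shape and validator are illustrative):

    import { jsonSchema } from "ai";

    // A raw JSON schema, usable wherever a zod schema was accepted before
    // (e.g. the `schema` option of generateObject/streamObject, tool parameters).
    const citySchema = jsonSchema(
      {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
      {
        // optional; without it the value is presumably passed through unvalidated
        validate: (value) =>
          typeof value === "object" && value !== null && "city" in value
            ? { success: true, value }
            : { success: false, error: new Error("invalid value") },
      }
    );
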
@@ -661,7 +889,7 @@ function injectJsonSchemaIntoSystem({
 // core/generate-object/generate-object.ts
 async function generateObject({
   model,
-  schema,
+  schema: inputSchema,
   mode,
   system,
   prompt,
@@ -669,133 +897,183 @@ async function generateObject({
   maxRetries,
   abortSignal,
   headers,
+  experimental_telemetry: telemetry,
   ...settings
 }) {
-  var _a
-  const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  var _a;
+  const baseTelemetryAttributes = getBaseTelemetryAttributes({
+    operationName: "ai.generateObject",
+    model,
+    telemetry,
+    headers,
+    settings: { ...settings, maxRetries }
+  });
+  const schema = asSchema(inputSchema);
+  const tracer = getTracer({ isEnabled: (_a = telemetry == null ? void 0 : telemetry.isEnabled) != null ? _a : false });
+  return recordSpan({
+    name: "ai.generateObject",
+    attributes: {
+      ...baseTelemetryAttributes,
+      // specific settings that only make sense on the outer level:
+      "ai.prompt": JSON.stringify({ system, prompt, messages }),
+      "ai.settings.jsonSchema": JSON.stringify(schema.jsonSchema),
+      "ai.settings.mode": mode
+    },
+    tracer,
+    fn: async (span) => {
+      var _a2, _b;
+      const retry = retryWithExponentialBackoff({ maxRetries });
+      if (mode === "auto" || mode == null) {
+        mode = model.defaultObjectGenerationMode;
+      }
+      let result;
+      let finishReason;
+      let usage;
+      let warnings;
+      let rawResponse;
+      let logprobs;
+      switch (mode) {
+        case "json": {
+          const validatedPrompt = getValidatedPrompt({
+            system: injectJsonSchemaIntoSystem({
+              system,
+              schema: schema.jsonSchema
+            }),
+            prompt,
+            messages
+          });
+          const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+          const inputFormat = validatedPrompt.type;
+          const generateResult = await retry(
+            () => recordSpan({
+              name: "ai.generateObject.doGenerate",
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": inputFormat,
+                "ai.prompt.messages": JSON.stringify(promptMessages),
+                "ai.settings.mode": mode
+              },
+              tracer,
+              fn: async (span2) => {
+                const result2 = await model.doGenerate({
+                  mode: { type: "object-json" },
+                  ...prepareCallSettings(settings),
+                  inputFormat,
+                  prompt: promptMessages,
+                  abortSignal,
+                  headers
+                });
+                span2.setAttributes({
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.text": result2.text
+                });
+                return result2;
+              }
+            })
+          );
+          if (generateResult.text === void 0) {
+            throw new import_provider5.NoObjectGeneratedError();
+          }
+          result = generateResult.text;
+          finishReason = generateResult.finishReason;
+          usage = generateResult.usage;
+          warnings = generateResult.warnings;
+          rawResponse = generateResult.rawResponse;
+          logprobs = generateResult.logprobs;
+          break;
+        }
+        case "tool": {
+          const validatedPrompt = getValidatedPrompt({
+            system,
+            prompt,
+            messages
+          });
+          const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+          const inputFormat = validatedPrompt.type;
+          const generateResult = await retry(
+            () => recordSpan({
+              name: "ai.generateObject.doGenerate",
+              attributes: {
+                ...baseTelemetryAttributes,
+                "ai.prompt.format": inputFormat,
+                "ai.prompt.messages": JSON.stringify(promptMessages),
+                "ai.settings.mode": mode
+              },
+              tracer,
+              fn: async (span2) => {
+                const result2 = await model.doGenerate({
+                  mode: {
+                    type: "object-tool",
+                    tool: {
+                      type: "function",
+                      name: "json",
+                      description: "Respond with a JSON object.",
+                      parameters: schema.jsonSchema
+                    }
+                  },
+                  ...prepareCallSettings(settings),
+                  inputFormat,
+                  prompt: promptMessages,
+                  abortSignal,
+                  headers
+                });
+                span2.setAttributes({
+                  "ai.finishReason": result2.finishReason,
+                  "ai.usage.promptTokens": result2.usage.promptTokens,
+                  "ai.usage.completionTokens": result2.usage.completionTokens,
+                  "ai.result.text": result2.text,
+                  "ai.result.toolCalls": JSON.stringify(result2.toolCalls)
+                });
+                return result2;
+              }
+            })
+          );
+          const functionArgs = (_b = (_a2 = generateResult.toolCalls) == null ? void 0 : _a2[0]) == null ? void 0 : _b.args;
+          if (functionArgs === void 0) {
+            throw new import_provider5.NoObjectGeneratedError();
+          }
+          result = functionArgs;
+          finishReason = generateResult.finishReason;
+          usage = generateResult.usage;
+          warnings = generateResult.warnings;
+          rawResponse = generateResult.rawResponse;
+          logprobs = generateResult.logprobs;
+          break;
+        }
+        case void 0: {
+          throw new Error(
+            "Model does not have a default object generation mode."
+          );
+        }
+        default: {
+          const _exhaustiveCheck = mode;
+          throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
+        }
       }
-
-
-
-      warnings = generateResult.warnings;
-      rawResponse = generateResult.rawResponse;
-      logprobs = generateResult.logprobs;
-      break;
-    }
-    case "grammar": {
-      const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-        prompt,
-        messages
-      });
-      const generateResult = await retry(
-        () => model.doGenerate({
-          mode: { type: "object-grammar", schema: jsonSchema },
-          ...prepareCallSettings(settings),
-          inputFormat: validatedPrompt.type,
-          prompt: convertToLanguageModelPrompt(validatedPrompt),
-          abortSignal,
-          headers
-        })
-      );
-      if (generateResult.text === void 0) {
-        throw new import_provider5.NoObjectGeneratedError();
+      const parseResult = (0, import_provider_utils5.safeParseJSON)({ text: result, schema });
+      if (!parseResult.success) {
+        throw parseResult.error;
       }
-
-
-
-
-
-
-
-
-
-
-
-
-
+      span.setAttributes({
+        "ai.finishReason": finishReason,
+        "ai.usage.promptTokens": usage.promptTokens,
+        "ai.usage.completionTokens": usage.completionTokens,
+        "ai.result.object": JSON.stringify(parseResult.value)
+      });
+      return new DefaultGenerateObjectResult({
+        object: parseResult.value,
+        finishReason,
+        usage: calculateCompletionTokenUsage(usage),
+        warnings,
+        rawResponse,
+        logprobs
       });
-      const generateResult = await retry(
-        () => model.doGenerate({
-          mode: {
-            type: "object-tool",
-            tool: {
-              type: "function",
-              name: "json",
-              description: "Respond with a JSON object.",
-              parameters: jsonSchema
-            }
-          },
-          ...prepareCallSettings(settings),
-          inputFormat: validatedPrompt.type,
-          prompt: convertToLanguageModelPrompt(validatedPrompt),
-          abortSignal,
-          headers
-        })
-      );
-      const functionArgs = (_b = (_a = generateResult.toolCalls) == null ? void 0 : _a[0]) == null ? void 0 : _b.args;
-      if (functionArgs === void 0) {
-        throw new import_provider5.NoObjectGeneratedError();
-      }
-      result = functionArgs;
-      finishReason = generateResult.finishReason;
-      usage = generateResult.usage;
-      warnings = generateResult.warnings;
-      rawResponse = generateResult.rawResponse;
-      logprobs = generateResult.logprobs;
-      break;
-    }
-    case void 0: {
-      throw new Error("Model does not have a default object generation mode.");
-    }
-    default: {
-      const _exhaustiveCheck = mode;
-      throw new Error(`Unsupported mode: ${_exhaustiveCheck}`);
     }
-  }
-  const parseResult = (0, import_provider_utils4.safeParseJSON)({ text: result, schema });
-  if (!parseResult.success) {
-    throw parseResult.error;
-  }
-  return new GenerateObjectResult({
-    object: parseResult.value,
-    finishReason,
-    usage: calculateCompletionTokenUsage(usage),
-    warnings,
-    rawResponse,
-    logprobs
   });
 }
-var
+var DefaultGenerateObjectResult = class {
   constructor(options) {
     this.object = options.object;
     this.finishReason = options.finishReason;
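
After this rewrite, generateObject resolves its schema through asSchema, records ai.generateObject and ai.generateObject.doGenerate spans, and drops the old "grammar" mode (leaving "auto", "json", and "tool"). A usage sketch with telemetry enabled (model and schema are illustrative):

    import { generateObject } from "ai";
    import { z } from "zod";

    const { object } = await generateObject({
      model, // any LanguageModel with a default object generation mode
      schema: z.object({ city: z.string() }), // wrapped internally by asSchema
      prompt: "Name a city in France.",
      experimental_telemetry: {
        isEnabled: true,
        metadata: { userId: "123" }, // becomes ai.telemetry.metadata.userId
      },
    });
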
@@ -804,10 +1082,6 @@ var GenerateObjectResult = class {
     this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
-  /**
-  Converts the object to a JSON response.
-  The response will have a status code of 200 and a content type of `application/json; charset=utf-8`.
-   */
   toJsonResponse(init) {
     var _a;
     return new Response(JSON.stringify(this.object), {
@@ -821,7 +1095,7 @@ var GenerateObjectResult = class {
 var experimental_generateObject = generateObject;
 
 // core/generate-object/stream-object.ts
-var
+var import_provider_utils6 = require("@ai-sdk/provider-utils");
 var import_ui_utils = require("@ai-sdk/ui-utils");
 
 // core/util/async-iterable-stream.ts
@@ -882,7 +1156,7 @@ var DelayedPromise = class {
 // core/generate-object/stream-object.ts
 async function streamObject({
   model,
-  schema,
+  schema: inputSchema,
   mode,
   system,
   prompt,
@@ -894,7 +1168,7 @@ async function streamObject({
   ...settings
 }) {
   const retry = retryWithExponentialBackoff({ maxRetries });
-  const
+  const schema = asSchema(inputSchema);
   if (mode === "auto" || mode == null) {
     mode = model.defaultObjectGenerationMode;
   }
@@ -903,7 +1177,10 @@ async function streamObject({
   switch (mode) {
     case "json": {
       const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({
+        system: injectJsonSchemaIntoSystem({
+          system,
+          schema: schema.jsonSchema
+        }),
         prompt,
         messages
       });
@@ -930,35 +1207,6 @@ async function streamObject({
       };
       break;
     }
-    case "grammar": {
-      const validatedPrompt = getValidatedPrompt({
-        system: injectJsonSchemaIntoSystem({ system, schema: jsonSchema }),
-        prompt,
-        messages
-      });
-      callOptions = {
-        mode: { type: "object-grammar", schema: jsonSchema },
-        ...prepareCallSettings(settings),
-        inputFormat: validatedPrompt.type,
-        prompt: convertToLanguageModelPrompt(validatedPrompt),
-        abortSignal,
-        headers
-      };
-      transformer = {
-        transform: (chunk, controller) => {
-          switch (chunk.type) {
-            case "text-delta":
-              controller.enqueue(chunk.textDelta);
-              break;
-            case "finish":
-            case "error":
-              controller.enqueue(chunk);
-              break;
-          }
-        }
-      };
-      break;
-    }
     case "tool": {
       const validatedPrompt = getValidatedPrompt({
         system,
@@ -972,7 +1220,7 @@ async function streamObject({
             type: "function",
             name: "json",
             description: "Respond with a JSON object.",
-            parameters: jsonSchema
+            parameters: schema.jsonSchema
           }
         },
         ...prepareCallSettings(settings),
@@ -1005,7 +1253,7 @@ async function streamObject({
     }
   }
   const result = await retry(() => model.doStream(callOptions));
-  return new
+  return new DefaultStreamObjectResult({
     stream: result.stream.pipeThrough(new TransformStream(transformer)),
     warnings: result.warnings,
     rawResponse: result.rawResponse,
@@ -1013,7 +1261,7 @@ async function streamObject({
     onFinish
   });
 }
-var
+var DefaultStreamObjectResult = class {
   constructor({
     stream,
     warnings,
@@ -1069,7 +1317,7 @@ var StreamObjectResult = class {
           usage = calculateCompletionTokenUsage(chunk.usage);
           controller.enqueue({ ...chunk, usage });
           resolveUsage(usage);
-          const validationResult = (0,
+          const validationResult = (0, import_provider_utils6.safeValidateTypes)({
             value: latestObject,
             schema
           });
@@ -1109,18 +1357,9 @@ var StreamObjectResult = class {
       })
     );
   }
-  /**
-  The generated object (typed according to the schema). Resolved when the response is finished.
-   */
   get object() {
     return this.objectPromise.value;
   }
-  /**
-  Stream of partial objects. It gets more complete as the stream progresses.
-
-  Note that the partial object is not validated.
-  If you want to be certain that the actual content matches your schema, you need to implement your own validation for partial results.
-   */
   get partialObjectStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1142,10 +1381,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Text stream of the JSON representation of the generated object. It contains text chunks.
-  When the stream is finished, the object is valid JSON that can be parsed.
-   */
   get textStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1167,9 +1402,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Stream of different types of events, including partial objects, errors, and finish events.
-   */
   get fullStream() {
     return createAsyncIterableStream(this.originalStream, {
       transform(chunk, controller) {
@@ -1177,14 +1409,6 @@ var StreamObjectResult = class {
       }
     });
   }
-  /**
-  Writes text delta output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each text delta as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeTextStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -1208,14 +1432,6 @@ var StreamObjectResult = class {
     };
     read();
   }
-  /**
-  Creates a simple text stream response.
-  The response has a `Content-Type` header set to `text/plain; charset=utf-8`.
-  Each text delta is encoded as UTF-8 and sent as a separate chunk.
-  Non-text-delta events are ignored.
-
-  @param init Optional headers and status code.
-   */
   toTextStreamResponse(init) {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -1249,166 +1465,15 @@ function prepareToolsAndToolChoice({
       type: "function",
       name,
       description: tool2.description,
-      parameters:
+      parameters: asSchema(tool2.parameters).jsonSchema
     })),
     toolChoice: toolChoice == null ? { type: "auto" } : typeof toolChoice === "string" ? { type: toolChoice } : { type: "tool", toolName: toolChoice.toolName }
   };
 }
 
-// core/telemetry/get-base-telemetry-attributes.ts
-function getBaseTelemetryAttributes({
-  operationName,
-  model,
-  settings,
-  telemetry,
-  headers
-}) {
-  var _a;
-  return {
-    "ai.model.provider": model.provider,
-    "ai.model.id": model.modelId,
-    // settings:
-    ...Object.entries(settings).reduce((attributes, [key, value]) => {
-      attributes[`ai.settings.${key}`] = value;
-      return attributes;
-    }, {}),
-    // special telemetry information
-    "operation.name": operationName,
-    "resource.name": telemetry == null ? void 0 : telemetry.functionId,
-    "ai.telemetry.functionId": telemetry == null ? void 0 : telemetry.functionId,
-    // add metadata as attributes:
-    ...Object.entries((_a = telemetry == null ? void 0 : telemetry.metadata) != null ? _a : {}).reduce(
-      (attributes, [key, value]) => {
-        attributes[`ai.telemetry.metadata.${key}`] = value;
-        return attributes;
-      },
-      {}
-    ),
-    // request headers
-    ...Object.entries(headers != null ? headers : {}).reduce((attributes, [key, value]) => {
-      if (value !== void 0) {
-        attributes[`ai.request.headers.${key}`] = value;
-      }
-      return attributes;
-    }, {})
-  };
-}
-
-// core/telemetry/get-tracer.ts
-var import_api = require("@opentelemetry/api");
-
-// core/telemetry/noop-tracer.ts
-var noopTracer = {
-  startSpan() {
-    return noopSpan;
-  },
-  startActiveSpan(name, arg1, arg2, arg3) {
-    if (typeof arg1 === "function") {
-      return arg1(noopSpan);
-    }
-    if (typeof arg2 === "function") {
-      return arg2(noopSpan);
-    }
-    if (typeof arg3 === "function") {
-      return arg3(noopSpan);
-    }
-  }
-};
-var noopSpan = {
-  spanContext() {
-    return noopSpanContext;
-  },
-  setAttribute() {
-    return this;
-  },
-  setAttributes() {
-    return this;
-  },
-  addEvent() {
-    return this;
-  },
-  addLink() {
-    return this;
-  },
-  addLinks() {
-    return this;
-  },
-  setStatus() {
-    return this;
-  },
-  updateName() {
-    return this;
-  },
-  end() {
-    return this;
-  },
-  isRecording() {
-    return false;
-  },
-  recordException() {
-    return this;
-  }
-};
-var noopSpanContext = {
-  traceId: "",
-  spanId: "",
-  traceFlags: 0
-};
-
-// core/telemetry/get-tracer.ts
-var testTracer = void 0;
-function getTracer({ isEnabled }) {
-  if (!isEnabled) {
-    return noopTracer;
-  }
-  if (testTracer) {
-    return testTracer;
-  }
-  return import_api.trace.getTracer("ai");
-}
-
-// core/telemetry/record-span.ts
-var import_api2 = require("@opentelemetry/api");
-function recordSpan({
-  name,
-  tracer,
-  attributes,
-  fn,
-  endWhenDone = true
-}) {
-  return tracer.startActiveSpan(name, { attributes }, async (span) => {
-    try {
-      const result = await fn(span);
-      if (endWhenDone) {
-        span.end();
-      }
-      return result;
-    } catch (error) {
-      try {
-        if (error instanceof Error) {
-          span.recordException({
-            name: error.name,
-            message: error.message,
-            stack: error.stack
-          });
-          span.setStatus({
-            code: import_api2.SpanStatusCode.ERROR,
-            message: error.message
-          });
-        } else {
-          span.setStatus({ code: import_api2.SpanStatusCode.ERROR });
-        }
-      } finally {
-        span.end();
-      }
-      throw error;
-    }
-  });
-}
-
 // core/generate-text/tool-call.ts
 var import_provider6 = require("@ai-sdk/provider");
-var
+var import_provider_utils7 = require("@ai-sdk/provider-utils");
 function parseToolCall({
   toolCall,
   tools
@@ -1424,9 +1489,9 @@ function parseToolCall({
       availableTools: Object.keys(tools)
     });
   }
-  const parseResult = (0,
+  const parseResult = (0, import_provider_utils7.safeParseJSON)({
     text: toolCall.args,
-    schema: tool2.parameters
+    schema: asSchema(tool2.parameters)
   });
   if (parseResult.success === false) {
     throw new import_provider6.InvalidToolArgumentsError({
@@ -1578,7 +1643,7 @@ async function generateText({
         "ai.result.text": currentModelResponse.text,
         "ai.result.toolCalls": JSON.stringify(currentModelResponse.toolCalls)
       });
-      return new
+      return new DefaultGenerateTextResult({
         // Always return a string so that the caller doesn't have to check for undefined.
         // If they need to check if the model did not return any text,
         // they can check the length of the string:
@@ -1638,7 +1703,7 @@ async function executeTools({
     (result) => result != null
   );
 }
-var
+var DefaultGenerateTextResult = class {
   constructor(options) {
     this.text = options.text;
     this.toolCalls = options.toolCalls;
@@ -2005,7 +2070,7 @@ async function streamText({
       }
     })
   );
-  return new
+  return new DefaultStreamTextResult({
     stream: runToolsTransformation({
       tools,
       generatorStream: stream,
@@ -2021,7 +2086,7 @@ async function streamText({
     }
   });
 }
-var
+var DefaultStreamTextResult = class {
   constructor({
     stream,
     warnings,
@@ -2159,11 +2224,6 @@ var StreamTextResult = class {
     this.originalStream = stream2;
     return stream1;
   }
-  /**
-  A text stream that returns only the generated text deltas. You can use it
-  as either an AsyncIterable or a ReadableStream. When an error occurs, the
-  stream will throw the error.
-   */
   get textStream() {
     return createAsyncIterableStream(this.teeStream(), {
       transform(chunk, controller) {
@@ -2177,12 +2237,6 @@ var StreamTextResult = class {
       }
     });
   }
-  /**
-  A stream with all events, including text deltas, tool calls, tool results, and
-  errors.
-  You can use it as either an AsyncIterable or a ReadableStream. When an error occurs, the
-  stream will throw the error.
-   */
   get fullStream() {
     return createAsyncIterableStream(this.teeStream(), {
       transform(chunk, controller) {
@@ -2196,15 +2250,6 @@ var StreamTextResult = class {
       }
     });
   }
-  /**
-  Converts the result to an `AIStream` object that is compatible with `StreamingTextResponse`.
-  It can be used with the `useChat` and `useCompletion` hooks.
-
-  @param callbacks
-  Stream callbacks that will be called when the stream emits events.
-
-  @returns an `AIStream` object.
-   */
   toAIStream(callbacks = {}) {
     let aggregatedResponse = "";
     const callbackTransformer = new TransformStream({
@@ -2288,14 +2333,6 @@ var StreamTextResult = class {
     });
     return this.fullStream.pipeThrough(callbackTransformer).pipeThrough(streamPartsTransformer).pipeThrough(new TextEncoderStream());
   }
-  /**
-  Writes stream data output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each stream data part as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeAIStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2319,14 +2356,6 @@ var StreamTextResult = class {
     };
     read();
   }
-  /**
-  Writes text delta output to a Node.js response-like object.
-  It sets a `Content-Type` header to `text/plain; charset=utf-8` and
-  writes each text delta as a separate chunk.
-
-  @param response A Node.js response-like object (ServerResponse).
-  @param init Optional headers and status code.
-   */
   pipeTextStreamToResponse(response, init) {
     var _a;
     response.writeHead((_a = init == null ? void 0 : init.status) != null ? _a : 200, {
@@ -2350,15 +2379,6 @@ var StreamTextResult = class {
     };
     read();
   }
-  /**
-  Converts the result to a streamed response object with a stream data part stream.
-  It can be used with the `useChat` and `useCompletion` hooks.
-
-  @param options An object with an init property (ResponseInit) and a data property.
-  You can also pass in a ResponseInit directly (deprecated).
-
-  @return A response object.
-   */
   toAIStreamResponse(options) {
     var _a;
     const init = options == null ? void 0 : "init" in options ? options.init : {
@@ -2376,13 +2396,6 @@ var StreamTextResult = class {
       })
     });
   }
-  /**
-  Creates a simple text stream response.
-  Each text delta is encoded as UTF-8 and sent as a separate chunk.
-  Non-text-delta events are ignored.
-
-  @param init Optional headers and status code.
-   */
   toTextStreamResponse(init) {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
@@ -3693,8 +3706,8 @@ var StreamingTextResponse = class extends Response {
 };
 
 // streams/index.ts
-var generateId2 =
-var nanoid =
+var generateId2 = import_provider_utils8.generateId;
+var nanoid = import_provider_utils8.generateId;
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   AIStream,
@@ -3707,11 +3720,7 @@ var nanoid = import_provider_utils7.generateId;
   AnthropicStream,
   AssistantResponse,
   CohereStream,
-  EmbedManyResult,
-  EmbedResult,
   EmptyResponseBodyError,
-  GenerateObjectResult,
-  GenerateTextResult,
   GoogleGenerativeAIStream,
   HuggingFaceStream,
   InkeepStream,
@@ -3735,8 +3744,6 @@ var nanoid = import_provider_utils7.generateId;
   ReplicateStream,
   RetryError,
   StreamData,
-  StreamObjectResult,
-  StreamTextResult,
   StreamingTextResponse,
   ToolCallParseError,
   TypeValidationError,
@@ -3764,6 +3771,7 @@ var nanoid = import_provider_utils7.generateId;
   generateId,
   generateObject,
   generateText,
+  jsonSchema,
   nanoid,
   parseComplexResponse,
   parseStreamPart,