ai 3.3.26 → 3.3.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/dist/index.d.mts +169 -55
- package/dist/index.d.ts +169 -55
- package/dist/index.js +211 -41
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +205 -37
- package/dist/index.mjs.map +1 -1
- package/package.json +8 -8
- package/rsc/dist/index.d.ts +8 -2
- package/rsc/dist/rsc-server.d.mts +8 -2
- package/rsc/dist/rsc-server.mjs +10 -4
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.mjs
CHANGED
@@ -588,7 +588,7 @@ var DefaultEmbedManyResult = class {
 };
 
 // core/generate-object/generate-object.ts
-import { safeParseJSON } from "@ai-sdk/provider-utils";
+import { createIdGenerator, safeParseJSON } from "@ai-sdk/provider-utils";
 
 // core/prompt/convert-to-language-model-prompt.ts
 import { getErrorMessage as getErrorMessage2 } from "@ai-sdk/provider-utils";
@@ -974,7 +974,13 @@ function convertToLanguageModelMessage(message, downloadedImages) {
       content: message.content.filter(
         // remove empty text parts:
         (part) => part.type !== "text" || part.text !== ""
-      )
+      ).map((part) => {
+        const { experimental_providerMetadata, ...rest } = part;
+        return {
+          ...rest,
+          providerMetadata: experimental_providerMetadata
+        };
+      }),
       providerMetadata: message.experimental_providerMetadata
     };
   }
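The new `.map()` above renames each content part's `experimental_providerMetadata` key to `providerMetadata` before the prompt reaches the language model. A minimal sketch of the same transform in isolation (the `Part` type here is a simplified stand-in, not the SDK's actual type):

```ts
type Part = { type: string; text?: string; experimental_providerMetadata?: unknown };

// Drop empty text parts, then move the experimental_providerMetadata key
// to providerMetadata on every remaining part.
function normalizeParts(parts: Part[]) {
  return parts
    .filter((part) => part.type !== "text" || part.text !== "")
    .map(({ experimental_providerMetadata, ...rest }) => ({
      ...rest,
      providerMetadata: experimental_providerMetadata,
    }));
}
```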
@@ -1307,8 +1313,8 @@ function validatePrompt(prompt) {
   throw new Error("unreachable");
 }
 
-// core/types/
-function
+// core/types/usage.ts
+function calculateLanguageModelUsage(usage) {
   return {
     promptTokens: usage.promptTokens,
     completionTokens: usage.completionTokens,
@@ -1643,6 +1649,7 @@ function validateObjectGenerationInput({
 }
 
 // core/generate-object/generate-object.ts
+var originalGenerateId = createIdGenerator({ prefix: "aiobj-", length: 24 });
 async function generateObject({
   model,
   schema: inputSchema,
@@ -1657,6 +1664,10 @@ async function generateObject({
   abortSignal,
   headers,
   experimental_telemetry: telemetry,
+  _internal: {
+    generateId: generateId3 = originalGenerateId,
+    currentDate = () => /* @__PURE__ */ new Date()
+  } = {},
   ...settings
 }) {
   var _a11;
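The new `_internal` bag (mirrored below in `streamObject`, `generateText`, and `streamText`) exists so that id and clock sources can be injected, which makes response metadata deterministic in tests. A sketch of such a call, assuming a model test double; `_internal` is a private hook shown in this diff and may not be part of the public TypeScript types, hence the cast:

```ts
import { generateObject } from "ai";
import { z } from "zod";

declare const testModel: any; // hypothetical LanguageModelV1 test double

const { response } = await generateObject({
  model: testModel,
  schema: z.object({ answer: z.string() }),
  prompt: "2 + 2 = ?",
  // private hook: pin the generated id and timestamp for snapshot tests
  _internal: {
    generateId: () => "aiobj-test-0000000000000000",
    currentDate: () => new Date("2024-01-01T00:00:00Z"),
  },
} as any);
```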
@@ -1710,6 +1721,7 @@ async function generateObject({
   let usage;
   let warnings;
   let rawResponse;
+  let response;
   let logprobs;
   let providerMetadata;
   switch (mode) {
@@ -1758,6 +1770,7 @@ async function generateObject({
         }),
         tracer,
         fn: async (span2) => {
+          var _a12, _b, _c, _d, _e, _f;
           const result2 = await model.doGenerate({
             mode: {
               type: "object-json",
@@ -1774,12 +1787,20 @@ async function generateObject({
           if (result2.text === void 0) {
             throw new NoObjectGeneratedError();
           }
+          const responseData = {
+            id: (_b = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b : generateId3(),
+            timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+            modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId
+          };
           span2.setAttributes(
             selectTelemetryAttributes({
               telemetry,
               attributes: {
                 "ai.response.finishReason": result2.finishReason,
                 "ai.response.object": { output: () => result2.text },
+                "ai.response.id": responseData.id,
+                "ai.response.model": responseData.modelId,
+                "ai.response.timestamp": responseData.timestamp.toISOString(),
                 "ai.usage.promptTokens": result2.usage.promptTokens,
                 "ai.usage.completionTokens": result2.usage.completionTokens,
                 // deprecated:
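The nested `(_b = (_a12 = result2.response) == null ? void 0 : _a12.id) != null ? _b : …` expressions above are down-leveled optional chaining with nullish coalescing. Restated as a standalone helper (a sketch, not the SDK's actual function):

```ts
interface ProviderResponseMetadata { id?: string; timestamp?: Date; modelId?: string }

// Provider-supplied metadata wins; otherwise fall back to a generated id,
// the injected clock, and the model id the call was configured with.
function toResponseData(
  raw: ProviderResponseMetadata | undefined,
  generateId: () => string,
  currentDate: () => Date,
  fallbackModelId: string,
) {
  return {
    id: raw?.id ?? generateId(),
    timestamp: raw?.timestamp ?? currentDate(),
    modelId: raw?.modelId ?? fallbackModelId,
  };
}
```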
@@ -1787,12 +1808,14 @@ async function generateObject({
                 "ai.result.object": { output: () => result2.text },
                 // standardized gen-ai llm span attributes:
                 "gen_ai.response.finish_reasons": [result2.finishReason],
+                "gen_ai.response.id": responseData.id,
+                "gen_ai.response.model": responseData.modelId,
                 "gen_ai.usage.prompt_tokens": result2.usage.promptTokens,
                 "gen_ai.usage.completion_tokens": result2.usage.completionTokens
               }
             })
           );
-          return { ...result2, objectText: result2.text };
+          return { ...result2, objectText: result2.text, responseData };
         }
       })
     );
@@ -1803,6 +1826,7 @@ async function generateObject({
       rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
       providerMetadata = generateResult.providerMetadata;
+      response = generateResult.responseData;
       break;
     }
     case "tool": {
@@ -1847,7 +1871,7 @@ async function generateObject({
         }),
         tracer,
         fn: async (span2) => {
-          var _a12, _b;
+          var _a12, _b, _c, _d, _e, _f, _g, _h;
           const result2 = await model.doGenerate({
             mode: {
               type: "object-tool",
@@ -1868,12 +1892,20 @@ async function generateObject({
           if (objectText === void 0) {
             throw new NoObjectGeneratedError();
           }
+          const responseData = {
+            id: (_d = (_c = result2.response) == null ? void 0 : _c.id) != null ? _d : generateId3(),
+            timestamp: (_f = (_e = result2.response) == null ? void 0 : _e.timestamp) != null ? _f : currentDate(),
+            modelId: (_h = (_g = result2.response) == null ? void 0 : _g.modelId) != null ? _h : model.modelId
+          };
           span2.setAttributes(
             selectTelemetryAttributes({
               telemetry,
               attributes: {
                 "ai.response.finishReason": result2.finishReason,
                 "ai.response.object": { output: () => objectText },
+                "ai.response.id": responseData.id,
+                "ai.response.model": responseData.modelId,
+                "ai.response.timestamp": responseData.timestamp.toISOString(),
                 "ai.usage.promptTokens": result2.usage.promptTokens,
                 "ai.usage.completionTokens": result2.usage.completionTokens,
                 // deprecated:
@@ -1881,12 +1913,14 @@ async function generateObject({
                 "ai.result.object": { output: () => objectText },
                 // standardized gen-ai llm span attributes:
                 "gen_ai.response.finish_reasons": [result2.finishReason],
+                "gen_ai.response.id": responseData.id,
+                "gen_ai.response.model": responseData.modelId,
                 "gen_ai.usage.input_tokens": result2.usage.promptTokens,
                 "gen_ai.usage.output_tokens": result2.usage.completionTokens
               }
             })
           );
-          return { ...result2, objectText };
+          return { ...result2, objectText, responseData };
         }
       })
     );
@@ -1897,6 +1931,7 @@ async function generateObject({
       rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
       providerMetadata = generateResult.providerMetadata;
+      response = generateResult.responseData;
       break;
     }
     case void 0: {
@@ -1940,9 +1975,12 @@ async function generateObject({
   return new DefaultGenerateObjectResult({
     object: validationResult.value,
     finishReason,
-    usage:
+    usage: calculateLanguageModelUsage(usage),
     warnings,
-
+    response: {
+      ...response,
+      headers: rawResponse == null ? void 0 : rawResponse.headers
+    },
     logprobs,
     providerMetadata
   });
@@ -1955,9 +1993,12 @@ var DefaultGenerateObjectResult = class {
     this.finishReason = options.finishReason;
     this.usage = options.usage;
     this.warnings = options.warnings;
-    this.rawResponse = options.rawResponse;
-    this.logprobs = options.logprobs;
     this.experimental_providerMetadata = options.providerMetadata;
+    this.response = options.response;
+    this.rawResponse = {
+      headers: options.response.headers
+    };
+    this.logprobs = options.logprobs;
   }
   toJsonResponse(init) {
     var _a11;
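Net effect for `generateObject` callers: the result now carries a `response` object whose `id`, `timestamp`, and `modelId` are always populated, and `rawResponse` is rebuilt as `{ headers }`. A usage sketch (the provider package and model name are illustrative assumptions, not part of this diff):

```ts
import { generateObject } from "ai";
import { openai } from "@ai-sdk/openai"; // assumption: any provider works
import { z } from "zod";

const result = await generateObject({
  model: openai("gpt-4o-mini"), // hypothetical model choice
  schema: z.object({ city: z.string() }),
  prompt: "Name a city in France.",
});

console.log(result.response.id);        // provider id, or an "aiobj-" fallback
console.log(result.response.timestamp); // always a Date
console.log(result.response.modelId);
console.log(result.response.headers);   // headers from the raw HTTP response, if any
```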
@@ -2037,6 +2078,8 @@ function now() {
 }
 
 // core/generate-object/stream-object.ts
+import { createIdGenerator as createIdGenerator2 } from "@ai-sdk/provider-utils";
+var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj-", length: 24 });
 async function streamObject({
   model,
   schema: inputSchema,
@@ -2052,7 +2095,11 @@ async function streamObject({
   headers,
   experimental_telemetry: telemetry,
   onFinish,
-  _internal: {
+  _internal: {
+    generateId: generateId3 = originalGenerateId2,
+    currentDate = () => /* @__PURE__ */ new Date(),
+    now: now2 = now
+  } = {},
   ...settings
 }) {
   var _a11;
@@ -2136,6 +2183,7 @@ async function streamObject({
         case "text-delta":
           controller.enqueue(chunk.textDelta);
           break;
+        case "response-metadata":
         case "finish":
         case "error":
           controller.enqueue(chunk);
@@ -2176,6 +2224,7 @@ async function streamObject({
         case "tool-call-delta":
           controller.enqueue(chunk.argsTextDelta);
           break;
+        case "response-metadata":
         case "finish":
         case "error":
           controller.enqueue(chunk);
@@ -2247,7 +2296,10 @@ async function streamObject({
         doStreamSpan,
         telemetry,
         startTimestampMs,
-
+        modelId: model.modelId,
+        now: now2,
+        currentDate,
+        generateId: generateId3
       });
     }
   });
@@ -2263,7 +2315,10 @@ var DefaultStreamObjectResult = class {
     doStreamSpan,
     telemetry,
     startTimestampMs,
-
+    modelId,
+    now: now2,
+    currentDate,
+    generateId: generateId3
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -2271,6 +2326,8 @@ var DefaultStreamObjectResult = class {
     this.objectPromise = new DelayedPromise();
     const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
     this.usage = usagePromise;
+    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
+    this.response = responsePromise;
     const {
       resolve: resolveProviderMetadata,
       promise: providerMetadataPromise
@@ -2283,6 +2340,11 @@ var DefaultStreamObjectResult = class {
     let error;
     let accumulatedText = "";
     let textDelta = "";
+    let response = {
+      id: generateId3(),
+      timestamp: currentDate(),
+      modelId
+    };
     let latestObjectJson = void 0;
     let latestObject = void 0;
     let isFirstChunk = true;
@@ -2291,6 +2353,7 @@ var DefaultStreamObjectResult = class {
     this.originalStream = stream.pipeThrough(
       new TransformStream({
         async transform(chunk, controller) {
+          var _a11, _b, _c;
           if (isFirstChunk) {
             const msToFirstChunk = now2() - startTimestampMs;
             isFirstChunk = false;
@@ -2331,16 +2394,28 @@ var DefaultStreamObjectResult = class {
             return;
           }
           switch (chunk.type) {
+            case "response-metadata": {
+              response = {
+                id: (_a11 = chunk.id) != null ? _a11 : response.id,
+                timestamp: (_b = chunk.timestamp) != null ? _b : response.timestamp,
+                modelId: (_c = chunk.modelId) != null ? _c : response.modelId
+              };
+              break;
+            }
             case "finish": {
               if (textDelta !== "") {
                 controller.enqueue({ type: "text-delta", textDelta });
               }
               finishReason = chunk.finishReason;
-              usage =
+              usage = calculateLanguageModelUsage(chunk.usage);
               providerMetadata = chunk.providerMetadata;
-              controller.enqueue({ ...chunk, usage });
+              controller.enqueue({ ...chunk, usage, response });
               resolveUsage(usage);
               resolveProviderMetadata(providerMetadata);
+              resolveResponse({
+                ...response,
+                headers: rawResponse == null ? void 0 : rawResponse.headers
+              });
              const validationResult = outputStrategy.validateFinalResult(latestObjectJson);
              if (validationResult.success) {
                object = validationResult.value;
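`streamObject` now recognizes a `response-metadata` chunk: a provider can emit its response id, timestamp, or model id mid-stream, and each field overrides the locally seeded default. The merge above, restated without the down-leveled nullish checks (a sketch, not the SDK's actual helper):

```ts
interface StreamResponseMetadata { id?: string; timestamp?: Date; modelId?: string }

// Any field the provider sends replaces the default that was seeded from
// generateId()/currentDate()/modelId when the stream started.
function mergeResponseMetadata(
  current: { id: string; timestamp: Date; modelId: string },
  chunk: StreamResponseMetadata,
) {
  return {
    id: chunk.id ?? current.id,
    timestamp: chunk.timestamp ?? current.timestamp,
    modelId: chunk.modelId ?? current.modelId,
  };
}
```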
@@ -2373,15 +2448,20 @@ var DefaultStreamObjectResult = class {
                 "ai.response.object": {
                   output: () => JSON.stringify(object)
                 },
+                "ai.response.id": response.id,
+                "ai.response.model": response.modelId,
+                "ai.response.timestamp": response.timestamp.toISOString(),
                 "ai.usage.promptTokens": finalUsage.promptTokens,
                 "ai.usage.completionTokens": finalUsage.completionTokens,
                 // deprecated
                 "ai.finishReason": finishReason,
                 "ai.result.object": { output: () => JSON.stringify(object) },
                 // standardized gen-ai llm span attributes:
+                "gen_ai.response.finish_reasons": [finishReason],
+                "gen_ai.response.id": response.id,
+                "gen_ai.response.model": response.modelId,
                 "gen_ai.usage.input_tokens": finalUsage.promptTokens,
-                "gen_ai.usage.output_tokens": finalUsage.completionTokens
-                "gen_ai.response.finish_reasons": [finishReason]
+                "gen_ai.usage.output_tokens": finalUsage.completionTokens
               }
             })
           );
@@ -2405,6 +2485,10 @@ var DefaultStreamObjectResult = class {
             object,
             error,
             rawResponse,
+            response: {
+              ...response,
+              headers: rawResponse == null ? void 0 : rawResponse.headers
+            },
             warnings,
             experimental_providerMetadata: providerMetadata
           }));
@@ -2507,6 +2591,9 @@ var DefaultStreamObjectResult = class {
 };
 var experimental_streamObject = streamObject;
 
+// core/generate-text/generate-text.ts
+import { createIdGenerator as createIdGenerator3 } from "@ai-sdk/provider-utils";
+
 // core/prompt/prepare-tools-and-tool-choice.ts
 import { asSchema as asSchema2 } from "@ai-sdk/ui-utils";
 
@@ -2688,6 +2775,7 @@ function parseToolCall({
 }
 
 // core/generate-text/generate-text.ts
+var originalGenerateId3 = createIdGenerator3({ prefix: "aitxt-", length: 24 });
 async function generateText({
   model,
   tools,
@@ -2701,6 +2789,10 @@ async function generateText({
   maxAutomaticRoundtrips = 0,
   maxToolRoundtrips = maxAutomaticRoundtrips,
   experimental_telemetry: telemetry,
+  _internal: {
+    generateId: generateId3 = originalGenerateId3,
+    currentDate = () => /* @__PURE__ */ new Date()
+  } = {},
   ...settings
 }) {
   var _a11;
@@ -2730,7 +2822,7 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a12, _b, _c;
+      var _a12, _b, _c, _d, _e;
      const retry = retryWithExponentialBackoff({ maxRetries });
      const validatedPrompt = validatePrompt({
        system,
@@ -2788,6 +2880,7 @@ async function generateText({
         }),
         tracer,
         fn: async (span2) => {
+          var _a13, _b2, _c2, _d2, _e2, _f;
           const result = await model.doGenerate({
             mode,
             ...callSettings,
@@ -2796,6 +2889,11 @@ async function generateText({
             abortSignal,
             headers
           });
+          const responseData = {
+            id: (_b2 = (_a13 = result.response) == null ? void 0 : _a13.id) != null ? _b2 : generateId3(),
+            timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+            modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : model.modelId
+          };
           span2.setAttributes(
             selectTelemetryAttributes({
               telemetry,
@@ -2807,6 +2905,9 @@ async function generateText({
                 "ai.response.toolCalls": {
                   output: () => JSON.stringify(result.toolCalls)
                 },
+                "ai.response.id": responseData.id,
+                "ai.response.model": responseData.modelId,
+                "ai.response.timestamp": responseData.timestamp.toISOString(),
                 "ai.usage.promptTokens": result.usage.promptTokens,
                 "ai.usage.completionTokens": result.usage.completionTokens,
                 // deprecated:
@@ -2819,12 +2920,14 @@ async function generateText({
                 },
                 // standardized gen-ai llm span attributes:
                 "gen_ai.response.finish_reasons": [result.finishReason],
+                "gen_ai.response.id": responseData.id,
+                "gen_ai.response.model": responseData.modelId,
                 "gen_ai.usage.input_tokens": result.usage.promptTokens,
                 "gen_ai.usage.output_tokens": result.usage.completionTokens
               }
             })
           );
-          return result;
+          return { ...result, response: responseData };
         }
       })
     );
@@ -2837,7 +2940,7 @@ async function generateText({
         tracer,
         telemetry
       });
-      const currentUsage =
+      const currentUsage = calculateLanguageModelUsage(
         currentModelResponse.usage
       );
       usage.completionTokens += currentUsage.completionTokens;
@@ -2850,7 +2953,11 @@ async function generateText({
        finishReason: currentModelResponse.finishReason,
        usage: currentUsage,
        warnings: currentModelResponse.warnings,
-        logprobs: currentModelResponse.logprobs
+        logprobs: currentModelResponse.logprobs,
+        response: {
+          ...currentModelResponse.response,
+          headers: (_c = currentModelResponse.rawResponse) == null ? void 0 : _c.headers
+        }
      });
      const newResponseMessages = toResponseMessages({
        text: currentModelResponse.text,
@@ -2897,13 +3004,16 @@ async function generateText({
        // Always return a string so that the caller doesn't have to check for undefined.
        // If they need to check if the model did not return any text,
        // they can check the length of the string:
-        text: (
+        text: (_d = currentModelResponse.text) != null ? _d : "",
        toolCalls: currentToolCalls,
        toolResults: currentToolResults,
        finishReason: currentModelResponse.finishReason,
        usage,
        warnings: currentModelResponse.warnings,
-
+        response: {
+          ...currentModelResponse.response,
+          headers: (_e = currentModelResponse.rawResponse) == null ? void 0 : _e.headers
+        },
        logprobs: currentModelResponse.logprobs,
        responseMessages,
        roundtrips,
@@ -2979,13 +3089,20 @@ var DefaultGenerateTextResult = class {
     this.finishReason = options.finishReason;
     this.usage = options.usage;
     this.warnings = options.warnings;
-    this.
-    this.logprobs = options.logprobs;
+    this.response = options.response;
     this.responseMessages = options.responseMessages;
     this.roundtrips = options.roundtrips;
     this.experimental_providerMetadata = options.providerMetadata;
+    this.rawResponse = {
+      headers: options.response.headers
+    };
+    this.logprobs = options.logprobs;
   }
 };
+var experimental_generateText = generateText;
+
+// core/generate-text/stream-text.ts
+import { createIdGenerator as createIdGenerator4 } from "@ai-sdk/provider-utils";
 
 // core/util/create-stitchable-stream.ts
 function createStitchableStream() {
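`generateText` gets the same treatment: `result.response` holds the normalized metadata and `result.rawResponse` is now derived from it. A usage sketch (the model instance is a placeholder):

```ts
import { generateText } from "ai";

declare const model: any; // hypothetical provider model instance

const result = await generateText({ model, prompt: "Say hello." });

console.log(result.response.id);                      // provider id or an "aitxt-" fallback
console.log(result.response.timestamp.toISOString()); // always a Date
console.log(result.response.modelId);
console.log(result.response.headers);                 // same headers as rawResponse.headers
```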
@@ -3160,6 +3277,7 @@ function runToolsTransformation({
       const chunkType = chunk.type;
       switch (chunkType) {
         case "text-delta":
+        case "response-metadata":
         case "error": {
           controller.enqueue(chunk);
           break;
@@ -3280,7 +3398,7 @@ function runToolsTransformation({
           type: "finish",
           finishReason: chunk.finishReason,
           logprobs: chunk.logprobs,
-          usage:
+          usage: calculateLanguageModelUsage(chunk.usage),
           experimental_providerMetadata: chunk.providerMetadata
         });
         break;
@@ -3326,6 +3444,7 @@ function runToolsTransformation({
 }
 
 // core/generate-text/stream-text.ts
+var originalGenerateId4 = createIdGenerator4({ prefix: "aitxt-", length: 24 });
 async function streamText({
   model,
   tools,
@@ -3341,7 +3460,11 @@ async function streamText({
   experimental_toolCallStreaming: toolCallStreaming = false,
   onChunk,
   onFinish,
-  _internal: {
+  _internal: {
+    now: now2 = now,
+    generateId: generateId3 = originalGenerateId4,
+    currentDate = () => /* @__PURE__ */ new Date()
+  } = {},
   ...settings
 }) {
   var _a11;
@@ -3467,7 +3590,10 @@ async function streamText({
         maxToolRoundtrips,
         startRoundtrip,
         promptMessages,
-
+        modelId: model.modelId,
+        now: now2,
+        currentDate,
+        generateId: generateId3
       });
     }
   });
@@ -3486,7 +3612,10 @@ var DefaultStreamTextResult = class {
     maxToolRoundtrips,
     startRoundtrip,
     promptMessages,
-
+    modelId,
+    now: now2,
+    currentDate,
+    generateId: generateId3
   }) {
     this.warnings = warnings;
     this.rawResponse = rawResponse;
@@ -3505,6 +3634,8 @@ var DefaultStreamTextResult = class {
       promise: providerMetadataPromise
     } = createResolvablePromise();
     this.experimental_providerMetadata = providerMetadataPromise;
+    const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
+    this.response = responsePromise;
     const {
       stream: stitchableStream,
       addStream,
@@ -3536,10 +3667,16 @@ var DefaultStreamTextResult = class {
       let roundtripFirstChunk = true;
       let roundtripText = "";
       let roundtripLogProbs;
+      let roundtripResponse = {
+        id: generateId3(),
+        timestamp: currentDate(),
+        modelId
+      };
       addStream(
         stream2.pipeThrough(
           new TransformStream({
             async transform(chunk, controller) {
+              var _a11, _b, _c;
              if (roundtripFirstChunk) {
                const msToFirstChunk = now2() - startTimestamp;
                roundtripFirstChunk = false;
@@ -3559,22 +3696,33 @@ var DefaultStreamTextResult = class {
              }
              const chunkType = chunk.type;
              switch (chunkType) {
-                case "text-delta":
+                case "text-delta": {
                  controller.enqueue(chunk);
                  roundtripText += chunk.textDelta;
                  await (onChunk == null ? void 0 : onChunk({ chunk }));
                  break;
-
+                }
+                case "tool-call": {
                  controller.enqueue(chunk);
                  roundtripToolCalls.push(chunk);
                  await (onChunk == null ? void 0 : onChunk({ chunk }));
                  break;
-
+                }
+                case "tool-result": {
                  controller.enqueue(chunk);
                  roundtripToolResults.push(chunk);
                  await (onChunk == null ? void 0 : onChunk({ chunk }));
                  break;
-
+                }
+                case "response-metadata": {
+                  roundtripResponse = {
+                    id: (_a11 = chunk.id) != null ? _a11 : roundtripResponse.id,
+                    timestamp: (_b = chunk.timestamp) != null ? _b : roundtripResponse.timestamp,
+                    modelId: (_c = chunk.modelId) != null ? _c : roundtripResponse.modelId
+                  };
+                  break;
+                }
+                case "finish": {
                  roundtripUsage = chunk.usage;
                  roundtripFinishReason = chunk.finishReason;
                  roundtripProviderMetadata = chunk.experimental_providerMetadata;
@@ -3586,16 +3734,18 @@ var DefaultStreamTextResult = class {
                    "ai.response.avgCompletionTokensPerSecond": 1e3 * roundtripUsage.completionTokens / msToFinish
                  });
                  break;
+                }
                case "tool-call-streaming-start":
                case "tool-call-delta": {
                  controller.enqueue(chunk);
                  await (onChunk == null ? void 0 : onChunk({ chunk }));
                  break;
                }
-                case "error":
+                case "error": {
                  controller.enqueue(chunk);
                  roundtripFinishReason = "error";
                  break;
+                }
                default: {
                  const exhaustiveCheck = chunkType;
                  throw new Error(`Unknown chunk type: ${exhaustiveCheck}`);
@@ -3609,7 +3759,8 @@ var DefaultStreamTextResult = class {
              finishReason: roundtripFinishReason,
              usage: roundtripUsage,
              experimental_providerMetadata: roundtripProviderMetadata,
-              logprobs: roundtripLogProbs
+              logprobs: roundtripLogProbs,
+              response: roundtripResponse
            });
            const telemetryToolCalls = roundtripToolCalls.length > 0 ? JSON.stringify(roundtripToolCalls) : void 0;
            try {
@@ -3622,6 +3773,9 @@ var DefaultStreamTextResult = class {
                  "ai.response.toolCalls": {
                    output: () => telemetryToolCalls
                  },
+                  "ai.response.id": roundtripResponse.id,
+                  "ai.response.model": roundtripResponse.modelId,
+                  "ai.response.timestamp": roundtripResponse.timestamp.toISOString(),
                  "ai.usage.promptTokens": roundtripUsage.promptTokens,
                  "ai.usage.completionTokens": roundtripUsage.completionTokens,
                  // deprecated
@@ -3632,6 +3786,8 @@ var DefaultStreamTextResult = class {
                  },
                  // standardized gen-ai llm span attributes:
                  "gen_ai.response.finish_reasons": [roundtripFinishReason],
+                  "gen_ai.response.id": roundtripResponse.id,
+                  "gen_ai.response.model": roundtripResponse.modelId,
                  "gen_ai.usage.input_tokens": roundtripUsage.promptTokens,
                  "gen_ai.usage.output_tokens": roundtripUsage.completionTokens
                }
@@ -3687,7 +3843,8 @@ var DefaultStreamTextResult = class {
            finishReason: roundtripFinishReason,
            usage: combinedUsage,
            experimental_providerMetadata: roundtripProviderMetadata,
-            logprobs: roundtripLogProbs
+            logprobs: roundtripLogProbs,
+            response: roundtripResponse
          });
          closeStitchableStream();
          rootSpan.setAttributes(
@@ -3716,6 +3873,10 @@ var DefaultStreamTextResult = class {
          resolveToolCalls(roundtripToolCalls);
          resolveProviderMetadata(roundtripProviderMetadata);
          resolveToolResults(roundtripToolResults);
+          resolveResponse({
+            ...roundtripResponse,
+            headers: rawResponse == null ? void 0 : rawResponse.headers
+          });
          await (onFinish == null ? void 0 : onFinish({
            finishReason: roundtripFinishReason,
            usage: combinedUsage,
@@ -3727,6 +3888,10 @@ var DefaultStreamTextResult = class {
            // The type exposed to the users will be correctly inferred.
            toolResults: roundtripToolResults,
            rawResponse,
+            response: {
+              ...roundtripResponse,
+              headers: rawResponse == null ? void 0 : rawResponse.headers
+            },
            warnings,
            experimental_providerMetadata: roundtripProviderMetadata
          }));
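For `streamText`, the `onFinish` callback now receives the same `response` metadata (from the final roundtrip), and the result additionally exposes it as a promise. A sketch (the model instance is a placeholder):

```ts
import { streamText } from "ai";

declare const model: any; // hypothetical provider model instance

const result = await streamText({
  model,
  prompt: "Write a haiku.",
  onFinish({ response }) {
    // id/timestamp/modelId plus the raw response headers
    console.log(response.id, response.modelId);
  },
});

// the same metadata also resolves on the result itself:
const response = await result.response;
console.log(response.timestamp.toISOString());
```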
@@ -3969,6 +4134,7 @@ var DefaultStreamTextResult = class {
     });
   }
 };
+var experimental_streamText = streamText;
 
 // core/prompt/attachments-to-parts.ts
 function attachmentsToParts(attachments) {
@@ -5416,7 +5582,9 @@ export {
   experimental_createProviderRegistry,
   experimental_customProvider,
   experimental_generateObject,
+  experimental_generateText,
   experimental_streamObject,
+  experimental_streamText,
   formatStreamPart,
   generateId2 as generateId,
   generateObject,