ai 3.1.19 → 3.1.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +28 -2
- package/dist/index.d.ts +28 -2
- package/dist/index.js +129 -91
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +129 -91
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/rsc/dist/rsc-server.mjs +61 -57
- package/rsc/dist/rsc-server.mjs.map +1 -1
package/dist/index.d.mts
CHANGED
@@ -814,7 +814,9 @@ Generate a text and call tools for a given prompt using a language model.
 This function does not stream the output. If you want to stream the output, use `streamText` instead.
 
 @param model - The language model to use.
+
 @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
+@param toolChoice - The tool choice strategy. Default: 'auto'.
 
 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -839,10 +841,12 @@ If set and supported by the model, calls will generate deterministic results.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 
+@param maxAutomaticRoundtrips - Maximal number of automatic roundtrips for tool calls.
+
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, maxAutomaticRoundtrips, ...settings }: CallSettings & Prompt & {
     /**
 The language model to use.
      */
@@ -855,6 +859,19 @@ The tools that the model can call. The model needs to support calling tools.
 The tool choice strategy. Default: 'auto'.
      */
     toolChoice?: CoreToolChoice<TOOLS>;
+    /**
+Maximal number of automatic roundtrips for tool calls.
+
+An automatic tool call roundtrip is another LLM call with the
+tool call results when all tool calls of the last assistant
+message have results.
+
+A maximum number is required to prevent infinite loops in the
+case of misconfigured tools.
+
+By default, it's set to 0, which will disable the feature.
+     */
+    maxAutomaticRoundtrips?: number;
 }): Promise<GenerateTextResult<TOOLS>>;
 /**
 The result of a `generateText` call.
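The new `maxAutomaticRoundtrips` option lets `generateText` feed tool results back to the model on its own; with `maxAutomaticRoundtrips: 2` the SDK may make up to two follow-up model calls after the initial one, as long as every tool call in the last assistant message produced a result. A minimal usage sketch, not part of this diff — the provider import, model id, and tool definition are illustrative assumptions:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai'; // illustrative provider choice
import { z } from 'zod';

const result = await generateText({
  model: openai('gpt-4-turbo'), // any supported model; illustrative
  tools: {
    weather: {
      description: 'Get the weather for a city',
      parameters: z.object({ city: z.string() }),
      // executed locally; its result is fed back to the model on the next roundtrip
      execute: async ({ city }) => ({ city, temperature: 21 }),
    },
  },
  // 0 (the default) keeps the old single-call behavior
  maxAutomaticRoundtrips: 2,
  prompt: 'What is the weather in Berlin?',
});

console.log(result.text);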
@@ -918,6 +935,7 @@ declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
             headers?: Record<string, string>;
         };
         logprobs: LogProbs | undefined;
+        responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
     });
 }
 /**
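`responseMessages` exposes the assistant and tool messages produced across all roundtrips, which makes it easy to keep a conversation history in sync. A hedged sketch continuing the example above; `history` is a hypothetical array maintained by the caller:

import type { CoreMessage } from 'ai';

const history: CoreMessage[] = [];
history.push({ role: 'user', content: 'What is the weather in Berlin?' });

// after the generateText call above:
history.push(...result.responseMessages); // assistant + tool messages from every roundtrip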
@@ -1864,6 +1882,9 @@ interface CompletionUsage {
     total_tokens: number;
 }
 type AsyncIterableOpenAIStreamReturnTypes = AsyncIterable<ChatCompletionChunk> | AsyncIterable<Completion> | AsyncIterable<AzureChatCompletions>;
+/**
+ * @deprecated Use the [OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai) instead.
+ */
 declare function OpenAIStream(res: Response | AsyncIterableOpenAIStreamReturnTypes, callbacks?: OpenAIStreamCallbacks): ReadableStream;
 
 interface FunctionCallPayload {
@@ -2081,6 +2102,8 @@ interface MessageStopEvent {
  * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
  * or the return value of `await client.completions.create({ stream: true })`
  * from the `@anthropic-ai/sdk` package.
+ *
+ * @deprecated Use the [Anthropic provider](https://sdk.vercel.ai/providers/ai-sdk-providers/anthropic) instead.
  */
 declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;
 
@@ -2171,6 +2194,9 @@ interface TextPart {
     text: string;
     inlineData?: never;
 }
+/**
+ * @deprecated Use the [Google Generative AI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/google-generative-ai) instead.
+ */
 declare function GoogleGenerativeAIStream(response: {
     stream: AsyncIterable<GenerateContentResponse>;
 }, cb?: AIStreamCallbacksAndOptions): ReadableStream;
@@ -2222,7 +2248,7 @@ declare namespace langchainAdapter {
 }
 
 /**
- @deprecated Use LangChainAdapter.
+ * @deprecated Use [LangChainAdapter](https://sdk.vercel.ai/providers/adapters/langchain) instead.
  */
 declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
     stream: ReadableStream<any>;
package/dist/index.d.ts
CHANGED
@@ -814,7 +814,9 @@ Generate a text and call tools for a given prompt using a language model.
 This function does not stream the output. If you want to stream the output, use `streamText` instead.
 
 @param model - The language model to use.
+
 @param tools - Tools that are accessible to and can be called by the model. The model needs to support calling tools.
+@param toolChoice - The tool choice strategy. Default: 'auto'.
 
 @param system - A system message that will be part of the prompt.
 @param prompt - A simple text prompt. You can either use `prompt` or `messages` but not both.
@@ -839,10 +841,12 @@ If set and supported by the model, calls will generate deterministic results.
 @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
 @param abortSignal - An optional abort signal that can be used to cancel the call.
 
+@param maxAutomaticRoundtrips - Maximal number of automatic roundtrips for tool calls.
+
 @returns
 A result object that contains the generated text, the results of the tool calls, and additional information.
  */
-declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, ...settings }: CallSettings & Prompt & {
+declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, maxAutomaticRoundtrips, ...settings }: CallSettings & Prompt & {
     /**
 The language model to use.
      */
@@ -855,6 +859,19 @@ The tools that the model can call. The model needs to support calling tools.
 The tool choice strategy. Default: 'auto'.
      */
     toolChoice?: CoreToolChoice<TOOLS>;
+    /**
+Maximal number of automatic roundtrips for tool calls.
+
+An automatic tool call roundtrip is another LLM call with the
+tool call results when all tool calls of the last assistant
+message have results.
+
+A maximum number is required to prevent infinite loops in the
+case of misconfigured tools.
+
+By default, it's set to 0, which will disable the feature.
+     */
+    maxAutomaticRoundtrips?: number;
 }): Promise<GenerateTextResult<TOOLS>>;
 /**
 The result of a `generateText` call.
@@ -918,6 +935,7 @@ declare class GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
             headers?: Record<string, string>;
         };
         logprobs: LogProbs | undefined;
+        responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
     });
 }
 /**
@@ -1864,6 +1882,9 @@ interface CompletionUsage {
     total_tokens: number;
 }
 type AsyncIterableOpenAIStreamReturnTypes = AsyncIterable<ChatCompletionChunk> | AsyncIterable<Completion> | AsyncIterable<AzureChatCompletions>;
+/**
+ * @deprecated Use the [OpenAI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/openai) instead.
+ */
 declare function OpenAIStream(res: Response | AsyncIterableOpenAIStreamReturnTypes, callbacks?: OpenAIStreamCallbacks): ReadableStream;
 
 interface FunctionCallPayload {
@@ -2081,6 +2102,8 @@ interface MessageStopEvent {
  * Accepts either a fetch Response from the Anthropic `POST /v1/complete` endpoint,
  * or the return value of `await client.completions.create({ stream: true })`
  * from the `@anthropic-ai/sdk` package.
+ *
+ * @deprecated Use the [Anthropic provider](https://sdk.vercel.ai/providers/ai-sdk-providers/anthropic) instead.
  */
 declare function AnthropicStream(res: Response | AsyncIterable<CompletionChunk> | AsyncIterable<MessageStreamEvent>, cb?: AIStreamCallbacksAndOptions): ReadableStream;
 
@@ -2171,6 +2194,9 @@ interface TextPart {
     text: string;
     inlineData?: never;
 }
+/**
+ * @deprecated Use the [Google Generative AI provider](https://sdk.vercel.ai/providers/ai-sdk-providers/google-generative-ai) instead.
+ */
 declare function GoogleGenerativeAIStream(response: {
     stream: AsyncIterable<GenerateContentResponse>;
 }, cb?: AIStreamCallbacksAndOptions): ReadableStream;
@@ -2222,7 +2248,7 @@ declare namespace langchainAdapter {
 }
 
 /**
- @deprecated Use LangChainAdapter.
+ * @deprecated Use [LangChainAdapter](https://sdk.vercel.ai/providers/adapters/langchain) instead.
  */
 declare function LangChainStream(callbacks?: AIStreamCallbacksAndOptions): {
     stream: ReadableStream<any>;
package/dist/index.js
CHANGED
@@ -309,7 +309,8 @@ function convertToLanguageModelPrompt(prompt) {
   if (prompt.system != null) {
     languageModelMessages.push({ role: "system", content: prompt.system });
   }
-  switch (prompt.type) {
+  const promptType = prompt.type;
+  switch (promptType) {
     case "prompt": {
       languageModelMessages.push({
         role: "user",
@@ -319,72 +320,75 @@ function convertToLanguageModelPrompt(prompt) {
     }
     case "messages": {
       languageModelMessages.push(
-        ...prompt.messages.map(
-        … (10 removed lines not captured in this diff view)
+        ...prompt.messages.map(convertToLanguageModelMessage)
+      );
+      break;
+    }
+    default: {
+      const _exhaustiveCheck = promptType;
+      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
+    }
+  }
+  return languageModelMessages;
+}
+function convertToLanguageModelMessage(message) {
+  switch (message.role) {
+    case "system": {
+      return { role: "system", content: message.content };
+    }
+    case "user": {
+      if (typeof message.content === "string") {
+        return {
+          role: "user",
+          content: [{ type: "text", text: message.content }]
+        };
+      }
+      return {
+        role: "user",
+        content: message.content.map(
+          (part) => {
+            var _a;
+            switch (part.type) {
+              case "text": {
+                return part;
               }
-              … (9 removed lines not captured in this diff view)
-                case "image": {
-                  if (part.image instanceof URL) {
-                    return {
-                      type: "image",
-                      image: part.image,
-                      mimeType: part.mimeType
-                    };
-                  }
-                  const imageUint8 = convertDataContentToUint8Array(
-                    part.image
-                  );
-                  return {
-                    type: "image",
-                    image: imageUint8,
-                    mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
-                  };
-                }
-              }
-            }
-          )
-        };
-      }
-      case "assistant": {
-        if (typeof message.content === "string") {
+              case "image": {
+                if (part.image instanceof URL) {
+                  return {
+                    type: "image",
+                    image: part.image,
+                    mimeType: part.mimeType
+                  };
+                }
+                const imageUint8 = convertDataContentToUint8Array(part.image);
                 return {
-                … (2 removed lines not captured in this diff view)
+                  type: "image",
+                  image: imageUint8,
+                  mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
                 };
               }
-        return { role: "assistant", content: message.content };
-      }
-      case "tool": {
-        return message;
             }
           }
-          … (3 removed lines not captured in this diff view)
+        )
+      };
+    }
+    case "assistant": {
+      if (typeof message.content === "string") {
+        return {
+          role: "assistant",
+          content: [{ type: "text", text: message.content }]
+        };
+      }
+      return { role: "assistant", content: message.content };
+    }
+    case "tool": {
+      return message;
    }
    default: {
-      const _exhaustiveCheck = …
-      throw new Error(`Unsupported …
+      const _exhaustiveCheck = message;
+      throw new Error(`Unsupported message role: ${_exhaustiveCheck}`);
    }
  }
-  return languageModelMessages;
}

 // core/prompt/get-validated-prompt.ts
@@ -1339,40 +1343,66 @@ async function generateText({
   messages,
   maxRetries,
   abortSignal,
+  maxAutomaticRoundtrips = 0,
   ...settings
 }) {
-  var _a, _b;
+  var _a, _b, _c;
   const retry = retryWithExponentialBackoff({ maxRetries });
   const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const …
-  … (9 removed lines not captured in this diff view)
+  const mode = {
+    type: "regular",
+    ...prepareToolsAndToolChoice({ tools, toolChoice })
+  };
+  const callSettings = prepareCallSettings(settings);
+  const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+  let currentModelResponse;
+  let currentToolCalls = [];
+  let currentToolResults = [];
+  let roundtrips = 0;
+  const responseMessages = [];
+  do {
+    currentModelResponse = await retry(() => {
+      return model.doGenerate({
+        mode,
+        ...callSettings,
+        // once we have a roundtrip, we need to switch to messages format:
+        inputFormat: roundtrips === 0 ? validatedPrompt.type : "messages",
+        prompt: promptMessages,
+        abortSignal
+      });
    });
-    … (6 removed lines not captured in this diff view)
+    currentToolCalls = ((_a = currentModelResponse.toolCalls) != null ? _a : []).map(
+      (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
+    );
+    currentToolResults = tools == null ? [] : await executeTools({ toolCalls: currentToolCalls, tools });
+    const newResponseMessages = toResponseMessages({
+      text: (_b = currentModelResponse.text) != null ? _b : "",
+      toolCalls: currentToolCalls,
+      toolResults: currentToolResults
+    });
+    responseMessages.push(...newResponseMessages);
+    promptMessages.push(
+      ...newResponseMessages.map(convertToLanguageModelMessage)
+    );
+  } while (
+    // there are tool calls:
+    currentToolCalls.length > 0 && // all current tool calls have results:
+    currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
+    roundtrips++ < maxAutomaticRoundtrips
+  );
   return new GenerateTextResult({
     // Always return a string so that the caller doesn't have to check for undefined.
     // If they need to check if the model did not return any text,
     // they can check the length of the string:
-    text: (…
-    toolCalls,
-    toolResults,
-    finishReason: …
-    usage: calculateTokenUsage(…
-    warnings: …
-    rawResponse: …
-    logprobs: …
+    text: (_c = currentModelResponse.text) != null ? _c : "",
+    toolCalls: currentToolCalls,
+    toolResults: currentToolResults,
+    finishReason: currentModelResponse.finishReason,
+    usage: calculateTokenUsage(currentModelResponse.usage),
+    warnings: currentModelResponse.warnings,
+    rawResponse: currentModelResponse.rawResponse,
+    logprobs: currentModelResponse.logprobs,
+    responseMessages
   });
 }
 async function executeTools({
@@ -1408,7 +1438,7 @@ var GenerateTextResult = class {
     this.warnings = options.warnings;
     this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
-    this.responseMessages = …
+    this.responseMessages = options.responseMessages;
   }
 };
 function toResponseMessages({
@@ -1436,6 +1466,16 @@ function toResponseMessages({
 }
 var experimental_generateText = generateText;
 
+// core/util/prepare-response-headers.ts
+function prepareResponseHeaders(init, { contentType }) {
+  var _a;
+  const headers = new Headers((_a = init == null ? void 0 : init.headers) != null ? _a : {});
+  if (!headers.has("Content-Type")) {
+    headers.set("Content-Type", contentType);
+  }
+  return headers;
+}
+
 // core/generate-text/run-tools-transformation.ts
 var import_provider7 = require("@ai-sdk/provider");
 
@@ -1908,10 +1948,9 @@ var StreamTextResult = class {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
       status: (_a = init == null ? void 0 : init.status) != null ? _a : 200,
-      headers: {
-      … (2 removed lines not captured in this diff view)
-      }
+      headers: prepareResponseHeaders(init, {
+        contentType: "text/plain; charset=utf-8"
+      })
     });
   }
 };
@@ -3522,10 +3561,9 @@ var StreamingTextResponse = class extends Response {
     super(processedStream, {
       ...init,
       status: 200,
-      headers: {
-      … (2 removed lines not captured in this diff view)
-      }
+      headers: prepareResponseHeaders(init, {
+        contentType: "text/plain; charset=utf-8"
+      })
     });
   }
 };