modelfusion 0.114.0 → 0.115.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/CHANGELOG.md +12 -0
  2. package/README.md +5 -6
  3. package/model-provider/index.cjs +0 -1
  4. package/model-provider/index.d.ts +0 -1
  5. package/model-provider/index.js +0 -1
  6. package/model-provider/openai/AbstractOpenAIChatModel.cjs +2 -2
  7. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +16 -16
  8. package/model-provider/openai/AbstractOpenAIChatModel.js +2 -2
  9. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +2 -2
  10. package/package.json +1 -1
  11. package/model-provider/anthropic/AnthropicApiConfiguration.cjs +0 -31
  12. package/model-provider/anthropic/AnthropicApiConfiguration.d.ts +0 -10
  13. package/model-provider/anthropic/AnthropicApiConfiguration.js +0 -27
  14. package/model-provider/anthropic/AnthropicError.cjs +0 -16
  15. package/model-provider/anthropic/AnthropicError.d.ts +0 -26
  16. package/model-provider/anthropic/AnthropicError.js +0 -13
  17. package/model-provider/anthropic/AnthropicFacade.cjs +0 -24
  18. package/model-provider/anthropic/AnthropicFacade.d.ts +0 -18
  19. package/model-provider/anthropic/AnthropicFacade.js +0 -19
  20. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +0 -82
  21. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +0 -17
  22. package/model-provider/anthropic/AnthropicPromptTemplate.js +0 -76
  23. package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs +0 -49
  24. package/model-provider/anthropic/AnthropicPromptTemplate.test.d.ts +0 -1
  25. package/model-provider/anthropic/AnthropicPromptTemplate.test.js +0 -47
  26. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +0 -254
  27. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +0 -153
  28. package/model-provider/anthropic/AnthropicTextGenerationModel.js +0 -250
  29. package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +0 -44
  30. package/model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts +0 -1
  31. package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +0 -42
  32. package/model-provider/anthropic/index.cjs +0 -33
  33. package/model-provider/anthropic/index.d.ts +0 -5
  34. package/model-provider/anthropic/index.js +0 -4
@@ -1,76 +0,0 @@
1
- import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
2
- import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
3
- const HUMAN_PREFIX = "\n\nHuman:";
4
- const ASSISTANT_PREFIX = "\n\nAssistant:";
5
- /**
6
- * Formats a text prompt as an Anthropic prompt.
7
- */
8
- export function text() {
9
- return {
10
- format(prompt) {
11
- let text = "";
12
- text += HUMAN_PREFIX;
13
- text += prompt;
14
- text += ASSISTANT_PREFIX;
15
- return text;
16
- },
17
- stopSequences: [],
18
- };
19
- }
20
- /**
21
- * Formats an instruction prompt as an Anthropic prompt.
22
- */
23
- export function instruction() {
24
- return {
25
- format(prompt) {
26
- const instruction = validateContentIsString(prompt.instruction, prompt);
27
- let text = prompt.system ?? "";
28
- text += HUMAN_PREFIX;
29
- text += instruction;
30
- text += ASSISTANT_PREFIX;
31
- if (prompt.responsePrefix != null) {
32
- text += prompt.responsePrefix;
33
- }
34
- return text;
35
- },
36
- stopSequences: [],
37
- };
38
- }
39
- /**
40
- * Formats a chat prompt as an Anthropic prompt.
41
- *
42
- * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
43
- */
44
- export function chat() {
45
- return {
46
- format(prompt) {
47
- let text = prompt.system ?? "";
48
- for (const { role, content } of prompt.messages) {
49
- switch (role) {
50
- case "user": {
51
- const textContent = validateContentIsString(content, prompt);
52
- text += HUMAN_PREFIX;
53
- text += textContent;
54
- break;
55
- }
56
- case "assistant": {
57
- text += ASSISTANT_PREFIX;
58
- text += content;
59
- break;
60
- }
61
- case "tool": {
62
- throw new InvalidPromptError("Tool messages are not supported.", prompt);
63
- }
64
- default: {
65
- const _exhaustiveCheck = role;
66
- throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
67
- }
68
- }
69
- }
70
- // AI message prefix:
71
- text += ASSISTANT_PREFIX;
72
- return text;
73
- },
74
- stopSequences: [],
75
- };
76
- }
@@ -1,49 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- const AnthropicPromptTemplate_js_1 = require("./AnthropicPromptTemplate.cjs");
4
- describe("text prompt", () => {
5
- it("should format prompt", () => {
6
- const prompt = (0, AnthropicPromptTemplate_js_1.text)().format("prompt");
7
- expect(prompt).toMatchSnapshot();
8
- });
9
- });
10
- describe("instruction prompt", () => {
11
- it("should format prompt with instruction", () => {
12
- const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
13
- instruction: "instruction",
14
- });
15
- expect(prompt).toMatchSnapshot();
16
- });
17
- it("should format prompt with system and instruction", () => {
18
- const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
19
- system: "system",
20
- instruction: "instruction",
21
- });
22
- expect(prompt).toMatchSnapshot();
23
- });
24
- it("should format prompt with instruction and response prefix", () => {
25
- const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
26
- instruction: "instruction",
27
- responsePrefix: "response prefix",
28
- });
29
- expect(prompt).toMatchSnapshot();
30
- });
31
- });
32
- describe("chat prompt", () => {
33
- it("should format prompt with user message", () => {
34
- const prompt = (0, AnthropicPromptTemplate_js_1.chat)().format({
35
- messages: [{ role: "user", content: "user message" }],
36
- });
37
- expect(prompt).toMatchSnapshot();
38
- });
39
- it("should format prompt with user-assistant-user messages", () => {
40
- const prompt = (0, AnthropicPromptTemplate_js_1.chat)().format({
41
- messages: [
42
- { role: "user", content: "1st user message" },
43
- { role: "assistant", content: "assistant message" },
44
- { role: "user", content: "2nd user message" },
45
- ],
46
- });
47
- expect(prompt).toMatchSnapshot();
48
- });
49
- });
@@ -1,47 +0,0 @@
1
- import { chat, instruction, text } from "./AnthropicPromptTemplate.js";
2
- describe("text prompt", () => {
3
- it("should format prompt", () => {
4
- const prompt = text().format("prompt");
5
- expect(prompt).toMatchSnapshot();
6
- });
7
- });
8
- describe("instruction prompt", () => {
9
- it("should format prompt with instruction", () => {
10
- const prompt = instruction().format({
11
- instruction: "instruction",
12
- });
13
- expect(prompt).toMatchSnapshot();
14
- });
15
- it("should format prompt with system and instruction", () => {
16
- const prompt = instruction().format({
17
- system: "system",
18
- instruction: "instruction",
19
- });
20
- expect(prompt).toMatchSnapshot();
21
- });
22
- it("should format prompt with instruction and response prefix", () => {
23
- const prompt = instruction().format({
24
- instruction: "instruction",
25
- responsePrefix: "response prefix",
26
- });
27
- expect(prompt).toMatchSnapshot();
28
- });
29
- });
30
- describe("chat prompt", () => {
31
- it("should format prompt with user message", () => {
32
- const prompt = chat().format({
33
- messages: [{ role: "user", content: "user message" }],
34
- });
35
- expect(prompt).toMatchSnapshot();
36
- });
37
- it("should format prompt with user-assistant-user messages", () => {
38
- const prompt = chat().format({
39
- messages: [
40
- { role: "user", content: "1st user message" },
41
- { role: "assistant", content: "assistant message" },
42
- { role: "user", content: "2nd user message" },
43
- ],
44
- });
45
- expect(prompt).toMatchSnapshot();
46
- });
47
- });
@@ -1,254 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.AnthropicTextGenerationResponseFormat = exports.AnthropicTextGenerationModel = exports.ANTHROPIC_TEXT_GENERATION_MODELS = void 0;
4
- const zod_1 = require("zod");
5
- const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
- const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
8
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
9
- const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
10
- const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
11
- const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
12
- const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
13
- const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
14
- const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
15
- const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
16
- const AnthropicError_js_1 = require("./AnthropicError.cjs");
17
- const AnthropicPromptTemplate_js_1 = require("./AnthropicPromptTemplate.cjs");
18
- exports.ANTHROPIC_TEXT_GENERATION_MODELS = {
19
- "claude-instant-1": {
20
- contextWindowSize: 100000,
21
- },
22
- "claude-instant-1.2": {
23
- contextWindowSize: 100000,
24
- },
25
- "claude-2": {
26
- contextWindowSize: 200000,
27
- },
28
- "claude-2.0": {
29
- contextWindowSize: 100000,
30
- },
31
- "claude-2.1": {
32
- contextWindowSize: 200000,
33
- },
34
- };
35
- /**
36
- * Create a text generation model that calls the Anthropic API.
37
- *
38
- * @see https://docs.anthropic.com/claude/reference/complete_post
39
- */
40
- class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
41
- constructor(settings) {
42
- super({ settings });
43
- Object.defineProperty(this, "provider", {
44
- enumerable: true,
45
- configurable: true,
46
- writable: true,
47
- value: "anthropic"
48
- });
49
- Object.defineProperty(this, "contextWindowSize", {
50
- enumerable: true,
51
- configurable: true,
52
- writable: true,
53
- value: void 0
54
- });
55
- Object.defineProperty(this, "tokenizer", {
56
- enumerable: true,
57
- configurable: true,
58
- writable: true,
59
- value: undefined
60
- });
61
- Object.defineProperty(this, "countPromptTokens", {
62
- enumerable: true,
63
- configurable: true,
64
- writable: true,
65
- value: undefined
66
- });
67
- this.contextWindowSize =
68
- exports.ANTHROPIC_TEXT_GENERATION_MODELS[this.settings.model].contextWindowSize;
69
- }
70
- get modelName() {
71
- return this.settings.model;
72
- }
73
- async callAPI(prompt, callOptions, options) {
74
- const api = this.settings.api ?? new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration();
75
- const responseFormat = options.responseFormat;
76
- const abortSignal = callOptions.run?.abortSignal;
77
- const userId = this.settings.userId;
78
- return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
79
- retry: api.retry,
80
- throttle: api.throttle,
81
- call: async () => (0, postToApi_js_1.postJsonToApi)({
82
- url: api.assembleUrl(`/complete`),
83
- headers: api.headers({
84
- functionType: callOptions.functionType,
85
- functionId: callOptions.functionId,
86
- run: callOptions.run,
87
- callId: callOptions.callId,
88
- }),
89
- body: {
90
- model: this.settings.model,
91
- prompt,
92
- stream: responseFormat.stream,
93
- max_tokens_to_sample: this.settings.maxGenerationTokens ?? 100,
94
- temperature: this.settings.temperature,
95
- top_k: this.settings.topK,
96
- top_p: this.settings.topP,
97
- stop_sequences: this.settings.stopSequences,
98
- metadata: userId != null ? { user_id: userId } : undefined,
99
- },
100
- failedResponseHandler: AnthropicError_js_1.failedAnthropicCallResponseHandler,
101
- successfulResponseHandler: responseFormat.handler,
102
- abortSignal,
103
- }),
104
- });
105
- }
106
- get settingsForEvent() {
107
- const eventSettingProperties = [
108
- ...TextGenerationModel_js_1.textGenerationModelProperties,
109
- "temperature",
110
- "topK",
111
- "topP",
112
- "userId",
113
- ];
114
- return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
115
- }
116
- async doGenerateTexts(prompt, options) {
117
- return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
118
- responseFormat: exports.AnthropicTextGenerationResponseFormat.json,
119
- }));
120
- }
121
- restoreGeneratedTexts(rawResponse) {
122
- return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
123
- structure: rawResponse,
124
- schema: (0, ZodSchema_js_1.zodSchema)(anthropicTextGenerationResponseSchema),
125
- }));
126
- }
127
- processTextGenerationResponse(response) {
128
- return {
129
- response,
130
- textGenerationResults: [
131
- {
132
- text: response.completion,
133
- finishReason: this.translateFinishReason(response.stop_reason),
134
- },
135
- ],
136
- };
137
- }
138
- translateFinishReason(finishReason) {
139
- switch (finishReason) {
140
- case "stop_sequence":
141
- return "stop";
142
- case "max_tokens":
143
- return "length";
144
- default:
145
- return "unknown";
146
- }
147
- }
148
- doStreamText(prompt, options) {
149
- return this.callAPI(prompt, options, {
150
- responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
151
- });
152
- }
153
- extractTextDelta(delta) {
154
- const chunk = delta;
155
- return chunk.completion;
156
- }
157
- /**
158
- * Returns this model with a text prompt template.
159
- */
160
- withTextPrompt() {
161
- return this.withPromptTemplate((0, AnthropicPromptTemplate_js_1.text)());
162
- }
163
- /**
164
- * Returns this model with an instruction prompt template.
165
- */
166
- withInstructionPrompt() {
167
- return this.withPromptTemplate((0, AnthropicPromptTemplate_js_1.instruction)());
168
- }
169
- /**
170
- * Returns this model with a chat prompt template.
171
- */
172
- withChatPrompt() {
173
- return this.withPromptTemplate((0, AnthropicPromptTemplate_js_1.chat)());
174
- }
175
- withPromptTemplate(promptTemplate) {
176
- return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
177
- model: this.withSettings({
178
- stopSequences: [
179
- ...(this.settings.stopSequences ?? []),
180
- ...promptTemplate.stopSequences,
181
- ],
182
- }),
183
- promptTemplate,
184
- });
185
- }
186
- withSettings(additionalSettings) {
187
- return new AnthropicTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
188
- }
189
- }
190
- exports.AnthropicTextGenerationModel = AnthropicTextGenerationModel;
191
- const anthropicTextGenerationResponseSchema = zod_1.z.object({
192
- completion: zod_1.z.string(),
193
- stop_reason: zod_1.z.string(),
194
- model: zod_1.z.string(),
195
- });
196
- const anthropicTextStreamChunkSchema = zod_1.z.object({
197
- completion: zod_1.z.string(),
198
- stop_reason: zod_1.z.string().nullable(),
199
- model: zod_1.z.string(),
200
- });
201
- async function createAnthropicFullDeltaIterableQueue(stream) {
202
- const queue = new AsyncQueue_js_1.AsyncQueue();
203
- // process the stream asynchonously (no 'await' on purpose):
204
- (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
205
- .then(async (events) => {
206
- try {
207
- for await (const event of events) {
208
- if (event.event === "error") {
209
- queue.push({ type: "error", error: event.data });
210
- queue.close();
211
- return;
212
- }
213
- if (event.event !== "completion") {
214
- continue;
215
- }
216
- const data = event.data;
217
- const eventData = (0, parseJSON_js_1.parseJSON)({
218
- text: data,
219
- schema: (0, ZodSchema_js_1.zodSchema)(anthropicTextStreamChunkSchema),
220
- });
221
- queue.push({ type: "delta", deltaValue: eventData });
222
- if (eventData.stop_reason != null) {
223
- queue.close();
224
- }
225
- }
226
- }
227
- catch (error) {
228
- queue.push({ type: "error", error });
229
- queue.close();
230
- }
231
- })
232
- .catch((error) => {
233
- queue.push({ type: "error", error });
234
- queue.close();
235
- });
236
- return queue;
237
- }
238
- exports.AnthropicTextGenerationResponseFormat = {
239
- /**
240
- * Returns the response as a JSON object.
241
- */
242
- json: {
243
- stream: false,
244
- handler: (0, postToApi_js_1.createJsonResponseHandler)((0, ZodSchema_js_1.zodSchema)(anthropicTextGenerationResponseSchema)),
245
- },
246
- /**
247
- * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
248
- * of the response stream.
249
- */
250
- deltaIterable: {
251
- stream: true,
252
- handler: async ({ response }) => createAnthropicFullDeltaIterableQueue(response.body),
253
- },
254
- };
@@ -1,153 +0,0 @@
1
- import { z } from "zod";
2
- import { FunctionCallOptions } from "../../core/FunctionOptions.js";
3
- import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
- import { ResponseHandler } from "../../core/api/postToApi.js";
5
- import { AbstractModel } from "../../model-function/AbstractModel.js";
6
- import { Delta } from "../../model-function/Delta.js";
7
- import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
8
- import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
9
- import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
10
- import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
11
- export declare const ANTHROPIC_TEXT_GENERATION_MODELS: {
12
- "claude-instant-1": {
13
- contextWindowSize: number;
14
- };
15
- "claude-instant-1.2": {
16
- contextWindowSize: number;
17
- };
18
- "claude-2": {
19
- contextWindowSize: number;
20
- };
21
- "claude-2.0": {
22
- contextWindowSize: number;
23
- };
24
- "claude-2.1": {
25
- contextWindowSize: number;
26
- };
27
- };
28
- export type AnthropicTextGenerationModelType = keyof typeof ANTHROPIC_TEXT_GENERATION_MODELS;
29
- export interface AnthropicTextGenerationModelSettings extends TextGenerationModelSettings {
30
- api?: ApiConfiguration;
31
- model: AnthropicTextGenerationModelType;
32
- temperature?: number;
33
- topP?: number;
34
- topK?: number;
35
- userId?: number;
36
- }
37
- /**
38
- * Create a text generation model that calls the Anthropic API.
39
- *
40
- * @see https://docs.anthropic.com/claude/reference/complete_post
41
- */
42
- export declare class AnthropicTextGenerationModel extends AbstractModel<AnthropicTextGenerationModelSettings> implements TextStreamingModel<string, AnthropicTextGenerationModelSettings> {
43
- constructor(settings: AnthropicTextGenerationModelSettings);
44
- readonly provider: "anthropic";
45
- get modelName(): "claude-instant-1" | "claude-instant-1.2" | "claude-2" | "claude-2.0" | "claude-2.1";
46
- readonly contextWindowSize: number;
47
- readonly tokenizer: undefined;
48
- readonly countPromptTokens: undefined;
49
- callAPI<RESPONSE>(prompt: string, callOptions: FunctionCallOptions, options: {
50
- responseFormat: AnthropicTextGenerationResponseFormatType<RESPONSE>;
51
- }): Promise<RESPONSE>;
52
- get settingsForEvent(): Partial<AnthropicTextGenerationModelSettings>;
53
- doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
54
- response: {
55
- model: string;
56
- completion: string;
57
- stop_reason: string;
58
- };
59
- textGenerationResults: {
60
- text: string;
61
- finishReason: TextGenerationFinishReason;
62
- }[];
63
- }>;
64
- restoreGeneratedTexts(rawResponse: unknown): {
65
- response: {
66
- model: string;
67
- completion: string;
68
- stop_reason: string;
69
- };
70
- textGenerationResults: {
71
- text: string;
72
- finishReason: TextGenerationFinishReason;
73
- }[];
74
- };
75
- processTextGenerationResponse(response: AnthropicTextGenerationResponse): {
76
- response: {
77
- model: string;
78
- completion: string;
79
- stop_reason: string;
80
- };
81
- textGenerationResults: {
82
- text: string;
83
- finishReason: TextGenerationFinishReason;
84
- }[];
85
- };
86
- private translateFinishReason;
87
- doStreamText(prompt: string, options: FunctionCallOptions): Promise<AsyncIterable<Delta<{
88
- model: string;
89
- completion: string;
90
- stop_reason: string | null;
91
- }>>>;
92
- extractTextDelta(delta: unknown): string;
93
- /**
94
- * Returns this model with a text prompt template.
95
- */
96
- withTextPrompt(): PromptTemplateTextStreamingModel<string, string, AnthropicTextGenerationModelSettings, this>;
97
- /**
98
- * Returns this model with an instruction prompt template.
99
- */
100
- withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, string, AnthropicTextGenerationModelSettings, this>;
101
- /**
102
- * Returns this model with a chat prompt template.
103
- */
104
- withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, string, AnthropicTextGenerationModelSettings, this>;
105
- withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, AnthropicTextGenerationModelSettings, this>;
106
- withSettings(additionalSettings: Partial<AnthropicTextGenerationModelSettings>): this;
107
- }
108
- declare const anthropicTextGenerationResponseSchema: z.ZodObject<{
109
- completion: z.ZodString;
110
- stop_reason: z.ZodString;
111
- model: z.ZodString;
112
- }, "strip", z.ZodTypeAny, {
113
- model: string;
114
- completion: string;
115
- stop_reason: string;
116
- }, {
117
- model: string;
118
- completion: string;
119
- stop_reason: string;
120
- }>;
121
- export type AnthropicTextGenerationResponse = z.infer<typeof anthropicTextGenerationResponseSchema>;
122
- export type AnthropicTextGenerationResponseFormatType<T> = {
123
- stream: boolean;
124
- handler: ResponseHandler<T>;
125
- };
126
- export declare const AnthropicTextGenerationResponseFormat: {
127
- /**
128
- * Returns the response as a JSON object.
129
- */
130
- json: {
131
- stream: false;
132
- handler: ResponseHandler<{
133
- model: string;
134
- completion: string;
135
- stop_reason: string;
136
- }>;
137
- };
138
- /**
139
- * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
140
- * of the response stream.
141
- */
142
- deltaIterable: {
143
- stream: true;
144
- handler: ({ response }: {
145
- response: Response;
146
- }) => Promise<AsyncIterable<Delta<{
147
- model: string;
148
- completion: string;
149
- stop_reason: string | null;
150
- }>>>;
151
- };
152
- };
153
- export {};