modelfusion 0.114.1 → 0.116.0

This diff shows the changes between the publicly released contents of these two package versions as published to their public registry. It is provided for informational purposes only.
Files changed (42)
  1. package/CHANGELOG.md +52 -0
  2. package/README.md +5 -6
  3. package/classifier/SemanticClassifier.cjs +75 -0
  4. package/classifier/SemanticClassifier.d.ts +28 -0
  5. package/classifier/SemanticClassifier.js +71 -0
  6. package/classifier/index.cjs +17 -0
  7. package/classifier/index.d.ts +1 -0
  8. package/classifier/index.js +1 -0
  9. package/index.cjs +1 -0
  10. package/index.d.ts +1 -0
  11. package/index.js +1 -0
  12. package/model-provider/index.cjs +0 -1
  13. package/model-provider/index.d.ts +0 -1
  14. package/model-provider/index.js +0 -1
  15. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +13 -13
  16. package/model-provider/ollama/OllamaChatModel.d.ts +9 -9
  17. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +12 -12
  18. package/package.json +1 -1
  19. package/model-provider/anthropic/AnthropicApiConfiguration.cjs +0 -31
  20. package/model-provider/anthropic/AnthropicApiConfiguration.d.ts +0 -10
  21. package/model-provider/anthropic/AnthropicApiConfiguration.js +0 -27
  22. package/model-provider/anthropic/AnthropicError.cjs +0 -16
  23. package/model-provider/anthropic/AnthropicError.d.ts +0 -26
  24. package/model-provider/anthropic/AnthropicError.js +0 -13
  25. package/model-provider/anthropic/AnthropicFacade.cjs +0 -24
  26. package/model-provider/anthropic/AnthropicFacade.d.ts +0 -18
  27. package/model-provider/anthropic/AnthropicFacade.js +0 -19
  28. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +0 -82
  29. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +0 -17
  30. package/model-provider/anthropic/AnthropicPromptTemplate.js +0 -76
  31. package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs +0 -49
  32. package/model-provider/anthropic/AnthropicPromptTemplate.test.d.ts +0 -1
  33. package/model-provider/anthropic/AnthropicPromptTemplate.test.js +0 -47
  34. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +0 -254
  35. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +0 -153
  36. package/model-provider/anthropic/AnthropicTextGenerationModel.js +0 -250
  37. package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +0 -44
  38. package/model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts +0 -1
  39. package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +0 -42
  40. package/model-provider/anthropic/index.cjs +0 -33
  41. package/model-provider/anthropic/index.d.ts +0 -5
  42. package/model-provider/anthropic/index.js +0 -4
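
The two headline changes in this file list are the new `classifier/SemanticClassifier` module (files 3-8) and the removal of the entire built-in Anthropic provider (`package/model-provider/anthropic/*`, files 19-42). For orientation, the sketch below reconstructs how the deleted Anthropic facade was typically wired up, based only on the removed code shown in the hunks that follow; the lowercase `anthropic` export name and the note about `generateText`/`streamText` are assumptions, not something this diff documents.

```ts
// Sketch only (modelfusion 0.114.x, before this diff): constructing the Anthropic
// text generation model through the facade that is deleted below.
// Assumption: the facade was re-exported from the package index as `anthropic`.
import { anthropic } from "modelfusion";

// ANTHROPIC_API_KEY is read from the environment by default
// (see the removed AnthropicApiConfiguration / AnthropicFacade files).
const model = anthropic
  .TextGenerator({
    model: "claude-2.1", // keys and context window sizes: ANTHROPIC_TEXT_GENERATION_MODELS below
    maxGenerationTokens: 500,
    temperature: 0.7,
  })
  .withInstructionPrompt(); // wraps prompts in the "\n\nHuman: ...\n\nAssistant:" template shown below

// The resulting model would then be passed to modelfusion's generateText / streamText helpers.
```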

package/model-provider/anthropic/AnthropicError.d.ts
@@ -1,26 +0,0 @@
- import { z } from "zod";
- declare const anthropicErrorDataSchema: z.ZodObject<{
-     error: z.ZodObject<{
-         type: z.ZodString;
-         message: z.ZodString;
-     }, "strip", z.ZodTypeAny, {
-         message: string;
-         type: string;
-     }, {
-         message: string;
-         type: string;
-     }>;
- }, "strip", z.ZodTypeAny, {
-     error: {
-         message: string;
-         type: string;
-     };
- }, {
-     error: {
-         message: string;
-         type: string;
-     };
- }>;
- export type AnthropicErrorData = z.infer<typeof anthropicErrorDataSchema>;
- export declare const failedAnthropicCallResponseHandler: import("../../core/api/postToApi.js").ResponseHandler<import("../../index.js").ApiCallError>;
- export {};

package/model-provider/anthropic/AnthropicError.js
@@ -1,13 +0,0 @@
- import { z } from "zod";
- import { createJsonErrorResponseHandler } from "../../core/api/postToApi.js";
- import { zodSchema } from "../../core/schema/ZodSchema.js";
- const anthropicErrorDataSchema = z.object({
-     error: z.object({
-         type: z.string(),
-         message: z.string(),
-     }),
- });
- export const failedAnthropicCallResponseHandler = createJsonErrorResponseHandler({
-     errorSchema: zodSchema(anthropicErrorDataSchema),
-     errorToMessage: (error) => error.error.message,
- });
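
The handler above wires Anthropic's error payload into modelfusion's generic `ApiCallError`. The payload shape it validates, and the message it surfaces, look roughly like this (field values are illustrative, not taken from this diff):

```ts
// Illustrative payload matching anthropicErrorDataSchema above; the concrete
// type/message values are examples only.
const exampleErrorBody = {
  error: {
    type: "invalid_request_error",
    message: "max_tokens_to_sample: field required",
  },
};
// failedAnthropicCallResponseHandler parses such a body and reports
// exampleErrorBody.error.message as the ApiCallError message.
```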

package/model-provider/anthropic/AnthropicFacade.cjs
@@ -1,24 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.TextGenerator = exports.Api = void 0;
- const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
- const AnthropicTextGenerationModel_js_1 = require("./AnthropicTextGenerationModel.cjs");
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- function Api(settings) {
-     return new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration(settings);
- }
- exports.Api = Api;
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  *
-  * @return A new instance of {@link AnthropicTextGenerationModel}.
-  */
- function TextGenerator(settings) {
-     return new AnthropicTextGenerationModel_js_1.AnthropicTextGenerationModel(settings);
- }
- exports.TextGenerator = TextGenerator;

package/model-provider/anthropic/AnthropicFacade.d.ts
@@ -1,18 +0,0 @@
- import { PartialBaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlApiConfiguration.js";
- import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
- import { AnthropicTextGenerationModel, AnthropicTextGenerationModelSettings } from "./AnthropicTextGenerationModel.js";
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- export declare function Api(settings: PartialBaseUrlPartsApiConfigurationOptions & {
-     apiKey?: string;
- }): AnthropicApiConfiguration;
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  *
-  * @return A new instance of {@link AnthropicTextGenerationModel}.
-  */
- export declare function TextGenerator(settings: AnthropicTextGenerationModelSettings): AnthropicTextGenerationModel;

package/model-provider/anthropic/AnthropicFacade.js
@@ -1,19 +0,0 @@
- import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
- import { AnthropicTextGenerationModel, } from "./AnthropicTextGenerationModel.js";
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- export function Api(settings) {
-     return new AnthropicApiConfiguration(settings);
- }
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  *
-  * @return A new instance of {@link AnthropicTextGenerationModel}.
-  */
- export function TextGenerator(settings) {
-     return new AnthropicTextGenerationModel(settings);
- }

package/model-provider/anthropic/AnthropicPromptTemplate.cjs
@@ -1,82 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = exports.text = void 0;
- const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
- const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
- const HUMAN_PREFIX = "\n\nHuman:";
- const ASSISTANT_PREFIX = "\n\nAssistant:";
- /**
-  * Formats a text prompt as an Anthropic prompt.
-  */
- function text() {
-     return {
-         format(prompt) {
-             let text = "";
-             text += HUMAN_PREFIX;
-             text += prompt;
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- exports.text = text;
- /**
-  * Formats an instruction prompt as an Anthropic prompt.
-  */
- function instruction() {
-     return {
-         format(prompt) {
-             const instruction = (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
-             let text = prompt.system ?? "";
-             text += HUMAN_PREFIX;
-             text += instruction;
-             text += ASSISTANT_PREFIX;
-             if (prompt.responsePrefix != null) {
-                 text += prompt.responsePrefix;
-             }
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- exports.instruction = instruction;
- /**
-  * Formats a chat prompt as an Anthropic prompt.
-  *
-  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
-  */
- function chat() {
-     return {
-         format(prompt) {
-             let text = prompt.system ?? "";
-             for (const { role, content } of prompt.messages) {
-                 switch (role) {
-                     case "user": {
-                         const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
-                         text += HUMAN_PREFIX;
-                         text += textContent;
-                         break;
-                     }
-                     case "assistant": {
-                         text += ASSISTANT_PREFIX;
-                         text += content;
-                         break;
-                     }
-                     case "tool": {
-                         throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
-                     }
-                     default: {
-                         const _exhaustiveCheck = role;
-                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
-                     }
-                 }
-             }
-             // AI message prefix:
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- exports.chat = chat;

package/model-provider/anthropic/AnthropicPromptTemplate.d.ts
@@ -1,17 +0,0 @@
- import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
- import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
- /**
-  * Formats a text prompt as an Anthropic prompt.
-  */
- export declare function text(): TextGenerationPromptTemplate<string, string>;
- /**
-  * Formats an instruction prompt as an Anthropic prompt.
-  */
- export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
- /**
-  * Formats a chat prompt as an Anthropic prompt.
-  *
-  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
-  */
- export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;

package/model-provider/anthropic/AnthropicPromptTemplate.js
@@ -1,76 +0,0 @@
- import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
- import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
- const HUMAN_PREFIX = "\n\nHuman:";
- const ASSISTANT_PREFIX = "\n\nAssistant:";
- /**
-  * Formats a text prompt as an Anthropic prompt.
-  */
- export function text() {
-     return {
-         format(prompt) {
-             let text = "";
-             text += HUMAN_PREFIX;
-             text += prompt;
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- /**
-  * Formats an instruction prompt as an Anthropic prompt.
-  */
- export function instruction() {
-     return {
-         format(prompt) {
-             const instruction = validateContentIsString(prompt.instruction, prompt);
-             let text = prompt.system ?? "";
-             text += HUMAN_PREFIX;
-             text += instruction;
-             text += ASSISTANT_PREFIX;
-             if (prompt.responsePrefix != null) {
-                 text += prompt.responsePrefix;
-             }
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- /**
-  * Formats a chat prompt as an Anthropic prompt.
-  *
-  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
-  */
- export function chat() {
-     return {
-         format(prompt) {
-             let text = prompt.system ?? "";
-             for (const { role, content } of prompt.messages) {
-                 switch (role) {
-                     case "user": {
-                         const textContent = validateContentIsString(content, prompt);
-                         text += HUMAN_PREFIX;
-                         text += textContent;
-                         break;
-                     }
-                     case "assistant": {
-                         text += ASSISTANT_PREFIX;
-                         text += content;
-                         break;
-                     }
-                     case "tool": {
-                         throw new InvalidPromptError("Tool messages are not supported.", prompt);
-                     }
-                     default: {
-                         const _exhaustiveCheck = role;
-                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
-                     }
-                 }
-             }
-             // AI message prefix:
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
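
For reference, the removed chat template above flattens a system message plus alternating user/assistant messages into a single completion-style string and always appends a trailing `\n\nAssistant:` prefix for the model's reply. A minimal trace of `chat().format()`, derived directly from the implementation above (message contents are illustrative):

```ts
// Trace of the removed chat() template above; relative import path as in the deleted test file.
import { chat } from "./AnthropicPromptTemplate.js";

const prompt = chat().format({
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "Hello" },
    { role: "assistant", content: "Hi! How can I help?" },
    { role: "user", content: "Tell me a joke." },
  ],
});

// prompt ===
//   "You are a helpful assistant." +
//   "\n\nHuman:Hello" +
//   "\n\nAssistant:Hi! How can I help?" +
//   "\n\nHuman:Tell me a joke." +
//   "\n\nAssistant:"   // trailing prefix so the model continues as the assistant
```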

package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs
@@ -1,49 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- const AnthropicPromptTemplate_js_1 = require("./AnthropicPromptTemplate.cjs");
- describe("text prompt", () => {
-     it("should format prompt", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.text)().format("prompt");
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("instruction prompt", () => {
-     it("should format prompt with instruction", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with system and instruction", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
-             system: "system",
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with instruction and response prefix", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
-             instruction: "instruction",
-             responsePrefix: "response prefix",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("chat prompt", () => {
-     it("should format prompt with user message", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.chat)().format({
-             messages: [{ role: "user", content: "user message" }],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with user-assistant-user messages", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.chat)().format({
-             messages: [
-                 { role: "user", content: "1st user message" },
-                 { role: "assistant", content: "assistant message" },
-                 { role: "user", content: "2nd user message" },
-             ],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });

package/model-provider/anthropic/AnthropicPromptTemplate.test.js
@@ -1,47 +0,0 @@
- import { chat, instruction, text } from "./AnthropicPromptTemplate.js";
- describe("text prompt", () => {
-     it("should format prompt", () => {
-         const prompt = text().format("prompt");
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("instruction prompt", () => {
-     it("should format prompt with instruction", () => {
-         const prompt = instruction().format({
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with system and instruction", () => {
-         const prompt = instruction().format({
-             system: "system",
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with instruction and response prefix", () => {
-         const prompt = instruction().format({
-             instruction: "instruction",
-             responsePrefix: "response prefix",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("chat prompt", () => {
-     it("should format prompt with user message", () => {
-         const prompt = chat().format({
-             messages: [{ role: "user", content: "user message" }],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with user-assistant-user messages", () => {
-         const prompt = chat().format({
-             messages: [
-                 { role: "user", content: "1st user message" },
-                 { role: "assistant", content: "assistant message" },
-                 { role: "user", content: "2nd user message" },
-             ],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });

package/model-provider/anthropic/AnthropicTextGenerationModel.cjs
@@ -1,254 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.AnthropicTextGenerationResponseFormat = exports.AnthropicTextGenerationModel = exports.ANTHROPIC_TEXT_GENERATION_MODELS = void 0;
- const zod_1 = require("zod");
- const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
- const postToApi_js_1 = require("../../core/api/postToApi.cjs");
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
- const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
- const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
- const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
- const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
- const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
- const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
- const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
- const AnthropicError_js_1 = require("./AnthropicError.cjs");
- const AnthropicPromptTemplate_js_1 = require("./AnthropicPromptTemplate.cjs");
- exports.ANTHROPIC_TEXT_GENERATION_MODELS = {
-     "claude-instant-1": {
-         contextWindowSize: 100000,
-     },
-     "claude-instant-1.2": {
-         contextWindowSize: 100000,
-     },
-     "claude-2": {
-         contextWindowSize: 200000,
-     },
-     "claude-2.0": {
-         contextWindowSize: 100000,
-     },
-     "claude-2.1": {
-         contextWindowSize: 200000,
-     },
- };
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  */
- class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
-     constructor(settings) {
-         super({ settings });
-         Object.defineProperty(this, "provider", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: "anthropic"
-         });
-         Object.defineProperty(this, "contextWindowSize", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: void 0
-         });
-         Object.defineProperty(this, "tokenizer", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: undefined
-         });
-         Object.defineProperty(this, "countPromptTokens", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: undefined
-         });
-         this.contextWindowSize =
-             exports.ANTHROPIC_TEXT_GENERATION_MODELS[this.settings.model].contextWindowSize;
-     }
-     get modelName() {
-         return this.settings.model;
-     }
-     async callAPI(prompt, callOptions, options) {
-         const api = this.settings.api ?? new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration();
-         const responseFormat = options.responseFormat;
-         const abortSignal = callOptions.run?.abortSignal;
-         const userId = this.settings.userId;
-         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-             retry: api.retry,
-             throttle: api.throttle,
-             call: async () => (0, postToApi_js_1.postJsonToApi)({
-                 url: api.assembleUrl(`/complete`),
-                 headers: api.headers({
-                     functionType: callOptions.functionType,
-                     functionId: callOptions.functionId,
-                     run: callOptions.run,
-                     callId: callOptions.callId,
-                 }),
-                 body: {
-                     model: this.settings.model,
-                     prompt,
-                     stream: responseFormat.stream,
-                     max_tokens_to_sample: this.settings.maxGenerationTokens ?? 100,
-                     temperature: this.settings.temperature,
-                     top_k: this.settings.topK,
-                     top_p: this.settings.topP,
-                     stop_sequences: this.settings.stopSequences,
-                     metadata: userId != null ? { user_id: userId } : undefined,
-                 },
-                 failedResponseHandler: AnthropicError_js_1.failedAnthropicCallResponseHandler,
-                 successfulResponseHandler: responseFormat.handler,
-                 abortSignal,
-             }),
-         });
-     }
-     get settingsForEvent() {
-         const eventSettingProperties = [
-             ...TextGenerationModel_js_1.textGenerationModelProperties,
-             "temperature",
-             "topK",
-             "topP",
-             "userId",
-         ];
-         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
-     }
-     async doGenerateTexts(prompt, options) {
-         return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
-             responseFormat: exports.AnthropicTextGenerationResponseFormat.json,
-         }));
-     }
-     restoreGeneratedTexts(rawResponse) {
-         return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
-             structure: rawResponse,
-             schema: (0, ZodSchema_js_1.zodSchema)(anthropicTextGenerationResponseSchema),
-         }));
-     }
-     processTextGenerationResponse(response) {
-         return {
-             response,
-             textGenerationResults: [
-                 {
-                     text: response.completion,
-                     finishReason: this.translateFinishReason(response.stop_reason),
-                 },
-             ],
-         };
-     }
-     translateFinishReason(finishReason) {
-         switch (finishReason) {
-             case "stop_sequence":
-                 return "stop";
-             case "max_tokens":
-                 return "length";
-             default:
-                 return "unknown";
-         }
-     }
-     doStreamText(prompt, options) {
-         return this.callAPI(prompt, options, {
-             responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
-         });
-     }
-     extractTextDelta(delta) {
-         const chunk = delta;
-         return chunk.completion;
-     }
-     /**
-      * Returns this model with a text prompt template.
-      */
-     withTextPrompt() {
-         return this.withPromptTemplate((0, AnthropicPromptTemplate_js_1.text)());
-     }
-     /**
-      * Returns this model with an instruction prompt template.
-      */
-     withInstructionPrompt() {
-         return this.withPromptTemplate((0, AnthropicPromptTemplate_js_1.instruction)());
-     }
-     /**
-      * Returns this model with a chat prompt template.
-      */
-     withChatPrompt() {
-         return this.withPromptTemplate((0, AnthropicPromptTemplate_js_1.chat)());
-     }
-     withPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
-             model: this.withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
-     }
-     withSettings(additionalSettings) {
-         return new AnthropicTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
-     }
- }
- exports.AnthropicTextGenerationModel = AnthropicTextGenerationModel;
- const anthropicTextGenerationResponseSchema = zod_1.z.object({
-     completion: zod_1.z.string(),
-     stop_reason: zod_1.z.string(),
-     model: zod_1.z.string(),
- });
- const anthropicTextStreamChunkSchema = zod_1.z.object({
-     completion: zod_1.z.string(),
-     stop_reason: zod_1.z.string().nullable(),
-     model: zod_1.z.string(),
- });
- async function createAnthropicFullDeltaIterableQueue(stream) {
-     const queue = new AsyncQueue_js_1.AsyncQueue();
-     // process the stream asynchonously (no 'await' on purpose):
-     (0, parseEventSourceStream_js_1.parseEventSourceStream)({ stream })
-         .then(async (events) => {
-         try {
-             for await (const event of events) {
-                 if (event.event === "error") {
-                     queue.push({ type: "error", error: event.data });
-                     queue.close();
-                     return;
-                 }
-                 if (event.event !== "completion") {
-                     continue;
-                 }
-                 const data = event.data;
-                 const eventData = (0, parseJSON_js_1.parseJSON)({
-                     text: data,
-                     schema: (0, ZodSchema_js_1.zodSchema)(anthropicTextStreamChunkSchema),
-                 });
-                 queue.push({ type: "delta", deltaValue: eventData });
-                 if (eventData.stop_reason != null) {
-                     queue.close();
-                 }
-             }
-         }
-         catch (error) {
-             queue.push({ type: "error", error });
-             queue.close();
-         }
-     })
-         .catch((error) => {
-         queue.push({ type: "error", error });
-         queue.close();
-     });
-     return queue;
- }
- exports.AnthropicTextGenerationResponseFormat = {
-     /**
-      * Returns the response as a JSON object.
-      */
-     json: {
-         stream: false,
-         handler: (0, postToApi_js_1.createJsonResponseHandler)((0, ZodSchema_js_1.zodSchema)(anthropicTextGenerationResponseSchema)),
-     },
-     /**
-      * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
-      * of the response stream.
-      */
-     deltaIterable: {
-         stream: true,
-         handler: async ({ response }) => createAnthropicFullDeltaIterableQueue(response.body),
-     },
- };
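
The streaming path above resolves to an `AsyncQueue` of `{ type: "delta", deltaValue }` / `{ type: "error", error }` events, where `deltaValue` matches `anthropicTextStreamChunkSchema` and `extractTextDelta()` picks out `completion`. A rough sketch of draining it directly, assuming the queue is async-iterable (in practice modelfusion's `streamText` helper does this unwrapping; `formattedPrompt` and `callOptions` are placeholders, not part of this diff):

```ts
// Sketch: consuming the delta queue that doStreamText() above resolves to.
// Assumptions: AsyncQueue supports `for await`, and callOptions is whatever the
// framework normally passes through; library users would call streamText instead.
const deltaQueue = await model.doStreamText(formattedPrompt, callOptions);

for await (const event of deltaQueue) {
  if (event.type === "error") throw event.error;

  // Each "completion" SSE event carries incremental text in `completion`
  // (see anthropicTextStreamChunkSchema above).
  process.stdout.write(event.deltaValue.completion);

  if (event.deltaValue.stop_reason != null) break; // the queue also closes itself here
}
```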