modelfusion 0.114.1 → 0.115.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/CHANGELOG.md +6 -0
  2. package/README.md +5 -6
  3. package/model-provider/index.cjs +0 -1
  4. package/model-provider/index.d.ts +0 -1
  5. package/model-provider/index.js +0 -1
  6. package/package.json +1 -1
  7. package/model-provider/anthropic/AnthropicApiConfiguration.cjs +0 -31
  8. package/model-provider/anthropic/AnthropicApiConfiguration.d.ts +0 -10
  9. package/model-provider/anthropic/AnthropicApiConfiguration.js +0 -27
  10. package/model-provider/anthropic/AnthropicError.cjs +0 -16
  11. package/model-provider/anthropic/AnthropicError.d.ts +0 -26
  12. package/model-provider/anthropic/AnthropicError.js +0 -13
  13. package/model-provider/anthropic/AnthropicFacade.cjs +0 -24
  14. package/model-provider/anthropic/AnthropicFacade.d.ts +0 -18
  15. package/model-provider/anthropic/AnthropicFacade.js +0 -19
  16. package/model-provider/anthropic/AnthropicPromptTemplate.cjs +0 -82
  17. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +0 -17
  18. package/model-provider/anthropic/AnthropicPromptTemplate.js +0 -76
  19. package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs +0 -49
  20. package/model-provider/anthropic/AnthropicPromptTemplate.test.d.ts +0 -1
  21. package/model-provider/anthropic/AnthropicPromptTemplate.test.js +0 -47
  22. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +0 -254
  23. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +0 -153
  24. package/model-provider/anthropic/AnthropicTextGenerationModel.js +0 -250
  25. package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +0 -44
  26. package/model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts +0 -1
  27. package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +0 -42
  28. package/model-provider/anthropic/index.cjs +0 -33
  29. package/model-provider/anthropic/index.d.ts +0 -5
  30. package/model-provider/anthropic/index.js +0 -4
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
  # Changelog
 
+ ## v0.115.0 - 2024-01-05
+
+ ### Removed
+
+ - Anthropic support. Anthropic has a strong stance against open-source models and against non-US AI. I will not support them by providing a ModelFusion integration.
+
  ## v0.114.1 - 2024-01-05
 
  ### Fixed
package/README.md CHANGED
@@ -59,7 +59,7 @@ const text = await generateText(
  );
  ```
 
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
 
  #### streamText
 
@@ -76,7 +76,7 @@ for await (const textPart of textStream) {
  }
  ```
 
- Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
+ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Mistral](https://modelfusion.dev/integration/model-provider/mistral), [Cohere](https://modelfusion.dev/integration/model-provider/cohere)
 
  #### streamText with multi-modal prompt
 
@@ -412,9 +412,9 @@ Prompt templates let you use higher level prompt structures (such as text, instr
 
  ```ts
  const text = await generateText(
-   anthropic
-     .TextGenerator({
-       model: "claude-instant-1",
+   openai
+     .ChatTextGenerator({
+       // ...
      })
      .withTextPrompt(),
    "Write a short story about a robot learning to love"
@@ -473,7 +473,6 @@ const textStream = await streamText(
  | Prompt Template | Text Prompt | Instruction Prompt | Chat Prompt |
  | ---------------- | ----------- | ------------------ | ----------- |
  | OpenAI Chat | ✅ | ✅ | ✅ |
- | Anthropic | ✅ | ✅ | ✅ |
  | Llama 2 | ✅ | ✅ | ✅ |
  | ChatML | ✅ | ✅ | ✅ |
  | NeuralChat | ✅ | ✅ | ✅ |
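For downstream users, the README edit above also doubles as the migration path: the removed Anthropic facade is swapped for any of the remaining text-generation providers. A minimal sketch, assuming the OpenAI provider and an `OPENAI_API_KEY` in the environment (the model name is illustrative, not taken from this diff):

```ts
import { generateText, openai } from "modelfusion";

// Before 0.115.0: anthropic.TextGenerator({ model: "claude-instant-1" }).withTextPrompt()
const text = await generateText(
  openai
    .ChatTextGenerator({ model: "gpt-3.5-turbo" }) // illustrative model name
    .withTextPrompt(), // keeps the same plain-text prompt interface
  "Write a short story about a robot learning to love"
);
```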
package/model-provider/index.cjs CHANGED
@@ -14,7 +14,6 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- __exportStar(require("./anthropic/index.cjs"), exports);
  __exportStar(require("./automatic1111/index.cjs"), exports);
  __exportStar(require("./cohere/index.cjs"), exports);
  __exportStar(require("./elevenlabs/index.cjs"), exports);
package/model-provider/index.d.ts CHANGED
@@ -1,4 +1,3 @@
- export * from "./anthropic/index.js";
  export * from "./automatic1111/index.js";
  export * from "./cohere/index.js";
  export * from "./elevenlabs/index.js";
package/model-provider/index.js CHANGED
@@ -1,4 +1,3 @@
- export * from "./anthropic/index.js";
  export * from "./automatic1111/index.js";
  export * from "./cohere/index.js";
  export * from "./elevenlabs/index.js";
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "modelfusion",
    "description": "The TypeScript library for building AI applications.",
-   "version": "0.114.1",
+   "version": "0.115.0",
    "author": "Lars Grammel",
    "license": "MIT",
    "keywords": [
package/model-provider/anthropic/AnthropicApiConfiguration.cjs DELETED
@@ -1,31 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.AnthropicApiConfiguration = void 0;
- const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
- const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- class AnthropicApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfigurationWithDefaults {
-     constructor(settings = {}) {
-         super({
-             ...settings,
-             headers: {
-                 "x-api-key": (0, loadApiKey_js_1.loadApiKey)({
-                     apiKey: settings.apiKey,
-                     environmentVariableName: "ANTHROPIC_API_KEY",
-                     description: "Anthropic",
-                 }),
-                 "anthropic-version": "2023-06-01",
-             },
-             baseUrlDefaults: {
-                 protocol: "https",
-                 host: "api.anthropic.com",
-                 port: "443",
-                 path: "/v1",
-             },
-         });
-     }
- }
- exports.AnthropicApiConfiguration = AnthropicApiConfiguration;
package/model-provider/anthropic/AnthropicApiConfiguration.d.ts DELETED
@@ -1,10 +0,0 @@
- import { BaseUrlApiConfigurationWithDefaults, PartialBaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlApiConfiguration.js";
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- export declare class AnthropicApiConfiguration extends BaseUrlApiConfigurationWithDefaults {
-     constructor(settings?: PartialBaseUrlPartsApiConfigurationOptions & {
-         apiKey?: string;
-     });
- }
package/model-provider/anthropic/AnthropicApiConfiguration.js DELETED
@@ -1,27 +0,0 @@
- import { BaseUrlApiConfigurationWithDefaults, } from "../../core/api/BaseUrlApiConfiguration.js";
- import { loadApiKey } from "../../core/api/loadApiKey.js";
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- export class AnthropicApiConfiguration extends BaseUrlApiConfigurationWithDefaults {
-     constructor(settings = {}) {
-         super({
-             ...settings,
-             headers: {
-                 "x-api-key": loadApiKey({
-                     apiKey: settings.apiKey,
-                     environmentVariableName: "ANTHROPIC_API_KEY",
-                     description: "Anthropic",
-                 }),
-                 "anthropic-version": "2023-06-01",
-             },
-             baseUrlDefaults: {
-                 protocol: "https",
-                 host: "api.anthropic.com",
-                 port: "443",
-                 path: "/v1",
-             },
-         });
-     }
- }
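The removed configuration above resolves requests to `https://api.anthropic.com/v1` and attaches the `x-api-key` and `anthropic-version` headers. A minimal sketch of how it was instantiated (the key value is a placeholder; omitting `apiKey` falls back to the `ANTHROPIC_API_KEY` environment variable, as shown in the code):

```ts
// Hypothetical standalone use of the removed module shown above.
import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";

// Explicit key (placeholder value); omit `apiKey` to read ANTHROPIC_API_KEY instead.
const api = new AnthropicApiConfiguration({ apiKey: "sk-ant-..." });
// Requests built from this configuration target https://api.anthropic.com/v1
// with the "x-api-key" and "anthropic-version: 2023-06-01" headers set above.
```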
package/model-provider/anthropic/AnthropicError.cjs DELETED
@@ -1,16 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.failedAnthropicCallResponseHandler = void 0;
- const zod_1 = require("zod");
- const postToApi_js_1 = require("../../core/api/postToApi.cjs");
- const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
- const anthropicErrorDataSchema = zod_1.z.object({
-     error: zod_1.z.object({
-         type: zod_1.z.string(),
-         message: zod_1.z.string(),
-     }),
- });
- exports.failedAnthropicCallResponseHandler = (0, postToApi_js_1.createJsonErrorResponseHandler)({
-     errorSchema: (0, ZodSchema_js_1.zodSchema)(anthropicErrorDataSchema),
-     errorToMessage: (error) => error.error.message,
- });
package/model-provider/anthropic/AnthropicError.d.ts DELETED
@@ -1,26 +0,0 @@
- import { z } from "zod";
- declare const anthropicErrorDataSchema: z.ZodObject<{
-     error: z.ZodObject<{
-         type: z.ZodString;
-         message: z.ZodString;
-     }, "strip", z.ZodTypeAny, {
-         message: string;
-         type: string;
-     }, {
-         message: string;
-         type: string;
-     }>;
- }, "strip", z.ZodTypeAny, {
-     error: {
-         message: string;
-         type: string;
-     };
- }, {
-     error: {
-         message: string;
-         type: string;
-     };
- }>;
- export type AnthropicErrorData = z.infer<typeof anthropicErrorDataSchema>;
- export declare const failedAnthropicCallResponseHandler: import("../../core/api/postToApi.js").ResponseHandler<import("../../index.js").ApiCallError>;
- export {};
package/model-provider/anthropic/AnthropicError.js DELETED
@@ -1,13 +0,0 @@
- import { z } from "zod";
- import { createJsonErrorResponseHandler } from "../../core/api/postToApi.js";
- import { zodSchema } from "../../core/schema/ZodSchema.js";
- const anthropicErrorDataSchema = z.object({
-     error: z.object({
-         type: z.string(),
-         message: z.string(),
-     }),
- });
- export const failedAnthropicCallResponseHandler = createJsonErrorResponseHandler({
-     errorSchema: zodSchema(anthropicErrorDataSchema),
-     errorToMessage: (error) => error.error.message,
- });
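For context, the removed handler validated Anthropic error responses against `anthropicErrorDataSchema` and surfaced `error.message`. An illustrative payload; only the shape comes from the schema above, the field values are assumptions:

```ts
// Shape accepted by anthropicErrorDataSchema; values are illustrative.
const exampleErrorBody = {
  error: {
    type: "invalid_request_error",
    message: "max_tokens_to_sample: field required",
  },
};
// errorToMessage(exampleErrorBody) === "max_tokens_to_sample: field required"
```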
package/model-provider/anthropic/AnthropicFacade.cjs DELETED
@@ -1,24 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.TextGenerator = exports.Api = void 0;
- const AnthropicApiConfiguration_js_1 = require("./AnthropicApiConfiguration.cjs");
- const AnthropicTextGenerationModel_js_1 = require("./AnthropicTextGenerationModel.cjs");
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- function Api(settings) {
-     return new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration(settings);
- }
- exports.Api = Api;
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  *
-  * @return A new instance of {@link AnthropicTextGenerationModel}.
-  */
- function TextGenerator(settings) {
-     return new AnthropicTextGenerationModel_js_1.AnthropicTextGenerationModel(settings);
- }
- exports.TextGenerator = TextGenerator;
package/model-provider/anthropic/AnthropicFacade.d.ts DELETED
@@ -1,18 +0,0 @@
- import { PartialBaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlApiConfiguration.js";
- import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
- import { AnthropicTextGenerationModel, AnthropicTextGenerationModelSettings } from "./AnthropicTextGenerationModel.js";
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- export declare function Api(settings: PartialBaseUrlPartsApiConfigurationOptions & {
-     apiKey?: string;
- }): AnthropicApiConfiguration;
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  *
-  * @return A new instance of {@link AnthropicTextGenerationModel}.
-  */
- export declare function TextGenerator(settings: AnthropicTextGenerationModelSettings): AnthropicTextGenerationModel;
package/model-provider/anthropic/AnthropicFacade.js DELETED
@@ -1,19 +0,0 @@
- import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
- import { AnthropicTextGenerationModel, } from "./AnthropicTextGenerationModel.js";
- /**
-  * Creates an API configuration for the Anthropic API.
-  * It calls the API at https://api.anthropic.com/v1 and uses the `ANTHROPIC_API_KEY` env variable by default.
-  */
- export function Api(settings) {
-     return new AnthropicApiConfiguration(settings);
- }
- /**
-  * Create a text generation model that calls the Anthropic API.
-  *
-  * @see https://docs.anthropic.com/claude/reference/complete_post
-  *
-  * @return A new instance of {@link AnthropicTextGenerationModel}.
-  */
- export function TextGenerator(settings) {
-     return new AnthropicTextGenerationModel(settings);
- }
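For reference, these two facade functions were the public entry points being removed. A minimal sketch of pre-0.115.0 usage, assuming the `anthropic` namespace export and the model name from the old README example (the explicit `api` setting is optional and shown only to connect the two facade functions):

```ts
import { generateText, anthropic } from "modelfusion"; // works up to 0.114.x only

const text = await generateText(
  anthropic
    .TextGenerator({
      model: "claude-instant-1", // model name from the old README example
      api: anthropic.Api({}), // optional; defaults to ANTHROPIC_API_KEY from the environment
    })
    .withTextPrompt(),
  "Write a short story about a robot learning to love"
);
```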
package/model-provider/anthropic/AnthropicPromptTemplate.cjs DELETED
@@ -1,82 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.chat = exports.instruction = exports.text = void 0;
- const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
- const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
- const HUMAN_PREFIX = "\n\nHuman:";
- const ASSISTANT_PREFIX = "\n\nAssistant:";
- /**
-  * Formats a text prompt as an Anthropic prompt.
-  */
- function text() {
-     return {
-         format(prompt) {
-             let text = "";
-             text += HUMAN_PREFIX;
-             text += prompt;
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- exports.text = text;
- /**
-  * Formats an instruction prompt as an Anthropic prompt.
-  */
- function instruction() {
-     return {
-         format(prompt) {
-             const instruction = (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
-             let text = prompt.system ?? "";
-             text += HUMAN_PREFIX;
-             text += instruction;
-             text += ASSISTANT_PREFIX;
-             if (prompt.responsePrefix != null) {
-                 text += prompt.responsePrefix;
-             }
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- exports.instruction = instruction;
- /**
-  * Formats a chat prompt as an Anthropic prompt.
-  *
-  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
-  */
- function chat() {
-     return {
-         format(prompt) {
-             let text = prompt.system ?? "";
-             for (const { role, content } of prompt.messages) {
-                 switch (role) {
-                     case "user": {
-                         const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
-                         text += HUMAN_PREFIX;
-                         text += textContent;
-                         break;
-                     }
-                     case "assistant": {
-                         text += ASSISTANT_PREFIX;
-                         text += content;
-                         break;
-                     }
-                     case "tool": {
-                         throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
-                     }
-                     default: {
-                         const _exhaustiveCheck = role;
-                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
-                     }
-                 }
-             }
-             // AI message prefix:
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- exports.chat = chat;
package/model-provider/anthropic/AnthropicPromptTemplate.d.ts DELETED
@@ -1,17 +0,0 @@
- import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
- import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
- import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
- /**
-  * Formats a text prompt as an Anthropic prompt.
-  */
- export declare function text(): TextGenerationPromptTemplate<string, string>;
- /**
-  * Formats an instruction prompt as an Anthropic prompt.
-  */
- export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
- /**
-  * Formats a chat prompt as an Anthropic prompt.
-  *
-  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
-  */
- export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
package/model-provider/anthropic/AnthropicPromptTemplate.js DELETED
@@ -1,76 +0,0 @@
- import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
- import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
- const HUMAN_PREFIX = "\n\nHuman:";
- const ASSISTANT_PREFIX = "\n\nAssistant:";
- /**
-  * Formats a text prompt as an Anthropic prompt.
-  */
- export function text() {
-     return {
-         format(prompt) {
-             let text = "";
-             text += HUMAN_PREFIX;
-             text += prompt;
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- /**
-  * Formats an instruction prompt as an Anthropic prompt.
-  */
- export function instruction() {
-     return {
-         format(prompt) {
-             const instruction = validateContentIsString(prompt.instruction, prompt);
-             let text = prompt.system ?? "";
-             text += HUMAN_PREFIX;
-             text += instruction;
-             text += ASSISTANT_PREFIX;
-             if (prompt.responsePrefix != null) {
-                 text += prompt.responsePrefix;
-             }
-             return text;
-         },
-         stopSequences: [],
-     };
- }
- /**
-  * Formats a chat prompt as an Anthropic prompt.
-  *
-  * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
-  */
- export function chat() {
-     return {
-         format(prompt) {
-             let text = prompt.system ?? "";
-             for (const { role, content } of prompt.messages) {
-                 switch (role) {
-                     case "user": {
-                         const textContent = validateContentIsString(content, prompt);
-                         text += HUMAN_PREFIX;
-                         text += textContent;
-                         break;
-                     }
-                     case "assistant": {
-                         text += ASSISTANT_PREFIX;
-                         text += content;
-                         break;
-                     }
-                     case "tool": {
-                         throw new InvalidPromptError("Tool messages are not supported.", prompt);
-                     }
-                     default: {
-                         const _exhaustiveCheck = role;
-                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
-                     }
-                 }
-             }
-             // AI message prefix:
-             text += ASSISTANT_PREFIX;
-             return text;
-         },
-         stopSequences: [],
-     };
- }
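To make the removed formatting concrete: the templates above build a single string from `HUMAN_PREFIX` (`"\n\nHuman:"`) and `ASSISTANT_PREFIX` (`"\n\nAssistant:"`). A minimal sketch of what `chat()` produced for a short conversation (hypothetical standalone use of the removed module; the message text is illustrative):

```ts
import { chat } from "./AnthropicPromptTemplate.js"; // removed module shown above

const formatted = chat().format({
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "Hello" },
    { role: "assistant", content: "Hi there!" },
    { role: "user", content: "Tell me a joke." },
  ],
});

// formatted ===
// "You are a helpful assistant.\n\nHuman:Hello\n\nAssistant:Hi there!\n\nHuman:Tell me a joke.\n\nAssistant:"
// (the template appends content directly after each prefix, without a space)
```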
package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs DELETED
@@ -1,49 +0,0 @@
- "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- const AnthropicPromptTemplate_js_1 = require("./AnthropicPromptTemplate.cjs");
- describe("text prompt", () => {
-     it("should format prompt", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.text)().format("prompt");
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("instruction prompt", () => {
-     it("should format prompt with instruction", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with system and instruction", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
-             system: "system",
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with instruction and response prefix", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.instruction)().format({
-             instruction: "instruction",
-             responsePrefix: "response prefix",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("chat prompt", () => {
-     it("should format prompt with user message", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.chat)().format({
-             messages: [{ role: "user", content: "user message" }],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with user-assistant-user messages", () => {
-         const prompt = (0, AnthropicPromptTemplate_js_1.chat)().format({
-             messages: [
-                 { role: "user", content: "1st user message" },
-                 { role: "assistant", content: "assistant message" },
-                 { role: "user", content: "2nd user message" },
-             ],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });
package/model-provider/anthropic/AnthropicPromptTemplate.test.js DELETED
@@ -1,47 +0,0 @@
- import { chat, instruction, text } from "./AnthropicPromptTemplate.js";
- describe("text prompt", () => {
-     it("should format prompt", () => {
-         const prompt = text().format("prompt");
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("instruction prompt", () => {
-     it("should format prompt with instruction", () => {
-         const prompt = instruction().format({
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with system and instruction", () => {
-         const prompt = instruction().format({
-             system: "system",
-             instruction: "instruction",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with instruction and response prefix", () => {
-         const prompt = instruction().format({
-             instruction: "instruction",
-             responsePrefix: "response prefix",
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });
- describe("chat prompt", () => {
-     it("should format prompt with user message", () => {
-         const prompt = chat().format({
-             messages: [{ role: "user", content: "user message" }],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
-     it("should format prompt with user-assistant-user messages", () => {
-         const prompt = chat().format({
-             messages: [
-                 { role: "user", content: "1st user message" },
-                 { role: "assistant", content: "assistant message" },
-                 { role: "user", content: "2nd user message" },
-             ],
-         });
-         expect(prompt).toMatchSnapshot();
-     });
- });