@langchain/anthropic 0.1.20 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +15 -1
- package/dist/chat_models.d.ts +2 -2
- package/dist/chat_models.js +15 -1
- package/dist/load/import_map.cjs +1 -1
- package/dist/load/import_map.js +1 -1
- package/package.json +6 -5
- package/dist/tests/agent.int.test.d.ts +0 -1
- package/dist/tests/agent.int.test.js +0 -39
- package/dist/tests/chat_models-tools.int.test.d.ts +0 -1
- package/dist/tests/chat_models-tools.int.test.js +0 -278
- package/dist/tests/chat_models.int.test.d.ts +0 -1
- package/dist/tests/chat_models.int.test.js +0 -275
- package/dist/tests/chat_models.test.d.ts +0 -1
- package/dist/tests/chat_models.test.js +0 -88
package/dist/chat_models.cjs
CHANGED
@@ -27,11 +27,23 @@ function _formatImage(imageUrl) {
     };
 }
 function anthropicResponseToChatMessages(messages, additionalKwargs) {
+    const usage = additionalKwargs.usage;
+    const usageMetadata = usage != null
+        ? {
+            input_tokens: usage.input_tokens ?? 0,
+            output_tokens: usage.output_tokens ?? 0,
+            total_tokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0),
+        }
+        : undefined;
     if (messages.length === 1 && messages[0].type === "text") {
         return [
             {
                 text: messages[0].text,
-                message: new messages_1.AIMessage(
+                message: new messages_1.AIMessage({
+                    content: messages[0].text,
+                    additional_kwargs: additionalKwargs,
+                    usage_metadata: usageMetadata,
+                }),
             },
         ];
     }
@@ -45,6 +57,7 @@ function anthropicResponseToChatMessages(messages, additionalKwargs) {
                 content: messages,
                 additional_kwargs: additionalKwargs,
                 tool_calls: toolCalls,
+                usage_metadata: usageMetadata,
             }),
         },
     ];
@@ -590,6 +603,7 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
         }, options);
         const { content, ...additionalKwargs } = response;
         const generations = anthropicResponseToChatMessages(content, additionalKwargs);
+        // eslint-disable-next-line @typescript-eslint/no-unused-vars
         const { role: _role, type: _type, ...rest } = additionalKwargs;
         return { generations, llmOutput: rest };
     }
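The hunks above attach token accounting to the AIMessage returned by the model. A minimal sketch of how a caller might read the new field after upgrading (the model name below is illustrative, not taken from this diff):

import { ChatAnthropic } from "@langchain/anthropic";

// Sketch: usage_metadata is populated from the Anthropic usage block,
// with input_tokens, output_tokens, and total_tokens per the diff above.
const model = new ChatAnthropic({ modelName: "claude-3-haiku-20240307" });
const aiMessage = await model.invoke("Hello!");
console.log(aiMessage.usage_metadata?.input_tokens);
console.log(aiMessage.usage_metadata?.output_tokens);
console.log(aiMessage.usage_metadata?.total_tokens);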
package/dist/chat_models.d.ts
CHANGED
@@ -6,7 +6,7 @@ import { ChatGeneration, ChatGenerationChunk, type ChatResult } from "@langchain
 import { BaseChatModel, LangSmithParams, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
 import { StructuredOutputMethodOptions, type BaseLanguageModelCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
 import { StructuredToolInterface } from "@langchain/core/tools";
-import { Runnable
+import { Runnable } from "@langchain/core/runnables";
 import { ToolCall } from "@langchain/core/messages/tool";
 import { z } from "zod";
 import { AnthropicToolResponse } from "./types.js";
@@ -151,7 +151,7 @@ export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCall
      * @throws {Error} If a mix of AnthropicTools and StructuredTools are passed.
      */
     formatStructuredToolToAnthropic(tools: ChatAnthropicCallOptions["tools"]): AnthropicTool[] | undefined;
-    bindTools(tools: (AnthropicTool | StructuredToolInterface)[], kwargs?: Partial<CallOptions>):
+    bindTools(tools: (AnthropicTool | StructuredToolInterface)[], kwargs?: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
     /**
      * Get the parameters used to invoke the model
      */
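With bindTools declared to return Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>, the bound model is itself invocable. A minimal sketch, reusing the plain AnthropicTool object shape that appears in the removed integration tests further down; the tool and model name here are illustrative:

import { ChatAnthropic } from "@langchain/anthropic";

// Illustrative tool definition in the AnthropicTool shape accepted by bindTools.
const getWeatherTool = {
  name: "get_weather",
  description: "Get the weather of a specific location and return the temperature in Celsius.",
  input_schema: {
    type: "object",
    properties: {
      location: { type: "string", description: "The name of city to get the weather for." },
    },
    required: ["location"],
  },
};

const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });
// bindTools now advertises Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>.
const modelWithTools = model.bindTools([getWeatherTool]);
const result = await modelWithTools.invoke("What is the weather in SF today?");
console.log(result.tool_calls?.[0]);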
package/dist/chat_models.js
CHANGED
@@ -24,11 +24,23 @@ function _formatImage(imageUrl) {
     };
 }
 function anthropicResponseToChatMessages(messages, additionalKwargs) {
+    const usage = additionalKwargs.usage;
+    const usageMetadata = usage != null
+        ? {
+            input_tokens: usage.input_tokens ?? 0,
+            output_tokens: usage.output_tokens ?? 0,
+            total_tokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0),
+        }
+        : undefined;
     if (messages.length === 1 && messages[0].type === "text") {
         return [
             {
                 text: messages[0].text,
-                message: new AIMessage(
+                message: new AIMessage({
+                    content: messages[0].text,
+                    additional_kwargs: additionalKwargs,
+                    usage_metadata: usageMetadata,
+                }),
             },
         ];
     }
@@ -42,6 +54,7 @@ function anthropicResponseToChatMessages(messages, additionalKwargs) {
                 content: messages,
                 additional_kwargs: additionalKwargs,
                 tool_calls: toolCalls,
+                usage_metadata: usageMetadata,
             }),
         },
     ];
@@ -586,6 +599,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
         }, options);
         const { content, ...additionalKwargs } = response;
         const generations = anthropicResponseToChatMessages(content, additionalKwargs);
+        // eslint-disable-next-line @typescript-eslint/no-unused-vars
         const { role: _role, type: _type, ...rest } = additionalKwargs;
         return { generations, llmOutput: rest };
     }
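This mirrors the CommonJS change above in the ESM build. The raw Anthropic usage block also remains visible outside of usage_metadata: one of the removed integration tests below asserts that res.response_metadata.usage is defined, and llmOutput here still carries the remaining additionalKwargs. A rough sketch of reading both; the model name is illustrative and the exact response_metadata shape is an assumption based on that test:

import { ChatAnthropic } from "@langchain/anthropic";

const chat = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229", maxRetries: 0 });
const res = await chat.invoke("Hello!");
// Provider-shaped usage, asserted by the removed chat_models.int.test.js.
console.log(res.response_metadata.usage);
// Normalized counts added in this release.
console.log(res.usage_metadata);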
package/dist/load/import_map.cjs
CHANGED
@@ -1,5 +1,5 @@
 "use strict";
-// Auto-generated by
+// Auto-generated by build script. Do not edit manually.
 var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
     if (k2 === undefined) k2 = k;
     var desc = Object.getOwnPropertyDescriptor(m, k);
package/dist/load/import_map.js
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/anthropic",
-  "version": "0.
+  "version": "0.2.0",
   "description": "Anthropic integrations for LangChain.js",
   "type": "module",
   "engines": {
@@ -14,7 +14,8 @@
   },
   "homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-anthropic/",
   "scripts": {
-    "build": "yarn
+    "build": "yarn turbo:command build:internal --filter=@langchain/anthropic",
+    "build:internal": "yarn lc-build:v2 --create-entrypoints --pre --tree-shaking --gen-maps",
     "build:deps": "yarn run turbo:command build --filter=@langchain/core",
     "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/",
     "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && yarn move-cjs-to-dist && rimraf dist-cjs",
@@ -24,7 +25,7 @@
     "lint:dpdm": "dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
     "lint": "yarn lint:eslint && yarn lint:dpdm",
     "lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
-    "clean": "
+    "clean": "rm -rf .turbo dist/",
     "prepack": "yarn build",
     "test": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\.int\.test.ts --testTimeout 30000 --maxWorkers=50%",
     "test:watch": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\.int\.test.ts",
@@ -40,7 +41,7 @@
   "license": "MIT",
   "dependencies": {
     "@anthropic-ai/sdk": "^0.21.0",
-    "@langchain/core": "
+    "@langchain/core": ">=0.2.5 <0.3.0",
     "fast-xml-parser": "^4.3.5",
     "zod": "^3.22.4",
     "zod-to-json-schema": "^3.22.4"
@@ -48,7 +49,7 @@
   "devDependencies": {
     "@jest/globals": "^29.5.0",
     "@langchain/community": "workspace:*",
-    "@langchain/scripts": "~0.0",
+    "@langchain/scripts": "~0.0.14",
     "@swc/core": "^1.3.90",
     "@swc/jest": "^0.2.29",
     "dpdm": "^3.12.0",
package/dist/tests/agent.int.test.d.ts
DELETED
@@ -1 +0,0 @@
export {};
package/dist/tests/agent.int.test.js
DELETED
@@ -1,39 +0,0 @@
// import { test, expect } from "@jest/globals";
// import { ChatPromptTemplate } from "@langchain/core/prompts";
// import { TavilySearchResults } from "@langchain/community/tools/tavily_search";
// import { AgentExecutor, createToolCallingAgent } from "langchain/agents";
// import { ChatAnthropic } from "../index.js";
// const tools = [new TavilySearchResults({ maxResults: 1 })];
// TODO: This test breaks CI build due to dependencies. Figure out a way around it.
test("createToolCallingAgent works", async () => {
    // const prompt = ChatPromptTemplate.fromMessages([
    //   ["system", "You are a helpful assistant"],
    //   ["placeholder", "{chat_history}"],
    //   ["human", "{input}"],
    //   ["placeholder", "{agent_scratchpad}"],
    // ]);
    // const llm = new ChatAnthropic({
    //   modelName: "claude-3-sonnet-20240229",
    //   temperature: 0,
    // });
    // const agent = await createToolCallingAgent({
    //   llm,
    //   tools,
    //   prompt,
    // });
    // const agentExecutor = new AgentExecutor({
    //   agent,
    //   tools,
    // });
    // const input = "what is the current weather in SF?";
    // const result = await agentExecutor.invoke({
    //   input,
    // });
    // console.log(result);
    // expect(result.input).toBe(input);
    // expect(typeof result.output).toBe("string");
    // // Length greater than 10 because any less than that would warrant
    // // an investigation into why such a short generation was returned.
    // expect(result.output.length).toBeGreaterThan(10);
});
export {};
package/dist/tests/chat_models-tools.int.test.d.ts
DELETED
@@ -1 +0,0 @@
export {};
package/dist/tests/chat_models-tools.int.test.js
DELETED
@@ -1,278 +0,0 @@
/* eslint-disable no-process-env */
import { expect, test } from "@jest/globals";
import { AIMessage, HumanMessage, ToolMessage } from "@langchain/core/messages";
import { StructuredTool } from "@langchain/core/tools";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatAnthropic } from "../chat_models.js";
const zodSchema = z
    .object({
    location: z.string().describe("The name of city to get the weather for."),
})
    .describe("Get the weather of a specific location and return the temperature in Celsius.");
class WeatherTool extends StructuredTool {
    constructor() {
        super(...arguments);
        Object.defineProperty(this, "schema", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: z.object({
                location: z.string().describe("The name of city to get the weather for."),
            })
        });
        Object.defineProperty(this, "description", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "Get the weather of a specific location and return the temperature in Celsius."
        });
        Object.defineProperty(this, "name", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "get_weather"
        });
    }
    async _call(input) {
        console.log(`WeatherTool called with input: ${input}`);
        return `The weather in ${input.location} is 25°C`;
    }
}
const model = new ChatAnthropic({
    modelName: "claude-3-sonnet-20240229",
    temperature: 0,
});
const anthropicTool = {
    name: "get_weather",
    description: "Get the weather of a specific location and return the temperature in Celsius.",
    input_schema: {
        type: "object",
        properties: {
            location: {
                type: "string",
                description: "The name of city to get the weather for.",
            },
        },
        required: ["location"],
    },
};
test("Few shotting with tool calls", async () => {
    const chat = model.bindTools([new WeatherTool()]);
    const res = await chat.invoke([
        new HumanMessage("What is the weather in SF?"),
        new AIMessage({
            content: "Let me look up the current weather.",
            tool_calls: [
                {
                    id: "toolu_feiwjf9u98r389u498",
                    name: "get_weather",
                    args: {
                        location: "SF",
                    },
                },
            ],
        }),
        new ToolMessage({
            tool_call_id: "toolu_feiwjf9u98r389u498",
            content: "It is currently 24 degrees with hail in San Francisco.",
        }),
        new AIMessage("It is currently 24 degrees in San Francisco with hail in San Francisco."),
        new HumanMessage("What did you say the weather was?"),
    ]);
    console.log(res);
    expect(res.content).toContain("24");
});
test("Can bind & invoke StructuredTools", async () => {
    const tools = [new WeatherTool()];
    const modelWithTools = model.bindTools(tools);
    const result = await modelWithTools.invoke("What is the weather in SF today?");
    console.log({
        tool_calls: JSON.stringify(result.content, null, 2),
    }, "Can bind & invoke StructuredTools");
    expect(Array.isArray(result.content)).toBeTruthy();
    if (!Array.isArray(result.content)) {
        throw new Error("Content is not an array");
    }
    let toolCall;
    result.content.forEach((item) => {
        if (item.type === "tool_use") {
            toolCall = item;
        }
    });
    if (!toolCall) {
        throw new Error("No tool call found");
    }
    expect(toolCall).toBeTruthy();
    const { name, input } = toolCall;
    expect(toolCall.input).toEqual(result.tool_calls?.[0].args);
    expect(name).toBe("get_weather");
    expect(input).toBeTruthy();
    expect(input.location).toBeTruthy();
    const result2 = await modelWithTools.invoke([
        new HumanMessage("What is the weather in SF today?"),
        result,
        new ToolMessage({
            tool_call_id: result.tool_calls?.[0].id ?? "",
            content: "The weather in San Francisco is currently 59 degrees and sunny.",
        }),
        new AIMessage("The weather in San Francisco is currently 59 degrees and sunny."),
        new HumanMessage("What did you say the weather was?"),
    ]);
    console.log(result2);
    // This should work, but Anthorpic is too skeptical
    expect(result2.content).toContain("59");
});
test("Can bind & invoke AnthropicTools", async () => {
    const modelWithTools = model.bind({
        tools: [anthropicTool],
    });
    const result = await modelWithTools.invoke("What is the weather in London today?");
    console.log({
        tool_calls: JSON.stringify(result.content, null, 2),
    }, "Can bind & invoke StructuredTools");
    expect(Array.isArray(result.content)).toBeTruthy();
    if (!Array.isArray(result.content)) {
        throw new Error("Content is not an array");
    }
    let toolCall;
    result.content.forEach((item) => {
        if (item.type === "tool_use") {
            toolCall = item;
        }
    });
    if (!toolCall) {
        throw new Error("No tool call found");
    }
    expect(toolCall).toBeTruthy();
    const { name, input } = toolCall;
    expect(name).toBe("get_weather");
    expect(input).toBeTruthy();
    expect(input.location).toBeTruthy();
});
test("Can bind & stream AnthropicTools", async () => {
    const modelWithTools = model.bind({
        tools: [anthropicTool],
    });
    const result = await modelWithTools.stream("What is the weather in London today?");
    let finalMessage;
    for await (const item of result) {
        console.log("item", JSON.stringify(item, null, 2));
        finalMessage = item;
    }
    if (!finalMessage) {
        throw new Error("No final message returned");
    }
    console.log({
        tool_calls: JSON.stringify(finalMessage.content, null, 2),
    }, "Can bind & invoke StructuredTools");
    expect(Array.isArray(finalMessage.content)).toBeTruthy();
    if (!Array.isArray(finalMessage.content)) {
        throw new Error("Content is not an array");
    }
    let toolCall;
    finalMessage.content.forEach((item) => {
        if (item.type === "tool_use") {
            toolCall = item;
        }
    });
    if (!toolCall) {
        throw new Error("No tool call found");
    }
    expect(toolCall).toBeTruthy();
    const { name, input } = toolCall;
    expect(name).toBe("get_weather");
    expect(input).toBeTruthy();
    expect(input.location).toBeTruthy();
});
test("withStructuredOutput with zod schema", async () => {
    const modelWithTools = model.withStructuredOutput(zodSchema, {
        name: "get_weather",
    });
    const result = await modelWithTools.invoke("What is the weather in London today?");
    console.log({
        result,
    }, "withStructuredOutput with zod schema");
    expect(typeof result.location).toBe("string");
});
test("withStructuredOutput with AnthropicTool", async () => {
    const modelWithTools = model.withStructuredOutput(anthropicTool, {
        name: anthropicTool.name,
    });
    const result = await modelWithTools.invoke("What is the weather in London today?");
    console.log({
        result,
    }, "withStructuredOutput with AnthropicTool");
    expect(typeof result.location).toBe("string");
});
test("withStructuredOutput JSON Schema only", async () => {
    const jsonSchema = zodToJsonSchema(zodSchema);
    const modelWithTools = model.withStructuredOutput(jsonSchema, {
        name: "get_weather",
    });
    const result = await modelWithTools.invoke("What is the weather in London today?");
    console.log({
        result,
    }, "withStructuredOutput JSON Schema only");
    expect(typeof result.location).toBe("string");
});
test("Can pass tool_choice", async () => {
    const tool1 = {
        name: "get_weather",
        description: "Get the weather of a specific location and return the temperature in Celsius.",
        input_schema: {
            type: "object",
            properties: {
                location: {
                    type: "string",
                    description: "The name of city to get the weather for.",
                },
            },
            required: ["location"],
        },
    };
    const tool2 = {
        name: "calculator",
        description: "Calculate any math expression and return the result.",
        input_schema: {
            type: "object",
            properties: {
                expression: {
                    type: "string",
                    description: "The math expression to calculate.",
                },
            },
            required: ["expression"],
        },
    };
    const tools = [tool1, tool2];
    const modelWithTools = model.bindTools(tools, {
        tool_choice: {
            type: "tool",
            name: "get_weather",
        },
    });
    const result = await modelWithTools.invoke("What is the sum of 272818 and 281818?");
    console.log({
        tool_calls: JSON.stringify(result.content, null, 2),
    }, "Can bind & invoke StructuredTools");
    expect(Array.isArray(result.content)).toBeTruthy();
    if (!Array.isArray(result.content)) {
        throw new Error("Content is not an array");
    }
    let toolCall;
    result.content.forEach((item) => {
        if (item.type === "tool_use") {
            toolCall = item;
        }
    });
    if (!toolCall) {
        throw new Error("No tool call found");
    }
    expect(toolCall).toBeTruthy();
    const { name, input } = toolCall;
    expect(toolCall.input).toEqual(result.tool_calls?.[0].args);
    expect(name).toBe("get_weather");
    expect(input).toBeTruthy();
    expect(input.location).toBeTruthy();
});
package/dist/tests/chat_models.int.test.d.ts
DELETED
@@ -1 +0,0 @@
export {};
package/dist/tests/chat_models.int.test.js
DELETED
@@ -1,275 +0,0 @@
/* eslint-disable no-process-env */
import { expect, test } from "@jest/globals";
import { HumanMessage } from "@langchain/core/messages";
import { ChatPromptValue } from "@langchain/core/prompt_values";
import { PromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { ChatAnthropic } from "../chat_models.js";
test("Test ChatAnthropic", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
    });
    const message = new HumanMessage("Hello!");
    const res = await chat.invoke([message]);
    console.log({ res });
    expect(res.response_metadata.usage).toBeDefined();
});
test("Test ChatAnthropic Generate", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
    });
    const message = new HumanMessage("Hello!");
    const res = await chat.generate([[message], [message]]);
    expect(res.generations.length).toBe(2);
    for (const generation of res.generations) {
        expect(generation.length).toBe(1);
        for (const message of generation) {
            console.log(message.text);
        }
    }
    console.log({ res });
});
test.skip("Test ChatAnthropic Generate w/ ClientOptions", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        clientOptions: {
            defaultHeaders: {
                "Helicone-Auth": "HELICONE_API_KEY",
            },
        },
    });
    const message = new HumanMessage("Hello!");
    const res = await chat.generate([[message], [message]]);
    expect(res.generations.length).toBe(2);
    for (const generation of res.generations) {
        expect(generation.length).toBe(1);
        for (const message of generation) {
            console.log(message.text);
        }
    }
    console.log({ res });
});
test("Test ChatAnthropic Generate with a signal in call options", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
    });
    const controller = new AbortController();
    const message = new HumanMessage("How is your day going? Be extremely verbose!");
    await expect(() => {
        const res = chat.generate([[message], [message]], {
            signal: controller.signal,
        });
        setTimeout(() => {
            controller.abort();
        }, 1000);
        return res;
    }).rejects.toThrow();
}, 10000);
test("Test ChatAnthropic tokenUsage with a batch", async () => {
    const model = new ChatAnthropic({
        temperature: 0,
        maxRetries: 0,
        modelName: "claude-3-sonnet-20240229",
    });
    const res = await model.generate([
        [new HumanMessage(`Hello!`)],
        [new HumanMessage(`Hi!`)],
    ]);
    console.log({ res });
});
test("Test ChatAnthropic in streaming mode", async () => {
    let nrNewTokens = 0;
    let streamedCompletion = "";
    const model = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        streaming: true,
        callbacks: CallbackManager.fromHandlers({
            async handleLLMNewToken(token) {
                nrNewTokens += 1;
                streamedCompletion += token;
            },
        }),
    });
    const message = new HumanMessage("Hello!");
    const res = await model.invoke([message]);
    console.log({ res });
    expect(nrNewTokens > 0).toBe(true);
    expect(res.content).toBe(streamedCompletion);
});
test("Test ChatAnthropic in streaming mode with a signal", async () => {
    let nrNewTokens = 0;
    let streamedCompletion = "";
    const model = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        streaming: true,
        callbacks: CallbackManager.fromHandlers({
            async handleLLMNewToken(token) {
                nrNewTokens += 1;
                streamedCompletion += token;
            },
        }),
    });
    const controller = new AbortController();
    const message = new HumanMessage("Hello! Give me an extremely verbose response");
    await expect(() => {
        const res = model.invoke([message], {
            signal: controller.signal,
        });
        setTimeout(() => {
            controller.abort();
        }, 500);
        return res;
    }).rejects.toThrow();
    console.log({ nrNewTokens, streamedCompletion });
}, 5000);
test.skip("Test ChatAnthropic prompt value", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
    });
    const message = new HumanMessage("Hello!");
    const res = await chat.generatePrompt([new ChatPromptValue([message])]);
    expect(res.generations.length).toBe(1);
    for (const generation of res.generations) {
        for (const g of generation) {
            console.log(g.text);
        }
    }
    console.log({ res });
});
test.skip("ChatAnthropic, docs, prompt templates", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        temperature: 0,
    });
    const systemPrompt = PromptTemplate.fromTemplate("You are a helpful assistant that translates {input_language} to {output_language}.");
    const chatPrompt = ChatPromptTemplate.fromMessages([
        new SystemMessagePromptTemplate(systemPrompt),
        HumanMessagePromptTemplate.fromTemplate("{text}"),
    ]);
    const responseA = await chat.generatePrompt([
        await chatPrompt.formatPromptValue({
            input_language: "English",
            output_language: "French",
            text: "I love programming.",
        }),
    ]);
    console.log(responseA.generations);
});
test.skip("ChatAnthropic, longer chain of messages", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        temperature: 0,
    });
    const chatPrompt = ChatPromptTemplate.fromMessages([
        HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
        AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
        HumanMessagePromptTemplate.fromTemplate("{text}"),
    ]);
    const responseA = await chat.generatePrompt([
        await chatPrompt.formatPromptValue({
            text: "What did I just say my name was?",
        }),
    ]);
    console.log(responseA.generations);
});
test.skip("ChatAnthropic, Anthropic apiUrl set manually via constructor", async () => {
    // Pass the default URL through (should use this, and work as normal)
    const anthropicApiUrl = "https://api.anthropic.com";
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        anthropicApiUrl,
    });
    const message = new HumanMessage("Hello!");
    const res = await chat.call([message]);
    console.log({ res });
});
test("Test ChatAnthropic stream method", async () => {
    const model = new ChatAnthropic({
        maxTokens: 50,
        maxRetries: 0,
        modelName: "claude-3-sonnet-20240229",
    });
    const stream = await model.stream("Print hello world.");
    const chunks = [];
    for await (const chunk of stream) {
        console.log(chunk);
        chunks.push(chunk);
    }
    expect(chunks.length).toBeGreaterThan(1);
});
test("Test ChatAnthropic stream method with abort", async () => {
    await expect(async () => {
        const model = new ChatAnthropic({
            maxTokens: 500,
            maxRetries: 0,
            modelName: "claude-3-sonnet-20240229",
        });
        const stream = await model.stream("How is your day going? Be extremely verbose.", {
            signal: AbortSignal.timeout(1000),
        });
        for await (const chunk of stream) {
            console.log(chunk);
        }
    }).rejects.toThrow();
});
test("Test ChatAnthropic stream method with early break", async () => {
    const model = new ChatAnthropic({
        maxTokens: 50,
        maxRetries: 0,
        modelName: "claude-3-sonnet-20240229",
    });
    const stream = await model.stream("How is your day going? Be extremely verbose.");
    let i = 0;
    for await (const chunk of stream) {
        console.log(chunk);
        i += 1;
        if (i > 10) {
            break;
        }
    }
});
test("Test ChatAnthropic headers passed through", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
        apiKey: "NOT_REAL",
        clientOptions: {
            defaultHeaders: {
                "X-Api-Key": process.env.ANTHROPIC_API_KEY,
            },
        },
    });
    const message = new HumanMessage("Hello!");
    const res = await chat.invoke([message]);
    console.log({ res });
});
test("Test ChatAnthropic multimodal", async () => {
    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
    });
    const res = await chat.invoke([
        new HumanMessage({
            content: [
                {
                    type: "image_url",
                    image_url: {
url: "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAMCAggHCQgGCQgICAcICAgICAgICAYICAgHDAgHCAgICAgIBggICAgICAgICBYICAgICwkKCAgNDQoIDggICQgBAwQEBgUGCgYGCBALCg0QCg0NEA0KCg8LDQoKCgoLDgoQDQoLDQoKCg4NDQ0NDgsQDw0OCg4NDQ4NDQoJDg8OCP/AABEIALAAsAMBEQACEQEDEQH/xAAdAAEAAgEFAQAAAAAAAAAAAAAABwgJAQIEBQYD/8QANBAAAgIBAwIDBwQCAgIDAAAAAQIAAwQFERIIEwYhMQcUFyJVldQjQVGBcZEJMzJiFRYk/8QAGwEBAAMAAwEAAAAAAAAAAAAAAAQFBgEDBwL/xAA5EQACAQIDBQQJBAIBBQAAAAAAAQIDEQQhMQVBUWGREhRxgRMVIjJSU8HR8CNyobFCguEGJGKi4v/aAAwDAQACEQMRAD8ApfJplBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBANl16qOTEKB6kkAD+z5Tkcj0On+z7Ub1FlOmanejeavj6dqV6kfsQ1OK4IP8AIM6pVYR1kuqJdLCV6qvCnJ/6v66nL+Ems/RNc+y63+BOvvFL411O/wBW4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6HE1D2e6lQpsu0zU6EXzZ8jTtSoUD9yWuxUAA/kmdkasJaSXVHRVwlekrzpyX+r+mh56m9WHJSGU+hUgg/wBjynaRORvnAEAQBAEAQBAEAQCbennpVzfER95LHE0tX4tlsnJr2B2srw6yQLCpBQ3Me1W+4/VZLKlh4jFRo5ay4cPH7f0XWA2XUxft37MONs34ffRcy/Xsu6bdG0UK2Nh1tkAbHMyAt+Wx2HIi11/SDcQe3jrTXv6IJRVcRUqe88uC0Nxhdn0MMv0458XnJ+e7wVlyJPJkYsTSAIAgCAIAgCAIBqDAIx9qHTbo2tBmycOtcgjYZmOBRlqdjxJtQDuhdye3ette/qhkmliKlP3XlwehXYrZ9DEr9SOfFZS6rXwd1yKCdQ3Srm+HT7yGOXpbPxXLVOLUMTtXXmVgkVliQgvU9qx9h+kz11Ne4fFRrZaS4cfD7f2YfH7LqYT279qHHevH76PlvhKTClEAQBAEAQBAJp6WOn0+I80i7mumYnF8x1LIbSSe3iV2DYq13ElnQ8q6gdijWUuIeKxHoY5e89PuXWy8D3qp7S9iOvN/D9+XiZRNN06uiuvHqrSqmpFrqqrVUrrrUBUREUBVVVAAUAAATNNtu7PR4xUUoxVkskloktxyCZwfRj26jetHPtzrMXSM4Uabj7Vrfj10O2ZdsDbb3bqrCKEYmpeyED8Hs53LZVwvsPg4qN6kbt+OS8t5hdobYqOo44edorK6SzfmtFpz14H16f8Arkz6cmrD1e9crBvsFZy3ropvxC2yo7NTXXXbjhtuXcTmisz91hX2yr4KLjemrNbuPXeMDtuoqihiGnF/5ZJx55ZNceF76GQSUJuhAEAQBAEAhb239WWl+H391s7mXnbAnExu2WqUjdWyLHda6Qw2IXdrCCGFZX5pMo4WdXNZLiyoxm1KOFfZl7UuCtdeN2kvzcRB4d/5JMV7OOVpWRRSWAFmPk1ZTKN9uT1PRi+QHnsj2H12DHYGXLZzS9mV3zVvuVFL/qGDlapSaXFST6qyfS/3tb4M8a4up49WoYlyZGLcCUsTf1B2ZGVgHrsRgVNbqrIwIYAjaVc4Sg+zJWZqaVWFWCnB3T0/PodnqOnV312Y9taW02o1dtViq9dlbAq6OjAqyspIKkEEGfKbTuj7lFSTjJXTyaejXAxd9U/T6fDmYBTzbTMvm+G7FnNRBHcxLLDuWankCrueVlRG5dq7nOlwuI9NHP3lr9zzjamA7rU9n3Jacn8P25eBC0mFKIAgCAIBtdwASfQDc/4nIbsZXulr2ZDR9HwsYpxybqxmZe4Xl71cquyMR69hO3jg+fy0r5n1OWxNX0lRvdovBflz1DZuG7vh4xtZtXl+55vpp5EsyKWZ5X2seH783TdRwsZgmVk4OVRQzMUUXPRYle7gEoCxA5gEqDvsdp2U5KM03omv7I+Ig6lKUIuzaaXmigPtb6HNQ0bEytTGXjZeLiKlhWuu6rINPMLbY1bFqkXHQ908b7CyK+wUqFe+pY2FSSjZpvnl+MwmJ2JVw9OVTtqUYq+Sadt+WaVtd9+W+uLLv5HzB8j/AIlgZ8yRdGfUXXq2JXpGTZtquFUE+cnfMxU2Wu9CzEvaicEsG+/MdzYLbsmexmHdOXaS9l/w+H2PQ9kY9V6apyftxVtdUtJc3x58iykrjQCAIAgFdurzqbPh+lMHFKHVspC6FuLLh427Icp0O4d2ZWREb5WZLGbktJrssMJhvSu8vdX8vh9zP7X2i8LBRp27b46Rj8Vt73JebyVnCfSz0jNqh/8AsGsrZZRcxuoxrms7ua7HmcvLYkOaXJ5Ctjvkb8n/AE+K3TcVi+x+nS6rdyX33eJTbL2S636+JTaeaTveTf8AlLlwjv35ZFmfHnSnoWo47Yo0/FxLOBWnJw8ejHuobb5GVqkUOqnY9qwOjDyI9CKyGKqwd+03ybdjS19mYarHs+jSe5pJNdP6KudBPiTIwNYz/D1jA1WJk91AWKLqGJctDWVg+QFlfdQtsGcVY+//AFgSzx0VKmqi5dJK/wCeZm9iVJ0sRPDye6WWdu1BpXWeV78M8uGd/wCURuCJuqX2YjWNHzMYJyyaKzmYm3Hl71SrOqKW8h307mOT5fLc3mPUSsNV9HUT3aPwf5crNpYbvGHlG2azj+5Zrrp5mKFHBAI9CNx/iak8vTubpwBAEAQDtPCekLk5WHiON0yczFx3H8pbkVVMP7VyJ8zfZi3wTfRHdRh26kI8ZRXk5IzREf6mPPXTSAIB1/iPQa8yjIwrVD05NFuPYrAFWrsrat1YHyIKsRsf2nMXZpo+ZR7UXF77rqYW2xHrJqsHG2smu1T6rapKWKf8OCP6mxvfNHj1nH2XqsnfW6yOVpGr241teVRY9ORS4sqtrPF67B6Mp/2NiCGBIIYMQeGlJWaujsp1JU5KcHZrQyZdK/U3X4ipONdwq1fGQNkVL5JkVbhfe8cE/wDgWKq1e5NFjKD8tt
LPm8ThnSd17r0+35qej7N2hHFQs8prVfVcv6J4kIuBAKtdWnV8uj89I090fVeP/wCi8hXq05CvIcg26PmMpDCpgVqUrZaCGqrussLhPSe3P3f7/wCOf4s9tTaXd16On77/APXn48EU58OYl+RremrrRyHbJzdPbI9+LvZZjW21vUlgs5FMe4OqmshVrrscca9jtcSaVKXotydrcVr58zH04znioLFXd3G/a17L08E3u5vJEveGeobX/Cuq2YmttbbjX3NflUu7ZC1VW2OTlaZZuzDHrIbbGXZOFbV9qmwfLElh6Venelqsl4rc+fP6FtT2hicHiHDEu8W7u+ii8lKObtHL3fH/AC1tn1AdReJ4exVvJW/MyEJwcVWG9x2G1zkb8MVNwTbt83kqhmYCVVDDyqytot7/ADeanG46GFh2nm37q4/8c/qVr/4/fZ9k5Obm+J7+Xa430V2soVcrNuuW3LtT+RQUNZKjj3L2QHlRYqWOPqJRVJcvJJWRnth4epKpLE1FqnZ8XJ3b8MuG/LQvdKQ2ZqB/qAYXfFmkLjZWZiINkxszKx0H8JVkW1KP6VAJsIPtRT4pPqjyKtDsVJx4SkvJSdjq59HSIAgCAdp4T1dcbKw8tzsmNmYuQ5/hKsiq1j/SoTPma7UWuKa6o7qM+xUhLhKL8lJXM0RP+pjz100gCAIBjA6x/Y9ZpGq35KofcdSssy8ewA8Vvcl8rHJ3OzrazXAeQNVq8d+3Zx0mDrKpTS3rLy3P6HnG18I6FdzS9mWa/c9V9fPkQTJxRnf+AfHeRpOXj6pjHa/GsDhd+K2p6W0WHY/p31lqidiVDchsyqR8VIKpFxlo/wAv5EjD15UKiqw1X8revMy++DfFtOo4uNqNDcsfKprvrJ8iFZQeLD1Dod0KnzVlI/aZKcXCTi9UerUqkasFOLumk14M8T1L+0uzRdHzdRp8skKlGO2wPC+6xKUt2PkezzN3E7g8NtjvO7D01UqKL03+CzIe0MQ8Ph5VI66Lxbsv7Ks9D3ThTqG/iXOBvSvJsGHTae4L8lWDXZ2QzMzXMt7MoWzzNyW2PzPaYWeNxDj+nDLLPw4dPsZ7Y+CVb/ua3tO7tfitZPzyS5XJS6zOlu3XAmrYSh9Rpq7N2OzKozMYF3RUZyEXIqZ325lVtVyrMOFUjYPEql7MtP6f2J+1tmvE2qU/fWWusfo1/P8AVWfbjruoWabpFGrl/wD5Wq/UOyMhO3mV6QFxaU98BCuzW5dNxW2wcraqeZawku1pQjFVJOn7uWmna1y8uhmMdUqOhSjiPfTlr73o0rXfi1k96V7nq/YP0n6lr99OdqgysfS6qqKw2QbK8rKx6kWrHxcdG2toxlrUA3lU+Q71c3ta+rpr4qFJONOzlnpom9/N8vpkTMBsyriZKeITUEla+rSyUbapLyvzeZkT0fR6saqvFprSmilFrqqrUJXXWo2VEUABVUDbYSgbbd3qbyMVFWSskcucH0ag/wCoBhd8WauuTlZmWh3TIzMrIQ/yluRbap/tXBmwguzFLgkuiPIq0+3UnLjKT8nJ2Orn0dIgCAIBtdAQQfQjY/4nIauZXulr2nDWNHw8kvyyaKxh5e/Hl71SqozsF8h307eQB5fLcvkPQZbE0vR1Gt2q8H+WPUNm4nvGHjK92spfuWT66+ZLMilmIAgHm/aL4ExtVxL9PyaVvptRtkb1WwA9uyths1dqNsRYhDKf39Z905uElKLszor0YVoOE1dP86mH7R/DORdi5OeKz2sI4iZZIKtU+Q11dPJSvl+rS1ZBIKsyDY7krrXJKSjxvbyzPKY0ZuMprSNlLim21p4rPh1t6fA9ieq34Ka1RhW5OA7XKbMcC6ypq7DU/doT9cLyBPNK7ECglmT0nW60FLsN2fPnnroSI4KvKl6aMLxz0zeTavbW3hfy3Wq/4+fbVQKbPDd9wW7vWZGnK2wW2l17l9FTehsS0W5PA/M62uV5CqzhV4+i7+kS5Px4/T8z02wcXHsvDyed24+DzaXg7u3PLLSderP2f3arombi0KXyEFWVVWBu1jU2pc1SD93sqWxAP3dlkHC1FCqm9NOuRd7ToOvhpwjrk14xadv4K7dEPU5gYOI2iZ+RXiql1l2Hk2fJjtVae5ZVbaSUrsW42WB7O2jpYqg8k+exxuGnKXbgr8eOWXmUGxtpUqdP0FV9m12m9Gm72/8AFp8dfEmb22dZmlaXjv7nk42pag4K0U49q3U1t5fqZV1LFErTfl2g4st/8VCjnZXDo4Oc37ScVvv9L/iLXG7Xo0IfpyU57kndeLa0X8vRcq59OnsAzPFWY3iTVmezBa3uMbQOWo2qdhSibcUwa+IrPEBSq9pB/wBjV2GIrxoR9HT1/r/6M/s7A1MbU7ziHeN75/5tbuUF/Oml28h0oDfCAIBE/VL7TRo+j5uSr8cm6s4eJtx5e9XKyK6hvJuwncyCPP5aW8j6GVhqXpKiW7V+C/LFZtLE93w8pXzeUf3PJdNfIxQIgAAHoBsP8TUnl6VjdOAIAgCAIBNPSx1BHw5mE3c20zL4JmIoZjUQT28uusblmp5EMiDlZUTsHaulDDxWH9NHL3lp9i62Xj+61Pa9yWvJ/F9+XgZRNN1Ku+uvIqsS2m1FsqtrZXrsrYBkdHUlWVlIIYEggzNNNOzPR4yUkpRd081bRp7zkTg+jUQCH9Q8FeJjnNdVrmImmPx/QfTKXuqAVOXa2ZeTO5tAe29hWq1bpeS8lKdLs2cH2v3Zfn5kVjpYr0t1VXY4djNaaZ+OumWpGh9j2vaVi6pp+NVpep4+ouxQXY9ZzMnKybbGy8rVbNsHENdKMdiot2Raa0pbtjud/pac5RlK6a4PJJaJasivD4inCcIdmSle11m3JttyeStn/RJ/sG8A6no2LgaTaultiY+MwuuxmzUyDlFue4rek1XGxmd3yWspLvuwoTnskevONSTkr58bafm7dxJuDpVaNONOXZsln2b6+evjv4I6jVejTRLMp9TqTLw8xrRkV24eVZT7vkcuZtorKvUjM25KMj1+Z2RdzOxYuoo9l2a5rVcOJGnsnDubqxTjLVOMmrPilnG/k1yJxrXYAbkkADkdtyf5OwA3Pr5AD+APSQi5K7e1zod0nVrnzanu07KtZnuOMK3x7rWO7WPjuNlsY7sWoenmzMzB2YtLCljZ012XmuevUoMVsWhXk5puEnra1m+Nnl0tffmeY8Df8dum49iXZmZkZ4Q79gImJjv/AALQj23Mv/qt6BvRuQJU9lTaE5K0Vb+X9iNQ2BRg71JOfKyUemb/AJ/gtXhYSVIlNaLXVWqpXWiqqIigBURVACqoAAUAAASrbvmzTpJKy0PtByIBx9R1KuiuzItsSqmpGsttsZUrrrUFnd3YhVVVBJYkAATlJt2R8ykopyk7JZtvRJbzF31T9QR8R5gNPNdMxOSYaMGQ2kkdzLsrOxVruICo45V1AbhGsuQaXC4f0Mc/eev2PONqY7vVT2fcjpzfxfbl4kLSYUogCAIAgCAIBNvTz1VZvh0+7FTl6Wz8mxGfi1DE7
2WYdhBFZYkuaGHasfc/os9lrQ8RhY1s9JcePj9/7LrAbUnhPYt2ocN68Pto+W+/fsv6ktG1oKuNmVrkEbnDyCKMtTsOQFTkd0LuB3KGtr39HMoquHqU/eWXFaG4wu0KGJX6cs+DykvJ6+KuuZJxEjFiaQBAEAQBAEAQBANQIBGHtR6ktG0UMuTmVtkAbjDxyt+Wx2PEGpG/SDcSO5kNTXv6uJJpYepV91ZcXoV2K2hQwy/UlnwWcn5bvF2XMoL1DdVWb4iPuwU4mlq/JcRX5NewO9dmZYABYVIDilR2q32P6rJXat7h8LGjnrLjw8Pv/Rh8ftSpi/Yt2YcL5vx+2i5kJSYUogCAIAgCAIAgCAbLqFYcWAZT6hgCD/R8pyOZ6HT/AGg6lQorp1PU6EXyVMfUdSoUD9gFpykAA/gCdUqUJaxXREuli69JWhUkv9n9Tl/FvWfreufetb/PnX3el8C6Hf6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/wA+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819Tiah7QdRvU13anqd6N5MmRqOpXqR+4K3ZTgg/wROyNKEdIrojoqYuvVVp1JP/Z/TU89TQqjioCgegAAA/oeU7SJzN84AgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgH/9k=",
                    },
                },
                { type: "text", text: "What is this a logo for?" },
            ],
        }),
    ]);
    console.log(res);
});
package/dist/tests/chat_models.test.d.ts
DELETED
@@ -1 +0,0 @@
export {};
package/dist/tests/chat_models.test.js
DELETED
@@ -1,88 +0,0 @@
import { jest, test } from "@jest/globals";
import { AIMessage } from "@langchain/core/messages";
import { z } from "zod";
import { OutputParserException } from "@langchain/core/output_parsers";
import { ChatAnthropic } from "../chat_models.js";
test("withStructuredOutput with output validation", async () => {
    const model = new ChatAnthropic({
        modelName: "claude-3-haiku-20240307",
        temperature: 0,
        anthropicApiKey: "testing",
    });
    jest
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        .spyOn(model, "invoke")
        .mockResolvedValue(new AIMessage({
        content: [
            {
                type: "tool_use",
                id: "notreal",
                name: "Extractor",
                input: "Incorrect string tool call input",
            },
        ],
    }));
    const schema = z.object({
        alerts: z
            .array(z.object({
            description: z.string().describe("A description of the alert."),
            severity: z
                .enum(["HIGH", "MEDIUM", "LOW"])
                .describe("How severe the alert is."),
        }))
            .describe("Important security or infrastructure alerts present in the given text."),
    });
    const modelWithStructuredOutput = model.withStructuredOutput(schema, {
        name: "Extractor",
    });
    await expect(async () => {
        await modelWithStructuredOutput.invoke(`
      Enumeration of Kernel Modules via Proc
      Prompt for Credentials with OSASCRIPT
      User Login
      Modification of Standard Authentication Module
      Suspicious Automator Workflows Execution
    `);
    }).rejects.toThrowError(OutputParserException);
});
test("withStructuredOutput with proper output", async () => {
    const model = new ChatAnthropic({
        modelName: "claude-3-haiku-20240307",
        temperature: 0,
        anthropicApiKey: "testing",
    });
    jest
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        .spyOn(model, "invoke")
        .mockResolvedValue(new AIMessage({
        content: [
            {
                type: "tool_use",
                id: "notreal",
                name: "Extractor",
                input: { alerts: [{ description: "test", severity: "LOW" }] },
            },
        ],
    }));
    const schema = z.object({
        alerts: z
            .array(z.object({
            description: z.string().describe("A description of the alert."),
            severity: z
                .enum(["HIGH", "MEDIUM", "LOW"])
                .describe("How severe the alert is."),
        }))
            .describe("Important security or infrastructure alerts present in the given text."),
    });
    const modelWithStructuredOutput = model.withStructuredOutput(schema, {
        name: "Extractor",
    });
    const result = await modelWithStructuredOutput.invoke(`
      Enumeration of Kernel Modules via Proc
      Prompt for Credentials with OSASCRIPT
      User Login
      Modification of Standard Authentication Module
      Suspicious Automator Workflows Execution
    `);
    console.log(result);
});