@langchain/anthropic 0.3.3 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models.cjs +29 -13
- package/dist/chat_models.js +29 -13
- package/dist/utils/errors.cjs +32 -0
- package/dist/utils/errors.d.ts +3 -0
- package/dist/utils/errors.js +27 -0
- package/package.json +1 -1
package/dist/chat_models.cjs
CHANGED
@@ -15,6 +15,7 @@ const output_parsers_js_1 = require("./output_parsers.cjs");
 const tools_js_1 = require("./utils/tools.cjs");
 const message_inputs_js_1 = require("./utils/message_inputs.cjs");
 const message_outputs_js_1 = require("./utils/message_outputs.cjs");
+const errors_js_1 = require("./utils/errors.cjs");
 function _toolsInParams(params) {
     return !!(params.tools && params.tools.length > 0);
 }
@@ -690,7 +691,7 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
             const newToolCallChunk = (0, tools_js_1.extractToolCallChunk)(chunk);
             // Extract the text content token for text field and runManager.
             const token = extractToken(chunk);
-            yield new outputs_1.ChatGenerationChunk({
+            const generationChunk = new outputs_1.ChatGenerationChunk({
                 message: new messages_1.AIMessageChunk({
                     // Just yield chunk as it is and tool_use will be concat by BaseChatModel._generateUncached().
                     content: chunk.content,
@@ -702,9 +703,8 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
                 }),
                 text: token ?? "",
             });
-            if (token) {
-                await runManager?.handleLLMNewToken(token);
-            }
+            yield generationChunk;
+            await runManager?.handleLLMNewToken(token ?? "", undefined, undefined, undefined, undefined, { chunk: generationChunk });
         }
     }
     /** @ignore */
@@ -774,11 +774,19 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.streamingClient.messages.create({
-            ...request,
-            ...this.invocationKwargs,
-            stream: true,
-        }, options);
+        const makeCompletionRequest = async () => {
+            try {
+                return await this.streamingClient.messages.create({
+                    ...request,
+                    ...this.invocationKwargs,
+                    stream: true,
+                }, options);
+            }
+            catch (e) {
+                const error = (0, errors_js_1.wrapAnthropicClientError)(e);
+                throw error;
+            }
+        };
         return this.caller.call(makeCompletionRequest);
     }
     /** @ignore */
@@ -793,10 +801,18 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.batchClient.messages.create({
-            ...request,
-            ...this.invocationKwargs,
-        }, options);
+        const makeCompletionRequest = async () => {
+            try {
+                return await this.batchClient.messages.create({
+                    ...request,
+                    ...this.invocationKwargs,
+                }, options);
+            }
+            catch (e) {
+                const error = (0, errors_js_1.wrapAnthropicClientError)(e);
+                throw error;
+            }
+        };
         return this.caller.callWithOptions({ signal: options.signal ?? undefined }, makeCompletionRequest);
     }
     _llmType() {
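Taken together, the two streaming hunks change when and how handleLLMNewToken fires: the generation chunk is now captured in a local generationChunk, yielded, and then reported to the run manager on every iteration with the chunk attached in the callback's fields argument, rather than only when a non-empty text token was extracted. A minimal consumer sketch of where the new { chunk } payload surfaces; the model id and prompt are placeholders, not part of this diff:

import { ChatAnthropic } from "@langchain/anthropic";

async function main() {
  const model = new ChatAnthropic({
    model: "claude-3-5-sonnet-20240620", // assumed model id
    callbacks: [
      {
        // After this change, handleLLMNewToken fires for every streamed chunk,
        // with the full ChatGenerationChunk passed in the final `fields` arg;
        // previously a chunk with no text token skipped the callback entirely.
        handleLLMNewToken(token, _idx, _runId, _parentRunId, _tags, fields) {
          console.log(JSON.stringify({ token, chunkText: fields?.chunk?.text }));
        },
      },
    ],
  });
  for await (const chunk of await model.stream("Hello!")) {
    // Chunks arrive as before; only the callback timing and payload changed.
    void chunk;
  }
}
main().catch(console.error);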
package/dist/chat_models.js
CHANGED
@@ -12,6 +12,7 @@ import { AnthropicToolsOutputParser } from "./output_parsers.js";
 import { extractToolCallChunk, handleToolChoice } from "./utils/tools.js";
 import { _convertMessagesToAnthropicPayload } from "./utils/message_inputs.js";
 import { _makeMessageChunkFromAnthropicEvent, anthropicResponseToChatMessages, } from "./utils/message_outputs.js";
+import { wrapAnthropicClientError } from "./utils/errors.js";
 function _toolsInParams(params) {
     return !!(params.tools && params.tools.length > 0);
 }
@@ -687,7 +688,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
             const newToolCallChunk = extractToolCallChunk(chunk);
             // Extract the text content token for text field and runManager.
             const token = extractToken(chunk);
-            yield new ChatGenerationChunk({
+            const generationChunk = new ChatGenerationChunk({
                 message: new AIMessageChunk({
                     // Just yield chunk as it is and tool_use will be concat by BaseChatModel._generateUncached().
                     content: chunk.content,
@@ -699,9 +700,8 @@ export class ChatAnthropicMessages extends BaseChatModel {
                 }),
                 text: token ?? "",
             });
-            if (token) {
-                await runManager?.handleLLMNewToken(token);
-            }
+            yield generationChunk;
+            await runManager?.handleLLMNewToken(token ?? "", undefined, undefined, undefined, undefined, { chunk: generationChunk });
         }
     }
     /** @ignore */
@@ -771,11 +771,19 @@ export class ChatAnthropicMessages extends BaseChatModel {
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.streamingClient.messages.create({
-            ...request,
-            ...this.invocationKwargs,
-            stream: true,
-        }, options);
+        const makeCompletionRequest = async () => {
+            try {
+                return await this.streamingClient.messages.create({
+                    ...request,
+                    ...this.invocationKwargs,
+                    stream: true,
+                }, options);
+            }
+            catch (e) {
+                const error = wrapAnthropicClientError(e);
+                throw error;
+            }
+        };
         return this.caller.call(makeCompletionRequest);
     }
     /** @ignore */
@@ -790,10 +798,18 @@ export class ChatAnthropicMessages extends BaseChatModel {
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.batchClient.messages.create({
-            ...request,
-            ...this.invocationKwargs,
-        }, options);
+        const makeCompletionRequest = async () => {
+            try {
+                return await this.batchClient.messages.create({
+                    ...request,
+                    ...this.invocationKwargs,
+                }, options);
+            }
+            catch (e) {
+                const error = wrapAnthropicClientError(e);
+                throw error;
+            }
+        };
         return this.caller.callWithOptions({ signal: options.signal ?? undefined }, makeCompletionRequest);
     }
     _llmType() {
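Both the streaming and batch request paths now funnel SDK failures through wrapAnthropicClientError before rethrowing, so retry behavior driven by this.caller is unchanged while callers receive the original error annotated with lc_error_code and a troubleshooting URL. A sketch of what that looks like downstream; the model id and prompt are assumptions for illustration:

import { ChatAnthropic } from "@langchain/anthropic";

async function main() {
  const model = new ChatAnthropic({ model: "claude-3-5-sonnet-20240620" });
  try {
    await model.invoke("Hello!");
  } catch (e) {
    // The wrapper mutates the original SDK error: it sets lc_error_code and
    // appends a troubleshooting URL to the message, then rethrows it.
    const err = e as Error & { lc_error_code?: string; status?: number };
    if (err.lc_error_code === "MODEL_RATE_LIMIT") {
      // 429: back off and retry later
    } else if (err.lc_error_code === "MODEL_AUTHENTICATION") {
      // 401: check ANTHROPIC_API_KEY configuration
    } else {
      throw err;
    }
  }
}
main().catch(console.error);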
package/dist/utils/errors.cjs
ADDED
@@ -0,0 +1,32 @@
+"use strict";
+/* eslint-disable @typescript-eslint/no-explicit-any */
+/* eslint-disable no-param-reassign */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.wrapAnthropicClientError = exports.addLangChainErrorFields = void 0;
+function addLangChainErrorFields(error, lc_error_code) {
+    error.lc_error_code = lc_error_code;
+    error.message = `${error.message}\n\nTroubleshooting URL: https://js.langchain.com/docs/troubleshooting/errors/${lc_error_code}/\n`;
+    return error;
+}
+exports.addLangChainErrorFields = addLangChainErrorFields;
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+function wrapAnthropicClientError(e) {
+    let error;
+    if (e.status === 400 && e.message.includes("tool")) {
+        error = addLangChainErrorFields(e, "INVALID_TOOL_RESULTS");
+    }
+    else if (e.status === 401) {
+        error = addLangChainErrorFields(e, "MODEL_AUTHENTICATION");
+    }
+    else if (e.status === 404) {
+        error = addLangChainErrorFields(e, "MODEL_NOT_FOUND");
+    }
+    else if (e.status === 429) {
+        error = addLangChainErrorFields(e, "MODEL_RATE_LIMIT");
+    }
+    else {
+        error = e;
+    }
+    return error;
+}
+exports.wrapAnthropicClientError = wrapAnthropicClientError;
package/dist/utils/errors.d.ts
ADDED
@@ -0,0 +1,3 @@
+export type LangChainErrorCodes = "INVALID_PROMPT_INPUT" | "INVALID_TOOL_RESULTS" | "MESSAGE_COERCION_FAILURE" | "MODEL_AUTHENTICATION" | "MODEL_NOT_FOUND" | "MODEL_RATE_LIMIT" | "OUTPUT_PARSING_FAILURE";
+export declare function addLangChainErrorFields(error: any, lc_error_code: LangChainErrorCodes): any;
+export declare function wrapAnthropicClientError(e: any): any;
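Note that the declaration file exposes the full LangChainErrorCodes union even though this wrapper only ever assigns four of its members. A hypothetical typed lookup over the union; the import path is the internal module path and the retry decisions are illustrative, not part of the package:

import type { LangChainErrorCodes } from "./utils/errors.js";

// Hypothetical user-side mapping from error code to a retry decision;
// Record<LangChainErrorCodes, boolean> forces exhaustive coverage of the union.
const retryable: Record<LangChainErrorCodes, boolean> = {
  INVALID_PROMPT_INPUT: false,
  INVALID_TOOL_RESULTS: false,
  MESSAGE_COERCION_FAILURE: false,
  MODEL_AUTHENTICATION: false,
  MODEL_NOT_FOUND: false,
  MODEL_RATE_LIMIT: true,
  OUTPUT_PARSING_FAILURE: true,
};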
package/dist/utils/errors.js
ADDED
@@ -0,0 +1,27 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
+/* eslint-disable no-param-reassign */
+export function addLangChainErrorFields(error, lc_error_code) {
+    error.lc_error_code = lc_error_code;
+    error.message = `${error.message}\n\nTroubleshooting URL: https://js.langchain.com/docs/troubleshooting/errors/${lc_error_code}/\n`;
+    return error;
+}
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+export function wrapAnthropicClientError(e) {
+    let error;
+    if (e.status === 400 && e.message.includes("tool")) {
+        error = addLangChainErrorFields(e, "INVALID_TOOL_RESULTS");
+    }
+    else if (e.status === 401) {
+        error = addLangChainErrorFields(e, "MODEL_AUTHENTICATION");
+    }
+    else if (e.status === 404) {
+        error = addLangChainErrorFields(e, "MODEL_NOT_FOUND");
+    }
+    else if (e.status === 429) {
+        error = addLangChainErrorFields(e, "MODEL_RATE_LIMIT");
+    }
+    else {
+        error = e;
+    }
+    return error;
+}
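A quick behavioral sketch of the new helper against synthetic error objects; the statuses and messages below are fabricated for illustration, and the import path is the internal module path rather than a documented public export:

import { wrapAnthropicClientError } from "./utils/errors.js";

// A 429 gets tagged MODEL_RATE_LIMIT and its message gains a troubleshooting URL.
const rateLimited = Object.assign(new Error("rate limited"), { status: 429 });
const wrapped = wrapAnthropicClientError(rateLimited);
console.log(wrapped === rateLimited); // true: the helper mutates in place
console.log(wrapped.lc_error_code); // "MODEL_RATE_LIMIT"
console.log(wrapped.message.includes("Troubleshooting URL")); // true

// A 400 is only tagged when the message mentions "tool".
const badTool = Object.assign(new Error("invalid tool result blocks"), { status: 400 });
console.log(wrapAnthropicClientError(badTool).lc_error_code); // "INVALID_TOOL_RESULTS"

// Anything else passes through unchanged.
const serverError = Object.assign(new Error("overloaded"), { status: 529 });
console.log(wrapAnthropicClientError(serverError).lc_error_code); // undefined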