langchain 0.0.143 → 0.0.144
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/callbacks/handlers/llmonitor.cjs +1 -0
- package/callbacks/handlers/llmonitor.d.ts +1 -0
- package/callbacks/handlers/llmonitor.js +1 -0
- package/dist/agents/mrkl/outputParser.cjs +1 -1
- package/dist/agents/mrkl/outputParser.js +1 -1
- package/dist/base_language/index.cjs +2 -1
- package/dist/base_language/index.d.ts +7 -2
- package/dist/base_language/index.js +2 -1
- package/dist/callbacks/handlers/llmonitor.cjs +223 -0
- package/dist/callbacks/handlers/llmonitor.d.ts +35 -0
- package/dist/callbacks/handlers/llmonitor.js +215 -0
- package/dist/chains/openai_functions/extraction.d.ts +4 -4
- package/dist/chains/openai_functions/openapi.d.ts +3 -3
- package/dist/chains/openai_functions/structured_output.d.ts +5 -4
- package/dist/chains/openai_functions/tagging.d.ts +4 -4
- package/dist/chains/openai_moderation.cjs +1 -0
- package/dist/chains/openai_moderation.js +1 -0
- package/dist/chat_models/base.cjs +4 -3
- package/dist/chat_models/base.d.ts +3 -3
- package/dist/chat_models/base.js +5 -4
- package/dist/chat_models/minimax.d.ts +6 -28
- package/dist/chat_models/openai.d.ts +2 -3
- package/dist/document_loaders/fs/openai_whisper_audio.cjs +32 -0
- package/dist/document_loaders/fs/openai_whisper_audio.d.ts +11 -0
- package/dist/document_loaders/fs/openai_whisper_audio.js +28 -0
- package/dist/document_loaders/web/github.cjs +210 -24
- package/dist/document_loaders/web/github.d.ts +44 -1
- package/dist/document_loaders/web/github.js +210 -24
- package/dist/document_loaders/web/recursive_url.cjs +13 -0
- package/dist/document_loaders/web/recursive_url.js +13 -0
- package/dist/embeddings/hf_transformers.cjs +71 -0
- package/dist/embeddings/hf_transformers.d.ts +29 -0
- package/dist/embeddings/hf_transformers.js +67 -0
- package/dist/experimental/chat_models/anthropic_functions.d.ts +2 -5
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/prompts/chat.cjs +27 -1
- package/dist/prompts/chat.d.ts +3 -2
- package/dist/prompts/chat.js +28 -2
- package/dist/schema/index.cjs +44 -1
- package/dist/schema/index.d.ts +10 -0
- package/dist/schema/index.js +41 -0
- package/dist/tools/serpapi.cjs +108 -13
- package/dist/tools/serpapi.js +108 -13
- package/dist/vectorstores/redis.cjs +12 -4
- package/dist/vectorstores/redis.d.ts +8 -0
- package/dist/vectorstores/redis.js +12 -4
- package/dist/vectorstores/tigris.cjs +2 -0
- package/dist/vectorstores/tigris.d.ts +2 -3
- package/dist/vectorstores/tigris.js +2 -0
- package/dist/vectorstores/vectara.cjs +30 -12
- package/dist/vectorstores/vectara.d.ts +1 -1
- package/dist/vectorstores/vectara.js +30 -12
- package/document_loaders/fs/openai_whisper_audio.cjs +1 -0
- package/document_loaders/fs/openai_whisper_audio.d.ts +1 -0
- package/document_loaders/fs/openai_whisper_audio.js +1 -0
- package/embeddings/hf_transformers.cjs +1 -0
- package/embeddings/hf_transformers.d.ts +1 -0
- package/embeddings/hf_transformers.js +1 -0
- package/package.json +36 -6
package/callbacks/handlers/llmonitor.cjs
@@ -0,0 +1 @@
+module.exports = require('../../dist/callbacks/handlers/llmonitor.cjs');
package/callbacks/handlers/llmonitor.d.ts
@@ -0,0 +1 @@
+export * from '../../dist/callbacks/handlers/llmonitor.js'
package/callbacks/handlers/llmonitor.js
@@ -0,0 +1 @@
+export * from '../../dist/callbacks/handlers/llmonitor.js'
package/dist/agents/mrkl/outputParser.cjs
@@ -41,7 +41,7 @@ class ZeroShotAgentOutputParser extends types_js_1.AgentActionOutputParser {
                 log: text,
             };
         }
-        const match = /Action:
+        const match = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/.exec(text);
         if (!match) {
             throw new output_parser_js_1.OutputParserException(`Could not parse LLM output: ${text}`);
         }
package/dist/agents/mrkl/outputParser.js
@@ -38,7 +38,7 @@ export class ZeroShotAgentOutputParser extends AgentActionOutputParser {
                 log: text,
             };
         }
-        const match = /Action:
+        const match = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/.exec(text);
         if (!match) {
            throw new OutputParserException(`Could not parse LLM output: ${text}`);
         }
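The replacement pattern makes the `Action Input:` segment optional and lets both capture groups span newlines, since `[\s\S]` matches any character including newlines where `.` does not. A minimal sketch of what the new pattern accepts (the sample texts are hypothetical):

// New MRKL output-parser regex, exercised standalone.
const regex = /Action:([\s\S]*?)(?:\nAction Input:([\s\S]*?))?$/;

// A multiline action input now yields a match instead of failing to parse.
const text = "Action: search\nAction Input: weather\nin Paris today";
const match = regex.exec(text);
console.log(match?.[1]?.trim()); // "search"
console.log(match?.[2]?.trim()); // "weather\nin Paris today"

// A bare action with no input also matches, leaving group 2 undefined.
console.log(regex.exec("Action: list-tables")?.[2]); // undefined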
package/dist/base_language/index.cjs
@@ -1,6 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.calculateMaxTokens = exports.BaseLanguageModel = exports.BaseLangChain = void 0;
+const index_js_1 = require("../schema/index.cjs");
 const async_caller_js_1 = require("../util/async_caller.cjs");
 const count_tokens_js_1 = require("./count_tokens.cjs");
 const tiktoken_js_1 = require("../util/tiktoken.cjs");
@@ -110,7 +111,7 @@ class BaseLanguageModel extends BaseLangChain {
             return new base_js_1.StringPromptValue(input);
         }
         else if (Array.isArray(input)) {
-            return new chat_js_1.ChatPromptValue(input);
+            return new chat_js_1.ChatPromptValue(input.map(index_js_1.coerceMessageLikeToMessage));
         }
         else {
             return input;
package/dist/base_language/index.d.ts
@@ -1,4 +1,5 @@
-import {
+import type { OpenAI as OpenAIClient } from "openai";
+import { BaseMessage, BaseMessageLike, BasePromptValue, LLMResult } from "../schema/index.js";
 import { BaseCallbackConfig, CallbackManager, Callbacks } from "../callbacks/manager.js";
 import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js";
 import { Runnable, RunnableConfig } from "../schema/runnable.js";
@@ -56,7 +57,11 @@ export interface BaseLanguageModelCallOptions extends BaseCallbackConfig {
      */
     signal?: AbortSignal;
 }
-export
+export interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {
+    function_call?: OpenAIClient.Chat.ChatCompletionCreateParams.FunctionCallOption;
+    functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];
+}
+export type BaseLanguageModelInput = BasePromptValue | string | BaseMessageLike[];
 /**
  * Base class for language models.
  */
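`BaseFunctionCallOptions` gives function-calling-capable chat models a shared call-options shape, typed directly against the `openai` client's request types. A minimal sketch of passing these options per call, assuming `ChatOpenAI` (whose call options extend this shape) and that this release's Runnable `invoke` accepts call options as the second argument; the `get_weather` function schema is hypothetical:

import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 });

// `functions` and `function_call` travel as call options rather than
// constructor fields, so they can vary per invocation.
const response = await model.invoke("What is the weather in Paris?", {
  functions: [
    {
      name: "get_weather", // hypothetical function, for illustration only
      description: "Look up current weather for a city",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    },
  ],
  function_call: { name: "get_weather" },
});

// The model's chosen call surfaces on the returned message.
console.log(response.additional_kwargs.function_call);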
package/dist/base_language/index.js
@@ -1,3 +1,4 @@
+import { coerceMessageLikeToMessage, } from "../schema/index.js";
 import { AsyncCaller } from "../util/async_caller.js";
 import { getModelNameForTiktoken } from "./count_tokens.js";
 import { encodingForModel } from "../util/tiktoken.js";
@@ -106,7 +107,7 @@ export class BaseLanguageModel extends BaseLangChain {
         return new StringPromptValue(input);
     }
     else if (Array.isArray(input)) {
-        return new ChatPromptValue(input);
+        return new ChatPromptValue(input.map(coerceMessageLikeToMessage));
     }
     else {
         return input;
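`coerceMessageLikeToMessage` (added to `package/dist/schema/index.js` in this release, per the file list above) is what lets `BaseLanguageModelInput` accept plain `BaseMessageLike` values: each array entry is coerced into a proper message instance before being wrapped in a `ChatPromptValue`. A minimal sketch, assuming string-and-tuple shorthands are among the supported message-likes:

import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI({ temperature: 0 });

// Shorthand message-likes are coerced to BaseMessage instances internally,
// so callers no longer need to construct SystemMessage/HumanMessage by hand.
const result = await model.invoke([
  ["system", "You answer in one word."],
  ["human", "What color is the sky on a clear day?"],
]);

console.log(result.content); // e.g. "Blue"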
package/dist/callbacks/handlers/llmonitor.cjs
@@ -0,0 +1,223 @@
+"use strict";
+var __importDefault = (this && this.__importDefault) || function (mod) {
+    return (mod && mod.__esModule) ? mod : { "default": mod };
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LLMonitorHandler = exports.convertToLLMonitorMessages = void 0;
+const llmonitor_1 = __importDefault(require("llmonitor"));
+const env_js_1 = require("../../util/env.cjs");
+const base_js_1 = require("../base.cjs");
+// Langchain Helpers
+// Input can be either a single message, an array of message, or an array of array of messages (batch requests)
+const parseRole = (id) => {
+    const roleHint = id[id.length - 1];
+    if (roleHint.includes("Human"))
+        return "user";
+    if (roleHint.includes("System"))
+        return "system";
+    if (roleHint.includes("AI"))
+        return "ai";
+    if (roleHint.includes("Function"))
+        return "function";
+    return "ai";
+};
+const convertToLLMonitorMessages = (input) => {
+    const parseMessage = (raw) => {
+        if (typeof raw === "string")
+            return raw;
+        // sometimes the message is nested in a "message" property
+        if ("message" in raw)
+            return parseMessage(raw.message);
+        // Serialize
+        const message = JSON.parse(JSON.stringify(raw));
+        try {
+            // "id" contains an array describing the constructor, with last item actual schema type
+            const role = parseRole(message.id);
+            const obj = message.kwargs;
+            const text = message.text ?? obj.content;
+            const functionCall = obj.additional_kwargs?.function_call;
+            return {
+                role,
+                text,
+                functionCall,
+            };
+        }
+        catch (e) {
+            // if parsing fails, return the original message
+            return message.text ?? message;
+        }
+    };
+    if (Array.isArray(input)) {
+        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+        // @ts-ignore Confuses the compiler
+        return input.length === 1
+            ? (0, exports.convertToLLMonitorMessages)(input[0])
+            : input.map(exports.convertToLLMonitorMessages);
+    }
+    return parseMessage(input);
+};
+exports.convertToLLMonitorMessages = convertToLLMonitorMessages;
+const parseInput = (rawInput) => {
+    if (!rawInput)
+        return null;
+    const { input, inputs, question } = rawInput;
+    if (input)
+        return input;
+    if (inputs)
+        return inputs;
+    if (question)
+        return question;
+    return rawInput;
+};
+const parseOutput = (rawOutput) => {
+    if (!rawOutput)
+        return null;
+    const { text, output, answer } = rawOutput;
+    if (text)
+        return text;
+    if (answer)
+        return answer;
+    if (output)
+        return output;
+    return rawOutput;
+};
+class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
+    constructor(fields = {}) {
+        super(fields);
+        Object.defineProperty(this, "name", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "llmonitor_handler"
+        });
+        Object.defineProperty(this, "monitor", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.monitor = llmonitor_1.default;
+        if (fields) {
+            const { appId, apiUrl, verbose } = fields;
+            this.monitor.init({
+                verbose,
+                appId: appId ?? (0, env_js_1.getEnvironmentVariable)("LLMONITOR_APP_ID"),
+                apiUrl: apiUrl ?? (0, env_js_1.getEnvironmentVariable)("LLMONITOR_API_URL"),
+            });
+        }
+    }
+    async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
+        const params = {
+            ...(extraParams?.invocation_params || {}),
+            ...(metadata || {}),
+        };
+        const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
+        const userId = params?.userId || undefined;
+        const userProps = params?.userProps || undefined;
+        await this.monitor.trackEvent("llm", "start", {
+            runId,
+            parentRunId,
+            name,
+            input: (0, exports.convertToLLMonitorMessages)(prompts),
+            extra: params,
+            userId,
+            userProps,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleChatModelStart(llm, messages, runId, parentRunId, extraParams, tags, metadata) {
+        const params = {
+            ...(extraParams?.invocation_params || {}),
+            ...(metadata || {}),
+        };
+        const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
+        const userId = params?.userId || undefined;
+        const userProps = params?.userProps || undefined;
+        await this.monitor.trackEvent("llm", "start", {
+            runId,
+            parentRunId,
+            name,
+            input: (0, exports.convertToLLMonitorMessages)(messages),
+            extra: params,
+            userId,
+            userProps,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleLLMEnd(output, runId) {
+        const { generations, llmOutput } = output;
+        await this.monitor.trackEvent("llm", "end", {
+            runId,
+            output: (0, exports.convertToLLMonitorMessages)(generations),
+            tokensUsage: {
+                completion: llmOutput?.tokenUsage?.completionTokens,
+                prompt: llmOutput?.tokenUsage?.promptTokens,
+            },
+        });
+    }
+    async handleLLMError(error, runId) {
+        await this.monitor.trackEvent("llm", "error", {
+            runId,
+            error,
+        });
+    }
+    async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
+        // allow the user to specify an agent name
+        const chainName = chain.id.at(-1);
+        const name = (metadata?.agentName ?? chainName);
+        // Attempt to automatically detect if this is an agent or chain
+        const runType = metadata?.agentName ||
+            ["AgentExecutor", "PlanAndExecute"].includes(chainName)
+            ? "agent"
+            : "chain";
+        // eslint-disable-next-line @typescript-eslint/no-unused-vars
+        const { agentName, ...rest } = metadata || {};
+        await this.monitor.trackEvent(runType, "start", {
+            runId,
+            parentRunId,
+            name,
+            input: parseInput(inputs),
+            extra: rest,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleChainEnd(outputs, runId) {
+        await this.monitor.trackEvent("chain", "end", {
+            runId,
+            output: parseOutput(outputs),
+        });
+    }
+    async handleChainError(error, runId) {
+        await this.monitor.trackEvent("chain", "error", {
+            runId,
+            error,
+        });
+    }
+    async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
+        await this.monitor.trackEvent("tool", "start", {
+            runId,
+            parentRunId,
+            name: tool.id[tool.id.length - 1],
+            input,
+            extra: metadata,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleToolEnd(output, runId) {
+        await this.monitor.trackEvent("tool", "end", {
+            runId,
+            output,
+        });
+    }
+    async handleToolError(error, runId) {
+        await this.monitor.trackEvent("tool", "error", {
+            runId,
+            error,
+        });
+    }
+}
+exports.LLMonitorHandler = LLMonitorHandler;
package/dist/callbacks/handlers/llmonitor.d.ts
@@ -0,0 +1,35 @@
+import monitor from "llmonitor";
+import { LLMonitorOptions, ChatMessage } from "llmonitor/types";
+import { BaseRun, RunUpdate as BaseRunUpdate, KVMap } from "langsmith/schemas";
+import { BaseMessage, ChainValues, Generation, LLMResult } from "../../schema/index.js";
+import { Serialized } from "../../load/serializable.js";
+import { BaseCallbackHandler, BaseCallbackHandlerInput } from "../base.js";
+type Message = BaseMessage | Generation | string;
+type OutputMessage = ChatMessage | string;
+export declare const convertToLLMonitorMessages: (input: Message | Message[] | Message[][]) => OutputMessage | OutputMessage[] | OutputMessage[][];
+export interface Run extends BaseRun {
+    id: string;
+    child_runs: this[];
+    child_execution_order: number;
+}
+export interface RunUpdate extends BaseRunUpdate {
+    events: BaseRun["events"];
+}
+export interface LLMonitorHandlerFields extends BaseCallbackHandlerInput, LLMonitorOptions {
+}
+export declare class LLMonitorHandler extends BaseCallbackHandler implements LLMonitorHandlerFields {
+    name: string;
+    monitor: typeof monitor;
+    constructor(fields?: LLMonitorHandlerFields);
+    handleLLMStart(llm: Serialized, prompts: string[], runId: string, parentRunId?: string, extraParams?: KVMap, tags?: string[], metadata?: KVMap): Promise<void>;
+    handleChatModelStart(llm: Serialized, messages: BaseMessage[][], runId: string, parentRunId?: string, extraParams?: KVMap, tags?: string[], metadata?: KVMap): Promise<void>;
+    handleLLMEnd(output: LLMResult, runId: string): Promise<void>;
+    handleLLMError(error: Error, runId: string): Promise<void>;
+    handleChainStart(chain: Serialized, inputs: ChainValues, runId: string, parentRunId?: string, tags?: string[], metadata?: KVMap): Promise<void>;
+    handleChainEnd(outputs: ChainValues, runId: string): Promise<void>;
+    handleChainError(error: Error, runId: string): Promise<void>;
+    handleToolStart(tool: Serialized, input: string, runId: string, parentRunId?: string, tags?: string[], metadata?: KVMap): Promise<void>;
+    handleToolEnd(output: string, runId: string): Promise<void>;
+    handleToolError(error: Error, runId: string): Promise<void>;
+}
+export {};
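Together with the `package/callbacks/handlers/llmonitor.*` entrypoint files above, this declaration file is the public surface of the new integration. A minimal usage sketch, assuming an app id obtained from the LLMonitor dashboard (as the implementation above shows, the handler also falls back to the `LLMONITOR_APP_ID` and `LLMONITOR_API_URL` environment variables):

import { ChatOpenAI } from "langchain/chat_models/openai";
import { LLMonitorHandler } from "langchain/callbacks/handlers/llmonitor";

// Attach the handler as a callback; every LLM/chain/tool start, end, and
// error event is then forwarded to LLMonitor via monitor.trackEvent.
const model = new ChatOpenAI({
  callbacks: [new LLMonitorHandler({ appId: "your-app-id" })], // appId is illustrative
});

const res = await model.invoke("Say hello.");
console.log(res.content);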
package/dist/callbacks/handlers/llmonitor.js
@@ -0,0 +1,215 @@
+import monitor from "llmonitor";
+import { getEnvironmentVariable } from "../../util/env.js";
+import { BaseCallbackHandler } from "../base.js";
+// Langchain Helpers
+// Input can be either a single message, an array of message, or an array of array of messages (batch requests)
+const parseRole = (id) => {
+    const roleHint = id[id.length - 1];
+    if (roleHint.includes("Human"))
+        return "user";
+    if (roleHint.includes("System"))
+        return "system";
+    if (roleHint.includes("AI"))
+        return "ai";
+    if (roleHint.includes("Function"))
+        return "function";
+    return "ai";
+};
+export const convertToLLMonitorMessages = (input) => {
+    const parseMessage = (raw) => {
+        if (typeof raw === "string")
+            return raw;
+        // sometimes the message is nested in a "message" property
+        if ("message" in raw)
+            return parseMessage(raw.message);
+        // Serialize
+        const message = JSON.parse(JSON.stringify(raw));
+        try {
+            // "id" contains an array describing the constructor, with last item actual schema type
+            const role = parseRole(message.id);
+            const obj = message.kwargs;
+            const text = message.text ?? obj.content;
+            const functionCall = obj.additional_kwargs?.function_call;
+            return {
+                role,
+                text,
+                functionCall,
+            };
+        }
+        catch (e) {
+            // if parsing fails, return the original message
+            return message.text ?? message;
+        }
+    };
+    if (Array.isArray(input)) {
+        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+        // @ts-ignore Confuses the compiler
+        return input.length === 1
+            ? convertToLLMonitorMessages(input[0])
+            : input.map(convertToLLMonitorMessages);
+    }
+    return parseMessage(input);
+};
+const parseInput = (rawInput) => {
+    if (!rawInput)
+        return null;
+    const { input, inputs, question } = rawInput;
+    if (input)
+        return input;
+    if (inputs)
+        return inputs;
+    if (question)
+        return question;
+    return rawInput;
+};
+const parseOutput = (rawOutput) => {
+    if (!rawOutput)
+        return null;
+    const { text, output, answer } = rawOutput;
+    if (text)
+        return text;
+    if (answer)
+        return answer;
+    if (output)
+        return output;
+    return rawOutput;
+};
+export class LLMonitorHandler extends BaseCallbackHandler {
+    constructor(fields = {}) {
+        super(fields);
+        Object.defineProperty(this, "name", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "llmonitor_handler"
+        });
+        Object.defineProperty(this, "monitor", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.monitor = monitor;
+        if (fields) {
+            const { appId, apiUrl, verbose } = fields;
+            this.monitor.init({
+                verbose,
+                appId: appId ?? getEnvironmentVariable("LLMONITOR_APP_ID"),
+                apiUrl: apiUrl ?? getEnvironmentVariable("LLMONITOR_API_URL"),
+            });
+        }
+    }
+    async handleLLMStart(llm, prompts, runId, parentRunId, extraParams, tags, metadata) {
+        const params = {
+            ...(extraParams?.invocation_params || {}),
+            ...(metadata || {}),
+        };
+        const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
+        const userId = params?.userId || undefined;
+        const userProps = params?.userProps || undefined;
+        await this.monitor.trackEvent("llm", "start", {
+            runId,
+            parentRunId,
+            name,
+            input: convertToLLMonitorMessages(prompts),
+            extra: params,
+            userId,
+            userProps,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleChatModelStart(llm, messages, runId, parentRunId, extraParams, tags, metadata) {
+        const params = {
+            ...(extraParams?.invocation_params || {}),
+            ...(metadata || {}),
+        };
+        const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
+        const userId = params?.userId || undefined;
+        const userProps = params?.userProps || undefined;
+        await this.monitor.trackEvent("llm", "start", {
+            runId,
+            parentRunId,
+            name,
+            input: convertToLLMonitorMessages(messages),
+            extra: params,
+            userId,
+            userProps,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleLLMEnd(output, runId) {
+        const { generations, llmOutput } = output;
+        await this.monitor.trackEvent("llm", "end", {
+            runId,
+            output: convertToLLMonitorMessages(generations),
+            tokensUsage: {
+                completion: llmOutput?.tokenUsage?.completionTokens,
+                prompt: llmOutput?.tokenUsage?.promptTokens,
+            },
+        });
+    }
+    async handleLLMError(error, runId) {
+        await this.monitor.trackEvent("llm", "error", {
+            runId,
+            error,
+        });
+    }
+    async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
+        // allow the user to specify an agent name
+        const chainName = chain.id.at(-1);
+        const name = (metadata?.agentName ?? chainName);
+        // Attempt to automatically detect if this is an agent or chain
+        const runType = metadata?.agentName ||
+            ["AgentExecutor", "PlanAndExecute"].includes(chainName)
+            ? "agent"
+            : "chain";
+        // eslint-disable-next-line @typescript-eslint/no-unused-vars
+        const { agentName, ...rest } = metadata || {};
+        await this.monitor.trackEvent(runType, "start", {
+            runId,
+            parentRunId,
+            name,
+            input: parseInput(inputs),
+            extra: rest,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleChainEnd(outputs, runId) {
+        await this.monitor.trackEvent("chain", "end", {
+            runId,
+            output: parseOutput(outputs),
+        });
+    }
+    async handleChainError(error, runId) {
+        await this.monitor.trackEvent("chain", "error", {
+            runId,
+            error,
+        });
+    }
+    async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
+        await this.monitor.trackEvent("tool", "start", {
+            runId,
+            parentRunId,
+            name: tool.id[tool.id.length - 1],
+            input,
+            extra: metadata,
+            tags,
+            runtime: "langchain-js",
+        });
+    }
+    async handleToolEnd(output, runId) {
+        await this.monitor.trackEvent("tool", "end", {
+            runId,
+            output,
+        });
+    }
+    async handleToolError(error, runId) {
+        await this.monitor.trackEvent("tool", "error", {
+            runId,
+            error,
+        });
+    }
+}
package/dist/chains/openai_functions/extraction.d.ts
@@ -1,8 +1,8 @@
 import { z } from "zod";
-import { ChatOpenAI } from "../../chat_models/openai.js";
 import { FunctionParameters } from "../../output_parsers/openai_functions.js";
 import { LLMChain } from "../llm_chain.js";
-import {
+import { BaseChatModel } from "../../chat_models/index.js";
+import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Function that creates an extraction chain using the provided JSON schema.
  * It sets up the necessary components, such as the prompt, output parser, and tags.
@@ -10,7 +10,7 @@ import { AnthropicFunctions } from "../../experimental/chat_models/anthropic_fun
  * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
  * @returns A LLMChain instance configured to return data matching the schema.
  */
-export declare function createExtractionChain(schema: FunctionParameters, llm:
+export declare function createExtractionChain(schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
 /**
  * Function that creates an extraction chain from a Zod schema. It
  * converts the Zod schema to a JSON schema using zod-to-json-schema
@@ -19,4 +19,4 @@ export declare function createExtractionChain(schema: FunctionParameters, llm: C
  * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
  * @returns A LLMChain instance configured to return data matching the schema.
  */
-export declare function createExtractionChainFromZod(schema: z.ZodObject<any, any, any, any>, llm:
+export declare function createExtractionChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
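The signature change widens `createExtractionChain` from a concrete `ChatOpenAI` parameter to any `BaseChatModel` whose call options extend `BaseFunctionCallOptions`, which is why the experimental `AnthropicFunctions` model mentioned in the doc comments now type-checks. A minimal sketch of the Zod variant, assuming these helpers are re-exported from the `langchain/chains` entrypoint:

import { z } from "zod";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { createExtractionChainFromZod } from "langchain/chains";

// The Zod schema is converted to a JSON-schema function definition internally.
const chain = createExtractionChainFromZod(
  z.object({
    name: z.string().optional(),
    height: z.number().optional(),
  }),
  new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 })
);

const result = await chain.run("Alex is 5 feet tall and likes cycling.");
console.log(result); // e.g. [{ name: "Alex", height: 5 }]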
package/dist/chains/openai_functions/openapi.d.ts
@@ -1,15 +1,15 @@
 import type { OpenAPIV3_1 } from "openapi-types";
 import { BaseChain } from "../base.js";
 import { LLMChainInput } from "../llm_chain.js";
-import { ChatOpenAI } from "../../chat_models/openai.js";
 import { BasePromptTemplate } from "../../prompts/base.js";
 import { SequentialChain } from "../sequential_chain.js";
-import {
+import { BaseChatModel } from "../../chat_models/index.js";
+import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the options for creating an OpenAPI chain.
  */
 export type OpenAPIChainOptions = {
-    llm?:
+    llm?: BaseChatModel<BaseFunctionCallOptions>;
     prompt?: BasePromptTemplate;
     requestChain?: BaseChain;
     llmChainInputs?: LLMChainInput;
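`OpenAPIChainOptions.llm` gets the same widening, so the OpenAPI chain can be driven by any function-calling-capable chat model. A minimal sketch, assuming `createOpenAPIChain` is exported from `langchain/chains`; the spec URL is hypothetical:

import { ChatOpenAI } from "langchain/chat_models/openai";
import { createOpenAPIChain } from "langchain/chains";

// The chain derives OpenAI-style function definitions from the spec's
// operations, then executes the request the model selects.
const chain = await createOpenAPIChain("https://example.com/openapi.json", {
  llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }),
});

const answer = await chain.run("What endpoints does this API expose for users?");
console.log(answer);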
package/dist/chains/openai_functions/structured_output.d.ts
@@ -7,7 +7,8 @@ import { BasePromptTemplate } from "../../prompts/index.js";
 import { BaseLLMOutputParser } from "../../schema/output_parser.js";
 import { OutputFunctionsParser } from "../../output_parsers/openai_functions.js";
 import { ChatGeneration } from "../../schema/index.js";
-import {
+import { BaseChatModel } from "../../chat_models/index.js";
+import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the input for creating a structured output chain. It
  * extends the LLMChainInput type and includes an additional
@@ -17,7 +18,7 @@ import { AnthropicFunctions } from "../../experimental/chat_models/anthropic_fun
 export type StructuredOutputChainInput = Omit<LLMChainInput, "outputParser" | "llm"> & {
     outputSchema: JsonSchema7Type;
     prompt: BasePromptTemplate;
-    llm?:
+    llm?: BaseChatModel<BaseFunctionCallOptions>;
 };
 /**
  * Class that extends the BaseLLMOutputParser class. It provides
@@ -45,5 +46,5 @@ export declare class FunctionCallStructuredOutputParser<T extends z.AnyZodObject
  * as well as an additional required "outputSchema" JSON Schema object.
  * @returns OpenAPIChain
  */
-export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput): LLMChain<any, ChatOpenAI |
-export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput, "outputSchema">): LLMChain<any, ChatOpenAI |
+export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput): LLMChain<any, ChatOpenAI | BaseChatModel<BaseFunctionCallOptions>>;
+export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput, "outputSchema">): LLMChain<any, ChatOpenAI | BaseChatModel<BaseFunctionCallOptions>>;
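The structured-output helpers get the same widening of their `llm` option. A minimal sketch of the Zod helper, assuming it is re-exported from `langchain/chains` and that the chain's default output key is `output`:

import { z } from "zod";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { PromptTemplate } from "langchain/prompts";
import { createStructuredOutputChainFromZod } from "langchain/chains";

const chain = createStructuredOutputChainFromZod(
  z.object({
    name: z.string().describe("Name of the person"),
    age: z.number().describe("Age of the person"),
  }),
  {
    prompt: PromptTemplate.fromTemplate("Extract details from: {input}"),
    llm: new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 }),
  }
);

const { output } = await chain.call({ input: "Sasha is 27 years old." });
console.log(output); // e.g. { name: "Sasha", age: 27 }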
package/dist/chains/openai_functions/tagging.d.ts
@@ -1,9 +1,9 @@
 import { z } from "zod";
-import { ChatOpenAI } from "../../chat_models/openai.js";
 import { PromptTemplate } from "../../prompts/prompt.js";
 import { FunctionParameters } from "../../output_parsers/openai_functions.js";
 import { LLMChain, LLMChainInput } from "../llm_chain.js";
-import {
+import { BaseChatModel } from "../../chat_models/index.js";
+import { BaseFunctionCallOptions } from "../../base_language/index.js";
 /**
  * Type representing the options for creating a tagging chain.
  */
@@ -19,7 +19,7 @@ export type TaggingChainOptions = {
  * @param options Options for creating the tagging chain.
  * @returns A new instance of LLMChain configured for tagging.
  */
-export declare function createTaggingChain(schema: FunctionParameters, llm:
+export declare function createTaggingChain(schema: FunctionParameters, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
 /**
  * Function that creates a tagging chain from a Zod schema. It converts
  * the Zod schema to a JSON schema using the zodToJsonSchema function and
@@ -29,4 +29,4 @@ export declare function createTaggingChain(schema: FunctionParameters, llm: Chat
  * @param options Options for creating the tagging chain.
  * @returns A new instance of LLMChain configured for tagging.
  */
-export declare function createTaggingChainFromZod(schema: z.ZodObject<any, any, any, any>, llm:
+export declare function createTaggingChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: BaseChatModel<BaseFunctionCallOptions>, options?: TaggingChainOptions): LLMChain<object, BaseChatModel<BaseFunctionCallOptions>>;
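And the same pattern for the tagging helpers. A minimal sketch, assuming `createTaggingChainFromZod` is re-exported from `langchain/chains`:

import { z } from "zod";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { createTaggingChainFromZod } from "langchain/chains";

const chain = createTaggingChainFromZod(
  z.object({
    sentiment: z.string().describe("Sentiment of the text"),
    language: z.string().describe("Language of the text"),
  }),
  new ChatOpenAI({ modelName: "gpt-3.5-turbo-0613", temperature: 0 })
);

const result = await chain.run("Estoy increiblemente contento de haberte conocido!");
console.log(result); // e.g. { sentiment: "positive", language: "spanish" }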