@langchain/core 0.1.7 → 0.1.9-rc.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/language_models/base.d.ts +21 -0
- package/dist/language_models/chat_models.cjs +8 -0
- package/dist/language_models/chat_models.d.ts +8 -0
- package/dist/language_models/chat_models.js +8 -0
- package/dist/language_models/llms.cjs +6 -1
- package/dist/language_models/llms.d.ts +6 -1
- package/dist/language_models/llms.js +6 -1
- package/dist/load/import_map.cjs +2 -1
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/runnables/branch.cjs +1 -1
- package/dist/runnables/branch.d.ts +1 -1
- package/dist/runnables/branch.js +1 -1
- package/dist/runnables/history.cjs +64 -0
- package/dist/runnables/history.d.ts +64 -0
- package/dist/runnables/history.js +64 -0
- package/dist/runnables/passthrough.cjs +1 -3
- package/dist/runnables/passthrough.d.ts +1 -1
- package/dist/runnables/passthrough.js +1 -3
- package/dist/tracers/initialize.cjs +22 -1
- package/dist/tracers/initialize.d.ts +11 -0
- package/dist/tracers/initialize.js +20 -0
- package/dist/tracers/tracer_langchain_v1.cjs +200 -0
- package/dist/tracers/tracer_langchain_v1.d.ts +59 -0
- package/dist/tracers/tracer_langchain_v1.js +196 -0
- package/package.json +9 -1
- package/tracers/tracer_langchain_v1.cjs +1 -0
- package/tracers/tracer_langchain_v1.d.ts +1 -0
- package/tracers/tracer_langchain_v1.js +1 -0
package/dist/language_models/base.d.ts
CHANGED
@@ -105,7 +105,13 @@ export interface BaseLanguageModelInterface<RunOutput = any, CallOptions extends
     CallOptions: CallOptions;
     get callKeys(): string[];
     generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
+    /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     */
     predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
+    /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     */
     predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
     _modelType(): string;
     _llmType(): string;
@@ -135,7 +141,13 @@ export declare abstract class BaseLanguageModel<RunOutput = any, CallOptions ext
     cache?: BaseCache;
     constructor({ callbacks, callbackManager, ...params }: BaseLanguageModelParams);
     abstract generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
+    /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     */
     abstract predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
+    /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     */
     abstract predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
     abstract _modelType(): string;
     abstract _llmType(): string;
@@ -163,4 +175,13 @@ export declare abstract class BaseLanguageModel<RunOutput = any, CallOptions ext
      */
     static deserialize(_data: SerializedLLM): Promise<BaseLanguageModel>;
 }
+/**
+ * Shared interface for token usage
+ * return type from LLM calls.
+ */
+export interface TokenUsage {
+    completionTokens?: number;
+    promptTokens?: number;
+    totalTokens?: number;
+}
 export {};
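The practical effect of these hunks: `predict` and `predictMessages` are flagged for removal in 0.2.0, and `TokenUsage` becomes a shared, exported type. A minimal migration sketch, assuming any concrete `BaseLanguageModel` subclass and the package's standard subpath exports:

```ts
import type { BaseLanguageModel, TokenUsage } from "@langchain/core/language_models/base";

// Before (deprecated, slated for removal in 0.2.0):
//   const text = await model.predict("What is 2 + 2?");
// After: .invoke() accepts the same string input.
async function ask(model: BaseLanguageModel, question: string) {
  return model.invoke(question);
}

// The new shared token-usage shape; every field is optional:
const usage: TokenUsage = { promptTokens: 7, completionTokens: 3, totalTokens: 10 };
```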
package/dist/language_models/chat_models.cjs
CHANGED
@@ -286,6 +286,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
         return this.generate(promptMessages, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Makes a single call to the chat model.
      * @param messages An array of BaseMessage instances.
      * @param options The call options or an array of stop sequences.
@@ -298,6 +300,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
         return generations[0][0].message;
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Makes a single call to the chat model with a prompt value.
      * @param promptValue The value of the prompt.
      * @param options The call options or an array of stop sequences.
@@ -309,6 +313,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
         return this.call(promptMessages, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Predicts the next message based on the input messages.
      * @param messages An array of BaseMessage instances.
      * @param options The call options or an array of stop sequences.
@@ -319,6 +325,8 @@ class BaseChatModel extends base_js_1.BaseLanguageModel {
         return this.call(messages, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Predicts the next message based on a text input.
      * @param text The text input.
      * @param options The call options or an array of stop sequences.
package/dist/language_models/chat_models.d.ts
CHANGED
@@ -93,6 +93,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
     generatePrompt(promptValues: BasePromptValueInterface[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
     abstract _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Makes a single call to the chat model.
      * @param messages An array of BaseMessage instances.
      * @param options The call options or an array of stop sequences.
@@ -101,6 +103,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
      */
     call(messages: BaseMessageLike[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Makes a single call to the chat model with a prompt value.
      * @param promptValue The value of the prompt.
      * @param options The call options or an array of stop sequences.
@@ -109,6 +113,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
      */
     callPrompt(promptValue: BasePromptValueInterface, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Predicts the next message based on the input messages.
      * @param messages An array of BaseMessage instances.
      * @param options The call options or an array of stop sequences.
@@ -117,6 +123,8 @@ export declare abstract class BaseChatModel<CallOptions extends BaseChatModelCal
      */
     predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Predicts the next message based on a text input.
      * @param text The text input.
      * @param options The call options or an array of stop sequences.
package/dist/language_models/chat_models.js
CHANGED
@@ -282,6 +282,8 @@ export class BaseChatModel extends BaseLanguageModel {
         return this.generate(promptMessages, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Makes a single call to the chat model.
      * @param messages An array of BaseMessage instances.
      * @param options The call options or an array of stop sequences.
@@ -294,6 +296,8 @@ export class BaseChatModel extends BaseLanguageModel {
         return generations[0][0].message;
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Makes a single call to the chat model with a prompt value.
      * @param promptValue The value of the prompt.
      * @param options The call options or an array of stop sequences.
@@ -305,6 +309,8 @@ export class BaseChatModel extends BaseLanguageModel {
         return this.call(promptMessages, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Predicts the next message based on the input messages.
      * @param messages An array of BaseMessage instances.
      * @param options The call options or an array of stop sequences.
@@ -315,6 +321,8 @@ export class BaseChatModel extends BaseLanguageModel {
         return this.call(messages, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * Predicts the next message based on a text input.
      * @param text The text input.
      * @param options The call options or an array of stop sequences.
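All four chat-model convenience methods (`call`, `callPrompt`, `predictMessages`, `predict`) now carry the same deprecation notice across the CJS build, the type declarations, and the ESM build. A hedged replacement sketch, with `model` standing in for any concrete chat model (ChatOpenAI, ChatAnthropic, and so on):

```ts
import { HumanMessage, type BaseMessage } from "@langchain/core/messages";
import type { BaseChatModel } from "@langchain/core/language_models/chat_models";

async function reply(model: BaseChatModel): Promise<BaseMessage> {
  // Before (deprecated): await model.call([new HumanMessage("Hi there")]);
  // After: .invoke() accepts the same message array (or a plain string).
  return model.invoke([new HumanMessage("Hi there")]);
}
```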
package/dist/language_models/llms.cjs
CHANGED
@@ -6,7 +6,7 @@ const outputs_js_1 = require("../outputs.cjs");
 const manager_js_1 = require("../callbacks/manager.cjs");
 const base_js_1 = require("./base.cjs");
 /**
- * LLM Wrapper.
+ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
 class BaseLLM extends base_js_1.BaseLanguageModel {
     constructor({ concurrency, ...rest }) {
@@ -252,6 +252,7 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
         return { generations, llmOutput };
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
      */
     async call(prompt, options, callbacks) {
@@ -259,6 +260,8 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
         return generations[0][0].text;
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * This method is similar to `call`, but it's used for making predictions
      * based on the input text.
      * @param text Input text for the prediction.
@@ -270,6 +273,8 @@ class BaseLLM extends base_js_1.BaseLanguageModel {
         return this.call(text, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * This method takes a list of messages, options, and callbacks, and
      * returns a predicted message.
      * @param messages A list of messages for the prediction.
package/dist/language_models/llms.d.ts
CHANGED
@@ -25,7 +25,7 @@ interface LLMGenerateCachedParameters<T extends BaseLLM<CallOptions>, CallOption
     handledOptions: RunnableConfig;
 }
 /**
- * LLM Wrapper.
+ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
 export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = BaseLLMCallOptions> extends BaseLanguageModel<string, CallOptions> {
     ParsedCallOptions: Omit<CallOptions, keyof RunnableConfig & "timeout">;
@@ -71,10 +71,13 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
      */
     generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
      */
     call(prompt: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * This method is similar to `call`, but it's used for making predictions
      * based on the input text.
      * @param text Input text for the prediction.
@@ -84,6 +87,8 @@ export declare abstract class BaseLLM<CallOptions extends BaseLLMCallOptions = B
      */
     predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * This method takes a list of messages, options, and callbacks, and
      * returns a predicted message.
      * @param messages A list of messages for the prediction.
package/dist/language_models/llms.js
CHANGED
@@ -3,7 +3,7 @@ import { RUN_KEY, GenerationChunk, } from "../outputs.js";
 import { CallbackManager, } from "../callbacks/manager.js";
 import { BaseLanguageModel, } from "./base.js";
 /**
- * LLM Wrapper.
+ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.
  */
 export class BaseLLM extends BaseLanguageModel {
     constructor({ concurrency, ...rest }) {
@@ -249,6 +249,7 @@ export class BaseLLM extends BaseLanguageModel {
         return { generations, llmOutput };
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
      * Convenience wrapper for {@link generate} that takes in a single string prompt and returns a single string output.
      */
     async call(prompt, options, callbacks) {
@@ -256,6 +257,8 @@ export class BaseLLM extends BaseLanguageModel {
         return generations[0][0].text;
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * This method is similar to `call`, but it's used for making predictions
      * based on the input text.
      * @param text Input text for the prediction.
@@ -267,6 +270,8 @@ export class BaseLLM extends BaseLanguageModel {
         return this.call(text, options, callbacks);
     }
     /**
+     * @deprecated Use .invoke() instead. Will be removed in 0.2.0.
+     *
      * This method takes a list of messages, options, and callbacks, and
      * returns a predicted message.
      * @param messages A list of messages for the prediction.
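The same deprecations land on `BaseLLM`, whose class comment also gains the clarifying second sentence. Replacement sketch for plain-text LLMs; `llm` can be any concrete `BaseLLM` subclass:

```ts
import type { BaseLLM } from "@langchain/core/language_models/llms";

// .invoke() keeps the string-in / string-out contract of the
// deprecated .call() (BaseLLM extends BaseLanguageModel<string, ...>).
async function complete(llm: BaseLLM, prompt: string): Promise<string> {
  return llm.invoke(prompt);
}
```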
package/dist/load/import_map.cjs
CHANGED
@@ -24,7 +24,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.vectorstores = exports.utils__types = exports.utils__tiktoken = exports.utils__testing = exports.utils__stream = exports.utils__math = exports.utils__json_schema = exports.utils__json_patch = exports.utils__hash = exports.utils__env = exports.utils__chunk_array = exports.utils__async_caller = exports.tracers__tracer_langchain = exports.tracers__run_collector = exports.tracers__log_stream = exports.tracers__initialize = exports.tracers__console = exports.tracers__base = exports.tools = exports.stores = exports.retrievers = exports.runnables = exports.prompt_values = exports.prompts = exports.outputs = exports.output_parsers = exports.messages = exports.memory = exports.load__serializable = exports.language_models__llms = exports.language_models__chat_models = exports.language_models__base = exports.example_selectors = exports.embeddings = exports.documents = exports.chat_history = exports.callbacks__promises = exports.callbacks__manager = exports.callbacks__base = exports.caches = exports.agents = void 0;
+exports.vectorstores = exports.utils__types = exports.utils__tiktoken = exports.utils__testing = exports.utils__stream = exports.utils__math = exports.utils__json_schema = exports.utils__json_patch = exports.utils__hash = exports.utils__env = exports.utils__chunk_array = exports.utils__async_caller = exports.tracers__tracer_langchain_v1 = exports.tracers__tracer_langchain = exports.tracers__run_collector = exports.tracers__log_stream = exports.tracers__initialize = exports.tracers__console = exports.tracers__base = exports.tools = exports.stores = exports.retrievers = exports.runnables = exports.prompt_values = exports.prompts = exports.outputs = exports.output_parsers = exports.messages = exports.memory = exports.load__serializable = exports.language_models__llms = exports.language_models__chat_models = exports.language_models__base = exports.example_selectors = exports.embeddings = exports.documents = exports.chat_history = exports.callbacks__promises = exports.callbacks__manager = exports.callbacks__base = exports.caches = exports.agents = void 0;
 exports.agents = __importStar(require("../agents.cjs"));
 exports.caches = __importStar(require("../caches.cjs"));
 exports.callbacks__base = __importStar(require("../callbacks/base.cjs"));
@@ -54,6 +54,7 @@ exports.tracers__initialize = __importStar(require("../tracers/initialize.cjs"))
 exports.tracers__log_stream = __importStar(require("../tracers/log_stream.cjs"));
 exports.tracers__run_collector = __importStar(require("../tracers/run_collector.cjs"));
 exports.tracers__tracer_langchain = __importStar(require("../tracers/tracer_langchain.cjs"));
+exports.tracers__tracer_langchain_v1 = __importStar(require("../tracers/tracer_langchain_v1.cjs"));
 exports.utils__async_caller = __importStar(require("../utils/async_caller.cjs"));
 exports.utils__chunk_array = __importStar(require("../utils/chunk_array.cjs"));
 exports.utils__env = __importStar(require("../utils/env.cjs"));
package/dist/load/import_map.d.ts
CHANGED
@@ -27,6 +27,7 @@ export * as tracers__initialize from "../tracers/initialize.js";
 export * as tracers__log_stream from "../tracers/log_stream.js";
 export * as tracers__run_collector from "../tracers/run_collector.js";
 export * as tracers__tracer_langchain from "../tracers/tracer_langchain.js";
+export * as tracers__tracer_langchain_v1 from "../tracers/tracer_langchain_v1.js";
 export * as utils__async_caller from "../utils/async_caller.js";
 export * as utils__chunk_array from "../utils/chunk_array.js";
 export * as utils__env from "../utils/env.js";
package/dist/load/import_map.js
CHANGED
@@ -28,6 +28,7 @@ export * as tracers__initialize from "../tracers/initialize.js";
 export * as tracers__log_stream from "../tracers/log_stream.js";
 export * as tracers__run_collector from "../tracers/run_collector.js";
 export * as tracers__tracer_langchain from "../tracers/tracer_langchain.js";
+export * as tracers__tracer_langchain_v1 from "../tracers/tracer_langchain_v1.js";
 export * as utils__async_caller from "../utils/async_caller.js";
 export * as utils__chunk_array from "../utils/chunk_array.js";
 export * as utils__env from "../utils/env.js";
package/dist/runnables/branch.cjs
CHANGED
@@ -84,7 +84,7 @@ class RunnableBranch extends base_js_1.Runnable {
      *
      * @example
      * ```ts
-     * import { RunnableBranch } from "langchain/
+     * import { RunnableBranch } from "@langchain/core/runnables";
      *
      * const branch = RunnableBranch.from([
      *   [(x: number) => x > 0, (x: number) => x + 1],
package/dist/runnables/branch.d.ts
CHANGED
@@ -73,7 +73,7 @@ export declare class RunnableBranch<RunInput = any, RunOutput = any> extends Run
      *
      * @example
      * ```ts
-     * import { RunnableBranch } from "langchain/
+     * import { RunnableBranch } from "@langchain/core/runnables";
      *
      * const branch = RunnableBranch.from([
      *   [(x: number) => x > 0, (x: number) => x + 1],
package/dist/runnables/branch.js
CHANGED
@@ -81,7 +81,7 @@ export class RunnableBranch extends Runnable {
      *
      * @example
      * ```ts
-     * import { RunnableBranch } from "langchain/
+     * import { RunnableBranch } from "@langchain/core/runnables";
      *
      * const branch = RunnableBranch.from([
      *   [(x: number) => x > 0, (x: number) => x + 1],
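The only change to `RunnableBranch` is fixing the doc example's import path to the new package. Expanded into a self-contained sketch; the negative and default branches here are illustrative additions, not part of the diff:

```ts
import { RunnableBranch } from "@langchain/core/runnables";

const branch = RunnableBranch.from([
  [(x: number) => x > 0, (x: number) => x + 1], // [condition, branch] pair
  [(x: number) => x < 0, (x: number) => x - 1],
  (x: number) => x, // default branch, required by .from()
]);

// await branch.invoke(2);  // -> 3
// await branch.invoke(-2); // -> -3
// await branch.invoke(0);  // -> 0
```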
package/dist/runnables/history.cjs
CHANGED
@@ -4,6 +4,70 @@ exports.RunnableWithMessageHistory = void 0;
 const index_js_1 = require("../messages/index.cjs");
 const base_js_1 = require("./base.cjs");
 const passthrough_js_1 = require("./passthrough.cjs");
+/**
+ * Wraps a LCEL chain and manages history. It appends input messages
+ * and chain outputs as history, and adds the current history messages to
+ * the chain input.
+ * @example
+ * ```typescript
+ * // yarn add @langchain/anthropic @langchain/community @upstash/redis
+ *
+ * import {
+ *   ChatPromptTemplate,
+ *   MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
+ * // For demos, you can also use an in-memory store:
+ * // import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["system", "You're an assistant who's good at {ability}"],
+ *   new MessagesPlaceholder("history"),
+ *   ["human", "{question}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(new ChatAnthropic({}));
+ *
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ *   runnable: chain,
+ *   getMessageHistory: (sessionId) =>
+ *     new UpstashRedisChatMessageHistory({
+ *       sessionId,
+ *       config: {
+ *         url: process.env.UPSTASH_REDIS_REST_URL!,
+ *         token: process.env.UPSTASH_REDIS_REST_TOKEN!,
+ *       },
+ *     }),
+ *   inputMessagesKey: "question",
+ *   historyMessagesKey: "history",
+ * });
+ *
+ * const result = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What does cosine mean?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ *
+ * const result2 = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What's its inverse?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ * ```
+ */
 class RunnableWithMessageHistory extends base_js_1.RunnableBinding {
     constructor(fields) {
         let historyChain = new base_js_1.RunnableLambda({
package/dist/runnables/history.d.ts
CHANGED
@@ -12,6 +12,70 @@ export interface RunnableWithMessageHistoryInputs<RunInput, RunOutput> extends O
     historyMessagesKey?: string;
     config?: RunnableConfig;
 }
+/**
+ * Wraps a LCEL chain and manages history. It appends input messages
+ * and chain outputs as history, and adds the current history messages to
+ * the chain input.
+ * @example
+ * ```typescript
+ * // yarn add @langchain/anthropic @langchain/community @upstash/redis
+ *
+ * import {
+ *   ChatPromptTemplate,
+ *   MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
+ * // For demos, you can also use an in-memory store:
+ * // import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["system", "You're an assistant who's good at {ability}"],
+ *   new MessagesPlaceholder("history"),
+ *   ["human", "{question}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(new ChatAnthropic({}));
+ *
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ *   runnable: chain,
+ *   getMessageHistory: (sessionId) =>
+ *     new UpstashRedisChatMessageHistory({
+ *       sessionId,
+ *       config: {
+ *         url: process.env.UPSTASH_REDIS_REST_URL!,
+ *         token: process.env.UPSTASH_REDIS_REST_TOKEN!,
+ *       },
+ *     }),
+ *   inputMessagesKey: "question",
+ *   historyMessagesKey: "history",
+ * });
+ *
+ * const result = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What does cosine mean?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ *
+ * const result2 = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What's its inverse?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ * ```
+ */
 export declare class RunnableWithMessageHistory<RunInput, RunOutput> extends RunnableBinding<RunInput, RunOutput> {
     runnable: Runnable<RunInput, RunOutput>;
     inputMessagesKey?: string;
package/dist/runnables/history.js
CHANGED
@@ -1,6 +1,70 @@
 import { AIMessage, HumanMessage, isBaseMessage, } from "../messages/index.js";
 import { RunnableBinding, RunnableLambda, } from "./base.js";
 import { RunnablePassthrough } from "./passthrough.js";
+/**
+ * Wraps a LCEL chain and manages history. It appends input messages
+ * and chain outputs as history, and adds the current history messages to
+ * the chain input.
+ * @example
+ * ```typescript
+ * // yarn add @langchain/anthropic @langchain/community @upstash/redis
+ *
+ * import {
+ *   ChatPromptTemplate,
+ *   MessagesPlaceholder,
+ * } from "@langchain/core/prompts";
+ * import { ChatAnthropic } from "@langchain/anthropic";
+ * import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis";
+ * // For demos, you can also use an in-memory store:
+ * // import { ChatMessageHistory } from "langchain/stores/message/in_memory";
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["system", "You're an assistant who's good at {ability}"],
+ *   new MessagesPlaceholder("history"),
+ *   ["human", "{question}"],
+ * ]);
+ *
+ * const chain = prompt.pipe(new ChatAnthropic({}));
+ *
+ * const chainWithHistory = new RunnableWithMessageHistory({
+ *   runnable: chain,
+ *   getMessageHistory: (sessionId) =>
+ *     new UpstashRedisChatMessageHistory({
+ *       sessionId,
+ *       config: {
+ *         url: process.env.UPSTASH_REDIS_REST_URL!,
+ *         token: process.env.UPSTASH_REDIS_REST_TOKEN!,
+ *       },
+ *     }),
+ *   inputMessagesKey: "question",
+ *   historyMessagesKey: "history",
+ * });
+ *
+ * const result = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What does cosine mean?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ *
+ * const result2 = await chainWithHistory.invoke(
+ *   {
+ *     ability: "math",
+ *     question: "What's its inverse?",
+ *   },
+ *   {
+ *     configurable: {
+ *       sessionId: "some_string_identifying_a_user",
+ *     },
+ *   }
+ * );
+ * ```
+ */
 export class RunnableWithMessageHistory extends RunnableBinding {
     constructor(fields) {
         let historyChain = new RunnableLambda({
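The JSDoc example above mentions an in-memory history only in a comment; a hedged sketch of that variant, where `chain` is the `prompt.pipe(model)` chain from the example and the `ChatMessageHistory` module path follows the commented-out import:

```ts
import { RunnableWithMessageHistory } from "@langchain/core/runnables";
import { ChatMessageHistory } from "langchain/stores/message/in_memory";

// One in-memory history object per session id.
const histories: Record<string, ChatMessageHistory> = {};

const chainWithHistory = new RunnableWithMessageHistory({
  runnable: chain, // the prompt.pipe(model) chain from the JSDoc example
  getMessageHistory: (sessionId: string) =>
    (histories[sessionId] ??= new ChatMessageHistory()),
  inputMessagesKey: "question",
  historyMessagesKey: "history",
});
```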
package/dist/runnables/passthrough.cjs
CHANGED
@@ -81,9 +81,7 @@ class RunnablePassthrough extends base_js_1.Runnable {
      * });
      * ```
      */
-    static assign(
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    mapping) {
+    static assign(mapping) {
         return new base_js_1.RunnableAssign(new base_js_1.RunnableMap({ steps: mapping }));
     }
 }
package/dist/runnables/passthrough.d.ts
CHANGED
@@ -60,5 +60,5 @@ export declare class RunnablePassthrough<RunInput> extends Runnable<RunInput, Ru
      * });
      * ```
      */
-    static assign
+    static assign<RunInput extends Record<string, unknown>, RunOutput extends Record<string, unknown>>(mapping: RunnableMapLike<RunInput, RunOutput>): RunnableAssign<RunInput, RunInput & RunOutput>;
 }
package/dist/runnables/passthrough.js
CHANGED
@@ -78,9 +78,7 @@ export class RunnablePassthrough extends Runnable {
      * });
      * ```
      */
-    static assign(
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    mapping) {
+    static assign(mapping) {
         return new RunnableAssign(new RunnableMap({ steps: mapping }));
     }
 }
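`RunnablePassthrough.assign` goes from an untyped `any` mapping to a typed signature that merges the input record with the mapped keys. A small usage sketch:

```ts
import { RunnablePassthrough } from "@langchain/core/runnables";

const chain = RunnablePassthrough.assign({
  doubled: (input: { num: number }) => input.num * 2,
});

// Output type is now inferred as RunInput & RunOutput:
// await chain.invoke({ num: 3 }); // -> { num: 3, doubled: 6 }
```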
package/dist/tracers/initialize.cjs
CHANGED
@@ -1,7 +1,28 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.getTracingV2CallbackHandler = void 0;
+exports.getTracingV2CallbackHandler = exports.getTracingCallbackHandler = void 0;
 const tracer_langchain_js_1 = require("./tracer_langchain.cjs");
+const tracer_langchain_v1_js_1 = require("./tracer_langchain_v1.cjs");
+/**
+ * @deprecated Use the V2 handler instead.
+ *
+ * Function that returns an instance of `LangChainTracerV1`. If a session
+ * is provided, it loads that session into the tracer; otherwise, it loads
+ * a default session.
+ * @param session Optional session to load into the tracer.
+ * @returns An instance of `LangChainTracerV1`.
+ */
+async function getTracingCallbackHandler(session) {
+    const tracer = new tracer_langchain_v1_js_1.LangChainTracerV1();
+    if (session) {
+        await tracer.loadSession(session);
+    }
+    else {
+        await tracer.loadDefaultSession();
+    }
+    return tracer;
+}
+exports.getTracingCallbackHandler = getTracingCallbackHandler;
 /**
  * Function that returns an instance of `LangChainTracer`. It does not
  * load any session data.
package/dist/tracers/initialize.d.ts
CHANGED
@@ -1,4 +1,15 @@
 import { LangChainTracer } from "./tracer_langchain.js";
+import { LangChainTracerV1 } from "./tracer_langchain_v1.js";
+/**
+ * @deprecated Use the V2 handler instead.
+ *
+ * Function that returns an instance of `LangChainTracerV1`. If a session
+ * is provided, it loads that session into the tracer; otherwise, it loads
+ * a default session.
+ * @param session Optional session to load into the tracer.
+ * @returns An instance of `LangChainTracerV1`.
+ */
+export declare function getTracingCallbackHandler(session?: string): Promise<LangChainTracerV1>;
 /**
  * Function that returns an instance of `LangChainTracer`. It does not
  * load any session data.
package/dist/tracers/initialize.js
CHANGED
@@ -1,4 +1,24 @@
 import { LangChainTracer } from "./tracer_langchain.js";
+import { LangChainTracerV1 } from "./tracer_langchain_v1.js";
+/**
+ * @deprecated Use the V2 handler instead.
+ *
+ * Function that returns an instance of `LangChainTracerV1`. If a session
+ * is provided, it loads that session into the tracer; otherwise, it loads
+ * a default session.
+ * @param session Optional session to load into the tracer.
+ * @returns An instance of `LangChainTracerV1`.
+ */
+export async function getTracingCallbackHandler(session) {
+    const tracer = new LangChainTracerV1();
+    if (session) {
+        await tracer.loadSession(session);
+    }
+    else {
+        await tracer.loadDefaultSession();
+    }
+    return tracer;
+}
 /**
  * Function that returns an instance of `LangChainTracer`. It does not
  * load any session data.
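`getTracingCallbackHandler` returns to the tracers entrypoint alongside the existing V2 helper. A sketch of both paths, assuming top-level await and an illustrative session name:

```ts
import {
  getTracingCallbackHandler,   // deprecated: V1, loads a session first
  getTracingV2CallbackHandler, // preferred: no session loading
} from "@langchain/core/tracers/initialize";

const v2Tracer = await getTracingV2CallbackHandler();
const v1Tracer = await getTracingCallbackHandler("my-session"); // "my-session" is hypothetical
```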
package/dist/tracers/tracer_langchain_v1.cjs
ADDED
@@ -0,0 +1,200 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LangChainTracerV1 = void 0;
+const index_js_1 = require("../messages/index.cjs");
+const env_js_1 = require("../utils/env.cjs");
+const base_js_1 = require("./base.cjs");
+/** @deprecated Use LangChainTracer instead. */
+class LangChainTracerV1 extends base_js_1.BaseTracer {
+    constructor() {
+        super();
+        Object.defineProperty(this, "name", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "langchain_tracer"
+        });
+        Object.defineProperty(this, "endpoint", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_ENDPOINT") || "http://localhost:1984"
+        });
+        Object.defineProperty(this, "headers", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: {
+                "Content-Type": "application/json",
+            }
+        });
+        Object.defineProperty(this, "session", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const apiKey = (0, env_js_1.getEnvironmentVariable)("LANGCHAIN_API_KEY");
+        if (apiKey) {
+            this.headers["x-api-key"] = apiKey;
+        }
+    }
+    async newSession(sessionName) {
+        const sessionCreate = {
+            start_time: Date.now(),
+            name: sessionName,
+        };
+        const session = await this.persistSession(sessionCreate);
+        this.session = session;
+        return session;
+    }
+    async loadSession(sessionName) {
+        const endpoint = `${this.endpoint}/sessions?name=${sessionName}`;
+        return this._handleSessionResponse(endpoint);
+    }
+    async loadDefaultSession() {
+        const endpoint = `${this.endpoint}/sessions?name=default`;
+        return this._handleSessionResponse(endpoint);
+    }
+    async convertV2RunToRun(run) {
+        const session = this.session ?? (await this.loadDefaultSession());
+        const serialized = run.serialized;
+        let runResult;
+        if (run.run_type === "llm") {
+            const prompts = run.inputs.prompts
+                ? run.inputs.prompts
+                : run.inputs.messages.map((x) => (0, index_js_1.getBufferString)(x));
+            const llmRun = {
+                uuid: run.id,
+                start_time: run.start_time,
+                end_time: run.end_time,
+                execution_order: run.execution_order,
+                child_execution_order: run.child_execution_order,
+                serialized,
+                type: run.run_type,
+                session_id: session.id,
+                prompts,
+                response: run.outputs,
+            };
+            runResult = llmRun;
+        }
+        else if (run.run_type === "chain") {
+            const child_runs = await Promise.all(run.child_runs.map((child_run) => this.convertV2RunToRun(child_run)));
+            const chainRun = {
+                uuid: run.id,
+                start_time: run.start_time,
+                end_time: run.end_time,
+                execution_order: run.execution_order,
+                child_execution_order: run.child_execution_order,
+                serialized,
+                type: run.run_type,
+                session_id: session.id,
+                inputs: run.inputs,
+                outputs: run.outputs,
+                child_llm_runs: child_runs.filter((child_run) => child_run.type === "llm"),
+                child_chain_runs: child_runs.filter((child_run) => child_run.type === "chain"),
+                child_tool_runs: child_runs.filter((child_run) => child_run.type === "tool"),
+            };
+            runResult = chainRun;
+        }
+        else if (run.run_type === "tool") {
+            const child_runs = await Promise.all(run.child_runs.map((child_run) => this.convertV2RunToRun(child_run)));
+            const toolRun = {
+                uuid: run.id,
+                start_time: run.start_time,
+                end_time: run.end_time,
+                execution_order: run.execution_order,
+                child_execution_order: run.child_execution_order,
+                serialized,
+                type: run.run_type,
+                session_id: session.id,
+                tool_input: run.inputs.input,
+                output: run.outputs?.output,
+                action: JSON.stringify(serialized),
+                child_llm_runs: child_runs.filter((child_run) => child_run.type === "llm"),
+                child_chain_runs: child_runs.filter((child_run) => child_run.type === "chain"),
+                child_tool_runs: child_runs.filter((child_run) => child_run.type === "tool"),
+            };
+            runResult = toolRun;
+        }
+        else {
+            throw new Error(`Unknown run type: ${run.run_type}`);
+        }
+        return runResult;
+    }
+    async persistRun(run) {
+        let endpoint;
+        let v1Run;
+        if (run.run_type !== undefined) {
+            v1Run = await this.convertV2RunToRun(run);
+        }
+        else {
+            v1Run = run;
+        }
+        if (v1Run.type === "llm") {
+            endpoint = `${this.endpoint}/llm-runs`;
+        }
+        else if (v1Run.type === "chain") {
+            endpoint = `${this.endpoint}/chain-runs`;
+        }
+        else {
+            endpoint = `${this.endpoint}/tool-runs`;
+        }
+        const response = await fetch(endpoint, {
+            method: "POST",
+            headers: this.headers,
+            body: JSON.stringify(v1Run),
+        });
+        if (!response.ok) {
+            console.error(`Failed to persist run: ${response.status} ${response.statusText}`);
+        }
+    }
+    async persistSession(sessionCreate) {
+        const endpoint = `${this.endpoint}/sessions`;
+        const response = await fetch(endpoint, {
+            method: "POST",
+            headers: this.headers,
+            body: JSON.stringify(sessionCreate),
+        });
+        if (!response.ok) {
+            console.error(`Failed to persist session: ${response.status} ${response.statusText}, using default session.`);
+            return {
+                id: 1,
+                ...sessionCreate,
+            };
+        }
+        return {
+            id: (await response.json()).id,
+            ...sessionCreate,
+        };
+    }
+    async _handleSessionResponse(endpoint) {
+        const response = await fetch(endpoint, {
+            method: "GET",
+            headers: this.headers,
+        });
+        let tracerSession;
+        if (!response.ok) {
+            console.error(`Failed to load session: ${response.status} ${response.statusText}`);
+            tracerSession = {
+                id: 1,
+                start_time: Date.now(),
+            };
+            this.session = tracerSession;
+            return tracerSession;
+        }
+        const resp = (await response.json());
+        if (resp.length === 0) {
+            tracerSession = {
+                id: 1,
+                start_time: Date.now(),
+            };
+            this.session = tracerSession;
+            return tracerSession;
+        }
+        [tracerSession] = resp;
+        this.session = tracerSession;
+        return tracerSession;
+    }
+}
+exports.LangChainTracerV1 = LangChainTracerV1;
package/dist/tracers/tracer_langchain_v1.d.ts
ADDED
@@ -0,0 +1,59 @@
+import type { ChainValues } from "../utils/types.js";
+import type { LLMResult } from "../outputs.js";
+import { BaseTracer, type RunType, type Run } from "./base.js";
+export interface BaseRunV1 {
+    uuid: string;
+    parent_uuid?: string;
+    start_time: number;
+    end_time?: number;
+    execution_order: number;
+    child_execution_order: number;
+    serialized: {
+        name: string;
+    };
+    session_id: number;
+    error?: string;
+    type: RunType;
+}
+export interface LLMRun extends BaseRunV1 {
+    prompts: string[];
+    response?: LLMResult;
+}
+export interface ChainRun extends BaseRunV1 {
+    inputs: ChainValues;
+    outputs?: ChainValues;
+    child_llm_runs: LLMRun[];
+    child_chain_runs: ChainRun[];
+    child_tool_runs: ToolRun[];
+}
+export interface ToolRun extends BaseRunV1 {
+    tool_input: string;
+    output?: string;
+    action: string;
+    child_llm_runs: LLMRun[];
+    child_chain_runs: ChainRun[];
+    child_tool_runs: ToolRun[];
+}
+export interface BaseTracerSession {
+    start_time: number;
+    name?: string;
+}
+export type TracerSessionCreate = BaseTracerSession;
+export interface TracerSessionV1 extends BaseTracerSession {
+    id: number;
+}
+/** @deprecated Use LangChainTracer instead. */
+export declare class LangChainTracerV1 extends BaseTracer {
+    name: string;
+    protected endpoint: string;
+    protected headers: Record<string, string>;
+    protected session: TracerSessionV1;
+    constructor();
+    newSession(sessionName?: string): Promise<TracerSessionV1>;
+    loadSession(sessionName: string): Promise<TracerSessionV1>;
+    loadDefaultSession(): Promise<TracerSessionV1>;
+    protected convertV2RunToRun(run: Run): Promise<LLMRun | ChainRun | ToolRun>;
+    protected persistRun(run: Run | LLMRun | ChainRun | ToolRun): Promise<void>;
+    protected persistSession(sessionCreate: BaseTracerSession): Promise<TracerSessionV1>;
+    protected _handleSessionResponse(endpoint: string): Promise<TracerSessionV1>;
+}
package/dist/tracers/tracer_langchain_v1.js
ADDED
@@ -0,0 +1,196 @@
+import { getBufferString } from "../messages/index.js";
+import { getEnvironmentVariable } from "../utils/env.js";
+import { BaseTracer } from "./base.js";
+/** @deprecated Use LangChainTracer instead. */
+export class LangChainTracerV1 extends BaseTracer {
+    constructor() {
+        super();
+        Object.defineProperty(this, "name", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: "langchain_tracer"
+        });
+        Object.defineProperty(this, "endpoint", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: getEnvironmentVariable("LANGCHAIN_ENDPOINT") || "http://localhost:1984"
+        });
+        Object.defineProperty(this, "headers", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: {
+                "Content-Type": "application/json",
+            }
+        });
+        Object.defineProperty(this, "session", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const apiKey = getEnvironmentVariable("LANGCHAIN_API_KEY");
+        if (apiKey) {
+            this.headers["x-api-key"] = apiKey;
+        }
+    }
+    async newSession(sessionName) {
+        const sessionCreate = {
+            start_time: Date.now(),
+            name: sessionName,
+        };
+        const session = await this.persistSession(sessionCreate);
+        this.session = session;
+        return session;
+    }
+    async loadSession(sessionName) {
+        const endpoint = `${this.endpoint}/sessions?name=${sessionName}`;
+        return this._handleSessionResponse(endpoint);
+    }
+    async loadDefaultSession() {
+        const endpoint = `${this.endpoint}/sessions?name=default`;
+        return this._handleSessionResponse(endpoint);
+    }
+    async convertV2RunToRun(run) {
+        const session = this.session ?? (await this.loadDefaultSession());
+        const serialized = run.serialized;
+        let runResult;
+        if (run.run_type === "llm") {
+            const prompts = run.inputs.prompts
+                ? run.inputs.prompts
+                : run.inputs.messages.map((x) => getBufferString(x));
+            const llmRun = {
+                uuid: run.id,
+                start_time: run.start_time,
+                end_time: run.end_time,
+                execution_order: run.execution_order,
+                child_execution_order: run.child_execution_order,
+                serialized,
+                type: run.run_type,
+                session_id: session.id,
+                prompts,
+                response: run.outputs,
+            };
+            runResult = llmRun;
+        }
+        else if (run.run_type === "chain") {
+            const child_runs = await Promise.all(run.child_runs.map((child_run) => this.convertV2RunToRun(child_run)));
+            const chainRun = {
+                uuid: run.id,
+                start_time: run.start_time,
+                end_time: run.end_time,
+                execution_order: run.execution_order,
+                child_execution_order: run.child_execution_order,
+                serialized,
+                type: run.run_type,
+                session_id: session.id,
+                inputs: run.inputs,
+                outputs: run.outputs,
+                child_llm_runs: child_runs.filter((child_run) => child_run.type === "llm"),
+                child_chain_runs: child_runs.filter((child_run) => child_run.type === "chain"),
+                child_tool_runs: child_runs.filter((child_run) => child_run.type === "tool"),
+            };
+            runResult = chainRun;
+        }
+        else if (run.run_type === "tool") {
+            const child_runs = await Promise.all(run.child_runs.map((child_run) => this.convertV2RunToRun(child_run)));
+            const toolRun = {
+                uuid: run.id,
+                start_time: run.start_time,
+                end_time: run.end_time,
+                execution_order: run.execution_order,
+                child_execution_order: run.child_execution_order,
+                serialized,
+                type: run.run_type,
+                session_id: session.id,
+                tool_input: run.inputs.input,
+                output: run.outputs?.output,
+                action: JSON.stringify(serialized),
+                child_llm_runs: child_runs.filter((child_run) => child_run.type === "llm"),
+                child_chain_runs: child_runs.filter((child_run) => child_run.type === "chain"),
+                child_tool_runs: child_runs.filter((child_run) => child_run.type === "tool"),
+            };
+            runResult = toolRun;
+        }
+        else {
+            throw new Error(`Unknown run type: ${run.run_type}`);
+        }
+        return runResult;
+    }
+    async persistRun(run) {
+        let endpoint;
+        let v1Run;
+        if (run.run_type !== undefined) {
+            v1Run = await this.convertV2RunToRun(run);
+        }
+        else {
+            v1Run = run;
+        }
+        if (v1Run.type === "llm") {
+            endpoint = `${this.endpoint}/llm-runs`;
+        }
+        else if (v1Run.type === "chain") {
+            endpoint = `${this.endpoint}/chain-runs`;
+        }
+        else {
+            endpoint = `${this.endpoint}/tool-runs`;
+        }
+        const response = await fetch(endpoint, {
+            method: "POST",
+            headers: this.headers,
+            body: JSON.stringify(v1Run),
+        });
+        if (!response.ok) {
+            console.error(`Failed to persist run: ${response.status} ${response.statusText}`);
+        }
+    }
+    async persistSession(sessionCreate) {
+        const endpoint = `${this.endpoint}/sessions`;
+        const response = await fetch(endpoint, {
+            method: "POST",
+            headers: this.headers,
+            body: JSON.stringify(sessionCreate),
+        });
+        if (!response.ok) {
+            console.error(`Failed to persist session: ${response.status} ${response.statusText}, using default session.`);
+            return {
+                id: 1,
+                ...sessionCreate,
+            };
+        }
+        return {
+            id: (await response.json()).id,
+            ...sessionCreate,
+        };
+    }
+    async _handleSessionResponse(endpoint) {
+        const response = await fetch(endpoint, {
+            method: "GET",
+            headers: this.headers,
+        });
+        let tracerSession;
+        if (!response.ok) {
+            console.error(`Failed to load session: ${response.status} ${response.statusText}`);
+            tracerSession = {
+                id: 1,
+                start_time: Date.now(),
+            };
+            this.session = tracerSession;
+            return tracerSession;
+        }
+        const resp = (await response.json());
+        if (resp.length === 0) {
+            tracerSession = {
+                id: 1,
+                start_time: Date.now(),
+            };
+            this.session = tracerSession;
+            return tracerSession;
+        }
+        [tracerSession] = resp;
+        this.session = tracerSession;
+        return tracerSession;
+    }
+}
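All three builds of the V1 tracer ship the identical logic shown in the `.cjs` hunk above. A hedged sketch of wiring it into a run via per-call callbacks; runs are POSTed to `LANGCHAIN_ENDPOINT` (default `http://localhost:1984`), and the import path assumes the new subpath export added below in package.json:

```ts
import { LangChainTracerV1 } from "@langchain/core/tracers/tracer_langchain_v1";
import type { BaseLanguageModel } from "@langchain/core/language_models/base";

async function invokeWithV1Tracing(model: BaseLanguageModel, prompt: string) {
  const tracer = new LangChainTracerV1();
  await tracer.loadDefaultSession(); // falls back to a stub session on failure
  return model.invoke(prompt, { callbacks: [tracer] });
}
```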
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/core",
-  "version": "0.1.7",
+  "version": "0.1.9-rc.0",
   "description": "Core LangChain.js abstractions and schemas",
   "type": "module",
   "engines": {
@@ -234,6 +234,11 @@
       "import": "./tracers/tracer_langchain.js",
       "require": "./tracers/tracer_langchain.cjs"
     },
+    "./tracers/tracer_langchain_v1": {
+      "types": "./tracers/tracer_langchain_v1.d.ts",
+      "import": "./tracers/tracer_langchain_v1.js",
+      "require": "./tracers/tracer_langchain_v1.cjs"
+    },
     "./utils/async_caller": {
       "types": "./utils/async_caller.d.ts",
       "import": "./utils/async_caller.js",
@@ -388,6 +393,9 @@
     "tracers/tracer_langchain.cjs",
     "tracers/tracer_langchain.js",
     "tracers/tracer_langchain.d.ts",
+    "tracers/tracer_langchain_v1.cjs",
+    "tracers/tracer_langchain_v1.js",
+    "tracers/tracer_langchain_v1.d.ts",
     "utils/async_caller.cjs",
     "utils/async_caller.js",
     "utils/async_caller.d.ts",
package/tracers/tracer_langchain_v1.cjs
ADDED
@@ -0,0 +1 @@
+module.exports = require('../dist/tracers/tracer_langchain_v1.cjs');
package/tracers/tracer_langchain_v1.d.ts
ADDED
@@ -0,0 +1 @@
+export * from '../dist/tracers/tracer_langchain_v1.js'
package/tracers/tracer_langchain_v1.js
ADDED
@@ -0,0 +1 @@
+export * from '../dist/tracers/tracer_langchain_v1.js'
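With the new `exports` entry plus these three root-level shims, the module should resolve under both module systems:

```ts
// ESM (uses the "import" condition):
import { LangChainTracerV1 } from "@langchain/core/tracers/tracer_langchain_v1";

// CommonJS (uses the "require" condition via the .cjs shim):
// const { LangChainTracerV1 } = require("@langchain/core/tracers/tracer_langchain_v1");
```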