langchain 0.0.199 → 0.0.201
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/agents/toolkits/connery.cjs +1 -0
- package/agents/toolkits/connery.d.ts +1 -0
- package/agents/toolkits/connery.js +1 -0
- package/dist/agents/index.cjs +3 -1
- package/dist/agents/index.d.ts +1 -1
- package/dist/agents/index.js +1 -1
- package/dist/agents/toolkits/connery/index.cjs +39 -0
- package/dist/agents/toolkits/connery/index.d.ts +23 -0
- package/dist/agents/toolkits/connery/index.js +35 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -1
- package/dist/chat_models/fake.cjs +2 -114
- package/dist/chat_models/fake.d.ts +1 -52
- package/dist/chat_models/fake.js +1 -113
- package/dist/chat_models/llama_cpp.cjs +2 -1
- package/dist/chat_models/llama_cpp.d.ts +1 -1
- package/dist/chat_models/llama_cpp.js +2 -1
- package/dist/chat_models/minimax.d.ts +1 -1
- package/dist/document_loaders/fs/obsidian.cjs +240 -0
- package/dist/document_loaders/fs/obsidian.d.ts +26 -0
- package/dist/document_loaders/fs/obsidian.js +233 -0
- package/dist/embeddings/gradient_ai.cjs +103 -0
- package/dist/embeddings/gradient_ai.d.ts +48 -0
- package/dist/embeddings/gradient_ai.js +99 -0
- package/dist/llms/gradient_ai.cjs +22 -8
- package/dist/llms/gradient_ai.d.ts +7 -2
- package/dist/llms/gradient_ai.js +22 -8
- package/dist/llms/llama_cpp.cjs +2 -1
- package/dist/llms/llama_cpp.d.ts +1 -1
- package/dist/llms/llama_cpp.js +2 -1
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/load/import_map.cjs +5 -3
- package/dist/load/import_map.d.ts +2 -0
- package/dist/load/import_map.js +2 -0
- package/dist/memory/vector_store.cjs +1 -1
- package/dist/memory/vector_store.js +1 -1
- package/dist/tools/connery.cjs +279 -0
- package/dist/tools/connery.d.ts +145 -0
- package/dist/tools/connery.js +274 -0
- package/dist/tools/gmail/base.cjs +69 -0
- package/dist/tools/gmail/base.d.ts +19 -0
- package/dist/tools/gmail/base.js +65 -0
- package/dist/tools/gmail/create_draft.cjs +62 -0
- package/dist/tools/gmail/create_draft.d.ts +35 -0
- package/dist/tools/gmail/create_draft.js +58 -0
- package/dist/tools/gmail/descriptions.cjs +118 -0
- package/dist/tools/gmail/descriptions.d.ts +5 -0
- package/dist/tools/gmail/descriptions.js +115 -0
- package/dist/tools/gmail/get_message.cjs +83 -0
- package/dist/tools/gmail/get_message.d.ts +18 -0
- package/dist/tools/gmail/get_message.js +79 -0
- package/dist/tools/gmail/get_thread.cjs +89 -0
- package/dist/tools/gmail/get_thread.d.ts +18 -0
- package/dist/tools/gmail/get_thread.js +85 -0
- package/dist/tools/gmail/index.cjs +13 -0
- package/dist/tools/gmail/index.d.ts +11 -0
- package/dist/tools/gmail/index.js +5 -0
- package/dist/tools/gmail/search.cjs +118 -0
- package/dist/tools/gmail/search.d.ts +29 -0
- package/dist/tools/gmail/search.js +114 -0
- package/dist/tools/gmail/send_message.cjs +74 -0
- package/dist/tools/gmail/send_message.d.ts +35 -0
- package/dist/tools/gmail/send_message.js +70 -0
- package/dist/tools/webbrowser.cjs +1 -1
- package/dist/tools/webbrowser.js +1 -1
- package/dist/tools/wolframalpha.cjs +1 -1
- package/dist/tools/wolframalpha.js +1 -1
- package/dist/util/document.cjs +1 -1
- package/dist/util/document.d.ts +1 -1
- package/dist/util/document.js +1 -1
- package/dist/util/tiktoken.cjs +15 -24
- package/dist/util/tiktoken.d.ts +1 -9
- package/dist/util/tiktoken.js +1 -21
- package/document_loaders/fs/obsidian.cjs +1 -0
- package/document_loaders/fs/obsidian.d.ts +1 -0
- package/document_loaders/fs/obsidian.js +1 -0
- package/embeddings/gradient_ai.cjs +1 -0
- package/embeddings/gradient_ai.d.ts +1 -0
- package/embeddings/gradient_ai.js +1 -0
- package/package.json +43 -3
- package/tools/connery.cjs +1 -0
- package/tools/connery.d.ts +1 -0
- package/tools/connery.js +1 -0
- package/tools/gmail.cjs +1 -0
- package/tools/gmail.d.ts +1 -0
- package/tools/gmail.js +1 -0
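
The headline additions in this range are a Connery tools/toolkit integration, a Gmail toolset, an Obsidian document loader, and Gradient AI embeddings, each wired into the package's entrypoints and import map. A minimal import sketch of the new surface follows; the Connery names are confirmed by the diff below, while the Obsidian, Gradient, and Gmail class names are assumptions inferred from the module paths and are not shown in this diff:

// Sketch of the new public entrypoints in this release.
// ConneryService and ConneryToolkit appear in the diff below; the other
// class names are assumptions inferred from the file paths.
import { ConneryService } from "langchain/tools/connery";
import { ConneryToolkit } from "langchain/agents/toolkits/connery";
import { ObsidianLoader } from "langchain/document_loaders/fs/obsidian"; // assumed name
import { GradientEmbeddings } from "langchain/embeddings/gradient_ai"; // assumed name
import { GmailSearch, GmailSendMessage } from "langchain/tools/gmail"; // assumed names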
package/agents/toolkits/connery.cjs
ADDED
@@ -0,0 +1 @@
+module.exports = require('../../dist/agents/toolkits/connery/index.cjs');

package/agents/toolkits/connery.d.ts
ADDED
@@ -0,0 +1 @@
+export * from '../../dist/agents/toolkits/connery/index.js'

package/agents/toolkits/connery.js
ADDED
@@ -0,0 +1 @@
+export * from '../../dist/agents/toolkits/connery/index.js'
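
Each stub is a one-line re-export mapping the public subpath onto the compiled output in dist, so CommonJS and ESM consumers resolve the same module. A sketch of both resolution paths (assuming the corresponding entry in package.json's exports map, which this release also updates):

// ESM consumers resolve the .js stub:
import { ConneryToolkit } from "langchain/agents/toolkits/connery";

// CommonJS consumers resolve the .cjs stub instead:
// const { ConneryToolkit } = require("langchain/agents/toolkits/connery");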
package/dist/agents/index.cjs
CHANGED
@@ -1,9 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.XMLAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
+exports.XMLAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.RunnableAgent = exports.BaseMultiActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
 var agent_js_1 = require("./agent.cjs");
 Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return agent_js_1.Agent; } });
 Object.defineProperty(exports, "BaseSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseSingleActionAgent; } });
+Object.defineProperty(exports, "BaseMultiActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseMultiActionAgent; } });
+Object.defineProperty(exports, "RunnableAgent", { enumerable: true, get: function () { return agent_js_1.RunnableAgent; } });
 Object.defineProperty(exports, "LLMSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.LLMSingleActionAgent; } });
 var index_js_1 = require("./toolkits/index.cjs");
 Object.defineProperty(exports, "JsonToolkit", { enumerable: true, get: function () { return index_js_1.JsonToolkit; } });
package/dist/agents/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-export { Agent, type AgentArgs, BaseSingleActionAgent, LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, } from "./agent.js";
+export { Agent, type AgentArgs, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, } from "./agent.js";
 export { JsonToolkit, OpenApiToolkit, RequestsToolkit, type VectorStoreInfo, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./toolkits/index.js";
 export { Toolkit } from "./toolkits/base.js";
 export { ChatAgent, type ChatAgentInput, type ChatCreatePromptArgs, } from "./chat/index.js";
package/dist/agents/index.js
CHANGED
@@ -1,4 +1,4 @@
-export { Agent, BaseSingleActionAgent, LLMSingleActionAgent, } from "./agent.js";
+export { Agent, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, LLMSingleActionAgent, } from "./agent.js";
 export { JsonToolkit, OpenApiToolkit, RequestsToolkit, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./toolkits/index.js";
 export { Toolkit } from "./toolkits/base.js";
 export { ChatAgent, } from "./chat/index.js";
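
The three entrypoint diffs above re-export the multi-action agent base classes alongside the existing single-action ones. For consumers this means:

// BaseMultiActionAgent and RunnableAgent are now importable from the
// public agents entrypoint, alongside the classes already exported there.
import {
  Agent,
  BaseSingleActionAgent,
  BaseMultiActionAgent, // newly exported
  RunnableAgent, // newly exported
  LLMSingleActionAgent,
} from "langchain/agents";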
package/dist/agents/toolkits/connery/index.cjs
ADDED
@@ -0,0 +1,39 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ConneryToolkit = void 0;
+const base_js_1 = require("../base.cjs");
+/**
+ * A toolkit for working with Connery actions.
+ *
+ * Connery is an open-source plugin infrastructure for AI.
+ * Source code: https://github.com/connery-io/connery-platform
+ *
+ * See an example of using this toolkit here: `./examples/src/agents/connery_mrkl.ts`
+ * @extends Toolkit
+ */
+class ConneryToolkit extends base_js_1.Toolkit {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "tools", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+    }
+    /**
+     * Creates a ConneryToolkit instance based on the provided ConneryService instance.
+     * It populates the tools property of the ConneryToolkit instance with the list of
+     * available tools from the ConneryService instance.
+     * @param conneryService The ConneryService instance.
+     * @returns A Promise that resolves to a ConneryToolkit instance.
+     */
+    static async createInstance(conneryService) {
+        const toolkit = new ConneryToolkit();
+        toolkit.tools = [];
+        const actions = await conneryService.listActions();
+        toolkit.tools.push(...actions);
+        return toolkit;
+    }
+}
+exports.ConneryToolkit = ConneryToolkit;
package/dist/agents/toolkits/connery/index.d.ts
ADDED
@@ -0,0 +1,23 @@
+import { Tool } from "@langchain/core/tools";
+import { Toolkit } from "../base.js";
+import { ConneryService } from "../../../tools/connery.js";
+/**
+ * A toolkit for working with Connery actions.
+ *
+ * Connery is an open-source plugin infrastructure for AI.
+ * Source code: https://github.com/connery-io/connery-platform
+ *
+ * See an example of using this toolkit here: `./examples/src/agents/connery_mrkl.ts`
+ * @extends Toolkit
+ */
+export declare class ConneryToolkit extends Toolkit {
+    tools: Tool[];
+    /**
+     * Creates a ConneryToolkit instance based on the provided ConneryService instance.
+     * It populates the tools property of the ConneryToolkit instance with the list of
+     * available tools from the ConneryService instance.
+     * @param conneryService The ConneryService instance.
+     * @returns A Promise that resolves to a ConneryToolkit instance.
+     */
+    static createInstance(conneryService: ConneryService): Promise<ConneryToolkit>;
+}
package/dist/agents/toolkits/connery/index.js
ADDED
@@ -0,0 +1,35 @@
+import { Toolkit } from "../base.js";
+/**
+ * A toolkit for working with Connery actions.
+ *
+ * Connery is an open-source plugin infrastructure for AI.
+ * Source code: https://github.com/connery-io/connery-platform
+ *
+ * See an example of using this toolkit here: `./examples/src/agents/connery_mrkl.ts`
+ * @extends Toolkit
+ */
+export class ConneryToolkit extends Toolkit {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "tools", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+    }
+    /**
+     * Creates a ConneryToolkit instance based on the provided ConneryService instance.
+     * It populates the tools property of the ConneryToolkit instance with the list of
+     * available tools from the ConneryService instance.
+     * @param conneryService The ConneryService instance.
+     * @returns A Promise that resolves to a ConneryToolkit instance.
+     */
+    static async createInstance(conneryService) {
+        const toolkit = new ConneryToolkit();
+        toolkit.tools = [];
+        const actions = await conneryService.listActions();
+        toolkit.tools.push(...actions);
+        return toolkit;
+    }
+}
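
All three builds compile the same class: `createInstance` asks the `ConneryService` for its available actions and exposes them as the toolkit's tools. A usage sketch (the `ConneryService` constructor options are not shown in this diff, so the zero-argument construction below assumes it reads its runner configuration from the environment):

import { ConneryService } from "langchain/tools/connery";
import { ConneryToolkit } from "langchain/agents/toolkits/connery";

// Assumption: ConneryService picks up the Connery runner URL and API key
// from configuration/environment; its options are not part of this diff.
const conneryService = new ConneryService();

// createInstance lists the runner's actions and wraps them as agent tools.
const toolkit = await ConneryToolkit.createInstance(conneryService);
console.log(toolkit.tools.map((tool) => tool.name));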
package/dist/agents/toolkits/conversational_retrieval/tool.cjs
CHANGED
@@ -7,7 +7,7 @@ const document_js_1 = require("../../../util/document.cjs");
 function createRetrieverTool(retriever, input) {
     const func = async ({ input }, runManager) => {
         const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
-        return (0, document_js_1.formatDocumentsAsString)(docs
+        return (0, document_js_1.formatDocumentsAsString)(docs);
     };
     const schema = zod_1.z.object({
         input: zod_1.z
package/dist/agents/toolkits/conversational_retrieval/tool.js
CHANGED
@@ -4,7 +4,7 @@ import { formatDocumentsAsString } from "../../../util/document.js";
 export function createRetrieverTool(retriever, input) {
     const func = async ({ input }, runManager) => {
         const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
-        return formatDocumentsAsString(docs
+        return formatDocumentsAsString(docs);
     };
     const schema = z.object({
         input: z
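
Both builds change the same statement in `createRetrieverTool`: the old `formatDocumentsAsString(docs` call is cut off in this rendering, and the new line closes the call with `docs` as the sole argument, consistent with the one-line change to `util/document` listed above. A sketch of the helper in use (the public re-export path and the option names are assumptions; this diff only shows the compiled function body):

import { createRetrieverTool } from "langchain/agents/toolkits"; // re-export path assumed
import type { BaseRetriever } from "langchain/schema/retriever";

declare const retriever: BaseRetriever; // any retriever instance you already have

// Wraps the retriever as an agent tool whose output is the retrieved
// documents joined into a single string by formatDocumentsAsString.
const searchTool = createRetrieverTool(retriever, {
  name: "search_docs",
  description: "Searches and returns documents relevant to the query.",
});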
package/dist/chat_models/fake.cjs
CHANGED
@@ -1,117 +1,5 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.FakeListChatModel = void 0;
-
-
-/**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- *   responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
-class FakeListChatModel extends base_js_1.BaseChatModel {
-    static lc_name() {
-        return "FakeListChatModel";
-    }
-    constructor({ responses, sleep }) {
-        super({});
-        Object.defineProperty(this, "responses", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "i", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: 0
-        });
-        Object.defineProperty(this, "sleep", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.responses = responses;
-        this.sleep = sleep;
-    }
-    _combineLLMOutput() {
-        return [];
-    }
-    _llmType() {
-        return "fake-list";
-    }
-    async _generate(_messages, options) {
-        await this._sleepIfRequested();
-        if (options?.stop?.length) {
-            return {
-                generations: [this._formatGeneration(options.stop[0])],
-            };
-        }
-        else {
-            const response = this._currentResponse();
-            this._incrementResponse();
-            return {
-                generations: [this._formatGeneration(response)],
-                llmOutput: {},
-            };
-        }
-    }
-    _formatGeneration(text) {
-        return {
-            message: new index_js_1.AIMessage(text),
-            text,
-        };
-    }
-    async *_streamResponseChunks(_messages, _options, _runManager) {
-        const response = this._currentResponse();
-        this._incrementResponse();
-        for await (const text of response) {
-            await this._sleepIfRequested();
-            yield this._createResponseChunk(text);
-        }
-    }
-    async _sleepIfRequested() {
-        if (this.sleep !== undefined) {
-            await this._sleep();
-        }
-    }
-    async _sleep() {
-        return new Promise((resolve) => {
-            setTimeout(() => resolve(), this.sleep);
-        });
-    }
-    _createResponseChunk(text) {
-        return new index_js_1.ChatGenerationChunk({
-            message: new index_js_1.AIMessageChunk({ content: text }),
-            text,
-        });
-    }
-    _currentResponse() {
-        return this.responses[this.i];
-    }
-    _incrementResponse() {
-        if (this.i < this.responses.length - 1) {
-            this.i += 1;
-        }
-        else {
-            this.i = 0;
-        }
-    }
-}
-exports.FakeListChatModel = FakeListChatModel;
+var testing_1 = require("@langchain/core/utils/testing");
+Object.defineProperty(exports, "FakeListChatModel", { enumerable: true, get: function () { return testing_1.FakeListChatModel; } });
package/dist/chat_models/fake.d.ts
CHANGED
@@ -1,52 +1 @@
-
-import { AIMessage, BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
-import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
-/**
- * Interface for the input parameters specific to the Fake List Chat model.
- */
-export interface FakeChatInput extends BaseChatModelParams {
-    /** Responses to return */
-    responses: string[];
-    /** Time to sleep in milliseconds between responses */
-    sleep?: number;
-}
-/**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- *   responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
-export declare class FakeListChatModel extends BaseChatModel {
-    static lc_name(): string;
-    responses: string[];
-    i: number;
-    sleep?: number;
-    constructor({ responses, sleep }: FakeChatInput);
-    _combineLLMOutput(): never[];
-    _llmType(): string;
-    _generate(_messages: BaseMessage[], options?: this["ParsedCallOptions"]): Promise<ChatResult>;
-    _formatGeneration(text: string): {
-        message: AIMessage;
-        text: string;
-    };
-    _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
-    _sleepIfRequested(): Promise<void>;
-    _sleep(): Promise<void>;
-    _createResponseChunk(text: string): ChatGenerationChunk;
-    _currentResponse(): string;
-    _incrementResponse(): void;
-}
+export { type FakeChatInput, FakeListChatModel, } from "@langchain/core/utils/testing";
package/dist/chat_models/fake.js
CHANGED
@@ -1,113 +1 @@
-
-import { AIMessage, AIMessageChunk, ChatGenerationChunk, } from "../schema/index.js";
-/**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- *   responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
-export class FakeListChatModel extends BaseChatModel {
-    static lc_name() {
-        return "FakeListChatModel";
-    }
-    constructor({ responses, sleep }) {
-        super({});
-        Object.defineProperty(this, "responses", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "i", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: 0
-        });
-        Object.defineProperty(this, "sleep", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.responses = responses;
-        this.sleep = sleep;
-    }
-    _combineLLMOutput() {
-        return [];
-    }
-    _llmType() {
-        return "fake-list";
-    }
-    async _generate(_messages, options) {
-        await this._sleepIfRequested();
-        if (options?.stop?.length) {
-            return {
-                generations: [this._formatGeneration(options.stop[0])],
-            };
-        }
-        else {
-            const response = this._currentResponse();
-            this._incrementResponse();
-            return {
-                generations: [this._formatGeneration(response)],
-                llmOutput: {},
-            };
-        }
-    }
-    _formatGeneration(text) {
-        return {
-            message: new AIMessage(text),
-            text,
-        };
-    }
-    async *_streamResponseChunks(_messages, _options, _runManager) {
-        const response = this._currentResponse();
-        this._incrementResponse();
-        for await (const text of response) {
-            await this._sleepIfRequested();
-            yield this._createResponseChunk(text);
-        }
-    }
-    async _sleepIfRequested() {
-        if (this.sleep !== undefined) {
-            await this._sleep();
-        }
-    }
-    async _sleep() {
-        return new Promise((resolve) => {
-            setTimeout(() => resolve(), this.sleep);
-        });
-    }
-    _createResponseChunk(text) {
-        return new ChatGenerationChunk({
-            message: new AIMessageChunk({ content: text }),
-            text,
-        });
-    }
-    _currentResponse() {
-        return this.responses[this.i];
-    }
-    _incrementResponse() {
-        if (this.i < this.responses.length - 1) {
-            this.i += 1;
-        }
-        else {
-            this.i = 0;
-        }
-    }
-}
+export { FakeListChatModel, } from "@langchain/core/utils/testing";
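
The three fake chat-model builds shrink to re-exports: `FakeListChatModel` (and its `FakeChatInput` params type) now lives in `@langchain/core/utils/testing`, and `langchain/chat_models/fake` simply forwards it, so existing imports keep working. The example from the removed JSDoc still applies:

import { FakeListChatModel } from "langchain/chat_models/fake";
import { HumanMessage } from "langchain/schema";

const chat = new FakeListChatModel({
  responses: ["I'll callback later.", "You 'console' them!"],
});

// Responses are returned in order, cycling back to the start of the list.
const firstResponse = await chat.call([
  new HumanMessage("You want to hear a JavaScript joke?"),
]);
console.log({ firstResponse });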
package/dist/chat_models/llama_cpp.cjs
CHANGED
@@ -108,7 +108,7 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         };
     }
     /** @ignore */
-    async _call(messages,
+    async _call(messages, options) {
         let prompt = "";
         if (messages.length > 1) {
             // We need to build a new _session
@@ -126,6 +126,7 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         }
         try {
             const promptOptions = {
+                onToken: options.onToken,
                 maxTokens: this?.maxTokens,
                 temperature: this?.temperature,
                 topK: this?.topK,
package/dist/chat_models/llama_cpp.d.ts
CHANGED
@@ -63,7 +63,7 @@ export declare class ChatLlamaCpp extends SimpleChatModel<LlamaCppCallOptions> {
         trimWhitespaceSuffix: boolean | undefined;
     };
     /** @ignore */
-    _call(messages: BaseMessage[],
+    _call(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<string>;
     _streamResponseChunks(input: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     protected _buildSession(messages: BaseMessage[]): string;
     protected _convertMessagesToInteractions(messages: BaseMessage[]): ConversationInteraction[];
package/dist/chat_models/llama_cpp.js
CHANGED
@@ -105,7 +105,7 @@ export class ChatLlamaCpp extends SimpleChatModel {
         };
     }
     /** @ignore */
-    async _call(messages,
+    async _call(messages, options) {
         let prompt = "";
         if (messages.length > 1) {
             // We need to build a new _session
@@ -123,6 +123,7 @@ export class ChatLlamaCpp extends SimpleChatModel {
         }
         try {
             const promptOptions = {
+                onToken: options.onToken,
                 maxTokens: this?.maxTokens,
                 temperature: this?.temperature,
                 topK: this?.topK,
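
In both ChatLlamaCpp builds the old `_call` signature is cut off in this rendering; the new one explicitly accepts `options` and threads `options.onToken` into the prompt options handed to node-llama-cpp. A sketch of what that enables (the model path is a placeholder, and the token-callback argument type follows node-llama-cpp's convention of raw token ids, which is an assumption not shown in this diff):

import { ChatLlamaCpp } from "langchain/chat_models/llama_cpp";
import { HumanMessage } from "langchain/schema";

const model = new ChatLlamaCpp({ modelPath: "/path/to/model.gguf" });

await model.call([new HumanMessage("Tell me a short joke.")], {
  // Forwarded to node-llama-cpp via promptOptions.onToken; invoked with
  // tokens as they are generated.
  onToken: (tokens) => console.log(tokens),
});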
package/dist/chat_models/minimax.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { OpenAIClient } from "
+import type { OpenAI as OpenAIClient } from "openai";
 import { BaseChatModel, BaseChatModelParams } from "./base.js";
 import { BaseMessage, ChatResult } from "../schema/index.js";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";