langchain 0.0.198 → 0.0.200
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/index.cjs +3 -1
- package/dist/agents/index.d.ts +2 -2
- package/dist/agents/index.js +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -1
- package/dist/chains/conversational_retrieval_chain.cjs +16 -2
- package/dist/chains/conversational_retrieval_chain.d.ts +2 -0
- package/dist/chains/conversational_retrieval_chain.js +16 -2
- package/dist/chat_models/fake.cjs +2 -114
- package/dist/chat_models/fake.d.ts +1 -52
- package/dist/chat_models/fake.js +1 -113
- package/dist/chat_models/llama_cpp.cjs +43 -21
- package/dist/chat_models/llama_cpp.d.ts +2 -1
- package/dist/chat_models/llama_cpp.js +44 -22
- package/dist/chat_models/minimax.d.ts +1 -1
- package/dist/document_loaders/fs/chatgpt.cjs +85 -0
- package/dist/document_loaders/fs/chatgpt.d.ts +8 -0
- package/dist/document_loaders/fs/chatgpt.js +81 -0
- package/dist/document_loaders/web/confluence.cjs +31 -7
- package/dist/document_loaders/web/confluence.d.ts +12 -5
- package/dist/document_loaders/web/confluence.js +31 -7
- package/dist/embeddings/gradient_ai.cjs +102 -0
- package/dist/embeddings/gradient_ai.d.ts +48 -0
- package/dist/embeddings/gradient_ai.js +98 -0
- package/dist/llms/gradient_ai.cjs +112 -0
- package/dist/llms/gradient_ai.d.ts +55 -0
- package/dist/llms/gradient_ai.js +108 -0
- package/dist/llms/llama_cpp.cjs +2 -1
- package/dist/llms/llama_cpp.d.ts +1 -1
- package/dist/llms/llama_cpp.js +2 -1
- package/dist/llms/watsonx_ai.cjs +154 -0
- package/dist/llms/watsonx_ai.d.ts +72 -0
- package/dist/llms/watsonx_ai.js +150 -0
- package/dist/load/import_constants.cjs +4 -0
- package/dist/load/import_constants.js +4 -0
- package/dist/load/import_map.cjs +4 -3
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/memory/vector_store.cjs +1 -1
- package/dist/memory/vector_store.js +1 -1
- package/dist/tools/google_places.cjs +81 -0
- package/dist/tools/google_places.d.ts +21 -0
- package/dist/tools/google_places.js +77 -0
- package/dist/tools/webbrowser.cjs +1 -1
- package/dist/tools/webbrowser.js +1 -1
- package/dist/util/document.cjs +1 -1
- package/dist/util/document.d.ts +1 -1
- package/dist/util/document.js +1 -1
- package/dist/util/tiktoken.cjs +15 -24
- package/dist/util/tiktoken.d.ts +1 -9
- package/dist/util/tiktoken.js +1 -21
- package/dist/vectorstores/elasticsearch.cjs +16 -3
- package/dist/vectorstores/elasticsearch.d.ts +6 -2
- package/dist/vectorstores/elasticsearch.js +16 -3
- package/dist/vectorstores/prisma.cjs +1 -1
- package/dist/vectorstores/prisma.js +1 -1
- package/dist/vectorstores/weaviate.d.ts +1 -1
- package/dist/vectorstores/xata.cjs +3 -2
- package/dist/vectorstores/xata.js +3 -2
- package/document_loaders/fs/chatgpt.cjs +1 -0
- package/document_loaders/fs/chatgpt.d.ts +1 -0
- package/document_loaders/fs/chatgpt.js +1 -0
- package/embeddings/gradient_ai.cjs +1 -0
- package/embeddings/gradient_ai.d.ts +1 -0
- package/embeddings/gradient_ai.js +1 -0
- package/llms/gradient_ai.cjs +1 -0
- package/llms/gradient_ai.d.ts +1 -0
- package/llms/gradient_ai.js +1 -0
- package/llms/watsonx_ai.cjs +1 -0
- package/llms/watsonx_ai.d.ts +1 -0
- package/llms/watsonx_ai.js +1 -0
- package/package.json +58 -11
- package/tools/google_places.cjs +1 -0
- package/tools/google_places.d.ts +1 -0
- package/tools/google_places.js +1 -0
package/dist/agents/index.cjs
CHANGED
@@ -1,9 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.XMLAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
+exports.XMLAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.RunnableAgent = exports.BaseMultiActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
 var agent_js_1 = require("./agent.cjs");
 Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return agent_js_1.Agent; } });
 Object.defineProperty(exports, "BaseSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseSingleActionAgent; } });
+Object.defineProperty(exports, "BaseMultiActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseMultiActionAgent; } });
+Object.defineProperty(exports, "RunnableAgent", { enumerable: true, get: function () { return agent_js_1.RunnableAgent; } });
 Object.defineProperty(exports, "LLMSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.LLMSingleActionAgent; } });
 var index_js_1 = require("./toolkits/index.cjs");
 Object.defineProperty(exports, "JsonToolkit", { enumerable: true, get: function () { return index_js_1.JsonToolkit; } });
package/dist/agents/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-export { Agent, type AgentArgs, BaseSingleActionAgent, LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, } from "./agent.js";
+export { Agent, type AgentArgs, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, } from "./agent.js";
 export { JsonToolkit, OpenApiToolkit, RequestsToolkit, type VectorStoreInfo, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./toolkits/index.js";
 export { Toolkit } from "./toolkits/base.js";
 export { ChatAgent, type ChatAgentInput, type ChatCreatePromptArgs, } from "./chat/index.js";
@@ -6,7 +6,7 @@ export { ChatAgentOutputParser } from "./chat/outputParser.js";
 export { ChatConversationalAgent, type ChatConversationalAgentInput, type ChatConversationalCreatePromptArgs, } from "./chat_convo/index.js";
 export { ChatConversationalAgentOutputParser, type ChatConversationalAgentOutputParserArgs, ChatConversationalAgentOutputParserWithRetries, type ChatConversationalAgentOutputParserFormatInstructionsOptions, } from "./chat_convo/outputParser.js";
 export { AgentExecutor, type AgentExecutorInput } from "./executor.js";
-export { initializeAgentExecutor, initializeAgentExecutorWithOptions, type InitializeAgentExecutorOptions, } from "./initialize.js";
+export { initializeAgentExecutor, initializeAgentExecutorWithOptions, type InitializeAgentExecutorOptions, type InitializeAgentExecutorOptionsStructured, } from "./initialize.js";
 export { ZeroShotAgent, type ZeroShotAgentInput, type ZeroShotCreatePromptArgs, } from "./mrkl/index.js";
 export { ZeroShotAgentOutputParser } from "./mrkl/outputParser.js";
 export { AgentActionOutputParser, type AgentInput, type SerializedAgent, type SerializedAgentT, type SerializedZeroShotAgent, type StoppingMethod, } from "./types.js";
package/dist/agents/index.js
CHANGED
@@ -1,4 +1,4 @@
-export { Agent, BaseSingleActionAgent, LLMSingleActionAgent, } from "./agent.js";
+export { Agent, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, LLMSingleActionAgent, } from "./agent.js";
 export { JsonToolkit, OpenApiToolkit, RequestsToolkit, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./toolkits/index.js";
 export { Toolkit } from "./toolkits/base.js";
 export { ChatAgent, } from "./chat/index.js";
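The agents entry points above now also re-export BaseMultiActionAgent and RunnableAgent, and the initializer types gain InitializeAgentExecutorOptionsStructured. A minimal sketch of pulling in the new symbols (assumes langchain@0.0.200; the helper function is purely illustrative and not part of the package):

import { BaseMultiActionAgent, RunnableAgent } from "langchain/agents";

// Illustrative only: distinguish multi-action agents at runtime.
function isMultiActionAgent(agent: unknown): agent is BaseMultiActionAgent {
  return agent instanceof BaseMultiActionAgent;
}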
package/dist/agents/toolkits/conversational_retrieval/tool.cjs
CHANGED
@@ -7,7 +7,7 @@ const document_js_1 = require("../../../util/document.cjs");
 function createRetrieverTool(retriever, input) {
     const func = async ({ input }, runManager) => {
         const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
-        return (0, document_js_1.formatDocumentsAsString)(docs
+        return (0, document_js_1.formatDocumentsAsString)(docs);
     };
     const schema = zod_1.z.object({
         input: zod_1.z
package/dist/agents/toolkits/conversational_retrieval/tool.js
CHANGED
@@ -4,7 +4,7 @@ import { formatDocumentsAsString } from "../../../util/document.js";
 export function createRetrieverTool(retriever, input) {
     const func = async ({ input }, runManager) => {
         const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
-        return formatDocumentsAsString(docs
+        return formatDocumentsAsString(docs);
     };
     const schema = z.object({
         input: z
package/dist/chains/conversational_retrieval_chain.cjs
CHANGED
@@ -94,12 +94,20 @@ class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "returnGeneratedQuestion", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
         this.retriever = fields.retriever;
         this.combineDocumentsChain = fields.combineDocumentsChain;
         this.questionGeneratorChain = fields.questionGeneratorChain;
         this.inputKey = fields.inputKey ?? this.inputKey;
         this.returnSourceDocuments =
             fields.returnSourceDocuments ?? this.returnSourceDocuments;
+        this.returnGeneratedQuestion =
+            fields.returnGeneratedQuestion ?? this.returnGeneratedQuestion;
     }
     /**
      * Static method to convert the chat history input into a formatted
@@ -172,13 +180,19 @@ class ConversationalRetrievalQAChain extends base_js_1.BaseChain {
             input_documents: docs,
             chat_history: chatHistory,
         };
-[removed line 175 not captured in the diff source]
+        let result = await this.combineDocumentsChain.call(inputs, runManager?.getChild("combine_documents"));
         if (this.returnSourceDocuments) {
-[removed line 177 not captured in the diff source]
+            result = {
                 ...result,
                 sourceDocuments: docs,
             };
         }
+        if (this.returnGeneratedQuestion) {
+            result = {
+                ...result,
+                generatedQuestion: newQuestion,
+            };
+        }
         return result;
     }
     _chainType() {
package/dist/chains/conversational_retrieval_chain.d.ts
CHANGED
@@ -16,6 +16,7 @@ export interface ConversationalRetrievalQAChainInput extends ChainInputs {
     combineDocumentsChain: BaseChain;
     questionGeneratorChain: LLMChain;
     returnSourceDocuments?: boolean;
+    returnGeneratedQuestion?: boolean;
     inputKey?: string;
 }
 /**
@@ -62,6 +63,7 @@ export declare class ConversationalRetrievalQAChain extends BaseChain implements
     combineDocumentsChain: BaseChain;
     questionGeneratorChain: LLMChain;
     returnSourceDocuments: boolean;
+    returnGeneratedQuestion: boolean;
     constructor(fields: ConversationalRetrievalQAChainInput);
     /**
      * Static method to convert the chat history input into a formatted
package/dist/chains/conversational_retrieval_chain.js
CHANGED
@@ -91,12 +91,20 @@ export class ConversationalRetrievalQAChain extends BaseChain {
             writable: true,
             value: false
         });
+        Object.defineProperty(this, "returnGeneratedQuestion", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
         this.retriever = fields.retriever;
         this.combineDocumentsChain = fields.combineDocumentsChain;
         this.questionGeneratorChain = fields.questionGeneratorChain;
         this.inputKey = fields.inputKey ?? this.inputKey;
         this.returnSourceDocuments =
             fields.returnSourceDocuments ?? this.returnSourceDocuments;
+        this.returnGeneratedQuestion =
+            fields.returnGeneratedQuestion ?? this.returnGeneratedQuestion;
     }
     /**
      * Static method to convert the chat history input into a formatted
@@ -169,13 +177,19 @@ export class ConversationalRetrievalQAChain extends BaseChain {
             input_documents: docs,
             chat_history: chatHistory,
         };
-[removed line 172 not captured in the diff source]
+        let result = await this.combineDocumentsChain.call(inputs, runManager?.getChild("combine_documents"));
         if (this.returnSourceDocuments) {
-[removed line 174 not captured in the diff source]
+            result = {
                 ...result,
                 sourceDocuments: docs,
             };
         }
+        if (this.returnGeneratedQuestion) {
+            result = {
+                ...result,
+                generatedQuestion: newQuestion,
+            };
+        }
         return result;
     }
     _chainType() {
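The practical effect of the new returnGeneratedQuestion flag: when enabled, the chain copies the rephrased standalone question into its result as generatedQuestion, alongside the existing optional sourceDocuments. A hedged usage sketch (the chain-level field is what this diff confirms; the fromLLM pass-through, model, and vector store setup below are assumptions):

import { ConversationalRetrievalQAChain } from "langchain/chains";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";

// Tiny in-memory store so the sketch is self-contained.
const vectorStore = await MemoryVectorStore.fromTexts(
  ["Mitochondria are the powerhouse of the cell."],
  [{ id: 1 }],
  new OpenAIEmbeddings()
);

const chain = ConversationalRetrievalQAChain.fromLLM(
  new ChatOpenAI({ temperature: 0 }),
  vectorStore.asRetriever(),
  {
    returnSourceDocuments: true,
    returnGeneratedQuestion: true, // new in this release
  }
);

const res = await chain.call({
  question: "What are mitochondria?",
  chat_history: "",
});
// res.text              -> the answer
// res.sourceDocuments   -> retrieved documents (returnSourceDocuments)
// res.generatedQuestion -> the rephrased standalone question (returnGeneratedQuestion)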
package/dist/chat_models/fake.cjs
CHANGED
@@ -1,117 +1,5 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.FakeListChatModel = void 0;
-[removed lines 4-5 not captured in the diff source]
-/**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- *   responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
-class FakeListChatModel extends base_js_1.BaseChatModel {
-    static lc_name() {
-        return "FakeListChatModel";
-    }
-    constructor({ responses, sleep }) {
-        super({});
-        Object.defineProperty(this, "responses", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "i", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: 0
-        });
-        Object.defineProperty(this, "sleep", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.responses = responses;
-        this.sleep = sleep;
-    }
-    _combineLLMOutput() {
-        return [];
-    }
-    _llmType() {
-        return "fake-list";
-    }
-    async _generate(_messages, options) {
-        await this._sleepIfRequested();
-        if (options?.stop?.length) {
-            return {
-                generations: [this._formatGeneration(options.stop[0])],
-            };
-        }
-        else {
-            const response = this._currentResponse();
-            this._incrementResponse();
-            return {
-                generations: [this._formatGeneration(response)],
-                llmOutput: {},
-            };
-        }
-    }
-    _formatGeneration(text) {
-        return {
-            message: new index_js_1.AIMessage(text),
-            text,
-        };
-    }
-    async *_streamResponseChunks(_messages, _options, _runManager) {
-        const response = this._currentResponse();
-        this._incrementResponse();
-        for await (const text of response) {
-            await this._sleepIfRequested();
-            yield this._createResponseChunk(text);
-        }
-    }
-    async _sleepIfRequested() {
-        if (this.sleep !== undefined) {
-            await this._sleep();
-        }
-    }
-    async _sleep() {
-        return new Promise((resolve) => {
-            setTimeout(() => resolve(), this.sleep);
-        });
-    }
-    _createResponseChunk(text) {
-        return new index_js_1.ChatGenerationChunk({
-            message: new index_js_1.AIMessageChunk({ content: text }),
-            text,
-        });
-    }
-    _currentResponse() {
-        return this.responses[this.i];
-    }
-    _incrementResponse() {
-        if (this.i < this.responses.length - 1) {
-            this.i += 1;
-        }
-        else {
-            this.i = 0;
-        }
-    }
-}
-exports.FakeListChatModel = FakeListChatModel;
+var testing_1 = require("@langchain/core/utils/testing");
+Object.defineProperty(exports, "FakeListChatModel", { enumerable: true, get: function () { return testing_1.FakeListChatModel; } });
package/dist/chat_models/fake.d.ts
CHANGED
@@ -1,52 +1 @@
-[removed line 1 not captured in the diff source]
-import { AIMessage, BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
-import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
-/**
- * Interface for the input parameters specific to the Fake List Chat model.
- */
-export interface FakeChatInput extends BaseChatModelParams {
-    /** Responses to return */
-    responses: string[];
-    /** Time to sleep in milliseconds between responses */
-    sleep?: number;
-}
-/**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- *   responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
-export declare class FakeListChatModel extends BaseChatModel {
-    static lc_name(): string;
-    responses: string[];
-    i: number;
-    sleep?: number;
-    constructor({ responses, sleep }: FakeChatInput);
-    _combineLLMOutput(): never[];
-    _llmType(): string;
-    _generate(_messages: BaseMessage[], options?: this["ParsedCallOptions"]): Promise<ChatResult>;
-    _formatGeneration(text: string): {
-        message: AIMessage;
-        text: string;
-    };
-    _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
-    _sleepIfRequested(): Promise<void>;
-    _sleep(): Promise<void>;
-    _createResponseChunk(text: string): ChatGenerationChunk;
-    _currentResponse(): string;
-    _incrementResponse(): void;
-}
+export { type FakeChatInput, FakeListChatModel, } from "@langchain/core/utils/testing";
package/dist/chat_models/fake.js
CHANGED
@@ -1,113 +1 @@
-[removed line 1 not captured in the diff source]
-import { AIMessage, AIMessageChunk, ChatGenerationChunk, } from "../schema/index.js";
-/**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- *   responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
-export class FakeListChatModel extends BaseChatModel {
-    static lc_name() {
-        return "FakeListChatModel";
-    }
-    constructor({ responses, sleep }) {
-        super({});
-        Object.defineProperty(this, "responses", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        Object.defineProperty(this, "i", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: 0
-        });
-        Object.defineProperty(this, "sleep", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.responses = responses;
-        this.sleep = sleep;
-    }
-    _combineLLMOutput() {
-        return [];
-    }
-    _llmType() {
-        return "fake-list";
-    }
-    async _generate(_messages, options) {
-        await this._sleepIfRequested();
-        if (options?.stop?.length) {
-            return {
-                generations: [this._formatGeneration(options.stop[0])],
-            };
-        }
-        else {
-            const response = this._currentResponse();
-            this._incrementResponse();
-            return {
-                generations: [this._formatGeneration(response)],
-                llmOutput: {},
-            };
-        }
-    }
-    _formatGeneration(text) {
-        return {
-            message: new AIMessage(text),
-            text,
-        };
-    }
-    async *_streamResponseChunks(_messages, _options, _runManager) {
-        const response = this._currentResponse();
-        this._incrementResponse();
-        for await (const text of response) {
-            await this._sleepIfRequested();
-            yield this._createResponseChunk(text);
-        }
-    }
-    async _sleepIfRequested() {
-        if (this.sleep !== undefined) {
-            await this._sleep();
-        }
-    }
-    async _sleep() {
-        return new Promise((resolve) => {
-            setTimeout(() => resolve(), this.sleep);
-        });
-    }
-    _createResponseChunk(text) {
-        return new ChatGenerationChunk({
-            message: new AIMessageChunk({ content: text }),
-            text,
-        });
-    }
-    _currentResponse() {
-        return this.responses[this.i];
-    }
-    _incrementResponse() {
-        if (this.i < this.responses.length - 1) {
-            this.i += 1;
-        }
-        else {
-            this.i = 0;
-        }
-    }
-}
+export { FakeListChatModel, } from "@langchain/core/utils/testing";
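FakeListChatModel is now a thin re-export of the implementation in @langchain/core/utils/testing, so existing imports from langchain/chat_models/fake keep working unchanged. A quick usage sketch, adapted from the doc comment removed above (assumes langchain@0.0.200):

import { FakeListChatModel } from "langchain/chat_models/fake";
import { HumanMessage } from "langchain/schema";

// Deterministic chat model for tests: cycles through the given responses.
const chat = new FakeListChatModel({
  responses: ["I'll callback later.", "You 'console' them!"],
});

const firstResponse = await chat.call([
  new HumanMessage("You want to hear a JavaScript joke?"),
]);
console.log({ firstResponse });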
package/dist/chat_models/llama_cpp.cjs
CHANGED
@@ -108,7 +108,7 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         };
     }
     /** @ignore */
-    async _call(messages,
+    async _call(messages, options) {
         let prompt = "";
         if (messages.length > 1) {
             // We need to build a new _session
@@ -126,6 +126,7 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         }
         try {
             const promptOptions = {
+                onToken: options.onToken,
                 maxTokens: this?.maxTokens,
                 temperature: this?.temperature,
                 topK: this?.topK,
@@ -141,26 +142,22 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         }
     }
     async *_streamResponseChunks(input, _options, runManager) {
-[removed lines 144-159 not captured in the diff source]
-                generationInfo: {},
-            });
-            await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
-        }
+        const promptOptions = {
+            temperature: this?.temperature,
+            topK: this?.topK,
+            topP: this?.topP,
+        };
+        const prompt = this._buildPrompt(input);
+        const stream = await this.caller.call(async () => this._context.evaluate(this._context.encode(prompt), promptOptions));
+        for await (const chunk of stream) {
+            yield new index_js_1.ChatGenerationChunk({
+                text: this._context.decode([chunk]),
+                message: new index_js_1.AIMessageChunk({
+                    content: this._context.decode([chunk]),
+                }),
+                generationInfo: {},
+            });
+            await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
         }
     }
     // This constructs a new session if we need to adding in any sys messages or previous chats
@@ -251,5 +248,30 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         }
         return result;
     }
+    _buildPrompt(input) {
+        const prompt = input
+            .map((message) => {
+            let messageText;
+            if (message._getType() === "human") {
+                messageText = `[INST] ${message.content} [/INST]`;
+            }
+            else if (message._getType() === "ai") {
+                messageText = message.content;
+            }
+            else if (message._getType() === "system") {
+                messageText = `<<SYS>> ${message.content} <</SYS>>`;
+            }
+            else if (index_js_1.ChatMessage.isInstance(message)) {
+                messageText = `\n\n${message.role[0].toUpperCase()}${message.role.slice(1)}: ${message.content}`;
+            }
+            else {
+                console.warn(`Unsupported message type passed to llama_cpp: "${message._getType()}"`);
+                messageText = "";
+            }
+            return messageText;
+        })
+            .join("\n");
+        return prompt;
+    }
 }
 exports.ChatLlamaCpp = ChatLlamaCpp;
package/dist/chat_models/llama_cpp.d.ts
CHANGED
@@ -63,8 +63,9 @@ export declare class ChatLlamaCpp extends SimpleChatModel<LlamaCppCallOptions> {
         trimWhitespaceSuffix: boolean | undefined;
     };
     /** @ignore */
-    _call(messages: BaseMessage[],
+    _call(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<string>;
     _streamResponseChunks(input: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     protected _buildSession(messages: BaseMessage[]): string;
     protected _convertMessagesToInteractions(messages: BaseMessage[]): ConversationInteraction[];
+    protected _buildPrompt(input: BaseMessage[]): string;
 }
package/dist/chat_models/llama_cpp.js
CHANGED
@@ -1,7 +1,7 @@
 import { LlamaChatSession, } from "node-llama-cpp";
 import { SimpleChatModel } from "./base.js";
 import { createLlamaModel, createLlamaContext, } from "../util/llama_cpp.js";
-import { ChatGenerationChunk, AIMessageChunk, } from "../schema/index.js";
+import { ChatGenerationChunk, AIMessageChunk, ChatMessage, } from "../schema/index.js";
 /**
  * To use this model you need to have the `node-llama-cpp` module installed.
  * This can be installed using `npm install -S node-llama-cpp` and the minimum
@@ -105,7 +105,7 @@ export class ChatLlamaCpp extends SimpleChatModel {
         };
     }
     /** @ignore */
-    async _call(messages,
+    async _call(messages, options) {
         let prompt = "";
         if (messages.length > 1) {
             // We need to build a new _session
@@ -123,6 +123,7 @@ export class ChatLlamaCpp extends SimpleChatModel {
         }
         try {
             const promptOptions = {
+                onToken: options.onToken,
                 maxTokens: this?.maxTokens,
                 temperature: this?.temperature,
                 topK: this?.topK,
@@ -138,26 +139,22 @@ export class ChatLlamaCpp extends SimpleChatModel {
         }
     }
     async *_streamResponseChunks(input, _options, runManager) {
-[removed lines 141-156 not captured in the diff source]
-                generationInfo: {},
-            });
-            await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
-        }
+        const promptOptions = {
+            temperature: this?.temperature,
+            topK: this?.topK,
+            topP: this?.topP,
+        };
+        const prompt = this._buildPrompt(input);
+        const stream = await this.caller.call(async () => this._context.evaluate(this._context.encode(prompt), promptOptions));
+        for await (const chunk of stream) {
+            yield new ChatGenerationChunk({
+                text: this._context.decode([chunk]),
+                message: new AIMessageChunk({
+                    content: this._context.decode([chunk]),
+                }),
+                generationInfo: {},
+            });
+            await runManager?.handleLLMNewToken(this._context.decode([chunk]) ?? "");
         }
     }
     // This constructs a new session if we need to adding in any sys messages or previous chats
@@ -248,4 +245,29 @@ export class ChatLlamaCpp extends SimpleChatModel {
         }
         return result;
     }
+    _buildPrompt(input) {
+        const prompt = input
+            .map((message) => {
+            let messageText;
+            if (message._getType() === "human") {
+                messageText = `[INST] ${message.content} [/INST]`;
+            }
+            else if (message._getType() === "ai") {
+                messageText = message.content;
+            }
+            else if (message._getType() === "system") {
+                messageText = `<<SYS>> ${message.content} <</SYS>>`;
+            }
+            else if (ChatMessage.isInstance(message)) {
+                messageText = `\n\n${message.role[0].toUpperCase()}${message.role.slice(1)}: ${message.content}`;
+            }
+            else {
+                console.warn(`Unsupported message type passed to llama_cpp: "${message._getType()}"`);
+                messageText = "";
+            }
+            return messageText;
+        })
+            .join("\n");
+        return prompt;
+    }
 }
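Streaming for ChatLlamaCpp no longer goes through the chat session: the new _buildPrompt flattens the message list into a Llama-2-style prompt ([INST] / <<SYS>> markers) and each evaluated token is decoded and yielded as a chunk. A hedged usage sketch (the model path is a placeholder, and .stream() availability on chat models at this version is assumed):

import { ChatLlamaCpp } from "langchain/chat_models/llama_cpp";
import { HumanMessage, SystemMessage } from "langchain/schema";

// Placeholder path to a local model file supported by node-llama-cpp.
const model = new ChatLlamaCpp({ modelPath: "/path/to/llama-2-7b-chat.gguf" });

const stream = await model.stream([
  new SystemMessage("Answer in one sentence."),
  new HumanMessage("Why is the sky blue?"),
]);

// Tokens arrive as message chunks; print them as they are decoded.
for await (const chunk of stream) {
  process.stdout.write(chunk.content);
}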
package/dist/chat_models/minimax.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import type { OpenAIClient } from "
+import type { OpenAI as OpenAIClient } from "openai";
 import { BaseChatModel, BaseChatModelParams } from "./base.js";
 import { BaseMessage, ChatResult } from "../schema/index.js";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";