langchain 0.0.57 → 0.0.59
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cache/redis.cjs +1 -0
- package/cache/redis.d.ts +1 -0
- package/cache/redis.js +1 -0
- package/cache.cjs +1 -1
- package/cache.d.ts +1 -1
- package/cache.js +1 -1
- package/dist/agents/chat/index.d.ts +1 -2
- package/dist/agents/executor.d.ts +1 -2
- package/dist/agents/index.cjs +1 -3
- package/dist/agents/index.d.ts +5 -6
- package/dist/agents/index.js +1 -2
- package/dist/agents/initialize.cjs +7 -0
- package/dist/agents/initialize.d.ts +9 -2
- package/dist/agents/initialize.js +7 -0
- package/dist/cache/base.cjs +19 -0
- package/dist/cache/base.d.ts +11 -0
- package/dist/cache/base.js +12 -0
- package/dist/cache/index.cjs +28 -0
- package/dist/cache/index.d.ts +8 -0
- package/dist/cache/index.js +24 -0
- package/dist/cache/redis.cjs +40 -0
- package/dist/cache/redis.d.ts +8 -0
- package/dist/cache/redis.js +36 -0
- package/dist/chat_models/anthropic.cjs +3 -17
- package/dist/chat_models/anthropic.d.ts +8 -25
- package/dist/chat_models/anthropic.js +3 -17
- package/dist/chat_models/openai.cjs +3 -15
- package/dist/chat_models/openai.d.ts +4 -22
- package/dist/chat_models/openai.js +3 -15
- package/dist/document_loaders/fs/pdf.cjs +2 -2
- package/dist/document_loaders/fs/pdf.d.ts +2 -1
- package/dist/document_loaders/fs/pdf.js +2 -2
- package/dist/embeddings/cohere.cjs +1 -9
- package/dist/embeddings/cohere.d.ts +9 -14
- package/dist/embeddings/cohere.js +1 -9
- package/dist/embeddings/openai.cjs +0 -8
- package/dist/embeddings/openai.d.ts +8 -9
- package/dist/embeddings/openai.js +0 -8
- package/dist/llms/base.cjs +4 -4
- package/dist/llms/base.d.ts +1 -2
- package/dist/llms/base.js +1 -1
- package/dist/llms/cohere.cjs +2 -0
- package/dist/llms/cohere.d.ts +3 -2
- package/dist/llms/cohere.js +2 -0
- package/dist/llms/hf.cjs +2 -0
- package/dist/llms/hf.d.ts +3 -2
- package/dist/llms/hf.js +2 -0
- package/dist/llms/openai-chat.cjs +2 -15
- package/dist/llms/openai-chat.d.ts +10 -28
- package/dist/llms/openai-chat.js +2 -15
- package/dist/llms/openai.d.ts +7 -10
- package/dist/llms/replicate.cjs +2 -0
- package/dist/llms/replicate.d.ts +3 -2
- package/dist/llms/replicate.js +2 -0
- package/dist/memory/motorhead_memory.cjs +4 -2
- package/dist/memory/motorhead_memory.js +5 -3
- package/dist/output_parsers/regex.cjs +2 -2
- package/dist/output_parsers/regex.js +2 -2
- package/dist/schema/index.cjs +4 -1
- package/dist/schema/index.d.ts +4 -0
- package/dist/schema/index.js +2 -0
- package/dist/tools/IFTTTWebhook.cjs +1 -0
- package/dist/tools/IFTTTWebhook.d.ts +1 -0
- package/dist/tools/IFTTTWebhook.js +1 -0
- package/dist/tools/aiplugin.cjs +1 -0
- package/dist/tools/aiplugin.d.ts +1 -0
- package/dist/tools/aiplugin.js +1 -0
- package/dist/tools/aws_lambda.cjs +1 -0
- package/dist/tools/aws_lambda.d.ts +1 -0
- package/dist/tools/aws_lambda.js +1 -0
- package/dist/tools/bingserpapi.cjs +9 -2
- package/dist/tools/bingserpapi.d.ts +1 -0
- package/dist/tools/bingserpapi.js +9 -2
- package/dist/tools/calculator.cjs +1 -0
- package/dist/tools/calculator.d.ts +1 -0
- package/dist/tools/calculator.js +1 -0
- package/dist/tools/chain.cjs +1 -0
- package/dist/tools/chain.d.ts +1 -0
- package/dist/tools/chain.js +1 -0
- package/dist/tools/dadjokeapi.cjs +1 -0
- package/dist/tools/dadjokeapi.d.ts +1 -0
- package/dist/tools/dadjokeapi.js +1 -0
- package/dist/tools/dynamic.cjs +1 -0
- package/dist/tools/dynamic.d.ts +1 -0
- package/dist/tools/dynamic.js +1 -0
- package/dist/tools/index.d.ts +3 -3
- package/dist/tools/index.js +1 -1
- package/dist/tools/json.cjs +2 -0
- package/dist/tools/json.d.ts +2 -0
- package/dist/tools/json.js +2 -0
- package/dist/tools/requests.cjs +2 -0
- package/dist/tools/requests.d.ts +2 -0
- package/dist/tools/requests.js +2 -0
- package/dist/tools/serpapi.cjs +1 -3
- package/dist/tools/serpapi.d.ts +4 -6
- package/dist/tools/serpapi.js +1 -3
- package/dist/tools/serper.cjs +1 -3
- package/dist/tools/serper.d.ts +4 -7
- package/dist/tools/serper.js +1 -3
- package/dist/tools/sql.cjs +5 -3
- package/dist/tools/sql.d.ts +4 -0
- package/dist/tools/sql.js +5 -3
- package/dist/tools/vectorstore.cjs +1 -0
- package/dist/tools/vectorstore.d.ts +1 -0
- package/dist/tools/vectorstore.js +1 -0
- package/dist/tools/webbrowser.cjs +1 -0
- package/dist/tools/webbrowser.d.ts +2 -1
- package/dist/tools/webbrowser.js +1 -0
- package/dist/tools/zapier.cjs +1 -0
- package/dist/tools/zapier.d.ts +2 -2
- package/dist/tools/zapier.js +1 -0
- package/dist/vectorstores/base.cjs +10 -3
- package/dist/vectorstores/base.d.ts +10 -7
- package/dist/vectorstores/base.js +10 -3
- package/dist/vectorstores/mongo.d.ts +1 -0
- package/dist/vectorstores/opensearch.d.ts +3 -1
- package/dist/vectorstores/pinecone.d.ts +2 -1
- package/dist/vectorstores/weaviate.cjs +0 -6
- package/dist/vectorstores/weaviate.d.ts +1 -2
- package/dist/vectorstores/weaviate.js +0 -6
- package/package.json +13 -4
- package/dist/cache.cjs +0 -83
- package/dist/cache.d.ts +0 -23
- package/dist/cache.js +0 -74
package/cache/redis.cjs
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
module.exports = require('../dist/cache/redis.cjs');
|
package/cache/redis.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from '../dist/cache/redis.js'
|
package/cache/redis.js
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from '../dist/cache/redis.js'
|
package/cache.cjs
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
module.exports = require('./dist/cache.cjs');
|
|
1
|
+
module.exports = require('./dist/cache/index.cjs');
|
package/cache.d.ts
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export * from './dist/cache.js'
|
|
1
|
+
export * from './dist/cache/index.js'
|
package/cache.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
export * from './dist/cache.js'
|
|
1
|
+
export * from './dist/cache/index.js'
|
|
@@ -14,7 +14,7 @@ export type CreatePromptArgs = {
|
|
|
14
14
|
/** List of input variables the final prompt will expect. */
|
|
15
15
|
inputVariables?: string[];
|
|
16
16
|
};
|
|
17
|
-
type ChatAgentInput = Optional<AgentInput, "outputParser">;
|
|
17
|
+
export type ChatAgentInput = Optional<AgentInput, "outputParser">;
|
|
18
18
|
/**
|
|
19
19
|
* Agent for the MRKL chain.
|
|
20
20
|
* @augments Agent
|
|
@@ -39,4 +39,3 @@ export declare class ChatAgent extends Agent {
|
|
|
39
39
|
static createPrompt(tools: Tool[], args?: CreatePromptArgs): ChatPromptTemplate;
|
|
40
40
|
static fromLLMAndTools(llm: BaseLanguageModel, tools: Tool[], args?: CreatePromptArgs & AgentArgs): ChatAgent;
|
|
41
41
|
}
|
|
42
|
-
export {};
|
|
@@ -4,7 +4,7 @@ import { Tool } from "../tools/base.js";
|
|
|
4
4
|
import { StoppingMethod } from "./types.js";
|
|
5
5
|
import { SerializedLLMChain } from "../chains/serde.js";
|
|
6
6
|
import { ChainValues } from "../schema/index.js";
|
|
7
|
-
interface AgentExecutorInput extends ChainInputs {
|
|
7
|
+
export interface AgentExecutorInput extends ChainInputs {
|
|
8
8
|
agent: BaseSingleActionAgent | BaseMultiActionAgent;
|
|
9
9
|
tools: Tool[];
|
|
10
10
|
returnIntermediateSteps?: boolean;
|
|
@@ -31,4 +31,3 @@ export declare class AgentExecutor extends BaseChain {
|
|
|
31
31
|
_chainType(): "agent_executor";
|
|
32
32
|
serialize(): SerializedLLMChain;
|
|
33
33
|
}
|
|
34
|
-
export {};
|
package/dist/agents/index.cjs
CHANGED
|
@@ -1,8 +1,6 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.createVectorStoreAgent = exports.createSqlAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.SqlToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent =
|
|
4
|
-
var base_js_1 = require("../tools/base.cjs");
|
|
5
|
-
Object.defineProperty(exports, "Tool", { enumerable: true, get: function () { return base_js_1.Tool; } });
|
|
3
|
+
exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.createVectorStoreAgent = exports.createSqlAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.SqlToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
|
|
6
4
|
var agent_js_1 = require("./agent.cjs");
|
|
7
5
|
Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return agent_js_1.Agent; } });
|
|
8
6
|
Object.defineProperty(exports, "BaseSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseSingleActionAgent; } });
|
package/dist/agents/index.d.ts
CHANGED
|
@@ -1,12 +1,11 @@
|
|
|
1
|
-
export {
|
|
2
|
-
export { Agent, BaseSingleActionAgent, LLMSingleActionAgent } from "./agent.js";
|
|
1
|
+
export { Agent, AgentArgs, BaseSingleActionAgent, LLMSingleActionAgent, LLMSingleActionAgentInput, } from "./agent.js";
|
|
3
2
|
export { JsonToolkit, OpenApiToolkit, RequestsToolkit, SqlToolkit, VectorStoreInfo, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createSqlAgent, createVectorStoreAgent, } from "./agent_toolkits/index.js";
|
|
4
|
-
export { ChatAgent } from "./chat/index.js";
|
|
3
|
+
export { ChatAgent, ChatAgentInput } from "./chat/index.js";
|
|
5
4
|
export { ChatAgentOutputParser } from "./chat/outputParser.js";
|
|
6
5
|
export { ChatConversationalAgent, ChatConversationalAgentInput, } from "./chat_convo/index.js";
|
|
7
6
|
export { ChatConversationalAgentOutputParser } from "./chat_convo/outputParser.js";
|
|
8
|
-
export { AgentExecutor } from "./executor.js";
|
|
9
|
-
export { initializeAgentExecutor, initializeAgentExecutorWithOptions, } from "./initialize.js";
|
|
10
|
-
export { ZeroShotAgent } from "./mrkl/index.js";
|
|
7
|
+
export { AgentExecutor, AgentExecutorInput } from "./executor.js";
|
|
8
|
+
export { initializeAgentExecutor, initializeAgentExecutorWithOptions, InitializeAgentExecutorOptions, } from "./initialize.js";
|
|
9
|
+
export { ZeroShotAgent, ZeroShotAgentInput } from "./mrkl/index.js";
|
|
11
10
|
export { ZeroShotAgentOutputParser } from "./mrkl/outputParser.js";
|
|
12
11
|
export { AgentActionOutputParser, AgentInput, SerializedAgent, SerializedAgentT, SerializedZeroShotAgent, StoppingMethod, } from "./types.js";
|
package/dist/agents/index.js
CHANGED
|
@@ -1,5 +1,4 @@
|
|
|
1
|
-
export {
|
|
2
|
-
export { Agent, BaseSingleActionAgent, LLMSingleActionAgent } from "./agent.js";
|
|
1
|
+
export { Agent, BaseSingleActionAgent, LLMSingleActionAgent, } from "./agent.js";
|
|
3
2
|
export { JsonToolkit, OpenApiToolkit, RequestsToolkit, SqlToolkit, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createSqlAgent, createVectorStoreAgent, } from "./agent_toolkits/index.js";
|
|
4
3
|
export { ChatAgent } from "./chat/index.js";
|
|
5
4
|
export { ChatAgentOutputParser } from "./chat/outputParser.js";
|
|
@@ -40,6 +40,13 @@ const initializeAgentExecutor = async (tools, llm, _agentType, _verbose, _callba
|
|
|
40
40
|
}
|
|
41
41
|
};
|
|
42
42
|
exports.initializeAgentExecutor = initializeAgentExecutor;
|
|
43
|
+
/**
|
|
44
|
+
* Initialize an agent executor with options
|
|
45
|
+
* @param tools Array of tools to use in the agent
|
|
46
|
+
* @param llm LLM or ChatModel to use in the agent
|
|
47
|
+
* @param options Options for the agent, including agentType, agentArgs, and other options for AgentExecutor.fromAgentAndTools
|
|
48
|
+
* @returns AgentExecutor
|
|
49
|
+
*/
|
|
43
50
|
const initializeAgentExecutorWithOptions = async (tools, llm, options = {
|
|
44
51
|
agentType: llm._modelType() === "base_chat_model"
|
|
45
52
|
? "chat-zero-shot-react-description"
|
|
@@ -7,7 +7,7 @@ import { AgentExecutor } from "./executor.js";
|
|
|
7
7
|
import { ZeroShotAgent } from "./mrkl/index.js";
|
|
8
8
|
type AgentType = "zero-shot-react-description" | "chat-zero-shot-react-description" | "chat-conversational-react-description";
|
|
9
9
|
export declare const initializeAgentExecutor: (tools: Tool[], llm: BaseLanguageModel, _agentType?: AgentType, _verbose?: boolean, _callbackManager?: CallbackManager) => Promise<AgentExecutor>;
|
|
10
|
-
type
|
|
10
|
+
export type InitializeAgentExecutorOptions = ({
|
|
11
11
|
agentType: "zero-shot-react-description";
|
|
12
12
|
agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];
|
|
13
13
|
} & Omit<Parameters<typeof AgentExecutor.fromAgentAndTools>[0], "agent" | "tools" | "memory">) | ({
|
|
@@ -17,5 +17,12 @@ type AgentExecutorOptions = ({
|
|
|
17
17
|
agentType: "chat-conversational-react-description";
|
|
18
18
|
agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];
|
|
19
19
|
} & Omit<Parameters<typeof AgentExecutor.fromAgentAndTools>[0], "agent" | "tools">);
|
|
20
|
-
|
|
20
|
+
/**
|
|
21
|
+
* Initialize an agent executor with options
|
|
22
|
+
* @param tools Array of tools to use in the agent
|
|
23
|
+
* @param llm LLM or ChatModel to use in the agent
|
|
24
|
+
* @param options Options for the agent, including agentType, agentArgs, and other options for AgentExecutor.fromAgentAndTools
|
|
25
|
+
* @returns AgentExecutor
|
|
26
|
+
*/
|
|
27
|
+
export declare const initializeAgentExecutorWithOptions: (tools: Tool[], llm: BaseLanguageModel, options?: InitializeAgentExecutorOptions) => Promise<AgentExecutor>;
|
|
21
28
|
export {};
|
|
@@ -36,6 +36,13 @@ export const initializeAgentExecutor = async (tools, llm, _agentType, _verbose,
|
|
|
36
36
|
throw new Error("Unknown agent type");
|
|
37
37
|
}
|
|
38
38
|
};
|
|
39
|
+
/**
|
|
40
|
+
* Initialize an agent executor with options
|
|
41
|
+
* @param tools Array of tools to use in the agent
|
|
42
|
+
* @param llm LLM or ChatModel to use in the agent
|
|
43
|
+
* @param options Options for the agent, including agentType, agentArgs, and other options for AgentExecutor.fromAgentAndTools
|
|
44
|
+
* @returns AgentExecutor
|
|
45
|
+
*/
|
|
39
46
|
export const initializeAgentExecutorWithOptions = async (tools, llm, options = {
|
|
40
47
|
agentType: llm._modelType() === "base_chat_model"
|
|
41
48
|
? "chat-zero-shot-react-description"
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.getCacheKey = void 0;
|
|
7
|
+
const object_hash_1 = __importDefault(require("object-hash"));
|
|
8
|
+
/**
|
|
9
|
+
* This cache key should be consistent across all versions of langchain.
|
|
10
|
+
* It is currently NOT consistent across versions of langchain.
|
|
11
|
+
*
|
|
12
|
+
* A huge benefit of having a remote cache (like redis) is that you can
|
|
13
|
+
 * access the cache from different processes/machines. This allows you to
|
|
14
|
+
 * separate concerns and scale horizontally.
|
|
15
|
+
*
|
|
16
|
+
* TODO: Make cache key consistent across versions of langchain.
|
|
17
|
+
*/
|
|
18
|
+
const getCacheKey = (...strings) => (0, object_hash_1.default)(strings.join("_"));
|
|
19
|
+
exports.getCacheKey = getCacheKey;
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* This cache key should be consistent across all versions of langchain.
|
|
3
|
+
* It is currently NOT consistent across versions of langchain.
|
|
4
|
+
*
|
|
5
|
+
* A huge benefit of having a remote cache (like redis) is that you can
|
|
6
|
+
 * access the cache from different processes/machines. This allows you to
|
|
7
|
+
 * separate concerns and scale horizontally.
|
|
8
|
+
*
|
|
9
|
+
* TODO: Make cache key consistent across versions of langchain.
|
|
10
|
+
*/
|
|
11
|
+
export declare const getCacheKey: (...strings: string[]) => string;
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import hash from "object-hash";
|
|
2
|
+
/**
|
|
3
|
+
* This cache key should be consistent across all versions of langchain.
|
|
4
|
+
* It is currently NOT consistent across versions of langchain.
|
|
5
|
+
*
|
|
6
|
+
* A huge benefit of having a remote cache (like redis) is that you can
|
|
7
|
+
 * access the cache from different processes/machines. This allows you to
|
|
8
|
+
 * separate concerns and scale horizontally.
|
|
9
|
+
*
|
|
10
|
+
* TODO: Make cache key consistent across versions of langchain.
|
|
11
|
+
*/
|
|
12
|
+
export const getCacheKey = (...strings) => hash(strings.join("_"));
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.InMemoryCache = void 0;
|
|
4
|
+
const base_js_1 = require("./base.cjs");
|
|
5
|
+
const index_js_1 = require("../schema/index.cjs");
|
|
6
|
+
const GLOBAL_MAP = new Map();
|
|
7
|
+
class InMemoryCache extends index_js_1.BaseCache {
|
|
8
|
+
constructor(map) {
|
|
9
|
+
super();
|
|
10
|
+
Object.defineProperty(this, "cache", {
|
|
11
|
+
enumerable: true,
|
|
12
|
+
configurable: true,
|
|
13
|
+
writable: true,
|
|
14
|
+
value: void 0
|
|
15
|
+
});
|
|
16
|
+
this.cache = map ?? new Map();
|
|
17
|
+
}
|
|
18
|
+
lookup(prompt, llmKey) {
|
|
19
|
+
return Promise.resolve(this.cache.get((0, base_js_1.getCacheKey)(prompt, llmKey)) ?? null);
|
|
20
|
+
}
|
|
21
|
+
async update(prompt, llmKey, value) {
|
|
22
|
+
this.cache.set((0, base_js_1.getCacheKey)(prompt, llmKey), value);
|
|
23
|
+
}
|
|
24
|
+
static global() {
|
|
25
|
+
return new InMemoryCache(GLOBAL_MAP);
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
exports.InMemoryCache = InMemoryCache;
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import { Generation, BaseCache } from "../schema/index.js";
|
|
2
|
+
export declare class InMemoryCache<T = Generation[]> extends BaseCache<T> {
|
|
3
|
+
private cache;
|
|
4
|
+
constructor(map?: Map<string, T>);
|
|
5
|
+
lookup(prompt: string, llmKey: string): Promise<T | null>;
|
|
6
|
+
update(prompt: string, llmKey: string, value: T): Promise<void>;
|
|
7
|
+
static global(): InMemoryCache;
|
|
8
|
+
}
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { getCacheKey } from "./base.js";
|
|
2
|
+
import { BaseCache } from "../schema/index.js";
|
|
3
|
+
const GLOBAL_MAP = new Map();
|
|
4
|
+
export class InMemoryCache extends BaseCache {
|
|
5
|
+
constructor(map) {
|
|
6
|
+
super();
|
|
7
|
+
Object.defineProperty(this, "cache", {
|
|
8
|
+
enumerable: true,
|
|
9
|
+
configurable: true,
|
|
10
|
+
writable: true,
|
|
11
|
+
value: void 0
|
|
12
|
+
});
|
|
13
|
+
this.cache = map ?? new Map();
|
|
14
|
+
}
|
|
15
|
+
lookup(prompt, llmKey) {
|
|
16
|
+
return Promise.resolve(this.cache.get(getCacheKey(prompt, llmKey)) ?? null);
|
|
17
|
+
}
|
|
18
|
+
async update(prompt, llmKey, value) {
|
|
19
|
+
this.cache.set(getCacheKey(prompt, llmKey), value);
|
|
20
|
+
}
|
|
21
|
+
static global() {
|
|
22
|
+
return new InMemoryCache(GLOBAL_MAP);
|
|
23
|
+
}
|
|
24
|
+
}
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.RedisCache = void 0;
|
|
4
|
+
const index_js_1 = require("../schema/index.cjs");
|
|
5
|
+
const base_js_1 = require("./base.cjs");
|
|
6
|
+
class RedisCache extends index_js_1.BaseCache {
|
|
7
|
+
constructor(redisClient) {
|
|
8
|
+
super();
|
|
9
|
+
Object.defineProperty(this, "redisClient", {
|
|
10
|
+
enumerable: true,
|
|
11
|
+
configurable: true,
|
|
12
|
+
writable: true,
|
|
13
|
+
value: void 0
|
|
14
|
+
});
|
|
15
|
+
this.redisClient = redisClient;
|
|
16
|
+
}
|
|
17
|
+
async lookup(prompt, llmKey) {
|
|
18
|
+
let idx = 0;
|
|
19
|
+
let key = (0, base_js_1.getCacheKey)(prompt, llmKey, String(idx));
|
|
20
|
+
let value = await this.redisClient.get(key);
|
|
21
|
+
const generations = [];
|
|
22
|
+
while (value) {
|
|
23
|
+
if (!value) {
|
|
24
|
+
break;
|
|
25
|
+
}
|
|
26
|
+
generations.push({ text: value });
|
|
27
|
+
idx += 1;
|
|
28
|
+
key = (0, base_js_1.getCacheKey)(prompt, llmKey, String(idx));
|
|
29
|
+
value = await this.redisClient.get(key);
|
|
30
|
+
}
|
|
31
|
+
return generations.length > 0 ? generations : null;
|
|
32
|
+
}
|
|
33
|
+
async update(prompt, llmKey, value) {
|
|
34
|
+
for (let i = 0; i < value.length; i += 1) {
|
|
35
|
+
const key = (0, base_js_1.getCacheKey)(prompt, llmKey, String(i));
|
|
36
|
+
await this.redisClient.set(key, value[i].text);
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
exports.RedisCache = RedisCache;
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import type { RedisClientType } from "redis";
|
|
2
|
+
import { BaseCache, Generation } from "../schema/index.js";
|
|
3
|
+
export declare class RedisCache extends BaseCache {
|
|
4
|
+
private redisClient;
|
|
5
|
+
constructor(redisClient: RedisClientType);
|
|
6
|
+
lookup(prompt: string, llmKey: string): Promise<Generation[] | null>;
|
|
7
|
+
update(prompt: string, llmKey: string, value: Generation[]): Promise<void>;
|
|
8
|
+
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import { BaseCache } from "../schema/index.js";
|
|
2
|
+
import { getCacheKey } from "./base.js";
|
|
3
|
+
export class RedisCache extends BaseCache {
|
|
4
|
+
constructor(redisClient) {
|
|
5
|
+
super();
|
|
6
|
+
Object.defineProperty(this, "redisClient", {
|
|
7
|
+
enumerable: true,
|
|
8
|
+
configurable: true,
|
|
9
|
+
writable: true,
|
|
10
|
+
value: void 0
|
|
11
|
+
});
|
|
12
|
+
this.redisClient = redisClient;
|
|
13
|
+
}
|
|
14
|
+
async lookup(prompt, llmKey) {
|
|
15
|
+
let idx = 0;
|
|
16
|
+
let key = getCacheKey(prompt, llmKey, String(idx));
|
|
17
|
+
let value = await this.redisClient.get(key);
|
|
18
|
+
const generations = [];
|
|
19
|
+
while (value) {
|
|
20
|
+
if (!value) {
|
|
21
|
+
break;
|
|
22
|
+
}
|
|
23
|
+
generations.push({ text: value });
|
|
24
|
+
idx += 1;
|
|
25
|
+
key = getCacheKey(prompt, llmKey, String(idx));
|
|
26
|
+
value = await this.redisClient.get(key);
|
|
27
|
+
}
|
|
28
|
+
return generations.length > 0 ? generations : null;
|
|
29
|
+
}
|
|
30
|
+
async update(prompt, llmKey, value) {
|
|
31
|
+
for (let i = 0; i < value.length; i += 1) {
|
|
32
|
+
const key = getCacheKey(prompt, llmKey, String(i));
|
|
33
|
+
await this.redisClient.set(key, value[i].text);
|
|
34
|
+
}
|
|
35
|
+
}
|
|
36
|
+
}
|
|
@@ -29,8 +29,6 @@ const DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
|
|
|
29
29
|
* `anthropic.complete`} can be passed through {@link invocationKwargs},
|
|
30
30
|
* even if not explicitly available on this class.
|
|
31
31
|
*
|
|
32
|
-
* @augments BaseLLM
|
|
33
|
-
* @augments AnthropicInput
|
|
34
32
|
*/
|
|
35
33
|
class ChatAnthropic extends base_js_1.BaseChatModel {
|
|
36
34
|
constructor(fields) {
|
|
@@ -137,6 +135,7 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
|
|
|
137
135
|
...this.invocationKwargs,
|
|
138
136
|
};
|
|
139
137
|
}
|
|
138
|
+
/** @ignore */
|
|
140
139
|
_identifyingParams() {
|
|
141
140
|
return {
|
|
142
141
|
model_name: this.modelName,
|
|
@@ -160,21 +159,7 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
|
|
|
160
159
|
})
|
|
161
160
|
.join("") + sdk_1.AI_PROMPT);
|
|
162
161
|
}
|
|
163
|
-
/**
|
|
164
|
-
* Call out to Anthropic's endpoint with k unique prompts
|
|
165
|
-
*
|
|
166
|
-
* @param messages - The messages to pass into the model.
|
|
167
|
-
* @param [stopSequences] - Optional list of stop sequences to use when generating.
|
|
168
|
-
*
|
|
169
|
-
* @returns The full LLM output.
|
|
170
|
-
*
|
|
171
|
-
* @example
|
|
172
|
-
* ```ts
|
|
173
|
-
* import { ChatAnthropic } from "langchain/chat_models/openai";
|
|
174
|
-
* const anthropic = new ChatAnthropic();
|
|
175
|
-
* const response = await anthropic.generate(new HumanChatMessage(["Tell me a joke."]));
|
|
176
|
-
* ```
|
|
177
|
-
*/
|
|
162
|
+
/** @ignore */
|
|
178
163
|
async _generate(messages, stopSequences) {
|
|
179
164
|
if (this.stopSequences && stopSequences) {
|
|
180
165
|
throw new Error(`"stopSequence" parameter found in input and default params`);
|
|
@@ -236,6 +221,7 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
|
|
|
236
221
|
_llmType() {
|
|
237
222
|
return "anthropic";
|
|
238
223
|
}
|
|
224
|
+
/** @ignore */
|
|
239
225
|
_combineLLMOutput() {
|
|
240
226
|
return [];
|
|
241
227
|
}
|
|
@@ -1,7 +1,10 @@
|
|
|
1
1
|
import { CompletionResponse, SamplingParameters } from "@anthropic-ai/sdk";
|
|
2
2
|
import { BaseChatModel, BaseChatModelParams } from "./base.js";
|
|
3
3
|
import { BaseChatMessage, ChatResult } from "../schema/index.js";
|
|
4
|
-
|
|
4
|
+
/**
|
|
5
|
+
* Input to AnthropicChat class.
|
|
6
|
+
*/
|
|
7
|
+
export interface AnthropicInput {
|
|
5
8
|
/** Amount of randomness injected into the response. Ranges
|
|
6
9
|
* from 0 to 1. Use temp closer to 0 for analytical /
|
|
7
10
|
* multiple choice, and temp closer to 1 for creative
|
|
@@ -25,18 +28,12 @@ interface ModelParams {
|
|
|
25
28
|
/** A maximum number of tokens to generate before stopping. */
|
|
26
29
|
maxTokensToSample: number;
|
|
27
30
|
/** A list of strings upon which to stop generating.
|
|
28
|
-
* You probably want ["\n\nHuman:"]
|
|
31
|
+
* You probably want `["\n\nHuman:"]`, as that's the cue for
|
|
29
32
|
* the next turn in the dialog agent.
|
|
30
33
|
*/
|
|
31
34
|
stopSequences?: string[];
|
|
32
35
|
/** Whether to stream the results or not */
|
|
33
36
|
streaming?: boolean;
|
|
34
|
-
}
|
|
35
|
-
/**
|
|
36
|
-
* Input to AnthropicChat class.
|
|
37
|
-
* @augments ModelParams
|
|
38
|
-
*/
|
|
39
|
-
interface AnthropicInput extends ModelParams {
|
|
40
37
|
/** Anthropic API key */
|
|
41
38
|
apiKey?: string;
|
|
42
39
|
/** Model name to use */
|
|
@@ -60,8 +57,6 @@ type Kwargs = Record<string, any>;
|
|
|
60
57
|
* `anthropic.complete`} can be passed through {@link invocationKwargs},
|
|
61
58
|
* even if not explicitly available on this class.
|
|
62
59
|
*
|
|
63
|
-
* @augments BaseLLM
|
|
64
|
-
* @augments AnthropicInput
|
|
65
60
|
*/
|
|
66
61
|
export declare class ChatAnthropic extends BaseChatModel implements AnthropicInput {
|
|
67
62
|
apiKey?: string;
|
|
@@ -82,6 +77,7 @@ export declare class ChatAnthropic extends BaseChatModel implements AnthropicInp
|
|
|
82
77
|
* Get the parameters used to invoke the model
|
|
83
78
|
*/
|
|
84
79
|
invocationParams(): Omit<SamplingParameters, "prompt"> & Kwargs;
|
|
80
|
+
/** @ignore */
|
|
85
81
|
_identifyingParams(): {
|
|
86
82
|
model: string;
|
|
87
83
|
temperature?: number | undefined;
|
|
@@ -110,25 +106,12 @@ export declare class ChatAnthropic extends BaseChatModel implements AnthropicInp
|
|
|
110
106
|
model_name: string;
|
|
111
107
|
};
|
|
112
108
|
private formatMessagesAsPrompt;
|
|
113
|
-
/**
|
|
114
|
-
* Call out to Anthropic's endpoint with k unique prompts
|
|
115
|
-
*
|
|
116
|
-
* @param messages - The messages to pass into the model.
|
|
117
|
-
* @param [stopSequences] - Optional list of stop sequences to use when generating.
|
|
118
|
-
*
|
|
119
|
-
* @returns The full LLM output.
|
|
120
|
-
*
|
|
121
|
-
* @example
|
|
122
|
-
* ```ts
|
|
123
|
-
* import { ChatAnthropic } from "langchain/chat_models/openai";
|
|
124
|
-
* const anthropic = new ChatAnthropic();
|
|
125
|
-
* const response = await anthropic.generate(new HumanChatMessage(["Tell me a joke."]));
|
|
126
|
-
* ```
|
|
127
|
-
*/
|
|
109
|
+
/** @ignore */
|
|
128
110
|
_generate(messages: BaseChatMessage[], stopSequences?: string[]): Promise<ChatResult>;
|
|
129
111
|
/** @ignore */
|
|
130
112
|
completionWithRetry(request: SamplingParameters & Kwargs): Promise<CompletionResponse>;
|
|
131
113
|
_llmType(): string;
|
|
114
|
+
/** @ignore */
|
|
132
115
|
_combineLLMOutput(): never[];
|
|
133
116
|
}
|
|
134
117
|
export {};
|
|
@@ -26,8 +26,6 @@ const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
|
|
|
26
26
|
* `anthropic.complete`} can be passed through {@link invocationKwargs},
|
|
27
27
|
* even if not explicitly available on this class.
|
|
28
28
|
*
|
|
29
|
-
* @augments BaseLLM
|
|
30
|
-
* @augments AnthropicInput
|
|
31
29
|
*/
|
|
32
30
|
export class ChatAnthropic extends BaseChatModel {
|
|
33
31
|
constructor(fields) {
|
|
@@ -134,6 +132,7 @@ export class ChatAnthropic extends BaseChatModel {
|
|
|
134
132
|
...this.invocationKwargs,
|
|
135
133
|
};
|
|
136
134
|
}
|
|
135
|
+
/** @ignore */
|
|
137
136
|
_identifyingParams() {
|
|
138
137
|
return {
|
|
139
138
|
model_name: this.modelName,
|
|
@@ -157,21 +156,7 @@ export class ChatAnthropic extends BaseChatModel {
|
|
|
157
156
|
})
|
|
158
157
|
.join("") + AI_PROMPT);
|
|
159
158
|
}
|
|
160
|
-
/**
|
|
161
|
-
* Call out to Anthropic's endpoint with k unique prompts
|
|
162
|
-
*
|
|
163
|
-
* @param messages - The messages to pass into the model.
|
|
164
|
-
* @param [stopSequences] - Optional list of stop sequences to use when generating.
|
|
165
|
-
*
|
|
166
|
-
* @returns The full LLM output.
|
|
167
|
-
*
|
|
168
|
-
* @example
|
|
169
|
-
* ```ts
|
|
170
|
-
* import { ChatAnthropic } from "langchain/chat_models/openai";
|
|
171
|
-
* const anthropic = new ChatAnthropic();
|
|
172
|
-
* const response = await anthropic.generate(new HumanChatMessage(["Tell me a joke."]));
|
|
173
|
-
* ```
|
|
174
|
-
*/
|
|
159
|
+
/** @ignore */
|
|
175
160
|
async _generate(messages, stopSequences) {
|
|
176
161
|
if (this.stopSequences && stopSequences) {
|
|
177
162
|
throw new Error(`"stopSequence" parameter found in input and default params`);
|
|
@@ -233,6 +218,7 @@ export class ChatAnthropic extends BaseChatModel {
|
|
|
233
218
|
_llmType() {
|
|
234
219
|
return "anthropic";
|
|
235
220
|
}
|
|
221
|
+
/** @ignore */
|
|
236
222
|
_combineLLMOutput() {
|
|
237
223
|
return [];
|
|
238
224
|
}
|
|
@@ -179,6 +179,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
|
|
|
179
179
|
...this.modelKwargs,
|
|
180
180
|
};
|
|
181
181
|
}
|
|
182
|
+
/** @ignore */
|
|
182
183
|
_identifyingParams() {
|
|
183
184
|
return {
|
|
184
185
|
model_name: this.modelName,
|
|
@@ -192,21 +193,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
|
|
|
192
193
|
identifyingParams() {
|
|
193
194
|
return this._identifyingParams();
|
|
194
195
|
}
|
|
195
|
-
/**
|
|
196
|
-
* Call out to OpenAI's endpoint with k unique prompts
|
|
197
|
-
*
|
|
198
|
-
* @param messages - The messages to pass into the model.
|
|
199
|
-
* @param [stop] - Optional list of stop words to use when generating.
|
|
200
|
-
*
|
|
201
|
-
* @returns The full LLM output.
|
|
202
|
-
*
|
|
203
|
-
* @example
|
|
204
|
-
* ```ts
|
|
205
|
-
* import { OpenAI } from "langchain/llms/openai";
|
|
206
|
-
* const openai = new OpenAI();
|
|
207
|
-
* const response = await openai.generate(["Tell me a joke."]);
|
|
208
|
-
* ```
|
|
209
|
-
*/
|
|
196
|
+
/** @ignore */
|
|
210
197
|
async _generate(messages, stop) {
|
|
211
198
|
const tokenUsage = {};
|
|
212
199
|
if (this.stop && stop) {
|
|
@@ -345,6 +332,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
|
|
|
345
332
|
_llmType() {
|
|
346
333
|
return "openai";
|
|
347
334
|
}
|
|
335
|
+
/** @ignore */
|
|
348
336
|
_combineLLMOutput(...llmOutputs) {
|
|
349
337
|
return llmOutputs.reduce((acc, llmOutput) => {
|
|
350
338
|
if (llmOutput && llmOutput.tokenUsage) {
|
|
@@ -10,7 +10,7 @@ interface TokenUsage {
|
|
|
10
10
|
interface OpenAILLMOutput {
|
|
11
11
|
tokenUsage: TokenUsage;
|
|
12
12
|
}
|
|
13
|
-
interface
|
|
13
|
+
export interface OpenAIInput {
|
|
14
14
|
/** Sampling temperature to use, between 0 and 2, defaults to 1 */
|
|
15
15
|
temperature: number;
|
|
16
16
|
/** Total probability mass of tokens to consider at each step, between 0 and 1, defaults to 1 */
|
|
@@ -30,12 +30,6 @@ interface ModelParams {
|
|
|
30
30
|
* defaults to the maximum number of tokens allowed by the model.
|
|
31
31
|
*/
|
|
32
32
|
maxTokens?: number;
|
|
33
|
-
}
|
|
34
|
-
/**
|
|
35
|
-
* Input to OpenAI class.
|
|
36
|
-
* @augments ModelParams
|
|
37
|
-
*/
|
|
38
|
-
interface OpenAIInput extends ModelParams {
|
|
39
33
|
/** Model name to use */
|
|
40
34
|
modelName: string;
|
|
41
35
|
/** Holds any additional parameters that are valid to pass to {@link
|
|
@@ -90,6 +84,7 @@ export declare class ChatOpenAI extends BaseChatModel implements OpenAIInput {
|
|
|
90
84
|
* Get the parameters used to invoke the model
|
|
91
85
|
*/
|
|
92
86
|
invocationParams(): Omit<CreateChatCompletionRequest, "messages"> & Kwargs;
|
|
87
|
+
/** @ignore */
|
|
93
88
|
_identifyingParams(): {
|
|
94
89
|
apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>) | undefined;
|
|
95
90
|
organization?: string | undefined;
|
|
@@ -137,21 +132,7 @@ export declare class ChatOpenAI extends BaseChatModel implements OpenAIInput {
|
|
|
137
132
|
logit_bias?: object | null | undefined;
|
|
138
133
|
model_name: string;
|
|
139
134
|
};
|
|
140
|
-
/**
|
|
141
|
-
* Call out to OpenAI's endpoint with k unique prompts
|
|
142
|
-
*
|
|
143
|
-
* @param messages - The messages to pass into the model.
|
|
144
|
-
* @param [stop] - Optional list of stop words to use when generating.
|
|
145
|
-
*
|
|
146
|
-
* @returns The full LLM output.
|
|
147
|
-
*
|
|
148
|
-
* @example
|
|
149
|
-
* ```ts
|
|
150
|
-
* import { OpenAI } from "langchain/llms/openai";
|
|
151
|
-
* const openai = new OpenAI();
|
|
152
|
-
* const response = await openai.generate(["Tell me a joke."]);
|
|
153
|
-
* ```
|
|
154
|
-
*/
|
|
135
|
+
/** @ignore */
|
|
155
136
|
_generate(messages: BaseChatMessage[], stop?: string[]): Promise<ChatResult>;
|
|
156
137
|
getNumTokensFromMessages(messages: BaseChatMessage[]): Promise<{
|
|
157
138
|
totalCount: number;
|
|
@@ -160,6 +141,7 @@ export declare class ChatOpenAI extends BaseChatModel implements OpenAIInput {
|
|
|
160
141
|
/** @ignore */
|
|
161
142
|
completionWithRetry(request: CreateChatCompletionRequest, options?: StreamingAxiosConfiguration): Promise<CreateChatCompletionResponse>;
|
|
162
143
|
_llmType(): string;
|
|
144
|
+
/** @ignore */
|
|
163
145
|
_combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;
|
|
164
146
|
}
|
|
165
147
|
export {};
|
|
@@ -173,6 +173,7 @@ export class ChatOpenAI extends BaseChatModel {
|
|
|
173
173
|
...this.modelKwargs,
|
|
174
174
|
};
|
|
175
175
|
}
|
|
176
|
+
/** @ignore */
|
|
176
177
|
_identifyingParams() {
|
|
177
178
|
return {
|
|
178
179
|
model_name: this.modelName,
|
|
@@ -186,21 +187,7 @@ export class ChatOpenAI extends BaseChatModel {
|
|
|
186
187
|
identifyingParams() {
|
|
187
188
|
return this._identifyingParams();
|
|
188
189
|
}
|
|
189
|
-
/**
|
|
190
|
-
* Call out to OpenAI's endpoint with k unique prompts
|
|
191
|
-
*
|
|
192
|
-
* @param messages - The messages to pass into the model.
|
|
193
|
-
* @param [stop] - Optional list of stop words to use when generating.
|
|
194
|
-
*
|
|
195
|
-
* @returns The full LLM output.
|
|
196
|
-
*
|
|
197
|
-
* @example
|
|
198
|
-
* ```ts
|
|
199
|
-
* import { OpenAI } from "langchain/llms/openai";
|
|
200
|
-
* const openai = new OpenAI();
|
|
201
|
-
* const response = await openai.generate(["Tell me a joke."]);
|
|
202
|
-
* ```
|
|
203
|
-
*/
|
|
190
|
+
/** @ignore */
|
|
204
191
|
async _generate(messages, stop) {
|
|
205
192
|
const tokenUsage = {};
|
|
206
193
|
if (this.stop && stop) {
|
|
@@ -339,6 +326,7 @@ export class ChatOpenAI extends BaseChatModel {
|
|
|
339
326
|
_llmType() {
|
|
340
327
|
return "openai";
|
|
341
328
|
}
|
|
329
|
+
/** @ignore */
|
|
342
330
|
_combineLLMOutput(...llmOutputs) {
|
|
343
331
|
return llmOutputs.reduce((acc, llmOutput) => {
|
|
344
332
|
if (llmOutput && llmOutput.tokenUsage) {
|