langchain 0.0.199 → 0.0.200

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/agents/index.cjs +3 -1
  2. package/dist/agents/index.d.ts +1 -1
  3. package/dist/agents/index.js +1 -1
  4. package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -1
  5. package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -1
  6. package/dist/chat_models/fake.cjs +2 -114
  7. package/dist/chat_models/fake.d.ts +1 -52
  8. package/dist/chat_models/fake.js +1 -113
  9. package/dist/chat_models/llama_cpp.cjs +2 -1
  10. package/dist/chat_models/llama_cpp.d.ts +1 -1
  11. package/dist/chat_models/llama_cpp.js +2 -1
  12. package/dist/chat_models/minimax.d.ts +1 -1
  13. package/dist/embeddings/gradient_ai.cjs +102 -0
  14. package/dist/embeddings/gradient_ai.d.ts +48 -0
  15. package/dist/embeddings/gradient_ai.js +98 -0
  16. package/dist/llms/gradient_ai.cjs +22 -8
  17. package/dist/llms/gradient_ai.d.ts +7 -2
  18. package/dist/llms/gradient_ai.js +22 -8
  19. package/dist/llms/llama_cpp.cjs +2 -1
  20. package/dist/llms/llama_cpp.d.ts +1 -1
  21. package/dist/llms/llama_cpp.js +2 -1
  22. package/dist/load/import_constants.cjs +1 -0
  23. package/dist/load/import_constants.js +1 -0
  24. package/dist/memory/vector_store.cjs +1 -1
  25. package/dist/memory/vector_store.js +1 -1
  26. package/dist/tools/webbrowser.cjs +1 -1
  27. package/dist/tools/webbrowser.js +1 -1
  28. package/dist/util/document.cjs +1 -1
  29. package/dist/util/document.d.ts +1 -1
  30. package/dist/util/document.js +1 -1
  31. package/dist/util/tiktoken.cjs +15 -24
  32. package/dist/util/tiktoken.d.ts +1 -9
  33. package/dist/util/tiktoken.js +1 -21
  34. package/embeddings/gradient_ai.cjs +1 -0
  35. package/embeddings/gradient_ai.d.ts +1 -0
  36. package/embeddings/gradient_ai.js +1 -0
  37. package/package.json +10 -2
package/dist/agents/index.cjs CHANGED
@@ -1,9 +1,11 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.XMLAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
+ exports.XMLAgent = exports.OpenAIAgent = exports.StructuredChatOutputParserWithRetries = exports.StructuredChatOutputParser = exports.StructuredChatAgent = exports.AgentActionOutputParser = exports.ZeroShotAgentOutputParser = exports.ZeroShotAgent = exports.initializeAgentExecutorWithOptions = exports.initializeAgentExecutor = exports.AgentExecutor = exports.ChatConversationalAgentOutputParserWithRetries = exports.ChatConversationalAgentOutputParser = exports.ChatConversationalAgent = exports.ChatAgentOutputParser = exports.ChatAgent = exports.Toolkit = exports.createVectorStoreRouterAgent = exports.createVectorStoreAgent = exports.createOpenApiAgent = exports.createJsonAgent = exports.ZapierToolKit = exports.VectorStoreToolkit = exports.VectorStoreRouterToolkit = exports.RequestsToolkit = exports.OpenApiToolkit = exports.JsonToolkit = exports.LLMSingleActionAgent = exports.RunnableAgent = exports.BaseMultiActionAgent = exports.BaseSingleActionAgent = exports.Agent = void 0;
  var agent_js_1 = require("./agent.cjs");
  Object.defineProperty(exports, "Agent", { enumerable: true, get: function () { return agent_js_1.Agent; } });
  Object.defineProperty(exports, "BaseSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseSingleActionAgent; } });
+ Object.defineProperty(exports, "BaseMultiActionAgent", { enumerable: true, get: function () { return agent_js_1.BaseMultiActionAgent; } });
+ Object.defineProperty(exports, "RunnableAgent", { enumerable: true, get: function () { return agent_js_1.RunnableAgent; } });
  Object.defineProperty(exports, "LLMSingleActionAgent", { enumerable: true, get: function () { return agent_js_1.LLMSingleActionAgent; } });
  var index_js_1 = require("./toolkits/index.cjs");
  Object.defineProperty(exports, "JsonToolkit", { enumerable: true, get: function () { return index_js_1.JsonToolkit; } });
package/dist/agents/index.d.ts CHANGED
@@ -1,4 +1,4 @@
- export { Agent, type AgentArgs, BaseSingleActionAgent, LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, } from "./agent.js";
+ export { Agent, type AgentArgs, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, LLMSingleActionAgent, type LLMSingleActionAgentInput, type OutputParserArgs, } from "./agent.js";
  export { JsonToolkit, OpenApiToolkit, RequestsToolkit, type VectorStoreInfo, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./toolkits/index.js";
  export { Toolkit } from "./toolkits/base.js";
  export { ChatAgent, type ChatAgentInput, type ChatCreatePromptArgs, } from "./chat/index.js";
package/dist/agents/index.js CHANGED
@@ -1,4 +1,4 @@
- export { Agent, BaseSingleActionAgent, LLMSingleActionAgent, } from "./agent.js";
+ export { Agent, BaseSingleActionAgent, BaseMultiActionAgent, RunnableAgent, LLMSingleActionAgent, } from "./agent.js";
  export { JsonToolkit, OpenApiToolkit, RequestsToolkit, VectorStoreRouterToolkit, VectorStoreToolkit, ZapierToolKit, createJsonAgent, createOpenApiAgent, createVectorStoreAgent, createVectorStoreRouterAgent, } from "./toolkits/index.js";
  export { Toolkit } from "./toolkits/base.js";
  export { ChatAgent, } from "./chat/index.js";
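Note on the three agents/index.* hunks above: `BaseMultiActionAgent` and `RunnableAgent` are now re-exported alongside `BaseSingleActionAgent`. A minimal consumer sketch, assuming the package's standard `langchain/agents` subpath export maps to these files:

```typescript
// Both classes become importable from the agents entrypoint as of 0.0.200.
// BaseMultiActionAgent is the abstract parent for agents that can return
// several tool actions per planning step.
import { BaseMultiActionAgent, RunnableAgent } from "langchain/agents";

console.log(BaseMultiActionAgent.name, RunnableAgent.name);
```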
package/dist/agents/toolkits/conversational_retrieval/tool.cjs CHANGED
@@ -7,7 +7,7 @@ const document_js_1 = require("../../../util/document.cjs");
  function createRetrieverTool(retriever, input) {
  const func = async ({ input }, runManager) => {
  const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
- return (0, document_js_1.formatDocumentsAsString)(docs, "\n");
+ return (0, document_js_1.formatDocumentsAsString)(docs);
  };
  const schema = zod_1.z.object({
  input: zod_1.z
package/dist/agents/toolkits/conversational_retrieval/tool.js CHANGED
@@ -4,7 +4,7 @@ import { formatDocumentsAsString } from "../../../util/document.js";
  export function createRetrieverTool(retriever, input) {
  const func = async ({ input }, runManager) => {
  const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
- return formatDocumentsAsString(docs, "\n");
+ return formatDocumentsAsString(docs);
  };
  const schema = z.object({
  input: z
package/dist/chat_models/fake.cjs CHANGED
@@ -1,117 +1,5 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.FakeListChatModel = void 0;
- const base_js_1 = require("./base.cjs");
- const index_js_1 = require("../schema/index.cjs");
- /**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- * responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
- class FakeListChatModel extends base_js_1.BaseChatModel {
- static lc_name() {
- return "FakeListChatModel";
- }
- constructor({ responses, sleep }) {
- super({});
- Object.defineProperty(this, "responses", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "i", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 0
- });
- Object.defineProperty(this, "sleep", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.responses = responses;
- this.sleep = sleep;
- }
- _combineLLMOutput() {
- return [];
- }
- _llmType() {
- return "fake-list";
- }
- async _generate(_messages, options) {
- await this._sleepIfRequested();
- if (options?.stop?.length) {
- return {
- generations: [this._formatGeneration(options.stop[0])],
- };
- }
- else {
- const response = this._currentResponse();
- this._incrementResponse();
- return {
- generations: [this._formatGeneration(response)],
- llmOutput: {},
- };
- }
- }
- _formatGeneration(text) {
- return {
- message: new index_js_1.AIMessage(text),
- text,
- };
- }
- async *_streamResponseChunks(_messages, _options, _runManager) {
- const response = this._currentResponse();
- this._incrementResponse();
- for await (const text of response) {
- await this._sleepIfRequested();
- yield this._createResponseChunk(text);
- }
- }
- async _sleepIfRequested() {
- if (this.sleep !== undefined) {
- await this._sleep();
- }
- }
- async _sleep() {
- return new Promise((resolve) => {
- setTimeout(() => resolve(), this.sleep);
- });
- }
- _createResponseChunk(text) {
- return new index_js_1.ChatGenerationChunk({
- message: new index_js_1.AIMessageChunk({ content: text }),
- text,
- });
- }
- _currentResponse() {
- return this.responses[this.i];
- }
- _incrementResponse() {
- if (this.i < this.responses.length - 1) {
- this.i += 1;
- }
- else {
- this.i = 0;
- }
- }
- }
- exports.FakeListChatModel = FakeListChatModel;
+ var testing_1 = require("@langchain/core/utils/testing");
+ Object.defineProperty(exports, "FakeListChatModel", { enumerable: true, get: function () { return testing_1.FakeListChatModel; } });
package/dist/chat_models/fake.d.ts CHANGED
@@ -1,52 +1 @@
- import { BaseChatModel, BaseChatModelParams } from "./base.js";
- import { AIMessage, BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
- import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
- /**
- * Interface for the input parameters specific to the Fake List Chat model.
- */
- export interface FakeChatInput extends BaseChatModelParams {
- /** Responses to return */
- responses: string[];
- /** Time to sleep in milliseconds between responses */
- sleep?: number;
- }
- /**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- * responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
- export declare class FakeListChatModel extends BaseChatModel {
- static lc_name(): string;
- responses: string[];
- i: number;
- sleep?: number;
- constructor({ responses, sleep }: FakeChatInput);
- _combineLLMOutput(): never[];
- _llmType(): string;
- _generate(_messages: BaseMessage[], options?: this["ParsedCallOptions"]): Promise<ChatResult>;
- _formatGeneration(text: string): {
- message: AIMessage;
- text: string;
- };
- _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
- _sleepIfRequested(): Promise<void>;
- _sleep(): Promise<void>;
- _createResponseChunk(text: string): ChatGenerationChunk;
- _currentResponse(): string;
- _incrementResponse(): void;
- }
+ export { type FakeChatInput, FakeListChatModel, } from "@langchain/core/utils/testing";
package/dist/chat_models/fake.js CHANGED
@@ -1,113 +1 @@
- import { BaseChatModel } from "./base.js";
- import { AIMessage, AIMessageChunk, ChatGenerationChunk, } from "../schema/index.js";
- /**
- * A fake Chat Model that returns a predefined list of responses. It can be used
- * for testing purposes.
- * @example
- * ```typescript
- * const chat = new FakeListChatModel({
- * responses: ["I'll callback later.", "You 'console' them!"]
- * });
- *
- * const firstMessage = new HumanMessage("You want to hear a JavaScript joke?");
- * const secondMessage = new HumanMessage("How do you cheer up a JavaScript developer?");
- *
- * // Call the chat model with a message and log the response
- * const firstResponse = await chat.call([firstMessage]);
- * console.log({ firstResponse });
- *
- * const secondResponse = await chat.call([secondMessage]);
- * console.log({ secondResponse });
- * ```
- */
- export class FakeListChatModel extends BaseChatModel {
- static lc_name() {
- return "FakeListChatModel";
- }
- constructor({ responses, sleep }) {
- super({});
- Object.defineProperty(this, "responses", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "i", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 0
- });
- Object.defineProperty(this, "sleep", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.responses = responses;
- this.sleep = sleep;
- }
- _combineLLMOutput() {
- return [];
- }
- _llmType() {
- return "fake-list";
- }
- async _generate(_messages, options) {
- await this._sleepIfRequested();
- if (options?.stop?.length) {
- return {
- generations: [this._formatGeneration(options.stop[0])],
- };
- }
- else {
- const response = this._currentResponse();
- this._incrementResponse();
- return {
- generations: [this._formatGeneration(response)],
- llmOutput: {},
- };
- }
- }
- _formatGeneration(text) {
- return {
- message: new AIMessage(text),
- text,
- };
- }
- async *_streamResponseChunks(_messages, _options, _runManager) {
- const response = this._currentResponse();
- this._incrementResponse();
- for await (const text of response) {
- await this._sleepIfRequested();
- yield this._createResponseChunk(text);
- }
- }
- async _sleepIfRequested() {
- if (this.sleep !== undefined) {
- await this._sleep();
- }
- }
- async _sleep() {
- return new Promise((resolve) => {
- setTimeout(() => resolve(), this.sleep);
- });
- }
- _createResponseChunk(text) {
- return new ChatGenerationChunk({
- message: new AIMessageChunk({ content: text }),
- text,
- });
- }
- _currentResponse() {
- return this.responses[this.i];
- }
- _incrementResponse() {
- if (this.i < this.responses.length - 1) {
- this.i += 1;
- }
- else {
- this.i = 0;
- }
- }
- }
+ export { FakeListChatModel, } from "@langchain/core/utils/testing";
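The three fake.* hunks swap the local `FakeListChatModel` implementation for re-exports from `@langchain/core/utils/testing`; the public surface (`FakeChatInput`, `FakeListChatModel`) is unchanged. A sketch adapted from the example in the removed JSDoc:

```typescript
import { FakeListChatModel } from "langchain/chat_models/fake";
import { HumanMessage } from "langchain/schema";

// Responses are returned in order and wrap around, as before; only the
// class's home module changed.
const chat = new FakeListChatModel({
  responses: ["I'll callback later.", "You 'console' them!"],
});
const firstResponse = await chat.call([
  new HumanMessage("You want to hear a JavaScript joke?"),
]);
console.log({ firstResponse });
```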
package/dist/chat_models/llama_cpp.cjs CHANGED
@@ -108,7 +108,7 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
  };
  }
  /** @ignore */
- async _call(messages, _options) {
+ async _call(messages, options) {
  let prompt = "";
  if (messages.length > 1) {
  // We need to build a new _session
@@ -126,6 +126,7 @@
  }
  try {
  const promptOptions = {
+ onToken: options.onToken,
  maxTokens: this?.maxTokens,
  temperature: this?.temperature,
  topK: this?.topK,
package/dist/chat_models/llama_cpp.d.ts CHANGED
@@ -63,7 +63,7 @@ export declare class ChatLlamaCpp extends SimpleChatModel<LlamaCppCallOptions> {
  trimWhitespaceSuffix: boolean | undefined;
  };
  /** @ignore */
- _call(messages: BaseMessage[], _options: this["ParsedCallOptions"]): Promise<string>;
+ _call(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<string>;
  _streamResponseChunks(input: BaseMessage[], _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  protected _buildSession(messages: BaseMessage[]): string;
  protected _convertMessagesToInteractions(messages: BaseMessage[]): ConversationInteraction[];
package/dist/chat_models/llama_cpp.js CHANGED
@@ -105,7 +105,7 @@ export class ChatLlamaCpp extends SimpleChatModel {
  };
  }
  /** @ignore */
- async _call(messages, _options) {
+ async _call(messages, options) {
  let prompt = "";
  if (messages.length > 1) {
  // We need to build a new _session
@@ -123,6 +123,7 @@
  }
  try {
  const promptOptions = {
+ onToken: options.onToken,
  maxTokens: this?.maxTokens,
  temperature: this?.temperature,
  topK: this?.topK,
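The `_call` changes above (and the matching llms/llama_cpp.* hunks further down) forward a caller-supplied `onToken` handler into node-llama-cpp's prompt options instead of dropping it. A hedged sketch; the model path is a placeholder, and the token payload shape is whatever node-llama-cpp passes to `onToken` (an assumption here, not spelled out in this diff):

```typescript
import { ChatLlamaCpp } from "langchain/chat_models/llama_cpp";
import { HumanMessage } from "langchain/schema";

const model = new ChatLlamaCpp({ modelPath: "/path/to/model.gguf" }); // placeholder path
const res = await model.call([new HumanMessage("Tell me a short joke")], {
  // Previously ignored; now threaded through to the underlying prompt call.
  onToken: (tokens) => console.log("onToken:", tokens),
});
console.log(res.content);
```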
package/dist/chat_models/minimax.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { OpenAIClient } from "@langchain/openai";
+ import type { OpenAI as OpenAIClient } from "openai";
  import { BaseChatModel, BaseChatModelParams } from "./base.js";
  import { BaseMessage, ChatResult } from "../schema/index.js";
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
package/dist/embeddings/gradient_ai.cjs ADDED
@@ -0,0 +1,102 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GradientEmbeddings = void 0;
+ const nodejs_sdk_1 = require("@gradientai/nodejs-sdk");
+ const env_js_1 = require("../util/env.cjs");
+ const chunk_js_1 = require("../util/chunk.cjs");
+ const base_js_1 = require("./base.cjs");
+ /**
+ * Class for generating embeddings using the Gradient AI's API. Extends the
+ * Embeddings class and implements GradientEmbeddingsParams and
+ */
+ class GradientEmbeddings extends base_js_1.Embeddings {
+ constructor(fields) {
+ super(fields);
+ Object.defineProperty(this, "gradientAccessKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "workspaceId", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "batchSize", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 128
+ });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.gradientAccessKey =
+ fields?.gradientAccessKey ??
+ (0, env_js_1.getEnvironmentVariable)("GRADIENT_ACCESS_TOKEN");
+ this.workspaceId =
+ fields?.workspaceId ?? (0, env_js_1.getEnvironmentVariable)("GRADIENT_WORKSPACE_ID");
+ if (!this.gradientAccessKey) {
+ throw new Error("Missing Gradient AI Access Token");
+ }
+ if (!this.workspaceId) {
+ throw new Error("Missing Gradient AI Workspace ID");
+ }
+ }
+ /**
+ * Method to generate embeddings for an array of documents. Splits the
+ * documents into batches and makes requests to the Gradient API to generate
+ * embeddings.
+ * @param texts Array of documents to generate embeddings for.
+ * @returns Promise that resolves to a 2D array of embeddings for each document.
+ */
+ async embedDocuments(texts) {
+ await this.setModel();
+ const mappedTexts = texts.map((text) => ({ input: text }));
+ const batches = (0, chunk_js_1.chunkArray)(mappedTexts, this.batchSize);
+ const batchRequests = batches.map((batch) => this.caller.call(async () => this.model.generateEmbeddings({
+ inputs: batch,
+ })));
+ const batchResponses = await Promise.all(batchRequests);
+ const embeddings = [];
+ for (let i = 0; i < batchResponses.length; i += 1) {
+ const batch = batches[i];
+ const { embeddings: batchResponse } = batchResponses[i];
+ for (let j = 0; j < batch.length; j += 1) {
+ embeddings.push(batchResponse[j].embedding);
+ }
+ }
+ return embeddings;
+ }
+ /**
+ * Method to generate an embedding for a single document. Calls the
+ * embedDocuments method with the document as the input.
+ * @param text Document to generate an embedding for.
+ * @returns Promise that resolves to an embedding for the document.
+ */
+ async embedQuery(text) {
+ const data = await this.embedDocuments([text]);
+ return data[0];
+ }
+ /**
+ * Method to set the model to use for generating embeddings.
+ * @sets the class' `model` value to that of the retrieved Embeddings Model.
+ */
+ async setModel() {
+ if (this.model)
+ return;
+ const gradient = new nodejs_sdk_1.Gradient({
+ accessToken: this.gradientAccessKey,
+ workspaceId: this.workspaceId,
+ });
+ this.model = await gradient.getEmbeddingsModel({
+ slug: "bge-large",
+ });
+ }
+ }
+ exports.GradientEmbeddings = GradientEmbeddings;
package/dist/embeddings/gradient_ai.d.ts ADDED
@@ -0,0 +1,48 @@
+ import { Embeddings, EmbeddingsParams } from "./base.js";
+ /**
+ * Interface for GradientEmbeddings parameters. Extends EmbeddingsParams and
+ * defines additional parameters specific to the GradientEmbeddings class.
+ */
+ export interface GradientEmbeddingsParams extends EmbeddingsParams {
+ /**
+ * Gradient AI Access Token.
+ * Provide Access Token if you do not wish to automatically pull from env.
+ */
+ gradientAccessKey?: string;
+ /**
+ * Gradient Workspace Id.
+ * Provide workspace id if you do not wish to automatically pull from env.
+ */
+ workspaceId?: string;
+ }
+ /**
+ * Class for generating embeddings using the Gradient AI's API. Extends the
+ * Embeddings class and implements GradientEmbeddingsParams and
+ */
+ export declare class GradientEmbeddings extends Embeddings implements GradientEmbeddingsParams {
+ gradientAccessKey?: string;
+ workspaceId?: string;
+ batchSize: number;
+ model: any;
+ constructor(fields: GradientEmbeddingsParams);
+ /**
+ * Method to generate embeddings for an array of documents. Splits the
+ * documents into batches and makes requests to the Gradient API to generate
+ * embeddings.
+ * @param texts Array of documents to generate embeddings for.
+ * @returns Promise that resolves to a 2D array of embeddings for each document.
+ */
+ embedDocuments(texts: string[]): Promise<number[][]>;
+ /**
+ * Method to generate an embedding for a single document. Calls the
+ * embedDocuments method with the document as the input.
+ * @param text Document to generate an embedding for.
+ * @returns Promise that resolves to an embedding for the document.
+ */
+ embedQuery(text: string): Promise<number[]>;
+ /**
+ * Method to set the model to use for generating embeddings.
+ * @sets the class' `model` value to that of the retrieved Embeddings Model.
+ */
+ setModel(): Promise<void>;
+ }
package/dist/embeddings/gradient_ai.js ADDED
@@ -0,0 +1,98 @@
+ import { Gradient } from "@gradientai/nodejs-sdk";
+ import { getEnvironmentVariable } from "../util/env.js";
+ import { chunkArray } from "../util/chunk.js";
+ import { Embeddings } from "./base.js";
+ /**
+ * Class for generating embeddings using the Gradient AI's API. Extends the
+ * Embeddings class and implements GradientEmbeddingsParams and
+ */
+ export class GradientEmbeddings extends Embeddings {
+ constructor(fields) {
+ super(fields);
+ Object.defineProperty(this, "gradientAccessKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "workspaceId", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "batchSize", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 128
+ });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.gradientAccessKey =
+ fields?.gradientAccessKey ??
+ getEnvironmentVariable("GRADIENT_ACCESS_TOKEN");
+ this.workspaceId =
+ fields?.workspaceId ?? getEnvironmentVariable("GRADIENT_WORKSPACE_ID");
+ if (!this.gradientAccessKey) {
+ throw new Error("Missing Gradient AI Access Token");
+ }
+ if (!this.workspaceId) {
+ throw new Error("Missing Gradient AI Workspace ID");
+ }
+ }
+ /**
+ * Method to generate embeddings for an array of documents. Splits the
+ * documents into batches and makes requests to the Gradient API to generate
+ * embeddings.
+ * @param texts Array of documents to generate embeddings for.
+ * @returns Promise that resolves to a 2D array of embeddings for each document.
+ */
+ async embedDocuments(texts) {
+ await this.setModel();
+ const mappedTexts = texts.map((text) => ({ input: text }));
+ const batches = chunkArray(mappedTexts, this.batchSize);
+ const batchRequests = batches.map((batch) => this.caller.call(async () => this.model.generateEmbeddings({
+ inputs: batch,
+ })));
+ const batchResponses = await Promise.all(batchRequests);
+ const embeddings = [];
+ for (let i = 0; i < batchResponses.length; i += 1) {
+ const batch = batches[i];
+ const { embeddings: batchResponse } = batchResponses[i];
+ for (let j = 0; j < batch.length; j += 1) {
+ embeddings.push(batchResponse[j].embedding);
+ }
+ }
+ return embeddings;
+ }
+ /**
+ * Method to generate an embedding for a single document. Calls the
+ * embedDocuments method with the document as the input.
+ * @param text Document to generate an embedding for.
+ * @returns Promise that resolves to an embedding for the document.
+ */
+ async embedQuery(text) {
+ const data = await this.embedDocuments([text]);
+ return data[0];
+ }
+ /**
+ * Method to set the model to use for generating embeddings.
+ * @sets the class' `model` value to that of the retrieved Embeddings Model.
+ */
+ async setModel() {
+ if (this.model)
+ return;
+ const gradient = new Gradient({
+ accessToken: this.gradientAccessKey,
+ workspaceId: this.workspaceId,
+ });
+ this.model = await gradient.getEmbeddingsModel({
+ slug: "bge-large",
+ });
+ }
+ }
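The three new embeddings files add a `GradientEmbeddings` class that batches up to 128 inputs per request against Gradient's `bge-large` embeddings model. A usage sketch; the import path matches the `./embeddings/gradient_ai` subpath export added to package.json at the bottom of this diff:

```typescript
import { GradientEmbeddings } from "langchain/embeddings/gradient_ai";

// Both values fall back to the GRADIENT_ACCESS_TOKEN / GRADIENT_WORKSPACE_ID
// environment variables; the constructor throws if either is missing.
const embeddings = new GradientEmbeddings({
  gradientAccessKey: process.env.GRADIENT_ACCESS_TOKEN,
  workspaceId: process.env.GRADIENT_WORKSPACE_ID,
});

const vectors = await embeddings.embedDocuments(["hello", "world"]); // number[][]
const queryVector = await embeddings.embedQuery("hello"); // number[]
```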
package/dist/llms/gradient_ai.cjs CHANGED
@@ -26,6 +26,12 @@ class GradientLLM extends base_js_1.LLM {
  writable: true,
  value: "llama2-7b-chat"
  });
+ Object.defineProperty(this, "adapterId", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "gradientAccessKey", {
  enumerable: true,
  configurable: true,
@@ -46,13 +52,14 @@
  });
  // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- Object.defineProperty(this, "baseModel", {
+ Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: void 0
  });
  this.modelSlug = fields?.modelSlug ?? this.modelSlug;
+ this.adapterId = fields?.adapterId;
  this.gradientAccessKey =
  fields?.gradientAccessKey ??
  (0, env_js_1.getEnvironmentVariable)("GRADIENT_ACCESS_TOKEN");
@@ -76,23 +83,30 @@
  */
  /** @ignore */
  async _call(prompt, _options) {
- await this.setBaseModel();
- const response = (await this.caller.call(async () => this.baseModel.complete({
+ await this.setModel();
+ const response = (await this.caller.call(async () => this.model.complete({
  query: prompt,
  ...this.inferenceParameters,
  })));
  return response.generatedOutput;
  }
- async setBaseModel() {
- if (this.baseModel)
+ async setModel() {
+ if (this.model)
  return;
  const gradient = new nodejs_sdk_1.Gradient({
  accessToken: this.gradientAccessKey,
  workspaceId: this.workspaceId,
  });
- this.baseModel = await gradient.getBaseModel({
- baseModelSlug: this.modelSlug,
- });
+ if (this.adapterId) {
+ this.model = await gradient.getModelAdapter({
+ modelAdapterId: this.adapterId,
+ });
+ }
+ else {
+ this.model = await gradient.getBaseModel({
+ baseModelSlug: this.modelSlug,
+ });
+ }
  }
  }
  exports.GradientLLM = GradientLLM;
package/dist/llms/gradient_ai.d.ts CHANGED
@@ -22,6 +22,10 @@ export interface GradientLLMParams extends BaseLLMParams {
  * Gradient AI Model Slug.
  */
  modelSlug?: string;
+ /**
+ * Gradient Adapter ID for custom fine tuned models.
+ */
+ adapterId?: string;
  }
  /**
  * The GradientLLM class is used to interact with Gradient AI inference Endpoint models.
@@ -33,10 +37,11 @@ export declare class GradientLLM extends LLM<BaseLLMCallOptions> {
  [key: string]: string;
  } | undefined;
  modelSlug: string;
+ adapterId?: string;
  gradientAccessKey?: string;
  workspaceId?: string;
  inferenceParameters?: Record<string, unknown>;
- baseModel: any;
+ model: any;
  constructor(fields: GradientLLMParams);
  _llmType(): string;
  /**
@@ -46,5 +51,5 @@
  */
  /** @ignore */
  _call(prompt: string, _options: this["ParsedCallOptions"]): Promise<string>;
- setBaseModel(): Promise<void>;
+ setModel(): Promise<void>;
  }
package/dist/llms/gradient_ai.js CHANGED
@@ -23,6 +23,12 @@ export class GradientLLM extends LLM {
  writable: true,
  value: "llama2-7b-chat"
  });
+ Object.defineProperty(this, "adapterId", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "gradientAccessKey", {
  enumerable: true,
  configurable: true,
@@ -43,13 +49,14 @@
  });
  // Gradient AI does not export the BaseModel type. Once it does, we can use it here.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
- Object.defineProperty(this, "baseModel", {
+ Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: void 0
  });
  this.modelSlug = fields?.modelSlug ?? this.modelSlug;
+ this.adapterId = fields?.adapterId;
  this.gradientAccessKey =
  fields?.gradientAccessKey ??
  getEnvironmentVariable("GRADIENT_ACCESS_TOKEN");
@@ -73,22 +80,29 @@
  */
  /** @ignore */
  async _call(prompt, _options) {
- await this.setBaseModel();
- const response = (await this.caller.call(async () => this.baseModel.complete({
+ await this.setModel();
+ const response = (await this.caller.call(async () => this.model.complete({
  query: prompt,
  ...this.inferenceParameters,
  })));
  return response.generatedOutput;
  }
- async setBaseModel() {
- if (this.baseModel)
+ async setModel() {
+ if (this.model)
  return;
  const gradient = new Gradient({
  accessToken: this.gradientAccessKey,
  workspaceId: this.workspaceId,
  });
- this.baseModel = await gradient.getBaseModel({
- baseModelSlug: this.modelSlug,
- });
+ if (this.adapterId) {
+ this.model = await gradient.getModelAdapter({
+ modelAdapterId: this.adapterId,
+ });
+ }
+ else {
+ this.model = await gradient.getBaseModel({
+ baseModelSlug: this.modelSlug,
+ });
+ }
  }
  }
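Net effect of the llms/gradient_ai.* hunks: the model handle is renamed `baseModel` → `model` (and the public `setBaseModel()` → `setModel()`, a breaking rename for anyone calling it directly), and a new optional `adapterId` resolves a fine-tuned model adapter via `getModelAdapter()`, taking precedence over `modelSlug`. A sketch; the adapter id is a placeholder:

```typescript
import { GradientLLM } from "langchain/llms/gradient_ai";

// Unchanged default: resolve a base model by slug.
const base = new GradientLLM({ modelSlug: "llama2-7b-chat" });

// New in 0.0.200: when adapterId is set, getModelAdapter() is used instead
// of getBaseModel(). "example-adapter-id" is a placeholder, not a real id.
const tuned = new GradientLLM({ adapterId: "example-adapter-id" });
console.log(await tuned.call("Write a haiku about version diffs."));
```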
package/dist/llms/llama_cpp.cjs CHANGED
@@ -77,9 +77,10 @@ class LlamaCpp extends base_js_1.LLM {
  return "llama2_cpp";
  }
  /** @ignore */
- async _call(prompt, _options) {
+ async _call(prompt, options) {
  try {
  const promptOptions = {
+ onToken: options?.onToken,
  maxTokens: this?.maxTokens,
  temperature: this?.temperature,
  topK: this?.topK,
package/dist/llms/llama_cpp.d.ts CHANGED
@@ -36,6 +36,6 @@ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
  constructor(inputs: LlamaCppInputs);
  _llmType(): string;
  /** @ignore */
- _call(prompt: string, _options?: this["ParsedCallOptions"]): Promise<string>;
+ _call(prompt: string, options?: this["ParsedCallOptions"]): Promise<string>;
  _streamResponseChunks(prompt: string, _options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
  }
package/dist/llms/llama_cpp.js CHANGED
@@ -74,9 +74,10 @@ export class LlamaCpp extends LLM {
  return "llama2_cpp";
  }
  /** @ignore */
- async _call(prompt, _options) {
+ async _call(prompt, options) {
  try {
  const promptOptions = {
+ onToken: options?.onToken,
  maxTokens: this?.maxTokens,
  temperature: this?.temperature,
  topK: this?.topK,
package/dist/load/import_constants.cjs CHANGED
@@ -26,6 +26,7 @@ exports.optionalImportEntrypoints = [
  "langchain/embeddings/googlevertexai",
  "langchain/embeddings/googlepalm",
  "langchain/embeddings/llama_cpp",
+ "langchain/embeddings/gradient_ai",
  "langchain/llms/load",
  "langchain/llms/cohere",
  "langchain/llms/hf",
package/dist/load/import_constants.js CHANGED
@@ -23,6 +23,7 @@ export const optionalImportEntrypoints = [
  "langchain/embeddings/googlevertexai",
  "langchain/embeddings/googlepalm",
  "langchain/embeddings/llama_cpp",
+ "langchain/embeddings/gradient_ai",
  "langchain/llms/load",
  "langchain/llms/cohere",
  "langchain/llms/hf",
package/dist/memory/vector_store.cjs CHANGED
@@ -83,7 +83,7 @@ class VectorStoreRetrieverMemory extends base_js_1.BaseMemory {
  return {
  [this.memoryKey]: this.returnDocs
  ? results
- : (0, document_js_2.formatDocumentsAsString)(results, "\n"),
+ : (0, document_js_2.formatDocumentsAsString)(results),
  };
  }
  /**
package/dist/memory/vector_store.js CHANGED
@@ -80,7 +80,7 @@ export class VectorStoreRetrieverMemory extends BaseMemory {
  return {
  [this.memoryKey]: this.returnDocs
  ? results
- : formatDocumentsAsString(results, "\n"),
+ : formatDocumentsAsString(results),
  };
  }
  /**
package/dist/tools/webbrowser.cjs CHANGED
@@ -243,7 +243,7 @@ class WebBrowser extends base_js_1.Tool {
  }));
  const vectorStore = await memory_js_1.MemoryVectorStore.fromDocuments(docs, this.embeddings);
  const results = await vectorStore.similaritySearch(task, 4, undefined, runManager?.getChild("vectorstore"));
- context = (0, document_js_2.formatDocumentsAsString)(results, "\n");
+ context = (0, document_js_2.formatDocumentsAsString)(results);
  }
  const input = `Text:${context}\n\nI need ${doSummary ? "a summary" : task} from the above text, also provide up to 5 markdown links from within that would be of interest (always including URL and text). Links should be provided, if present, in markdown syntax as a list under the heading "Relevant Links:".`;
  return this.model.predict(input, undefined, runManager?.getChild());
package/dist/tools/webbrowser.js CHANGED
@@ -212,7 +212,7 @@ export class WebBrowser extends Tool {
  }));
  const vectorStore = await MemoryVectorStore.fromDocuments(docs, this.embeddings);
  const results = await vectorStore.similaritySearch(task, 4, undefined, runManager?.getChild("vectorstore"));
- context = formatDocumentsAsString(results, "\n");
+ context = formatDocumentsAsString(results);
  }
  const input = `Text:${context}\n\nI need ${doSummary ? "a summary" : task} from the above text, also provide up to 5 markdown links from within that would be of interest (always including URL and text). Links should be provided, if present, in markdown syntax as a list under the heading "Relevant Links:".`;
  return this.model.predict(input, undefined, runManager?.getChild());
package/dist/util/document.cjs CHANGED
@@ -8,5 +8,5 @@ exports.formatDocumentsAsString = void 0;
  * @param documents
  * @returns A string of the documents page content, separated by newlines.
  */
- const formatDocumentsAsString = (documents, separator = "\n\n") => documents.map((doc) => doc.pageContent).join(separator);
+ const formatDocumentsAsString = (documents) => documents.map((doc) => doc.pageContent).join("\n\n");
  exports.formatDocumentsAsString = formatDocumentsAsString;
package/dist/util/document.d.ts CHANGED
@@ -6,4 +6,4 @@ import { Document } from "../document.js";
  * @param documents
  * @returns A string of the documents page content, separated by newlines.
  */
- export declare const formatDocumentsAsString: (documents: Document[], separator?: string) => string;
+ export declare const formatDocumentsAsString: (documents: Document[]) => string;
package/dist/util/document.js CHANGED
@@ -5,4 +5,4 @@
  * @param documents
  * @returns A string of the documents page content, separated by newlines.
  */
- export const formatDocumentsAsString = (documents, separator = "\n\n") => documents.map((doc) => doc.pageContent).join(separator);
+ export const formatDocumentsAsString = (documents) => documents.map((doc) => doc.pageContent).join("\n\n");
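The three util/document.* hunks drop the optional `separator` parameter: `formatDocumentsAsString` now always joins with a blank line. Call sites that passed `"\n"` (the retriever tool, vector-store memory, and WebBrowser hunks above) therefore move from single- to double-newline separation. A behavioral sketch of the simplified helper:

```typescript
// Equivalent to the new implementation; the Document type is reduced to the
// one field the helper actually reads.
const formatDocumentsAsString = (documents: { pageContent: string }[]): string =>
  documents.map((doc) => doc.pageContent).join("\n\n");

formatDocumentsAsString([{ pageContent: "a" }, { pageContent: "b" }]);
// => "a\n\nb"  (those call sites previously produced "a\nb")
```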
package/dist/util/tiktoken.cjs CHANGED
@@ -1,26 +1,17 @@
  "use strict";
- Object.defineProperty(exports, "__esModule", { value: true });
- exports.encodingForModel = exports.getEncoding = void 0;
- const lite_1 = require("js-tiktoken/lite");
- const async_caller_js_1 = require("./async_caller.cjs");
- const cache = {};
- const caller = /* #__PURE__ */ new async_caller_js_1.AsyncCaller({});
- async function getEncoding(encoding, options) {
- if (!(encoding in cache)) {
- cache[encoding] = caller
- .fetch(`https://tiktoken.pages.dev/js/${encoding}.json`, {
- signal: options?.signal,
- })
- .then((res) => res.json())
- .catch((e) => {
- delete cache[encoding];
- throw e;
- });
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
  }
- return new lite_1.Tiktoken(await cache[encoding], options?.extendedSpecialTokens);
- }
- exports.getEncoding = getEncoding;
- async function encodingForModel(model, options) {
- return getEncoding((0, lite_1.getEncodingNameForModel)(model), options);
- }
- exports.encodingForModel = encodingForModel;
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ __exportStar(require("@langchain/core/utils/tiktoken"), exports);
package/dist/util/tiktoken.d.ts CHANGED
@@ -1,9 +1 @@
- import { Tiktoken, TiktokenEncoding, TiktokenModel } from "js-tiktoken/lite";
- export declare function getEncoding(encoding: TiktokenEncoding, options?: {
- signal?: AbortSignal;
- extendedSpecialTokens?: Record<string, number>;
- }): Promise<Tiktoken>;
- export declare function encodingForModel(model: TiktokenModel, options?: {
- signal?: AbortSignal;
- extendedSpecialTokens?: Record<string, number>;
- }): Promise<Tiktoken>;
+ export * from "@langchain/core/utils/tiktoken";
package/dist/util/tiktoken.js CHANGED
@@ -1,21 +1 @@
- import { Tiktoken, getEncodingNameForModel, } from "js-tiktoken/lite";
- import { AsyncCaller } from "./async_caller.js";
- const cache = {};
- const caller = /* #__PURE__ */ new AsyncCaller({});
- export async function getEncoding(encoding, options) {
- if (!(encoding in cache)) {
- cache[encoding] = caller
- .fetch(`https://tiktoken.pages.dev/js/${encoding}.json`, {
- signal: options?.signal,
- })
- .then((res) => res.json())
- .catch((e) => {
- delete cache[encoding];
- throw e;
- });
- }
- return new Tiktoken(await cache[encoding], options?.extendedSpecialTokens);
- }
- export async function encodingForModel(model, options) {
- return getEncoding(getEncodingNameForModel(model), options);
- }
+ export * from "@langchain/core/utils/tiktoken";
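The tiktoken utilities keep their signatures but are now re-exported from `@langchain/core/utils/tiktoken`, which is why the `@langchain/core` dependency is bumped to `~0.0.6` in package.json below. A token-counting sketch based on the removed declarations (same contract as before: the encoding JSON is fetched once, cached, and wrapped in a js-tiktoken `Tiktoken`):

```typescript
import { encodingForModel } from "@langchain/core/utils/tiktoken";

const enc = await encodingForModel("gpt-3.5-turbo");
console.log(enc.encode("How many tokens is this?").length);
```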
package/embeddings/gradient_ai.cjs ADDED
@@ -0,0 +1 @@
+ module.exports = require('../dist/embeddings/gradient_ai.cjs');
package/embeddings/gradient_ai.d.ts ADDED
@@ -0,0 +1 @@
+ export * from '../dist/embeddings/gradient_ai.js'
package/embeddings/gradient_ai.js ADDED
@@ -0,0 +1 @@
+ export * from '../dist/embeddings/gradient_ai.js'
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langchain",
- "version": "0.0.199",
+ "version": "0.0.200",
  "description": "Typescript bindings for langchain",
  "type": "module",
  "engines": {
@@ -157,6 +157,9 @@
  "embeddings/llama_cpp.cjs",
  "embeddings/llama_cpp.js",
  "embeddings/llama_cpp.d.ts",
+ "embeddings/gradient_ai.cjs",
+ "embeddings/gradient_ai.js",
+ "embeddings/gradient_ai.d.ts",
  "llms/load.cjs",
  "llms/load.js",
  "llms/load.d.ts",
@@ -1417,7 +1420,7 @@
  },
  "dependencies": {
  "@anthropic-ai/sdk": "^0.9.1",
- "@langchain/core": "~0.0.3",
+ "@langchain/core": "~0.0.6",
  "binary-extensions": "^2.2.0",
  "expr-eval": "^2.0.2",
  "flat": "^5.0.2",
@@ -1698,6 +1701,11 @@
  "import": "./embeddings/llama_cpp.js",
  "require": "./embeddings/llama_cpp.cjs"
  },
+ "./embeddings/gradient_ai": {
+ "types": "./embeddings/gradient_ai.d.ts",
+ "import": "./embeddings/gradient_ai.js",
+ "require": "./embeddings/gradient_ai.cjs"
+ },
  "./llms/load": {
  "types": "./llms/load.d.ts",
  "import": "./llms/load.js",