langchain 0.0.168 → 0.0.170

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. package/README.md +2 -2
  2. package/agents/format_scratchpad.cjs +1 -0
  3. package/agents/format_scratchpad.d.ts +1 -0
  4. package/agents/format_scratchpad.js +1 -0
  5. package/chat_models/yandex.cjs +1 -0
  6. package/chat_models/yandex.d.ts +1 -0
  7. package/chat_models/yandex.js +1 -0
  8. package/dist/agents/agent.cjs +49 -1
  9. package/dist/agents/agent.d.ts +19 -1
  10. package/dist/agents/agent.js +47 -0
  11. package/dist/agents/executor.cjs +10 -1
  12. package/dist/agents/executor.d.ts +22 -8
  13. package/dist/agents/executor.js +11 -2
  14. package/dist/agents/format_scratchpad.cjs +25 -0
  15. package/dist/agents/format_scratchpad.d.ts +10 -0
  16. package/dist/agents/format_scratchpad.js +21 -0
  17. package/dist/agents/toolkits/aws_sfn.d.ts +4 -1
  18. package/dist/agents/toolkits/conversational_retrieval/openai_functions.d.ts +1 -1
  19. package/dist/agents/toolkits/json/json.d.ts +4 -1
  20. package/dist/agents/toolkits/openapi/openapi.cjs +8 -0
  21. package/dist/agents/toolkits/openapi/openapi.d.ts +12 -1
  22. package/dist/agents/toolkits/openapi/openapi.js +8 -0
  23. package/dist/agents/toolkits/sql/sql.d.ts +4 -1
  24. package/dist/agents/toolkits/vectorstore/vectorstore.d.ts +8 -2
  25. package/dist/agents/types.d.ts +13 -1
  26. package/dist/callbacks/handlers/llmonitor.cjs +21 -17
  27. package/dist/callbacks/handlers/llmonitor.js +21 -17
  28. package/dist/chains/sql_db/sql_db_chain.cjs +9 -0
  29. package/dist/chains/sql_db/sql_db_chain.d.ts +9 -0
  30. package/dist/chains/sql_db/sql_db_chain.js +9 -0
  31. package/dist/chat_models/baiduwenxin.cjs +12 -1
  32. package/dist/chat_models/baiduwenxin.d.ts +3 -1
  33. package/dist/chat_models/baiduwenxin.js +12 -1
  34. package/dist/chat_models/cloudflare_workersai.cjs +7 -2
  35. package/dist/chat_models/cloudflare_workersai.d.ts +1 -1
  36. package/dist/chat_models/cloudflare_workersai.js +7 -2
  37. package/dist/chat_models/yandex.cjs +117 -0
  38. package/dist/chat_models/yandex.d.ts +16 -0
  39. package/dist/chat_models/yandex.js +113 -0
  40. package/dist/document_loaders/web/assemblyai.cjs +63 -114
  41. package/dist/document_loaders/web/assemblyai.d.ts +38 -57
  42. package/dist/document_loaders/web/assemblyai.js +63 -100
  43. package/dist/evaluation/comparison/prompt.d.ts +2 -2
  44. package/dist/experimental/chains/violation_of_expectations/index.cjs +5 -0
  45. package/dist/experimental/chains/violation_of_expectations/index.d.ts +1 -0
  46. package/dist/experimental/chains/violation_of_expectations/index.js +1 -0
  47. package/dist/experimental/chains/violation_of_expectations/types.cjs +49 -0
  48. package/dist/experimental/chains/violation_of_expectations/types.d.ts +69 -0
  49. package/dist/experimental/chains/violation_of_expectations/types.js +46 -0
  50. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +328 -0
  51. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.d.ts +148 -0
  52. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +324 -0
  53. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.cjs +49 -0
  54. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.d.ts +5 -0
  55. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.js +46 -0
  56. package/dist/llms/cloudflare_workersai.cjs +14 -7
  57. package/dist/llms/cloudflare_workersai.d.ts +1 -1
  58. package/dist/llms/cloudflare_workersai.js +14 -7
  59. package/dist/load/import_constants.cjs +1 -0
  60. package/dist/load/import_constants.js +1 -0
  61. package/dist/load/import_map.cjs +5 -2
  62. package/dist/load/import_map.d.ts +3 -0
  63. package/dist/load/import_map.js +3 -0
  64. package/dist/memory/index.d.ts +1 -1
  65. package/dist/memory/index.js +1 -1
  66. package/dist/retrievers/time_weighted.cjs +1 -1
  67. package/dist/retrievers/time_weighted.d.ts +1 -1
  68. package/dist/retrievers/time_weighted.js +1 -1
  69. package/dist/retrievers/zep.cjs +29 -3
  70. package/dist/retrievers/zep.d.ts +14 -0
  71. package/dist/retrievers/zep.js +29 -3
  72. package/dist/schema/runnable/base.cjs +4 -1
  73. package/dist/schema/runnable/base.d.ts +1 -0
  74. package/dist/schema/runnable/base.js +4 -1
  75. package/dist/schema/runnable/passthrough.cjs +33 -1
  76. package/dist/schema/runnable/passthrough.d.ts +11 -1
  77. package/dist/schema/runnable/passthrough.js +32 -1
  78. package/dist/sql_db.cjs +12 -0
  79. package/dist/sql_db.d.ts +12 -0
  80. package/dist/sql_db.js +12 -0
  81. package/dist/storage/ioredis.cjs +2 -1
  82. package/dist/storage/ioredis.js +2 -1
  83. package/dist/storage/upstash_redis.cjs +155 -0
  84. package/dist/storage/upstash_redis.d.ts +59 -0
  85. package/dist/storage/upstash_redis.js +151 -0
  86. package/dist/storage/vercel_kv.cjs +2 -1
  87. package/dist/storage/vercel_kv.js +2 -1
  88. package/dist/types/assemblyai-types.cjs +0 -150
  89. package/dist/types/assemblyai-types.d.ts +4 -670
  90. package/dist/types/assemblyai-types.js +1 -149
  91. package/dist/vectorstores/faiss.cjs +38 -6
  92. package/dist/vectorstores/faiss.d.ts +14 -2
  93. package/dist/vectorstores/faiss.js +38 -6
  94. package/dist/vectorstores/pgvector.cjs +1 -1
  95. package/dist/vectorstores/pgvector.js +1 -1
  96. package/dist/vectorstores/weaviate.cjs +13 -2
  97. package/dist/vectorstores/weaviate.js +13 -2
  98. package/experimental/chains/violation_of_expectations.cjs +1 -0
  99. package/experimental/chains/violation_of_expectations.d.ts +1 -0
  100. package/experimental/chains/violation_of_expectations.js +1 -0
  101. package/package.json +47 -10
  102. package/storage/upstash_redis.cjs +1 -0
  103. package/storage/upstash_redis.d.ts +1 -0
  104. package/storage/upstash_redis.js +1 -0
  105. package/dist/util/assemblyai-client.cjs +0 -173
  106. package/dist/util/assemblyai-client.d.ts +0 -63
  107. package/dist/util/assemblyai-client.js +0 -170
@@ -65,13 +65,15 @@ const parseInput = (rawInput) => {
65
65
  const parseOutput = (rawOutput) => {
66
66
  if (!rawOutput)
67
67
  return null;
68
- const { text, output, answer } = rawOutput;
68
+ const { text, output, answer, result } = rawOutput;
69
69
  if (text)
70
70
  return text;
71
71
  if (answer)
72
72
  return answer;
73
73
  if (output)
74
74
  return output;
75
+ if (result)
76
+ return result;
75
77
  return rawOutput;
76
78
  };
77
79
  export class LLMonitorHandler extends BaseCallbackHandler {
@@ -104,15 +106,14 @@ export class LLMonitorHandler extends BaseCallbackHandler {
104
106
  ...(extraParams?.invocation_params || {}),
105
107
  ...(metadata || {}),
106
108
  };
107
- const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
108
- const userId = params?.userId || undefined;
109
- const userProps = params?.userProps || undefined;
109
+ const { model, model_name, modelName, userId, userProps, ...rest } = params;
110
+ const name = model || modelName || model_name || llm.id.at(-1);
110
111
  await this.monitor.trackEvent("llm", "start", {
111
112
  runId,
112
113
  parentRunId,
113
114
  name,
114
115
  input: convertToLLMonitorMessages(prompts),
115
- extra: params,
116
+ extra: rest,
116
117
  userId,
117
118
  userProps,
118
119
  tags,
@@ -124,15 +125,15 @@ export class LLMonitorHandler extends BaseCallbackHandler {
124
125
  ...(extraParams?.invocation_params || {}),
125
126
  ...(metadata || {}),
126
127
  };
127
- const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
128
- const userId = params?.userId || undefined;
129
- const userProps = params?.userProps || undefined;
128
+ // Expand them so they're excluded from the "extra" field
129
+ const { model, model_name, modelName, userId, userProps, ...rest } = params;
130
+ const name = model || modelName || model_name || llm.id.at(-1);
130
131
  await this.monitor.trackEvent("llm", "start", {
131
132
  runId,
132
133
  parentRunId,
133
134
  name,
134
135
  input: convertToLLMonitorMessages(messages),
135
- extra: params,
136
+ extra: rest,
136
137
  userId,
137
138
  userProps,
138
139
  tags,
@@ -157,20 +158,19 @@ export class LLMonitorHandler extends BaseCallbackHandler {
157
158
  });
158
159
  }
159
160
  async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
161
+ const { agentName, userId, userProps, ...rest } = metadata || {};
160
162
  // allow the user to specify an agent name
161
- const chainName = chain.id.at(-1);
162
- const name = (metadata?.agentName ?? chainName);
163
+ const name = agentName || chain.id.at(-1);
163
164
  // Attempt to automatically detect if this is an agent or chain
164
- const runType = metadata?.agentName ||
165
- ["AgentExecutor", "PlanAndExecute"].includes(chainName)
165
+ const runType = agentName || ["AgentExecutor", "PlanAndExecute"].includes(name)
166
166
  ? "agent"
167
167
  : "chain";
168
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
169
- const { agentName, ...rest } = metadata || {};
170
168
  await this.monitor.trackEvent(runType, "start", {
171
169
  runId,
172
170
  parentRunId,
173
171
  name,
172
+ userId,
173
+ userProps,
174
174
  input: parseInput(inputs),
175
175
  extra: rest,
176
176
  tags,
@@ -190,12 +190,16 @@ export class LLMonitorHandler extends BaseCallbackHandler {
190
190
  });
191
191
  }
192
192
  async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
193
+ const { toolName, userId, userProps, ...rest } = metadata || {};
194
+ const name = toolName || tool.id.at(-1);
193
195
  await this.monitor.trackEvent("tool", "start", {
194
196
  runId,
195
197
  parentRunId,
196
- name: tool.id[tool.id.length - 1],
198
+ name,
199
+ userId,
200
+ userProps,
197
201
  input,
198
- extra: metadata,
202
+ extra: rest,
199
203
  tags,
200
204
  runtime: "langchain-js",
201
205
  });
@@ -10,6 +10,15 @@ const sql_utils_js_1 = require("../../util/sql_utils.cjs");
10
10
  * Class that represents a SQL database chain in the LangChain framework.
11
11
  * It extends the BaseChain class and implements the functionality
12
12
  * specific to a SQL database chain.
13
+ *
14
+ * @security **Security Notice**
15
+ * This chain generates SQL queries for the given database.
16
+ * The SQLDatabase class provides a getTableInfo method that can be used
17
+ * to get column information as well as sample data from the table.
18
+ * To mitigate risk of leaking sensitive data, limit permissions
19
+ * to read and scope to the tables that are needed.
20
+ * Optionally, use the includesTables or ignoreTables class parameters
21
+ * to limit which tables can/cannot be accessed.
13
22
  */
14
23
  class SqlDatabaseChain extends base_js_1.BaseChain {
15
24
  static lc_name() {
@@ -22,6 +22,15 @@ export interface SqlDatabaseChainInput extends ChainInputs {
22
22
  * Class that represents a SQL database chain in the LangChain framework.
23
23
  * It extends the BaseChain class and implements the functionality
24
24
  * specific to a SQL database chain.
25
+ *
26
+ * @security **Security Notice**
27
+ * This chain generates SQL queries for the given database.
28
+ * The SQLDatabase class provides a getTableInfo method that can be used
29
+ * to get column information as well as sample data from the table.
30
+ * To mitigate risk of leaking sensitive data, limit permissions
31
+ * to read and scope to the tables that are needed.
32
+ * Optionally, use the includesTables or ignoreTables class parameters
33
+ * to limit which tables can/cannot be accessed.
25
34
  */
26
35
  export declare class SqlDatabaseChain extends BaseChain {
27
36
  static lc_name(): string;
@@ -7,6 +7,15 @@ import { getPromptTemplateFromDataSource } from "../../util/sql_utils.js";
7
7
  * Class that represents a SQL database chain in the LangChain framework.
8
8
  * It extends the BaseChain class and implements the functionality
9
9
  * specific to a SQL database chain.
10
+ *
11
+ * @security **Security Notice**
12
+ * This chain generates SQL queries for the given database.
13
+ * The SQLDatabase class provides a getTableInfo method that can be used
14
+ * to get column information as well as sample data from the table.
15
+ * To mitigate risk of leaking sensitive data, limit permissions
16
+ * to read and scope to the tables that are needed.
17
+ * Optionally, use the includesTables or ignoreTables class parameters
18
+ * to limit which tables can/cannot be accessed.
10
19
  */
11
20
  export class SqlDatabaseChain extends BaseChain {
12
21
  static lc_name() {
@@ -28,7 +28,7 @@ function messageToWenxinRole(message) {
28
28
  case "human":
29
29
  return "user";
30
30
  case "system":
31
- throw new Error("System messages not supported");
31
+ throw new Error("System messages should not be here");
32
32
  case "function":
33
33
  throw new Error("Function messages not supported");
34
34
  case "generic": {
@@ -164,6 +164,10 @@ class ChatBaiduWenxin extends base_js_1.BaseChatModel {
164
164
  this.apiUrl =
165
165
  "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant";
166
166
  }
167
+ else if (this.modelName === "ERNIE-Bot-4") {
168
+ this.apiUrl =
169
+ "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro";
170
+ }
167
171
  else {
168
172
  throw new Error(`Invalid model name: ${this.modelName}`);
169
173
  }
@@ -219,6 +223,13 @@ class ChatBaiduWenxin extends base_js_1.BaseChatModel {
219
223
  async _generate(messages, options, runManager) {
220
224
  const tokenUsage = {};
221
225
  const params = this.invocationParams();
226
+ // Wenxin requires the system message to be put in the params, not messages array
227
+ const systemMessage = messages.find((message) => message._getType() === "system");
228
+ if (systemMessage) {
229
+ // eslint-disable-next-line no-param-reassign
230
+ messages = messages.filter((message) => message !== systemMessage);
231
+ params.system = systemMessage.text;
232
+ }
222
233
  const messagesMapped = messages.map((message) => ({
223
234
  role: messageToWenxinRole(message),
224
235
  content: message.text,
@@ -22,12 +22,13 @@ interface ChatCompletionRequest {
22
22
  temperature?: number;
23
23
  top_p?: number;
24
24
  penalty_score?: number;
25
+ system?: string;
25
26
  }
26
27
  /**
27
28
  * Interface defining the input to the ChatBaiduWenxin class.
28
29
  */
29
30
  declare interface BaiduWenxinChatInput {
30
- /** Model name to use
31
+ /** Model name to use. Available options are: ERNIE-Bot, ERNIE-Bot-turbo, ERNIE-Bot-4
31
32
  * @default "ERNIE-Bot-turbo"
32
33
  */
33
34
  modelName: string;
@@ -110,6 +111,7 @@ export declare class ChatBaiduWenxin extends BaseChatModel implements BaiduWenxi
110
111
  * Get the identifying parameters for the model
111
112
  */
112
113
  identifyingParams(): {
114
+ system?: string | undefined;
113
115
  stream?: boolean | undefined;
114
116
  temperature?: number | undefined;
115
117
  top_p?: number | undefined;
@@ -25,7 +25,7 @@ function messageToWenxinRole(message) {
25
25
  case "human":
26
26
  return "user";
27
27
  case "system":
28
- throw new Error("System messages not supported");
28
+ throw new Error("System messages should not be here");
29
29
  case "function":
30
30
  throw new Error("Function messages not supported");
31
31
  case "generic": {
@@ -161,6 +161,10 @@ export class ChatBaiduWenxin extends BaseChatModel {
161
161
  this.apiUrl =
162
162
  "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/eb-instant";
163
163
  }
164
+ else if (this.modelName === "ERNIE-Bot-4") {
165
+ this.apiUrl =
166
+ "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/completions_pro";
167
+ }
164
168
  else {
165
169
  throw new Error(`Invalid model name: ${this.modelName}`);
166
170
  }
@@ -216,6 +220,13 @@ export class ChatBaiduWenxin extends BaseChatModel {
216
220
  async _generate(messages, options, runManager) {
217
221
  const tokenUsage = {};
218
222
  const params = this.invocationParams();
223
+ // Wenxin requires the system message to be put in the params, not messages array
224
+ const systemMessage = messages.find((message) => message._getType() === "system");
225
+ if (systemMessage) {
226
+ // eslint-disable-next-line no-param-reassign
227
+ messages = messages.filter((message) => message !== systemMessage);
228
+ params.system = systemMessage.text;
229
+ }
219
230
  const messagesMapped = messages.map((message) => ({
220
231
  role: messageToWenxinRole(message),
221
232
  content: message.text,
@@ -52,7 +52,12 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
52
52
  this.cloudflareApiToken =
53
53
  fields?.cloudflareApiToken ??
54
54
  (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_API_TOKEN");
55
- this.baseUrl = fields?.baseUrl;
55
+ this.baseUrl =
56
+ fields?.baseUrl ??
57
+ `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
58
+ if (this.baseUrl.endsWith("/")) {
59
+ this.baseUrl = this.baseUrl.slice(0, -1);
60
+ }
56
61
  }
57
62
  _llmType() {
58
63
  return "cloudflare";
@@ -112,7 +117,7 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
112
117
  /** @ignore */
113
118
  async _call(messages, options) {
114
119
  this.validateEnvironment();
115
- const url = `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run/${this.model}`;
120
+ const url = `${this.baseUrl}/${this.model}`;
116
121
  const headers = {
117
122
  Authorization: `Bearer ${this.cloudflareApiToken}`,
118
123
  "Content-Type": "application/json",
@@ -19,7 +19,7 @@ export declare class ChatCloudflareWorkersAI extends SimpleChatModel implements
19
19
  model: string;
20
20
  cloudflareAccountId?: string;
21
21
  cloudflareApiToken?: string;
22
- baseUrl?: string;
22
+ baseUrl: string;
23
23
  constructor(fields?: CloudflareWorkersAIInput & BaseChatModelParams);
24
24
  _llmType(): string;
25
25
  /** Get the identifying parameters for this LLM. */
@@ -49,7 +49,12 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
49
49
  this.cloudflareApiToken =
50
50
  fields?.cloudflareApiToken ??
51
51
  getEnvironmentVariable("CLOUDFLARE_API_TOKEN");
52
- this.baseUrl = fields?.baseUrl;
52
+ this.baseUrl =
53
+ fields?.baseUrl ??
54
+ `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
55
+ if (this.baseUrl.endsWith("/")) {
56
+ this.baseUrl = this.baseUrl.slice(0, -1);
57
+ }
53
58
  }
54
59
  _llmType() {
55
60
  return "cloudflare";
@@ -109,7 +114,7 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
109
114
  /** @ignore */
110
115
  async _call(messages, options) {
111
116
  this.validateEnvironment();
112
- const url = `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run/${this.model}`;
117
+ const url = `${this.baseUrl}/${this.model}`;
113
118
  const headers = {
114
119
  Authorization: `Bearer ${this.cloudflareApiToken}`,
115
120
  "Content-Type": "application/json",
@@ -0,0 +1,117 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ChatYandexGPT = void 0;
4
+ const index_js_1 = require("../schema/index.cjs");
5
+ const env_js_1 = require("../util/env.cjs");
6
+ const base_js_1 = require("./base.cjs");
7
+ const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/chat";
8
/**
 * Splits a LangChain message list into the YandexGPT wire format.
 * Human/AI messages become `{ role, text }` chat entries; the last system
 * message (if any) becomes the separate instruction string. Messages
 * without a `content` property are ignored.
 *
 * @param {Array} history - LangChain BaseMessage-like objects.
 * @returns {[Array<{role: string, text: string}>, string]} chat entries and instruction text.
 */
function _parseChatHistory(history) {
    const ROLE_BY_TYPE = { human: "user", ai: "assistant" };
    const chatHistory = [];
    let instruction = "";
    for (const message of history) {
        if (!("content" in message)) {
            continue;
        }
        const type = message._getType();
        if (type === "system") {
            // YandexGPT takes the system prompt as a separate field, not a chat turn.
            instruction = message.content;
        }
        else if (Object.hasOwn(ROLE_BY_TYPE, type)) {
            chatHistory.push({ role: ROLE_BY_TYPE[type], text: message.content });
        }
    }
    return [chatHistory, instruction];
}
26
/**
 * Chat model that calls the YandexGPT chat completion endpoint.
 * Authenticates with either an API key (YC_API_KEY) or an IAM token
 * (YC_IAM_TOKEN); at least one must be supplied via the constructor
 * fields or the environment.
 */
class ChatYandexGPT extends base_js_1.BaseChatModel {
    // Yandex Cloud API key; used with the `Api-Key` Authorization scheme.
    apiKey;
    // Yandex Cloud IAM token; used with the `Bearer` Authorization scheme.
    iamToken;
    // Sampling temperature sent to the API.
    temperature = 0.6;
    // Upper bound on the number of generated tokens.
    maxTokens = 1700;
    // YandexGPT model name.
    model = "general";
    constructor(fields) {
        super(fields ?? {});
        const apiKey = fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("YC_API_KEY");
        const iamToken = fields?.iamToken ?? (0, env_js_1.getEnvironmentVariable)("YC_IAM_TOKEN");
        if (apiKey === undefined && iamToken === undefined) {
            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
        }
        this.apiKey = apiKey;
        this.iamToken = iamToken;
        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
        this.temperature = fields?.temperature ?? this.temperature;
        this.model = fields?.model ?? this.model;
    }
    _llmType() {
        return "yandexgpt";
    }
    _combineLLMOutput() {
        return {};
    }
    /** @ignore */
    async _generate(messages, options, _) {
        const [messageHistory, instruction] = _parseChatHistory(messages);
        // API keys and IAM tokens use different Authorization schemes.
        const headers = {
            "Content-Type": "application/json",
            Authorization: this.apiKey !== undefined
                ? `Api-Key ${this.apiKey}`
                : `Bearer ${this.iamToken}`,
        };
        const bodyData = {
            model: this.model,
            generationOptions: {
                temperature: this.temperature,
                maxTokens: this.maxTokens,
            },
            messages: messageHistory,
            instructionText: instruction,
        };
        const response = await fetch(apiUrl, {
            method: "POST",
            headers,
            body: JSON.stringify(bodyData),
            signal: options?.signal,
        });
        if (!response.ok) {
            throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
        }
        const responseData = await response.json();
        const { result } = responseData;
        const { text } = result.message;
        return {
            generations: [{ text, message: new index_js_1.AIMessage(text) }],
            llmOutput: { totalTokens: result.num_tokens },
        };
    }
}
exports.ChatYandexGPT = ChatYandexGPT;
@@ -0,0 +1,16 @@
1
import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
import { YandexGPTInputs } from "../llms/yandex.js";
import { BaseMessage, ChatResult } from "../schema/index.js";
import { BaseChatModel } from "./base.js";
/**
 * Chat model wrapper around the YandexGPT chat completion API.
 * Requires either an API key or an IAM token for authentication.
 */
export declare class ChatYandexGPT extends BaseChatModel {
    /** Yandex Cloud API key; falls back to the YC_API_KEY env var when not passed. */
    apiKey?: string;
    /** Yandex Cloud IAM token; falls back to the YC_IAM_TOKEN env var when not passed. */
    iamToken?: string;
    /** Sampling temperature sent to the API. Defaults to 0.6. */
    temperature: number;
    /** Maximum number of tokens to generate. Defaults to 1700. */
    maxTokens: number;
    /** YandexGPT model name. Defaults to "general". */
    model: string;
    constructor(fields?: YandexGPTInputs);
    _llmType(): string;
    _combineLLMOutput?(): {};
    /** @ignore */
    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _?: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
}
@@ -0,0 +1,113 @@
1
+ import { AIMessage, } from "../schema/index.js";
2
+ import { getEnvironmentVariable } from "../util/env.js";
3
+ import { BaseChatModel } from "./base.js";
4
+ const apiUrl = "https://llm.api.cloud.yandex.net/llm/v1alpha/chat";
5
/**
 * Splits a LangChain message list into the YandexGPT wire format.
 * Human/AI messages become `{ role, text }` chat entries; the last system
 * message (if any) becomes the separate instruction string. Messages
 * without a `content` property are ignored.
 *
 * @param {Array} history - LangChain BaseMessage-like objects.
 * @returns {[Array<{role: string, text: string}>, string]} chat entries and instruction text.
 */
function _parseChatHistory(history) {
    const ROLE_BY_TYPE = { human: "user", ai: "assistant" };
    const chatHistory = [];
    let instruction = "";
    for (const message of history) {
        if (!("content" in message)) {
            continue;
        }
        const type = message._getType();
        if (type === "system") {
            // YandexGPT takes the system prompt as a separate field, not a chat turn.
            instruction = message.content;
        }
        else if (Object.hasOwn(ROLE_BY_TYPE, type)) {
            chatHistory.push({ role: ROLE_BY_TYPE[type], text: message.content });
        }
    }
    return [chatHistory, instruction];
}
23
/**
 * Chat model that calls the YandexGPT chat completion endpoint.
 * Authenticates with either an API key (YC_API_KEY) or an IAM token
 * (YC_IAM_TOKEN); at least one must be supplied via the constructor
 * fields or the environment.
 */
export class ChatYandexGPT extends BaseChatModel {
    // Yandex Cloud API key; used with the `Api-Key` Authorization scheme.
    apiKey;
    // Yandex Cloud IAM token; used with the `Bearer` Authorization scheme.
    iamToken;
    // Sampling temperature sent to the API.
    temperature = 0.6;
    // Upper bound on the number of generated tokens.
    maxTokens = 1700;
    // YandexGPT model name.
    model = "general";
    constructor(fields) {
        super(fields ?? {});
        const apiKey = fields?.apiKey ?? getEnvironmentVariable("YC_API_KEY");
        const iamToken = fields?.iamToken ?? getEnvironmentVariable("YC_IAM_TOKEN");
        if (apiKey === undefined && iamToken === undefined) {
            throw new Error("Please set the YC_API_KEY or YC_IAM_TOKEN environment variable or pass it to the constructor as the apiKey or iamToken field.");
        }
        this.apiKey = apiKey;
        this.iamToken = iamToken;
        this.maxTokens = fields?.maxTokens ?? this.maxTokens;
        this.temperature = fields?.temperature ?? this.temperature;
        this.model = fields?.model ?? this.model;
    }
    _llmType() {
        return "yandexgpt";
    }
    _combineLLMOutput() {
        return {};
    }
    /** @ignore */
    async _generate(messages, options, _) {
        const [messageHistory, instruction] = _parseChatHistory(messages);
        // API keys and IAM tokens use different Authorization schemes.
        const headers = {
            "Content-Type": "application/json",
            Authorization: this.apiKey !== undefined
                ? `Api-Key ${this.apiKey}`
                : `Bearer ${this.iamToken}`,
        };
        const bodyData = {
            model: this.model,
            generationOptions: {
                temperature: this.temperature,
                maxTokens: this.maxTokens,
            },
            messages: messageHistory,
            instructionText: instruction,
        };
        const response = await fetch(apiUrl, {
            method: "POST",
            headers,
            body: JSON.stringify(bodyData),
            signal: options?.signal,
        });
        if (!response.ok) {
            throw new Error(`Failed to fetch ${apiUrl} from YandexGPT: ${response.status}`);
        }
        const responseData = await response.json();
        const { result } = responseData;
        const { text } = result.message;
        return {
            generations: [{ text, message: new AIMessage(text) }],
            llmOutput: { totalTokens: result.num_tokens },
        };
    }
}