langchain 0.0.167 → 0.0.169

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/README.md +4 -4
  2. package/chat_models/cloudflare_workersai.cjs +1 -0
  3. package/chat_models/cloudflare_workersai.d.ts +1 -0
  4. package/chat_models/cloudflare_workersai.js +1 -0
  5. package/chat_models/fake.cjs +1 -0
  6. package/chat_models/fake.d.ts +1 -0
  7. package/chat_models/fake.js +1 -0
  8. package/chat_models/yandex.cjs +1 -0
  9. package/chat_models/yandex.d.ts +1 -0
  10. package/chat_models/yandex.js +1 -0
  11. package/dist/agents/chat/index.cjs +3 -2
  12. package/dist/agents/chat/index.d.ts +3 -0
  13. package/dist/agents/chat/index.js +3 -2
  14. package/dist/callbacks/handlers/llmonitor.cjs +21 -17
  15. package/dist/callbacks/handlers/llmonitor.js +21 -17
  16. package/dist/chat_models/cloudflare_workersai.cjs +145 -0
  17. package/dist/chat_models/cloudflare_workersai.d.ts +46 -0
  18. package/dist/chat_models/cloudflare_workersai.js +141 -0
  19. package/dist/chat_models/fake.cjs +101 -0
  20. package/dist/chat_models/fake.d.ts +36 -0
  21. package/dist/chat_models/fake.js +97 -0
  22. package/dist/chat_models/yandex.cjs +117 -0
  23. package/dist/chat_models/yandex.d.ts +16 -0
  24. package/dist/chat_models/yandex.js +113 -0
  25. package/dist/evaluation/comparison/prompt.d.ts +2 -2
  26. package/dist/experimental/chains/violation_of_expectations/index.cjs +5 -0
  27. package/dist/experimental/chains/violation_of_expectations/index.d.ts +1 -0
  28. package/dist/experimental/chains/violation_of_expectations/index.js +1 -0
  29. package/dist/experimental/chains/violation_of_expectations/types.cjs +49 -0
  30. package/dist/experimental/chains/violation_of_expectations/types.d.ts +69 -0
  31. package/dist/experimental/chains/violation_of_expectations/types.js +46 -0
  32. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +328 -0
  33. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.d.ts +148 -0
  34. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +324 -0
  35. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.cjs +49 -0
  36. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.d.ts +5 -0
  37. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.js +46 -0
  38. package/dist/llms/cloudflare_workersai.cjs +124 -0
  39. package/dist/llms/cloudflare_workersai.d.ts +49 -0
  40. package/dist/llms/cloudflare_workersai.js +120 -0
  41. package/dist/llms/fake.cjs +82 -0
  42. package/dist/llms/fake.d.ts +31 -0
  43. package/dist/llms/fake.js +78 -0
  44. package/dist/llms/sagemaker_endpoint.cjs +9 -7
  45. package/dist/llms/sagemaker_endpoint.d.ts +3 -3
  46. package/dist/llms/sagemaker_endpoint.js +9 -7
  47. package/dist/load/import_constants.cjs +2 -0
  48. package/dist/load/import_constants.js +2 -0
  49. package/dist/load/import_map.cjs +8 -2
  50. package/dist/load/import_map.d.ts +6 -0
  51. package/dist/load/import_map.js +6 -0
  52. package/dist/retrievers/zep.cjs +29 -3
  53. package/dist/retrievers/zep.d.ts +14 -0
  54. package/dist/retrievers/zep.js +29 -3
  55. package/dist/util/axios-fetch-adapter.cjs +1 -1
  56. package/dist/util/axios-fetch-adapter.js +1 -1
  57. package/dist/util/env.cjs +1 -1
  58. package/dist/util/env.js +1 -1
  59. package/dist/util/event-source-parse.cjs +1 -1
  60. package/dist/util/event-source-parse.js +1 -1
  61. package/dist/vectorstores/closevector/common.cjs +128 -0
  62. package/dist/vectorstores/closevector/common.d.ts +82 -0
  63. package/dist/vectorstores/closevector/common.js +124 -0
  64. package/dist/vectorstores/closevector/node.cjs +109 -0
  65. package/dist/vectorstores/closevector/node.d.ts +83 -0
  66. package/dist/vectorstores/closevector/node.js +105 -0
  67. package/dist/vectorstores/closevector/web.cjs +109 -0
  68. package/dist/vectorstores/closevector/web.d.ts +80 -0
  69. package/dist/vectorstores/closevector/web.js +105 -0
  70. package/dist/vectorstores/faiss.cjs +38 -6
  71. package/dist/vectorstores/faiss.d.ts +14 -2
  72. package/dist/vectorstores/faiss.js +38 -6
  73. package/dist/vectorstores/weaviate.cjs +13 -2
  74. package/dist/vectorstores/weaviate.js +13 -2
  75. package/experimental/chains/violation_of_expectations.cjs +1 -0
  76. package/experimental/chains/violation_of_expectations.d.ts +1 -0
  77. package/experimental/chains/violation_of_expectations.js +1 -0
  78. package/llms/cloudflare_workersai.cjs +1 -0
  79. package/llms/cloudflare_workersai.d.ts +1 -0
  80. package/llms/cloudflare_workersai.js +1 -0
  81. package/llms/fake.cjs +1 -0
  82. package/llms/fake.d.ts +1 -0
  83. package/llms/fake.js +1 -0
  84. package/package.json +92 -13
  85. package/vectorstores/closevector/node.cjs +1 -0
  86. package/vectorstores/closevector/node.d.ts +1 -0
  87. package/vectorstores/closevector/node.js +1 -0
  88. package/vectorstores/closevector/web.cjs +1 -0
  89. package/vectorstores/closevector/web.d.ts +1 -0
  90. package/vectorstores/closevector/web.js +1 -0
package/README.md CHANGED
@@ -2,13 +2,13 @@

  ⚡ Building applications with LLMs through composability ⚡

- [![CI](https://github.com/hwchase17/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/hwchase17/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/hwchase17/langchainjs)
+ [![CI](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml/badge.svg)](https://github.com/langchain-ai/langchainjs/actions/workflows/ci.yml) ![npm](https://img.shields.io/npm/dw/langchain) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Twitter](https://img.shields.io/twitter/url/https/twitter.com/langchainai.svg?style=social&label=Follow%20%40LangChainAI)](https://twitter.com/langchainai) [![](https://dcbadge.vercel.app/api/server/6adMQxSpJS?compact=true&style=flat)](https://discord.gg/6adMQxSpJS) [![Open in Dev Containers](https://img.shields.io/static/v1?label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/langchain-ai/langchainjs)
  [<img src="https://github.com/codespaces/badge.svg" title="Open in Github Codespace" width="150" height="20">](https://codespaces.new/hwchase17/langchainjs)

  Looking for the Python version? Check out [LangChain](https://github.com/hwchase17/langchain).

- To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
- [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
+ To help you ship LangChain apps to production faster, check out [LangSmith](https://smith.langchain.com).
+ [LangSmith](https://smith.langchain.com) is a unified developer platform for building, testing, and monitoring LLM applications.
  Fill out [this form](https://airtable.com/appwQzlErAS2qiP0L/shrGtGaVBVAz7NcV2) to get off the waitlist or speak with our sales team

  ## Quick Install
@@ -53,4 +53,4 @@ The [LangChainHub](https://github.com/hwchase17/langchain-hub) is a central plac

  As an open source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infra, or better documentation.

- Check out [our contributing guidelines](https://github.com/hwchase17/langchainjs/blob/main/CONTRIBUTING.md) for instructions on how to contribute.
+ Check out [our contributing guidelines](https://github.com/langchain-ai/langchainjs/blob/main/CONTRIBUTING.md) for instructions on how to contribute.
package/chat_models/cloudflare_workersai.cjs ADDED
@@ -0,0 +1 @@
+ module.exports = require('../dist/chat_models/cloudflare_workersai.cjs');
package/chat_models/cloudflare_workersai.d.ts ADDED
@@ -0,0 +1 @@
+ export * from '../dist/chat_models/cloudflare_workersai.js'
package/chat_models/cloudflare_workersai.js ADDED
@@ -0,0 +1 @@
+ export * from '../dist/chat_models/cloudflare_workersai.js'
package/chat_models/fake.cjs ADDED
@@ -0,0 +1 @@
+ module.exports = require('../dist/chat_models/fake.cjs');
package/chat_models/fake.d.ts ADDED
@@ -0,0 +1 @@
+ export * from '../dist/chat_models/fake.js'
package/chat_models/fake.js ADDED
@@ -0,0 +1 @@
+ export * from '../dist/chat_models/fake.js'
package/chat_models/yandex.cjs ADDED
@@ -0,0 +1 @@
+ module.exports = require('../dist/chat_models/yandex.cjs');
package/chat_models/yandex.d.ts ADDED
@@ -0,0 +1 @@
+ export * from '../dist/chat_models/yandex.js'
package/chat_models/yandex.js ADDED
@@ -0,0 +1 @@
+ export * from '../dist/chat_models/yandex.js'
package/dist/agents/chat/index.cjs CHANGED
@@ -80,13 +80,14 @@ class ChatAgent extends agent_js_1.Agent {
  * @param args.suffix - String to put after the list of tools.
  * @param args.prefix - String to put before the list of tools.
  * @param args.humanMessageTemplate - String to use directly as the human message template
+ * @param args.formatInstructions - Formattable string to use as the instructions template
  */
  static createPrompt(tools, args) {
- const { prefix = prompt_js_1.PREFIX, suffix = prompt_js_1.SUFFIX, humanMessageTemplate = DEFAULT_HUMAN_MESSAGE_TEMPLATE, } = args ?? {};
+ const { prefix = prompt_js_1.PREFIX, suffix = prompt_js_1.SUFFIX, humanMessageTemplate = DEFAULT_HUMAN_MESSAGE_TEMPLATE, formatInstructions = prompt_js_1.FORMAT_INSTRUCTIONS, } = args ?? {};
  const toolStrings = tools
  .map((tool) => `${tool.name}: ${tool.description}`)
  .join("\n");
- const template = [prefix, toolStrings, prompt_js_1.FORMAT_INSTRUCTIONS, suffix].join("\n\n");
+ const template = [prefix, toolStrings, formatInstructions, suffix].join("\n\n");
  const messages = [
  chat_js_1.SystemMessagePromptTemplate.fromTemplate(template),
  chat_js_1.HumanMessagePromptTemplate.fromTemplate(humanMessageTemplate),
package/dist/agents/chat/index.d.ts CHANGED
@@ -16,6 +16,8 @@ export interface ChatCreatePromptArgs {
  prefix?: string;
  /** String to use directly as the human message template. */
  humanMessageTemplate?: string;
+ /** Formattable string to use as the instructions template. */
+ formatInstructions?: string;
  /** List of input variables the final prompt will expect. */
  inputVariables?: string[];
  }
@@ -65,6 +67,7 @@ export declare class ChatAgent extends Agent {
  * @param args.suffix - String to put after the list of tools.
  * @param args.prefix - String to put before the list of tools.
  * @param args.humanMessageTemplate - String to use directly as the human message template
+ * @param args.formatInstructions - Formattable string to use as the instructions template
  */
  static createPrompt(tools: Tool[], args?: ChatCreatePromptArgs): ChatPromptTemplate<any, any>;
  /**
package/dist/agents/chat/index.js CHANGED
@@ -77,13 +77,14 @@ export class ChatAgent extends Agent {
  * @param args.suffix - String to put after the list of tools.
  * @param args.prefix - String to put before the list of tools.
  * @param args.humanMessageTemplate - String to use directly as the human message template
+ * @param args.formatInstructions - Formattable string to use as the instructions template
  */
  static createPrompt(tools, args) {
- const { prefix = PREFIX, suffix = SUFFIX, humanMessageTemplate = DEFAULT_HUMAN_MESSAGE_TEMPLATE, } = args ?? {};
+ const { prefix = PREFIX, suffix = SUFFIX, humanMessageTemplate = DEFAULT_HUMAN_MESSAGE_TEMPLATE, formatInstructions = FORMAT_INSTRUCTIONS, } = args ?? {};
  const toolStrings = tools
  .map((tool) => `${tool.name}: ${tool.description}`)
  .join("\n");
- const template = [prefix, toolStrings, FORMAT_INSTRUCTIONS, suffix].join("\n\n");
+ const template = [prefix, toolStrings, formatInstructions, suffix].join("\n\n");
  const messages = [
  SystemMessagePromptTemplate.fromTemplate(template),
  HumanMessagePromptTemplate.fromTemplate(humanMessageTemplate),
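
The hunks above add a `formatInstructions` option to `ChatCreatePromptArgs`, letting callers override the default `FORMAT_INSTRUCTIONS` block in the generated system prompt. A minimal sketch of how this might be used; the `Calculator` tool and the instruction text are illustrative assumptions, while `ChatAgent.createPrompt` and its argument names come from the diff:

```ts
// Sketch only: the tool and instruction text are placeholders.
import { ChatAgent } from "langchain/agents";
import { Calculator } from "langchain/tools/calculator";

const prompt = ChatAgent.createPrompt([new Calculator()], {
  // Overrides the default FORMAT_INSTRUCTIONS section of the system message.
  formatInstructions:
    'Reply with a JSON blob containing an "action" key and an "action_input" key.',
});
```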
package/dist/callbacks/handlers/llmonitor.cjs CHANGED
@@ -72,13 +72,15 @@ const parseInput = (rawInput) => {
  const parseOutput = (rawOutput) => {
  if (!rawOutput)
  return null;
- const { text, output, answer } = rawOutput;
+ const { text, output, answer, result } = rawOutput;
  if (text)
  return text;
  if (answer)
  return answer;
  if (output)
  return output;
+ if (result)
+ return result;
  return rawOutput;
  };
  class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
@@ -111,15 +113,14 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  ...(extraParams?.invocation_params || {}),
  ...(metadata || {}),
  };
- const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
- const userId = params?.userId || undefined;
- const userProps = params?.userProps || undefined;
+ const { model, model_name, modelName, userId, userProps, ...rest } = params;
+ const name = model || modelName || model_name || llm.id.at(-1);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: (0, exports.convertToLLMonitorMessages)(prompts),
- extra: params,
+ extra: rest,
  userId,
  userProps,
  tags,
@@ -131,15 +132,15 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  ...(extraParams?.invocation_params || {}),
  ...(metadata || {}),
  };
- const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
- const userId = params?.userId || undefined;
- const userProps = params?.userProps || undefined;
+ // Expand them so they're excluded from the "extra" field
+ const { model, model_name, modelName, userId, userProps, ...rest } = params;
+ const name = model || modelName || model_name || llm.id.at(-1);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: (0, exports.convertToLLMonitorMessages)(messages),
- extra: params,
+ extra: rest,
  userId,
  userProps,
  tags,
@@ -164,20 +165,19 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  });
  }
  async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
+ const { agentName, userId, userProps, ...rest } = metadata || {};
  // allow the user to specify an agent name
- const chainName = chain.id.at(-1);
- const name = (metadata?.agentName ?? chainName);
+ const name = agentName || chain.id.at(-1);
  // Attempt to automatically detect if this is an agent or chain
- const runType = metadata?.agentName ||
- ["AgentExecutor", "PlanAndExecute"].includes(chainName)
+ const runType = agentName || ["AgentExecutor", "PlanAndExecute"].includes(name)
  ? "agent"
  : "chain";
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
- const { agentName, ...rest } = metadata || {};
  await this.monitor.trackEvent(runType, "start", {
  runId,
  parentRunId,
  name,
+ userId,
+ userProps,
  input: parseInput(inputs),
  extra: rest,
  tags,
@@ -197,12 +197,16 @@ class LLMonitorHandler extends base_js_1.BaseCallbackHandler {
  });
  }
  async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
+ const { toolName, userId, userProps, ...rest } = metadata || {};
+ const name = toolName || tool.id.at(-1);
  await this.monitor.trackEvent("tool", "start", {
  runId,
  parentRunId,
- name: tool.id[tool.id.length - 1],
+ name,
+ userId,
+ userProps,
  input,
- extra: metadata,
+ extra: rest,
  tags,
  runtime: "langchain-js",
  });
package/dist/callbacks/handlers/llmonitor.js CHANGED
@@ -65,13 +65,15 @@ const parseInput = (rawInput) => {
  const parseOutput = (rawOutput) => {
  if (!rawOutput)
  return null;
- const { text, output, answer } = rawOutput;
+ const { text, output, answer, result } = rawOutput;
  if (text)
  return text;
  if (answer)
  return answer;
  if (output)
  return output;
+ if (result)
+ return result;
  return rawOutput;
  };
  export class LLMonitorHandler extends BaseCallbackHandler {
@@ -104,15 +106,14 @@ export class LLMonitorHandler extends BaseCallbackHandler {
  ...(extraParams?.invocation_params || {}),
  ...(metadata || {}),
  };
- const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
- const userId = params?.userId || undefined;
- const userProps = params?.userProps || undefined;
+ const { model, model_name, modelName, userId, userProps, ...rest } = params;
+ const name = model || modelName || model_name || llm.id.at(-1);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: convertToLLMonitorMessages(prompts),
- extra: params,
+ extra: rest,
  userId,
  userProps,
  tags,
@@ -124,15 +125,15 @@ export class LLMonitorHandler extends BaseCallbackHandler {
  ...(extraParams?.invocation_params || {}),
  ...(metadata || {}),
  };
- const name = params?.model || params?.name || params?.model_name || llm.id.at(-1);
- const userId = params?.userId || undefined;
- const userProps = params?.userProps || undefined;
+ // Expand them so they're excluded from the "extra" field
+ const { model, model_name, modelName, userId, userProps, ...rest } = params;
+ const name = model || modelName || model_name || llm.id.at(-1);
  await this.monitor.trackEvent("llm", "start", {
  runId,
  parentRunId,
  name,
  input: convertToLLMonitorMessages(messages),
- extra: params,
+ extra: rest,
  userId,
  userProps,
  tags,
@@ -157,20 +158,19 @@ export class LLMonitorHandler extends BaseCallbackHandler {
  });
  }
  async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata) {
+ const { agentName, userId, userProps, ...rest } = metadata || {};
  // allow the user to specify an agent name
- const chainName = chain.id.at(-1);
- const name = (metadata?.agentName ?? chainName);
+ const name = agentName || chain.id.at(-1);
  // Attempt to automatically detect if this is an agent or chain
- const runType = metadata?.agentName ||
- ["AgentExecutor", "PlanAndExecute"].includes(chainName)
+ const runType = agentName || ["AgentExecutor", "PlanAndExecute"].includes(name)
  ? "agent"
  : "chain";
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
- const { agentName, ...rest } = metadata || {};
  await this.monitor.trackEvent(runType, "start", {
  runId,
  parentRunId,
  name,
+ userId,
+ userProps,
  input: parseInput(inputs),
  extra: rest,
  tags,
@@ -190,12 +190,16 @@ export class LLMonitorHandler extends BaseCallbackHandler {
  });
  }
  async handleToolStart(tool, input, runId, parentRunId, tags, metadata) {
+ const { toolName, userId, userProps, ...rest } = metadata || {};
+ const name = toolName || tool.id.at(-1);
  await this.monitor.trackEvent("tool", "start", {
  runId,
  parentRunId,
- name: tool.id[tool.id.length - 1],
+ name,
+ userId,
+ userProps,
  input,
- extra: metadata,
+ extra: rest,
  tags,
  runtime: "langchain-js",
  });
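
With the handler changes above, `userId`, `userProps`, `agentName`, and `toolName` supplied in run metadata are promoted to top-level fields on the tracked event instead of being folded into `extra`. A hedged sketch of supplying that metadata from a call site; the chain, prompt, and values are illustrative, and passing `metadata` in the call config assumes a LangChain version that accepts it there:

```ts
// Illustrative sketch; assumes OPENAI_API_KEY and LLMONITOR_APP_ID are set.
import { OpenAI } from "langchain/llms/openai";
import { LLMChain } from "langchain/chains";
import { PromptTemplate } from "langchain/prompts";
import { LLMonitorHandler } from "langchain/callbacks/handlers/llmonitor";

const chain = new LLMChain({
  llm: new OpenAI({ temperature: 0 }),
  prompt: PromptTemplate.fromTemplate("Say hello to {name}"),
});

await chain.call(
  { name: "Ada" },
  {
    callbacks: [new LLMonitorHandler()],
    metadata: {
      agentName: "greeting-chain", // used as the tracked run's name
      userId: "user-123",          // now sent top-level, not inside "extra"
      userProps: { plan: "pro" },
    },
  }
);
```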
package/dist/chat_models/cloudflare_workersai.cjs ADDED
@@ -0,0 +1,145 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.ChatCloudflareWorkersAI = void 0;
+ const base_js_1 = require("./base.cjs");
+ const index_js_1 = require("../schema/index.cjs");
+ const env_js_1 = require("../util/env.cjs");
+ /**
+ * A class that enables calls to the Cloudflare Workers AI API to access large language
+ * models in a chat-like fashion. It extends the SimpleChatModel class and
+ * implements the CloudflareWorkersAIInput interface.
+ */
+ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
+ static lc_name() {
+ return "ChatCloudflareWorkersAI";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "lc_serializable", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "@cf/meta/llama-2-7b-chat-int8"
+ });
+ Object.defineProperty(this, "cloudflareAccountId", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "cloudflareApiToken", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "baseUrl", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.model = fields?.model ?? this.model;
+ this.cloudflareAccountId =
+ fields?.cloudflareAccountId ??
+ (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_ACCOUNT_ID");
+ this.cloudflareApiToken =
+ fields?.cloudflareApiToken ??
+ (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_API_TOKEN");
+ this.baseUrl =
+ fields?.baseUrl ??
+ `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
+ if (this.baseUrl.endsWith("/")) {
+ this.baseUrl = this.baseUrl.slice(0, -1);
+ }
+ }
+ _llmType() {
+ return "cloudflare";
+ }
+ /** Get the identifying parameters for this LLM. */
+ get identifyingParams() {
+ return { model: this.model };
+ }
+ /**
+ * Get the parameters used to invoke the model
+ */
+ invocationParams(_options) {
+ return {
+ model: this.model,
+ };
+ }
+ _combineLLMOutput() {
+ return {};
+ }
+ /**
+ * Method to validate the environment.
+ */
+ validateEnvironment() {
+ if (!this.cloudflareAccountId) {
+ throw new Error(`No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`);
+ }
+ if (!this.cloudflareApiToken) {
+ throw new Error(`No Cloudflare API key found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_KEY" in your environment variables.`);
+ }
+ }
+ _formatMessages(messages) {
+ const formattedMessages = messages.map((message) => {
+ let role;
+ if (message._getType() === "human") {
+ role = "user";
+ }
+ else if (message._getType() === "ai") {
+ role = "assistant";
+ }
+ else if (message._getType() === "system") {
+ role = "system";
+ }
+ else if (index_js_1.ChatMessage.isInstance(message)) {
+ role = message.role;
+ }
+ else {
+ console.warn(`Unsupported message type passed to Cloudflare: "${message._getType()}"`);
+ role = "user";
+ }
+ return {
+ role,
+ content: message.content,
+ };
+ });
+ return formattedMessages;
+ }
+ /** @ignore */
+ async _call(messages, options) {
+ this.validateEnvironment();
+ const url = `${this.baseUrl}/${this.model}`;
+ const headers = {
+ Authorization: `Bearer ${this.cloudflareApiToken}`,
+ "Content-Type": "application/json",
+ };
+ const formattedMessages = this._formatMessages(messages);
+ const data = { messages: formattedMessages };
+ const responseData = await this.caller.call(async () => {
+ const response = await fetch(url, {
+ method: "POST",
+ headers,
+ body: JSON.stringify(data),
+ signal: options.signal,
+ });
+ if (!response.ok) {
+ const error = new Error(`Cloudflare LLM call failed with status code ${response.status}`);
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ error.response = response;
+ throw error;
+ }
+ return response.json();
+ });
+ return responseData.result.response;
+ }
+ }
+ exports.ChatCloudflareWorkersAI = ChatCloudflareWorkersAI;
package/dist/chat_models/cloudflare_workersai.d.ts ADDED
@@ -0,0 +1,46 @@
+ import { SimpleChatModel, BaseChatModelParams } from "./base.js";
+ import { BaseLanguageModelCallOptions } from "../base_language/index.js";
+ import { BaseMessage } from "../schema/index.js";
+ import { CloudflareWorkersAIInput } from "../llms/cloudflare_workersai.js";
+ /**
+ * An interface defining the options for a Cloudflare Workers AI call. It extends
+ * the BaseLanguageModelCallOptions interface.
+ */
+ export interface ChatCloudflareWorkersAICallOptions extends BaseLanguageModelCallOptions {
+ }
+ /**
+ * A class that enables calls to the Cloudflare Workers AI API to access large language
+ * models in a chat-like fashion. It extends the SimpleChatModel class and
+ * implements the CloudflareWorkersAIInput interface.
+ */
+ export declare class ChatCloudflareWorkersAI extends SimpleChatModel implements CloudflareWorkersAIInput {
+ static lc_name(): string;
+ lc_serializable: boolean;
+ model: string;
+ cloudflareAccountId?: string;
+ cloudflareApiToken?: string;
+ baseUrl: string;
+ constructor(fields?: CloudflareWorkersAIInput & BaseChatModelParams);
+ _llmType(): string;
+ /** Get the identifying parameters for this LLM. */
+ get identifyingParams(): {
+ model: string;
+ };
+ /**
+ * Get the parameters used to invoke the model
+ */
+ invocationParams(_options?: this["ParsedCallOptions"]): {
+ model: string;
+ };
+ _combineLLMOutput(): {};
+ /**
+ * Method to validate the environment.
+ */
+ validateEnvironment(): void;
+ protected _formatMessages(messages: BaseMessage[]): {
+ role: string;
+ content: string;
+ }[];
+ /** @ignore */
+ _call(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<string>;
+ }
package/dist/chat_models/cloudflare_workersai.js ADDED
@@ -0,0 +1,141 @@
+ import { SimpleChatModel } from "./base.js";
+ import { ChatMessage } from "../schema/index.js";
+ import { getEnvironmentVariable } from "../util/env.js";
+ /**
+ * A class that enables calls to the Cloudflare Workers AI API to access large language
+ * models in a chat-like fashion. It extends the SimpleChatModel class and
+ * implements the CloudflareWorkersAIInput interface.
+ */
+ export class ChatCloudflareWorkersAI extends SimpleChatModel {
+ static lc_name() {
+ return "ChatCloudflareWorkersAI";
+ }
+ constructor(fields) {
+ super(fields ?? {});
+ Object.defineProperty(this, "lc_serializable", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "@cf/meta/llama-2-7b-chat-int8"
+ });
+ Object.defineProperty(this, "cloudflareAccountId", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "cloudflareApiToken", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "baseUrl", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.model = fields?.model ?? this.model;
+ this.cloudflareAccountId =
+ fields?.cloudflareAccountId ??
+ getEnvironmentVariable("CLOUDFLARE_ACCOUNT_ID");
+ this.cloudflareApiToken =
+ fields?.cloudflareApiToken ??
+ getEnvironmentVariable("CLOUDFLARE_API_TOKEN");
+ this.baseUrl =
+ fields?.baseUrl ??
+ `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
+ if (this.baseUrl.endsWith("/")) {
+ this.baseUrl = this.baseUrl.slice(0, -1);
+ }
+ }
+ _llmType() {
+ return "cloudflare";
+ }
+ /** Get the identifying parameters for this LLM. */
+ get identifyingParams() {
+ return { model: this.model };
+ }
+ /**
+ * Get the parameters used to invoke the model
+ */
+ invocationParams(_options) {
+ return {
+ model: this.model,
+ };
+ }
+ _combineLLMOutput() {
+ return {};
+ }
+ /**
+ * Method to validate the environment.
+ */
+ validateEnvironment() {
+ if (!this.cloudflareAccountId) {
+ throw new Error(`No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`);
+ }
+ if (!this.cloudflareApiToken) {
+ throw new Error(`No Cloudflare API key found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_KEY" in your environment variables.`);
+ }
+ }
+ _formatMessages(messages) {
+ const formattedMessages = messages.map((message) => {
+ let role;
+ if (message._getType() === "human") {
+ role = "user";
+ }
+ else if (message._getType() === "ai") {
+ role = "assistant";
+ }
+ else if (message._getType() === "system") {
+ role = "system";
+ }
+ else if (ChatMessage.isInstance(message)) {
+ role = message.role;
+ }
+ else {
+ console.warn(`Unsupported message type passed to Cloudflare: "${message._getType()}"`);
+ role = "user";
+ }
+ return {
+ role,
+ content: message.content,
+ };
+ });
+ return formattedMessages;
+ }
+ /** @ignore */
+ async _call(messages, options) {
+ this.validateEnvironment();
+ const url = `${this.baseUrl}/${this.model}`;
+ const headers = {
+ Authorization: `Bearer ${this.cloudflareApiToken}`,
+ "Content-Type": "application/json",
+ };
+ const formattedMessages = this._formatMessages(messages);
+ const data = { messages: formattedMessages };
+ const responseData = await this.caller.call(async () => {
+ const response = await fetch(url, {
+ method: "POST",
+ headers,
+ body: JSON.stringify(data),
+ signal: options.signal,
+ });
+ if (!response.ok) {
+ const error = new Error(`Cloudflare LLM call failed with status code ${response.status}`);
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ error.response = response;
+ throw error;
+ }
+ return response.json();
+ });
+ return responseData.result.response;
+ }
+ }
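
Together with the new `chat_models/cloudflare_workersai` entrypoint stubs added earlier in this diff, the class can be used like any other chat model. A minimal usage sketch; the credentials are placeholders (they may instead come from the `CLOUDFLARE_ACCOUNT_ID` / `CLOUDFLARE_API_TOKEN` environment variables) and the model name is the constructor default shown above:

```ts
// Sketch based on the constructor fields in the diff; values are placeholders.
import { ChatCloudflareWorkersAI } from "langchain/chat_models/cloudflare_workersai";
import { HumanMessage } from "langchain/schema";

const model = new ChatCloudflareWorkersAI({
  model: "@cf/meta/llama-2-7b-chat-int8", // default model from the diff
  cloudflareAccountId: "my-account-id",
  cloudflareApiToken: "my-api-token",
});

const response = await model.call([
  new HumanMessage("Write a haiku about edge computing."),
]);
console.log(response.content);
```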