langchain 0.0.151 → 0.0.153

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. package/cache/cloudflare_kv.cjs +1 -0
  2. package/cache/cloudflare_kv.d.ts +1 -0
  3. package/cache/cloudflare_kv.js +1 -0
  4. package/chat_models/fireworks.cjs +1 -0
  5. package/chat_models/fireworks.d.ts +1 -0
  6. package/chat_models/fireworks.js +1 -0
  7. package/dist/agents/chat/index.cjs +1 -1
  8. package/dist/agents/chat/index.js +1 -1
  9. package/dist/agents/chat_convo/index.cjs +1 -1
  10. package/dist/agents/chat_convo/index.js +1 -1
  11. package/dist/agents/executor.cjs +9 -2
  12. package/dist/agents/executor.js +9 -2
  13. package/dist/agents/openai/index.cjs +1 -1
  14. package/dist/agents/openai/index.js +1 -1
  15. package/dist/agents/structured_chat/index.cjs +1 -1
  16. package/dist/agents/structured_chat/index.js +1 -1
  17. package/dist/agents/xml/index.cjs +1 -1
  18. package/dist/agents/xml/index.js +1 -1
  19. package/dist/base_language/count_tokens.cjs +2 -1
  20. package/dist/base_language/count_tokens.js +2 -1
  21. package/dist/base_language/index.cjs +5 -3
  22. package/dist/base_language/index.d.ts +1 -1
  23. package/dist/base_language/index.js +4 -3
  24. package/dist/cache/cloudflare_kv.cjs +61 -0
  25. package/dist/cache/cloudflare_kv.d.ts +29 -0
  26. package/dist/cache/cloudflare_kv.js +57 -0
  27. package/dist/chains/openai_functions/openapi.cjs +1 -1
  28. package/dist/chains/openai_functions/openapi.js +1 -1
  29. package/dist/chains/openai_functions/structured_output.d.ts +2 -2
  30. package/dist/chains/question_answering/map_reduce_prompts.cjs +2 -3
  31. package/dist/chains/question_answering/map_reduce_prompts.js +2 -3
  32. package/dist/chains/question_answering/refine_prompts.cjs +2 -2
  33. package/dist/chains/question_answering/refine_prompts.js +2 -2
  34. package/dist/chains/question_answering/stuff_prompts.cjs +1 -2
  35. package/dist/chains/question_answering/stuff_prompts.js +1 -2
  36. package/dist/chat_models/base.d.ts +1 -1
  37. package/dist/chat_models/fireworks.cjs +81 -0
  38. package/dist/chat_models/fireworks.d.ts +33 -0
  39. package/dist/chat_models/fireworks.js +77 -0
  40. package/dist/chat_models/ollama.cjs +25 -12
  41. package/dist/chat_models/ollama.d.ts +2 -3
  42. package/dist/chat_models/ollama.js +25 -12
  43. package/dist/chat_models/openai.d.ts +2 -2
  44. package/dist/document_loaders/web/pdf.cjs +87 -0
  45. package/dist/document_loaders/web/pdf.d.ts +17 -0
  46. package/dist/document_loaders/web/pdf.js +83 -0
  47. package/dist/evaluation/agents/prompt.cjs +2 -3
  48. package/dist/evaluation/agents/prompt.js +2 -3
  49. package/dist/experimental/plan_and_execute/prompt.cjs +1 -1
  50. package/dist/experimental/plan_and_execute/prompt.js +1 -1
  51. package/dist/llms/fireworks.cjs +92 -0
  52. package/dist/llms/fireworks.d.ts +33 -0
  53. package/dist/llms/fireworks.js +88 -0
  54. package/dist/llms/llama_cpp.cjs +10 -4
  55. package/dist/llms/llama_cpp.d.ts +2 -1
  56. package/dist/llms/llama_cpp.js +10 -4
  57. package/dist/llms/ollama.cjs +29 -14
  58. package/dist/llms/ollama.d.ts +3 -4
  59. package/dist/llms/ollama.js +29 -14
  60. package/dist/llms/openai-chat.cjs +1 -5
  61. package/dist/llms/openai-chat.d.ts +1 -1
  62. package/dist/llms/openai-chat.js +1 -5
  63. package/dist/llms/openai.cjs +3 -4
  64. package/dist/llms/openai.d.ts +2 -2
  65. package/dist/llms/openai.js +3 -4
  66. package/dist/load/import_constants.cjs +3 -0
  67. package/dist/load/import_constants.js +3 -0
  68. package/dist/load/import_map.cjs +4 -2
  69. package/dist/load/import_map.d.ts +2 -0
  70. package/dist/load/import_map.js +2 -0
  71. package/dist/prompts/chat.cjs +12 -1
  72. package/dist/prompts/chat.d.ts +8 -0
  73. package/dist/prompts/chat.js +12 -1
  74. package/dist/schema/output_parser.cjs +38 -6
  75. package/dist/schema/output_parser.d.ts +20 -5
  76. package/dist/schema/output_parser.js +38 -6
  77. package/dist/schema/runnable/base.cjs +65 -10
  78. package/dist/schema/runnable/base.d.ts +17 -3
  79. package/dist/schema/runnable/base.js +65 -10
  80. package/dist/stores/message/cloudflare_d1.cjs +134 -0
  81. package/dist/stores/message/cloudflare_d1.d.ts +49 -0
  82. package/dist/stores/message/cloudflare_d1.js +130 -0
  83. package/dist/types/openai-types.d.ts +2 -0
  84. package/dist/util/ollama.cjs +2 -2
  85. package/dist/util/ollama.d.ts +6 -0
  86. package/dist/util/ollama.js +2 -2
  87. package/document_loaders/web/pdf.cjs +1 -0
  88. package/document_loaders/web/pdf.d.ts +1 -0
  89. package/document_loaders/web/pdf.js +1 -0
  90. package/llms/fireworks.cjs +1 -0
  91. package/llms/fireworks.d.ts +1 -0
  92. package/llms/fireworks.js +1 -0
  93. package/package.json +46 -1
  94. package/stores/message/cloudflare_d1.cjs +1 -0
  95. package/stores/message/cloudflare_d1.d.ts +1 -0
  96. package/stores/message/cloudflare_d1.js +1 -0

package/dist/llms/fireworks.js
@@ -0,0 +1,88 @@
+ import { getEnvironmentVariable } from "../util/env.js";
+ import { OpenAI } from "./openai.js";
+ /**
+  * Wrapper around Fireworks API for large language models
+  *
+  * Fireworks API is compatible to the OpenAI API with some limitations described in
+  * https://readme.fireworks.ai/docs/openai-compatibility.
+  *
+  * To use, you should have the `openai` package installed and
+  * the `FIREWORKS_API_KEY` environment variable set.
+  */
+ export class Fireworks extends OpenAI {
+     static lc_name() {
+         return "Fireworks";
+     }
+     _llmType() {
+         return "fireworks";
+     }
+     get lc_secrets() {
+         return {
+             fireworksApiKey: "FIREWORKS_API_KEY",
+         };
+     }
+     constructor(fields) {
+         const fireworksApiKey = fields?.fireworksApiKey || getEnvironmentVariable("FIREWORKS_API_KEY");
+         if (!fireworksApiKey) {
+             throw new Error(`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`);
+         }
+         super({
+             ...fields,
+             openAIApiKey: fireworksApiKey,
+             modelName: fields?.modelName || "accounts/fireworks/models/llama-v2-13b",
+             configuration: {
+                 baseURL: "https://api.fireworks.ai/inference/v1",
+             },
+         });
+         Object.defineProperty(this, "lc_serializable", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: true
+         });
+         Object.defineProperty(this, "fireworksApiKey", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.fireworksApiKey = fireworksApiKey;
+     }
+     toJSON() {
+         const result = super.toJSON();
+         if ("kwargs" in result &&
+             typeof result.kwargs === "object" &&
+             result.kwargs != null) {
+             delete result.kwargs.openai_api_key;
+             delete result.kwargs.configuration;
+         }
+         return result;
+     }
+     /**
+      * Calls the Fireworks API with retry logic in case of failures.
+      * @param request The request to send to the Fireworks API.
+      * @param options Optional configuration for the API call.
+      * @returns The response from the Fireworks API.
+      */
+     async completionWithRetry(request, options) {
+         // https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility
+         if (Array.isArray(request.prompt)) {
+             if (request.prompt.length > 1) {
+                 throw new Error("Multiple prompts are not supported by Fireworks");
+             }
+             const prompt = request.prompt[0];
+             if (typeof prompt !== "string") {
+                 throw new Error("Only string prompts are supported by Fireworks");
+             }
+             request.prompt = prompt;
+         }
+         delete request.frequency_penalty;
+         delete request.presence_penalty;
+         delete request.best_of;
+         delete request.logit_bias;
+         if (request.stream === true) {
+             return super.completionWithRetry(request, options);
+         }
+         return super.completionWithRetry(request, options);
+     }
+ }
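
The new `Fireworks` class above is a thin wrapper over the `OpenAI` LLM pointed at Fireworks' OpenAI-compatible endpoint, dropping the request fields Fireworks does not support. A minimal usage sketch; the model name is the default shown in the constructor above, and the `temperature` value is illustrative (inherited from the OpenAI input params):

    import { Fireworks } from "langchain/llms/fireworks";

    // Reads FIREWORKS_API_KEY from the environment if not passed explicitly.
    const model = new Fireworks({
      modelName: "accounts/fireworks/models/llama-v2-13b", // default from the constructor above
      temperature: 0.7, // illustrative
    });

    const res = await model.call("What is a good name for a pet llama?");
    console.log(res);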

package/dist/llms/llama_cpp.cjs
@@ -99,6 +99,12 @@ class LlamaCpp extends base_js_1.LLM {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "_session", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.batchSize = inputs.batchSize;
          this.contextSize = inputs.contextSize;
          this.embedding = inputs.embedding;
@@ -113,19 +119,19 @@ class LlamaCpp extends base_js_1.LLM {
          this.vocabOnly = inputs.vocabOnly;
          this._model = new node_llama_cpp_1.LlamaModel(inputs);
          this._context = new node_llama_cpp_1.LlamaContext({ model: this._model });
+         this._session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
      }
      _llmType() {
          return "llama2_cpp";
      }
      /** @ignore */
      async _call(prompt, options) {
-         const session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
          try {
-             const compleation = await session.prompt(prompt, options);
-             return compleation;
+             const completion = await this._session.prompt(prompt, options);
+             return completion;
          }
          catch (e) {
-             throw new Error("Error getting prompt compleation.");
+             throw new Error("Error getting prompt completion.");
          }
      }
  }

package/dist/llms/llama_cpp.d.ts
@@ -1,4 +1,4 @@
- import { LlamaModel, LlamaContext } from "node-llama-cpp";
+ import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
  import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
  /**
   * Note that the modelPath is the only required parameter. For testing you
@@ -65,6 +65,7 @@ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
      modelPath: string;
      _model: LlamaModel;
      _context: LlamaContext;
+     _session: LlamaChatSession;
      static lc_name(): string;
      constructor(inputs: LlamaCppInputs);
      _llmType(): string;

package/dist/llms/llama_cpp.js
@@ -96,6 +96,12 @@ export class LlamaCpp extends LLM {
              writable: true,
              value: void 0
          });
+         Object.defineProperty(this, "_session", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
          this.batchSize = inputs.batchSize;
          this.contextSize = inputs.contextSize;
          this.embedding = inputs.embedding;
@@ -110,19 +116,19 @@ export class LlamaCpp extends LLM {
          this.vocabOnly = inputs.vocabOnly;
          this._model = new LlamaModel(inputs);
          this._context = new LlamaContext({ model: this._model });
+         this._session = new LlamaChatSession({ context: this._context });
      }
      _llmType() {
          return "llama2_cpp";
      }
      /** @ignore */
      async _call(prompt, options) {
-         const session = new LlamaChatSession({ context: this._context });
          try {
-             const compleation = await session.prompt(prompt, options);
-             return compleation;
+             const completion = await this._session.prompt(prompt, options);
+             return completion;
          }
          catch (e) {
-             throw new Error("Error getting prompt compleation.");
+             throw new Error("Error getting prompt completion.");
          }
      }
  }
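
The LlamaCpp change above hoists `LlamaChatSession` creation out of `_call` and into the constructor, so a single session is reused across calls (which also means earlier prompts remain in the session's chat history). A minimal usage sketch, assuming a local model path and the `langchain/llms/llama_cpp` entrypoint:

    import { LlamaCpp } from "langchain/llms/llama_cpp";

    // modelPath is the only required input; this path is illustrative.
    const model = new LlamaCpp({ modelPath: "/path/to/llama-2-13b.bin" });

    // Both calls now go through the session created in the constructor,
    // instead of a fresh LlamaChatSession per call.
    console.log(await model.call("Where do llamas come from?"));
    console.log(await model.call("How tall do they get?"));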

package/dist/llms/ollama.cjs
@@ -287,25 +287,40 @@ class Ollama extends base_js_1.LLM {
              },
          };
      }
-     async *_streamResponseChunks(input, options, runManager) {
-         const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt: input }, options));
+     async *_streamResponseChunks(prompt, options, runManager) {
+         const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
          for await (const chunk of stream) {
-             yield new index_js_1.GenerationChunk({
-                 text: chunk.response,
-                 generationInfo: {
-                     ...chunk,
-                     response: undefined,
-                 },
-             });
-             await runManager?.handleLLMNewToken(chunk.response ?? "");
+             if (!chunk.done) {
+                 yield new index_js_1.GenerationChunk({
+                     text: chunk.response,
+                     generationInfo: {
+                         ...chunk,
+                         response: undefined,
+                     },
+                 });
+                 await runManager?.handleLLMNewToken(chunk.response ?? "");
+             }
+             else {
+                 yield new index_js_1.GenerationChunk({
+                     text: "",
+                     generationInfo: {
+                         model: chunk.model,
+                         total_duration: chunk.total_duration,
+                         load_duration: chunk.load_duration,
+                         prompt_eval_count: chunk.prompt_eval_count,
+                         prompt_eval_duration: chunk.prompt_eval_duration,
+                         eval_count: chunk.eval_count,
+                         eval_duration: chunk.eval_duration,
+                     },
+                 });
+             }
          }
      }
      /** @ignore */
-     async _call(prompt, options) {
-         const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
+     async _call(prompt, options, runManager) {
          const chunks = [];
-         for await (const chunk of stream) {
-             chunks.push(chunk.response);
+         for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
+             chunks.push(chunk.text);
          }
          return chunks.join("");
      }

package/dist/llms/ollama.d.ts
@@ -6,8 +6,7 @@ import { GenerationChunk } from "../schema/index.js";
   * Class that represents the Ollama language model. It extends the base
   * LLM class and implements the OllamaInput interface.
   */
- export declare class Ollama extends LLM implements OllamaInput {
-     CallOptions: OllamaCallOptions;
+ export declare class Ollama extends LLM<OllamaCallOptions> implements OllamaInput {
      static lc_name(): string;
      lc_serializable: boolean;
      model: string;
@@ -79,7 +78,7 @@ export declare class Ollama extends LLM implements OllamaInput {
              vocab_only: boolean | undefined;
          };
      };
-     _streamResponseChunks(input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
+     _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
      /** @ignore */
-     _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+     _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
  }

package/dist/llms/ollama.js
@@ -284,25 +284,40 @@ export class Ollama extends LLM {
              },
          };
      }
-     async *_streamResponseChunks(input, options, runManager) {
-         const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt: input }, options));
+     async *_streamResponseChunks(prompt, options, runManager) {
+         const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
          for await (const chunk of stream) {
-             yield new GenerationChunk({
-                 text: chunk.response,
-                 generationInfo: {
-                     ...chunk,
-                     response: undefined,
-                 },
-             });
-             await runManager?.handleLLMNewToken(chunk.response ?? "");
+             if (!chunk.done) {
+                 yield new GenerationChunk({
+                     text: chunk.response,
+                     generationInfo: {
+                         ...chunk,
+                         response: undefined,
+                     },
+                 });
+                 await runManager?.handleLLMNewToken(chunk.response ?? "");
+             }
+             else {
+                 yield new GenerationChunk({
+                     text: "",
+                     generationInfo: {
+                         model: chunk.model,
+                         total_duration: chunk.total_duration,
+                         load_duration: chunk.load_duration,
+                         prompt_eval_count: chunk.prompt_eval_count,
+                         prompt_eval_duration: chunk.prompt_eval_duration,
+                         eval_count: chunk.eval_count,
+                         eval_duration: chunk.eval_duration,
+                     },
+                 });
+             }
          }
      }
      /** @ignore */
-     async _call(prompt, options) {
-         const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
+     async _call(prompt, options, runManager) {
          const chunks = [];
-         for await (const chunk of stream) {
-             chunks.push(chunk.response);
+         for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
+             chunks.push(chunk.text);
          }
          return chunks.join("");
      }
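
With this change, Ollama surfaces the server's final `done` message as an empty-text `GenerationChunk` carrying only timing and token-count metadata in `generationInfo`, and `_call` now simply drains `_streamResponseChunks` (so callbacks fire for non-streaming calls too). A minimal streaming sketch; the base URL and model name are illustrative:

    import { Ollama } from "langchain/llms/ollama";

    const model = new Ollama({ baseUrl: "http://localhost:11434", model: "llama2" });

    // .stream() yields each token's text; the trailing done chunk arrives as "".
    for await (const chunk of await model.stream("Why is the sky blue?")) {
      process.stdout.write(chunk);
    }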

package/dist/llms/openai-chat.cjs
@@ -35,11 +35,7 @@ class OpenAIChat extends base_js_1.LLM {
          return "OpenAIChat";
      }
      get callKeys() {
-         return [
-             ...super.callKeys,
-             "options",
-             "promptIndex",
-         ];
+         return [...super.callKeys, "options", "promptIndex"];
      }
      get lc_secrets() {
          return {

package/dist/llms/openai-chat.d.ts
@@ -36,7 +36,7 @@ export interface OpenAIChatCallOptions extends OpenAICallOptions {
   */
  export declare class OpenAIChat extends LLM<OpenAIChatCallOptions> implements OpenAIChatInput, AzureOpenAIInput {
      static lc_name(): string;
-     get callKeys(): (keyof OpenAIChatCallOptions)[];
+     get callKeys(): string[];
      lc_serializable: boolean;
      get lc_secrets(): {
          [key: string]: string;

package/dist/llms/openai-chat.js
@@ -32,11 +32,7 @@ export class OpenAIChat extends LLM {
          return "OpenAIChat";
      }
      get callKeys() {
-         return [
-             ...super.callKeys,
-             "options",
-             "promptIndex",
-         ];
+         return [...super.callKeys, "options", "promptIndex"];
      }
      get lc_secrets() {
          return {

package/dist/llms/openai.cjs
@@ -57,10 +57,9 @@ class OpenAI extends base_js_1.BaseLLM {
      /** @deprecated */
      configuration) {
          if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
-             fields?.modelName?.startsWith("gpt-4") ||
-             fields?.modelName?.startsWith("gpt-4-32k")) &&
-             !fields?.modelName.endsWith("-instruct")) {
-             // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
+             fields?.modelName?.startsWith("gpt-4")) &&
+             !fields?.modelName?.includes("-instruct")) {
+             // eslint-disable-next-line no-constructor-return
              return new openai_chat_js_1.OpenAIChat(fields, configuration);
          }
          super(fields ?? {});

package/dist/llms/openai.d.ts
@@ -22,9 +22,9 @@ export { AzureOpenAIInput, OpenAICallOptions, OpenAIInput };
   * `openai.createCompletion`} can be passed through {@link modelKwargs}, even
   * if not explicitly available on this class.
   */
- export declare class OpenAI extends BaseLLM<OpenAICallOptions> implements OpenAIInput, AzureOpenAIInput {
+ export declare class OpenAI<CallOptions extends OpenAICallOptions = OpenAICallOptions> extends BaseLLM<CallOptions> implements OpenAIInput, AzureOpenAIInput {
      static lc_name(): string;
-     get callKeys(): (keyof OpenAICallOptions)[];
+     get callKeys(): string[];
      lc_serializable: boolean;
      get lc_secrets(): {
          [key: string]: string;
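
The `OpenAI` class is now generic over its call options, which is what allows subclasses such as the new `Fireworks` wrapper to extend it with their own typed options. A sketch of what the new type parameter permits downstream; the subclass and extra flag here are hypothetical:

    import { OpenAI } from "langchain/llms/openai";
    import type { OpenAICallOptions } from "langchain/llms/openai";

    // Hypothetical subclass: extends the call options type while reusing OpenAI's plumbing.
    interface MyCallOptions extends OpenAICallOptions {
      myCustomFlag?: boolean;
    }

    class MyOpenAI extends OpenAI<MyCallOptions> {}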

package/dist/llms/openai.js
@@ -54,10 +54,9 @@ export class OpenAI extends BaseLLM {
      /** @deprecated */
      configuration) {
          if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
-             fields?.modelName?.startsWith("gpt-4") ||
-             fields?.modelName?.startsWith("gpt-4-32k")) &&
-             !fields?.modelName.endsWith("-instruct")) {
-             // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
+             fields?.modelName?.startsWith("gpt-4")) &&
+             !fields?.modelName?.includes("-instruct")) {
+             // eslint-disable-next-line no-constructor-return
              return new OpenAIChat(fields, configuration);
          }
          super(fields ?? {});
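
The constructor change above matters for instruct-style model names: the routing check now uses `includes("-instruct")` (with optional chaining, fixing a potential crash when `modelName` is undefined), so such models stay on the completions-style `OpenAI` class instead of being redirected to `OpenAIChat`. For instance:

    import { OpenAI } from "langchain/llms/openai";

    // Routed to OpenAIChat under the hood (chat-style endpoint).
    const chatStyle = new OpenAI({ modelName: "gpt-3.5-turbo" });

    // Kept on the completions-style OpenAI class thanks to the "-instruct" check.
    const instruct = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });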

package/dist/load/import_constants.cjs
@@ -75,6 +75,7 @@ exports.optionalImportEntrypoints = [
      "langchain/document_loaders/web/github",
      "langchain/document_loaders/web/notiondb",
      "langchain/document_loaders/web/notionapi",
+     "langchain/document_loaders/web/pdf",
      "langchain/document_loaders/web/recursive_url",
      "langchain/document_loaders/web/s3",
      "langchain/document_loaders/web/sonix_audio",
@@ -110,12 +111,14 @@ exports.optionalImportEntrypoints = [
      "langchain/retrievers/self_query/pinecone",
      "langchain/retrievers/self_query/supabase",
      "langchain/retrievers/self_query/weaviate",
+     "langchain/cache/cloudflare_kv",
      "langchain/cache/momento",
      "langchain/cache/redis",
      "langchain/cache/ioredis",
      "langchain/cache/upstash_redis",
      "langchain/stores/doc/gcs",
      "langchain/stores/file/node",
+     "langchain/stores/message/cloudflare_d1",
      "langchain/stores/message/dynamodb",
      "langchain/stores/message/firestore",
      "langchain/stores/message/momento",

package/dist/load/import_constants.js
@@ -72,6 +72,7 @@ export const optionalImportEntrypoints = [
      "langchain/document_loaders/web/github",
      "langchain/document_loaders/web/notiondb",
      "langchain/document_loaders/web/notionapi",
+     "langchain/document_loaders/web/pdf",
      "langchain/document_loaders/web/recursive_url",
      "langchain/document_loaders/web/s3",
      "langchain/document_loaders/web/sonix_audio",
@@ -107,12 +108,14 @@ export const optionalImportEntrypoints = [
      "langchain/retrievers/self_query/pinecone",
      "langchain/retrievers/self_query/supabase",
      "langchain/retrievers/self_query/weaviate",
+     "langchain/cache/cloudflare_kv",
      "langchain/cache/momento",
      "langchain/cache/redis",
      "langchain/cache/ioredis",
      "langchain/cache/upstash_redis",
      "langchain/stores/doc/gcs",
      "langchain/stores/file/node",
+     "langchain/stores/message/cloudflare_d1",
      "langchain/stores/message/dynamodb",
      "langchain/stores/message/firestore",
      "langchain/stores/message/momento",

package/dist/load/import_map.cjs
@@ -24,8 +24,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
      return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
- exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = void 0;
+ exports.retrievers__databerry = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fireworks = exports.llms__ollama = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains = exports.tools = exports.base_language = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
+ exports.evaluation = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__math = exports.storage__in_memory = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = void 0;
  exports.load__serializable = __importStar(require("../load/serializable.cjs"));
  exports.agents = __importStar(require("../agents/index.cjs"));
  exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -44,6 +44,7 @@ exports.llms__openai = __importStar(require("../llms/openai.cjs"));
  exports.llms__ai21 = __importStar(require("../llms/ai21.cjs"));
  exports.llms__aleph_alpha = __importStar(require("../llms/aleph_alpha.cjs"));
  exports.llms__ollama = __importStar(require("../llms/ollama.cjs"));
+ exports.llms__fireworks = __importStar(require("../llms/fireworks.cjs"));
  exports.prompts = __importStar(require("../prompts/index.cjs"));
  exports.vectorstores__base = __importStar(require("../vectorstores/base.cjs"));
  exports.vectorstores__memory = __importStar(require("../vectorstores/memory.cjs"));
@@ -60,6 +61,7 @@ exports.document_transformers__openai_functions = __importStar(require("../docum
  exports.chat_models__base = __importStar(require("../chat_models/base.cjs"));
  exports.chat_models__openai = __importStar(require("../chat_models/openai.cjs"));
  exports.chat_models__anthropic = __importStar(require("../chat_models/anthropic.cjs"));
+ exports.chat_models__fireworks = __importStar(require("../chat_models/fireworks.cjs"));
  exports.chat_models__baiduwenxin = __importStar(require("../chat_models/baiduwenxin.cjs"));
  exports.chat_models__ollama = __importStar(require("../chat_models/ollama.cjs"));
  exports.chat_models__minimax = __importStar(require("../chat_models/minimax.cjs"));

package/dist/load/import_map.d.ts
@@ -16,6 +16,7 @@ export * as llms__openai from "../llms/openai.js";
  export * as llms__ai21 from "../llms/ai21.js";
  export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
  export * as llms__ollama from "../llms/ollama.js";
+ export * as llms__fireworks from "../llms/fireworks.js";
  export * as prompts from "../prompts/index.js";
  export * as vectorstores__base from "../vectorstores/base.js";
  export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -32,6 +33,7 @@ export * as document_transformers__openai_functions from "../document_transforme
  export * as chat_models__base from "../chat_models/base.js";
  export * as chat_models__openai from "../chat_models/openai.js";
  export * as chat_models__anthropic from "../chat_models/anthropic.js";
+ export * as chat_models__fireworks from "../chat_models/fireworks.js";
  export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
  export * as chat_models__ollama from "../chat_models/ollama.js";
  export * as chat_models__minimax from "../chat_models/minimax.js";

package/dist/load/import_map.js
@@ -17,6 +17,7 @@ export * as llms__openai from "../llms/openai.js";
  export * as llms__ai21 from "../llms/ai21.js";
  export * as llms__aleph_alpha from "../llms/aleph_alpha.js";
  export * as llms__ollama from "../llms/ollama.js";
+ export * as llms__fireworks from "../llms/fireworks.js";
  export * as prompts from "../prompts/index.js";
  export * as vectorstores__base from "../vectorstores/base.js";
  export * as vectorstores__memory from "../vectorstores/memory.js";
@@ -33,6 +34,7 @@ export * as document_transformers__openai_functions from "../document_transforme
  export * as chat_models__base from "../chat_models/base.js";
  export * as chat_models__openai from "../chat_models/openai.js";
  export * as chat_models__anthropic from "../chat_models/anthropic.js";
+ export * as chat_models__fireworks from "../chat_models/fireworks.js";
  export * as chat_models__baiduwenxin from "../chat_models/baiduwenxin.js";
  export * as chat_models__ollama from "../chat_models/ollama.js";
  export * as chat_models__minimax from "../chat_models/minimax.js";

package/dist/prompts/chat.cjs
@@ -356,8 +356,14 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
          };
          return new ChatPromptTemplate(promptDict);
      }
+     /**
+      * Create a chat model-specific prompt from individual chat messages
+      * or message-like tuples.
+      * @param promptMessages Messages to be passed to the chat model
+      * @returns A new ChatPromptTemplate
+      */
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     static fromPromptMessages(promptMessages) {
+     static fromMessages(promptMessages) {
          const flattenedMessages = promptMessages.reduce((acc, promptMessage) => acc.concat(
          // eslint-disable-next-line no-instanceof/no-instanceof
          promptMessage instanceof ChatPromptTemplate
@@ -386,5 +392,10 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
              partialVariables: flattenedPartialVariables,
          });
      }
+     /** @deprecated Renamed to .fromMessages */
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     static fromPromptMessages(promptMessages) {
+         return this.fromMessages(promptMessages);
+     }
  }
  exports.ChatPromptTemplate = ChatPromptTemplate;

package/dist/prompts/chat.d.ts
@@ -169,5 +169,13 @@ export declare class ChatPromptTemplate<RunInput extends InputValues = any, Part
      _getPromptType(): "chat";
      formatMessages(values: TypedPromptInputValues<RunInput>): Promise<BaseMessage[]>;
      partial<NewPartialVariableName extends string>(values: PartialValues<NewPartialVariableName>): Promise<ChatPromptTemplate<InputValues<Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>>, any>>;
+     /**
+      * Create a chat model-specific prompt from individual chat messages
+      * or message-like tuples.
+      * @param promptMessages Messages to be passed to the chat model
+      * @returns A new ChatPromptTemplate
+      */
+     static fromMessages<RunInput extends InputValues = any>(promptMessages: (ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike)[]): ChatPromptTemplate<RunInput>;
+     /** @deprecated Renamed to .fromMessages */
      static fromPromptMessages<RunInput extends InputValues = any>(promptMessages: (ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike)[]): ChatPromptTemplate<RunInput>;
  }

package/dist/prompts/chat.js
@@ -344,8 +344,14 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
          };
          return new ChatPromptTemplate(promptDict);
      }
+     /**
+      * Create a chat model-specific prompt from individual chat messages
+      * or message-like tuples.
+      * @param promptMessages Messages to be passed to the chat model
+      * @returns A new ChatPromptTemplate
+      */
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
-     static fromPromptMessages(promptMessages) {
+     static fromMessages(promptMessages) {
          const flattenedMessages = promptMessages.reduce((acc, promptMessage) => acc.concat(
          // eslint-disable-next-line no-instanceof/no-instanceof
          promptMessage instanceof ChatPromptTemplate
@@ -374,4 +380,9 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
              partialVariables: flattenedPartialVariables,
          });
      }
+     /** @deprecated Renamed to .fromMessages */
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     static fromPromptMessages(promptMessages) {
+         return this.fromMessages(promptMessages);
+     }
  }
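
The rename above is backward compatible: `fromPromptMessages` remains as a deprecated alias that delegates to `fromMessages`, so migration is mechanical:

    import { ChatPromptTemplate } from "langchain/prompts";

    // New spelling; message-like tuples are accepted, per the doc comment above.
    const prompt = ChatPromptTemplate.fromMessages([
      ["system", "You are a helpful assistant that translates {input_language} to {output_language}."],
      ["human", "{text}"],
    ]);

    // Old spelling still works but is deprecated.
    const legacy = ChatPromptTemplate.fromPromptMessages([
      ["system", "You are a helpful assistant."],
      ["human", "{text}"],
    ]);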

package/dist/schema/output_parser.cjs
@@ -162,20 +162,52 @@ class BytesOutputParser extends BaseTransformOutputParser {
  }
  exports.BytesOutputParser = BytesOutputParser;
  /**
-  * Custom error class used to handle exceptions related to output parsing.
-  * It extends the built-in `Error` class and adds an optional `output`
-  * property that can hold the output that caused the exception.
+  * Exception that output parsers should raise to signify a parsing error.
+  *
+  * This exists to differentiate parsing errors from other code or execution errors
+  * that also may arise inside the output parser. OutputParserExceptions will be
+  * available to catch and handle in ways to fix the parsing error, while other
+  * errors will be raised.
+  *
+  * @param message - The error that's being re-raised or an error message.
+  * @param llmOutput - String model output which is error-ing.
+  * @param observation - String explanation of error which can be passed to a
+  *   model to try and remediate the issue.
+  * @param sendToLLM - Whether to send the observation and llm_output back to an Agent
+  *   after an OutputParserException has been raised. This gives the underlying
+  *   model driving the agent the context that the previous output was improperly
+  *   structured, in the hopes that it will update the output to the correct
+  *   format.
  */
  class OutputParserException extends Error {
-     constructor(message, output) {
+     constructor(message, llmOutput, observation, sendToLLM = false) {
          super(message);
-         Object.defineProperty(this, "output", {
+         Object.defineProperty(this, "llmOutput", {
              enumerable: true,
              configurable: true,
              writable: true,
              value: void 0
          });
-         this.output = output;
+         Object.defineProperty(this, "observation", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "sendToLLM", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         this.llmOutput = llmOutput;
+         this.observation = observation;
+         this.sendToLLM = sendToLLM;
+         if (sendToLLM) {
+             if (observation === undefined || llmOutput === undefined) {
+                 throw new Error("Arguments 'observation' & 'llmOutput' are required if 'sendToLlm' is true");
+             }
+         }
      }
  }
  exports.OutputParserException = OutputParserException;
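
The expanded `OutputParserException` above lets a custom parser hand the failing output and a corrective hint back to an agent. A minimal sketch of raising it with `sendToLLM`, which per the constructor guard requires both `llmOutput` and `observation`; the parser function itself is illustrative:

    import { OutputParserException } from "langchain/schema/output_parser";

    function parseJson(text: string): Record<string, unknown> {
      try {
        return JSON.parse(text);
      } catch {
        throw new OutputParserException(
          `Failed to parse model output as JSON: ${text}`,
          text, // llmOutput: the raw text that failed to parse
          "Respond with a single valid JSON object and nothing else.", // observation
          true // sendToLLM: let an agent feed this back to the model
        );
      }
    }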