langchain 0.0.151 → 0.0.152

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/cache/cloudflare_kv.cjs +1 -0
  2. package/cache/cloudflare_kv.d.ts +1 -0
  3. package/cache/cloudflare_kv.js +1 -0
  4. package/dist/agents/chat/index.cjs +1 -1
  5. package/dist/agents/chat/index.js +1 -1
  6. package/dist/agents/chat_convo/index.cjs +1 -1
  7. package/dist/agents/chat_convo/index.js +1 -1
  8. package/dist/agents/openai/index.cjs +1 -1
  9. package/dist/agents/openai/index.js +1 -1
  10. package/dist/agents/structured_chat/index.cjs +1 -1
  11. package/dist/agents/structured_chat/index.js +1 -1
  12. package/dist/agents/xml/index.cjs +1 -1
  13. package/dist/agents/xml/index.js +1 -1
  14. package/dist/base_language/count_tokens.cjs +1 -0
  15. package/dist/base_language/count_tokens.js +1 -0
  16. package/dist/base_language/index.cjs +5 -3
  17. package/dist/base_language/index.d.ts +1 -1
  18. package/dist/base_language/index.js +4 -3
  19. package/dist/cache/cloudflare_kv.cjs +61 -0
  20. package/dist/cache/cloudflare_kv.d.ts +29 -0
  21. package/dist/cache/cloudflare_kv.js +57 -0
  22. package/dist/chains/openai_functions/openapi.cjs +1 -1
  23. package/dist/chains/openai_functions/openapi.js +1 -1
  24. package/dist/chains/question_answering/map_reduce_prompts.cjs +2 -3
  25. package/dist/chains/question_answering/map_reduce_prompts.js +2 -3
  26. package/dist/chains/question_answering/refine_prompts.cjs +2 -2
  27. package/dist/chains/question_answering/refine_prompts.js +2 -2
  28. package/dist/chains/question_answering/stuff_prompts.cjs +1 -2
  29. package/dist/chains/question_answering/stuff_prompts.js +1 -2
  30. package/dist/chat_models/ollama.cjs +3 -7
  31. package/dist/chat_models/ollama.d.ts +1 -1
  32. package/dist/chat_models/ollama.js +3 -7
  33. package/dist/document_loaders/web/pdf.cjs +87 -0
  34. package/dist/document_loaders/web/pdf.d.ts +17 -0
  35. package/dist/document_loaders/web/pdf.js +83 -0
  36. package/dist/evaluation/agents/prompt.cjs +2 -3
  37. package/dist/evaluation/agents/prompt.js +2 -3
  38. package/dist/experimental/plan_and_execute/prompt.cjs +1 -1
  39. package/dist/experimental/plan_and_execute/prompt.js +1 -1
  40. package/dist/llms/llama_cpp.cjs +10 -4
  41. package/dist/llms/llama_cpp.d.ts +2 -1
  42. package/dist/llms/llama_cpp.js +10 -4
  43. package/dist/llms/ollama.cjs +5 -6
  44. package/dist/llms/ollama.d.ts +2 -2
  45. package/dist/llms/ollama.js +5 -6
  46. package/dist/llms/openai.cjs +2 -3
  47. package/dist/llms/openai.js +2 -3
  48. package/dist/load/import_constants.cjs +3 -0
  49. package/dist/load/import_constants.js +3 -0
  50. package/dist/prompts/chat.cjs +12 -1
  51. package/dist/prompts/chat.d.ts +8 -0
  52. package/dist/prompts/chat.js +12 -1
  53. package/dist/stores/message/cloudflare_d1.cjs +134 -0
  54. package/dist/stores/message/cloudflare_d1.d.ts +49 -0
  55. package/dist/stores/message/cloudflare_d1.js +130 -0
  56. package/dist/types/openai-types.d.ts +2 -0
  57. package/document_loaders/web/pdf.cjs +1 -0
  58. package/document_loaders/web/pdf.d.ts +1 -0
  59. package/document_loaders/web/pdf.js +1 -0
  60. package/package.json +30 -1
  61. package/stores/message/cloudflare_d1.cjs +1 -0
  62. package/stores/message/cloudflare_d1.d.ts +1 -0
  63. package/stores/message/cloudflare_d1.js +1 -0
@@ -0,0 +1,83 @@
1
+ import { getDocument, version, } from "pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js";
2
+ import { Document } from "../../document.js";
3
+ import { BaseDocumentLoader } from "../base.js";
4
+ /**
5
+ * A document loader for loading data from PDFs.
6
+ */
7
+ export class WebPDFLoader extends BaseDocumentLoader {
8
+ constructor(blob, { splitPages = true } = {}) {
9
+ super();
10
+ Object.defineProperty(this, "blob", {
11
+ enumerable: true,
12
+ configurable: true,
13
+ writable: true,
14
+ value: void 0
15
+ });
16
+ Object.defineProperty(this, "splitPages", {
17
+ enumerable: true,
18
+ configurable: true,
19
+ writable: true,
20
+ value: true
21
+ });
22
+ this.blob = blob;
23
+ this.splitPages = splitPages ?? this.splitPages;
24
+ }
25
+ /**
26
+ * Loads the contents of the PDF as documents.
27
+ * @returns An array of Documents representing the retrieved data.
28
+ */
29
+ async load() {
30
+ const parsedPdf = await getDocument({
31
+ data: new Uint8Array(await this.blob.arrayBuffer()),
32
+ useWorkerFetch: false,
33
+ isEvalSupported: false,
34
+ useSystemFonts: true,
35
+ }).promise;
36
+ const meta = await parsedPdf.getMetadata().catch(() => null);
37
+ const documents = [];
38
+ for (let i = 1; i <= parsedPdf.numPages; i += 1) {
39
+ const page = await parsedPdf.getPage(i);
40
+ const content = await page.getTextContent();
41
+ if (content.items.length === 0) {
42
+ continue;
43
+ }
44
+ const text = content.items
45
+ .map((item) => item.str)
46
+ .join("\n");
47
+ documents.push(new Document({
48
+ pageContent: text,
49
+ metadata: {
50
+ pdf: {
51
+ version,
52
+ info: meta?.info,
53
+ metadata: meta?.metadata,
54
+ totalPages: parsedPdf.numPages,
55
+ },
56
+ loc: {
57
+ pageNumber: i,
58
+ },
59
+ },
60
+ }));
61
+ }
62
+ if (this.splitPages) {
63
+ return documents;
64
+ }
65
+ if (documents.length === 0) {
66
+ return [];
67
+ }
68
+ return [
69
+ new Document({
70
+ pageContent: documents.map((doc) => doc.pageContent).join("\n\n"),
71
+ metadata: {
72
+ pdf: {
73
+ version,
74
+ info: meta?.info,
75
+ metadata: meta?.metadata,
76
+ totalPages: parsedPdf.numPages,
77
+ },
78
+ },
79
+ }),
80
+ ];
81
+ return documents;
82
+ }
83
+ }
@@ -90,8 +90,7 @@ The model did not use the appropriate tools to answer the question.\
90
90
  Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
91
91
 
92
92
  Score: 2`;
93
- exports.EVAL_CHAT_PROMPT =
94
- /* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
93
+ exports.EVAL_CHAT_PROMPT = index_js_1.ChatPromptTemplate.fromMessages([
95
94
  /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
96
95
  /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
97
96
  /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
@@ -124,7 +123,7 @@ i. Is the final answer helpful?
124
123
  iv. Does the AI language model use too many steps to answer the question?
125
124
  v. Are the appropriate tools used to answer the question?`;
126
125
  exports.TOOL_FREE_EVAL_CHAT_PROMPT =
127
- /* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
126
+ /* #__PURE__ */ index_js_1.ChatPromptTemplate.fromMessages([
128
127
  /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
129
128
  /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
130
129
  /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
@@ -87,8 +87,7 @@ The model did not use the appropriate tools to answer the question.\
87
87
  Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
88
88
 
89
89
  Score: 2`;
90
- export const EVAL_CHAT_PROMPT =
91
- /* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
90
+ export const EVAL_CHAT_PROMPT = /* #__PURE__ */ ChatPromptTemplate.fromMessages([
92
91
  /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
93
92
  /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
94
93
  /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
@@ -121,7 +120,7 @@ i. Is the final answer helpful?
121
120
  iv. Does the AI language model use too many steps to answer the question?
122
121
  v. Are the appropriate tools used to answer the question?`;
123
122
  export const TOOL_FREE_EVAL_CHAT_PROMPT =
124
- /* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
123
+ /* #__PURE__ */ ChatPromptTemplate.fromMessages([
125
124
  /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
126
125
  /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
127
126
  /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
@@ -14,7 +14,7 @@ exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
14
14
  `At the end of your plan, say "<END_OF_PLAN>"`,
15
15
  ].join(" ");
16
16
  exports.PLANNER_CHAT_PROMPT =
17
- /* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromPromptMessages([
17
+ /* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
18
18
  /* #__PURE__ */ chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
19
19
  /* #__PURE__ */ chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
20
20
  ]);
@@ -11,7 +11,7 @@ export const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
11
11
  `At the end of your plan, say "<END_OF_PLAN>"`,
12
12
  ].join(" ");
13
13
  export const PLANNER_CHAT_PROMPT =
14
- /* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
14
+ /* #__PURE__ */ ChatPromptTemplate.fromMessages([
15
15
  /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
16
16
  /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(`{input}`),
17
17
  ]);
@@ -99,6 +99,12 @@ class LlamaCpp extends base_js_1.LLM {
99
99
  writable: true,
100
100
  value: void 0
101
101
  });
102
+ Object.defineProperty(this, "_session", {
103
+ enumerable: true,
104
+ configurable: true,
105
+ writable: true,
106
+ value: void 0
107
+ });
102
108
  this.batchSize = inputs.batchSize;
103
109
  this.contextSize = inputs.contextSize;
104
110
  this.embedding = inputs.embedding;
@@ -113,19 +119,19 @@ class LlamaCpp extends base_js_1.LLM {
113
119
  this.vocabOnly = inputs.vocabOnly;
114
120
  this._model = new node_llama_cpp_1.LlamaModel(inputs);
115
121
  this._context = new node_llama_cpp_1.LlamaContext({ model: this._model });
122
+ this._session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
116
123
  }
117
124
  _llmType() {
118
125
  return "llama2_cpp";
119
126
  }
120
127
  /** @ignore */
121
128
  async _call(prompt, options) {
122
- const session = new node_llama_cpp_1.LlamaChatSession({ context: this._context });
123
129
  try {
124
- const compleation = await session.prompt(prompt, options);
125
- return compleation;
130
+ const completion = await this._session.prompt(prompt, options);
131
+ return completion;
126
132
  }
127
133
  catch (e) {
128
- throw new Error("Error getting prompt compleation.");
134
+ throw new Error("Error getting prompt completion.");
129
135
  }
130
136
  }
131
137
  }
@@ -1,4 +1,4 @@
1
- import { LlamaModel, LlamaContext } from "node-llama-cpp";
1
+ import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
2
2
  import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
3
3
  /**
4
4
  * Note that the modelPath is the only required parameter. For testing you
@@ -65,6 +65,7 @@ export declare class LlamaCpp extends LLM<LlamaCppCallOptions> {
65
65
  modelPath: string;
66
66
  _model: LlamaModel;
67
67
  _context: LlamaContext;
68
+ _session: LlamaChatSession;
68
69
  static lc_name(): string;
69
70
  constructor(inputs: LlamaCppInputs);
70
71
  _llmType(): string;
@@ -96,6 +96,12 @@ export class LlamaCpp extends LLM {
96
96
  writable: true,
97
97
  value: void 0
98
98
  });
99
+ Object.defineProperty(this, "_session", {
100
+ enumerable: true,
101
+ configurable: true,
102
+ writable: true,
103
+ value: void 0
104
+ });
99
105
  this.batchSize = inputs.batchSize;
100
106
  this.contextSize = inputs.contextSize;
101
107
  this.embedding = inputs.embedding;
@@ -110,19 +116,19 @@ export class LlamaCpp extends LLM {
110
116
  this.vocabOnly = inputs.vocabOnly;
111
117
  this._model = new LlamaModel(inputs);
112
118
  this._context = new LlamaContext({ model: this._model });
119
+ this._session = new LlamaChatSession({ context: this._context });
113
120
  }
114
121
  _llmType() {
115
122
  return "llama2_cpp";
116
123
  }
117
124
  /** @ignore */
118
125
  async _call(prompt, options) {
119
- const session = new LlamaChatSession({ context: this._context });
120
126
  try {
121
- const compleation = await session.prompt(prompt, options);
122
- return compleation;
127
+ const completion = await this._session.prompt(prompt, options);
128
+ return completion;
123
129
  }
124
130
  catch (e) {
125
- throw new Error("Error getting prompt compleation.");
131
+ throw new Error("Error getting prompt completion.");
126
132
  }
127
133
  }
128
134
  }
@@ -287,8 +287,8 @@ class Ollama extends base_js_1.LLM {
287
287
  },
288
288
  };
289
289
  }
290
- async *_streamResponseChunks(input, options, runManager) {
291
- const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt: input }, options));
290
+ async *_streamResponseChunks(prompt, options, runManager) {
291
+ const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
292
292
  for await (const chunk of stream) {
293
293
  yield new index_js_1.GenerationChunk({
294
294
  text: chunk.response,
@@ -301,11 +301,10 @@ class Ollama extends base_js_1.LLM {
301
301
  }
302
302
  }
303
303
  /** @ignore */
304
- async _call(prompt, options) {
305
- const stream = await this.caller.call(async () => (0, ollama_js_1.createOllamaStream)(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
304
+ async _call(prompt, options, runManager) {
306
305
  const chunks = [];
307
- for await (const chunk of stream) {
308
- chunks.push(chunk.response);
306
+ for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
307
+ chunks.push(chunk.text);
309
308
  }
310
309
  return chunks.join("");
311
310
  }
@@ -79,7 +79,7 @@ export declare class Ollama extends LLM implements OllamaInput {
79
79
  vocab_only: boolean | undefined;
80
80
  };
81
81
  };
82
- _streamResponseChunks(input: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
82
+ _streamResponseChunks(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
83
83
  /** @ignore */
84
- _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
84
+ _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
85
85
  }
@@ -284,8 +284,8 @@ export class Ollama extends LLM {
284
284
  },
285
285
  };
286
286
  }
287
- async *_streamResponseChunks(input, options, runManager) {
288
- const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt: input }, options));
287
+ async *_streamResponseChunks(prompt, options, runManager) {
288
+ const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
289
289
  for await (const chunk of stream) {
290
290
  yield new GenerationChunk({
291
291
  text: chunk.response,
@@ -298,11 +298,10 @@ export class Ollama extends LLM {
298
298
  }
299
299
  }
300
300
  /** @ignore */
301
- async _call(prompt, options) {
302
- const stream = await this.caller.call(async () => createOllamaStream(this.baseUrl, { ...this.invocationParams(options), prompt }, options));
301
+ async _call(prompt, options, runManager) {
303
302
  const chunks = [];
304
- for await (const chunk of stream) {
305
- chunks.push(chunk.response);
303
+ for await (const chunk of this._streamResponseChunks(prompt, options, runManager)) {
304
+ chunks.push(chunk.text);
306
305
  }
307
306
  return chunks.join("");
308
307
  }
@@ -57,9 +57,8 @@ class OpenAI extends base_js_1.BaseLLM {
57
57
  /** @deprecated */
58
58
  configuration) {
59
59
  if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
60
- fields?.modelName?.startsWith("gpt-4") ||
61
- fields?.modelName?.startsWith("gpt-4-32k")) &&
62
- !fields?.modelName.endsWith("-instruct")) {
60
+ fields?.modelName?.startsWith("gpt-4")) &&
61
+ !fields?.modelName?.includes("-instruct")) {
63
62
  // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
64
63
  return new openai_chat_js_1.OpenAIChat(fields, configuration);
65
64
  }
@@ -54,9 +54,8 @@ export class OpenAI extends BaseLLM {
54
54
  /** @deprecated */
55
55
  configuration) {
56
56
  if ((fields?.modelName?.startsWith("gpt-3.5-turbo") ||
57
- fields?.modelName?.startsWith("gpt-4") ||
58
- fields?.modelName?.startsWith("gpt-4-32k")) &&
59
- !fields?.modelName.endsWith("-instruct")) {
57
+ fields?.modelName?.startsWith("gpt-4")) &&
58
+ !fields?.modelName?.includes("-instruct")) {
60
59
  // eslint-disable-next-line no-constructor-return, @typescript-eslint/no-explicit-any
61
60
  return new OpenAIChat(fields, configuration);
62
61
  }
@@ -75,6 +75,7 @@ exports.optionalImportEntrypoints = [
75
75
  "langchain/document_loaders/web/github",
76
76
  "langchain/document_loaders/web/notiondb",
77
77
  "langchain/document_loaders/web/notionapi",
78
+ "langchain/document_loaders/web/pdf",
78
79
  "langchain/document_loaders/web/recursive_url",
79
80
  "langchain/document_loaders/web/s3",
80
81
  "langchain/document_loaders/web/sonix_audio",
@@ -110,12 +111,14 @@ exports.optionalImportEntrypoints = [
110
111
  "langchain/retrievers/self_query/pinecone",
111
112
  "langchain/retrievers/self_query/supabase",
112
113
  "langchain/retrievers/self_query/weaviate",
114
+ "langchain/cache/cloudflare_kv",
113
115
  "langchain/cache/momento",
114
116
  "langchain/cache/redis",
115
117
  "langchain/cache/ioredis",
116
118
  "langchain/cache/upstash_redis",
117
119
  "langchain/stores/doc/gcs",
118
120
  "langchain/stores/file/node",
121
+ "langchain/stores/message/cloudflare_d1",
119
122
  "langchain/stores/message/dynamodb",
120
123
  "langchain/stores/message/firestore",
121
124
  "langchain/stores/message/momento",
@@ -72,6 +72,7 @@ export const optionalImportEntrypoints = [
72
72
  "langchain/document_loaders/web/github",
73
73
  "langchain/document_loaders/web/notiondb",
74
74
  "langchain/document_loaders/web/notionapi",
75
+ "langchain/document_loaders/web/pdf",
75
76
  "langchain/document_loaders/web/recursive_url",
76
77
  "langchain/document_loaders/web/s3",
77
78
  "langchain/document_loaders/web/sonix_audio",
@@ -107,12 +108,14 @@ export const optionalImportEntrypoints = [
107
108
  "langchain/retrievers/self_query/pinecone",
108
109
  "langchain/retrievers/self_query/supabase",
109
110
  "langchain/retrievers/self_query/weaviate",
111
+ "langchain/cache/cloudflare_kv",
110
112
  "langchain/cache/momento",
111
113
  "langchain/cache/redis",
112
114
  "langchain/cache/ioredis",
113
115
  "langchain/cache/upstash_redis",
114
116
  "langchain/stores/doc/gcs",
115
117
  "langchain/stores/file/node",
118
+ "langchain/stores/message/cloudflare_d1",
116
119
  "langchain/stores/message/dynamodb",
117
120
  "langchain/stores/message/firestore",
118
121
  "langchain/stores/message/momento",
@@ -356,8 +356,14 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
356
356
  };
357
357
  return new ChatPromptTemplate(promptDict);
358
358
  }
359
+ /**
360
+ * Create a chat model-specific prompt from individual chat messages
361
+ * or message-like tuples.
362
+ * @param promptMessages Messages to be passed to the chat model
363
+ * @returns A new ChatPromptTemplate
364
+ */
359
365
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
360
- static fromPromptMessages(promptMessages) {
366
+ static fromMessages(promptMessages) {
361
367
  const flattenedMessages = promptMessages.reduce((acc, promptMessage) => acc.concat(
362
368
  // eslint-disable-next-line no-instanceof/no-instanceof
363
369
  promptMessage instanceof ChatPromptTemplate
@@ -386,5 +392,10 @@ class ChatPromptTemplate extends BaseChatPromptTemplate {
386
392
  partialVariables: flattenedPartialVariables,
387
393
  });
388
394
  }
395
+ /** @deprecated Renamed to .fromMessages */
396
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
397
+ static fromPromptMessages(promptMessages) {
398
+ return this.fromMessages(promptMessages);
399
+ }
389
400
  }
390
401
  exports.ChatPromptTemplate = ChatPromptTemplate;
@@ -169,5 +169,13 @@ export declare class ChatPromptTemplate<RunInput extends InputValues = any, Part
169
169
  _getPromptType(): "chat";
170
170
  formatMessages(values: TypedPromptInputValues<RunInput>): Promise<BaseMessage[]>;
171
171
  partial<NewPartialVariableName extends string>(values: PartialValues<NewPartialVariableName>): Promise<ChatPromptTemplate<InputValues<Exclude<Extract<keyof RunInput, string>, NewPartialVariableName>>, any>>;
172
+ /**
173
+ * Create a chat model-specific prompt from individual chat messages
174
+ * or message-like tuples.
175
+ * @param promptMessages Messages to be passed to the chat model
176
+ * @returns A new ChatPromptTemplate
177
+ */
178
+ static fromMessages<RunInput extends InputValues = any>(promptMessages: (ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike)[]): ChatPromptTemplate<RunInput>;
179
+ /** @deprecated Renamed to .fromMessages */
172
180
  static fromPromptMessages<RunInput extends InputValues = any>(promptMessages: (ChatPromptTemplate<InputValues, string> | BaseMessagePromptTemplateLike)[]): ChatPromptTemplate<RunInput>;
173
181
  }
@@ -344,8 +344,14 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
344
344
  };
345
345
  return new ChatPromptTemplate(promptDict);
346
346
  }
347
+ /**
348
+ * Create a chat model-specific prompt from individual chat messages
349
+ * or message-like tuples.
350
+ * @param promptMessages Messages to be passed to the chat model
351
+ * @returns A new ChatPromptTemplate
352
+ */
347
353
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
348
- static fromPromptMessages(promptMessages) {
354
+ static fromMessages(promptMessages) {
349
355
  const flattenedMessages = promptMessages.reduce((acc, promptMessage) => acc.concat(
350
356
  // eslint-disable-next-line no-instanceof/no-instanceof
351
357
  promptMessage instanceof ChatPromptTemplate
@@ -374,4 +380,9 @@ export class ChatPromptTemplate extends BaseChatPromptTemplate {
374
380
  partialVariables: flattenedPartialVariables,
375
381
  });
376
382
  }
383
+ /** @deprecated Renamed to .fromMessages */
384
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
385
+ static fromPromptMessages(promptMessages) {
386
+ return this.fromMessages(promptMessages);
387
+ }
377
388
  }
@@ -0,0 +1,134 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.CloudflareD1MessageHistory = void 0;
4
+ const uuid_1 = require("uuid");
5
+ const index_js_1 = require("../../schema/index.cjs");
6
+ const utils_js_1 = require("./utils.cjs");
7
/**
 * Class for storing and retrieving chat message history from a
 * Cloudflare D1 database. Extends the BaseListChatMessageHistory class.
 */
class CloudflareD1MessageHistory extends index_js_1.BaseListChatMessageHistory {
    constructor(fields) {
        super(fields);
        Object.defineProperty(this, "lc_namespace", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: ["langchain", "stores", "message", "cloudflare_d1"]
        });
        // Bound Cloudflare D1 database handle (required; constructor throws
        // below if it is absent).
        Object.defineProperty(this, "database", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Table that holds the messages; created lazily by ensureTable().
        Object.defineProperty(this, "tableName", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Partitions rows so multiple conversations can share one table.
        Object.defineProperty(this, "sessionId", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Guards against re-running the DDL statements on every call.
        Object.defineProperty(this, "tableInitialized", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        const { sessionId, database, tableName } = fields;
        if (database) {
            this.database = database;
        }
        else {
            throw new Error("Either a client or config must be provided to CloudflareD1MessageHistory");
        }
        this.tableName = tableName || "langchain_chat_histories";
        this.tableInitialized = false;
        this.sessionId = sessionId;
    }
    /**
     * Private method to ensure that the necessary table exists in the
     * Cloudflare D1 database before performing any operations. If the table
     * does not exist, it is created.
     * @returns Promise that resolves to void.
     */
    async ensureTable() {
        if (this.tableInitialized) {
            return;
        }
        const query = `CREATE TABLE IF NOT EXISTS ${this.tableName} (id TEXT PRIMARY KEY, session_id TEXT, type TEXT, content TEXT, role TEXT, name TEXT, additional_kwargs TEXT);`;
        await this.database.prepare(query).bind().all();
        const idIndexQuery = `CREATE INDEX IF NOT EXISTS id_index ON ${this.tableName} (id);`;
        await this.database.prepare(idIndexQuery).bind().all();
        const sessionIdIndexQuery = `CREATE INDEX IF NOT EXISTS session_id_index ON ${this.tableName} (session_id);`;
        await this.database.prepare(sessionIdIndexQuery).bind().all();
        this.tableInitialized = true;
    }
    /**
     * Method to retrieve all messages from the Cloudflare D1 database for the
     * current session.
     * @returns Promise that resolves to an array of BaseMessage objects.
     */
    async getMessages() {
        await this.ensureTable();
        const query = `SELECT * FROM ${this.tableName} WHERE session_id = ?`;
        const rawStoredMessages = await this.database
            .prepare(query)
            .bind(this.sessionId)
            .all();
        const storedMessagesObject = rawStoredMessages.results;
        const orderedMessages = storedMessagesObject.map((message) => {
            const data = {
                content: message.content,
                // additional_kwargs is persisted as a JSON string; rehydrate it.
                additional_kwargs: JSON.parse(message.additional_kwargs),
            };
            // role/name columns are nullable; only copy them when present.
            if (message.role) {
                data.role = message.role;
            }
            if (message.name) {
                data.name = message.name;
            }
            return {
                type: message.type,
                data,
            };
        });
        return (0, utils_js_1.mapStoredMessagesToChatMessages)(orderedMessages);
    }
    /**
     * Method to add a new message to the Cloudflare D1 database for the current
     * session.
     * @param message The BaseMessage object to be added to the database.
     * @returns Promise that resolves to void.
     */
    async addMessage(message) {
        await this.ensureTable();
        const messageToAdd = (0, utils_js_1.mapChatMessagesToStoredMessages)([message]);
        const query = `INSERT INTO ${this.tableName} (id, session_id, type, content, role, name, additional_kwargs) VALUES(?, ?, ?, ?, ?, ?, ?)`;
        const id = (0, uuid_1.v4)();
        await this.database
            .prepare(query)
            .bind(id, this.sessionId, messageToAdd[0].type || null, messageToAdd[0].data.content || null, messageToAdd[0].data.role || null, messageToAdd[0].data.name || null, JSON.stringify(messageToAdd[0].data.additional_kwargs))
            .all();
    }
    /**
     * Method to delete all messages from the Cloudflare D1 database for the
     * current session.
     * @returns Promise that resolves to void.
     */
    async clear() {
        await this.ensureTable();
        // BUG FIX: SQLite/D1 bound parameters can only stand in for values,
        // never identifiers, so the previous `DELETE FROM ?` statement could
        // not be prepared. Interpolate the table name (as every other query in
        // this class does) and bind only the session id.
        const query = `DELETE FROM ${this.tableName} WHERE session_id = ?`;
        await this.database
            .prepare(query)
            .bind(this.sessionId)
            .all();
    }
}
134
+ exports.CloudflareD1MessageHistory = CloudflareD1MessageHistory;
@@ -0,0 +1,49 @@
1
+ import type { D1Database } from "@cloudflare/workers-types";
2
+ import { BaseMessage, BaseListChatMessageHistory } from "../../schema/index.js";
3
/**
 * Type definition for the input parameters required when instantiating a
 * CloudflareD1MessageHistory object.
 */
export type CloudflareD1MessageHistoryInput = {
    /** Table to store messages in; defaults to "langchain_chat_histories". */
    tableName?: string;
    /** Identifier that partitions messages per conversation. */
    sessionId: string;
    /**
     * Bound Cloudflare D1 database. Optional in the type, but the
     * constructor throws if it is not provided.
     */
    database?: D1Database;
};
12
/**
 * Class for storing and retrieving chat message history from a
 * Cloudflare D1 database. Extends the BaseListChatMessageHistory class.
 */
export declare class CloudflareD1MessageHistory extends BaseListChatMessageHistory {
    /** Serialization namespace: ["langchain", "stores", "message", "cloudflare_d1"]. */
    lc_namespace: string[];
    /** Bound Cloudflare D1 database handle used for all queries. */
    database: D1Database;
    private tableName;
    private sessionId;
    private tableInitialized;
    constructor(fields: CloudflareD1MessageHistoryInput);
    /**
     * Private method to ensure that the necessary table exists in the
     * Cloudflare D1 database before performing any operations. If the table
     * does not exist, it is created.
     * @returns Promise that resolves to void.
     */
    private ensureTable;
    /**
     * Method to retrieve all messages from the Cloudflare D1 database for the
     * current session.
     * @returns Promise that resolves to an array of BaseMessage objects.
     */
    getMessages(): Promise<BaseMessage[]>;
    /**
     * Method to add a new message to the Cloudflare D1 database for the current
     * session.
     * @param message The BaseMessage object to be added to the database.
     * @returns Promise that resolves to void.
     */
    addMessage(message: BaseMessage): Promise<void>;
    /**
     * Method to delete all messages from the Cloudflare D1 database for the
     * current session.
     * @returns Promise that resolves to void.
     */
    clear(): Promise<void>;
}
+ }