langchain 0.0.167 → 0.0.169

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90):
  1. package/README.md +4 -4
  2. package/chat_models/cloudflare_workersai.cjs +1 -0
  3. package/chat_models/cloudflare_workersai.d.ts +1 -0
  4. package/chat_models/cloudflare_workersai.js +1 -0
  5. package/chat_models/fake.cjs +1 -0
  6. package/chat_models/fake.d.ts +1 -0
  7. package/chat_models/fake.js +1 -0
  8. package/chat_models/yandex.cjs +1 -0
  9. package/chat_models/yandex.d.ts +1 -0
  10. package/chat_models/yandex.js +1 -0
  11. package/dist/agents/chat/index.cjs +3 -2
  12. package/dist/agents/chat/index.d.ts +3 -0
  13. package/dist/agents/chat/index.js +3 -2
  14. package/dist/callbacks/handlers/llmonitor.cjs +21 -17
  15. package/dist/callbacks/handlers/llmonitor.js +21 -17
  16. package/dist/chat_models/cloudflare_workersai.cjs +145 -0
  17. package/dist/chat_models/cloudflare_workersai.d.ts +46 -0
  18. package/dist/chat_models/cloudflare_workersai.js +141 -0
  19. package/dist/chat_models/fake.cjs +101 -0
  20. package/dist/chat_models/fake.d.ts +36 -0
  21. package/dist/chat_models/fake.js +97 -0
  22. package/dist/chat_models/yandex.cjs +117 -0
  23. package/dist/chat_models/yandex.d.ts +16 -0
  24. package/dist/chat_models/yandex.js +113 -0
  25. package/dist/evaluation/comparison/prompt.d.ts +2 -2
  26. package/dist/experimental/chains/violation_of_expectations/index.cjs +5 -0
  27. package/dist/experimental/chains/violation_of_expectations/index.d.ts +1 -0
  28. package/dist/experimental/chains/violation_of_expectations/index.js +1 -0
  29. package/dist/experimental/chains/violation_of_expectations/types.cjs +49 -0
  30. package/dist/experimental/chains/violation_of_expectations/types.d.ts +69 -0
  31. package/dist/experimental/chains/violation_of_expectations/types.js +46 -0
  32. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +328 -0
  33. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.d.ts +148 -0
  34. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +324 -0
  35. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.cjs +49 -0
  36. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.d.ts +5 -0
  37. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_prompt.js +46 -0
  38. package/dist/llms/cloudflare_workersai.cjs +124 -0
  39. package/dist/llms/cloudflare_workersai.d.ts +49 -0
  40. package/dist/llms/cloudflare_workersai.js +120 -0
  41. package/dist/llms/fake.cjs +82 -0
  42. package/dist/llms/fake.d.ts +31 -0
  43. package/dist/llms/fake.js +78 -0
  44. package/dist/llms/sagemaker_endpoint.cjs +9 -7
  45. package/dist/llms/sagemaker_endpoint.d.ts +3 -3
  46. package/dist/llms/sagemaker_endpoint.js +9 -7
  47. package/dist/load/import_constants.cjs +2 -0
  48. package/dist/load/import_constants.js +2 -0
  49. package/dist/load/import_map.cjs +8 -2
  50. package/dist/load/import_map.d.ts +6 -0
  51. package/dist/load/import_map.js +6 -0
  52. package/dist/retrievers/zep.cjs +29 -3
  53. package/dist/retrievers/zep.d.ts +14 -0
  54. package/dist/retrievers/zep.js +29 -3
  55. package/dist/util/axios-fetch-adapter.cjs +1 -1
  56. package/dist/util/axios-fetch-adapter.js +1 -1
  57. package/dist/util/env.cjs +1 -1
  58. package/dist/util/env.js +1 -1
  59. package/dist/util/event-source-parse.cjs +1 -1
  60. package/dist/util/event-source-parse.js +1 -1
  61. package/dist/vectorstores/closevector/common.cjs +128 -0
  62. package/dist/vectorstores/closevector/common.d.ts +82 -0
  63. package/dist/vectorstores/closevector/common.js +124 -0
  64. package/dist/vectorstores/closevector/node.cjs +109 -0
  65. package/dist/vectorstores/closevector/node.d.ts +83 -0
  66. package/dist/vectorstores/closevector/node.js +105 -0
  67. package/dist/vectorstores/closevector/web.cjs +109 -0
  68. package/dist/vectorstores/closevector/web.d.ts +80 -0
  69. package/dist/vectorstores/closevector/web.js +105 -0
  70. package/dist/vectorstores/faiss.cjs +38 -6
  71. package/dist/vectorstores/faiss.d.ts +14 -2
  72. package/dist/vectorstores/faiss.js +38 -6
  73. package/dist/vectorstores/weaviate.cjs +13 -2
  74. package/dist/vectorstores/weaviate.js +13 -2
  75. package/experimental/chains/violation_of_expectations.cjs +1 -0
  76. package/experimental/chains/violation_of_expectations.d.ts +1 -0
  77. package/experimental/chains/violation_of_expectations.js +1 -0
  78. package/llms/cloudflare_workersai.cjs +1 -0
  79. package/llms/cloudflare_workersai.d.ts +1 -0
  80. package/llms/cloudflare_workersai.js +1 -0
  81. package/llms/fake.cjs +1 -0
  82. package/llms/fake.d.ts +1 -0
  83. package/llms/fake.js +1 -0
  84. package/package.json +92 -13
  85. package/vectorstores/closevector/node.cjs +1 -0
  86. package/vectorstores/closevector/node.d.ts +1 -0
  87. package/vectorstores/closevector/node.js +1 -0
  88. package/vectorstores/closevector/web.cjs +1 -0
  89. package/vectorstores/closevector/web.d.ts +1 -0
  90. package/vectorstores/closevector/web.js +1 -0
import { JsonOutputFunctionsParser } from "../../../output_parsers/openai_functions.js";
import { HumanMessage, isBaseMessage, } from "../../../schema/index.js";
import { StringOutputParser } from "../../../schema/output_parser.js";
import { BaseChain } from "../../../chains/base.js";
import { PREDICTION_VIOLATIONS_FUNCTION, PREDICT_NEXT_USER_MESSAGE_FUNCTION, } from "./types.js";
import { GENERATE_FACTS_PROMPT, GENERATE_REVISED_PREDICTION_PROMPT, PREDICTION_VIOLATIONS_PROMPT, PREDICT_NEXT_USER_MESSAGE_PROMPT, } from "./violation_of_expectations_prompt.js";
/**
 * Chain that generates key insights/facts about a user based on a
 * chat conversation with an AI.
 */
export class ViolationOfExpectationsChain extends BaseChain {
    static lc_name() {
        return "ViolationOfExpectationsChain";
    }
    _chainType() {
        return "violation_of_expectation_chain";
    }
    get inputKeys() {
        return [this.chatHistoryKey];
    }
    get outputKeys() {
        return [this.thoughtsKey];
    }
    constructor(fields) {
        super(fields);
        // Input key under which the chat history must be supplied.
        Object.defineProperty(this, "chatHistoryKey", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "chat_history"
        });
        // Output key under which the generated insights are returned.
        Object.defineProperty(this, "thoughtsKey", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "thoughts"
        });
        Object.defineProperty(this, "retriever", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "llm", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "jsonOutputParser", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "stringOutputParser", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        this.retriever = fields.retriever;
        this.llm = fields.llm;
        this.jsonOutputParser = new JsonOutputFunctionsParser();
        this.stringOutputParser = new StringOutputParser();
    }
    /**
     * Serializes a list of messages into a "Human:"/"AI:" transcript string,
     * one message per line. Non human/AI messages are rendered as bare content.
     */
    getChatHistoryString(chatHistory) {
        return chatHistory
            .map((chatMessage) => {
            if (chatMessage._getType() === "human") {
                return `Human: ${chatMessage.content}`;
            }
            else if (chatMessage._getType() === "ai") {
                return `AI: ${chatMessage.content}`;
            }
            else {
                return `${chatMessage.content}`;
            }
        })
            .join("\n");
    }
    /** Removes duplicate strings, preserving first-seen order. */
    removeDuplicateStrings(strings) {
        return [...new Set(strings)];
    }
    /**
     * This method breaks down the chat history into chunks of messages.
     * Each chunk consists of a sequence of messages ending with an AI message and the subsequent user response, if any.
     *
     * @param {BaseMessage[]} chatHistory - The chat history to be chunked.
     *
     * @returns {MessageChunkResult[]} An array of message chunks. Each chunk includes a sequence of messages and the subsequent user response.
     *
     * @description
     * The method iterates over the chat history and pushes each message into a temporary array.
     * When it encounters an AI message, it checks for a subsequent user message.
     * If a user message is found, it is considered as the user response to the AI message.
     * If no user message is found after the AI message, the user response is undefined.
     * The method then pushes the chunk (sequence of messages and user response) into the result array.
     * This process continues until all messages in the chat history have been processed.
     */
    chunkMessagesByAIResponse(chatHistory) {
        const newArray = [];
        const tempArray = [];
        chatHistory.forEach((item, index) => {
            tempArray.push(item);
            if (item._getType() === "ai") {
                let userResponse = chatHistory[index + 1];
                if (!userResponse || userResponse._getType() !== "human") {
                    userResponse = undefined;
                }
                newArray.push({
                    // Snapshot the accumulated messages. Pushing `tempArray`
                    // itself would store a live reference, so every chunk
                    // would end up containing the *entire* chat history —
                    // including messages that come after this AI message.
                    chunkedMessages: [...tempArray],
                    userResponse: userResponse
                        ? new HumanMessage(userResponse)
                        : undefined,
                });
            }
        });
        return newArray;
    }
    /**
     * This method processes a chat history to generate insights about the user.
     *
     * @param {ChainValues} values - The input values for the chain. It should contain a key for chat history.
     * @param {CallbackManagerForChainRun} [runManager] - Optional callback manager for the chain run.
     *
     * @returns {Promise<ChainValues>} A promise that resolves to a list of insights about the user.
     *
     * @throws {Error} If the chat history key is not found in the input values or if the chat history is not an array of BaseMessages.
     *
     * @description
     * The method performs the following steps:
     * 1. Checks if the chat history key is present in the input values and if the chat history is an array of BaseMessages.
     * 2. Breaks the chat history into chunks of messages.
     * 3. For each chunk, it generates an initial prediction for the user's next message.
     * 4. For each prediction, it generates insights and prediction violations, and regenerates the prediction based on the violations.
     * 5. For each set of messages, it generates a fact/insight about the user.
     * The method returns a list of these insights.
     */
    async _call(values, runManager) {
        if (!(this.chatHistoryKey in values)) {
            throw new Error(`Chat history key ${this.chatHistoryKey} not found`);
        }
        const chatHistory = values[this.chatHistoryKey];
        const isEveryMessageBaseMessage = chatHistory.every((message) => isBaseMessage(message));
        if (!isEveryMessageBaseMessage) {
            throw new Error("Chat history must be an array of BaseMessages");
        }
        const messageChunks = this.chunkMessagesByAIResponse(chatHistory);
        // Generate the initial prediction for every user message.
        // Pass the run manager through so nested LLM calls are traced under
        // this chain's run.
        const userPredictions = await Promise.all(messageChunks.map(async (chatHistoryChunk) => ({
            userPredictions: await this.predictNextUserMessage(chatHistoryChunk.chunkedMessages, runManager),
            userResponse: chatHistoryChunk.userResponse,
            runManager,
        })));
        // Generate insights, and prediction violations for every user message.
        // This call also regenerates the prediction based on the violations.
        const predictionViolations = await Promise.all(userPredictions.map((prediction) => this.getPredictionViolations({
            userPredictions: prediction.userPredictions,
            userResponse: prediction.userResponse,
            runManager,
        })));
        // Generate a fact/insight about the user for every set of messages.
        const insights = await Promise.all(predictionViolations.map((violation) => this.generateFacts({
            userResponse: violation.userResponse,
            predictions: {
                revisedPrediction: violation.revisedPrediction,
                explainedPredictionErrors: violation.explainedPredictionErrors,
            },
        })));
        return {
            insights,
        };
    }
    /**
     * This method predicts the next user message based on the chat history.
     *
     * @param {BaseMessage[]} chatHistory - The chat history based on which the next user message is predicted.
     * @param {CallbackManagerForChainRun} [runManager] - Optional callback manager for the chain run.
     *
     * @returns {Promise<PredictNextUserMessageResponse>} A promise that resolves to the predicted next user message, the user state, and any insights.
     *
     * @throws {Error} If the response from the language model does not contain the expected keys: 'userState', 'predictedUserMessage', and 'insights'.
     */
    async predictNextUserMessage(chatHistory, runManager) {
        const messageString = this.getChatHistoryString(chatHistory);
        const llmWithFunctions = this.llm.bind({
            functions: [PREDICT_NEXT_USER_MESSAGE_FUNCTION],
            function_call: { name: PREDICT_NEXT_USER_MESSAGE_FUNCTION.name },
        });
        const chain = PREDICT_NEXT_USER_MESSAGE_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);
        const res = await chain.invoke({
            chat_history: messageString,
        }, runManager?.getChild("prediction"));
        if (!("userState" in res &&
            "predictedUserMessage" in res &&
            "insights" in res)) {
            throw new Error(`Invalid response from LLM: ${JSON.stringify(res)}`);
        }
        const predictionResponse = res;
        // Query the retriever for relevant insights. Use the generated insights as a query.
        const retrievedDocs = await this.retrieveRelevantInsights(predictionResponse.insights);
        const relevantDocs = this.removeDuplicateStrings([
            ...predictionResponse.insights,
            ...retrievedDocs,
        ]);
        return {
            ...predictionResponse,
            insights: relevantDocs,
        };
    }
    /**
     * Retrieves relevant insights based on the provided insights.
     *
     * @param {Array<string>} insights - An array of insights to be used for retrieving relevant documents.
     *
     * @returns {Promise<Array<string>>} A promise that resolves to an array of relevant insights content.
     */
    async retrieveRelevantInsights(insights) {
        // Only extract the first relevant doc from the retriever. We don't need more than one.
        const relevantInsightsDocuments = await Promise.all(insights.map(async (insight) => {
            const relevantInsight = await this.retriever.getRelevantDocuments(insight);
            return relevantInsight[0];
        }));
        // The retriever may return no documents for a given insight; drop
        // those entries instead of crashing on `undefined.pageContent`.
        const relevantInsightsContent = relevantInsightsDocuments
            .filter((document) => document !== undefined)
            .map((document) => document.pageContent);
        return relevantInsightsContent;
    }
    /**
     * This method generates prediction violations based on the predicted and actual user responses.
     * It also generates a revised prediction based on the identified violations.
     *
     * @param {Object} params - The parameters for the method.
     * @param {PredictNextUserMessageResponse} params.userPredictions - The predicted user message, user state, and insights.
     * @param {BaseMessage} [params.userResponse] - The actual user response.
     * @param {CallbackManagerForChainRun} [params.runManager] - Optional callback manager for the chain run.
     *
     * @returns {Promise<{ userResponse: BaseMessage | undefined; revisedPrediction: string; explainedPredictionErrors: Array<string>; }>} A promise that resolves to an object containing the actual user response, the revised prediction, and the explained prediction errors.
     *
     * @throws {Error} If the response from the language model does not contain the expected keys: 'violationExplanation', 'explainedPredictionErrors', and 'accuratePrediction'.
     */
    async getPredictionViolations({ userPredictions, userResponse, runManager, }) {
        const llmWithFunctions = this.llm.bind({
            functions: [PREDICTION_VIOLATIONS_FUNCTION],
            function_call: { name: PREDICTION_VIOLATIONS_FUNCTION.name },
        });
        const chain = PREDICTION_VIOLATIONS_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);
        const res = (await chain.invoke({
            predicted_output: userPredictions.predictedUserMessage,
            actual_output: userResponse?.content ?? "",
            user_insights: userPredictions.insights.join("\n"),
        }, runManager?.getChild("prediction_violations")));
        // Generate a revised prediction based on violations.
        const revisedPrediction = await this.generateRevisedPrediction({
            originalPrediction: userPredictions.predictedUserMessage,
            explainedPredictionErrors: res.explainedPredictionErrors,
            userInsights: userPredictions.insights,
            runManager,
        });
        return {
            userResponse,
            revisedPrediction,
            explainedPredictionErrors: res.explainedPredictionErrors,
        };
    }
    /**
     * This method generates a revised prediction based on the original prediction, explained prediction errors, and user insights.
     *
     * @param {Object} params - The parameters for the method.
     * @param {string} params.originalPrediction - The original prediction made by the model.
     * @param {Array<string>} params.explainedPredictionErrors - An array of explained prediction errors.
     * @param {Array<string>} params.userInsights - An array of insights about the user.
     * @param {CallbackManagerForChainRun} [params.runManager] - Optional callback manager for the chain run.
     *
     * @returns {Promise<string>} A promise that resolves to a revised prediction.
     */
    async generateRevisedPrediction({ originalPrediction, explainedPredictionErrors, userInsights, runManager, }) {
        const revisedPredictionChain = GENERATE_REVISED_PREDICTION_PROMPT.pipe(this.llm).pipe(this.stringOutputParser);
        const revisedPredictionRes = await revisedPredictionChain.invoke({
            prediction: originalPrediction,
            explained_prediction_errors: explainedPredictionErrors.join("\n"),
            user_insights: userInsights.join("\n"),
        }, runManager?.getChild("prediction_revision"));
        return revisedPredictionRes;
    }
    /**
     * This method generates facts or insights about the user based on the revised prediction, explained prediction errors, and the user's response.
     *
     * @param {Object} params - The parameters for the method.
     * @param {BaseMessage} [params.userResponse] - The actual user response.
     * @param {Object} params.predictions - The revised prediction and explained prediction errors.
     * @param {string} params.predictions.revisedPrediction - The revised prediction made by the model.
     * @param {Array<string>} params.predictions.explainedPredictionErrors - An array of explained prediction errors.
     * @param {CallbackManagerForChainRun} [params.runManager] - Optional callback manager for the chain run.
     *
     * @returns {Promise<string>} A promise that resolves to a string containing the generated facts or insights about the user.
     */
    async generateFacts({ userResponse, predictions, runManager, }) {
        const chain = GENERATE_FACTS_PROMPT.pipe(this.llm).pipe(this.stringOutputParser);
        const res = await chain.invoke({
            prediction_violations: predictions.explainedPredictionErrors.join("\n"),
            prediction: predictions.revisedPrediction,
            user_message: userResponse?.content ?? "",
        }, runManager?.getChild("generate_facts"));
        return res;
    }
    /**
     * Static method that creates a ViolationOfExpectationsChain instance from a
     * ChatOpenAI and retriever. It also accepts optional options
     * to customize the chain.
     *
     * @param llm The ChatOpenAI instance.
     * @param retriever The retriever used for similarity search.
     * @param options Optional options to customize the chain.
     *
     * @returns A new instance of ViolationOfExpectationsChain.
     */
    static fromLLM(llm, retriever, options) {
        return new this({
            retriever,
            llm,
            ...options,
        });
    }
}
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.GENERATE_FACTS_PROMPT = exports.GENERATE_REVISED_PREDICTION_PROMPT = exports.PREDICTION_VIOLATIONS_PROMPT = exports.PREDICT_NEXT_USER_MESSAGE_PROMPT = void 0;
const prompt_js_1 = require("../../../prompts/prompt.cjs");
// Prompt asking the model to reason about the user's mental state and
// predict their next message. Template variable: {chat_history}.
exports.PREDICT_NEXT_USER_MESSAGE_PROMPT =
/* #__PURE__ */ prompt_js_1.PromptTemplate.fromTemplate(`
You have been tasked with coming up with insights and data-points based on a chat history between a human and an AI.
Given the user's chat history provide the following:
- Concise reasoning about the users internal mental state.
- Your prediction on how they will respond to the AI's most recent message.
- A concise list of any additional insights that would be useful to improve prediction.
--------
Chat History: {chat_history}`);
// Prompt comparing a predicted user message against the actual one.
// Template variables: {predicted_output}, {actual_output}, {user_insights}.
exports.PREDICTION_VIOLATIONS_PROMPT =
/* #__PURE__ */ prompt_js_1.PromptTemplate.fromTemplate(`You have been given a prediction and an actual message from a human and AI conversation.
Using the prediction, actual message, and additional user insights, generate the following:
- How exactly was the original prediction violated? Which parts were wrong? State the exact differences.
- If there were errors with the prediction, what were they and why?
--------
Predicted Output: {predicted_output}
--------
Actual Output: {actual_output}
--------
User Insights: {user_insights}
--------
`);
// Prompt asking the model to revise an earlier prediction given its errors.
// Template variables: {prediction}, {explained_prediction_errors}, {user_insights}.
exports.GENERATE_REVISED_PREDICTION_PROMPT =
/* #__PURE__ */ prompt_js_1.PromptTemplate.fromTemplate(`
You have been tasked with revising a prediction on what a user might say in a chat conversation.
--------
Your previous prediction: {prediction}
--------
Ways in which your prediction was off: {explained_prediction_errors}
--------
Key insights to the user: {user_insights}
--------
Given the above, revise your prediction to be more accurate.
Revised Prediction:`);
// Prompt distilling a single fact about the user from the prediction,
// its violations, and the actual message. Template variables:
// {prediction_violations}, {prediction}, {user_message}.
exports.GENERATE_FACTS_PROMPT =
/* #__PURE__ */ prompt_js_1.PromptTemplate.fromTemplate(`
Given a user message, an LLM generated prediction of what that message might be, and a list of violations which the prediction made compared to the actual message, generate a fact about the user, relevant to the users message.
--------
Prediction violations: {prediction_violations}
--------
Revised prediction: {prediction}
--------
Actual user message: {user_message}
--------
Relevant fact:`);
import { PromptTemplate } from "../../../prompts/prompt.js";
/** Prompt predicting the user's next message from `{chat_history}`. */
export declare const PREDICT_NEXT_USER_MESSAGE_PROMPT: PromptTemplate<import("../../../prompts/prompt.js").ParamsFromFString<"\nYou have been tasked with coming up with insights and data-points based on a chat history between a human and an AI.\nGiven the user's chat history provide the following:\n- Concise reasoning about the users internal mental state.\n- Your prediction on how they will respond to the AI's most recent message.\n- A concise list of any additional insights that would be useful to improve prediction.\n--------\nChat History: {chat_history}">, any>;
/** Prompt comparing `{predicted_output}` against `{actual_output}` using `{user_insights}`. */
export declare const PREDICTION_VIOLATIONS_PROMPT: PromptTemplate<import("../../../prompts/prompt.js").ParamsFromFString<"You have been given a prediction and an actual message from a human and AI conversation.\nUsing the prediction, actual message, and additional user insights, generate the following:\n- How exactly was the original prediction violated? Which parts were wrong? State the exact differences.\n- If there were errors with the prediction, what were they and why?\n--------\nPredicted Output: {predicted_output}\n--------\nActual Output: {actual_output}\n--------\nUser Insights: {user_insights}\n--------\n">, any>;
/** Prompt revising `{prediction}` given `{explained_prediction_errors}` and `{user_insights}`. */
export declare const GENERATE_REVISED_PREDICTION_PROMPT: PromptTemplate<import("../../../prompts/prompt.js").ParamsFromFString<"\nYou have been tasked with revising a prediction on what a user might say in a chat conversation.\n--------\nYour previous prediction: {prediction}\n--------\nWays in which your prediction was off: {explained_prediction_errors}\n--------\nKey insights to the user: {user_insights}\n--------\nGiven the above, revise your prediction to be more accurate.\nRevised Prediction:">, any>;
/** Prompt distilling a user fact from `{prediction_violations}`, `{prediction}`, and `{user_message}`. */
export declare const GENERATE_FACTS_PROMPT: PromptTemplate<import("../../../prompts/prompt.js").ParamsFromFString<"\nGiven a user message, an LLM generated prediction of what that message might be, and a list of violations which the prediction made compared to the actual message, generate a fact about the user, relevant to the users message.\n--------\nPrediction violations: {prediction_violations}\n--------\nRevised prediction: {prediction}\n--------\nActual user message: {user_message}\n--------\nRelevant fact:">, any>;
import { PromptTemplate } from "../../../prompts/prompt.js";
// Prompt asking the model to reason about the user's mental state and
// predict their next message. Template variable: {chat_history}.
export const PREDICT_NEXT_USER_MESSAGE_PROMPT =
/* #__PURE__ */ PromptTemplate.fromTemplate(`
You have been tasked with coming up with insights and data-points based on a chat history between a human and an AI.
Given the user's chat history provide the following:
- Concise reasoning about the users internal mental state.
- Your prediction on how they will respond to the AI's most recent message.
- A concise list of any additional insights that would be useful to improve prediction.
--------
Chat History: {chat_history}`);
// Prompt comparing a predicted user message against the actual one.
// Template variables: {predicted_output}, {actual_output}, {user_insights}.
export const PREDICTION_VIOLATIONS_PROMPT =
/* #__PURE__ */ PromptTemplate.fromTemplate(`You have been given a prediction and an actual message from a human and AI conversation.
Using the prediction, actual message, and additional user insights, generate the following:
- How exactly was the original prediction violated? Which parts were wrong? State the exact differences.
- If there were errors with the prediction, what were they and why?
--------
Predicted Output: {predicted_output}
--------
Actual Output: {actual_output}
--------
User Insights: {user_insights}
--------
`);
// Prompt asking the model to revise an earlier prediction given its errors.
// Template variables: {prediction}, {explained_prediction_errors}, {user_insights}.
export const GENERATE_REVISED_PREDICTION_PROMPT =
/* #__PURE__ */ PromptTemplate.fromTemplate(`
You have been tasked with revising a prediction on what a user might say in a chat conversation.
--------
Your previous prediction: {prediction}
--------
Ways in which your prediction was off: {explained_prediction_errors}
--------
Key insights to the user: {user_insights}
--------
Given the above, revise your prediction to be more accurate.
Revised Prediction:`);
// Prompt distilling a single fact about the user from the prediction,
// its violations, and the actual message. Template variables:
// {prediction_violations}, {prediction}, {user_message}.
export const GENERATE_FACTS_PROMPT =
/* #__PURE__ */ PromptTemplate.fromTemplate(`
Given a user message, an LLM generated prediction of what that message might be, and a list of violations which the prediction made compared to the actual message, generate a fact about the user, relevant to the users message.
--------
Prediction violations: {prediction_violations}
--------
Revised prediction: {prediction}
--------
Actual user message: {user_message}
--------
Relevant fact:`);
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.CloudflareWorkersAI = void 0;
const base_js_1 = require("./base.cjs");
const env_js_1 = require("../util/env.cjs");
/**
 * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
 * Language Model) class, providing a standard interface for interacting
 * with the CloudflareWorkersAI language model.
 */
class CloudflareWorkersAI extends base_js_1.LLM {
    static lc_name() {
        return "CloudflareWorkersAI";
    }
    constructor(fields) {
        super(fields ?? {});
        // Model to run; defaults to Cloudflare's int8 Llama 2 chat model.
        Object.defineProperty(this, "model", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "@cf/meta/llama-2-7b-chat-int8"
        });
        Object.defineProperty(this, "cloudflareAccountId", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "cloudflareApiToken", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "baseUrl", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "lc_serializable", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: true
        });
        this.model = fields?.model ?? this.model;
        this.cloudflareAccountId =
            fields?.cloudflareAccountId ??
                (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_ACCOUNT_ID");
        this.cloudflareApiToken =
            fields?.cloudflareApiToken ??
                (0, env_js_1.getEnvironmentVariable)("CLOUDFLARE_API_TOKEN");
        this.baseUrl =
            fields?.baseUrl ??
                `https://api.cloudflare.com/client/v4/accounts/${this.cloudflareAccountId}/ai/run`;
        if (this.baseUrl.endsWith("/")) {
            this.baseUrl = this.baseUrl.slice(0, -1);
        }
    }
    /**
     * Method to validate the environment.
     *
     * NOTE: `baseUrl` is always assigned in the constructor, so the previous
     * `this.baseUrl === undefined` gate made every check below unreachable.
     * Credentials are now validated directly.
     *
     * @throws {Error} If the API token is missing, or if the account ID is
     * missing while the default endpoint URL (which embeds it) is in use.
     */
    validateEnvironment() {
        // The API token is always required: every request sends it as the
        // Authorization bearer header.
        if (!this.cloudflareApiToken) {
            throw new Error(`No Cloudflare API token found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_API_TOKEN" in your environment variables.`);
        }
        // The account ID is only needed to build the default endpoint URL; a
        // custom `baseUrl` does not require it. When the ID was missing at
        // construction time, the default URL contains the literal "undefined".
        if (!this.cloudflareAccountId &&
            this.baseUrl.includes("/accounts/undefined/")) {
            throw new Error(`No Cloudflare account ID found. Please provide it when instantiating the CloudflareWorkersAI class, or set it as "CLOUDFLARE_ACCOUNT_ID" in your environment variables.`);
        }
    }
    /** Get the identifying parameters for this LLM. */
    get identifyingParams() {
        return { model: this.model };
    }
    /**
     * Get the parameters used to invoke the model
     */
    invocationParams() {
        return {
            model: this.model,
        };
    }
    /** Get the type of LLM. */
    _llmType() {
        return "cloudflare";
    }
    /** Call out to CloudflareWorkersAI's complete endpoint.
    Args:
        prompt: The prompt to pass into the model.
    Returns:
        The string generated by the model.
    Example:
        const response = await model.call("Tell me a joke.");
    */
    async _call(prompt, options) {
        this.validateEnvironment();
        const url = `${this.baseUrl}/${this.model}`;
        const headers = {
            Authorization: `Bearer ${this.cloudflareApiToken}`,
            "Content-Type": "application/json",
        };
        const data = { prompt };
        // `this.caller` applies the configured retry/backoff policy.
        const responseData = await this.caller.call(async () => {
            const response = await fetch(url, {
                method: "POST",
                headers,
                body: JSON.stringify(data),
                signal: options.signal,
            });
            if (!response.ok) {
                const error = new Error(`Cloudflare LLM call failed with status code ${response.status}`);
                // Attach the raw response so callers can inspect the failure.
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                error.response = response;
                throw error;
            }
            return response.json();
        });
        return responseData.result.response;
    }
}
exports.CloudflareWorkersAI = CloudflareWorkersAI;
import { LLM, BaseLLMParams } from "./base.js";
/**
 * Interface for CloudflareWorkersAI input parameters.
 */
export interface CloudflareWorkersAIInput {
    /** Cloudflare account ID; falls back to the `CLOUDFLARE_ACCOUNT_ID` environment variable. */
    cloudflareAccountId?: string;
    /** Cloudflare API token; falls back to the `CLOUDFLARE_API_TOKEN` environment variable. */
    cloudflareApiToken?: string;
    /** Model to run; defaults to `@cf/meta/llama-2-7b-chat-int8`. */
    model?: string;
    /** Custom endpoint base URL; defaults to the Cloudflare accounts API URL built from the account ID. */
    baseUrl?: string;
}
/**
 * Class representing the CloudflareWorkersAI language model. It extends the LLM (Large
 * Language Model) class, providing a standard interface for interacting
 * with the CloudflareWorkersAI language model.
 */
export declare class CloudflareWorkersAI extends LLM implements CloudflareWorkersAIInput {
    model: string;
    cloudflareAccountId?: string;
    cloudflareApiToken?: string;
    baseUrl: string;
    static lc_name(): string;
    lc_serializable: boolean;
    constructor(fields?: CloudflareWorkersAIInput & BaseLLMParams);
    /**
     * Method to validate the environment.
     */
    validateEnvironment(): void;
    /** Get the identifying parameters for this LLM. */
    get identifyingParams(): {
        model: string;
    };
    /**
     * Get the parameters used to invoke the model
     */
    invocationParams(): {
        model: string;
    };
    /** Get the type of LLM. */
    _llmType(): string;
    /** Call out to CloudflareWorkersAI's complete endpoint.
    Args:
        prompt: The prompt to pass into the model.
    Returns:
        The string generated by the model.
    Example:
        const response = await model.call("Tell me a joke.");
    */
    _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
}
+ }