langchain 0.0.180 → 0.0.182-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/dist/agents/openai/output_parser.cjs +3 -0
  2. package/dist/agents/openai/output_parser.js +3 -0
  3. package/dist/base_language/index.cjs +7 -3
  4. package/dist/base_language/index.d.ts +3 -3
  5. package/dist/base_language/index.js +7 -3
  6. package/dist/cache/base.cjs +2 -5
  7. package/dist/cache/base.js +2 -2
  8. package/dist/chat_models/base.cjs +9 -1
  9. package/dist/chat_models/base.js +9 -1
  10. package/dist/chat_models/bedrock/web.cjs +5 -1
  11. package/dist/chat_models/bedrock/web.js +5 -1
  12. package/dist/chat_models/cloudflare_workersai.cjs +8 -1
  13. package/dist/chat_models/cloudflare_workersai.js +8 -1
  14. package/dist/chat_models/googlepalm.cjs +16 -7
  15. package/dist/chat_models/googlepalm.js +16 -7
  16. package/dist/chat_models/googlevertexai/common.cjs +6 -0
  17. package/dist/chat_models/googlevertexai/common.js +6 -0
  18. package/dist/chat_models/iflytek_xinghuo/common.cjs +9 -4
  19. package/dist/chat_models/iflytek_xinghuo/common.js +9 -4
  20. package/dist/chat_models/llama_cpp.cjs +23 -4
  21. package/dist/chat_models/llama_cpp.js +23 -4
  22. package/dist/chat_models/minimax.cjs +6 -0
  23. package/dist/chat_models/minimax.js +6 -0
  24. package/dist/chat_models/openai.cjs +2 -5
  25. package/dist/chat_models/openai.js +3 -6
  26. package/dist/chat_models/portkey.cjs +18 -8
  27. package/dist/chat_models/portkey.js +18 -8
  28. package/dist/chat_models/yandex.cjs +3 -0
  29. package/dist/chat_models/yandex.js +3 -0
  30. package/dist/embeddings/cache_backed.cjs +2 -5
  31. package/dist/embeddings/cache_backed.js +2 -2
  32. package/dist/embeddings/voyage.cjs +120 -0
  33. package/dist/embeddings/voyage.d.ts +66 -0
  34. package/dist/embeddings/voyage.js +116 -0
  35. package/dist/experimental/autogpt/prompt.cjs +10 -0
  36. package/dist/experimental/autogpt/prompt.js +10 -0
  37. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +6 -0
  38. package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +6 -0
  39. package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
  40. package/dist/experimental/chat_models/anthropic_functions.js +3 -0
  41. package/dist/experimental/chat_models/bittensor.cjs +9 -4
  42. package/dist/experimental/chat_models/bittensor.js +9 -4
  43. package/dist/load/import_map.cjs +3 -2
  44. package/dist/load/import_map.d.ts +1 -0
  45. package/dist/load/import_map.js +1 -0
  46. package/dist/schema/index.cjs +27 -7
  47. package/dist/schema/index.d.ts +10 -3
  48. package/dist/schema/index.js +27 -7
  49. package/dist/schema/output_parser.cjs +25 -2
  50. package/dist/schema/output_parser.js +25 -2
  51. package/dist/util/js-sha1/hash.cjs +358 -0
  52. package/dist/util/js-sha1/hash.d.ts +1 -0
  53. package/dist/util/js-sha1/hash.js +355 -0
  54. package/dist/util/stream.cjs +4 -1
  55. package/dist/util/stream.d.ts +4 -1
  56. package/dist/util/stream.js +4 -1
  57. package/embeddings/voyage.cjs +1 -0
  58. package/embeddings/voyage.d.ts +1 -0
  59. package/embeddings/voyage.js +1 -0
  60. package/package.json +12 -5
@@ -1,5 +1,5 @@
  import { OpenAI as OpenAIClient } from "openai";
- import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, } from "../schema/index.js";
+ import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, } from "../schema/index.js";
  import { formatToOpenAIFunction } from "../tools/convert_to_openai.js";
  import { getEndpoint } from "../util/azure.js";
  import { getEnvironmentVariable } from "../util/env.js";
@@ -51,14 +51,10 @@ function messageToOpenAIMessage(message) {
  }
  function openAIResponseToChatMessage(message) {
      switch (message.role) {
-         case "user":
-             return new HumanMessage(message.content || "");
          case "assistant":
              return new AIMessage(message.content || "", {
                  function_call: message.function_call,
              });
-         case "system":
-             return new SystemMessage(message.content || "");
          default:
              return new ChatMessage(message.content || "", message.role ?? "unknown");
      }
@@ -574,7 +570,8 @@ export class ChatOpenAI extends BaseChatModel {
      let count = textCount + tokensPerMessage + roleCount + nameCount;
      // From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate
      const openAIMessage = messageToOpenAIMessage(message);
-     if (openAIMessage.role === "function") {
+     if (openAIMessage.role === "function" ||
+         openAIMessage.role === "tool") {
          count -= 2;
      }
      if (openAIMessage.function_call) {
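Note: after this change, only `"assistant"` responses from the OpenAI API keep a dedicated message class; `"user"` and `"system"` roles fall through to the generic `ChatMessage`, and the token estimator treats `"tool"` like `"function"`. A minimal standalone sketch of the new mapping behavior (`toChatMessage` is a hypothetical name; the real helper, `openAIResponseToChatMessage`, is module-private and not exported):

```ts
import { AIMessage, ChatMessage } from "langchain/schema";

// Hypothetical re-creation of the module-private helper after this diff:
// only "assistant" is special-cased; everything else becomes a ChatMessage.
function toChatMessage(message: {
  role?: string;
  content?: string;
  function_call?: { name: string; arguments: string };
}) {
  switch (message.role) {
    case "assistant":
      return new AIMessage(message.content || "", {
        function_call: message.function_call,
      });
    default:
      return new ChatMessage(message.content || "", message.role ?? "unknown");
  }
}
```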
@@ -98,10 +98,15 @@ class PortkeyChat extends base_js_1.BaseChatModel {
          return "portkey";
      }
      async _generate(messages, options, _) {
-         const messagesList = messages.map((message) => ({
-             role: message._getType(),
-             content: message.content,
-         }));
+         const messagesList = messages.map((message) => {
+             if (typeof message.content !== "string") {
+                 throw new Error("PortkeyChat does not support non-string message content.");
+             }
+             return {
+                 role: message._getType(),
+                 content: message.content,
+             };
+         });
          const response = await this.session.portkey.chatCompletions.create({
              messages: messagesList,
              ...options,
@@ -124,10 +129,15 @@ class PortkeyChat extends base_js_1.BaseChatModel {
          };
      }
      async *_streamResponseChunks(messages, options, runManager) {
-         const messagesList = messages.map((message) => ({
-             role: message._getType(),
-             content: message.content,
-         }));
+         const messagesList = messages.map((message) => {
+             if (typeof message.content !== "string") {
+                 throw new Error("PortkeyChat does not support non-string message content.");
+             }
+             return {
+                 role: message._getType(),
+                 content: message.content,
+             };
+         });
          const response = await this.session.portkey.chatCompletions.create({
              messages: messagesList,
              ...options,
@@ -95,10 +95,15 @@ export class PortkeyChat extends BaseChatModel {
          return "portkey";
      }
      async _generate(messages, options, _) {
-         const messagesList = messages.map((message) => ({
-             role: message._getType(),
-             content: message.content,
-         }));
+         const messagesList = messages.map((message) => {
+             if (typeof message.content !== "string") {
+                 throw new Error("PortkeyChat does not support non-string message content.");
+             }
+             return {
+                 role: message._getType(),
+                 content: message.content,
+             };
+         });
          const response = await this.session.portkey.chatCompletions.create({
              messages: messagesList,
              ...options,
@@ -121,10 +126,15 @@ export class PortkeyChat extends BaseChatModel {
          };
      }
      async *_streamResponseChunks(messages, options, runManager) {
-         const messagesList = messages.map((message) => ({
-             role: message._getType(),
-             content: message.content,
-         }));
+         const messagesList = messages.map((message) => {
+             if (typeof message.content !== "string") {
+                 throw new Error("PortkeyChat does not support non-string message content.");
+             }
+             return {
+                 role: message._getType(),
+                 content: message.content,
+             };
+         });
          const response = await this.session.portkey.chatCompletions.create({
              messages: messagesList,
              ...options,
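All four Portkey code paths now validate message content up front. This appears to accompany this release's schema changes (`schema/index.d.ts` above), which widen message content beyond plain strings; a message built with array ("complex") content now throws instead of being serialized incorrectly. A hedged illustration, assuming the widened content type from this release (the `portkeyChat` instance setup is omitted):

```ts
import { HumanMessage } from "langchain/schema";

// Assumption: this release allows array-shaped message content in the schema.
const multimodal = new HumanMessage({
  content: [{ type: "text", text: "describe this image" }],
});

// await portkeyChat.call([multimodal]);
// -> Error: PortkeyChat does not support non-string message content.
```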
@@ -9,6 +9,9 @@ function _parseChatHistory(history) {
      const chatHistory = [];
      let instruction = "";
      for (const message of history) {
+         if (typeof message.content !== "string") {
+             throw new Error("ChatYandexGPT does not support non-string message content.");
+         }
          if ("content" in message) {
              if (message._getType() === "human") {
                  chatHistory.push({ role: "user", text: message.content });
@@ -6,6 +6,9 @@ function _parseChatHistory(history) {
      const chatHistory = [];
      let instruction = "";
      for (const message of history) {
+         if (typeof message.content !== "string") {
+             throw new Error("ChatYandexGPT does not support non-string message content.");
+         }
          if ("content" in message) {
              if (message._getType() === "human") {
                  chatHistory.push({ role: "user", text: message.content });
@@ -1,10 +1,7 @@
  "use strict";
- var __importDefault = (this && this.__importDefault) || function (mod) {
-     return (mod && mod.__esModule) ? mod : { "default": mod };
- };
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.CacheBackedEmbeddings = void 0;
- const object_hash_1 = __importDefault(require("object-hash"));
+ const hash_js_1 = require("../util/js-sha1/hash.cjs");
  const encoder_backed_js_1 = require("../storage/encoder_backed.cjs");
  const base_js_1 = require("./base.cjs");
  /**
@@ -96,7 +93,7 @@ class CacheBackedEmbeddings extends base_js_1.Embeddings {
      const decoder = new TextDecoder();
      const encoderBackedStore = new encoder_backed_js_1.EncoderBackedStore({
          store: documentEmbeddingStore,
-         keyEncoder: (key) => (options?.namespace ?? "") + (0, object_hash_1.default)(key),
+         keyEncoder: (key) => (options?.namespace ?? "") + (0, hash_js_1.insecureHash)(key),
          valueSerializer: (value) => encoder.encode(JSON.stringify(value)),
          valueDeserializer: (serializedValue) => JSON.parse(decoder.decode(serializedValue)),
      });
@@ -1,4 +1,4 @@
- import hash from "object-hash";
+ import { insecureHash } from "../util/js-sha1/hash.js";
  import { EncoderBackedStore } from "../storage/encoder_backed.js";
  import { Embeddings } from "./base.js";
  /**
@@ -90,7 +90,7 @@ export class CacheBackedEmbeddings extends Embeddings {
      const decoder = new TextDecoder();
      const encoderBackedStore = new EncoderBackedStore({
          store: documentEmbeddingStore,
-         keyEncoder: (key) => (options?.namespace ?? "") + hash(key),
+         keyEncoder: (key) => (options?.namespace ?? "") + insecureHash(key),
          valueSerializer: (value) => encoder.encode(JSON.stringify(value)),
          valueDeserializer: (serializedValue) => JSON.parse(decoder.decode(serializedValue)),
      });
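The cache key scheme itself is unchanged: the namespace (if any) is prepended to a hash of the document text. Swapping `object-hash` for the vendored `js-sha1` helper (the new `util/js-sha1/hash.*` files above) drops a runtime dependency. Assuming `insecureHash` returns the hex SHA-1 digest of its string input, the key derivation is equivalent to this Node sketch (the namespace value is a placeholder):

```ts
import { createHash } from "node:crypto";

// Illustrative only: mirrors the keyEncoder above under the assumption
// that insecureHash(key) === hex-encoded SHA-1 of the key string.
const namespace = "text-embedding-ada-002"; // hypothetical namespace
const text = "some document chunk";
const cacheKey = namespace + createHash("sha1").update(text).digest("hex");
// CacheBackedEmbeddings stores the vector for `text` under `cacheKey`.
```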
@@ -0,0 +1,120 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.VoyageEmbeddings = void 0;
+ const chunk_js_1 = require("../util/chunk.cjs");
+ const env_js_1 = require("../util/env.cjs");
+ const base_js_1 = require("./base.cjs");
+ /**
+  * A class for generating embeddings using the Voyage AI API.
+  */
+ class VoyageEmbeddings extends base_js_1.Embeddings {
+     /**
+      * Constructor for the VoyageEmbeddings class.
+      * @param fields - An optional object with properties to configure the instance.
+      */
+     constructor(fields) {
+         const fieldsWithDefaults = { ...fields };
+         super(fieldsWithDefaults);
+         Object.defineProperty(this, "modelName", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "voyage-01"
+         });
+         Object.defineProperty(this, "batchSize", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 8
+         });
+         Object.defineProperty(this, "apiKey", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "basePath", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "https://api.voyageai.com/v1"
+         });
+         Object.defineProperty(this, "apiUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "headers", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         const apiKey = fieldsWithDefaults?.apiKey || (0, env_js_1.getEnvironmentVariable)("VOYAGEAI_API_KEY");
+         if (!apiKey) {
+             throw new Error("Voyage AI API key not found");
+         }
+         this.modelName = fieldsWithDefaults?.modelName ?? this.modelName;
+         this.batchSize = fieldsWithDefaults?.batchSize ?? this.batchSize;
+         this.apiKey = apiKey;
+         this.apiUrl = `${this.basePath}/embeddings`;
+     }
+     /**
+      * Generates embeddings for an array of texts.
+      * @param texts - An array of strings to generate embeddings for.
+      * @returns A Promise that resolves to an array of embeddings.
+      */
+     async embedDocuments(texts) {
+         const batches = (0, chunk_js_1.chunkArray)(texts, this.batchSize);
+         const batchRequests = batches.map((batch) => this.embeddingWithRetry({
+             model: this.modelName,
+             input: batch,
+         }));
+         const batchResponses = await Promise.all(batchRequests);
+         const embeddings = [];
+         for (let i = 0; i < batchResponses.length; i += 1) {
+             const batch = batches[i];
+             const { data: batchResponse } = batchResponses[i];
+             for (let j = 0; j < batch.length; j += 1) {
+                 embeddings.push(batchResponse[j].embedding);
+             }
+         }
+         return embeddings;
+     }
+     /**
+      * Generates an embedding for a single text.
+      * @param text - A string to generate an embedding for.
+      * @returns A Promise that resolves to an array of numbers representing the embedding.
+      */
+     async embedQuery(text) {
+         const { data } = await this.embeddingWithRetry({
+             model: this.modelName,
+             input: text,
+         });
+         return data[0].embedding;
+     }
+     /**
+      * Makes a request to the Voyage AI API to generate embeddings for an array of texts.
+      * @param request - An object with properties to configure the request.
+      * @returns A Promise that resolves to the response from the Voyage AI API.
+      */
+     async embeddingWithRetry(request) {
+         const makeCompletionRequest = async () => {
+             const url = `${this.apiUrl}`;
+             const response = await fetch(url, {
+                 method: "POST",
+                 headers: {
+                     "Content-Type": "application/json",
+                     Authorization: `Bearer ${this.apiKey}`,
+                     ...this.headers,
+                 },
+                 body: JSON.stringify(request),
+             });
+             const json = await response.json();
+             return json;
+         };
+         return this.caller.call(makeCompletionRequest);
+     }
+ }
+ exports.VoyageEmbeddings = VoyageEmbeddings;
@@ -0,0 +1,66 @@
+ import { Embeddings, EmbeddingsParams } from "./base.js";
+ /**
+  * Interface that extends EmbeddingsParams and defines additional
+  * parameters specific to the VoyageEmbeddings class.
+  */
+ export interface VoyageEmbeddingsParams extends EmbeddingsParams {
+     modelName: string;
+     /**
+      * The maximum number of documents to embed in a single request. This is
+      * limited by the Voyage AI API to a maximum of 8.
+      */
+     batchSize?: number;
+ }
+ /**
+  * Interface for the request body to generate embeddings.
+  */
+ export interface CreateVoyageEmbeddingRequest {
+     /**
+      * @type {string}
+      * @memberof CreateVoyageEmbeddingRequest
+      */
+     model: string;
+     /**
+      * Text to generate vector expectation
+      * @type {CreateEmbeddingRequestInput}
+      * @memberof CreateVoyageEmbeddingRequest
+      */
+     input: string | string[];
+ }
+ /**
+  * A class for generating embeddings using the Voyage AI API.
+  */
+ export declare class VoyageEmbeddings extends Embeddings implements VoyageEmbeddingsParams {
+     modelName: string;
+     batchSize: number;
+     private apiKey;
+     basePath?: string;
+     apiUrl: string;
+     headers?: Record<string, string>;
+     /**
+      * Constructor for the VoyageEmbeddings class.
+      * @param fields - An optional object with properties to configure the instance.
+      */
+     constructor(fields?: Partial<VoyageEmbeddingsParams> & {
+         verbose?: boolean;
+         apiKey?: string;
+     });
+     /**
+      * Generates embeddings for an array of texts.
+      * @param texts - An array of strings to generate embeddings for.
+      * @returns A Promise that resolves to an array of embeddings.
+      */
+     embedDocuments(texts: string[]): Promise<number[][]>;
+     /**
+      * Generates an embedding for a single text.
+      * @param text - A string to generate an embedding for.
+      * @returns A Promise that resolves to an array of numbers representing the embedding.
+      */
+     embedQuery(text: string): Promise<number[]>;
+     /**
+      * Makes a request to the Voyage AI API to generate embeddings for an array of texts.
+      * @param request - An object with properties to configure the request.
+      * @returns A Promise that resolves to the response from the Voyage AI API.
+      */
+     private embeddingWithRetry;
+ }
@@ -0,0 +1,116 @@
+ import { chunkArray } from "../util/chunk.js";
+ import { getEnvironmentVariable } from "../util/env.js";
+ import { Embeddings } from "./base.js";
+ /**
+  * A class for generating embeddings using the Voyage AI API.
+  */
+ export class VoyageEmbeddings extends Embeddings {
+     /**
+      * Constructor for the VoyageEmbeddings class.
+      * @param fields - An optional object with properties to configure the instance.
+      */
+     constructor(fields) {
+         const fieldsWithDefaults = { ...fields };
+         super(fieldsWithDefaults);
+         Object.defineProperty(this, "modelName", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "voyage-01"
+         });
+         Object.defineProperty(this, "batchSize", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: 8
+         });
+         Object.defineProperty(this, "apiKey", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "basePath", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: "https://api.voyageai.com/v1"
+         });
+         Object.defineProperty(this, "apiUrl", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         Object.defineProperty(this, "headers", {
+             enumerable: true,
+             configurable: true,
+             writable: true,
+             value: void 0
+         });
+         const apiKey = fieldsWithDefaults?.apiKey || getEnvironmentVariable("VOYAGEAI_API_KEY");
+         if (!apiKey) {
+             throw new Error("Voyage AI API key not found");
+         }
+         this.modelName = fieldsWithDefaults?.modelName ?? this.modelName;
+         this.batchSize = fieldsWithDefaults?.batchSize ?? this.batchSize;
+         this.apiKey = apiKey;
+         this.apiUrl = `${this.basePath}/embeddings`;
+     }
+     /**
+      * Generates embeddings for an array of texts.
+      * @param texts - An array of strings to generate embeddings for.
+      * @returns A Promise that resolves to an array of embeddings.
+      */
+     async embedDocuments(texts) {
+         const batches = chunkArray(texts, this.batchSize);
+         const batchRequests = batches.map((batch) => this.embeddingWithRetry({
+             model: this.modelName,
+             input: batch,
+         }));
+         const batchResponses = await Promise.all(batchRequests);
+         const embeddings = [];
+         for (let i = 0; i < batchResponses.length; i += 1) {
+             const batch = batches[i];
+             const { data: batchResponse } = batchResponses[i];
+             for (let j = 0; j < batch.length; j += 1) {
+                 embeddings.push(batchResponse[j].embedding);
+             }
+         }
+         return embeddings;
+     }
+     /**
+      * Generates an embedding for a single text.
+      * @param text - A string to generate an embedding for.
+      * @returns A Promise that resolves to an array of numbers representing the embedding.
+      */
+     async embedQuery(text) {
+         const { data } = await this.embeddingWithRetry({
+             model: this.modelName,
+             input: text,
+         });
+         return data[0].embedding;
+     }
+     /**
+      * Makes a request to the Voyage AI API to generate embeddings for an array of texts.
+      * @param request - An object with properties to configure the request.
+      * @returns A Promise that resolves to the response from the Voyage AI API.
+      */
+     async embeddingWithRetry(request) {
+         const makeCompletionRequest = async () => {
+             const url = `${this.apiUrl}`;
+             const response = await fetch(url, {
+                 method: "POST",
+                 headers: {
+                     "Content-Type": "application/json",
+                     Authorization: `Bearer ${this.apiKey}`,
+                     ...this.headers,
+                 },
+                 body: JSON.stringify(request),
+             });
+             const json = await response.json();
+             return json;
+         };
+         return this.caller.call(makeCompletionRequest);
+     }
+ }
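Together with the new `package/embeddings/voyage.*` entry points (files 57-59) and the `package.json` changes, the compiled sources above imply usage along these lines. A hedged sketch, since the exact subpath export depends on the final `package.json`:

```ts
import { VoyageEmbeddings } from "langchain/embeddings/voyage";

// apiKey falls back to the VOYAGEAI_API_KEY environment variable if omitted;
// modelName and batchSize shown are the defaults from the constructor above.
const embeddings = new VoyageEmbeddings({
  apiKey: process.env.VOYAGEAI_API_KEY,
  modelName: "voyage-01",
  batchSize: 8, // per the .d.ts, the Voyage API caps a single request at 8 inputs
});

const vectors = await embeddings.embedDocuments(["first doc", "second doc"]);
const queryVector = await embeddings.embedQuery("a search query");
```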
@@ -81,6 +81,10 @@ class AutoGPTPrompt extends chat_js_1.BaseChatPromptTemplate {
      async formatMessages({ goals, memory, messages: previousMessages, user_input, }) {
          const basePrompt = new index_js_1.SystemMessage(this.constructFullPrompt(goals));
          const timePrompt = new index_js_1.SystemMessage(`The current time and date is ${new Date().toLocaleString()}`);
+         if (typeof basePrompt.content !== "string" ||
+             typeof timePrompt.content !== "string") {
+             throw new Error("Non-string message content is not supported.");
+         }
          const usedTokens = (await this.tokenCounter(basePrompt.content)) +
              (await this.tokenCounter(timePrompt.content));
          const relevantDocs = await memory.getRelevantDocuments(JSON.stringify(previousMessages.slice(-10)));
@@ -92,9 +96,15 @@ class AutoGPTPrompt extends chat_js_1.BaseChatPromptTemplate {
          }
          const contentFormat = `This reminds you of these events from your past:\n${relevantMemory.join("\n")}\n\n`;
          const memoryMessage = new index_js_1.SystemMessage(contentFormat);
+         if (typeof memoryMessage.content !== "string") {
+             throw new Error("Non-string message content is not supported.");
+         }
          const usedTokensWithMemory = (await usedTokens) + (await this.tokenCounter(memoryMessage.content));
          const historicalMessages = [];
          for (const message of previousMessages.slice(-10).reverse()) {
+             if (typeof message.content !== "string") {
+                 throw new Error("Non-string message content is not supported.");
+             }
              const messageTokens = await this.tokenCounter(message.content);
              if (usedTokensWithMemory + messageTokens > this.sendTokenLimit - 1000) {
                  break;
@@ -78,6 +78,10 @@ export class AutoGPTPrompt extends BaseChatPromptTemplate {
      async formatMessages({ goals, memory, messages: previousMessages, user_input, }) {
          const basePrompt = new SystemMessage(this.constructFullPrompt(goals));
          const timePrompt = new SystemMessage(`The current time and date is ${new Date().toLocaleString()}`);
+         if (typeof basePrompt.content !== "string" ||
+             typeof timePrompt.content !== "string") {
+             throw new Error("Non-string message content is not supported.");
+         }
          const usedTokens = (await this.tokenCounter(basePrompt.content)) +
              (await this.tokenCounter(timePrompt.content));
          const relevantDocs = await memory.getRelevantDocuments(JSON.stringify(previousMessages.slice(-10)));
@@ -89,9 +93,15 @@ export class AutoGPTPrompt extends BaseChatPromptTemplate {
          }
          const contentFormat = `This reminds you of these events from your past:\n${relevantMemory.join("\n")}\n\n`;
          const memoryMessage = new SystemMessage(contentFormat);
+         if (typeof memoryMessage.content !== "string") {
+             throw new Error("Non-string message content is not supported.");
+         }
          const usedTokensWithMemory = (await usedTokens) + (await this.tokenCounter(memoryMessage.content));
          const historicalMessages = [];
          for (const message of previousMessages.slice(-10).reverse()) {
+             if (typeof message.content !== "string") {
+                 throw new Error("Non-string message content is not supported.");
+             }
              const messageTokens = await this.tokenCounter(message.content);
              if (usedTokensWithMemory + messageTokens > this.sendTokenLimit - 1000) {
                  break;
@@ -247,6 +247,9 @@ class ViolationOfExpectationsChain extends base_js_1.BaseChain {
          function_call: { name: types_js_1.PREDICTION_VIOLATIONS_FUNCTION.name },
      });
      const chain = violation_of_expectations_prompt_js_1.PREDICTION_VIOLATIONS_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);
+     if (typeof userResponse?.content !== "string") {
+         throw new Error("This chain does not support non-string model output.");
+     }
      const res = (await chain.invoke({
          predicted_output: userPredictions.predictedUserMessage,
          actual_output: userResponse?.content ?? "",
@@ -299,6 +302,9 @@ class ViolationOfExpectationsChain extends base_js_1.BaseChain {
      */
      async generateFacts({ userResponse, predictions, runManager, }) {
          const chain = violation_of_expectations_prompt_js_1.GENERATE_FACTS_PROMPT.pipe(this.llm).pipe(this.stringOutputParser);
+         if (typeof userResponse?.content !== "string") {
+             throw new Error("This chain does not support non-string model output.");
+         }
          const res = await chain.invoke({
              prediction_violations: predictions.explainedPredictionErrors.join("\n"),
              prediction: predictions.revisedPrediction,
@@ -244,6 +244,9 @@ export class ViolationOfExpectationsChain extends BaseChain {
          function_call: { name: PREDICTION_VIOLATIONS_FUNCTION.name },
      });
      const chain = PREDICTION_VIOLATIONS_PROMPT.pipe(llmWithFunctions).pipe(this.jsonOutputParser);
+     if (typeof userResponse?.content !== "string") {
+         throw new Error("This chain does not support non-string model output.");
+     }
      const res = (await chain.invoke({
          predicted_output: userPredictions.predictedUserMessage,
          actual_output: userResponse?.content ?? "",
@@ -296,6 +299,9 @@ export class ViolationOfExpectationsChain extends BaseChain {
      */
      async generateFacts({ userResponse, predictions, runManager, }) {
          const chain = GENERATE_FACTS_PROMPT.pipe(this.llm).pipe(this.stringOutputParser);
+         if (typeof userResponse?.content !== "string") {
+             throw new Error("This chain does not support non-string model output.");
+         }
          const res = await chain.invoke({
              prediction_violations: predictions.explainedPredictionErrors.join("\n"),
              prediction: predictions.revisedPrediction,
@@ -112,6 +112,9 @@ class AnthropicFunctions extends base_js_1.BaseChatModel {
      }
      const chatResult = await this.llm._generate(promptMessages, options, runManager);
      const chatGenerationContent = chatResult.generations[0].message.content;
+     if (typeof chatGenerationContent !== "string") {
+         throw new Error("AnthropicFunctions does not support non-string output.");
+     }
      if (forced) {
          const parser = new fast_xml_parser_1.XMLParser();
          const result = parser.parse(`${chatGenerationContent}</tool_input>`);
@@ -109,6 +109,9 @@ export class AnthropicFunctions extends BaseChatModel {
      }
      const chatResult = await this.llm._generate(promptMessages, options, runManager);
      const chatGenerationContent = chatResult.generations[0].message.content;
+     if (typeof chatGenerationContent !== "string") {
+         throw new Error("AnthropicFunctions does not support non-string output.");
+     }
      if (forced) {
          const parser = new XMLParser();
          const result = parser.parse(`${chatGenerationContent}</tool_input>`);
@@ -60,10 +60,15 @@ class NIBittensorChatModel extends base_js_1.BaseChatModel {
      const res = await chat.call([message]);
      */
      async _generate(messages) {
-         const processed_messages = messages.map((message) => ({
-             role: this.messageToOpenAIRole(message),
-             content: message.content,
-         }));
+         const processed_messages = messages.map((message) => {
+             if (typeof message.content !== "string") {
+                 throw new Error("NIBittensorChat does not support non-string output.");
+             }
+             return {
+                 role: this.messageToOpenAIRole(message),
+                 content: message.content,
+             };
+         });
          const generations = [];
          try {
              // Retrieve API KEY
@@ -57,10 +57,15 @@ export class NIBittensorChatModel extends BaseChatModel {
      const res = await chat.call([message]);
      */
      async _generate(messages) {
-         const processed_messages = messages.map((message) => ({
-             role: this.messageToOpenAIRole(message),
-             content: message.content,
-         }));
+         const processed_messages = messages.map((message) => {
+             if (typeof message.content !== "string") {
+                 throw new Error("NIBittensorChat does not support non-string output.");
+             }
+             return {
+                 role: this.messageToOpenAIRole(message),
+                 content: message.content,
+             };
+         });
          const generations = [];
          try {
              // Retrieve API KEY