langchain 0.0.180 → 0.0.182-rc.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/dist/agents/openai/output_parser.cjs +3 -0
- package/dist/agents/openai/output_parser.js +3 -0
- package/dist/base_language/index.cjs +7 -3
- package/dist/base_language/index.d.ts +3 -3
- package/dist/base_language/index.js +7 -3
- package/dist/cache/base.cjs +2 -5
- package/dist/cache/base.js +2 -2
- package/dist/chat_models/base.cjs +9 -1
- package/dist/chat_models/base.js +9 -1
- package/dist/chat_models/bedrock/web.cjs +5 -1
- package/dist/chat_models/bedrock/web.js +5 -1
- package/dist/chat_models/cloudflare_workersai.cjs +8 -1
- package/dist/chat_models/cloudflare_workersai.js +8 -1
- package/dist/chat_models/googlepalm.cjs +16 -7
- package/dist/chat_models/googlepalm.js +16 -7
- package/dist/chat_models/googlevertexai/common.cjs +6 -0
- package/dist/chat_models/googlevertexai/common.js +6 -0
- package/dist/chat_models/iflytek_xinghuo/common.cjs +9 -4
- package/dist/chat_models/iflytek_xinghuo/common.js +9 -4
- package/dist/chat_models/llama_cpp.cjs +23 -4
- package/dist/chat_models/llama_cpp.js +23 -4
- package/dist/chat_models/minimax.cjs +6 -0
- package/dist/chat_models/minimax.js +6 -0
- package/dist/chat_models/openai.cjs +2 -5
- package/dist/chat_models/openai.js +3 -6
- package/dist/chat_models/portkey.cjs +18 -8
- package/dist/chat_models/portkey.js +18 -8
- package/dist/chat_models/yandex.cjs +3 -0
- package/dist/chat_models/yandex.js +3 -0
- package/dist/embeddings/cache_backed.cjs +2 -5
- package/dist/embeddings/cache_backed.js +2 -2
- package/dist/embeddings/voyage.cjs +120 -0
- package/dist/embeddings/voyage.d.ts +66 -0
- package/dist/embeddings/voyage.js +116 -0
- package/dist/experimental/autogpt/prompt.cjs +10 -0
- package/dist/experimental/autogpt/prompt.js +10 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.cjs +6 -0
- package/dist/experimental/chains/violation_of_expectations/violation_of_expectations_chain.js +6 -0
- package/dist/experimental/chat_models/anthropic_functions.cjs +3 -0
- package/dist/experimental/chat_models/anthropic_functions.js +3 -0
- package/dist/experimental/chat_models/bittensor.cjs +9 -4
- package/dist/experimental/chat_models/bittensor.js +9 -4
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/schema/index.cjs +27 -7
- package/dist/schema/index.d.ts +10 -3
- package/dist/schema/index.js +27 -7
- package/dist/schema/output_parser.cjs +25 -2
- package/dist/schema/output_parser.js +25 -2
- package/dist/util/js-sha1/hash.cjs +358 -0
- package/dist/util/js-sha1/hash.d.ts +1 -0
- package/dist/util/js-sha1/hash.js +355 -0
- package/dist/util/stream.cjs +4 -1
- package/dist/util/stream.d.ts +4 -1
- package/dist/util/stream.js +4 -1
- package/embeddings/voyage.cjs +1 -0
- package/embeddings/voyage.d.ts +1 -0
- package/embeddings/voyage.js +1 -0
- package/package.json +12 -5
package/dist/agents/openai/output_parser.cjs
CHANGED

@@ -33,6 +33,9 @@ class OpenAIFunctionsAgentOutputParser extends types_js_1.AgentActionOutputParse
      * @returns A FunctionsAgentAction or AgentFinish object.
      */
     parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
         if (message.additional_kwargs.function_call) {
             // eslint-disable-next-line prefer-destructuring
             const function_call = message.additional_kwargs.function_call;
package/dist/agents/openai/output_parser.js
CHANGED

@@ -30,6 +30,9 @@ export class OpenAIFunctionsAgentOutputParser extends AgentActionOutputParser {
      * @returns A FunctionsAgentAction or AgentFinish object.
      */
     parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
         if (message.additional_kwargs.function_call) {
             // eslint-disable-next-line prefer-destructuring
             const function_call = message.additional_kwargs.function_call;
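Note: the three added lines in both builds guard against the MessageContent union introduced in this release (see package/dist/schema/index.d.ts in the file list). A sketch of what now throws; the message construction below is illustrative, not taken from the diff:

import { AIMessage } from "langchain/schema";

// content may now be an array of content blocks rather than a string
const multiPart = new AIMessage({ content: [{ type: "text", text: "hello" }] });
// parser.parseAIMessage(multiPart) now throws:
// "This agent cannot parse non-string model responses."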
package/dist/base_language/index.cjs
CHANGED

@@ -104,9 +104,13 @@ class BaseLanguageModel extends BaseLangChain {
         }
         this.caller = new async_caller_js_1.AsyncCaller(params ?? {});
     }
-    async getNumTokens(text) {
+    async getNumTokens(content) {
+        // TODO: Figure out correct value.
+        if (typeof content !== "string") {
+            return 0;
+        }
         // fallback to approximate calculation if tiktoken is not available
-        let numTokens = Math.ceil(text.length / 4);
+        let numTokens = Math.ceil(content.length / 4);
         if (!this._encoding) {
             try {
                 this._encoding = await (0, tiktoken_js_1.encodingForModel)("modelName" in this
@@ -118,7 +122,7 @@ class BaseLanguageModel extends BaseLangChain {
             }
         }
         if (this._encoding) {
-            numTokens = this._encoding.encode(text).length;
+            numTokens = this._encoding.encode(content).length;
         }
         return numTokens;
     }
package/dist/base_language/index.d.ts
CHANGED

@@ -1,5 +1,5 @@
 import type { OpenAI as OpenAIClient } from "openai";
-import { BaseCache, BaseMessage, BaseMessageLike, BasePromptValue, LLMResult } from "../schema/index.js";
+import { BaseCache, BaseMessage, BaseMessageLike, BasePromptValue, LLMResult, MessageContent } from "../schema/index.js";
 import { BaseCallbackConfig, CallbackManager, Callbacks } from "../callbacks/manager.js";
 import { AsyncCaller, AsyncCallerParams } from "../util/async_caller.js";
 import { Runnable } from "../schema/runnable/index.js";
@@ -60,7 +60,7 @@ export interface BaseLanguageModelCallOptions extends BaseCallbackConfig {
     signal?: AbortSignal;
 }
 export interface BaseFunctionCallOptions extends BaseLanguageModelCallOptions {
-    function_call?: OpenAIClient.Chat.
+    function_call?: OpenAIClient.Chat.ChatCompletionFunctionCallOption;
     functions?: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];
 }
 export type BaseLanguageModelInput = BasePromptValue | string | BaseMessageLike[];
@@ -86,7 +86,7 @@ export declare abstract class BaseLanguageModel<RunOutput = any, CallOptions ext
     abstract _modelType(): string;
     abstract _llmType(): string;
     private _encoding?;
-    getNumTokens(text: string): Promise<number>;
+    getNumTokens(content: MessageContent): Promise<number>;
     protected static _convertInputToPromptValue(input: BaseLanguageModelInput): BasePromptValue;
     /**
      * Get the identifying parameters of the LLM.
package/dist/base_language/index.js
CHANGED

@@ -100,9 +100,13 @@ export class BaseLanguageModel extends BaseLangChain {
         }
         this.caller = new AsyncCaller(params ?? {});
     }
-    async getNumTokens(text) {
+    async getNumTokens(content) {
+        // TODO: Figure out correct value.
+        if (typeof content !== "string") {
+            return 0;
+        }
         // fallback to approximate calculation if tiktoken is not available
-        let numTokens = Math.ceil(text.length / 4);
+        let numTokens = Math.ceil(content.length / 4);
         if (!this._encoding) {
             try {
                 this._encoding = await encodingForModel("modelName" in this
@@ -114,7 +118,7 @@ export class BaseLanguageModel extends BaseLangChain {
             }
         }
         if (this._encoding) {
-            numTokens = this._encoding.encode(text).length;
+            numTokens = this._encoding.encode(content).length;
         }
         return numTokens;
     }
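Note: in both builds (and the matching .d.ts above), getNumTokens now accepts the full MessageContent union rather than only strings, returning 0 for non-string content until a real count is implemented (per the TODO). A rough usage sketch, assuming a configured subclass such as ChatOpenAI (API key set in the environment):

import { ChatOpenAI } from "langchain/chat_models/openai";

const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" });
await model.getNumTokens("four score and seven years"); // tiktoken count, or ceil(length / 4) fallback
await model.getNumTokens([{ type: "text", text: "hi" }]); // 0 for now (non-string content)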
package/dist/cache/base.cjs
CHANGED

@@ -1,10 +1,7 @@
 "use strict";
-var __importDefault = (this && this.__importDefault) || function (mod) {
-    return (mod && mod.__esModule) ? mod : { "default": mod };
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.serializeGeneration = exports.deserializeStoredGeneration = exports.getCacheKey = void 0;
-const object_hash_1 = __importDefault(require("object-hash"));
+const hash_js_1 = require("../util/js-sha1/hash.cjs");
 const index_js_1 = require("../schema/index.cjs");
 /**
  * This cache key should be consistent across all versions of langchain.
@@ -16,7 +13,7 @@ const index_js_1 = require("../schema/index.cjs");
  *
  * TODO: Make cache key consistent across versions of langchain.
  */
-const getCacheKey = (...strings) => (0, object_hash_1.default)(strings.join("_"));
+const getCacheKey = (...strings) => (0, hash_js_1.insecureHash)(strings.join("_"));
 exports.getCacheKey = getCacheKey;
 function deserializeStoredGeneration(storedGeneration) {
     if (storedGeneration.message !== undefined) {
package/dist/cache/base.js
CHANGED

@@ -1,4 +1,4 @@
-import hash from "object-hash";
+import { insecureHash } from "../util/js-sha1/hash.js";
 import { mapStoredMessageToChatMessage, } from "../schema/index.js";
 /**
  * This cache key should be consistent across all versions of langchain.
@@ -10,7 +10,7 @@ import { mapStoredMessageToChatMessage, } from "../schema/index.js";
  *
  * TODO: Make cache key consistent across versions of langchain.
  */
-export const getCacheKey = (...strings) => hash(strings.join("_"));
+export const getCacheKey = (...strings) => insecureHash(strings.join("_"));
 export function deserializeStoredGeneration(storedGeneration) {
     if (storedGeneration.message !== undefined) {
         return {
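Note: both cache builds swap the external hash dependency for the vendored js-sha1 insecureHash added under package/dist/util/js-sha1/ in this release. Because the key function changes, entries cached under 0.0.180 will no longer be hit after upgrading. A sketch of the new key derivation; the deep import path mirrors the compiled dist layout and may not be a supported entrypoint:

import { insecureHash } from "langchain/dist/util/js-sha1/hash.js";

// getCacheKey joins its inputs with "_" and SHA-1 hashes the result
const key = insecureHash(["prompt text", "llm string key"].join("_"));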
package/dist/chat_models/base.cjs
CHANGED

@@ -13,7 +13,9 @@ function createChatMessageChunkEncoderStream() {
     const textEncoder = new TextEncoder();
     return new TransformStream({
         transform(chunk, controller) {
-            controller.enqueue(textEncoder.encode(chunk.content));
+            controller.enqueue(textEncoder.encode(typeof chunk.content === "string"
+                ? chunk.content
+                : JSON.stringify(chunk.content)));
         },
     });
 }
@@ -263,6 +265,9 @@ class BaseChatModel extends index_js_2.BaseLanguageModel {
     async predict(text, options, callbacks) {
         const message = new index_js_1.HumanMessage(text);
         const result = await this.call([message], options, callbacks);
+        if (typeof result.content !== "string") {
+            throw new Error("Cannot use predict when output is not a string.");
+        }
         return result.content;
     }
 }
@@ -275,6 +280,9 @@ class SimpleChatModel extends BaseChatModel {
     async _generate(messages, options, runManager) {
         const text = await this._call(messages, options, runManager);
         const message = new index_js_1.AIMessage(text);
+        if (typeof message.content !== "string") {
+            throw new Error("Cannot generate with a simple chat model when output is not a string.");
+        }
         return {
             generations: [
                 {
package/dist/chat_models/base.js
CHANGED

@@ -10,7 +10,9 @@ export function createChatMessageChunkEncoderStream() {
     const textEncoder = new TextEncoder();
     return new TransformStream({
         transform(chunk, controller) {
-            controller.enqueue(textEncoder.encode(chunk.content));
+            controller.enqueue(textEncoder.encode(typeof chunk.content === "string"
+                ? chunk.content
+                : JSON.stringify(chunk.content)));
         },
     });
 }
@@ -259,6 +261,9 @@ export class BaseChatModel extends BaseLanguageModel {
     async predict(text, options, callbacks) {
         const message = new HumanMessage(text);
         const result = await this.call([message], options, callbacks);
+        if (typeof result.content !== "string") {
+            throw new Error("Cannot use predict when output is not a string.");
+        }
         return result.content;
     }
 }
@@ -270,6 +275,9 @@ export class SimpleChatModel extends BaseChatModel {
     async _generate(messages, options, runManager) {
         const text = await this._call(messages, options, runManager);
         const message = new AIMessage(text);
+        if (typeof message.content !== "string") {
+            throw new Error("Cannot generate with a simple chat model when output is not a string.");
+        }
         return {
             generations: [
                 {
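Note: two behavioral changes fall out of these hunks. Streamed chunks whose content is structured (non-string) are now JSON-stringified before byte-encoding, and predict(), which is typed to return a string, fails fast instead of silently returning structured content. A standalone sketch of the encoder logic; the function name is illustrative:

const textEncoder = new TextEncoder();
const encodeChunkContent = (content: unknown): Uint8Array =>
  textEncoder.encode(typeof content === "string" ? content : JSON.stringify(content));

encodeChunkContent("hi");                           // bytes of "hi"
encodeChunkContent([{ type: "text", text: "hi" }]); // bytes of the JSON-serialized array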
package/dist/chat_models/bedrock/web.cjs
CHANGED

@@ -206,7 +206,11 @@ class BedrockChat extends base_js_1.SimpleChatModel {
                     finalResult = finalResult.concat(chunk);
                 }
             }
-            return finalResult?.message.content ?? "";
+            const messageContent = finalResult?.message.content;
+            if (messageContent && typeof messageContent !== "string") {
+                throw new Error("Non-string output for ChatBedrock is currently not supported.");
+            }
+            return messageContent ?? "";
         }
         const response = await this._signedFetch(messages, options, {
             bedrockMethod: "invoke",
package/dist/chat_models/bedrock/web.js
CHANGED

@@ -201,7 +201,11 @@ export class BedrockChat extends SimpleChatModel {
                     finalResult = finalResult.concat(chunk);
                 }
             }
-            return finalResult?.message.content ?? "";
+            const messageContent = finalResult?.message.content;
+            if (messageContent && typeof messageContent !== "string") {
+                throw new Error("Non-string output for ChatBedrock is currently not supported.");
+            }
+            return messageContent ?? "";
         }
         const response = await this._signedFetch(messages, options, {
             bedrockMethod: "invoke",
package/dist/chat_models/cloudflare_workersai.cjs
CHANGED

@@ -159,6 +159,9 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
             console.warn(`Unsupported message type passed to Cloudflare: "${message._getType()}"`);
             role = "user";
         }
+        if (typeof message.content !== "string") {
+            throw new Error("ChatCloudflareWorkersAI currently does not support non-string message content.");
+        }
         return {
             role,
             content: message.content,
@@ -184,7 +187,11 @@ class ChatCloudflareWorkersAI extends base_js_1.SimpleChatModel {
                     finalResult = finalResult.concat(chunk);
                 }
             }
-            return finalResult?.message.content ?? "";
+            const messageContent = finalResult?.message.content;
+            if (messageContent && typeof messageContent !== "string") {
+                throw new Error("Non-string output for ChatCloudflareWorkersAI is currently not supported.");
+            }
+            return messageContent ?? "";
         }
     }
 }
package/dist/chat_models/cloudflare_workersai.js
CHANGED

@@ -156,6 +156,9 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
             console.warn(`Unsupported message type passed to Cloudflare: "${message._getType()}"`);
             role = "user";
         }
+        if (typeof message.content !== "string") {
+            throw new Error("ChatCloudflareWorkersAI currently does not support non-string message content.");
+        }
         return {
             role,
             content: message.content,
@@ -181,7 +184,11 @@ export class ChatCloudflareWorkersAI extends SimpleChatModel {
                     finalResult = finalResult.concat(chunk);
                 }
             }
-            return finalResult?.message.content ?? "";
+            const messageContent = finalResult?.message.content;
+            if (messageContent && typeof messageContent !== "string") {
+                throw new Error("Non-string output for ChatCloudflareWorkersAI is currently not supported.");
+            }
+            return messageContent ?? "";
         }
     }
 }
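Note: the Bedrock and Cloudflare hunks above, like the provider hunks that follow (PaLM, Vertex AI, iFlytek, llama.cpp, Minimax), apply the same guard: providers whose wire formats only carry plain text now validate message.content before mapping it. Abstracted as a sketch (this helper does not exist in the package; it is illustrative only):

import type { BaseMessage } from "langchain/schema";

function requireStringContent(message: BaseMessage, provider: string): string {
  if (typeof message.content !== "string") {
    throw new Error(`${provider} does not support non-string message content.`);
  }
  return message.content;
}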
package/dist/chat_models/googlepalm.cjs
CHANGED

@@ -132,6 +132,10 @@ class ChatGooglePaLM extends base_js_1.BaseChatModel {
         const systemMessage = messages.length > 0 && getMessageAuthor(messages[0]) === "system"
             ? messages[0]
             : undefined;
+        if (systemMessage?.content !== undefined &&
+            typeof systemMessage.content !== "string") {
+            throw new Error("Non-string system message content is not supported.");
+        }
         return systemMessage?.content;
     }
     _mapBaseMessagesToPalmMessages(messages) {
@@ -145,13 +149,18 @@ class ChatGooglePaLM extends base_js_1.BaseChatModel {
                 throw new Error(`Google PaLM requires alternate messages between authors`);
             }
         });
-        return nonSystemMessages.map((m) => ({
-            author: getMessageAuthor(m),
-            content: m.content,
-            citationMetadata: {
-                citationSources: m.additional_kwargs.citationSources,
-            },
-        }));
+        return nonSystemMessages.map((m) => {
+            if (typeof m.content !== "string") {
+                throw new Error("ChatGooglePaLM does not support non-string message content.");
+            }
+            return {
+                author: getMessageAuthor(m),
+                content: m.content,
+                citationMetadata: {
+                    citationSources: m.additional_kwargs.citationSources,
+                },
+            };
+        });
     }
     _mapPalmMessagesToChatResult(msgRes) {
         if (msgRes.candidates &&
package/dist/chat_models/googlepalm.js
CHANGED

@@ -129,6 +129,10 @@ export class ChatGooglePaLM extends BaseChatModel {
         const systemMessage = messages.length > 0 && getMessageAuthor(messages[0]) === "system"
             ? messages[0]
             : undefined;
+        if (systemMessage?.content !== undefined &&
+            typeof systemMessage.content !== "string") {
+            throw new Error("Non-string system message content is not supported.");
+        }
         return systemMessage?.content;
     }
     _mapBaseMessagesToPalmMessages(messages) {
@@ -142,13 +146,18 @@ export class ChatGooglePaLM extends BaseChatModel {
                 throw new Error(`Google PaLM requires alternate messages between authors`);
             }
         });
-        return nonSystemMessages.map((m) => ({
-            author: getMessageAuthor(m),
-            content: m.content,
-            citationMetadata: {
-                citationSources: m.additional_kwargs.citationSources,
-            },
-        }));
+        return nonSystemMessages.map((m) => {
+            if (typeof m.content !== "string") {
+                throw new Error("ChatGooglePaLM does not support non-string message content.");
+            }
+            return {
+                author: getMessageAuthor(m),
+                content: m.content,
+                citationMetadata: {
+                    citationSources: m.additional_kwargs.citationSources,
+                },
+            };
+        });
     }
     _mapPalmMessagesToChatResult(msgRes) {
         if (msgRes.candidates &&
package/dist/chat_models/googlevertexai/common.cjs
CHANGED

@@ -76,6 +76,9 @@ class GoogleVertexAIChatMessage {
      * @returns A new Google Vertex AI chat message.
      */
     static fromChatMessage(message, model) {
+        if (typeof message.content !== "string") {
+            throw new Error("ChatGoogleVertexAI does not support non-string message content.");
+        }
         return new GoogleVertexAIChatMessage({
             author: GoogleVertexAIChatMessage.mapMessageTypeToVertexChatAuthor(message, model),
             content: message.content,
@@ -204,6 +207,9 @@ class BaseChatGoogleVertexAI extends base_js_1.BaseChatModel {
         let context = "";
         let conversationMessages = messages;
         if (messages[0]?._getType() === "system") {
+            if (typeof messages[0].content !== "string") {
+                throw new Error("ChatGoogleVertexAI does not support non-string message content.");
+            }
             context = messages[0].content;
             conversationMessages = messages.slice(1);
         }
package/dist/chat_models/googlevertexai/common.js
CHANGED

@@ -73,6 +73,9 @@ export class GoogleVertexAIChatMessage {
      * @returns A new Google Vertex AI chat message.
      */
     static fromChatMessage(message, model) {
+        if (typeof message.content !== "string") {
+            throw new Error("ChatGoogleVertexAI does not support non-string message content.");
+        }
         return new GoogleVertexAIChatMessage({
             author: GoogleVertexAIChatMessage.mapMessageTypeToVertexChatAuthor(message, model),
             content: message.content,
@@ -200,6 +203,9 @@ export class BaseChatGoogleVertexAI extends BaseChatModel {
         let context = "";
         let conversationMessages = messages;
         if (messages[0]?._getType() === "system") {
+            if (typeof messages[0].content !== "string") {
+                throw new Error("ChatGoogleVertexAI does not support non-string message content.");
+            }
             context = messages[0].content;
             conversationMessages = messages.slice(1);
         }
package/dist/chat_models/iflytek_xinghuo/common.cjs
CHANGED

@@ -269,10 +269,15 @@ class BaseChatIflytekXinghuo extends base_js_1.BaseChatModel {
     async _generate(messages, options, runManager) {
         const tokenUsage = {};
         const params = this.invocationParams();
-        const messagesMapped = messages.map((message) => ({
-            role: messageToXinghuoRole(message),
-            content: message.content,
-        }));
+        const messagesMapped = messages.map((message) => {
+            if (typeof message.content !== "string") {
+                throw new Error("ChatIflytekXinghuo does not support non-string message content.");
+            }
+            return {
+                role: messageToXinghuoRole(message),
+                content: message.content,
+            };
+        });
         const data = params.streaming
             ? await (async () => {
                 const streams = await this.completion({ messages: messagesMapped, ...params }, true, options.signal);
package/dist/chat_models/iflytek_xinghuo/common.js
CHANGED

@@ -266,10 +266,15 @@ export class BaseChatIflytekXinghuo extends BaseChatModel {
     async _generate(messages, options, runManager) {
         const tokenUsage = {};
         const params = this.invocationParams();
-        const messagesMapped = messages.map((message) => ({
-            role: messageToXinghuoRole(message),
-            content: message.content,
-        }));
+        const messagesMapped = messages.map((message) => {
+            if (typeof message.content !== "string") {
+                throw new Error("ChatIflytekXinghuo does not support non-string message content.");
+            }
+            return {
+                role: messageToXinghuoRole(message),
+                content: message.content,
+            };
+        });
         const data = params.streaming
             ? await (async () => {
                 const streams = await this.completion({ messages: messagesMapped, ...params }, true, options.signal);
package/dist/chat_models/llama_cpp.cjs
CHANGED

@@ -100,6 +100,9 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
             prompt = this._buildSession(messages);
         }
         else {
+            if (typeof messages[0].content !== "string") {
+                throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+            }
             // If we already have a session then we should just have a single prompt
             prompt = messages[0].content;
         }
@@ -128,8 +131,12 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         // Let's see if we have a system message
         if (messages.findIndex((msg) => msg._getType() === "system") !== -1) {
             const sysMessages = messages.filter((message) => message._getType() === "system");
+            const systemMessageContent = sysMessages[sysMessages.length - 1].content;
+            if (typeof systemMessageContent !== "string") {
+                throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+            }
             // Only use the last provided system message
-            sysMessage = sysMessages[sysMessages.length - 1].content;
+            sysMessage = systemMessageContent;
             // Now filter out the system messages
             noSystemMessages = messages.filter((message) => message._getType() !== "system");
         }
@@ -140,7 +147,11 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         if (noSystemMessages.length > 1) {
             // Is the last message a prompt?
             if (noSystemMessages[noSystemMessages.length - 1]._getType() === "human") {
-                prompt = noSystemMessages[noSystemMessages.length - 1].content;
+                const finalMessageContent = noSystemMessages[noSystemMessages.length - 1].content;
+                if (typeof finalMessageContent !== "string") {
+                    throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+                }
+                prompt = finalMessageContent;
                 interactions = this._convertMessagesToInteractions(noSystemMessages.slice(0, noSystemMessages.length - 1));
             }
             else {
@@ -148,6 +159,9 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
             }
         }
         else {
+            if (typeof noSystemMessages[0].content !== "string") {
+                throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+            }
             // If there was only a single message we assume it's a prompt
             prompt = noSystemMessages[0].content;
         }
@@ -183,9 +197,14 @@ class ChatLlamaCpp extends base_js_1.SimpleChatModel {
         const result = [];
         for (let i = 0; i < messages.length; i += 2) {
             if (i + 1 < messages.length) {
+                const prompt = messages[i].content;
+                const response = messages[i + 1].content;
+                if (typeof prompt !== "string" || typeof response !== "string") {
+                    throw new Error("ChatLlamaCpp does not support non-string message content.");
+                }
                 result.push({
-                    prompt: messages[i].content,
-                    response: messages[i + 1].content,
+                    prompt,
+                    response,
                 });
             }
         }
package/dist/chat_models/llama_cpp.js
CHANGED

@@ -97,6 +97,9 @@ export class ChatLlamaCpp extends SimpleChatModel {
             prompt = this._buildSession(messages);
         }
         else {
+            if (typeof messages[0].content !== "string") {
+                throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+            }
             // If we already have a session then we should just have a single prompt
             prompt = messages[0].content;
         }
@@ -125,8 +128,12 @@ export class ChatLlamaCpp extends SimpleChatModel {
         // Let's see if we have a system message
         if (messages.findIndex((msg) => msg._getType() === "system") !== -1) {
             const sysMessages = messages.filter((message) => message._getType() === "system");
+            const systemMessageContent = sysMessages[sysMessages.length - 1].content;
+            if (typeof systemMessageContent !== "string") {
+                throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+            }
             // Only use the last provided system message
-            sysMessage = sysMessages[sysMessages.length - 1].content;
+            sysMessage = systemMessageContent;
             // Now filter out the system messages
             noSystemMessages = messages.filter((message) => message._getType() !== "system");
         }
@@ -137,7 +144,11 @@ export class ChatLlamaCpp extends SimpleChatModel {
         if (noSystemMessages.length > 1) {
             // Is the last message a prompt?
             if (noSystemMessages[noSystemMessages.length - 1]._getType() === "human") {
-                prompt = noSystemMessages[noSystemMessages.length - 1].content;
+                const finalMessageContent = noSystemMessages[noSystemMessages.length - 1].content;
+                if (typeof finalMessageContent !== "string") {
+                    throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+                }
+                prompt = finalMessageContent;
                 interactions = this._convertMessagesToInteractions(noSystemMessages.slice(0, noSystemMessages.length - 1));
             }
             else {
@@ -145,6 +156,9 @@ export class ChatLlamaCpp extends SimpleChatModel {
             }
         }
         else {
+            if (typeof noSystemMessages[0].content !== "string") {
+                throw new Error("ChatLlamaCpp does not support non-string message content in sessions.");
+            }
             // If there was only a single message we assume it's a prompt
             prompt = noSystemMessages[0].content;
         }
@@ -180,9 +194,14 @@ export class ChatLlamaCpp extends SimpleChatModel {
         const result = [];
         for (let i = 0; i < messages.length; i += 2) {
             if (i + 1 < messages.length) {
+                const prompt = messages[i].content;
+                const response = messages[i + 1].content;
+                if (typeof prompt !== "string" || typeof response !== "string") {
+                    throw new Error("ChatLlamaCpp does not support non-string message content.");
+                }
                 result.push({
-                    prompt: messages[i].content,
-                    response: messages[i + 1].content,
+                    prompt,
+                    response,
                 });
             }
         }
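Note: _convertMessagesToInteractions consumes messages in human/AI pairs, and the new guard validates both halves of each pair before building an interaction. A simplified sketch of the pairing, with contents assumed already validated as strings:

// [h1, a1, h2, a2] -> [{ prompt: h1, response: a1 }, { prompt: h2, response: a2 }]
const contents = ["Hi", "Hello!", "How are you?", "Great."];
const interactions: { prompt: string; response: string }[] = [];
for (let i = 0; i + 1 < contents.length; i += 2) {
  interactions.push({ prompt: contents[i], response: contents[i + 1] });
}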
package/dist/chat_models/minimax.cjs
CHANGED

@@ -328,6 +328,9 @@ class ChatMinimax extends base_js_1.BaseChatModel {
         })
             ?.map((message) => {
             const sender_type = messageToMinimaxRole(message);
+            if (typeof message.content !== "string") {
+                throw new Error("ChatMinimax does not support non-string message content.");
+            }
             return {
                 sender_type,
                 text: message.content,
@@ -520,6 +523,9 @@ class ChatMinimax extends base_js_1.BaseChatModel {
             return;
         }
         const lastSystemMessage = systemMessages[systemMessages.length - 1];
+        if (typeof lastSystemMessage.content !== "string") {
+            throw new Error("ChatMinimax does not support non-string message content.");
+        }
         // setting the default botSetting.
         this.botSetting = [
             {
package/dist/chat_models/minimax.js
CHANGED

@@ -325,6 +325,9 @@ export class ChatMinimax extends BaseChatModel {
         })
             ?.map((message) => {
             const sender_type = messageToMinimaxRole(message);
+            if (typeof message.content !== "string") {
+                throw new Error("ChatMinimax does not support non-string message content.");
+            }
             return {
                 sender_type,
                 text: message.content,
@@ -517,6 +520,9 @@ export class ChatMinimax extends BaseChatModel {
             return;
         }
         const lastSystemMessage = systemMessages[systemMessages.length - 1];
+        if (typeof lastSystemMessage.content !== "string") {
+            throw new Error("ChatMinimax does not support non-string message content.");
+        }
         // setting the default botSetting.
         this.botSetting = [
             {
package/dist/chat_models/openai.cjs
CHANGED

@@ -54,14 +54,10 @@ function messageToOpenAIMessage(message) {
 }
 function openAIResponseToChatMessage(message) {
     switch (message.role) {
-        case "user":
-            return new index_js_1.HumanMessage(message.content || "");
         case "assistant":
             return new index_js_1.AIMessage(message.content || "", {
                 function_call: message.function_call,
             });
-        case "system":
-            return new index_js_1.SystemMessage(message.content || "");
         default:
             return new index_js_1.ChatMessage(message.content || "", message.role ?? "unknown");
     }
@@ -577,7 +573,8 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
         let count = textCount + tokensPerMessage + roleCount + nameCount;
         // From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate
         const openAIMessage = messageToOpenAIMessage(message);
-        if (openAIMessage.role === "function") {
+        if (openAIMessage.role === "function" ||
+            openAIMessage.role === "tool") {
             count -= 2;
         }
         if (openAIMessage.function_call) {