langchain 0.0.182-rc.0 → 0.0.182
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/dist/agents/openai/index.cjs +2 -1
- package/dist/agents/openai/index.js +2 -1
- package/dist/cache/base.d.ts +1 -1
- package/dist/chat_models/bedrock/web.cjs +1 -2
- package/dist/chat_models/bedrock/web.js +1 -2
- package/dist/chat_models/openai.cjs +63 -48
- package/dist/chat_models/openai.d.ts +7 -2
- package/dist/chat_models/openai.js +65 -50
- package/dist/document_loaders/web/apify_dataset.cjs +1 -0
- package/dist/document_loaders/web/apify_dataset.d.ts +13 -13
- package/dist/document_loaders/web/apify_dataset.js +1 -0
- package/dist/runnables/remote.cjs +12 -0
- package/dist/runnables/remote.js +13 -1
- package/dist/schema/index.cjs +92 -1
- package/dist/schema/index.d.ts +31 -2
- package/dist/schema/index.js +89 -0
- package/dist/stores/message/convex.d.ts +3 -3
- package/dist/tools/convert_to_openai.cjs +12 -1
- package/dist/tools/convert_to_openai.d.ts +1 -0
- package/dist/tools/convert_to_openai.js +10 -0
- package/package.json +4 -4
package/dist/agents/openai/index.cjs
CHANGED

@@ -7,6 +7,7 @@ const prompt_js_1 = require("./prompt.cjs");
 const chat_js_1 = require("../../prompts/chat.cjs");
 const llm_chain_js_1 = require("../../chains/llm_chain.cjs");
 const output_parser_js_1 = require("./output_parser.cjs");
+const convert_to_openai_js_1 = require("../../tools/convert_to_openai.cjs");
 /**
  * Checks if the given action is a FunctionsAgentAction.
  * @param action The action to check.
@@ -140,7 +141,7 @@ class OpenAIAgent extends agent_js_1.Agent {
         const llm = this.llmChain.llm;
         const valuesForPrompt = { ...newInputs };
         const valuesForLLM = {
-            …
+            functions: this.tools.map(convert_to_openai_js_1.formatToOpenAIFunction),
         };
         for (const key of this.llmChain.llm.callKeys) {
             if (key in inputs) {

package/dist/agents/openai/index.js
CHANGED

@@ -4,6 +4,7 @@ import { PREFIX } from "./prompt.js";
 import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, } from "../../prompts/chat.js";
 import { LLMChain } from "../../chains/llm_chain.js";
 import { OpenAIFunctionsAgentOutputParser, } from "./output_parser.js";
+import { formatToOpenAIFunction } from "../../tools/convert_to_openai.js";
 /**
  * Checks if the given action is a FunctionsAgentAction.
  * @param action The action to check.
@@ -136,7 +137,7 @@ export class OpenAIAgent extends Agent {
         const llm = this.llmChain.llm;
         const valuesForPrompt = { ...newInputs };
         const valuesForLLM = {
-            …
+            functions: this.tools.map(formatToOpenAIFunction),
        };
         for (const key of this.llmChain.llm.callKeys) {
             if (key in inputs) {

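The agent now maps its tools to OpenAI function definitions itself rather than leaving the conversion to the chat model. A minimal sketch of what formatToOpenAIFunction produces per tool, assuming this release's entrypoint layout; the get_time tool is hypothetical:

import { z } from "zod";
import { DynamicStructuredTool } from "langchain/tools";
import { formatToOpenAIFunction } from "langchain/tools/convert_to_openai";

// A hypothetical tool whose zod schema becomes the function's JSON-schema parameters.
const getTime = new DynamicStructuredTool({
  name: "get_time",
  description: "Get the current time as an ISO string.",
  schema: z.object({ timezone: z.string().optional() }),
  func: async () => new Date().toISOString(),
});

// Yields { name: "get_time", description: "...", parameters: { type: "object", ... } }
const fnDef = formatToOpenAIFunction(getTime);
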
package/dist/cache/base.d.ts
CHANGED

@@ -12,7 +12,7 @@ import { Generation, StoredGeneration } from "../schema/index.js";
 export declare const getCacheKey: (...strings: string[]) => string;
 export declare function deserializeStoredGeneration(storedGeneration: StoredGeneration): {
     text: string;
-    message: import("../schema/index.js").ChatMessage | import("../schema/index.js").HumanMessage | import("../schema/index.js").AIMessage | import("../schema/index.js").SystemMessage | import("../schema/index.js").FunctionMessage;
+    message: import("../schema/index.js").ChatMessage | import("../schema/index.js").HumanMessage | import("../schema/index.js").AIMessage | import("../schema/index.js").SystemMessage | import("../schema/index.js").FunctionMessage | import("../schema/index.js").ToolMessage;
 } | {
     text: string;
     message?: undefined;

package/dist/chat_models/bedrock/web.cjs
CHANGED

@@ -2,7 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ChatBedrock = exports.BedrockChat = exports.convertMessagesToPrompt = exports.convertMessagesToPromptAnthropic = void 0;
 const signature_v4_1 = require("@smithy/signature-v4");
-const credential_provider_node_1 = require("@aws-sdk/credential-provider-node");
 const protocol_http_1 = require("@smithy/protocol-http");
 const eventstream_codec_1 = require("@smithy/eventstream-codec");
 const util_utf8_1 = require("@smithy/util-utf8");
@@ -168,7 +167,7 @@ class BedrockChat extends base_js_1.SimpleChatModel {
             throw new Error("Please set the AWS_DEFAULT_REGION environment variable or pass it to the constructor as the region field.");
         }
         this.region = region;
-        const credentials = fields?.credentials
+        const credentials = fields?.credentials;
         if (!credentials) {
             throw new Error("Please set the AWS credentials in the 'credentials' field.");
         }

package/dist/chat_models/bedrock/web.js
CHANGED

@@ -1,5 +1,4 @@
 import { SignatureV4 } from "@smithy/signature-v4";
-import { defaultProvider } from "@aws-sdk/credential-provider-node";
 import { HttpRequest } from "@smithy/protocol-http";
 import { EventStreamCodec } from "@smithy/eventstream-codec";
 import { fromUtf8, toUtf8 } from "@smithy/util-utf8";
@@ -163,7 +162,7 @@ export class BedrockChat extends SimpleChatModel {
             throw new Error("Please set the AWS_DEFAULT_REGION environment variable or pass it to the constructor as the region field.");
         }
         this.region = region;
-        const credentials = fields?.credentials
+        const credentials = fields?.credentials;
         if (!credentials) {
             throw new Error("Please set the AWS credentials in the 'credentials' field.");
         }

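With the Node-only default credential provider dropped from the web entrypoint, credentials must now be supplied explicitly. A minimal sketch under that assumption; the model ID and key values are placeholders:

import { BedrockChat } from "langchain/chat_models/bedrock/web";

// Sketch: the web entrypoint no longer falls back to the Node credential
// provider chain, so credentials are passed explicitly.
const model = new BedrockChat({
  model: "anthropic.claude-v2", // illustrative model ID
  region: "us-east-1",
  credentials: {
    accessKeyId: "YOUR_ACCESS_KEY_ID",
    secretAccessKey: "YOUR_SECRET_ACCESS_KEY",
  },
});
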
package/dist/chat_models/openai.cjs
CHANGED

@@ -14,7 +14,8 @@ function extractGenericMessageCustomRole(message) {
     if (message.role !== "system" &&
         message.role !== "assistant" &&
         message.role !== "user" &&
-        message.role !== "function"
+        message.role !== "function" &&
+        message.role !== "tool") {
         console.warn(`Unknown message role: ${message.role}`);
     }
     return message.role;
@@ -30,6 +31,8 @@ function messageToOpenAIRole(message) {
             return "user";
         case "function":
             return "function";
+        case "tool":
+            return "tool";
         case "generic": {
             if (!index_js_1.ChatMessage.isInstance(message))
                 throw new Error("Invalid generic chat message");
@@ -39,24 +42,12 @@ function messageToOpenAIRole(message) {
             throw new Error(`Unknown message type: ${type}`);
     }
 }
-function messageToOpenAIMessage(message) {
-    const msg = {
-        content: message.content || null,
-        name: message.name,
-        role: messageToOpenAIRole(message),
-        function_call: message.additional_kwargs.function_call,
-    };
-    if (msg.function_call?.arguments) {
-        // Remove spaces, new line characters etc.
-        msg.function_call.arguments = JSON.stringify(JSON.parse(msg.function_call.arguments));
-    }
-    return msg;
-}
 function openAIResponseToChatMessage(message) {
     switch (message.role) {
         case "assistant":
             return new index_js_1.AIMessage(message.content || "", {
                 function_call: message.function_call,
+                tool_calls: message.tool_calls,
             });
         default:
             return new index_js_1.ChatMessage(message.content || "", message.role ?? "unknown");
@@ -73,6 +64,11 @@ delta, defaultRole) {
             function_call: delta.function_call,
         };
     }
+    else if (delta.tool_calls) {
+        additional_kwargs = {
+            tool_calls: delta.tool_calls,
+        };
+    }
     else {
         additional_kwargs = {};
     }
@@ -92,10 +88,28 @@ delta, defaultRole) {
             name: delta.name,
         });
     }
+    else if (role === "tool") {
+        return new index_js_1.ToolMessageChunk({
+            content,
+            additional_kwargs,
+            tool_call_id: delta.tool_call_id,
+        });
+    }
     else {
         return new index_js_1.ChatMessageChunk({ content, role });
     }
 }
+function convertMessagesToOpenAIParams(messages) {
+    // TODO: Function messages do not support array content, fix cast
+    return messages.map((message) => ({
+        role: messageToOpenAIRole(message),
+        content: message.content,
+        name: message.name,
+        function_call: message.additional_kwargs.function_call,
+        tool_calls: message.additional_kwargs.tool_calls,
+        tool_call_id: message.tool_call_id,
+    }));
+}
 /**
  * Wrapper around OpenAI large language models that use the Chat endpoint.
  *
@@ -126,7 +140,10 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
             "function_call",
             "functions",
             "tools",
+            "tool_choice",
             "promptIndex",
+            "response_format",
+            "seed",
         ];
     }
     get lc_secrets() {
@@ -353,7 +370,11 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
      * Get the parameters used to invoke the model
      */
     invocationParams(options) {
-        …
+        function isStructuredToolArray(tools) {
+            return (tools !== undefined &&
+                tools.every((tool) => Array.isArray(tool.lc_namespace)));
+        }
+        const params = {
             model: this.modelName,
             temperature: this.temperature,
             top_p: this.topP,
@@ -365,13 +386,17 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
             stop: options?.stop ?? this.stop,
             user: this.user,
             stream: this.streaming,
-            functions: options?.functions
-                (options?.tools
-                    ? options?.tools.map(convert_to_openai_js_1.formatToOpenAIFunction)
-                    : undefined),
+            functions: options?.functions,
             function_call: options?.function_call,
+            tools: isStructuredToolArray(options?.tools)
+                ? options?.tools.map(convert_to_openai_js_1.formatToOpenAITool)
+                : options?.tools,
+            tool_choice: options?.tool_choice,
+            response_format: options?.response_format,
+            seed: options?.seed,
             ...this.modelKwargs,
         };
+        return params;
     }
     /** @ignore */
     _identifyingParams() {
@@ -382,13 +407,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
         };
     }
     async *_streamResponseChunks(messages, options, runManager) {
-        const messagesMapped = messages
-            role: messageToOpenAIRole(message),
-            content: message.content,
-            name: message.name,
-            function_call: message.additional_kwargs
-                .function_call,
-        }));
+        const messagesMapped = convertMessagesToOpenAIParams(messages);
         const params = {
             ...this.invocationParams(options),
             messages: messagesMapped,
@@ -408,6 +427,10 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
                     prompt: options.promptIndex ?? 0,
                     completion: choice.index ?? 0,
                 };
+                if (typeof chunk.content !== "string") {
+                    console.log("[WARNING]: Received non-string content from OpenAI. This is currently not supported.");
+                    continue;
+                }
                 const generationChunk = new index_js_1.ChatGenerationChunk({
                     message: chunk,
                     text: chunk.content,
@@ -432,13 +455,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
     async _generate(messages, options, runManager) {
         const tokenUsage = {};
         const params = this.invocationParams(options);
-        const messagesMapped = messages
-            role: messageToOpenAIRole(message),
-            content: message.content,
-            name: message.name,
-            function_call: message.additional_kwargs
-                .function_call,
-        }));
+        const messagesMapped = convertMessagesToOpenAIParams(messages);
         if (params.stream) {
             const stream = this._streamResponseChunks(messages, options, runManager);
             const finalChunks = {};
@@ -457,7 +474,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
             const { functions, function_call } = this.invocationParams(options);
             // OpenAI does not support token usage report under stream mode,
             // fallback to estimation.
-            const promptTokenUsage = await this.
+            const promptTokenUsage = await this.getEstimatedTokenCountFromPrompt(messages, functions, function_call);
             const completionTokenUsage = await this.getNumTokensFromGenerations(generations);
             tokenUsage.promptTokens = promptTokenUsage;
             tokenUsage.completionTokens = completionTokenUsage;
@@ -506,11 +523,9 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
      * Estimate the number of tokens a prompt will use.
      * Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts
      */
-    async 
+    async getEstimatedTokenCountFromPrompt(messages, functions, function_call) {
         // It appears that if functions are present, the first system message is padded with a trailing newline. This
         // was inferred by trying lots of combinations of messages and functions and seeing what the token counts were.
-        // let paddedSystem = false;
-        const openaiMessages = messages.map((m) => messageToOpenAIMessage(m));
         let tokens = (await this.getNumTokensFromMessages(messages)).totalCount;
         // If there are functions, add the function definitions as they count towards token usage
         if (functions && function_call !== "auto") {
@@ -521,7 +536,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
         // If there's a system message _and_ functions are present, subtract four tokens. I assume this is because
         // functions typically add a system message, but reuse the first one if it's already there. This offsets
         // the extra 9 tokens added by the function definitions.
-        if (functions && 
+        if (functions && messages.find((m) => m._getType() === "system")) {
             tokens -= 4;
         }
         // If function_call is 'none', add one token.
@@ -540,8 +555,7 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
      */
     async getNumTokensFromGenerations(generations) {
         const generationUsages = await Promise.all(generations.map(async (generation) => {
-            …
-            if (openAIMessage.function_call) {
+            if (generation.message.additional_kwargs?.function_call) {
                 return (await this.getNumTokensFromMessages([generation.message]))
                     .countPerMessage[0];
             }
@@ -572,19 +586,20 @@ class ChatOpenAI extends base_js_1.BaseChatModel {
                 : 0;
             let count = textCount + tokensPerMessage + roleCount + nameCount;
             // From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate
-            const openAIMessage = 
-            if (openAIMessage.
-                openAIMessage.role === "tool") {
+            const openAIMessage = message;
+            if (openAIMessage._getType() === "function") {
                 count -= 2;
             }
-            if (openAIMessage.function_call) {
+            if (openAIMessage.additional_kwargs?.function_call) {
                 count += 3;
             }
-            if (openAIMessage.function_call?.name) {
-                count += await this.getNumTokens(openAIMessage.function_call?.name);
+            if (openAIMessage?.additional_kwargs.function_call?.name) {
+                count += await this.getNumTokens(openAIMessage.additional_kwargs.function_call?.name);
             }
-            if (openAIMessage.function_call?.arguments) {
-                count += await this.getNumTokens(
+            if (openAIMessage.additional_kwargs.function_call?.arguments) {
+                count += await this.getNumTokens(
+                // Remove newlines and spaces
+                JSON.stringify(JSON.parse(openAIMessage.additional_kwargs.function_call?.arguments)));
             }
             totalCount += count;
             return count;

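Together these changes let callers pass raw OpenAI tool definitions, plus the new tool_choice, response_format, and seed options, straight through invocationParams. A usage sketch, assuming a model that supports tool calling; the get_weather tool is hypothetical:

import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage } from "langchain/schema";

async function main() {
  const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106" }); // illustrative model
  const result = await model.invoke(
    [new HumanMessage("What is the weather in Paris?")],
    {
      // Raw OpenAI tool definitions now pass through unchanged.
      tools: [
        {
          type: "function",
          function: {
            name: "get_weather", // hypothetical tool
            description: "Look up the current weather for a city.",
            parameters: {
              type: "object",
              properties: { city: { type: "string" } },
              required: ["city"],
            },
          },
        },
      ],
      tool_choice: "auto",
      seed: 42, // best-effort deterministic sampling
    }
  );
  // Requested tool invocations arrive in additional_kwargs.tool_calls.
  console.log(result.additional_kwargs.tool_calls);
}

main();
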
package/dist/chat_models/openai.d.ts
CHANGED

@@ -15,8 +15,13 @@ interface OpenAILLMOutput {
     tokenUsage: TokenUsage;
 }
 export interface ChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions {
-    tools?: StructuredTool[];
+    tools?: StructuredTool[] | OpenAIClient.ChatCompletionTool[];
+    tool_choice?: OpenAIClient.ChatCompletionToolChoiceOption;
     promptIndex?: number;
+    response_format?: {
+        type: "json_object";
+    };
+    seed?: number;
 }
 /**
  * Wrapper around OpenAI large language models that use the Chat endpoint.
@@ -94,7 +99,7 @@ export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = Chat
      * Estimate the number of tokens a prompt will use.
      * Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts
      */
-    private 
+    private getEstimatedTokenCountFromPrompt;
     /**
      * Estimate the number of tokens an array of generations have used.
     */

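The new declarations surface OpenAI's JSON mode and sampling seed as typed call options. A sketch of response_format, assuming a model version that supports JSON mode:

import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage } from "langchain/schema";

async function jsonMode() {
  const model = new ChatOpenAI({ modelName: "gpt-4-1106-preview" }); // illustrative model
  const result = await model.invoke(
    [new HumanMessage("Return a JSON object with keys `city` and `country` for Paris.")],
    { response_format: { type: "json_object" } } // constrains output to valid JSON
  );
  return JSON.parse(result.content as string);
}
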
package/dist/chat_models/openai.js
CHANGED

@@ -1,6 +1,6 @@
 import { OpenAI as OpenAIClient } from "openai";
-import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, } from "../schema/index.js";
-import { 
+import { AIMessage, AIMessageChunk, ChatGenerationChunk, ChatMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, SystemMessageChunk, ToolMessageChunk, } from "../schema/index.js";
+import { formatToOpenAITool } from "../tools/convert_to_openai.js";
 import { getEndpoint } from "../util/azure.js";
 import { getEnvironmentVariable } from "../util/env.js";
 import { promptLayerTrackRequest } from "../util/prompt-layer.js";
@@ -11,7 +11,8 @@ function extractGenericMessageCustomRole(message) {
     if (message.role !== "system" &&
         message.role !== "assistant" &&
         message.role !== "user" &&
-        message.role !== "function"
+        message.role !== "function" &&
+        message.role !== "tool") {
         console.warn(`Unknown message role: ${message.role}`);
     }
     return message.role;
@@ -27,6 +28,8 @@ function messageToOpenAIRole(message) {
             return "user";
         case "function":
             return "function";
+        case "tool":
+            return "tool";
         case "generic": {
             if (!ChatMessage.isInstance(message))
                 throw new Error("Invalid generic chat message");
@@ -36,24 +39,12 @@ function messageToOpenAIRole(message) {
             throw new Error(`Unknown message type: ${type}`);
     }
 }
-function messageToOpenAIMessage(message) {
-    const msg = {
-        content: message.content || null,
-        name: message.name,
-        role: messageToOpenAIRole(message),
-        function_call: message.additional_kwargs.function_call,
-    };
-    if (msg.function_call?.arguments) {
-        // Remove spaces, new line characters etc.
-        msg.function_call.arguments = JSON.stringify(JSON.parse(msg.function_call.arguments));
-    }
-    return msg;
-}
 function openAIResponseToChatMessage(message) {
     switch (message.role) {
         case "assistant":
             return new AIMessage(message.content || "", {
                 function_call: message.function_call,
+                tool_calls: message.tool_calls,
             });
         default:
             return new ChatMessage(message.content || "", message.role ?? "unknown");
@@ -70,6 +61,11 @@ delta, defaultRole) {
             function_call: delta.function_call,
         };
     }
+    else if (delta.tool_calls) {
+        additional_kwargs = {
+            tool_calls: delta.tool_calls,
+        };
+    }
     else {
         additional_kwargs = {};
     }
@@ -89,10 +85,28 @@ delta, defaultRole) {
             name: delta.name,
         });
     }
+    else if (role === "tool") {
+        return new ToolMessageChunk({
+            content,
+            additional_kwargs,
+            tool_call_id: delta.tool_call_id,
+        });
+    }
     else {
         return new ChatMessageChunk({ content, role });
     }
 }
+function convertMessagesToOpenAIParams(messages) {
+    // TODO: Function messages do not support array content, fix cast
+    return messages.map((message) => ({
+        role: messageToOpenAIRole(message),
+        content: message.content,
+        name: message.name,
+        function_call: message.additional_kwargs.function_call,
+        tool_calls: message.additional_kwargs.tool_calls,
+        tool_call_id: message.tool_call_id,
+    }));
+}
 /**
  * Wrapper around OpenAI large language models that use the Chat endpoint.
  *
@@ -123,7 +137,10 @@ export class ChatOpenAI extends BaseChatModel {
             "function_call",
             "functions",
             "tools",
+            "tool_choice",
             "promptIndex",
+            "response_format",
+            "seed",
         ];
     }
     get lc_secrets() {
@@ -350,7 +367,11 @@ export class ChatOpenAI extends BaseChatModel {
      * Get the parameters used to invoke the model
      */
     invocationParams(options) {
-        …
+        function isStructuredToolArray(tools) {
+            return (tools !== undefined &&
+                tools.every((tool) => Array.isArray(tool.lc_namespace)));
+        }
+        const params = {
             model: this.modelName,
             temperature: this.temperature,
             top_p: this.topP,
@@ -362,13 +383,17 @@ export class ChatOpenAI extends BaseChatModel {
             stop: options?.stop ?? this.stop,
             user: this.user,
             stream: this.streaming,
-            functions: options?.functions
-                (options?.tools
-                    ? options?.tools.map(formatToOpenAIFunction)
-                    : undefined),
+            functions: options?.functions,
             function_call: options?.function_call,
+            tools: isStructuredToolArray(options?.tools)
+                ? options?.tools.map(formatToOpenAITool)
+                : options?.tools,
+            tool_choice: options?.tool_choice,
+            response_format: options?.response_format,
+            seed: options?.seed,
             ...this.modelKwargs,
         };
+        return params;
     }
     /** @ignore */
     _identifyingParams() {
@@ -379,13 +404,7 @@ export class ChatOpenAI extends BaseChatModel {
         };
     }
     async *_streamResponseChunks(messages, options, runManager) {
-        const messagesMapped = messages
-            role: messageToOpenAIRole(message),
-            content: message.content,
-            name: message.name,
-            function_call: message.additional_kwargs
-                .function_call,
-        }));
+        const messagesMapped = convertMessagesToOpenAIParams(messages);
         const params = {
             ...this.invocationParams(options),
             messages: messagesMapped,
@@ -405,6 +424,10 @@ export class ChatOpenAI extends BaseChatModel {
                     prompt: options.promptIndex ?? 0,
                     completion: choice.index ?? 0,
                 };
+                if (typeof chunk.content !== "string") {
+                    console.log("[WARNING]: Received non-string content from OpenAI. This is currently not supported.");
+                    continue;
+                }
                 const generationChunk = new ChatGenerationChunk({
                     message: chunk,
                     text: chunk.content,
@@ -429,13 +452,7 @@ export class ChatOpenAI extends BaseChatModel {
     async _generate(messages, options, runManager) {
         const tokenUsage = {};
         const params = this.invocationParams(options);
-        const messagesMapped = messages
-            role: messageToOpenAIRole(message),
-            content: message.content,
-            name: message.name,
-            function_call: message.additional_kwargs
-                .function_call,
-        }));
+        const messagesMapped = convertMessagesToOpenAIParams(messages);
         if (params.stream) {
             const stream = this._streamResponseChunks(messages, options, runManager);
             const finalChunks = {};
@@ -454,7 +471,7 @@ export class ChatOpenAI extends BaseChatModel {
             const { functions, function_call } = this.invocationParams(options);
             // OpenAI does not support token usage report under stream mode,
             // fallback to estimation.
-            const promptTokenUsage = await this.
+            const promptTokenUsage = await this.getEstimatedTokenCountFromPrompt(messages, functions, function_call);
             const completionTokenUsage = await this.getNumTokensFromGenerations(generations);
             tokenUsage.promptTokens = promptTokenUsage;
             tokenUsage.completionTokens = completionTokenUsage;
@@ -503,11 +520,9 @@ export class ChatOpenAI extends BaseChatModel {
      * Estimate the number of tokens a prompt will use.
      * Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts
      */
-    async 
+    async getEstimatedTokenCountFromPrompt(messages, functions, function_call) {
         // It appears that if functions are present, the first system message is padded with a trailing newline. This
         // was inferred by trying lots of combinations of messages and functions and seeing what the token counts were.
-        // let paddedSystem = false;
-        const openaiMessages = messages.map((m) => messageToOpenAIMessage(m));
         let tokens = (await this.getNumTokensFromMessages(messages)).totalCount;
         // If there are functions, add the function definitions as they count towards token usage
         if (functions && function_call !== "auto") {
@@ -518,7 +533,7 @@ export class ChatOpenAI extends BaseChatModel {
         // If there's a system message _and_ functions are present, subtract four tokens. I assume this is because
         // functions typically add a system message, but reuse the first one if it's already there. This offsets
         // the extra 9 tokens added by the function definitions.
-        if (functions && 
+        if (functions && messages.find((m) => m._getType() === "system")) {
             tokens -= 4;
         }
         // If function_call is 'none', add one token.
@@ -537,8 +552,7 @@ export class ChatOpenAI extends BaseChatModel {
      */
     async getNumTokensFromGenerations(generations) {
         const generationUsages = await Promise.all(generations.map(async (generation) => {
-            …
-            if (openAIMessage.function_call) {
+            if (generation.message.additional_kwargs?.function_call) {
                 return (await this.getNumTokensFromMessages([generation.message]))
                     .countPerMessage[0];
             }
@@ -569,19 +583,20 @@ export class ChatOpenAI extends BaseChatModel {
                 : 0;
             let count = textCount + tokensPerMessage + roleCount + nameCount;
             // From: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts messageTokenEstimate
-            const openAIMessage = 
-            if (openAIMessage.
-                openAIMessage.role === "tool") {
+            const openAIMessage = message;
+            if (openAIMessage._getType() === "function") {
                 count -= 2;
             }
-            if (openAIMessage.function_call) {
+            if (openAIMessage.additional_kwargs?.function_call) {
                 count += 3;
             }
-            if (openAIMessage.function_call?.name) {
-                count += await this.getNumTokens(openAIMessage.function_call?.name);
+            if (openAIMessage?.additional_kwargs.function_call?.name) {
+                count += await this.getNumTokens(openAIMessage.additional_kwargs.function_call?.name);
             }
-            if (openAIMessage.function_call?.arguments) {
-                count += await this.getNumTokens(
+            if (openAIMessage.additional_kwargs.function_call?.arguments) {
+                count += await this.getNumTokens(
+                // Remove newlines and spaces
+                JSON.stringify(JSON.parse(openAIMessage.additional_kwargs.function_call?.arguments)));
             }
             totalCount += count;
             return count;

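Since invocationParams detects StructuredTool instances via lc_namespace and converts them with formatToOpenAITool, LangChain tools and raw OpenAI tool objects can both be passed in the same tools option. A sketch with a hypothetical calculator tool:

import { z } from "zod";
import { ChatOpenAI } from "langchain/chat_models/openai";
import { DynamicStructuredTool } from "langchain/tools";

const add = new DynamicStructuredTool({
  name: "add",
  description: "Add two numbers.",
  schema: z.object({ a: z.number(), b: z.number() }),
  func: async ({ a, b }) => String(a + b),
});

async function run() {
  const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106" }); // illustrative model
  // The StructuredTool is converted with formatToOpenAITool before the request.
  const response = await model.invoke("What is 2 + 3?", {
    tools: [add],
    tool_choice: "auto",
  });
  return response.additional_kwargs.tool_calls;
}
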
package/dist/document_loaders/web/apify_dataset.d.ts
CHANGED

@@ -1,22 +1,22 @@
-import { ApifyClient, ApifyClientOptions, 
-import { Document } from "../../document.js";
+import { ActorCallOptions, ApifyClient, ApifyClientOptions, TaskCallOptions } from "apify-client";
 import { BaseDocumentLoader, DocumentLoader } from "../base.js";
+import { Document } from "../../document.js";
 /**
  * A type that represents a function that takes a single object (an Apify
  * dataset item) and converts it to an instance of the Document class.
  */
-export type ApifyDatasetMappingFunction = (item: Record<string | number, unknown>) => Document
+export type ApifyDatasetMappingFunction<Metadata extends Record<string, any>> = (item: Record<string | number, unknown>) => Document<Metadata>;
 /**
  * A class that extends the BaseDocumentLoader and implements the
  * DocumentLoader interface. It represents a document loader that loads
  * documents from an Apify dataset.
  */
-export declare class ApifyDatasetLoader extends BaseDocumentLoader implements DocumentLoader {
+export declare class ApifyDatasetLoader<Metadata extends Record<string, any>> extends BaseDocumentLoader implements DocumentLoader {
     protected apifyClient: ApifyClient;
     protected datasetId: string;
-    protected datasetMappingFunction: (item: Record<string | number, unknown>) => Document
+    protected datasetMappingFunction: (item: Record<string | number, unknown>) => Document<Metadata>;
     constructor(datasetId: string, config: {
-        datasetMappingFunction: ApifyDatasetMappingFunction
+        datasetMappingFunction: ApifyDatasetMappingFunction<Metadata>;
         clientOptions?: ApifyClientOptions;
     });
     private static _getApifyApiToken;
@@ -26,7 +26,7 @@ export declare class ApifyDatasetLoader extends BaseDocumentLoader implements Do
      * instances.
      * @returns An array of Document instances.
     */
-    load(): Promise<Document[]>;
+    load(): Promise<Document<Metadata>[]>;
    /**
     * Create an ApifyDatasetLoader by calling an Actor on the Apify platform and waiting for its results to be ready.
     * @param actorId The ID or name of the Actor on the Apify platform.
@@ -35,11 +35,11 @@ export declare class ApifyDatasetLoader extends BaseDocumentLoader implements Do
     * @param options.datasetMappingFunction A function that takes a single object (an Apify dataset item) and converts it to an instance of the Document class.
     * @returns An instance of `ApifyDatasetLoader` with the results from the Actor run.
     */
-    static fromActorCall(actorId: string, input: Record<string | number, unknown>, config: {
+    static fromActorCall<Metadata extends Record<string, any>>(actorId: string, input: Record<string | number, unknown>, config: {
         callOptions?: ActorCallOptions;
         clientOptions?: ApifyClientOptions;
-        datasetMappingFunction: ApifyDatasetMappingFunction
-    }): Promise<ApifyDatasetLoader
+        datasetMappingFunction: ApifyDatasetMappingFunction<Metadata>;
+    }): Promise<ApifyDatasetLoader<Metadata>>;
    /**
     * Create an ApifyDatasetLoader by calling a saved Actor task on the Apify platform and waiting for its results to be ready.
     * @param taskId The ID or name of the task on the Apify platform.
@@ -48,9 +48,9 @@ export declare class ApifyDatasetLoader extends BaseDocumentLoader implements Do
     * @param options.datasetMappingFunction A function that takes a single object (an Apify dataset item) and converts it to an instance of the Document class.
     * @returns An instance of `ApifyDatasetLoader` with the results from the task's run.
     */
-    static fromActorTaskCall(taskId: string, input: Record<string | number, unknown>, config: {
+    static fromActorTaskCall<Metadata extends Record<string, any>>(taskId: string, input: Record<string | number, unknown>, config: {
         callOptions?: TaskCallOptions;
         clientOptions?: ApifyClientOptions;
-        datasetMappingFunction: ApifyDatasetMappingFunction
-    }): Promise<ApifyDatasetLoader
+        datasetMappingFunction: ApifyDatasetMappingFunction<Metadata>;
+    }): Promise<ApifyDatasetLoader<Metadata>>;
 }

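The loader is now generic over the document metadata shape, so load() returns Document<Metadata>[] rather than untyped documents. A sketch with a hypothetical dataset ID and item fields:

import { ApifyDatasetLoader } from "langchain/document_loaders/web/apify_dataset";
import { Document } from "langchain/document";

type PageMetadata = { source: string };

const loader = new ApifyDatasetLoader<PageMetadata>("your-dataset-id", {
  datasetMappingFunction: (item) =>
    new Document<PageMetadata>({
      pageContent: String(item.text ?? ""), // hypothetical item field
      metadata: { source: String(item.url ?? "") }, // hypothetical item field
    }),
});

const docs = await loader.load(); // typed as Document<PageMetadata>[]
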
package/dist/runnables/remote.cjs
CHANGED

@@ -52,6 +52,12 @@ function revive(obj) {
             name: obj.name,
         });
     }
+    if (obj.type === "tool") {
+        return new index_js_2.ToolMessage({
+            content: obj.content,
+            tool_call_id: obj.tool_call_id,
+        });
+    }
     if (obj.type === "ai") {
         return new index_js_2.AIMessage({
             content: obj.content,
@@ -81,6 +87,12 @@ function revive(obj) {
             name: obj.name,
         });
     }
+    if (obj.type === "tool") {
+        return new index_js_2.ToolMessageChunk({
+            content: obj.content,
+            tool_call_id: obj.tool_call_id,
+        });
+    }
     if (obj.type === "ai") {
         return new index_js_2.AIMessageChunk({
             content: obj.content,

package/dist/runnables/remote.js
CHANGED

@@ -1,7 +1,7 @@
 import { Runnable } from "../schema/runnable/index.js";
 import { getBytes, getLines, getMessages } from "../util/event-source-parse.js";
 import { Document } from "../document.js";
-import { AIMessage, AIMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, } from "../schema/index.js";
+import { AIMessage, AIMessageChunk, ChatMessage, ChatMessageChunk, FunctionMessage, FunctionMessageChunk, HumanMessage, HumanMessageChunk, SystemMessage, SystemMessageChunk, ToolMessage, ToolMessageChunk, } from "../schema/index.js";
 import { StringPromptValue } from "../prompts/base.js";
 import { ChatPromptValue } from "../prompts/chat.js";
 import { IterableReadableStream } from "../util/stream.js";
@@ -49,6 +49,12 @@ function revive(obj) {
             name: obj.name,
         });
     }
+    if (obj.type === "tool") {
+        return new ToolMessage({
+            content: obj.content,
+            tool_call_id: obj.tool_call_id,
+        });
+    }
     if (obj.type === "ai") {
         return new AIMessage({
             content: obj.content,
@@ -78,6 +84,12 @@ function revive(obj) {
             name: obj.name,
         });
     }
+    if (obj.type === "tool") {
+        return new ToolMessageChunk({
+            content: obj.content,
+            tool_call_id: obj.tool_call_id,
+        });
+    }
     if (obj.type === "ai") {
         return new AIMessageChunk({
             content: obj.content,

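RemoteRunnable responses can now round-trip tool messages: a serialized { type: "tool", ... } object is revived into a ToolMessage (or ToolMessageChunk when streaming). The equivalent construction, with a hypothetical tool call ID:

import { ToolMessage } from "langchain/schema";

// Wire shape now recognized by revive():
const serialized = {
  type: "tool",
  content: "72°F and sunny",
  tool_call_id: "call_abc123", // hypothetical ID issued by the model
};

// What revive() produces for it, constructed directly:
const message = new ToolMessage({
  content: serialized.content,
  tool_call_id: serialized.tool_call_id,
});
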
package/dist/schema/index.cjs
CHANGED

@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.mapStoredMessageToChatMessage = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessageChunk = exports.isBaseMessage = exports.ChatMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
+exports.Docstore = exports.BaseEntityStore = exports.BaseFileStore = exports.BaseCache = exports.BaseListChatMessageHistory = exports.BaseChatMessageHistory = exports.BasePromptValue = exports.mapStoredMessageToChatMessage = exports.ChatGenerationChunk = exports.ChatMessageChunk = exports.coerceMessageLikeToMessage = exports.isBaseMessageChunk = exports.isBaseMessage = exports.ChatMessage = exports.ToolMessageChunk = exports.ToolMessage = exports.FunctionMessageChunk = exports.FunctionMessage = exports.SystemChatMessage = exports.AIChatMessage = exports.HumanChatMessage = exports.BaseChatMessage = exports.SystemMessageChunk = exports.SystemMessage = exports.AIMessageChunk = exports.AIMessage = exports.HumanMessageChunk = exports.HumanMessage = exports.BaseMessageChunk = exports.BaseMessage = exports.GenerationChunk = exports.RUN_KEY = void 0;
 const serializable_js_1 = require("../load/serializable.cjs");
 exports.RUN_KEY = "__run";
 /**
@@ -154,6 +154,10 @@ class BaseMessage extends serializable_js_1.Serializable {
     }
 }
 exports.BaseMessage = BaseMessage;
+function isOpenAIToolCallArray(value) {
+    return (Array.isArray(value) &&
+        value.every((v) => typeof v.index === "number"));
+}
 /**
  * Represents a chunk of a message, which can be concatenated with other
  * message chunks. It includes a method `_merge_kwargs_dict()` for merging
@@ -178,6 +182,31 @@ class BaseMessageChunk extends BaseMessage {
                 typeof merged[key] === "object") {
                 merged[key] = this._mergeAdditionalKwargs(merged[key], value);
             }
+            else if (key === "tool_calls" &&
+                isOpenAIToolCallArray(merged[key]) &&
+                isOpenAIToolCallArray(value)) {
+                for (const toolCall of value) {
+                    if (merged[key]?.[toolCall.index] !== undefined) {
+                        merged[key] = merged[key]?.map((value, i) => {
+                            if (i !== toolCall.index) {
+                                return value;
+                            }
+                            return {
+                                ...value,
+                                ...toolCall,
+                                function: {
+                                    name: toolCall.function.name ?? value.function.name,
+                                    arguments: (value.function.arguments ?? "") +
+                                        (toolCall.function.arguments ?? ""),
+                                },
+                            };
+                        });
+                    }
+                    else {
+                        merged[key][toolCall.index] = toolCall;
+                    }
+                }
+            }
             else {
                 throw new Error(`additional_kwargs[${key}] already exists in this message chunk.`);
             }
@@ -340,6 +369,62 @@ class FunctionMessageChunk extends BaseMessageChunk {
     }
 }
 exports.FunctionMessageChunk = FunctionMessageChunk;
+/**
+ * Represents a tool message in a conversation.
+ */
+class ToolMessage extends BaseMessage {
+    static lc_name() {
+        return "ToolMessage";
+    }
+    constructor(fields, tool_call_id, name) {
+        if (typeof fields === "string") {
+            // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion
+            fields = { content: fields, name, tool_call_id: tool_call_id };
+        }
+        super(fields);
+        Object.defineProperty(this, "tool_call_id", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.tool_call_id = fields.tool_call_id;
+    }
+    _getType() {
+        return "tool";
+    }
+}
+exports.ToolMessage = ToolMessage;
+/**
+ * Represents a chunk of a function message, which can be concatenated
+ * with other function message chunks.
+ */
+class ToolMessageChunk extends BaseMessageChunk {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "tool_call_id", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.tool_call_id = fields.tool_call_id;
+    }
+    static lc_name() {
+        return "ToolMessageChunk";
+    }
+    _getType() {
+        return "tool";
+    }
+    concat(chunk) {
+        return new ToolMessageChunk({
+            content: mergeContent(this.content, chunk.content),
+            additional_kwargs: ToolMessageChunk._mergeAdditionalKwargs(this.additional_kwargs, chunk.additional_kwargs),
+            tool_call_id: this.tool_call_id,
+        });
+    }
+}
+exports.ToolMessageChunk = ToolMessageChunk;
 /**
  * Represents a chat message in a conversation.
  */
@@ -477,6 +562,7 @@ function mapV1MessageToStoredMessage(message) {
             content: v1Message.text,
             role: v1Message.role,
             name: undefined,
+            tool_call_id: undefined,
         },
     };
 }
@@ -495,6 +581,11 @@ function mapStoredMessageToChatMessage(message) {
                 throw new Error("Name must be defined for function messages");
             }
             return new FunctionMessage(storedMessage.data);
+        case "tool":
+            if (storedMessage.data.tool_call_id === undefined) {
+                throw new Error("Tool call ID must be defined for tool messages");
+            }
+            return new ToolMessage(storedMessage.data);
         case "chat": {
             if (storedMessage.data.role === undefined) {
                 throw new Error("Role must be defined for chat messages");

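The new tool_calls branch in _mergeAdditionalKwargs stitches streamed tool-call fragments together by index, concatenating function.arguments strings instead of throwing on the duplicate key. A sketch of two chunks merging; IDs and arguments are illustrative:

import { AIMessageChunk } from "langchain/schema";

// Two streamed fragments of the same tool call (index 0); the merge
// concatenates the JSON argument fragments.
const part1 = {
  index: 0,
  id: "call_1",
  type: "function" as const,
  function: { name: "add", arguments: '{"a": 2,' },
};
const part2 = {
  index: 0,
  id: "call_1",
  type: "function" as const,
  function: { name: "add", arguments: ' "b": 3}' },
};

const merged = new AIMessageChunk({
  content: "",
  additional_kwargs: { tool_calls: [part1] },
}).concat(new AIMessageChunk({
  content: "",
  additional_kwargs: { tool_calls: [part2] },
}));

// merged.additional_kwargs.tool_calls?.[0].function.arguments === '{"a": 2, "b": 3}'
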
package/dist/schema/index.d.ts
CHANGED

@@ -53,6 +53,7 @@ export interface StoredMessageData {
     content: string;
     role: string | undefined;
     name: string | undefined;
+    tool_call_id: string | undefined;
     additional_kwargs?: Record<string, any>;
 }
 export interface StoredMessage {
@@ -63,7 +64,7 @@ export interface StoredGeneration {
     text: string;
     message?: StoredMessage;
 }
-export type MessageType = "human" | "ai" | "generic" | "system" | "function";
+export type MessageType = "human" | "ai" | "generic" | "system" | "function" | "tool";
 export type MessageContent = string | {
     type: "text" | "image_url";
     text?: string;
@@ -76,6 +77,7 @@ export interface BaseMessageFields {
     name?: string;
     additional_kwargs?: {
         function_call?: OpenAIClient.Chat.ChatCompletionMessage.FunctionCall;
+        tool_calls?: OpenAIClient.Chat.ChatCompletionMessageToolCall[];
         [key: string]: unknown;
     };
 }
@@ -85,6 +87,9 @@ export interface ChatMessageFieldsWithRole extends BaseMessageFields {
 export interface FunctionMessageFieldsWithName extends BaseMessageFields {
     name: string;
 }
+export interface ToolMessageFieldsWithToolCallId extends BaseMessageFields {
+    tool_call_id: string;
+}
 /**
  * Base class for all types of messages in a conversation. It includes
  * properties like `content`, `name`, and `additional_kwargs`. It also
@@ -112,6 +117,9 @@ export declare abstract class BaseMessage extends Serializable implements BaseMe
     toDict(): StoredMessage;
     toChunk(): BaseMessageChunk;
 }
+export type OpenAIToolCall = OpenAIClient.ChatCompletionMessageToolCall & {
+    index: number;
+};
 /**
  * Represents a chunk of a message, which can be concatenated with other
  * message chunks. It includes a method `_merge_kwargs_dict()` for merging
@@ -211,6 +219,27 @@ export declare class FunctionMessageChunk extends BaseMessageChunk {
     _getType(): MessageType;
     concat(chunk: FunctionMessageChunk): FunctionMessageChunk;
 }
+/**
+ * Represents a tool message in a conversation.
+ */
+export declare class ToolMessage extends BaseMessage {
+    static lc_name(): string;
+    tool_call_id: string;
+    constructor(fields: ToolMessageFieldsWithToolCallId);
+    constructor(fields: string | BaseMessageFields, tool_call_id: string, name?: string);
+    _getType(): MessageType;
+}
+/**
+ * Represents a chunk of a function message, which can be concatenated
+ * with other function message chunks.
+ */
+export declare class ToolMessageChunk extends BaseMessageChunk {
+    tool_call_id: string;
+    constructor(fields: ToolMessageFieldsWithToolCallId);
+    static lc_name(): string;
+    _getType(): MessageType;
+    concat(chunk: ToolMessageChunk): ToolMessageChunk;
+}
 /**
  * Represents a chat message in a conversation.
 */
@@ -252,7 +281,7 @@ export declare class ChatGenerationChunk extends GenerationChunk implements Chat
     constructor(fields: ChatGenerationChunkFields);
     concat(chunk: ChatGenerationChunk): ChatGenerationChunk;
 }
-export declare function mapStoredMessageToChatMessage(message: StoredMessage): ChatMessage | HumanMessage | AIMessage | SystemMessage | FunctionMessage;
+export declare function mapStoredMessageToChatMessage(message: StoredMessage): ChatMessage | HumanMessage | AIMessage | SystemMessage | FunctionMessage | ToolMessage;
 export interface ChatResult {
     generations: ChatGeneration[];
     llmOutput?: Record<string, any>;

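Both ToolMessage constructor overloads from the declarations above, as a quick sketch:

import { ToolMessage } from "langchain/schema";

// Fields-object form:
const viaFields = new ToolMessage({ content: "42", tool_call_id: "call_123" });

// String-content form (content, tool_call_id, optional name):
const viaString = new ToolMessage("42", "call_123");
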
package/dist/schema/index.js
CHANGED

@@ -149,6 +149,10 @@ export class BaseMessage extends Serializable {
     }
 }
 }
+function isOpenAIToolCallArray(value) {
+    return (Array.isArray(value) &&
+        value.every((v) => typeof v.index === "number"));
+}
 /**
  * Represents a chunk of a message, which can be concatenated with other
  * message chunks. It includes a method `_merge_kwargs_dict()` for merging
@@ -173,6 +177,31 @@ export class BaseMessageChunk extends BaseMessage {
                 typeof merged[key] === "object") {
                 merged[key] = this._mergeAdditionalKwargs(merged[key], value);
             }
+            else if (key === "tool_calls" &&
+                isOpenAIToolCallArray(merged[key]) &&
+                isOpenAIToolCallArray(value)) {
+                for (const toolCall of value) {
+                    if (merged[key]?.[toolCall.index] !== undefined) {
+                        merged[key] = merged[key]?.map((value, i) => {
+                            if (i !== toolCall.index) {
+                                return value;
+                            }
+                            return {
+                                ...value,
+                                ...toolCall,
+                                function: {
+                                    name: toolCall.function.name ?? value.function.name,
+                                    arguments: (value.function.arguments ?? "") +
+                                        (toolCall.function.arguments ?? ""),
+                                },
+                            };
+                        });
+                    }
+                    else {
+                        merged[key][toolCall.index] = toolCall;
+                    }
+                }
+            }
             else {
                 throw new Error(`additional_kwargs[${key}] already exists in this message chunk.`);
             }
@@ -326,6 +355,60 @@ export class FunctionMessageChunk extends BaseMessageChunk {
         });
     }
 }
+/**
+ * Represents a tool message in a conversation.
+ */
+export class ToolMessage extends BaseMessage {
+    static lc_name() {
+        return "ToolMessage";
+    }
+    constructor(fields, tool_call_id, name) {
+        if (typeof fields === "string") {
+            // eslint-disable-next-line no-param-reassign, @typescript-eslint/no-non-null-assertion
+            fields = { content: fields, name, tool_call_id: tool_call_id };
+        }
+        super(fields);
+        Object.defineProperty(this, "tool_call_id", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.tool_call_id = fields.tool_call_id;
+    }
+    _getType() {
+        return "tool";
+    }
+}
+/**
+ * Represents a chunk of a function message, which can be concatenated
+ * with other function message chunks.
+ */
+export class ToolMessageChunk extends BaseMessageChunk {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "tool_call_id", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.tool_call_id = fields.tool_call_id;
+    }
+    static lc_name() {
+        return "ToolMessageChunk";
+    }
+    _getType() {
+        return "tool";
+    }
+    concat(chunk) {
+        return new ToolMessageChunk({
+            content: mergeContent(this.content, chunk.content),
+            additional_kwargs: ToolMessageChunk._mergeAdditionalKwargs(this.additional_kwargs, chunk.additional_kwargs),
+            tool_call_id: this.tool_call_id,
+        });
+    }
+}
 /**
  * Represents a chat message in a conversation.
 */
@@ -457,6 +540,7 @@ function mapV1MessageToStoredMessage(message) {
             content: v1Message.text,
             role: v1Message.role,
             name: undefined,
+            tool_call_id: undefined,
         },
     };
 }
@@ -475,6 +559,11 @@ export function mapStoredMessageToChatMessage(message) {
                 throw new Error("Name must be defined for function messages");
             }
             return new FunctionMessage(storedMessage.data);
+        case "tool":
+            if (storedMessage.data.tool_call_id === undefined) {
+                throw new Error("Tool call ID must be defined for tool messages");
+            }
+            return new ToolMessage(storedMessage.data);
         case "chat": {
             if (storedMessage.data.role === undefined) {
                 throw new Error("Role must be defined for chat messages");

package/dist/stores/message/convex.d.ts
CHANGED

@@ -1,11 +1,11 @@
-import { DocumentByName, FieldPaths, FunctionReference, GenericActionCtx, GenericDataModel, NamedTableInfo, TableNamesInDataModel, 
+import { DocumentByName, FieldPaths, FunctionReference, GenericActionCtx, GenericDataModel, NamedTableInfo, TableNamesInDataModel, IndexNames } from "convex/server";
 import { BaseMessage, BaseListChatMessageHistory } from "../../schema/index.js";
 /**
  * Type that defines the config required to initialize the
  * ConvexChatMessageHistory class. At minimum it needs a sessionId
  * and an ActionCtx.
 */
-export type ConvexChatMessageHistoryInput<DataModel extends GenericDataModel, TableName extends TableNamesInDataModel<DataModel> = "messages", IndexName extends 
+export type ConvexChatMessageHistoryInput<DataModel extends GenericDataModel, TableName extends TableNamesInDataModel<DataModel> = "messages", IndexName extends IndexNames<NamedTableInfo<DataModel, TableName>> = "bySessionId", SessionIdFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>> = "sessionId", MessageTextFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>> = "message", InsertMutation extends FunctionReference<"mutation", "internal", {
     table: string;
     document: object;
 }> = any, LookupQuery extends FunctionReference<"query", "internal", {
@@ -50,7 +50,7 @@ export type ConvexChatMessageHistoryInput<DataModel extends GenericDataModel, Ta
     */
     readonly deleteMany?: DeleteManyMutation;
 };
-export declare class ConvexChatMessageHistory<DataModel extends GenericDataModel, SessionIdFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>> = "sessionId", TableName extends TableNamesInDataModel<DataModel> = "messages", IndexName extends 
+export declare class ConvexChatMessageHistory<DataModel extends GenericDataModel, SessionIdFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>> = "sessionId", TableName extends TableNamesInDataModel<DataModel> = "messages", IndexName extends IndexNames<NamedTableInfo<DataModel, TableName>> = "bySessionId", MessageTextFieldName extends FieldPaths<NamedTableInfo<DataModel, TableName>> = "message", InsertMutation extends FunctionReference<"mutation", "internal", {
     table: string;
     document: object;
 }> = any, LookupQuery extends FunctionReference<"query", "internal", {

package/dist/tools/convert_to_openai.cjs
CHANGED

@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.formatToOpenAIFunction = void 0;
+exports.formatToOpenAITool = exports.formatToOpenAIFunction = void 0;
 const zod_to_json_schema_1 = require("zod-to-json-schema");
 /**
  * Formats a `StructuredTool` instance into a format that is compatible
@@ -16,3 +16,14 @@ function formatToOpenAIFunction(tool) {
     };
 }
 exports.formatToOpenAIFunction = formatToOpenAIFunction;
+function formatToOpenAITool(tool) {
+    return {
+        type: "function",
+        function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: (0, zod_to_json_schema_1.zodToJsonSchema)(tool.schema),
+        },
+    };
+}
+exports.formatToOpenAITool = formatToOpenAITool;

package/dist/tools/convert_to_openai.d.ts
CHANGED

@@ -7,3 +7,4 @@ import { StructuredTool } from "./base.js";
  * schema, which is then used as the parameters for the OpenAI function.
 */
 export declare function formatToOpenAIFunction(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionCreateParams.Function;
+export declare function formatToOpenAITool(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionTool;

package/dist/tools/convert_to_openai.js
CHANGED

@@ -12,3 +12,13 @@ export function formatToOpenAIFunction(tool) {
         parameters: zodToJsonSchema(tool.schema),
     };
 }
+export function formatToOpenAITool(tool) {
+    return {
+        type: "function",
+        function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: zodToJsonSchema(tool.schema),
+        },
+    };
+}

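formatToOpenAITool wraps the existing function format in the Chat Completions tool envelope. A sketch with a hypothetical search tool, assuming the langchain/tools/convert_to_openai entrypoint:

import { z } from "zod";
import { DynamicStructuredTool } from "langchain/tools";
import { formatToOpenAITool } from "langchain/tools/convert_to_openai";

const search = new DynamicStructuredTool({
  name: "search",
  description: "Search the web for a query.",
  schema: z.object({ query: z.string() }),
  func: async ({ query }) => `results for ${query}`, // stub implementation
});

// Produces { type: "function", function: { name, description, parameters } },
// where parameters is the zod schema converted to JSON schema.
const toolDef = formatToOpenAITool(search);
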
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.0.182-rc.0",
+  "version": "0.0.182",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {
@@ -796,16 +796,16 @@
     "url": "git@github.com:langchain-ai/langchainjs.git"
   },
   "scripts": {
-    "build": "yarn clean && yarn build:esm && yarn build:cjs && 
+    "build": "yarn clean && yarn build:esm && yarn build:cjs && yarn build:scripts",
     "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rimraf dist/tests dist/**/tests",
     "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rimraf dist-cjs",
     "build:watch": "node scripts/create-entrypoints.js && tsc --outDir dist/ --watch",
+    "build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js",
     "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint src && dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
     "lint:fix": "yarn lint --fix",
     "precommit": "lint-staged",
     "clean": "rimraf dist/ && NODE_OPTIONS=--max-old-space-size=4096 node scripts/create-entrypoints.js pre",
     "prepack": "yarn build",
-    "prerelease": "npm publish",
     "release": "release-it --only-version --config .release-it.json",
     "test": "NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",
     "test:watch": "NODE_OPTIONS=--experimental-vm-modules jest --watch --testPathIgnorePatterns=\\.int\\.test.ts",
@@ -1361,7 +1361,7 @@
     "langchainhub": "~0.0.6",
     "langsmith": "~0.0.48",
     "ml-distance": "^4.0.0",
-    "openai": "
+    "openai": "^4.16.1",
     "openapi-types": "^12.1.3",
     "p-queue": "^6.6.2",
     "p-retry": "4",