@librechat/agents 2.4.41 → 2.4.43
This diff shows the changes between publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/cjs/common/enum.cjs +4 -2
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/graphs/Graph.cjs +5 -6
- package/dist/cjs/graphs/Graph.cjs.map +1 -1
- package/dist/cjs/llm/google/index.cjs +73 -1
- package/dist/cjs/llm/google/index.cjs.map +1 -1
- package/dist/cjs/llm/google/utils/common.cjs +469 -0
- package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
- package/dist/cjs/run.cjs +4 -3
- package/dist/cjs/run.cjs.map +1 -1
- package/dist/cjs/stream.cjs +5 -2
- package/dist/cjs/stream.cjs.map +1 -1
- package/dist/cjs/utils/title.cjs +25 -20
- package/dist/cjs/utils/title.cjs.map +1 -1
- package/dist/esm/common/enum.mjs +4 -2
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/graphs/Graph.mjs +5 -6
- package/dist/esm/graphs/Graph.mjs.map +1 -1
- package/dist/esm/llm/google/index.mjs +73 -1
- package/dist/esm/llm/google/index.mjs.map +1 -1
- package/dist/esm/llm/google/utils/common.mjs +463 -0
- package/dist/esm/llm/google/utils/common.mjs.map +1 -0
- package/dist/esm/run.mjs +4 -3
- package/dist/esm/run.mjs.map +1 -1
- package/dist/esm/stream.mjs +5 -2
- package/dist/esm/stream.mjs.map +1 -1
- package/dist/esm/utils/title.mjs +25 -20
- package/dist/esm/utils/title.mjs.map +1 -1
- package/dist/types/common/enum.d.ts +5 -3
- package/dist/types/graphs/Graph.d.ts +3 -2
- package/dist/types/llm/google/index.d.ts +10 -5
- package/dist/types/llm/google/types.d.ts +32 -0
- package/dist/types/llm/google/utils/common.d.ts +19 -0
- package/dist/types/llm/google/utils/tools.d.ts +10 -0
- package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
- package/dist/types/run.d.ts +1 -1
- package/dist/types/scripts/args.d.ts +2 -1
- package/dist/types/types/llm.d.ts +2 -0
- package/dist/types/types/run.d.ts +1 -0
- package/dist/types/types/stream.d.ts +5 -0
- package/package.json +1 -1
- package/src/common/enum.ts +4 -2
- package/src/graphs/Graph.ts +16 -11
- package/src/llm/google/index.ts +118 -8
- package/src/llm/google/types.ts +43 -0
- package/src/llm/google/utils/common.ts +632 -0
- package/src/llm/google/utils/tools.ts +160 -0
- package/src/llm/google/utils/zod_to_genai_parameters.ts +88 -0
- package/src/run.ts +4 -2
- package/src/scripts/args.ts +12 -8
- package/src/scripts/code_exec.ts +49 -18
- package/src/scripts/code_exec_files.ts +48 -17
- package/src/scripts/image.ts +52 -20
- package/src/scripts/simple.ts +1 -0
- package/src/specs/anthropic.simple.test.ts +88 -31
- package/src/specs/openai.simple.test.ts +88 -31
- package/src/stream.ts +5 -2
- package/src/types/llm.ts +2 -0
- package/src/types/run.ts +1 -0
- package/src/types/stream.ts +6 -0
- package/src/utils/llmConfig.ts +2 -2
- package/src/utils/title.ts +44 -27
package/src/llm/google/utils/tools.ts
ADDED
@@ -0,0 +1,160 @@
+import {
+  Tool as GenerativeAITool,
+  ToolConfig,
+  FunctionCallingMode,
+  FunctionDeclaration,
+  FunctionDeclarationsTool,
+  FunctionDeclarationSchema,
+} from '@google/generative-ai';
+import { ToolChoice } from '@langchain/core/language_models/chat_models';
+import { StructuredToolInterface } from '@langchain/core/tools';
+import { isLangChainTool } from '@langchain/core/utils/function_calling';
+import {
+  isOpenAITool,
+  ToolDefinition,
+} from '@langchain/core/language_models/base';
+import { convertToGenerativeAITools } from './common';
+import { GoogleGenerativeAIToolType } from '../types';
+import { removeAdditionalProperties } from './zod_to_genai_parameters';
+
+export function convertToolsToGenAI(
+  tools: GoogleGenerativeAIToolType[],
+  extra?: {
+    toolChoice?: ToolChoice;
+    allowedFunctionNames?: string[];
+  }
+): {
+  tools: GenerativeAITool[];
+  toolConfig?: ToolConfig;
+} {
+  // Extract function declaration processing to a separate function
+  const genAITools = processTools(tools);
+
+  // Simplify tool config creation
+  const toolConfig = createToolConfig(genAITools, extra);
+
+  return { tools: genAITools, toolConfig };
+}
+
+function processTools(tools: GoogleGenerativeAIToolType[]): GenerativeAITool[] {
+  let functionDeclarationTools: FunctionDeclaration[] = [];
+  const genAITools: GenerativeAITool[] = [];
+
+  tools.forEach((tool) => {
+    if (isLangChainTool(tool)) {
+      const [convertedTool] = convertToGenerativeAITools([
+        tool as StructuredToolInterface,
+      ]);
+      if (convertedTool.functionDeclarations) {
+        functionDeclarationTools.push(...convertedTool.functionDeclarations);
+      }
+    } else if (isOpenAITool(tool)) {
+      const { functionDeclarations } = convertOpenAIToolToGenAI(tool);
+      if (functionDeclarations) {
+        functionDeclarationTools.push(...functionDeclarations);
+      } else {
+        throw new Error(
+          'Failed to convert OpenAI structured tool to GenerativeAI tool'
+        );
+      }
+    } else {
+      genAITools.push(tool as GenerativeAITool);
+    }
+  });
+
+  const genAIFunctionDeclaration = genAITools.find(
+    (t) => 'functionDeclarations' in t
+  );
+  if (genAIFunctionDeclaration) {
+    return genAITools.map((tool) => {
+      if (
+        functionDeclarationTools.length > 0 &&
+        'functionDeclarations' in tool
+      ) {
+        const newTool = {
+          functionDeclarations: [
+            ...(tool.functionDeclarations || []),
+            ...functionDeclarationTools,
+          ],
+        };
+        // Clear the functionDeclarationTools array so it is not passed again
+        functionDeclarationTools = [];
+        return newTool;
+      }
+      return tool;
+    });
+  }
+
+  return [
+    ...genAITools,
+    ...(functionDeclarationTools.length > 0
+      ? [
+          {
+            functionDeclarations: functionDeclarationTools,
+          },
+        ]
+      : []),
+  ];
+}
+
+function convertOpenAIToolToGenAI(
+  tool: ToolDefinition
+): FunctionDeclarationsTool {
+  return {
+    functionDeclarations: [
+      {
+        name: tool.function.name,
+        description: tool.function.description,
+        parameters: removeAdditionalProperties(
+          tool.function.parameters
+        ) as FunctionDeclarationSchema,
+      },
+    ],
+  };
+}
+
+function createToolConfig(
+  genAITools: GenerativeAITool[],
+  extra?: {
+    toolChoice?: ToolChoice;
+    allowedFunctionNames?: string[];
+  }
+): ToolConfig | undefined {
+  if (!genAITools.length || !extra) return undefined;
+
+  const { toolChoice, allowedFunctionNames } = extra;
+
+  const modeMap: Record<string, FunctionCallingMode> = {
+    any: FunctionCallingMode.ANY,
+    auto: FunctionCallingMode.AUTO,
+    none: FunctionCallingMode.NONE,
+  };
+
+  if (
+    toolChoice != null &&
+    ['any', 'auto', 'none'].includes(toolChoice as string)
+  ) {
+    return {
+      functionCallingConfig: {
+        mode: modeMap[toolChoice as keyof typeof modeMap] ?? 'MODE_UNSPECIFIED',
+        allowedFunctionNames,
+      },
+    };
+  }
+
+  if (typeof toolChoice === 'string' || allowedFunctionNames) {
+    return {
+      functionCallingConfig: {
+        mode: FunctionCallingMode.ANY,
+        allowedFunctionNames: [
+          ...(allowedFunctionNames ?? []),
+          ...(toolChoice != null && typeof toolChoice === 'string'
+            ? [toolChoice]
+            : []),
+        ],
+      },
+    };
+  }
+
+  return undefined;
+}
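For context, a minimal usage sketch of the new helper (not part of the diff; the LangChain tool, its name, and the zod schema below are illustrative assumptions):

// Illustrative only: convert a LangChain tool into GenerativeAI function
// declarations plus a function-calling config derived from toolChoice.
import { z } from 'zod';
import { tool } from '@langchain/core/tools';
import { convertToolsToGenAI } from './tools';

const getWeather = tool(async ({ city }) => `Sunny in ${city}`, {
  name: 'get_weather',
  description: 'Look up the weather for a city',
  schema: z.object({ city: z.string() }),
});

const { tools, toolConfig } = convertToolsToGenAI([getWeather], {
  toolChoice: 'auto',
});
// tools      -> [{ functionDeclarations: [{ name: 'get_weather', ... }] }]
// toolConfig -> { functionCallingConfig: { mode: FunctionCallingMode.AUTO } }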
package/src/llm/google/utils/zod_to_genai_parameters.ts
ADDED
@@ -0,0 +1,88 @@
+/* eslint-disable @typescript-eslint/no-unused-vars */
+
+import {
+  type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema,
+  type SchemaType as FunctionDeclarationSchemaType,
+} from '@google/generative-ai';
+import {
+  InteropZodType,
+  isInteropZodSchema,
+} from '@langchain/core/utils/types';
+import {
+  type JsonSchema7Type,
+  toJsonSchema,
+} from '@langchain/core/utils/json_schema';
+
+export interface GenerativeAIJsonSchema extends Record<string, unknown> {
+  properties?: Record<string, GenerativeAIJsonSchema>;
+  type: FunctionDeclarationSchemaType;
+}
+
+export interface GenerativeAIJsonSchemaDirty extends GenerativeAIJsonSchema {
+  properties?: Record<string, GenerativeAIJsonSchemaDirty>;
+  additionalProperties?: boolean;
+}
+
+export function removeAdditionalProperties(
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  obj: Record<string, any>
+): GenerativeAIJsonSchema {
+  if (typeof obj === 'object' && obj !== null) {
+    const newObj = { ...obj };
+
+    if ('additionalProperties' in newObj) {
+      delete newObj.additionalProperties;
+    }
+    if ('$schema' in newObj) {
+      delete newObj.$schema;
+    }
+    if ('strict' in newObj) {
+      delete newObj.strict;
+    }
+
+    for (const key in newObj) {
+      if (key in newObj) {
+        if (Array.isArray(newObj[key])) {
+          newObj[key] = newObj[key].map(removeAdditionalProperties);
+        } else if (typeof newObj[key] === 'object' && newObj[key] !== null) {
+          newObj[key] = removeAdditionalProperties(newObj[key]);
+        }
+      }
+    }
+
+    return newObj as GenerativeAIJsonSchema;
+  }
+
+  return obj as GenerativeAIJsonSchema;
+}
+
+export function schemaToGenerativeAIParameters<
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  RunOutput extends Record<string, any> = Record<string, any>,
+>(
+  schema: InteropZodType<RunOutput> | JsonSchema7Type
+): GenerativeAIFunctionDeclarationSchema {
+  // GenerativeAI doesn't accept either the $schema or additionalProperties
+  // attributes, so we need to explicitly remove them.
+  const jsonSchema = removeAdditionalProperties(
+    isInteropZodSchema(schema) ? toJsonSchema(schema) : schema
+  );
+  const { $schema, ...rest } = jsonSchema;
+
+  return rest as GenerativeAIFunctionDeclarationSchema;
+}
+
+export function jsonSchemaToGeminiParameters(
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  schema: Record<string, any>
+): GenerativeAIFunctionDeclarationSchema {
+  // Gemini doesn't accept either the $schema or additionalProperties
+  // attributes, so we need to explicitly remove them.
+
+  const jsonSchema = removeAdditionalProperties(
+    schema as GenerativeAIJsonSchemaDirty
+  );
+  const { $schema, ...rest } = jsonSchema;
+
+  return rest as GenerativeAIFunctionDeclarationSchema;
+}
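A rough usage sketch of these helpers (illustrative, not part of the diff; the schema fields are assumptions): a zod schema is converted to JSON Schema and the keys Gemini rejects are stripped.

import { z } from 'zod';
import { schemaToGenerativeAIParameters } from './zod_to_genai_parameters';

// Convert a zod schema into a Gemini-compatible function declaration schema.
const parameters = schemaToGenerativeAIParameters(
  z.object({
    query: z.string().describe('Search query'),
    limit: z.number().optional(),
  })
);
// parameters.type === 'object'; parameters.properties.query.type === 'string';
// no $schema, additionalProperties, or strict keys remain.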
package/src/run.ts
CHANGED
@@ -235,6 +235,7 @@ export class Run<T extends t.BaseGraphState> {
   }
 
   async generateTitle({
+    provider,
     inputText,
     contentParts,
     titlePrompt,
@@ -255,8 +256,9 @@
       await convoTemplate.invoke({ input: inputText, output: response })
     ).value;
     const model = this.Graph?.getNewModel({
+      provider,
       clientOptions,
-
+      omitOptions: new Set([
        'clientOptions',
        'streaming',
        'stream',
@@ -270,7 +272,7 @@
      return { language: '', title: '' };
    }
    if (
-      isOpenAILike(
+      isOpenAILike(provider) &&
      (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)
    ) {
      model.temperature = (clientOptions as t.OpenAIClientOptions | undefined)
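In practice (a sketch only; the field names mirror the script changes shown further down in this diff), callers now pass the provider through so generateTitle can construct the title model for that provider:

// Sketch of the updated call shape; variable names are assumptions.
const titleResult = await run.generateTitle({
  provider,
  inputText: userMessage,
  contentParts,
  chainOptions: {
    callbacks: [{ handleLLMEnd }],
  },
});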
package/src/scripts/args.ts
CHANGED
@@ -4,41 +4,45 @@ import { hideBin } from 'yargs/helpers';
 import { llmConfigs } from '@/utils/llmConfig';
 import { Providers } from '@/common';
 
-export async function getArgs(): Promise<{
+export async function getArgs(): Promise<{
+  userName: string;
+  location: string;
+  provider: Providers;
+  currentDate: string;
+}> {
   const argv = yargs(hideBin(process.argv))
     .option('name', {
       alias: 'n',
       type: 'string',
       description: 'User name',
-      default: 'Jo'
+      default: 'Jo',
     })
     .option('location', {
       alias: 'l',
       type: 'string',
       description: 'User location',
-      default: 'New York'
+      default: 'New York',
     })
     .option('provider', {
       alias: 'p',
       type: 'string',
       description: 'LLM provider',
       choices: Object.keys(llmConfigs),
-      default: Providers.OPENAI
+      default: Providers.OPENAI,
     })
     .help()
-    .alias('help', 'h')
-    .argv;
+    .alias('help', 'h').argv;
 
   const args = await argv;
   const userName = args.name as string;
   const location = args.location as string;
-  const provider = args.provider as
+  const provider = args.provider as Providers;
   const currentDate = new Date().toLocaleString();
 
   return {
     userName,
     location,
     provider,
-    currentDate
+    currentDate,
   };
 }
package/src/scripts/code_exec.ts
CHANGED
@@ -6,7 +6,11 @@ import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import {
+import {
+  ToolEndHandler,
+  ModelEndHandler,
+  createMetadataAggregator,
+} from '@/events';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
 import { GraphEvents } from '@/common';
@@ -23,38 +27,57 @@ async function testCodeExecution(): Promise<void> {
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_COMPLETED ======');
         console.dir(data, { depth: null });
-        aggregateContent({
-
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
     },
     [GraphEvents.ON_RUN_STEP]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStep });
-      }
+      },
     },
     [GraphEvents.ON_RUN_STEP_DELTA]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_MESSAGE_DELTA]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_MESSAGE_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
-      }
+      },
     },
     [GraphEvents.TOOL_START]: {
-      handle: (
+      handle: (
+        _event: string,
+        data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
         console.log('====== TOOL_START ======');
         console.dir(data, { depth: null });
-      }
+      },
     },
   };
 
@@ -66,14 +89,19 @@ async function testCodeExecution(): Promise<void> {
       type: 'standard',
       llmConfig,
       tools: [new TavilySearchResults(), createCodeExecutionTool()],
-      instructions:
+      instructions:
+        'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
     },
     returnContent: true,
     customHandlers,
   });
 
-  const config: Partial<RunnableConfig> & {
+  const config: Partial<RunnableConfig> & {
+    version: 'v1' | 'v2';
+    run_id?: string;
+    streamMode: string;
+  } = {
     configurable: {
       provider,
       thread_id: 'conversation-num-1',
@@ -152,12 +180,15 @@ async function testCodeExecution(): Promise<void> {
 
   const { handleLLMEnd, collected } = createMetadataAggregator();
   const titleResult = await run.generateTitle({
+    provider,
     inputText: userMessage2,
     contentParts,
     chainOptions: {
-      callbacks: [
-
-
+      callbacks: [
+        {
+          handleLLMEnd,
+        },
+      ],
     },
   });
   console.log('Generated Title:', titleResult);
@@ -180,4 +211,4 @@ testCodeExecution().catch((err) => {
   console.log('Conversation history:');
   console.dir(conversationHistory, { depth: null });
   process.exit(1);
-});
+});
package/src/scripts/code_exec_files.ts
CHANGED
@@ -5,7 +5,11 @@ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
 import type { RunnableConfig } from '@langchain/core/runnables';
 import type * as t from '@/types';
 import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
-import {
+import {
+  ToolEndHandler,
+  ModelEndHandler,
+  createMetadataAggregator,
+} from '@/events';
 import { getLLMConfig } from '@/utils/llmConfig';
 import { getArgs } from '@/scripts/args';
 import { GraphEvents } from '@/common';
@@ -22,38 +26,57 @@ async function testCodeExecution(): Promise<void> {
     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_COMPLETED,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_COMPLETED ======');
         console.dir(data, { depth: null });
-        aggregateContent({
-
+        aggregateContent({
+          event,
+          data: data as unknown as { result: t.ToolEndEvent },
+        });
+      },
     },
     [GraphEvents.ON_RUN_STEP]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_RUN_STEP,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStep });
-      }
+      },
     },
     [GraphEvents.ON_RUN_STEP_DELTA]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_RUN_STEP_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_RUN_STEP_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
-      }
+      },
     },
     [GraphEvents.ON_MESSAGE_DELTA]: {
-      handle: (
+      handle: (
+        event: GraphEvents.ON_MESSAGE_DELTA,
+        data: t.StreamEventData
+      ): void => {
         console.log('====== ON_MESSAGE_DELTA ======');
         console.dir(data, { depth: null });
         aggregateContent({ event, data: data as t.MessageDeltaEvent });
-      }
+      },
     },
     [GraphEvents.TOOL_START]: {
-      handle: (
+      handle: (
+        _event: string,
+        data: t.StreamEventData,
+        metadata?: Record<string, unknown>
+      ): void => {
         console.log('====== TOOL_START ======');
         console.dir(data, { depth: null });
-      }
+      },
     },
   };
 
@@ -65,14 +88,19 @@ async function testCodeExecution(): Promise<void> {
       type: 'standard',
       llmConfig,
       tools: [createCodeExecutionTool()],
-      instructions:
+      instructions:
+        'You are a friendly AI assistant with coding capabilities. Always address the user by their name.',
       additional_instructions: `The user's name is ${userName} and they are located in ${location}. The current date is ${currentDate}.`,
     },
     returnContent: true,
     customHandlers,
   });
 
-  const config: Partial<RunnableConfig> & {
+  const config: Partial<RunnableConfig> & {
+    version: 'v1' | 'v2';
+    run_id?: string;
+    streamMode: string;
+  } = {
     configurable: {
       provider,
       thread_id: 'conversation-num-1',
@@ -131,12 +159,15 @@ async function testCodeExecution(): Promise<void> {
 
   const { handleLLMEnd, collected } = createMetadataAggregator();
   const titleResult = await run.generateTitle({
+    provider,
    inputText: userMessage2,
    contentParts,
    chainOptions: {
-      callbacks: [
-
-
+      callbacks: [
+        {
+          handleLLMEnd,
+        },
+      ],
    },
  });
  console.log('Generated Title:', titleResult);