@librechat/agents 3.0.1 → 3.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/common/enum.cjs +0 -1
- package/dist/cjs/common/enum.cjs.map +1 -1
- package/dist/cjs/llm/providers.cjs +0 -2
- package/dist/cjs/llm/providers.cjs.map +1 -1
- package/dist/cjs/main.cjs +2 -0
- package/dist/cjs/main.cjs.map +1 -1
- package/dist/cjs/tools/Calculator.cjs +45 -0
- package/dist/cjs/tools/Calculator.cjs.map +1 -0
- package/dist/esm/common/enum.mjs +0 -1
- package/dist/esm/common/enum.mjs.map +1 -1
- package/dist/esm/llm/providers.mjs +0 -2
- package/dist/esm/llm/providers.mjs.map +1 -1
- package/dist/esm/main.mjs +1 -0
- package/dist/esm/main.mjs.map +1 -1
- package/dist/esm/tools/Calculator.mjs +24 -0
- package/dist/esm/tools/Calculator.mjs.map +1 -0
- package/dist/types/common/enum.d.ts +0 -1
- package/dist/types/index.d.ts +1 -0
- package/dist/types/tools/Calculator.d.ts +8 -0
- package/dist/types/types/llm.d.ts +1 -6
- package/package.json +6 -4
- package/src/common/enum.ts +0 -1
- package/src/index.ts +1 -0
- package/src/llm/providers.ts +0 -2
- package/src/scripts/abort.ts +34 -15
- package/src/scripts/cli.ts +25 -20
- package/src/scripts/cli2.ts +23 -15
- package/src/scripts/cli3.ts +35 -29
- package/src/scripts/cli4.ts +1 -2
- package/src/scripts/cli5.ts +1 -2
- package/src/scripts/code_exec.ts +1 -2
- package/src/scripts/code_exec_simple.ts +1 -2
- package/src/scripts/content.ts +33 -15
- package/src/scripts/simple.ts +1 -2
- package/src/scripts/stream.ts +33 -15
- package/src/scripts/test-tools-before-handoff.ts +17 -28
- package/src/scripts/tools.ts +4 -6
- package/src/specs/anthropic.simple.test.ts +1 -1
- package/src/specs/azure.simple.test.ts +1 -1
- package/src/specs/openai.simple.test.ts +1 -1
- package/src/specs/openrouter.simple.test.ts +1 -1
- package/src/tools/Calculator.test.ts +278 -0
- package/src/tools/Calculator.ts +25 -0
- package/src/types/llm.ts +0 -6
- package/dist/types/tools/example.d.ts +0 -78
- package/src/proto/CollabGraph.ts +0 -269
- package/src/proto/TaskManager.ts +0 -243
- package/src/proto/collab.ts +0 -200
- package/src/proto/collab_design.ts +0 -184
- package/src/proto/collab_design_v2.ts +0 -224
- package/src/proto/collab_design_v3.ts +0 -255
- package/src/proto/collab_design_v4.ts +0 -220
- package/src/proto/collab_design_v5.ts +0 -251
- package/src/proto/collab_graph.ts +0 -181
- package/src/proto/collab_original.ts +0 -123
- package/src/proto/example.ts +0 -93
- package/src/proto/example_new.ts +0 -68
- package/src/proto/example_old.ts +0 -201
- package/src/proto/example_test.ts +0 -152
- package/src/proto/example_test_anthropic.ts +0 -100
- package/src/proto/log_stream.ts +0 -202
- package/src/proto/main_collab_community_event.ts +0 -133
- package/src/proto/main_collab_design_v2.ts +0 -96
- package/src/proto/main_collab_design_v4.ts +0 -100
- package/src/proto/main_collab_design_v5.ts +0 -135
- package/src/proto/main_collab_global_analysis.ts +0 -122
- package/src/proto/main_collab_hackathon_event.ts +0 -153
- package/src/proto/main_collab_space_mission.ts +0 -153
- package/src/proto/main_philosophy.ts +0 -210
- package/src/proto/original_script.ts +0 -126
- package/src/proto/standard.ts +0 -100
- package/src/proto/stream.ts +0 -56
- package/src/proto/tasks.ts +0 -118
- package/src/proto/tools/global_analysis_tools.ts +0 -86
- package/src/proto/tools/space_mission_tools.ts +0 -60
- package/src/proto/vertexai.ts +0 -54
- package/src/tools/example.ts +0 -129
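The hunks below only cover the removed prototype scripts; the body of the new Calculator tool added in this range (package/src/tools/Calculator.ts and its tests) is not shown. As a rough, hypothetical sketch only (assuming the tool follows the usual @langchain/core Tool pattern, which this diff does not confirm), a minimal calculator tool could look like:

import { Tool } from '@langchain/core/tools';

// Hypothetical illustration; not the actual Calculator shipped in 3.0.3.
export class Calculator extends Tool {
  name = 'calculator';
  description = 'Evaluates a basic arithmetic expression, e.g. "2 + 2 * 3".';

  protected async _call(input: string): Promise<string> {
    try {
      // Assumption: a production tool would use a dedicated expression
      // parser rather than Function-based evaluation.
      const result = Function(`"use strict"; return (${input});`)();
      return String(result);
    } catch {
      return 'Invalid expression';
    }
  }
}

Such a tool would be passed to an agent the same way tavilyTool and chartTool are passed in the removed scripts below.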
package/src/proto/log_stream.ts
DELETED
@@ -1,202 +0,0 @@
import fs from 'fs/promises';
import { pull } from 'langchain/hub';
import { ChatOpenAI } from '@langchain/openai';
import type { ChatPromptTemplate } from '@langchain/core/prompts';
import { AgentExecutor, createOpenAIFunctionsAgent, AgentStep } from 'langchain/agents';
import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
import type { RunLogPatch } from '@langchain/core/tracers/log_stream';
import dotenv from 'dotenv';

type ExtractedJSONPatchOperation = Pick<RunLogPatch, 'ops'>;
type OperationType = ExtractedJSONPatchOperation extends { ops: (infer T)[] } ? T : never;

// Load environment variables from .env file
dotenv.config();

// Define the tools the agent will have access to.
const tools = [new TavilySearchResults({})];

const llm = new ChatOpenAI({
  model: 'gpt-3.5-turbo-1106',
  temperature: 0,
  streaming: true,
});

// Get the prompt to use - you can modify this!
// If you want to see the prompt in full, you can at:
// https://smith.langchain.com/hub/hwchase17/openai-functions-agent
const prompt = await pull<ChatPromptTemplate>(
  'hwchase17/openai-functions-agent'
);

const agent = await createOpenAIFunctionsAgent({
  llm,
  tools,
  prompt,
});

const agentExecutor = new AgentExecutor({
  agent,
  tools,
});

const logStream = await agentExecutor.streamLog({
  input: 'what are the current US election polls 2024. today is 7/6/24',
});

const finalState: RunLogPatch[] = [];
const outputs: RunLogPatch[] = [];
let accumulatedOutput = '';
let accumulatedArguments = '';

let functionName: string | undefined = undefined;

function processStreamedOutput(op: any) {
  let output = '';
  if (op.value.text !== undefined) {
    output += op.value.text;
  }
  if (op.value.message && op.value.message.kwargs) {
    const kwargs = op.value.message.kwargs;
    if (kwargs.content) {
      output += kwargs.content;
    }
  }
  if (output) {
    accumulatedOutput += output;
    process.stdout.write(output);
  }
}

// A helper function to handle the event pattern for logged arguments
function handleLoggedArgument(loggedArgument: any) {
  if (loggedArgument.value?.message?.additional_kwargs?.function_call) {
    const functionCall = loggedArgument.value.message.additional_kwargs.function_call;

    if (functionCall.name) {
      functionName = functionCall.name;
      process.stdout.write(`Logged Function Name:
${JSON.stringify(functionCall, null, 2)}
`);
    }

    if (functionCall.arguments) {
      accumulatedArguments += functionCall.arguments;
      // Print the part of the argument as it comes
      // process.stdout.write(`Logged Argument: { "arguments": "${functionCall.arguments}" }\n`);
      process.stdout.write(`Logged Argument:\n${JSON.stringify(functionCall, null, 2)}`);
    }

    // Check if the full arguments string has been accumulated
    if (accumulatedArguments.startsWith('{') && accumulatedArguments.endsWith('}')) {
      // Build the final logged argument string
      const completeArguments = accumulatedArguments;
      const namePart = functionName ? `"name": "${functionName}", ` : '';

      console.log(`\nLogged Argument: {\n ${namePart}"arguments": ${completeArguments}\n}\n`);

      // Reset accumulators
      accumulatedArguments = '';
      functionName = undefined;
    }
  }
}

for await (const chunk of logStream) {
  finalState.push(chunk);
  outputs.push(chunk);

  if (!chunk.ops) continue;

  for (const op of chunk.ops) {
    if (isStreamedOutput(op)) {
      processStreamedOutput(op);
      if (hasFunctionCall(op)) {
        handleLoggedArgument(op);
      }
    } else if (isFinalOutput(op)) {
      printFinalOutput(op);
    }
  }
}

function isStreamedOutput(op: OperationType) {
  return op.op === 'add' && (
    op.path.includes('/streamed_output/-') ||
    op.path.includes('/streamed_output_str/-')
  );
}

function hasFunctionCall(op: OperationType) {
  return (op as any)?.value?.message?.additional_kwargs?.function_call;
}

function isFinalOutput(op: OperationType) {
  return op.op === 'add' &&
    op.value?.output &&
    op.path?.startsWith('/logs/') &&
    op.path?.endsWith('final_output') &&
    !op.path?.includes('Runnable');
}

function printFinalOutput(op: OperationType) {
  process.stdout.write(JSON.stringify(op, null, 2));
  process.stdout.write(`

########################_START_##########################
${JSON.stringify((op as any)?.value?.output, null, 2)}
########################__END__##########################

`);
}

// Define types for the final output structure
interface FinalOutput {
  id: string;
  streamed_output: Array<{
    intermediateSteps?: AgentStep[];
    output?: string;
  }>;
  final_output?: {
    output: string;
  };
  logs: Record<string, any>;
}

// Process finalState to create FinalOutput
const finalOutput: FinalOutput = {
  id: '',
  streamed_output: [],
  logs: {},
};

for (const patch of finalState) {
  if (patch.ops) {
    for (const op of patch.ops) {
      if (op.op === 'add' || op.op === 'replace') {
        if (op.path === '/id') {
          finalOutput.id = op.value;
        } else if (op.path === '/streamed_output/-') {
          finalOutput.streamed_output.push(op.value);
        } else if (op.path === '/final_output') {
          finalOutput.final_output = op.value;
        } else if (op.path.startsWith('/logs/')) {
          const logKey = op.path.split('/')[2];
          finalOutput.logs[logKey] = op.value;
        }
      }
    }
  }
}

// Save outputs to a JSON file
await fs.writeFile('outputs.json', JSON.stringify(outputs, null, 2));
console.log('\n\nOutputs have been saved to outputs.json');

// Save the final state separately
await fs.writeFile('final_output.json', JSON.stringify(finalOutput, null, 2));
console.log('\n\nFinal output has been saved to final_output.json');

// Save the cleaned-up accumulated output
await fs.writeFile('cleaned_output.txt', accumulatedOutput);
console.log('\n\nCleaned output has been saved to cleaned_output.txt');
package/src/proto/main_collab_community_event.ts
DELETED
@@ -1,133 +0,0 @@
// src/main_collab_community_event.ts
import dotenv from 'dotenv';
import { HumanMessage } from '@langchain/core/messages';
import type * as t from '@/types';
import {
  ChatModelStreamHandler,
  LLMStreamHandler,
} from '@/stream';
import { CollaborativeProcessor, Member } from '@/collab_design_v5';
import { tavilyTool, chartTool } from '@/tools/example';
import { supervisorPrompt } from '@/prompts/collab';
import { GraphEvents, Providers } from '@/common';
import fs from 'fs';
import util from 'util';

dotenv.config();

// Create a write stream
const logFile = fs.createWriteStream('event_log.log', { flags: 'a' });

// Redirect console.log and console.error
const originalConsoleLog = console.log;
const originalConsoleError = console.error;

console.log = function(...args) {
  logFile.write(util.format.apply(null, args) + '\n');
  originalConsoleLog.apply(console, args);
};

console.error = function(...args) {
  logFile.write(util.format.apply(null, args) + '\n');
  originalConsoleError.apply(console, args);
};

// Redirect process.stdout.write
const originalStdoutWrite = process.stdout.write;
process.stdout.write = function(chunk: string | Uint8Array, encoding?: BufferEncoding, callback?: (error: Error | null | undefined) => void): boolean {
  logFile.write(chunk, encoding);
  return originalStdoutWrite.apply(process.stdout, [chunk, encoding, callback]);
} as any;

// Redirect process.stderr.write
const originalStderrWrite = process.stderr.write;
process.stderr.write = function(chunk: string | Uint8Array, encoding?: BufferEncoding, callback?: (error: Error | null | undefined) => void): boolean {
  logFile.write(chunk, encoding);
  return originalStderrWrite.apply(process.stderr, [chunk, encoding, callback]);
} as any;

async function testCollaborativeCommunityEvent() {
  const customHandlers = {
    [GraphEvents.LLM_STREAM]: new LLMStreamHandler(),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
    [GraphEvents.LLM_START]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM Start:', event);
      }
    },
    [GraphEvents.LLM_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM End:', event);
      }
    },
    [GraphEvents.CHAT_MODEL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Chat Model End:', event);
      }
    },
    [GraphEvents.TOOL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Tool End:', event);
        console.dir(data, { depth: null });
      }
    },
  };

  // Define the collaborative members
  const members: Member[] = [
    {
      name: 'resource_finder',
      systemPrompt: 'You are a resource finder. You utilize the Tavily search engine to gather necessary resources and contacts needed for the community event.',
      tools: [tavilyTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0,
      },
    },
    {
      name: 'event_scheduler',
      systemPrompt: 'You are an event scheduler. You manage the timeline of the event activities using the Chart Generator to visualize the schedule.',
      tools: [chartTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0.2,
      },
    },
  ];

  const supervisorConfig = {
    systemPrompt: supervisorPrompt,
    llmConfig: {
      provider: Providers.OPENAI,
      modelName: 'gpt-4o',
      temperature: 0,
    },
  };

  const collaborativeProcessor = new CollaborativeProcessor(members, supervisorConfig, customHandlers);
  await collaborativeProcessor.initialize();

  const config = {
    configurable: { thread_id: 'collaborative-event-planning-1' },
    streamMode: 'events',
    version: 'v2',
  };

  console.log('\nCollaborative Test: Plan a community event');

  const input = {
    messages: [new HumanMessage('Plan a community fair including activities for all ages, food vendors, and a performance stage.')],
  };

  await collaborativeProcessor.processStream(input, config);
}

async function main() {
  await testCollaborativeCommunityEvent();
}

main().catch(console.error).finally(() => {
  logFile.end();
});
package/src/proto/main_collab_design_v2.ts
DELETED
@@ -1,96 +0,0 @@
// src/collaborative_main.ts
import dotenv from 'dotenv';
import { HumanMessage } from '@langchain/core/messages';
import type * as t from '@/types';
import {
  ChatModelStreamHandler,
  LLMStreamHandler,
} from '@/stream';
import { CollaborativeProcessor, Member } from '@/collab_design_v5';
import { tavilyTool, chartTool } from '@/tools/example';
import { GraphEvents, Providers } from '@/common';

dotenv.config();

async function testCollaborativeStreaming() {
  const customHandlers = {
    [GraphEvents.LLM_STREAM]: new LLMStreamHandler(),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
    [GraphEvents.LLM_START]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM Start:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.LLM_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM End:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.CHAT_MODEL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Chat Model End:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.TOOL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Tool End:', event);
        // console.dir(data, { depth: null });
      }
    },
  };

  const members: Member[] = [
    {
      name: 'researcher',
      systemPrompt: 'You are a web researcher. You may use the Tavily search engine to search the web for important information, so the Chart Generator in your team can make useful plots.',
      tools: [tavilyTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0,
      },
    },
    {
      name: 'chart_generator',
      systemPrompt: 'You excel at generating bar charts. Use the researcher\'s information to generate the charts.',
      tools: [chartTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0.2,
      },
    },
  ];

  const collaborativeProcessor = new CollaborativeProcessor(members, {
    llmConfig: {
      provider: Providers.OPENAI,
      modelName: 'gpt-4o',
      temperature: 0.5,
    },
  }, customHandlers);
  await collaborativeProcessor.initialize();

  const config = {
    configurable: { thread_id: 'collaborative-conversation-1' },
    streamMode: 'values',
    version: 'v2' as const,
  };

  console.log('\nCollaborative Test: Create a chart');

  const input = {
    messages: [new HumanMessage('Create a chart showing the population growth of the top 5 most populous countries over the last 50 years.')],
  };

  await collaborativeProcessor.processStream(input, config);
}

async function main() {
  await testCollaborativeStreaming();
}

main().catch(console.error);
package/src/proto/main_collab_design_v4.ts
DELETED
@@ -1,100 +0,0 @@
// src/collaborative_main.ts
import dotenv from 'dotenv';
import { HumanMessage } from '@langchain/core/messages';
import type * as t from '@/types';
import {
  ChatModelStreamHandler,
  LLMStreamHandler,
} from '@/stream';
import { CollaborativeProcessor, Member } from '@/collab_design_v4';
import { tavilyTool, chartTool } from '@/tools/example';
import { supervisorPrompt } from '@/prompts/collab';
import { GraphEvents, Providers } from '@/common';

dotenv.config();

async function testCollaborativeStreaming() {
  const customHandlers = {
    [GraphEvents.LLM_STREAM]: new LLMStreamHandler(),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
    [GraphEvents.LLM_START]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM Start:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.LLM_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM End:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.CHAT_MODEL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Chat Model End:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.TOOL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Tool End:', event);
        console.dir(data, { depth: null });
      }
    },
  };

  const members: Member[] = [
    {
      name: 'researcher',
      systemPrompt: 'You are a web researcher. You may use the Tavily search engine to search the web for important information, so the Chart Generator in your team can make useful plots.',
      tools: [tavilyTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0,
      },
    },
    {
      name: 'chart_generator',
      systemPrompt: 'You excel at generating bar charts. Use the researcher\'s information to generate the charts.',
      tools: [chartTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0.2,
      },
    },
  ];

  const supervisorConfig = {
    systemPrompt: supervisorPrompt,
    llmConfig: {
      provider: Providers.OPENAI,
      modelName: 'gpt-4o',
      temperature: 0,
    },
  };

  const collaborativeProcessor = new CollaborativeProcessor(members, supervisorConfig, customHandlers);
  await collaborativeProcessor.initialize();

  const config = {
    configurable: { thread_id: 'collaborative-conversation-1' },
    streamMode: 'values',
    version: 'v2' as const,
  };

  console.log('\nCollaborative Test: Create a chart');

  const input = {
    messages: [new HumanMessage('Create a chart showing the population growth of the top 5 most populous countries over the last 50 years.')],
  };

  await collaborativeProcessor.processStream(input, config);
}

async function main() {
  await testCollaborativeStreaming();
}

main().catch(console.error);
package/src/proto/main_collab_design_v5.ts
DELETED
@@ -1,135 +0,0 @@
// src/main_collab_design_v5.ts
import dotenv from 'dotenv';
import { HumanMessage } from '@langchain/core/messages';
import type * as t from '@/types';
import {
  ChatModelStreamHandler,
  LLMStreamHandler,
} from '@/stream';
import { CollaborativeProcessor, Member } from '@/collab_design_v5';
import { tavilyTool, chartTool } from '@/tools/example';
import { supervisorPrompt } from '@/prompts/collab';
import { GraphEvents, Providers } from '@/common';
import fs from 'fs';
import util from 'util';

dotenv.config();

// Create a write stream
const logFile = fs.createWriteStream('output.log', { flags: 'a' });

// Redirect console.log and console.error
const originalConsoleLog = console.log;
const originalConsoleError = console.error;

console.log = function(...args) {
  logFile.write(util.format.apply(null, args) + '\n');
  originalConsoleLog.apply(console, args);
};

console.error = function(...args) {
  logFile.write(util.format.apply(null, args) + '\n');
  originalConsoleError.apply(console, args);
};

// Redirect process.stdout.write
const originalStdoutWrite = process.stdout.write;
process.stdout.write = function(chunk: string | Uint8Array, encoding?: BufferEncoding, callback?: (error: Error | null | undefined) => void): boolean {
  logFile.write(chunk, encoding);
  return originalStdoutWrite.apply(process.stdout, [chunk, encoding, callback]);
} as any;

// Redirect process.stderr.write
const originalStderrWrite = process.stderr.write;
process.stderr.write = function(chunk: string | Uint8Array, encoding?: BufferEncoding, callback?: (error: Error | null | undefined) => void): boolean {
  logFile.write(chunk, encoding);
  return originalStderrWrite.apply(process.stderr, [chunk, encoding, callback]);
} as any;

async function testCollaborativeStreaming() {
  const customHandlers = {
    [GraphEvents.LLM_STREAM]: new LLMStreamHandler(),
    [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
    [GraphEvents.LLM_START]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM Start:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.LLM_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('LLM End:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.CHAT_MODEL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Chat Model End:', event);
        // console.dir(data, { depth: null });
      }
    },
    [GraphEvents.TOOL_END]: {
      handle: (event: string, data: t.StreamEventData) => {
        console.log('Tool End:', event);
        console.dir(data, { depth: null });
      }
    },
  };

  const members: Member[] = [
    {
      name: 'researcher',
      systemPrompt: 'You are a web researcher. You may use the Tavily search engine to search the web for important information, so the Chart Generator in your team can make useful plots.',
      tools: [tavilyTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0,
      },
    },
    {
      name: 'chart_generator',
      systemPrompt: 'You excel at generating bar charts. Use the researcher\'s information to generate the charts.',
      tools: [chartTool],
      llmConfig: {
        provider: Providers.OPENAI,
        modelName: 'gpt-4o',
        temperature: 0.2,
      },
    },
  ];

  const supervisorConfig = {
    systemPrompt: supervisorPrompt,
    llmConfig: {
      provider: Providers.OPENAI,
      modelName: 'gpt-4o',
      temperature: 0,
    },
  };

  const collaborativeProcessor = new CollaborativeProcessor(members, supervisorConfig, customHandlers);
  await collaborativeProcessor.initialize();

  const config = {
    configurable: { thread_id: 'collaborative-conversation-1' },
    streamMode: 'values',
    version: 'v2' as const,
  };

  console.log('\nCollaborative Test: Create a chart');

  const input = {
    messages: [new HumanMessage('Create a chart showing the population growth of the top 5 most populous countries over the last 50 years.')],
  };

  await collaborativeProcessor.processStream(input, config);
}

async function main() {
  await testCollaborativeStreaming();
}

main().catch(console.error).finally(() => {
  logFile.end();
});