@autonomaai/agent-core 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/base-agent.d.ts +112 -0
- package/dist/base-agent.d.ts.map +1 -0
- package/dist/base-agent.js +173 -0
- package/dist/base-agent.js.map +1 -0
- package/dist/core.d.ts +81 -0
- package/dist/core.d.ts.map +1 -0
- package/dist/core.js +633 -0
- package/dist/core.js.map +1 -0
- package/dist/error-handler.d.ts +78 -0
- package/dist/error-handler.d.ts.map +1 -0
- package/dist/error-handler.js +129 -0
- package/dist/error-handler.js.map +1 -0
- package/dist/factory.d.ts +60 -0
- package/dist/factory.d.ts.map +1 -0
- package/dist/factory.js +621 -0
- package/dist/factory.js.map +1 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +19 -0
- package/dist/index.js.map +1 -0
- package/dist/streaming.d.ts +24 -0
- package/dist/streaming.d.ts.map +1 -0
- package/dist/streaming.js +36 -0
- package/dist/streaming.js.map +1 -0
- package/dist/trading/formatters.d.ts +167 -0
- package/dist/trading/formatters.d.ts.map +1 -0
- package/dist/trading/formatters.js +271 -0
- package/dist/trading/formatters.js.map +1 -0
- package/dist/trading/index.d.ts +9 -0
- package/dist/trading/index.d.ts.map +1 -0
- package/dist/trading/index.js +10 -0
- package/dist/trading/index.js.map +1 -0
- package/dist/trading/types.d.ts +205 -0
- package/dist/trading/types.d.ts.map +1 -0
- package/dist/trading/types.js +7 -0
- package/dist/trading/types.js.map +1 -0
- package/dist/trading/utils.d.ts +120 -0
- package/dist/trading/utils.d.ts.map +1 -0
- package/dist/trading/utils.js +291 -0
- package/dist/trading/utils.js.map +1 -0
- package/dist/trading/validation.d.ts +40 -0
- package/dist/trading/validation.d.ts.map +1 -0
- package/dist/trading/validation.js +247 -0
- package/dist/trading/validation.js.map +1 -0
- package/dist/types.d.ts +282 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +21 -0
- package/dist/types.js.map +1 -0
- package/package.json +57 -0
- package/src/base-agent.ts +263 -0
- package/src/core.ts +792 -0
- package/src/error-handler.ts +166 -0
- package/src/factory.ts +687 -0
- package/src/global.d.ts +12 -0
- package/src/index.ts +24 -0
- package/src/streaming.ts +50 -0
- package/src/trading/formatters.ts +363 -0
- package/src/trading/index.ts +10 -0
- package/src/trading/types.ts +263 -0
- package/src/trading/utils.ts +355 -0
- package/src/trading/validation.ts +321 -0
- package/src/types.ts +402 -0
package/src/core.ts
ADDED
|
@@ -0,0 +1,792 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Core Agent Implementation for autonoma Ecosystem
|
|
3
|
+
*
|
|
4
|
+
* Provides standardized agent creation and management with LangGraph + AgentKit integration.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
// Node.js process global
|
|
8
|
+
declare const process: {
|
|
9
|
+
env: Record<string, string | undefined>;
|
|
10
|
+
memoryUsage(): { heapUsed: number; [key: string]: number };
|
|
11
|
+
};
|
|
12
|
+
declare const fetch: typeof globalThis.fetch;
|
|
13
|
+
|
|
14
|
+
import {
|
|
15
|
+
AgentConfig,
|
|
16
|
+
autonomaAgent,
|
|
17
|
+
AgentMessage,
|
|
18
|
+
AgentResponse,
|
|
19
|
+
AgentStatus,
|
|
20
|
+
AgentMetrics,
|
|
21
|
+
LangGraphAgent,
|
|
22
|
+
MessageService,
|
|
23
|
+
// RAGService, // Not yet implemented
|
|
24
|
+
Tool,
|
|
25
|
+
ToolCall
|
|
26
|
+
} from './types.js';
|
|
27
|
+
|
|
28
|
+
import { ChatOpenAI } from '@langchain/openai';
|
|
29
|
+
import { createReactAgent } from '@langchain/langgraph/prebuilt';
|
|
30
|
+
import { createUnifiedMCPTools, UnifiedMCPTools } from '@autonomaai/mcp-client';
|
|
31
|
+
import { executeMCPTool } from '@autonomaai/mcp-client';
|
|
32
|
+
import { streamingService, StreamingEventType } from './streaming.js';
|
|
33
|
+
|
|
34
|
+
// Structured logging interface
/**
 * Minimal leveled-logging contract used throughout the agent core.
 * Implementations decide the output format; `meta` carries optional
 * structured context attached to the log entry.
 */
interface Logger {
  /** Logs an informational message. */
  info(message: string, meta?: Record<string, any>): void;
  /** Logs a warning. */
  warn(message: string, meta?: Record<string, any>): void;
  /** Logs an error. */
  error(message: string, meta?: Record<string, any>): void;
  /** Logs a debug-level message (typically suppressed in production). */
  debug(message: string, meta?: Record<string, any>): void;
}
|
|
41
|
+
|
|
42
|
+
/** Identifiers for the MCP backends an agent can route tool calls to. */
type McpServiceName = 'hummingbot' | 'rag' | 'dexscreener' | 'apy';
|
|
43
|
+
|
|
44
|
+
// Simple structured logger implementation
|
|
45
|
+
class StructuredLogger implements Logger {
|
|
46
|
+
constructor(private agentName: string, private logLevel: string = 'info') {}
|
|
47
|
+
|
|
48
|
+
private log(level: string, message: string, meta?: Record<string, any>): void {
|
|
49
|
+
if (!this.shouldLog(level)) return;
|
|
50
|
+
|
|
51
|
+
const timestamp = new Date().toISOString();
|
|
52
|
+
const logEntry = {
|
|
53
|
+
timestamp,
|
|
54
|
+
level,
|
|
55
|
+
agent: this.agentName,
|
|
56
|
+
message,
|
|
57
|
+
...(meta && { meta })
|
|
58
|
+
};
|
|
59
|
+
|
|
60
|
+
// Use structured JSON logging for production, readable format for development
|
|
61
|
+
if (process.env.NODE_ENV === 'production') {
|
|
62
|
+
console.log(JSON.stringify(logEntry));
|
|
63
|
+
} else {
|
|
64
|
+
console.log(`[${timestamp}] ${level.toUpperCase()} [${this.agentName}] ${message}${meta ? ` ${JSON.stringify(meta)}` : ''}`);
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
private shouldLog(level: string): boolean {
|
|
69
|
+
const levels = ['debug', 'info', 'warn', 'error'];
|
|
70
|
+
const currentLevel = levels.indexOf(this.logLevel);
|
|
71
|
+
const messageLevel = levels.indexOf(level);
|
|
72
|
+
return messageLevel >= currentLevel;
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
info(message: string, meta?: Record<string, any>): void {
|
|
76
|
+
this.log('info', message, meta);
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
warn(message: string, meta?: Record<string, any>): void {
|
|
80
|
+
this.log('warn', message, meta);
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
error(message: string, meta?: Record<string, any>): void {
|
|
84
|
+
this.log('error', message, meta);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
debug(message: string, meta?: Record<string, any>): void {
|
|
88
|
+
this.log('debug', message, meta);
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
 * Core agent implementation that standardizes LangGraph + AgentKit patterns.
 *
 * Wraps a LangGraph ReAct agent around a ChatOpenAI model, optionally exposes
 * MCP-backed tools via a single `call_mcp_tool` dispatcher, persists
 * conversation history through an injected MessageService, publishes progress
 * through streamingService, and tracks per-agent metrics. If the LangGraph run
 * throws, processing falls back to a direct OpenAI chat-completion request.
 */
export class StandardAgent implements autonomaAgent {
  public readonly id: string;
  public readonly name: string;
  public readonly description?: string;
  public capabilities: string[] = [];
  public tools: Tool[] = [];
  public config: AgentConfig;

  private langGraphAgent?: LangGraphAgent;
  // OpenAI settings captured during initialization; also used by the direct
  // HTTP fallback path in requestOpenAICompletion().
  private openAiConfig?: {
    apiKey: string;
    model: string;
    temperature: number;
    maxTokens: number;
  };
  // Token count from the most recent completion; reused when a result carries no usage info.
  private lastTokenUsage: number = 0;
  private messageService?: MessageService;
  // private ragService?: RAGService; // Not yet implemented
  private unifiedMcpTools?: UnifiedMCPTools;
  private status: AgentStatus['status'] = 'idle';
  private startTime: number = Date.now();
  private metrics: AgentMetrics;
  private logger: Logger;

  constructor(config: AgentConfig) {
    // Pseudo-unique id: timestamp plus short random suffix (not cryptographically unique).
    this.id = `agent_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
    this.name = config.name;
    this.description = config.description;
    this.config = config;

    // Initialize structured logger; defaults to 'debug' outside production.
    const logLevel = process.env.LOG_LEVEL || (process.env.NODE_ENV === 'production' ? 'info' : 'debug');
    this.logger = new StructuredLogger(this.name, logLevel);

    this.metrics = {
      totalMessages: 0,
      totalToolCalls: 0,
      averageResponseTime: 0,
      successRate: 100,
      topTools: [],
      errorCount: 0,
      uptime: 0
    };
  }

  /**
   * Initializes the LangGraph agent and configured services. Must be called
   * before process(). Throws (and sets status 'error') on any init failure.
   */
  async start(): Promise<void> {
    try {
      this.status = 'processing';

      // Initialize the underlying LangGraph agent
      await this.initializeLangGraphAgent();

      // Initialize services if configured
      await this.initializeServices();

      this.status = 'idle';
      this.logger.info('Agent started successfully', {
        agentId: this.id,
        capabilities: this.capabilities,
        toolCount: this.tools.length
      });
    } catch (error) {
      this.status = 'error';
      this.metrics.errorCount++;
      throw new Error(`Failed to start agent: ${error instanceof Error ? error.message : String(error)}`);
    }
  }

  /** Marks the agent stopped and logs final uptime/message counts. */
  async stop(): Promise<void> {
    this.status = 'stopped';
    this.logger.info('Agent stopped', {
      agentId: this.id,
      uptime: Date.now() - this.startTime,
      totalMessages: this.metrics.totalMessages
    });
  }

  /**
   * Processes one inbound message: persists it (when a MessageService is
   * attached), runs it through the LangGraph pipeline, and updates metrics.
   * Errors are returned as an AgentResponse with `error` set rather than thrown.
   */
  async process(message: AgentMessage): Promise<AgentResponse> {
    const startTime = Date.now();
    this.status = 'processing';

    try {
      if (!this.langGraphAgent) {
        throw new Error('Agent not initialized. Call start() first.');
      }

      // Save user message if message service is available
      if (this.messageService) {
        await this.messageService.saveMessage(message);
      }

      // Process with LangGraph agent
      const response = await this.processWithLangGraph(message);

      // Update metrics
      this.updateMetrics(startTime, true);
      this.status = 'idle';

      return response;

    } catch (error) {
      this.updateMetrics(startTime, false);
      this.status = 'error';
      // NOTE(review): updateMetrics(startTime, false) already increments
      // errorCount, so each failure is counted twice here — confirm intended.
      this.metrics.errorCount++;

      const errorMessage = error instanceof Error ? error.message : String(error);
      return {
        content: `I encountered an error: ${errorMessage}`,
        timestamp: new Date().toISOString(),
        error: errorMessage
      };
    }
  }

  /** Merges a partial config and, if already started, rebuilds the LangGraph agent. */
  async updateConfig(config: Partial<AgentConfig>): Promise<void> {
    this.config = { ...this.config, ...config };

    // Reinitialize if agent is running
    if (this.langGraphAgent) {
      await this.initializeLangGraphAgent();
    }
  }

  /** Registers a tool (and its name as a capability), rebuilding the agent if running. */
  async addTool(tool: Tool): Promise<void> {
    this.tools.push(tool);
    this.capabilities.push(tool.name || 'unknown_tool');

    // Reinitialize agent with new tools
    if (this.langGraphAgent) {
      await this.initializeLangGraphAgent();
    }
  }

  /** Removes a tool (and matching capability) by name, rebuilding the agent if running. */
  async removeTool(toolName: string): Promise<void> {
    this.tools = this.tools.filter(tool => tool.name !== toolName);
    this.capabilities = this.capabilities.filter(cap => cap !== toolName);

    // Reinitialize agent without the tool
    if (this.langGraphAgent) {
      await this.initializeLangGraphAgent();
    }
  }

  /** Returns a snapshot of current status, uptime, active tools, and heap usage. */
  getStatus(): AgentStatus {
    return {
      status: this.status,
      uptime: Date.now() - this.startTime,
      lastActivity: new Date().toISOString(),
      activeTools: this.tools.map(tool => tool.name || 'unknown'),
      memoryUsage: {
        conversationMessages: this.metrics.totalMessages,
        totalMemoryMB: process.memoryUsage().heapUsed / 1024 / 1024
      }
    };
  }

  /** Returns accumulated metrics with uptime recomputed at call time. */
  getMetrics(): AgentMetrics {
    return {
      ...this.metrics,
      uptime: Date.now() - this.startTime
    };
  }

  /** Clears persisted conversation history (if any) and resets the message counter. */
  async clearMemory(): Promise<void> {
    if (this.messageService) {
      await this.messageService.clearHistory();
    }
    // Reset conversation-related metrics
    this.metrics.totalMessages = 0;
  }

  /** Fetches up to `limit` recent messages; empty when no MessageService is attached. */
  async getConversationHistory(limit: number = 50): Promise<AgentMessage[]> {
    if (this.messageService) {
      return await this.messageService.getRecentMessages(limit);
    }
    return [];
  }

  /** Returns the configured system message, or a generic default. */
  private buildSystemPrompt(): string {
    if (this.config.prompt?.systemMessage) {
      return this.config.prompt.systemMessage;
    }
    return 'You are a helpful AI assistant providing insights to the autonoma platform.';
  }

  /**
   * Assembles the chat transcript sent to the model: system prompt, optional
   * extra context, the last 10 history messages, then the incoming message.
   */
  private async buildConversationMessages(message: AgentMessage): Promise<Array<{ role: string; content: string }>> {
    const messages: Array<{ role: string; content: string }> = [];
    const systemPrompt = this.buildSystemPrompt();
    messages.push({ role: 'system', content: systemPrompt });

    if (this.config.prompt?.context) {
      messages.push({ role: 'system', content: this.config.prompt.context });
    }

    // NOTE(review): reverse() assumes getRecentMessages returns newest-first
    // so history ends up oldest-first — confirm against the service contract.
    const conversationHistory = await this.getConversationHistory(10);
    for (const entry of conversationHistory.reverse()) {
      messages.push({
        role: entry.role,
        content: entry.content
      });
    }

    messages.push({
      role: message.role,
      content: message.content
    });

    return messages;
  }

  /** Streaming session id from message metadata, or 'default'. */
  private getSessionId(message: AgentMessage): string {
    return message.metadata?.sessionId || 'default';
  }

  /** Publishes a single streaming event for the given session. */
  private emitStreamingEvent(
    sessionId: string,
    type: StreamingEventType,
    content: string,
    metadata?: Record<string, any>
  ): void {
    streamingService.publish(sessionId, { type, content, metadata });
  }

  /** Splits `text` into 120-char chunks and streams each non-blank chunk in order. */
  private publishStreamingChunks(sessionId: string, text: string, type: StreamingEventType = 'progress'): void {
    const chunkSize = 120;
    for (let start = 0; start < text.length; start += chunkSize) {
      const chunk = text.slice(start, start + chunkSize);
      if (chunk.trim()) {
        this.emitStreamingEvent(sessionId, type, chunk, {
          chunk_index: Math.floor(start / chunkSize),
          chunk_size: chunkSize
        });
      }
    }
  }

  /**
   * Direct (non-LangGraph) call to the OpenAI chat-completions HTTP API.
   * Used as the fallback path; records token usage as a side effect.
   * Throws on HTTP errors or when the response has no content.
   */
  private async requestOpenAICompletion(messages: Array<{ role: string; content: string }>): Promise<string> {
    const config = this.openAiConfig;
    if (!config) {
      throw new Error('LLM configuration is not initialized');
    }

    const response = await fetch('https://api.openai.com/v1/chat/completions', {
      method: 'POST',
      headers: {
        authorization: `Bearer ${config.apiKey}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        model: config.model,
        messages,
        temperature: config.temperature,
        max_tokens: config.maxTokens,
        stream: false
      })
    });

    // Body is parsed first so the error branch can surface the API's message.
    const payload = await response.json();
    if (!response.ok) {
      const errorMessage = payload?.error?.message || response.statusText;
      throw new Error(`OpenAI API error (${response.status}): ${errorMessage}`);
    }

    const choice = payload?.choices?.[0]?.message?.content;
    if (!choice) {
      throw new Error('OpenAI response is missing content');
    }

    this.lastTokenUsage = payload?.usage?.total_tokens || this.lastTokenUsage;
    return choice;
  }

  /**
   * Builds the tool list for the LangGraph agent: configured custom tools plus,
   * when MCP is enabled, the unified MCP dispatcher tool. URLs fall back to
   * environment variables, then localhost defaults.
   */
  private async createLangGraphTools(): Promise<Tool[]> {
    const tools: Tool[] = [];

    // Include any custom tools provided by the agent configuration
    const customTools = this.config.tools?.customTools;
    if (Array.isArray(customTools)) {
      tools.push(...customTools);
    }

    if (this.config.tools?.enableMCP) {
      const hummingbotUrl = this.config.tools.mcpServerUrl || process.env.HUMMINGBOT_MCP_URL || 'http://localhost:8000';
      const ragUrl = this.config.tools.ragServerUrl || process.env.RAG_MCP_URL || 'http://localhost:3002';
      const dexscreenerUrl = process.env.DEXSCREENER_MCP_URL || 'http://localhost:3000';
      const apyUrl = process.env.APY_STRATEGY_MCP_URL || 'http://localhost:8003';

      this.unifiedMcpTools = createUnifiedMCPTools({
        hummingbotUrl,
        ragServerUrl: ragUrl,
        dexscreenerUrl,
        apyStrategyUrl: apyUrl
      });

      tools.push(this.createLangGraphMcpTool());
    }

    return tools;
  }

  /**
   * Creates the single dispatcher tool through which the model invokes any MCP
   * tool, selected by service name. Throws when the service is not configured
   * or the underlying call reports failure.
   */
  private createLangGraphMcpTool(): Tool {
    return {
      name: 'call_mcp_tool',
      description: 'Execute an MCP tool by service name (hummingbot, rag, dexscreener, apy). Provide tool arguments as needed.',
      func: async (params: {
        service: McpServiceName;
        tool: string;
        arguments?: Record<string, any>;
      }) => {
        const client = this.getMcpClientForService(params.service);
        if (!client) {
          throw new Error(`MCP service ${params.service} is not configured for this agent`);
        }

        const result = await executeMCPTool(client, params.tool, params.arguments);
        if (!result.success) {
          throw new Error(result.error || `MCP tool ${params.tool} failed without providing an error`);
        }

        return result.response;
      }
    };
  }

  /** Maps a service name to its MCP client; null when unconfigured or unknown. */
  private getMcpClientForService(service: McpServiceName): any {
    if (!this.unifiedMcpTools) return null;
    switch (service) {
      case 'hummingbot':
        return this.unifiedMcpTools.hummingbotTools;
      case 'rag':
        return this.unifiedMcpTools.ragTools;
      case 'dexscreener':
        return this.unifiedMcpTools.dexscreenerTools;
      case 'apy':
        return this.unifiedMcpTools.apyStrategyTools;
      default:
        return null;
    }
  }

  // =============================================================================
  // Private Implementation Methods
  // =============================================================================

  /**
   * (Re)creates the LangGraph ReAct agent from the current config: resolves the
   * OpenAI settings (config over env over defaults), builds the tool list, and
   * replaces this.tools with the resolved set. Throws without an API key.
   */
  private async initializeLangGraphAgent(): Promise<void> {
    const llmConfig = this.config.llm || {};
    const apiKey = llmConfig.openAIApiKey || process.env.OPENAI_API_KEY;

    if (!apiKey) {
      throw new Error('OpenAI API key is required to initialize the agent');
    }

    this.openAiConfig = {
      apiKey,
      model: llmConfig.model || process.env.OPENAI_MODEL || 'gpt-4o-mini',
      temperature: llmConfig.temperature ?? 0.3,
      maxTokens: llmConfig.maxTokens ?? parseInt(process.env.OPENAI_MAX_TOKENS || '2048', 10)
    };

    const llm = new ChatOpenAI({
      openAIApiKey: this.openAiConfig.apiKey,
      modelName: this.openAiConfig.model,
      temperature: this.openAiConfig.temperature,
      maxTokens: this.openAiConfig.maxTokens
    });

    const tools = await this.createLangGraphTools();
    this.tools = tools;

    this.langGraphAgent = await createReactAgent({
      llm,
      tools,
      messageModifier: this.buildSystemPrompt()
    });
  }

  /** Wires up configured services. RAG is logged but not yet implemented. */
  private async initializeServices(): Promise<void> {
    if (this.config.services?.messageService) {
      // Support passing a concrete MessageService instance
      if (typeof this.config.services.messageService !== 'boolean') {
        this.messageService = this.config.services.messageService;
      }
      this.logger.info('Message service enabled', {
        service: 'messageService',
        config: this.config.services.messageService
      });
    }

    if (this.config.services?.ragService) {
      // Initialize RAG service - implementation will be injected
      // if (typeof this.config.services.ragService !== 'boolean') {
      //   this.ragService = this.config.services.ragService;
      // }
      this.logger.info('RAG service enabled (not yet implemented)', {
        service: 'ragService',
        config: this.config.services.ragService
      });
    }
  }

  /**
   * Runs one turn through the LangGraph agent: flattens the transcript to a
   * single ROLE-tagged text input, streams the result in chunks, and persists
   * the assistant reply. On any LangGraph failure it falls back to a direct
   * OpenAI completion over the same transcript (emitting an 'error' event
   * before streaming the fallback content).
   */
  private async processWithLangGraph(message: AgentMessage): Promise<AgentResponse> {
    if (!this.langGraphAgent) {
      throw new Error('LangGraph agent is not initialized');
    }

    const sessionId = this.getSessionId(message);
    const conversation = await this.buildConversationMessages(message);
    this.emitStreamingEvent(sessionId, 'start', 'Processing your request with LangGraph');
    const agentInput = conversation.map(entry => `${entry.role.toUpperCase()}: ${entry.content}`).join('\n\n');
    const startTime = Date.now();

    try {
      // NOTE(review): 'as any' bypasses the LangGraphAgent type to call run() — confirm the prebuilt agent exposes it.
      const runResult = await (this.langGraphAgent as any).run(agentInput);
      const { content, toolCalls, tokensUsed } = this.normalizeLangGraphResult(runResult);
      const processingTime = Date.now() - startTime;
      const timestamp = new Date().toISOString();

      if (this.messageService) {
        await this.messageService.saveMessage({ role: 'assistant', content, timestamp });
      }

      this.metrics.totalToolCalls += toolCalls.length;
      const response: AgentResponse = {
        content,
        timestamp,
        toolCalls,
        metrics: {
          processingTime,
          tokensUsed,
          toolsUsed: toolCalls.map(call => call.tool)
        }
      };

      this.publishStreamingChunks(sessionId, content);
      this.emitStreamingEvent(sessionId, 'done', content, { toolCalls: toolCalls.map(call => call.tool) });

      return response;

    } catch (error) {
      this.logger.warn('LangGraph agent failed, falling back to direct OpenAI completion', {
        error: error instanceof Error ? error.message : String(error)
      });

      const fallbackContent = await this.requestOpenAICompletion(conversation);
      const processingTime = Date.now() - startTime;
      const timestamp = new Date().toISOString();

      if (this.messageService) {
        await this.messageService.saveMessage({ role: 'assistant', content: fallbackContent, timestamp });
      }

      this.emitStreamingEvent(sessionId, 'error', 'LangGraph agent fallback triggered', {
        error: error instanceof Error ? error.message : String(error)
      });
      this.publishStreamingChunks(sessionId, fallbackContent);
      this.emitStreamingEvent(sessionId, 'done', fallbackContent);

      return {
        content: fallbackContent,
        timestamp,
        metrics: {
          processingTime,
          tokensUsed: this.lastTokenUsage,
          toolsUsed: []
        }
      };
    }
  }

  /**
   * Coerces the loosely-typed LangGraph run result into a uniform shape.
   * Content is taken from the first of: string result, .content, .response,
   * array .output (joined), string .output — empty string otherwise. Token
   * usage falls back to the previous request's count when absent.
   */
  private normalizeLangGraphResult(result: any): {
    content: string;
    toolCalls: ToolCall[];
    tokensUsed: number;
  } {
    let content = '';
    if (typeof result === 'string') {
      content = result;
    } else if (typeof result?.content === 'string') {
      content = result.content;
    } else if (typeof result?.response === 'string') {
      content = result.response;
    } else if (Array.isArray(result?.output)) {
      content = result.output
        .map((segment: any) => (typeof segment === 'string' ? segment : segment?.text || segment?.content || ''))
        .filter(Boolean)
        .join('\n');
    } else if (typeof result?.output === 'string') {
      content = result.output;
    }

    const toolCalls: ToolCall[] = Array.isArray(result?.toolCalls)
      ? result.toolCalls.map((toolCall: any) => ({
          tool: toolCall.tool || toolCall.name || 'unknown',
          input: toolCall.input || toolCall.arguments,
          output: toolCall.output || toolCall.result,
          duration: toolCall.duration || 0,
          success: toolCall.success ?? !toolCall.error,
          error: toolCall.error
        }))
      : [];

    const tokensUsed = result?.usage?.total_tokens || this.lastTokenUsage;
    this.lastTokenUsage = tokensUsed;

    return { content, toolCalls, tokensUsed };
  }

  /**
   * Records one completed request: bumps the message count, folds the elapsed
   * time into the running average, and recomputes the success rate. Failures
   * also increment errorCount.
   */
  private updateMetrics(startTime: number, success: boolean): void {
    const processingTime = Date.now() - startTime;

    this.metrics.totalMessages++;
    // Incremental running average over all processed messages.
    this.metrics.averageResponseTime =
      (this.metrics.averageResponseTime * (this.metrics.totalMessages - 1) + processingTime) /
      this.metrics.totalMessages;

    if (!success) {
      this.metrics.errorCount++;
    }

    this.metrics.successRate =
      ((this.metrics.totalMessages - this.metrics.errorCount) / this.metrics.totalMessages) * 100;
  }

  // =============================================================================
  // Static Factory Methods
  // =============================================================================

  /** Creates an agent preloaded with trading-oriented capabilities. */
  static createTradingAgent(config: AgentConfig): StandardAgent {
    const tradingConfig = {
      ...config,
      capabilities: [
        'trading_controller_management',
        'exchange_connectivity',
        'market_data_analysis',
        'risk_management',
        'portfolio_optimization',
        ...(config.prompt?.capabilities || [])
      ]
    };

    return new StandardAgent(tradingConfig);
  }

  /** Creates an agent preloaded with data-analysis capabilities. */
  static createDataAnalysisAgent(config: AgentConfig): StandardAgent {
    const analysisConfig = {
      ...config,
      capabilities: [
        'data_collection',
        'statistical_analysis',
        'pattern_recognition',
        'report_generation',
        'visualization',
        ...(config.prompt?.capabilities || [])
      ]
    };

    return new StandardAgent(analysisConfig);
  }

  /** Creates an agent preloaded with customer-service capabilities. */
  static createCustomerServiceAgent(config: AgentConfig): StandardAgent {
    const serviceConfig = {
      ...config,
      capabilities: [
        'question_answering',
        'knowledge_base_search',
        'issue_escalation',
        'conversation_management',
        'sentiment_analysis',
        ...(config.prompt?.capabilities || [])
      ]
    };

    return new StandardAgent(serviceConfig);
  }
}
|
|
670
|
+
|
|
671
|
+
/**
|
|
672
|
+
* Agent builder for fluent configuration
|
|
673
|
+
*/
|
|
674
|
+
export class AgentBuilder {
|
|
675
|
+
private config: AgentConfig = { name: 'unnamed_agent' };
|
|
676
|
+
|
|
677
|
+
setName(name: string): AgentBuilder {
|
|
678
|
+
this.config.name = name;
|
|
679
|
+
return this;
|
|
680
|
+
}
|
|
681
|
+
|
|
682
|
+
setDescription(description: string): AgentBuilder {
|
|
683
|
+
this.config.description = description;
|
|
684
|
+
return this;
|
|
685
|
+
}
|
|
686
|
+
|
|
687
|
+
setLLM(llmConfig: AgentConfig['llm']): AgentBuilder {
|
|
688
|
+
this.config.llm = llmConfig;
|
|
689
|
+
return this;
|
|
690
|
+
}
|
|
691
|
+
|
|
692
|
+
setAgentKit(agentKitConfig: AgentConfig['agentKit']): AgentBuilder {
|
|
693
|
+
this.config.agentKit = agentKitConfig;
|
|
694
|
+
return this;
|
|
695
|
+
}
|
|
696
|
+
|
|
697
|
+
setTools(toolsConfig: AgentConfig['tools']): AgentBuilder {
|
|
698
|
+
this.config.tools = toolsConfig;
|
|
699
|
+
return this;
|
|
700
|
+
}
|
|
701
|
+
|
|
702
|
+
setPrompt(promptConfig: AgentConfig['prompt']): AgentBuilder {
|
|
703
|
+
this.config.prompt = promptConfig;
|
|
704
|
+
return this;
|
|
705
|
+
}
|
|
706
|
+
|
|
707
|
+
setMemory(memoryConfig: AgentConfig['memory']): AgentBuilder {
|
|
708
|
+
this.config.memory = memoryConfig;
|
|
709
|
+
return this;
|
|
710
|
+
}
|
|
711
|
+
|
|
712
|
+
setServices(servicesConfig: AgentConfig['services']): AgentBuilder {
|
|
713
|
+
this.config.services = servicesConfig;
|
|
714
|
+
return this;
|
|
715
|
+
}
|
|
716
|
+
|
|
717
|
+
addCustomTool(tool: Tool): AgentBuilder {
|
|
718
|
+
if (!this.config.tools) {
|
|
719
|
+
this.config.tools = {};
|
|
720
|
+
}
|
|
721
|
+
if (!this.config.tools.customTools) {
|
|
722
|
+
this.config.tools.customTools = [];
|
|
723
|
+
}
|
|
724
|
+
this.config.tools.customTools.push(tool);
|
|
725
|
+
return this;
|
|
726
|
+
}
|
|
727
|
+
|
|
728
|
+
async build(): Promise<autonomaAgent> {
|
|
729
|
+
return new StandardAgent(this.config);
|
|
730
|
+
}
|
|
731
|
+
}
|
|
732
|
+
|
|
733
|
+
/**
|
|
734
|
+
* Utility functions for agent management
|
|
735
|
+
*/
|
|
736
|
+
export class AgentUtils {
|
|
737
|
+
static validateConfig(config: AgentConfig): { valid: boolean; errors: string[] } {
|
|
738
|
+
const errors: string[] = [];
|
|
739
|
+
|
|
740
|
+
if (!config.name || config.name.trim() === '') {
|
|
741
|
+
errors.push('Agent name is required');
|
|
742
|
+
}
|
|
743
|
+
|
|
744
|
+
if (config.tools?.enableAgentKit) {
|
|
745
|
+
if (!config.agentKit?.cdpApiKeyId || !config.agentKit?.cdpApiKeySecret) {
|
|
746
|
+
errors.push('AgentKit requires CDP API credentials');
|
|
747
|
+
}
|
|
748
|
+
}
|
|
749
|
+
|
|
750
|
+
if (config.tools?.enableMCP && !config.tools?.mcpServerUrl) {
|
|
751
|
+
errors.push('MCP tools require MCP server URL');
|
|
752
|
+
}
|
|
753
|
+
|
|
754
|
+
return {
|
|
755
|
+
valid: errors.length === 0,
|
|
756
|
+
errors
|
|
757
|
+
};
|
|
758
|
+
}
|
|
759
|
+
|
|
760
|
+
static getEnvironmentConfig(): AgentConfig {
|
|
761
|
+
return {
|
|
762
|
+
name: process.env.AGENT_NAME || 'environment_agent',
|
|
763
|
+
llm: {
|
|
764
|
+
model: process.env.LLM_MODEL || 'gpt-4o-mini',
|
|
765
|
+
openAIApiKey: process.env.OPENAI_API_KEY
|
|
766
|
+
},
|
|
767
|
+
agentKit: {
|
|
768
|
+
networkId: process.env.NETWORK_ID || 'base-sepolia',
|
|
769
|
+
cdpApiKeyId: process.env.CDP_API_KEY_ID,
|
|
770
|
+
cdpApiKeySecret: process.env.CDP_API_KEY_SECRET,
|
|
771
|
+
walletDataFile: process.env.WALLET_DATA_FILE || 'wallet_data.txt'
|
|
772
|
+
},
|
|
773
|
+
tools: {
|
|
774
|
+
enableAgentKit: !!process.env.CDP_API_KEY_ID,
|
|
775
|
+
enableMCP: !!process.env.MCP_SERVER_URL,
|
|
776
|
+
enableRAG: !!process.env.RAG_SERVER_URL,
|
|
777
|
+
mcpServerUrl: process.env.HUMMINGBOT_MCP_URL || 'http://localhost:8000',
|
|
778
|
+
ragServerUrl: process.env.RAG_MCP_URL || 'http://localhost:3002'
|
|
779
|
+
},
|
|
780
|
+
memory: {
|
|
781
|
+
enabled: true,
|
|
782
|
+
persistentThreadId: true,
|
|
783
|
+
maxMessages: parseInt(process.env.MAX_MESSAGES || '100')
|
|
784
|
+
},
|
|
785
|
+
services: {
|
|
786
|
+
messageService: true,
|
|
787
|
+
ragService: !!process.env.RAG_SERVER_URL,
|
|
788
|
+
loggingEnabled: process.env.NODE_ENV !== 'production'
|
|
789
|
+
}
|
|
790
|
+
};
|
|
791
|
+
}
|
|
792
|
+
}
|