@sschepis/robodev 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ai.mjs +8 -0
- package/package.json +48 -0
- package/src/cli/cli-interface.mjs +271 -0
- package/src/config.mjs +64 -0
- package/src/core/ai-assistant.mjs +540 -0
- package/src/core/ai-provider.mjs +579 -0
- package/src/core/history-manager.mjs +330 -0
- package/src/core/system-prompt.mjs +182 -0
- package/src/custom-tools/custom-tools-manager.mjs +310 -0
- package/src/execution/tool-executor.mjs +892 -0
- package/src/lib/README.md +114 -0
- package/src/lib/adapters/console-status-adapter.mjs +48 -0
- package/src/lib/adapters/network-llm-adapter.mjs +37 -0
- package/src/lib/index.mjs +101 -0
- package/src/lib/interfaces.d.ts +98 -0
- package/src/main.mjs +61 -0
- package/src/package/package-manager.mjs +223 -0
- package/src/quality/code-validator.mjs +126 -0
- package/src/quality/quality-evaluator.mjs +248 -0
- package/src/reasoning/reasoning-system.mjs +258 -0
- package/src/structured-dev/flow-manager.mjs +321 -0
- package/src/structured-dev/implementation-planner.mjs +223 -0
- package/src/structured-dev/manifest-manager.mjs +423 -0
- package/src/structured-dev/plan-executor.mjs +113 -0
- package/src/structured-dev/project-bootstrapper.mjs +523 -0
- package/src/tools/desktop-automation-tools.mjs +172 -0
- package/src/tools/file-tools.mjs +141 -0
- package/src/tools/tool-definitions.mjs +872 -0
- package/src/ui/console-styler.mjs +503 -0
- package/src/workspace/workspace-manager.mjs +215 -0
- package/themes.json +66 -0

package/src/core/ai-assistant.mjs (new file)
@@ -0,0 +1,540 @@
// Main AI Assistant class
// Orchestrates all components and handles the main conversation flow

import { config } from '../config.mjs';
import { TOOLS } from '../tools/tool-definitions.mjs';
import { ReasoningSystem } from '../reasoning/reasoning-system.mjs';
import { CustomToolsManager } from '../custom-tools/custom-tools-manager.mjs';
import { PackageManager } from '../package/package-manager.mjs';
import { ToolExecutor } from '../execution/tool-executor.mjs';
import { WorkspaceManager } from '../workspace/workspace-manager.mjs';
import { QualityEvaluator } from '../quality/quality-evaluator.mjs';
import { HistoryManager } from './history-manager.mjs';
import { createSystemPrompt, enhanceMessagesWithWorkReporting } from './system-prompt.mjs';
import { consoleStyler } from '../ui/console-styler.mjs';
import { ManifestManager } from '../structured-dev/manifest-manager.mjs';
import { callProvider, callProviderStream, getProviderLabel, createProviderContext } from './ai-provider.mjs';

// Use native fetch in Node.js v18+
const fetch = globalThis.fetch;

export class MiniAIAssistant {
  constructor(workingDir, options = {}) {
    // Provider context is auto-detected from model name
    const providerCtx = createProviderContext();
    this.endpoint = providerCtx.endpoint;
    this.workingDir = workingDir || config.system.workspaceRoot || process.cwd();

    // Dependency Injection for Adapters
    this.llmAdapter = options.llmAdapter || {
      generateContent: (req) => callProvider(req),
      generateContentStream: (req) => callProviderStream(req)
    };

    // Initialize all subsystems
    this.reasoningSystem = new ReasoningSystem();
    this.customToolsManager = new CustomToolsManager();
    this.packageManager = new PackageManager();
    this.workspaceManager = new WorkspaceManager();
    this.qualityEvaluator = new QualityEvaluator(this.endpoint);
    this.historyManager = new HistoryManager();
    this.manifestManager = new ManifestManager(this.workingDir);

    this.toolExecutor = new ToolExecutor(
      this.packageManager,
      this.customToolsManager,
      this.workspaceManager,
      MiniAIAssistant // Pass the class for recursive calls
    );

    // Initialize history with system prompt (will be updated async if manifest exists)
    this.historyManager.initialize(
      createSystemPrompt(this.workingDir, this.workspaceManager.getCurrentWorkspace())
    );

    // Initialize tools with custom tools
    this.allTools = [...TOOLS];

    // Model configuration
    this.model = config.ai.model;
    this.temperature = config.ai.temperature;

    // Load custom tools will be done asynchronously after construction
    this.customToolsLoaded = false;
  }

  // Initialize custom tools and update system prompt with manifest
  async initializeCustomTools() {
    // Also take this opportunity to ensure system prompt has the latest manifest
    await this.updateSystemPrompt();

    if (this.customToolsLoaded) return;
    const customSchemas = await this.customToolsManager.loadCustomTools();
    this.allTools.push(...customSchemas);
    this.customToolsLoaded = true;
  }

  // Save current session state
  async saveSession(sessionPath) {
    try {
      consoleStyler.log('system', `Saving session to ${sessionPath}...`);
      const historySaved = await this.historyManager.save(`${sessionPath}.history.json`);

      // Only save workspace if active
      if (this.workspaceManager.isWorkspaceActive()) {
        await this.workspaceManager.save(`${sessionPath}.workspace.json`);
      }

      if (historySaved) {
        consoleStyler.log('system', `✓ Session saved successfully`);
        return true;
      }
      return false;
    } catch (error) {
      consoleStyler.log('error', `Failed to save session: ${error.message}`);
      return false;
    }
  }

  // Load session state
  async loadSession(sessionPath) {
    try {
      consoleStyler.log('system', `Loading session from ${sessionPath}...`);
      const historyLoaded = await this.historyManager.load(`${sessionPath}.history.json`);

      // Try to load workspace
      await this.workspaceManager.load(`${sessionPath}.workspace.json`);

      if (historyLoaded) {
        consoleStyler.log('system', `✓ Session loaded successfully (${this.historyManager.getHistory().length} messages)`);
        // Update system prompt with current environment
        this.updateSystemPrompt();
        return true;
      }
      return false;
    } catch (error) {
      consoleStyler.log('error', `Failed to load session: ${error.message}`);
      return false;
    }
  }

  // Delete specified number of exchanges from history
  deleteHistoryExchanges(count) {
    const deletedExchanges = this.historyManager.deleteHistoryExchanges(count);

    // Reset any conversation state that depends on history length
    if (deletedExchanges > 0) {
      this.qualityEvaluator.reset();
    }

    return deletedExchanges;
  }

  // Main function to process user input and orchestrate tool use
  async run(userInput, isRetry = false) {
    // Ensure custom tools are loaded
    await this.initializeCustomTools();

    if (!isRetry) {
      consoleStyler.log('ai', 'Processing new user request...', { timestamp: true });
      this.historyManager.addMessage('user', userInput);
      this.qualityEvaluator.reset();
      this.reasoningSystem.reset();

      // Predict reasoning from input
      consoleStyler.log('reasoning', 'Analyzing request complexity and predicting reasoning approach...');
      this.reasoningSystem.predictReasoningFromInput(userInput);
    } else {
      consoleStyler.log('recovery', `Retry attempt #${this.qualityEvaluator.getRetryAttempts()} initiated`, { timestamp: true });
    }

    let finalResponse = null;
    const maxTurns = 30; // Maximum conversation turns (increased for complex tasks)

    for (let i = 0; i < maxTurns; i++) {
      // Show conversation turn progress
      consoleStyler.log('progress', `Processing turn ${i + 1}/${maxTurns}`, { timestamp: true });

      const responseMessage = await this.generateContent();

      if (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {
        // Log tool calls initiation
        const toolNames = responseMessage.tool_calls.map(tc => tc.function.name).join(', ');
        consoleStyler.log('tools', `Initiating ${responseMessage.tool_calls.length} tool call(s): ${toolNames}`);

        this.historyManager.pushMessage(responseMessage);

        for (const toolCall of responseMessage.tool_calls) {
          // Log individual tool execution start
          consoleStyler.log('working', `Executing tool: ${toolCall.function.name}`);

          // Handle tool-specific reasoning updates
          if (toolCall.function.name === 'embellish_request') {
            const args = JSON.parse(toolCall.function.arguments);
            if (args.reasoning_effort && args.reasoning_justification) {
              consoleStyler.log('reasoning', `Embellishing request with ${args.reasoning_effort} reasoning`);
              this.reasoningSystem.setPredictedReasoning(
                args.reasoning_effort,
                args.reasoning_justification
              );
            }
          }

          const toolResult = await this.toolExecutor.executeTool(toolCall);

          // Log tool completion
          const success = !toolResult.content.startsWith('Error:');
          if (success) {
            consoleStyler.log('tools', `✓ Tool completed: ${toolCall.function.name}`);
          } else {
            consoleStyler.log('error', `✗ Tool failed: ${toolCall.function.name} - ${toolResult.content.substring(0, 50)}...`);
          }

          this.historyManager.pushMessage(toolResult);
        }

        consoleStyler.log('tools', `All tool calls completed. Continuing conversation...`);
        continue;

      } else {
        finalResponse = responseMessage.content;

        // Check for workPerformed field and display as intermediate update
        if (responseMessage.workPerformed) {
          consoleStyler.log('workCompleted', responseMessage.workPerformed);
        }

        this.historyManager.addMessage('assistant', finalResponse);

        // Perform quality evaluation if this isn't already a retry
        if (!isRetry && !this.qualityEvaluator.isRetrying()) {
          consoleStyler.log('quality', 'Initiating response quality evaluation...', { timestamp: true });

          const qualityResult = await this.performQualityEvaluation(userInput, finalResponse);

          if (qualityResult) {
            const rating = qualityResult.rating !== undefined ? qualityResult.rating : 0;
            consoleStyler.log('quality', `Quality evaluation complete: ${rating}/10`);

            if (this.qualityEvaluator.shouldRetry(qualityResult)) {
              consoleStyler.log('quality', `Quality below threshold (${rating}/10). Initiating retry...`, { box: true });
              consoleStyler.log('quality', `Remedy: ${qualityResult.remedy}`);

              const improvedPrompt = this.qualityEvaluator.createRetryPrompt(
                userInput,
                finalResponse,
                qualityResult
              );

              // Preserve tool call history but reset conversation for retry
              const systemPrompt = {
                role: 'system',
                content: createSystemPrompt(this.workingDir, this.workspaceManager.getCurrentWorkspace())
              };

              // Keep all tool calls and results, but remove the final poor-quality response
              const history = this.historyManager.getHistory();
              const preservedHistory = history.filter(msg =>
                msg.role === 'system' ||
                msg.role === 'tool' ||
                (msg.role === 'assistant' && msg.tool_calls) ||
                msg.role === 'user'
              );

              // Update system prompt and preserve session memory
              this.historyManager.setHistory([systemPrompt, ...preservedHistory.slice(1)]);

              consoleStyler.log('recovery', 'Preserving tool call history and retrying with improved prompt...');
              const stats = this.historyManager.getStats();
              consoleStyler.log('recovery', `Session memory preserved: ${stats.messageCount} messages`, { indent: true });

              // Recursive retry with improved prompt
              return await this.run(improvedPrompt, true);
            } else {
              consoleStyler.log('quality', `✓ Response quality approved (${rating}/10)`);
            }
          } else {
            consoleStyler.log('quality', 'Quality evaluation skipped or failed');
          }
        }

        break;
      }
    }

    if (!finalResponse) {
      finalResponse = "The assistant could not determine a final answer after multiple steps.";
    }

    return finalResponse;
  }

  // Perform quality evaluation on the response
  async performQualityEvaluation(userInput, finalResponse) {
    const history = this.historyManager.getHistory();
    const toolCallsSummary = this.qualityEvaluator.extractToolCallsSummary(history);
    const toolResults = this.qualityEvaluator.extractToolResults(history);

    return await this.qualityEvaluator.evaluateResponse(
      userInput,
      finalResponse,
      toolCallsSummary,
      toolResults,
      createSystemPrompt,
      this.workingDir,
      this.workspaceManager.getCurrentWorkspace()
    );
  }

  // Function to call the AI provider using the injected adapter
  async generateContent(overrideReasoning = null, toolName = null) {
    // Determine reasoning effort
    let reasoning = overrideReasoning;

    if (!reasoning) {
      const history = this.historyManager.getHistory();
      const context = {
        retryAttempts: this.qualityEvaluator.getRetryAttempts(),
        historyLength: history.length,
        toolCallCount: history.filter(msg => msg.tool_calls).length,
        pendingSteps: this.toolExecutor.getCurrentTodos()?.items?.filter(
          item => item.status !== 'completed'
        ).length || 0,
        todoCount: this.toolExecutor.getCurrentTodos()?.items?.length || 0,
        toolName
      };

      reasoning = this.reasoningSystem.getSimplifiedReasoning('', context);
      consoleStyler.log('reasoning', `Selected reasoning effort: ${reasoning}`);
    }

    try {
      const providerLabel = getProviderLabel(this.model);
      consoleStyler.log('ai', `Sending request to ${providerLabel}...`, { timestamp: true });
      const stats = this.historyManager.getStats();
      consoleStyler.log('ai', `Session context: ${stats.messageCount} messages, ~${stats.estimatedTokens} tokens`, { indent: true });

      // Add specific instruction for workPerformed field
      const enhancedHistory = enhanceMessagesWithWorkReporting([...this.historyManager.getHistory()]);

      // Use the injected LLM adapter
      const result = await this.llmAdapter.generateContent({
        model: this.model,
        messages: enhancedHistory,
        tools: this.allTools,
        tool_choice: "auto",
        temperature: this.temperature,
        reasoning_effort: reasoning
      });

      if (!result.choices || result.choices.length === 0) {
        throw new Error("Invalid response structure from AI provider.");
      }

      const message = result.choices[0].message;

      // Try to extract workPerformed from the response content if it's structured
      if (message.content && typeof message.content === 'string') {
        // Try to parse as JSON first
        try {
          const jsonMatch = message.content.match(/^\s*\{[\s\S]*\}\s*$/);
          if (jsonMatch) {
            const parsed = JSON.parse(message.content);
            if (parsed.workPerformed) {
              message.workPerformed = parsed.workPerformed;
            }
          }
        } catch (e) {
          // Not valid JSON, try regex extraction
        }

        // Fallback to regex for markdown or text formats
        if (!message.workPerformed) {
          const workPerformedMatch = message.content.match(/\*?\*?workPerformed\*?\*?[:\s]+([^*\n]+?)(?:\*\*|\n|$)/i);
          if (workPerformedMatch) {
            message.workPerformed = workPerformedMatch[1].trim();
          }
        }
      }

      return message;

    } catch (error) {
      consoleStyler.log('error', `AI provider communication failed: ${error.message}`, { box: true });

      // Track errors for reasoning system
      this.reasoningSystem.addError(error);

      // If it's a fetch error, try to continue with a recovery message
      if (error.message.includes('fetch failed') || error.message.includes('Error:')) {
        consoleStyler.log('recovery', 'API connection failed, attempting to continue task execution');
        return {
          content: "API connection temporarily failed. Continuing with task execution.",
          tool_calls: []
        };
      }

      return { content: `Error: ${error.message}.` };
    }
  }

  // Function to call the AI provider with streaming response.
  async runStream(userInput, onChunk) {
    // Ensure custom tools are loaded
    await this.initializeCustomTools();

    this.historyManager.addMessage('user', userInput);
    this.qualityEvaluator.reset();
    this.reasoningSystem.reset();

    // Predict reasoning from input
    this.reasoningSystem.predictReasoningFromInput(userInput);

    let reasoning = this.reasoningSystem.getSimplifiedReasoning('', {});
    const maxTurns = 30; // Maximum conversation turns (increased for complex tasks)

    try {
      for (let i = 0; i < maxTurns; i++) {
        consoleStyler.log('progress', `Processing turn ${i + 1}/${maxTurns}`, { timestamp: true });

        const enhancedHistory = enhanceMessagesWithWorkReporting([...this.historyManager.getHistory()]);

        // Use the injected LLM adapter
        // Note: We use generateContent (non-streaming) for tool logic usually,
        // but if the adapter supports proper streaming for non-tool responses we could use it.
        // The original code used callProvider (non-streaming) even in runStream for the loop logic,
        // only sending the final text via onChunk.
        // We will stick to that logic to ensure tool calls work.

        const result = await this.llmAdapter.generateContent({
          model: this.model,
          messages: enhancedHistory,
          tools: this.allTools,
          tool_choice: "auto",
          temperature: this.temperature,
          reasoning_effort: reasoning,
        });

        if (!result.choices || result.choices.length === 0) {
          const fallback = "Invalid response from AI provider.";
          onChunk(fallback);
          return fallback;
        }

        const message = result.choices[0].message;

        if (message.tool_calls && message.tool_calls.length > 0) {
          // Handle tool calls (same loop as run())
          const toolNames = message.tool_calls.map(tc => tc.function.name).join(', ');
          consoleStyler.log('tools', `Executing tool(s): ${toolNames}`);

          this.historyManager.pushMessage(message);

          for (const toolCall of message.tool_calls) {
            consoleStyler.log('working', `Executing: ${toolCall.function.name}`);

            // Handle embellish_request reasoning updates
            if (toolCall.function.name === 'embellish_request') {
              try {
                const args = JSON.parse(toolCall.function.arguments);
                if (args.reasoning_effort && args.reasoning_justification) {
                  this.reasoningSystem.setPredictedReasoning(
                    args.reasoning_effort,
                    args.reasoning_justification
                  );
                }
              } catch (e) { /* ignore parse errors */ }
            }

            const toolResult = await this.toolExecutor.executeTool(toolCall);
            const success = !toolResult.content.startsWith('Error:');
            if (success) {
              consoleStyler.log('tools', `✓ ${toolCall.function.name}`);
            } else {
              consoleStyler.log('error', `✗ ${toolCall.function.name} - ${toolResult.content.substring(0, 80)}...`);
            }
            this.historyManager.pushMessage(toolResult);
          }
          consoleStyler.log('tools', 'All tool calls completed. Continuing...');
          continue;
        } else {
          // Final text response — send to callback
          const content = message.content || '';
          onChunk(content);
          this.historyManager.addMessage('assistant', content);
          return content;
        }
      }

      const fallback = "Could not determine a response after multiple turns.";
      onChunk(fallback);
      return fallback;

    } catch (error) {
      consoleStyler.log('error', `Request failed: ${error.message}`);
      const errMsg = `Error: ${error.message}`;
      onChunk(errMsg);
      return errMsg;
    }
  }

  // Update system prompt with current workspace and manifest
  async updateSystemPrompt() {
    let manifestContent = null;
    if (this.manifestManager && this.manifestManager.hasManifest()) {
      manifestContent = await this.manifestManager.readManifest();
    }

    this.historyManager.updateSystemPrompt(
      createSystemPrompt(
        this.workingDir,
        this.workspaceManager.getCurrentWorkspace(),
        manifestContent
      )
    );
  }

  // Get current conversation context
  getContext() {
    return {
      historyLength: this.historyManager.getHistory().length,
      workspace: this.workspaceManager.getCurrentWorkspace(),
      currentTodos: this.toolExecutor.getCurrentTodos(),
      qualityIssue: this.qualityEvaluator.getQualityIssue(),
      retryAttempts: this.qualityEvaluator.getRetryAttempts(),
      errorHistory: this.toolExecutor.getErrorHistory()
    };
  }

  // Debug: Display current session memory state
  displaySessionMemory() {
    const history = this.historyManager.getHistory();
    const sessionSummary = {
      totalMessages: history.length,
      messageTypes: {
        system: history.filter(m => m.role === 'system').length,
        user: history.filter(m => m.role === 'user').length,
        assistant: history.filter(m => m.role === 'assistant').length,
        tool: history.filter(m => m.role === 'tool').length
      },
      toolResults: history.filter(m => m.role === 'tool').map(m => ({
        name: m.name,
        contentLength: m.content.length
      })),
      assistantWithToolCalls: history.filter(m => m.role === 'assistant' && m.tool_calls).length
    };

    consoleStyler.log('system', 'Session Memory State:', { box: true });
    consoleStyler.log('system', `Total messages: ${sessionSummary.totalMessages}`, { indent: true });
    consoleStyler.log('system', `Message breakdown: ${JSON.stringify(sessionSummary.messageTypes)}`, { indent: true });
    consoleStyler.log('system', `Tool results: ${sessionSummary.toolResults.length} preserved`, { indent: true });

    if (sessionSummary.toolResults.length > 0) {
      sessionSummary.toolResults.forEach((tool, i) => {
        consoleStyler.log('system', `  ${i + 1}. ${tool.name} (${tool.contentLength} chars)`, { indent: true });
      });
    }

    return sessionSummary;
  }
}
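
Usage note (not part of the published diff): MiniAIAssistant is the orchestrator the rest of the package builds on, and its constructor accepts an injected LLM adapter via options.llmAdapter, falling back to the built-in callProvider/callProviderStream functions. The sketch below shows one way it might be driven with a stub adapter; the deep import path and the stubbed OpenAI-style response are illustrative assumptions, not documented package exports, and the other subsystems (config, quality evaluator, custom tools) are still constructed from the package's own modules, so this assumes a configured robodev environment.

// sketch.mjs: hypothetical usage example; the import path is an assumption
import { MiniAIAssistant } from '@sschepis/robodev/src/core/ai-assistant.mjs';

// Stub adapter matching the shape generateContent()/runStream() expect:
// a promise resolving to an object with choices[0].message (OpenAI chat-completions style).
const stubAdapter = {
  generateContent: async (req) => ({
    choices: [{ message: { role: 'assistant', content: `Echo: ${req.messages.at(-1).content}` } }]
  }),
  generateContentStream: async (req) => {
    // Streaming is not exercised in this sketch.
    return null;
  }
};

const assistant = new MiniAIAssistant(process.cwd(), { llmAdapter: stubAdapter });

// Non-streaming orchestration loop (tool calls, quality evaluation, retries).
const answer = await assistant.run('Summarize the workspace layout');
console.log(answer);

// Streaming variant: the final text is delivered through the onChunk callback.
await assistant.runStream('Same request, streamed', (chunk) => process.stdout.write(chunk));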