aiexecode 1.0.66 → 1.0.68
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiexecode might be problematic.
- package/config_template/settings.json +1 -3
- package/index.js +46 -71
- package/package.json +1 -12
- package/payload_viewer/out/404/index.html +1 -1
- package/payload_viewer/out/404.html +1 -1
- package/payload_viewer/out/index.html +1 -1
- package/payload_viewer/out/index.txt +1 -1
- package/payload_viewer/web_server.js +0 -163
- package/src/ai_based/completion_judge.js +96 -5
- package/src/ai_based/orchestrator.js +71 -3
- package/src/ai_based/pip_package_installer.js +14 -12
- package/src/ai_based/pip_package_lookup.js +13 -10
- package/src/commands/apikey.js +8 -34
- package/src/commands/help.js +3 -4
- package/src/commands/model.js +17 -74
- package/src/commands/reasoning_effort.js +1 -1
- package/src/config/feature_flags.js +0 -12
- package/src/{ui → frontend}/App.js +23 -25
- package/src/frontend/README.md +81 -0
- package/src/{ui/components/SuggestionsDisplay.js → frontend/components/AutocompleteMenu.js} +3 -3
- package/src/{ui/components/HistoryItemDisplay.js → frontend/components/ConversationItem.js} +37 -89
- package/src/{ui → frontend}/components/CurrentModelView.js +3 -5
- package/src/{ui → frontend}/components/Footer.js +4 -6
- package/src/{ui → frontend}/components/Header.js +2 -5
- package/src/{ui/components/InputPrompt.js → frontend/components/Input.js} +16 -54
- package/src/frontend/components/ModelListView.js +106 -0
- package/src/{ui → frontend}/components/ModelUpdatedView.js +3 -5
- package/src/{ui → frontend}/components/SessionSpinner.js +3 -3
- package/src/{ui → frontend}/components/SetupWizard.js +8 -101
- package/src/{ui → frontend}/components/ToolApprovalPrompt.js +16 -14
- package/src/frontend/design/themeColors.js +42 -0
- package/src/{ui → frontend}/index.js +7 -7
- package/src/frontend/utils/inputBuffer.js +441 -0
- package/src/{ui/utils/markdownRenderer.js → frontend/utils/markdownParser.js} +3 -3
- package/src/{ui/utils/ConsolePatcher.js → frontend/utils/outputRedirector.js} +9 -9
- package/src/{ui/utils/codeColorizer.js → frontend/utils/syntaxHighlighter.js} +2 -3
- package/src/system/ai_request.js +145 -595
- package/src/system/code_executer.js +111 -16
- package/src/system/file_integrity.js +5 -7
- package/src/system/log.js +3 -3
- package/src/system/mcp_integration.js +15 -13
- package/src/system/output_helper.js +0 -20
- package/src/system/session.js +97 -23
- package/src/system/session_memory.js +2 -82
- package/src/system/system_info.js +1 -1
- package/src/system/ui_events.js +0 -43
- package/src/tools/code_editor.js +17 -2
- package/src/tools/file_reader.js +17 -2
- package/src/tools/glob.js +9 -1
- package/src/tools/response_message.js +0 -2
- package/src/tools/ripgrep.js +9 -1
- package/src/tools/web_downloader.js +9 -1
- package/src/util/config.js +3 -8
- package/src/util/debug_log.js +4 -11
- package/src/util/mcp_config_manager.js +3 -5
- package/src/util/output_formatter.js +0 -47
- package/src/util/prompt_loader.js +3 -4
- package/src/util/safe_fs.js +60 -0
- package/src/util/setup_wizard.js +1 -3
- package/src/util/text_formatter.js +0 -86
- package/src/config/claude_models.js +0 -195
- package/src/ui/README.md +0 -208
- package/src/ui/api.js +0 -167
- package/src/ui/components/AgenticProgressDisplay.js +0 -126
- package/src/ui/components/Composer.js +0 -55
- package/src/ui/components/LoadingIndicator.js +0 -54
- package/src/ui/components/ModelListView.js +0 -214
- package/src/ui/components/Notifications.js +0 -55
- package/src/ui/components/StreamingIndicator.js +0 -36
- package/src/ui/contexts/AppContext.js +0 -25
- package/src/ui/contexts/StreamingContext.js +0 -20
- package/src/ui/contexts/UIStateContext.js +0 -117
- package/src/ui/example-usage.js +0 -180
- package/src/ui/hooks/useTerminalResize.js +0 -39
- package/src/ui/themes/semantic-tokens.js +0 -73
- package/src/ui/utils/text-buffer.js +0 -975
- /package/payload_viewer/out/_next/static/{t0WTsjXST7ISD1Boa6ifx → Z3AZSKhutj-kS4L8VpcOl}/_buildManifest.js +0 -0
- /package/payload_viewer/out/_next/static/{t0WTsjXST7ISD1Boa6ifx → Z3AZSKhutj-kS4L8VpcOl}/_clientMiddlewareManifest.json +0 -0
- /package/payload_viewer/out/_next/static/{t0WTsjXST7ISD1Boa6ifx → Z3AZSKhutj-kS4L8VpcOl}/_ssgManifest.js +0 -0
- /package/src/{ui → frontend}/components/BlankLine.js +0 -0
- /package/src/{ui → frontend}/components/FileDiffViewer.js +0 -0
- /package/src/{ui → frontend}/components/HelpView.js +0 -0
- /package/src/{ui → frontend}/hooks/useCompletion.js +0 -0
- /package/src/{ui → frontend}/hooks/useKeypress.js +0 -0
- /package/src/{ui → frontend}/utils/diffUtils.js +0 -0
- /package/src/{ui → frontend}/utils/renderInkComponent.js +0 -0
```diff
@@ -3,7 +3,6 @@ import path from 'path';
 import fs from 'fs';
 import { fileURLToPath } from 'url';
 import OpenAI from 'openai';
-import Anthropic from '@anthropic-ai/sdk';
 function consolelog() { }
 
 const __filename = fileURLToPath(import.meta.url);
@@ -49,19 +48,6 @@ if (openaiApiKey) {
     consolelog(' Checked: process.env.OPENAI_API_KEY and ~/.aiexe/settings.json');
 }
 
-// Initialize Anthropic client
-let anthropic = null;
-const anthropicApiKey = process.env.ANTHROPIC_API_KEY || aiAgentSettings.ANTHROPIC_API_KEY;
-
-if (anthropicApiKey) {
-    anthropic = new Anthropic({
-        apiKey: anthropicApiKey,
-    });
-    consolelog('✅ Anthropic client initialized');
-} else {
-    consolelog('⚠️ Anthropic API key not found - Anthropic API testing will return mock responses');
-    consolelog(' Checked: process.env.ANTHROPIC_API_KEY and ~/.aiexe/settings.json');
-}
 
 // ID generation function
 function generateId() {
@@ -434,155 +420,6 @@ function startWebServer(port = 3300) {
         }
     });
 
-    // Anthropic API test endpoint
-    app.post('/api/test-anthropic', async (req, res) => {
-        try {
-            const payload = req.body;
-
-            // Actual Anthropic API call (if API key is available)
-            if (anthropic && anthropicApiKey) {
-                // Convert from unified format to Anthropic format
-                const messages = payload.messages || payload.input || [];
-                const convertedMessages = [];
-                let systemMessage = payload.system;
-
-                for (const msg of messages) {
-                    // Extract system messages
-                    if (msg.role === 'system') {
-                        if (msg.content && Array.isArray(msg.content)) {
-                            systemMessage = msg.content.map(c => c.text || c).join('\n');
-                        } else if (typeof msg.content === 'string') {
-                            systemMessage = msg.content;
-                        }
-                        continue; // Skip adding to messages array
-                    }
-
-                    // Handle different message formats
-                    if (msg.content && Array.isArray(msg.content)) {
-                        convertedMessages.push({
-                            role: msg.role,
-                            content: msg.content.map(c => {
-                                if (c.type === 'input_text' || c.type === 'output_text') {
-                                    return { type: 'text', text: c.text };
-                                }
-                                return c;
-                            })
-                        });
-                    } else if (typeof msg.content === 'string') {
-                        convertedMessages.push({
-                            role: msg.role,
-                            content: msg.content
-                        });
-                    } else {
-                        convertedMessages.push(msg);
-                    }
-                }
-
-                const anthropicPayload = {
-                    model: payload.model || 'claude-sonnet-4-5-20250929',
-                    max_tokens: payload.max_tokens || 8192,
-                    messages: convertedMessages,
-                    ...(systemMessage && { system: systemMessage }),
-                    ...(payload.tools && { tools: payload.tools }),
-                    ...(payload.temperature && { temperature: payload.temperature }),
-                    ...(payload.top_p && { top_p: payload.top_p })
-                };
-
-                const response = await anthropic.messages.create(anthropicPayload);
-
-                // Convert response to unified format
-                const unifiedResponse = {
-                    id: response.id,
-                    object: 'response',
-                    created_at: Math.floor(Date.now() / 1000),
-                    status: 'completed',
-                    model: response.model,
-                    output: response.content.map((item, idx) => {
-                        if (item.type === 'text') {
-                            return {
-                                id: `${response.id}_output_${idx}`,
-                                type: 'message',
-                                status: 'completed',
-                                content: [{ type: 'text', text: item.text }]
-                            };
-                        } else if (item.type === 'tool_use') {
-                            return {
-                                id: item.id,
-                                type: 'function_call',
-                                status: 'in_progress',
-                                name: item.name,
-                                arguments: JSON.stringify(item.input),
-                                call_id: item.id
-                            };
-                        }
-                        return item;
-                    }),
-                    usage: {
-                        input_tokens: response.usage.input_tokens,
-                        output_tokens: response.usage.output_tokens,
-                        total_tokens: response.usage.input_tokens + response.usage.output_tokens,
-                        ...(response.usage.cache_creation_input_tokens && {
-                            cache_creation_input_tokens: response.usage.cache_creation_input_tokens
-                        }),
-                        ...(response.usage.cache_read_input_tokens && {
-                            cache_read_input_tokens: response.usage.cache_read_input_tokens
-                        })
-                    },
-                    provider: 'anthropic',
-                    ...(response.stop_reason && { stop_reason: response.stop_reason }),
-                    ...(response.stop_sequence && { stop_sequence: response.stop_sequence })
-                };
-
-                res.json({
-                    success: true,
-                    provider: 'anthropic',
-                    response: unifiedResponse,
-                    originalPayload: payload
-                });
-            } else {
-                // Mock response if no API key
-                res.json({
-                    success: true,
-                    provider: 'anthropic',
-                    mock: true,
-                    response: {
-                        id: 'msg_mock_' + generateId(),
-                        object: 'response',
-                        created_at: Math.floor(Date.now() / 1000),
-                        status: 'completed',
-                        model: payload.model || 'claude-sonnet-4-5-20250929',
-                        provider: 'anthropic',
-                        output: [
-                            {
-                                id: 'output_mock_1',
-                                type: 'message',
-                                status: 'completed',
-                                content: [
-                                    {
-                                        type: 'text',
-                                        text: 'This is a mock response (Anthropic API key not configured)'
-                                    }
-                                ]
-                            }
-                        ],
-                        usage: {
-                            input_tokens: 10,
-                            output_tokens: 15,
-                            total_tokens: 25
-                        }
-                    },
-                    originalPayload: payload
-                });
-            }
-        } catch (error) {
-            res.status(500).json({
-                success: false,
-                provider: 'anthropic',
-                error: error.message,
-                originalPayload: req.body
-            });
-        }
-    });
 
     // Health check endpoint
     app.get('/api/health', (req, res) => {
```
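The removed `/api/test-anthropic` handler's core job was translating between the project's unified request/response shape and the Anthropic Messages API. For reference, a minimal standalone sketch of the response-side mapping taken from the deleted code; the `toUnifiedResponse` helper name is hypothetical, and the cache and stop-reason fields are omitted for brevity.

```js
// Hypothetical helper illustrating the mapping the removed endpoint performed:
// an Anthropic Messages API response reshaped into the "unified response" format.
function toUnifiedResponse(response) {
    return {
        id: response.id,
        object: 'response',
        created_at: Math.floor(Date.now() / 1000),
        status: 'completed',
        model: response.model,
        // Text blocks become 'message' outputs; tool_use blocks become 'function_call' outputs.
        output: response.content.map((item, idx) => {
            if (item.type === 'text') {
                return {
                    id: `${response.id}_output_${idx}`,
                    type: 'message',
                    status: 'completed',
                    content: [{ type: 'text', text: item.text }]
                };
            }
            if (item.type === 'tool_use') {
                return {
                    id: item.id,
                    type: 'function_call',
                    status: 'in_progress',
                    name: item.name,
                    arguments: JSON.stringify(item.input),
                    call_id: item.id
                };
            }
            return item;
        }),
        usage: {
            input_tokens: response.usage.input_tokens,
            output_tokens: response.usage.output_tokens,
            total_tokens: response.usage.input_tokens + response.usage.output_tokens
        },
        provider: 'anthropic'
    };
}

// Example with a mock Anthropic-style response object:
console.log(toUnifiedResponse({
    id: 'msg_demo',
    model: 'claude-sonnet-4-5-20250929',
    content: [{ type: 'text', text: 'hello' }],
    usage: { input_tokens: 10, output_tokens: 15 }
}));
```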
```diff
@@ -80,24 +80,39 @@ async function ensureCompletionJudgeConversationInitialized(templateVars = {}) {
 }
 
 function syncOrchestratorConversation() {
+    debugLog(`[syncOrchestratorConversation] START`);
     const sourceConversation = getOrchestratorConversation();
+    debugLog(`[syncOrchestratorConversation] Source conversation length: ${sourceConversation?.length || 0}`);
+    debugLog(`[syncOrchestratorConversation] Last snapshot length: ${lastOrchestratorSnapshotLength}`);
+    debugLog(`[syncOrchestratorConversation] Current completionJudge conversation length: ${completionJudgeConversation.length}`);
+
     if (!Array.isArray(sourceConversation) || !sourceConversation.length) {
+        debugLog(`[syncOrchestratorConversation] No source conversation to sync`);
         return;
     }
 
+    let syncedCount = 0;
     for (let i = lastOrchestratorSnapshotLength; i < sourceConversation.length; i++) {
         const entry = sourceConversation[i];
         if (!entry) continue;
 
         // The completion judge keeps its own system message, so the Orchestrator's system messages are excluded
-        if (entry.role === "system")
+        if (entry.role === "system") {
+            debugLog(`[syncOrchestratorConversation] Skipping system message at index ${i}`);
+            continue;
+        }
 
+        debugLog(`[syncOrchestratorConversation] Syncing entry ${i}: type=${entry.type}, role=${entry.role}`);
         // Copy both reasoning and message entries as-is
         // ai_request.js already keeps reasoning-message pairs intact
         completionJudgeConversation.push(cloneMessage(entry));
+        syncedCount++;
     }
 
     lastOrchestratorSnapshotLength = sourceConversation.length;
+    debugLog(`[syncOrchestratorConversation] Synced ${syncedCount} entries, new snapshot length: ${lastOrchestratorSnapshotLength}`);
+    debugLog(`[syncOrchestratorConversation] CompletionJudge conversation now has ${completionJudgeConversation.length} entries`);
+    debugLog(`[syncOrchestratorConversation] END`);
 }
 
 function trimCompletionJudgeConversation() {
@@ -112,12 +127,31 @@ function trimCompletionJudgeConversation() {
 }
 
 async function dispatchCompletionJudgeRequest(options) {
+    debugLog(`[dispatchCompletionJudgeRequest] START`);
+
     if (!options) {
+        debugLog(`[dispatchCompletionJudgeRequest] ERROR - options not initialized`);
         throw new Error('Completion judge request options not initialized.');
     }
 
+    debugLog(`[dispatchCompletionJudgeRequest] Options - model: ${options.model}, taskName: ${options.taskName}`);
+
+    let attemptCount = 0;
     while (true) {
+        attemptCount++;
+        debugLog(`[dispatchCompletionJudgeRequest] Attempt ${attemptCount}`);
+
         const { model, isGpt5Model, taskName } = options;
+
+        // Analyze the conversation structure
+        const conversationTypes = {};
+        completionJudgeConversation.forEach(entry => {
+            const type = entry?.type || entry?.role || 'unknown';
+            conversationTypes[type] = (conversationTypes[type] || 0) + 1;
+        });
+        debugLog(`[dispatchCompletionJudgeRequest] Conversation structure: ${JSON.stringify(conversationTypes)}`);
+        debugLog(`[dispatchCompletionJudgeRequest] Total conversation entries: ${completionJudgeConversation.length}`);
+
         const requestPayload = {
             model,
             input: completionJudgeConversation,
```
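Several of the new debug lines in the completion-judge and orchestrator code build a small histogram of conversation entries keyed by `type` (falling back to `role`) before each request. A minimal sketch of that counting logic; the `summarizeConversation` name is chosen here for illustration and is not part of the package.

```js
// Illustrative helper mirroring the added debug logic: count conversation
// entries by their `type` (or `role` when no type is present).
function summarizeConversation(conversation) {
    const counts = {};
    for (const entry of conversation) {
        const type = entry?.type || entry?.role || 'unknown';
        counts[type] = (counts[type] || 0) + 1;
    }
    return counts;
}

// Example output: { system: 1, user: 1, function_call: 1 }
console.log(summarizeConversation([
    { role: 'system', content: 'You are a judge.' },
    { role: 'user', content: 'Is the task done?' },
    { type: 'function_call', name: 'read_file', arguments: '{}' }
]));
```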
```diff
@@ -140,23 +174,41 @@ async function dispatchCompletionJudgeRequest(options) {
             requestPayload.temperature = 0;
         }
 
+        // Estimate tokens
+        const conversationJson = JSON.stringify(completionJudgeConversation);
+        const estimatedTokens = Math.ceil(conversationJson.length / 4);
+        debugLog(`[dispatchCompletionJudgeRequest] Estimated tokens: ${estimatedTokens} (conversation size: ${conversationJson.length} bytes)`);
+
+        debugLog(`[dispatchCompletionJudgeRequest] Sending API request...`);
         try {
-
+            const response = await request(taskName, requestPayload);
+            debugLog(`[dispatchCompletionJudgeRequest] API request successful`);
+            debugLog(`[dispatchCompletionJudgeRequest] Response output_text length: ${response?.output_text?.length || 0}`);
+            debugLog(`[dispatchCompletionJudgeRequest] END - success`);
+            return response;
         } catch (error) {
+            debugLog(`[dispatchCompletionJudgeRequest] API request failed: ${error.message}`);
+            debugLog(`[dispatchCompletionJudgeRequest] Error name: ${error.name}, type: ${error?.constructor?.name}`);
+
             // Propagate AbortError immediately (session aborted)
             if (error.name === 'AbortError') {
-                debugLog(`[dispatchCompletionJudgeRequest] Request aborted by user`);
+                debugLog(`[dispatchCompletionJudgeRequest] Request aborted by user, propagating AbortError`);
                 throw error;
             }
 
             if (!isContextWindowError(error)) {
+                debugLog(`[dispatchCompletionJudgeRequest] Not a context window error, re-throwing`);
                 throw error;
             }
 
+            debugLog(`[dispatchCompletionJudgeRequest] Context window error detected, attempting to trim...`);
             const trimmed = trimCompletionJudgeConversation();
+            debugLog(`[dispatchCompletionJudgeRequest] Trim result: ${trimmed}, new conversation length: ${completionJudgeConversation.length}`);
             if (!trimmed) {
+                debugLog(`[dispatchCompletionJudgeRequest] Cannot trim further, re-throwing error`);
                 throw error;
             }
+            debugLog(`[dispatchCompletionJudgeRequest] Retrying with trimmed conversation...`);
         }
     }
 }
@@ -186,52 +238,91 @@ export function restoreCompletionJudgeConversation(savedConversation, savedSnaps
  * @returns {Promise<{shouldComplete: boolean, whatUserShouldSay: string}>}
  */
 export async function judgeMissionCompletion(templateVars = {}) {
+    debugLog('');
+    debugLog('========================================');
+    debugLog('=== judgeMissionCompletion START =======');
+    debugLog('========================================');
     debugLog(`[judgeMissionCompletion] Called with templateVars: ${JSON.stringify(Object.keys(templateVars))}`);
+    if (templateVars.what_user_requests) {
+        debugLog(`[judgeMissionCompletion] what_user_requests: ${templateVars.what_user_requests.substring(0, 200)}`);
+    }
 
     try {
+        debugLog(`[judgeMissionCompletion] Creating request options...`);
         const requestOptions = await createCompletionJudgeRequestOptions();
+        debugLog(`[judgeMissionCompletion] Request options - model: ${requestOptions.model}, isGpt5Model: ${requestOptions.isGpt5Model}`);
 
         // Initialize the completion judge's own system message (passing template variables)
+        debugLog(`[judgeMissionCompletion] Initializing completion judge conversation...`);
         await ensureCompletionJudgeConversationInitialized(templateVars);
+        debugLog(`[judgeMissionCompletion] Conversation initialized, length: ${completionJudgeConversation.length}`);
+
         // Sync the Orchestrator's conversation (excluding system messages)
+        debugLog(`[judgeMissionCompletion] Syncing orchestrator conversation...`);
         syncOrchestratorConversation();
+        debugLog(`[judgeMissionCompletion] Sync complete, conversation length: ${completionJudgeConversation.length}`);
 
         debugLog(`[judgeMissionCompletion] Sending request with ${completionJudgeConversation.length} conversation entries`);
 
         const response = await dispatchCompletionJudgeRequest(requestOptions);
 
         debugLog(`[judgeMissionCompletion] Received response, parsing output_text`);
+        debugLog(`[judgeMissionCompletion] Raw output_text: ${response.output_text?.substring(0, 500)}`);
 
         // The completion judge's own response is not added to the history (keeps the same history as the Orchestrator)
         // Reset the conversation history (for the next judgment)
+        debugLog(`[judgeMissionCompletion] Resetting completion judge conversation...`);
         resetCompletionJudgeConversation();
 
         try {
             const judgment = JSON.parse(response.output_text);
-            debugLog(`[judgeMissionCompletion] Parsed judgment
+            debugLog(`[judgeMissionCompletion] Parsed judgment successfully`);
+            debugLog(`[judgeMissionCompletion] should_complete: ${judgment.should_complete}`);
+            debugLog(`[judgeMissionCompletion] whatUserShouldSay: ${judgment.whatUserShouldSay}`);
 
-
+            const result = {
                 shouldComplete: judgment.should_complete === true,
                 whatUserShouldSay: judgment.whatUserShouldSay || ""
             };
+
+            debugLog('========================================');
+            debugLog('=== judgeMissionCompletion END =========');
+            debugLog(`=== Result: shouldComplete=${result.shouldComplete}, whatUserShouldSay="${result.whatUserShouldSay.substring(0, 100)}"`);
+            debugLog('========================================');
+            debugLog('');
+
+            return result;
         } catch (parseError) {
             debugLog(`[judgeMissionCompletion] Parse error: ${parseError.message}`);
+            debugLog(`[judgeMissionCompletion] Failed to parse output_text as JSON`);
             throw new Error('Completion judge response did not include valid JSON output_text.');
         }
 
     } catch (error) {
         debugLog(`[judgeMissionCompletion] ERROR: ${error.message}, error.name: ${error.name}`);
+        debugLog(`[judgeMissionCompletion] Error stack: ${error.stack}`);
 
         // Reset the conversation history on error
+        debugLog(`[judgeMissionCompletion] Resetting completion judge conversation due to error...`);
         resetCompletionJudgeConversation();
 
         // Propagate AbortError immediately (session aborted)
         if (error.name === 'AbortError') {
             debugLog(`[judgeMissionCompletion] AbortError detected, propagating to caller`);
+            debugLog('========================================');
+            debugLog('=== judgeMissionCompletion END (ABORT) =');
+            debugLog('========================================');
+            debugLog('');
             throw error;
         }
 
         // On other errors, continue safely
+        debugLog(`[judgeMissionCompletion] Non-abort error, returning safe default (shouldComplete=false)`);
+        debugLog('========================================');
+        debugLog('=== judgeMissionCompletion END (ERROR) =');
+        debugLog('========================================');
+        debugLog('');
+
         return {
             shouldComplete: false,
             whatUserShouldSay: ""
```
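The dispatch functions in both the completion judge and the orchestrator share the same retry shape: send the request, and when `isContextWindowError` flags the failure, trim the conversation and try again; `AbortError` and any other error propagate immediately. A condensed sketch of that loop, with `sendRequest`, `isContextWindowError`, and `trimConversation` standing in for the package's own helpers rather than real exports:

```js
// Condensed sketch of the trim-and-retry loop used by
// dispatchCompletionJudgeRequest / dispatchOrchestratorRequest.
async function dispatchWithTrimRetry(sendRequest, isContextWindowError, trimConversation) {
    while (true) {
        try {
            return await sendRequest();
        } catch (error) {
            // User-initiated aborts and non-context errors propagate immediately.
            if (error.name === 'AbortError' || !isContextWindowError(error)) {
                throw error;
            }
            // Drop older conversation entries and retry; give up when nothing
            // more can be trimmed.
            if (!trimConversation()) {
                throw error;
            }
        }
    }
}

// Example: a fake sender that fails once with a context-window error, then succeeds.
let calls = 0;
const fakeSend = async () => {
    calls++;
    if (calls === 1) {
        const err = new Error('context_length_exceeded');
        err.code = 'context_length_exceeded';
        throw err;
    }
    return { output_text: 'ok' };
};
dispatchWithTrimRetry(
    fakeSend,
    (err) => err.code === 'context_length_exceeded',
    () => true // pretend trimming always frees space
).then(res => console.log(res.output_text)); // 'ok'
```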
```diff
@@ -17,8 +17,6 @@ dotenv.config({ quiet: true });
 
 const debugLog = createDebugLogger('orchestrator.log', 'orchestrator');
 
-function consolelog() { }
-
 const orchestratorConversation = [];
 let orchestratorRequestOptions = null;
 
@@ -277,15 +275,31 @@ function trimOrchestratorConversation() {
 }
 
 async function dispatchOrchestratorRequest({ toolChoice }) {
+    debugLog(`[dispatchOrchestratorRequest] START - toolChoice: ${toolChoice}`);
+
     if (!orchestratorRequestOptions) {
+        debugLog(`[dispatchOrchestratorRequest] ERROR - orchestratorRequestOptions not initialized`);
         throw new Error('Orchestrator request options not initialized.');
     }
 
+    debugLog(`[dispatchOrchestratorRequest] Options - model: ${orchestratorRequestOptions.model}, tools count: ${orchestratorRequestOptions.tools?.length || 0}`);
+
+    let attemptCount = 0;
     while (true) {
+        attemptCount++;
+        debugLog(`[dispatchOrchestratorRequest] Attempt ${attemptCount} - starting cleanup...`);
         cleanupOrchestratorConversation();
 
         debugLog(`[dispatchOrchestratorRequest] Conversation has ${orchestratorConversation.length} entries`);
 
+        // Analyze the conversation structure
+        const conversationTypes = {};
+        orchestratorConversation.forEach(entry => {
+            const type = entry?.type || entry?.role || 'unknown';
+            conversationTypes[type] = (conversationTypes[type] || 0) + 1;
+        });
+        debugLog(`[dispatchOrchestratorRequest] Conversation structure: ${JSON.stringify(conversationTypes)}`);
+
         // Check the function_call_output entries
         const functionCallOutputs = orchestratorConversation.filter(item => item.type === 'function_call_output');
         debugLog(`[dispatchOrchestratorRequest] Found ${functionCallOutputs.length} function_call_output entries`);
@@ -309,31 +323,64 @@ async function dispatchOrchestratorRequest({ toolChoice }) {
             requestConfig.temperature = 0;
         }
 
-        debugLog(`[dispatchOrchestratorRequest]
+        debugLog(`[dispatchOrchestratorRequest] Request config - model: ${model}, tool_choice: ${requestConfig.tool_choice}, tools: ${tools.length}, conversation entries: ${orchestratorConversation.length}`);
+
+        // Estimate conversation tokens (rough)
+        const conversationJson = JSON.stringify(orchestratorConversation);
+        const estimatedTokens = Math.ceil(conversationJson.length / 4); // roughly 4 chars = 1 token
+        debugLog(`[dispatchOrchestratorRequest] Estimated tokens: ${estimatedTokens} (conversation size: ${conversationJson.length} bytes)`);
+
+        debugLog(`[dispatchOrchestratorRequest] Sending API request...`);
 
         try {
             const response = await request(taskName, requestConfig);
+            debugLog(`[dispatchOrchestratorRequest] API request successful`);
+            debugLog(`[dispatchOrchestratorRequest] Response outputs: ${response?.output?.length || 0}`);
+            if (response?.output) {
+                const outputTypes = {};
+                response.output.forEach(o => {
+                    const type = o?.type || 'unknown';
+                    outputTypes[type] = (outputTypes[type] || 0) + 1;
+                });
+                debugLog(`[dispatchOrchestratorRequest] Response output types: ${JSON.stringify(outputTypes)}`);
+            }
+            debugLog(`[dispatchOrchestratorRequest] END - success`);
             // consolelog(JSON.stringify(response, null, 2));
             return response;
         } catch (error) {
+            debugLog(`[dispatchOrchestratorRequest] API request failed: ${error.message}`);
+            debugLog(`[dispatchOrchestratorRequest] Error type: ${error?.constructor?.name}, code: ${error?.code}, status: ${error?.status}`);
+
             if (!isContextWindowError(error)) {
+                debugLog(`[dispatchOrchestratorRequest] Not a context window error, re-throwing`);
                 throw error;
             }
 
+            debugLog(`[dispatchOrchestratorRequest] Context window error detected, attempting to trim conversation...`);
             const trimmed = trimOrchestratorConversation();
+            debugLog(`[dispatchOrchestratorRequest] Trim result: ${trimmed}, new conversation length: ${orchestratorConversation.length}`);
             if (!trimmed) {
+                debugLog(`[dispatchOrchestratorRequest] Cannot trim further, re-throwing error`);
                 throw error;
             }
+            debugLog(`[dispatchOrchestratorRequest] Retrying with trimmed conversation...`);
         }
     }
 }
 
 export async function continueOrchestratorConversation() {
+    debugLog(`[continueOrchestratorConversation] START`);
+    debugLog(`[continueOrchestratorConversation] Current conversation length: ${orchestratorConversation.length}`);
+
     // Reflect the latest prompt on every call
     await ensureConversationInitialized();
+    debugLog(`[continueOrchestratorConversation] Conversation initialized/updated`);
 
     const response = await dispatchOrchestratorRequest({ toolChoice: "auto" });
+    debugLog(`[continueOrchestratorConversation] Received response, appending to conversation...`);
     appendResponseToConversation(response);
+    debugLog(`[continueOrchestratorConversation] Conversation length after append: ${orchestratorConversation.length}`);
+    debugLog(`[continueOrchestratorConversation] END`);
     return response;
 }
 // Shell script coding rules:
@@ -347,9 +394,16 @@ export async function continueOrchestratorConversation() {
 
 // Analyzes the current situation through exploration and has the AI decide the next action to take.
 export async function orchestrateMission({ improvement_points = '', mcpToolSchemas = [], isAutoGenerated = false }) {
+    debugLog('');
+    debugLog('========================================');
+    debugLog('===== orchestrateMission START =========');
+    debugLog('========================================');
+
     const taskName = 'orchestrator';
 
     debugLog(`[orchestrateMission] Called with improvement_points: "${improvement_points?.substring(0, 100) || '(empty)'}", isAutoGenerated: ${isAutoGenerated}`);
+    debugLog(`[orchestrateMission] improvement_points length: ${improvement_points?.length || 0} characters`);
+    debugLog(`[orchestrateMission] mcpToolSchemas count: ${mcpToolSchemas.length}`);
     const improvementPointsText = typeof improvement_points === 'string' && improvement_points.trim().length ? improvement_points : '';
     debugLog(`[orchestrateMission] improvementPointsText after processing: "${improvementPointsText.substring(0, 100) || '(empty)'}"`);
     if (isAutoGenerated) {
@@ -359,6 +413,7 @@ export async function orchestrateMission({ improvement_points = '', mcpToolSchem
 
     // Check whether Python is available
     const hasPython = process.app_custom?.systemInfo?.commands?.hasPython || false;
+    debugLog(`[orchestrateMission] Python available: ${hasPython}`);
 
     // Load settings
     const settings = await loadSettings();
@@ -393,7 +448,10 @@ export async function orchestrateMission({ improvement_points = '', mcpToolSchem
     }
 
     // MCP Tools (added dynamically)
+    debugLog(`[orchestrateMission] Adding ${mcpToolSchemas.length} MCP tools to available tools`);
     tools.push(...mcpToolSchemas);
+    debugLog(`[orchestrateMission] Total tools after MCP: ${tools.length}`);
+
     // Link the tool names declared by the Planner to actual function schemas so that the work the LLM requests can run immediately.
     tools.forEach(tool => {
         tool.type = "function";
@@ -401,6 +459,7 @@ export async function orchestrateMission({ improvement_points = '', mcpToolSchem
 
     const model = await getModelForProvider();
     const isGpt5Model = model.startsWith("gpt-5");
+    debugLog(`[orchestrateMission] Using model: ${model}, isGpt5Model: ${isGpt5Model}`);
 
     orchestratorRequestOptions = {
         model,
@@ -409,8 +468,10 @@ export async function orchestrateMission({ improvement_points = '', mcpToolSchem
         taskName,
         parallel_tool_calls: true
     };
+    debugLog(`[orchestrateMission] orchestratorRequestOptions configured with ${tools.length} tools`);
 
     await ensureConversationInitialized();
+    debugLog(`[orchestrateMission] Conversation initialized, current length: ${orchestratorConversation.length}`);
 
     debugLog(`[orchestrateMission] Adding user message to conversation: "${improvementPointsText.substring(0, 100)}"`);
     const userMessage = {
@@ -432,8 +493,15 @@ export async function orchestrateMission({ improvement_points = '', mcpToolSchem
     orchestratorConversation.push(userMessage);
     debugLog(`[orchestrateMission] Conversation length after adding user message: ${orchestratorConversation.length}`);
 
+    debugLog(`[orchestrateMission] Dispatching orchestrator request with toolChoice: required`);
     const response = await dispatchOrchestratorRequest({ toolChoice: "required" });
+    debugLog(`[orchestrateMission] Received response, appending to conversation...`);
     appendResponseToConversation(response);
    debugLog(`[orchestrateMission] Conversation length after response: ${orchestratorConversation.length}`);
+
+    debugLog('========================================');
+    debugLog('===== orchestrateMission END ===========');
+    debugLog('========================================');
+    debugLog('');
     return response;
 }
```
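Before each request the orchestrator (like the completion judge above) logs a rough token estimate: the JSON-serialized conversation length divided by four, on the assumption of roughly four characters per token. A tiny helper showing the same arithmetic; the `estimateTokens` name is illustrative only.

```js
// Rough token estimate matching the added debug logs: string length of the
// serialized conversation is used as a byte proxy, divided by ~4 chars/token.
function estimateTokens(conversation) {
    const json = JSON.stringify(conversation);
    return { bytes: json.length, estimatedTokens: Math.ceil(json.length / 4) };
}

// Example: prints the serialized size and ceil(size / 4).
console.log(estimateTokens([
    { role: 'user', content: 'Summarize the repository structure for me, please.' }
]));
```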
```diff
@@ -1,9 +1,11 @@
 import { checkValidPackageName } from "./pip_package_lookup.js";
 import { installPythonPackage, isPackageInstalled, analyzeImports } from "../system/code_executer.js";
+import { createDebugLogger } from "../util/debug_log.js";
+
+const debugLog = createDebugLogger('pip_package_installer.log', 'pip_package_installer');
 
 // This file checks which Python packages the code requires and installs them automatically.
 // It prepares the environment before the Orchestrator runs so the Code Executer does not hit missing-package errors when executing scripts.
-function consolelog() { }
 
 // Analyzes a single file, finds the required external packages, and installs them.
 export async function installRequiredPackages(filePath) {
@@ -62,34 +64,34 @@ export async function installRequiredPackages(filePath) {
 
 export async function installPackageList(packageList) {
     try {
-
-
+        debugLog(`\n=== Installing package list ===`);
+        debugLog("Packages to install:", packageList);
 
         const validPackageNames = await checkValidPackageName(packageList);
-
+        debugLog("Valid package names:", validPackageNames);
 
         const installResults = [];
         const alreadyInstalled = [];
         const failedInstalls = [];
 
         for (const packageName of validPackageNames) {
-
+            debugLog(`\n--- Processing package: ${packageName} ---`);
 
             const isInstalled = await isPackageInstalled(packageName);
             if (isInstalled) {
-
+                debugLog(`[SKIP] ${packageName} is already installed`);
                 alreadyInstalled.push(packageName);
                 continue;
             }
 
-
+            debugLog(`Installing ${packageName}...`);
             const installSuccess = await installPythonPackage(packageName);
 
             if (installSuccess) {
-
+                debugLog(`[OK] Successfully installed ${packageName}`);
                 installResults.push(packageName);
             } else {
-
+                debugLog(`[FAIL] Failed to install ${packageName}`);
                 failedInstalls.push(packageName);
             }
         }
@@ -120,8 +122,8 @@ export async function installPackageList(packageList) {
 
 export async function installRequiredPackagesForFiles(filePaths) {
     try {
-
-
+        debugLog(`\n=== Analyzing multiple files ===`);
+        debugLog("Files to analyze:", filePaths);
 
         const allExternalPackages = new Set();
         const analysisResults = [];
@@ -142,7 +144,7 @@ export async function installRequiredPackagesForFiles(filePaths) {
         }
 
         const uniquePackages = Array.from(allExternalPackages);
-
+        debugLog("All unique external packages:", uniquePackages);
 
         if (uniquePackages.length === 0) {
             return {
```