aiexecode 1.0.90 → 1.0.92
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiexecode might be problematic. Click here for more details.
- package/README.md +1 -0
- package/index.js +13 -11
- package/mcp-agent-lib/init.sh +3 -0
- package/mcp-agent-lib/package-lock.json +14 -1
- package/mcp-agent-lib/package.json +4 -6
- package/mcp-agent-lib/sampleFastMCPClient/client.py +25 -0
- package/mcp-agent-lib/sampleFastMCPClient/run.sh +3 -0
- package/mcp-agent-lib/sampleFastMCPServer/run.sh +3 -0
- package/mcp-agent-lib/sampleFastMCPServer/server.py +12 -0
- package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/run.sh +3 -0
- package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/server.py +43 -0
- package/mcp-agent-lib/sampleFastMCPServerRootsRequest/server.py +63 -0
- package/mcp-agent-lib/sampleMCPHost/index.js +182 -63
- package/mcp-agent-lib/sampleMCPHost/mcp_config.json +7 -1
- package/mcp-agent-lib/sampleMCPHostFeatures/elicitation.js +151 -0
- package/mcp-agent-lib/sampleMCPHostFeatures/index.js +166 -0
- package/mcp-agent-lib/sampleMCPHostFeatures/roots.js +197 -0
- package/mcp-agent-lib/src/mcp_client.js +129 -67
- package/mcp-agent-lib/src/mcp_message_logger.js +516 -0
- package/package.json +3 -1
- package/payload_viewer/out/404/index.html +1 -1
- package/payload_viewer/out/404.html +1 -1
- package/payload_viewer/out/index.html +1 -1
- package/payload_viewer/out/index.txt +1 -1
- package/src/LLMClient/client.js +992 -0
- package/src/LLMClient/converters/input-normalizer.js +238 -0
- package/src/LLMClient/converters/responses-to-claude.js +454 -0
- package/src/LLMClient/converters/responses-to-gemini.js +648 -0
- package/src/LLMClient/converters/responses-to-ollama.js +348 -0
- package/src/LLMClient/errors.js +372 -0
- package/src/LLMClient/index.js +31 -0
- package/src/commands/apikey.js +10 -22
- package/src/commands/model.js +28 -28
- package/src/commands/reasoning_effort.js +9 -23
- package/src/config/ai_models.js +212 -0
- package/src/config/feature_flags.js +1 -1
- package/src/frontend/App.js +5 -10
- package/src/frontend/components/CurrentModelView.js +0 -33
- package/src/frontend/components/Footer.js +3 -3
- package/src/frontend/components/ModelListView.js +30 -87
- package/src/frontend/components/ModelUpdatedView.js +7 -142
- package/src/frontend/components/SetupWizard.js +37 -32
- package/src/system/ai_request.js +57 -42
- package/src/util/config.js +26 -4
- package/src/util/setup_wizard.js +1 -6
- package/mcp-agent-lib/.claude/settings.local.json +0 -9
- package/src/config/openai_models.js +0 -152
- /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_buildManifest.js +0 -0
- /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_clientMiddlewareManifest.json +0 -0
- /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_ssgManifest.js +0 -0
package/src/util/setup_wizard.js
CHANGED

@@ -58,10 +58,5 @@ export async function runSetupWizard() {
  58   58      */
  59   59     export async function isConfigured() {
  60   60       const settings = await loadSettings();
  61       -
  62       -    if (settings.AI_PROVIDER === 'openai') {
  63       -      return Boolean(settings.OPENAI_API_KEY && settings.OPENAI_API_KEY.trim());
  64       -    }
  65       -
  66       -    return false;
       61  +    return Boolean(settings.API_KEY && settings.API_KEY.trim());
  67   62     }
package/src/config/openai_models.js
DELETED

@@ -1,152 +0,0 @@
    1  - /**
    2  -  * OpenAI 모델 설정
    3  -  *
    4  -  * 모든 OpenAI 모델 정보를 중앙 집중식으로 관리합니다.
    5  -  * 새 모델 추가 시 이 파일만 수정하면 됩니다.
    6  -  *
    7  -  * 참고: https://platform.openai.com/docs/pricing
    8  -  */
    9  -
   10  - export const OPENAI_MODELS = {
   11  -   // ========================================
   12  -   // GPT-5 시리즈 (최신 세대, 권장)
   13  -   // ========================================
   14  -   'gpt-5': {
   15  -     name: 'GPT-5',
   16  -     description: 'Best model for coding and agentic tasks across industries',
   17  -     contextWindow: 400000, // 400K
   18  -     maxTokens: 128000, // 128K
   19  -     pricing: {
   20  -       input: 1.25, // $1.25 per million tokens
   21  -       cachedInput: 0.125, // $0.125 per million tokens
   22  -       output: 10.00 // $10.00 per million tokens
   23  -     },
   24  -     speed: 'fast',
   25  -     supportsReasoning: true,
   26  -     reasoningSupport: {
   27  -       minimal: true,
   28  -       low: true,
   29  -       medium: true,
   30  -       high: true
   31  -     }
   32  -   },
   33  -   'gpt-5-mini': {
   34  -     name: 'GPT-5 Mini',
   35  -     description: 'Faster and more affordable version of GPT-5 for well-defined tasks',
   36  -     contextWindow: 400000, // 400K
   37  -     maxTokens: 128000, // 128K
   38  -     pricing: {
   39  -       input: 0.25, // $0.25 per million tokens
   40  -       cachedInput: 0.025, // $0.025 per million tokens
   41  -       output: 2.00 // $2.00 per million tokens
   42  -     },
   43  -     speed: 'very-fast',
   44  -     supportsReasoning: true,
   45  -     reasoningSupport: {
   46  -       minimal: true,
   47  -       low: true,
   48  -       medium: true,
   49  -       high: true
   50  -     }
   51  -   },
   52  -   'gpt-5-nano': {
   53  -     name: 'GPT-5 Nano',
   54  -     description: 'Fastest and most affordable version of GPT-5, ideal for summarization and classification tasks',
   55  -     contextWindow: 400000, // 400K
   56  -     maxTokens: 128000, // 128K
   57  -     pricing: {
   58  -       input: 0.05, // $0.05 per million tokens
   59  -       cachedInput: 0.005, // $0.005 per million tokens
   60  -       output: 0.40 // $0.40 per million tokens
   61  -     },
   62  -     speed: 'fastest',
   63  -     supportsReasoning: true,
   64  -     reasoningSupport: {
   65  -       minimal: true,
   66  -       low: true,
   67  -       medium: true,
   68  -       high: true
   69  -     }
   70  -   },
   71  -   'gpt-5-codex': {
   72  -     name: 'GPT-5 Codex',
   73  -     description: 'Optimized for agentic software engineering tasks',
   74  -     contextWindow: 400000, // 400K
   75  -     maxTokens: 128000, // 128K
   76  -     pricing: {
   77  -       input: 1.25, // $1.25 per million tokens
   78  -       cachedInput: 0.125, // $0.125 per million tokens (90% discount)
   79  -       output: 10.00 // $10.00 per million tokens
   80  -     },
   81  -     speed: 'fast',
   82  -     supportsReasoning: true,
   83  -     reasoningSupport: {
   84  -       minimal: true,
   85  -       low: true,
   86  -       medium: true,
   87  -       high: true
   88  -     }
   89  -   }
   90  - };
   91  -
   92  - /**
   93  -  * 모델 ID로 모델 정보 가져오기
   94  -  */
   95  - export function getOpenAIModelInfo(modelId) {
   96  -   return OPENAI_MODELS[modelId] || null;
   97  - }
   98  -
   99  - /**
  100  -  * 모델 ID로 max_tokens 가져오기
  101  -  */
  102  - export function getOpenAIMaxTokens(modelId) {
  103  -   const model = OPENAI_MODELS[modelId];
  104  -   return model ? model.maxTokens : 128000; // 기본값 128K
  105  - }
  106  -
  107  - /**
  108  -  * 모델 ID로 context window 크기 가져오기
  109  -  */
  110  - export function getOpenAIContextWindow(modelId) {
  111  -   const model = OPENAI_MODELS[modelId];
  112  -   return model ? model.contextWindow : 400000; // 기본값 400K
  113  - }
  114  -
  115  - /**
  116  -  * 모든 OpenAI 모델 ID 목록 가져오기
  117  -  */
  118  - export function getAllOpenAIModelIds() {
  119  -   return Object.keys(OPENAI_MODELS);
  120  - }
  121  -
  122  - /**
  123  -  * GPT-5 시리즈 모델 ID 목록
  124  -  */
  125  - export function getGPT5Models() {
  126  -   return Object.keys(OPENAI_MODELS);
  127  - }
  128  -
  129  - /**
  130  -  * Reasoning 지원 모델 ID 목록
  131  -  */
  132  - export function getReasoningModels() {
  133  -   return Object.keys(OPENAI_MODELS).filter(
  134  -     modelId => OPENAI_MODELS[modelId].supportsReasoning
  135  -   );
  136  - }
  137  -
  138  - /**
  139  -  * 특정 모델이 특정 reasoning effort를 지원하는지 확인
  140  -  */
  141  - export function supportsReasoningEffort(modelId, effort) {
  142  -   const model = OPENAI_MODELS[modelId];
  143  -   if (!model || !model.supportsReasoning) {
  144  -     return false;
  145  -   }
  146  -   return model.reasoningSupport?.[effort] || false;
  147  - }
  148  -
  149  - /**
  150  -  * 기본 권장 OpenAI 모델 ID
  151  -  */
  152  - export const DEFAULT_OPENAI_MODEL = 'gpt-5-mini';
|
File without changes
File without changes
File without changes