aiexecode 1.0.90 → 1.0.92

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiexecode might be problematic. See the registry's security advisory for more details.

Files changed (50)
  1. package/README.md +1 -0
  2. package/index.js +13 -11
  3. package/mcp-agent-lib/init.sh +3 -0
  4. package/mcp-agent-lib/package-lock.json +14 -1
  5. package/mcp-agent-lib/package.json +4 -6
  6. package/mcp-agent-lib/sampleFastMCPClient/client.py +25 -0
  7. package/mcp-agent-lib/sampleFastMCPClient/run.sh +3 -0
  8. package/mcp-agent-lib/sampleFastMCPServer/run.sh +3 -0
  9. package/mcp-agent-lib/sampleFastMCPServer/server.py +12 -0
  10. package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/run.sh +3 -0
  11. package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/server.py +43 -0
  12. package/mcp-agent-lib/sampleFastMCPServerRootsRequest/server.py +63 -0
  13. package/mcp-agent-lib/sampleMCPHost/index.js +182 -63
  14. package/mcp-agent-lib/sampleMCPHost/mcp_config.json +7 -1
  15. package/mcp-agent-lib/sampleMCPHostFeatures/elicitation.js +151 -0
  16. package/mcp-agent-lib/sampleMCPHostFeatures/index.js +166 -0
  17. package/mcp-agent-lib/sampleMCPHostFeatures/roots.js +197 -0
  18. package/mcp-agent-lib/src/mcp_client.js +129 -67
  19. package/mcp-agent-lib/src/mcp_message_logger.js +516 -0
  20. package/package.json +3 -1
  21. package/payload_viewer/out/404/index.html +1 -1
  22. package/payload_viewer/out/404.html +1 -1
  23. package/payload_viewer/out/index.html +1 -1
  24. package/payload_viewer/out/index.txt +1 -1
  25. package/src/LLMClient/client.js +992 -0
  26. package/src/LLMClient/converters/input-normalizer.js +238 -0
  27. package/src/LLMClient/converters/responses-to-claude.js +454 -0
  28. package/src/LLMClient/converters/responses-to-gemini.js +648 -0
  29. package/src/LLMClient/converters/responses-to-ollama.js +348 -0
  30. package/src/LLMClient/errors.js +372 -0
  31. package/src/LLMClient/index.js +31 -0
  32. package/src/commands/apikey.js +10 -22
  33. package/src/commands/model.js +28 -28
  34. package/src/commands/reasoning_effort.js +9 -23
  35. package/src/config/ai_models.js +212 -0
  36. package/src/config/feature_flags.js +1 -1
  37. package/src/frontend/App.js +5 -10
  38. package/src/frontend/components/CurrentModelView.js +0 -33
  39. package/src/frontend/components/Footer.js +3 -3
  40. package/src/frontend/components/ModelListView.js +30 -87
  41. package/src/frontend/components/ModelUpdatedView.js +7 -142
  42. package/src/frontend/components/SetupWizard.js +37 -32
  43. package/src/system/ai_request.js +57 -42
  44. package/src/util/config.js +26 -4
  45. package/src/util/setup_wizard.js +1 -6
  46. package/mcp-agent-lib/.claude/settings.local.json +0 -9
  47. package/src/config/openai_models.js +0 -152
  48. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_buildManifest.js +0 -0
  49. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_clientMiddlewareManifest.json +0 -0
  50. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_ssgManifest.js +0 -0
@@ -0,0 +1,31 @@
1
+ /**
2
+ * LLM Function Adapter
3
+ * Unified interface for LLMs using OpenAI Responses API format
4
+ */
5
+
6
+ export { UnifiedLLMClient } from './client.js';
7
+
8
+ // Export Responses API converters
9
+ export {
10
+ convertResponsesRequestToClaudeFormat,
11
+ convertClaudeResponseToResponsesFormat
12
+ } from './converters/responses-to-claude.js';
13
+
14
+ export {
15
+ convertResponsesRequestToGeminiFormat,
16
+ convertGeminiResponseToResponsesFormat
17
+ } from './converters/responses-to-gemini.js';
18
+
19
+ export {
20
+ convertResponsesRequestToOllamaFormat,
21
+ convertOllamaResponseToResponsesFormat
22
+ } from './converters/responses-to-ollama.js';
23
+
24
+ // Export error classes
25
+ export {
26
+ LLMError,
27
+ AuthenticationError,
28
+ InvalidRequestError,
29
+ RateLimitError,
30
+ NotFoundError
31
+ } from './errors.js';
@@ -3,19 +3,19 @@ import { loadSettings, saveSettings, SETTINGS_FILE } from '../util/config.js';
3
3
  import { resetAIClients } from '../system/ai_request.js';
4
4
 
5
5
  /**
6
- * /apikey 커맨드 - AI Provider 및 API 키 설정
6
+ * /apikey 커맨드 - API 키 설정
7
7
  */
8
8
  export default {
9
9
  name: 'apikey',
10
- description: 'Set AI provider and API key (openai)',
11
- usage: '/apikey openai <api-key>',
10
+ description: 'Set API key',
11
+ usage: '/apikey <api-key>',
12
12
  handler: async (args, context) => {
13
13
  // 인자 확인
14
- if (!args || args.length < 2) {
14
+ if (!args || args.length < 1) {
15
15
  uiEvents.addSystemMessage(
16
- `Please enter provider and API key.\n\n` +
16
+ `Please enter API key.\n\n` +
17
17
  `Usage:\n` +
18
- ` /apikey openai sk-proj-...`
18
+ ` /apikey sk-proj-...`
19
19
  );
20
20
  return;
21
21
  }
@@ -24,26 +24,15 @@ export default {
24
24
  // 현재 설정 로드
25
25
  const settings = await loadSettings();
26
26
 
27
- const providerArg = args[0].toLowerCase();
28
-
29
- // Provider 검증
30
- if (providerArg !== 'openai') {
31
- uiEvents.addSystemMessage(
32
- `Invalid provider: ${providerArg}\n\n` +
33
- `Valid providers: openai`
34
- );
35
- return;
36
- }
37
-
38
- const apiKey = args[1];
27
+ const apiKey = args[0];
39
28
 
40
29
  // API 키 업데이트
41
- settings.OPENAI_API_KEY = apiKey;
42
- process.env.OPENAI_API_KEY = apiKey;
30
+ settings.API_KEY = apiKey;
31
+ process.env.API_KEY = apiKey;
43
32
 
44
33
  // 설정 저장
45
34
  await saveSettings(settings);
46
-
35
+
47
36
  // AI 클라이언트 캐시 초기화 (다음 요청 시 새 설정 적용)
48
37
  resetAIClients();
49
38
 
@@ -54,7 +43,6 @@ export default {
54
43
 
55
44
  uiEvents.addSystemMessage(
56
45
  `API configuration complete\n\n` +
57
- `Provider: OPENAI\n` +
58
46
  `API Key: ${maskedKey}\n` +
59
47
  `Saved to: ${SETTINGS_FILE}`
60
48
  );
@@ -2,36 +2,35 @@ import React from 'react';
2
2
  import { uiEvents } from '../system/ui_events.js';
3
3
  import { loadSettings, saveSettings, SETTINGS_FILE } from '../util/config.js';
4
4
  import { resetAIClients } from '../system/ai_request.js';
5
- import { OPENAI_MODELS, getGPT5Models, DEFAULT_OPENAI_MODEL } from '../config/openai_models.js';
5
+ import { AI_MODELS, getModelsByProvider, DEFAULT_MODEL } from '../config/ai_models.js';
6
6
  import { renderInkComponent } from '../frontend/utils/renderInkComponent.js';
7
7
  import { ModelListView } from '../frontend/components/ModelListView.js';
8
8
  import { CurrentModelView } from '../frontend/components/CurrentModelView.js';
9
9
  import { ModelUpdatedView } from '../frontend/components/ModelUpdatedView.js';
10
10
 
11
- // 지원하는 모델 목록
12
- // OpenAI: https://platform.openai.com/docs/pricing
13
- const MODELS = {
14
- openai: OPENAI_MODELS
15
- };
16
-
17
11
  // 모델 ID로 provider 찾기
18
12
  function getProviderForModel(modelId) {
19
- if (MODELS.openai[modelId]) {
20
- return 'openai';
21
- }
22
- return null;
13
+ const modelInfo = AI_MODELS[modelId];
14
+ return modelInfo ? modelInfo.provider : null;
23
15
  }
24
16
 
25
17
  // 모든 모델 목록 표시
26
18
  async function listAllModels() {
27
- const gpt5Models = getGPT5Models();
28
- const openaiModels = gpt5Models.map(id => ({
29
- id,
30
- ...MODELS.openai[id]
31
- }));
19
+ // 모든 provider 목록 추출
20
+ const providers = [...new Set(Object.values(AI_MODELS).map(m => m.provider))];
21
+
22
+ // provider별로 모델 그룹화
23
+ const modelsByProvider = {};
24
+ providers.forEach(provider => {
25
+ const modelIds = getModelsByProvider(provider);
26
+ modelsByProvider[provider] = modelIds.map(id => ({
27
+ id,
28
+ ...AI_MODELS[id]
29
+ }));
30
+ });
32
31
 
33
32
  const component = React.createElement(ModelListView, {
34
- openaiModels
33
+ modelsByProvider
35
34
  });
36
35
 
37
36
  const output = await renderInkComponent(component);
@@ -42,11 +41,12 @@ async function listAllModels() {
42
41
  async function showCurrentModel() {
43
42
  try {
44
43
  const settings = await loadSettings();
45
- const currentModel = settings?.OPENAI_MODEL || DEFAULT_OPENAI_MODEL;
46
- const modelInfo = MODELS.openai?.[currentModel];
44
+ const currentModel = settings?.MODEL || DEFAULT_MODEL;
45
+ const modelInfo = AI_MODELS[currentModel];
46
+ const provider = modelInfo?.provider || 'unknown';
47
47
 
48
48
  const component = React.createElement(CurrentModelView, {
49
- provider: 'openai',
49
+ provider,
50
50
  modelId: currentModel,
51
51
  modelInfo
52
52
  });
@@ -99,26 +99,26 @@ export default {
99
99
  const settings = await loadSettings();
100
100
 
101
101
  // 모델 업데이트
102
- settings.OPENAI_MODEL = modelId;
103
- process.env.OPENAI_MODEL = modelId;
102
+ settings.MODEL = modelId;
103
+ process.env.MODEL = modelId;
104
104
 
105
105
  // 설정 저장
106
106
  await saveSettings(settings);
107
-
107
+
108
108
  // AI 클라이언트 캐시 초기화 (다음 요청 시 새 설정 적용)
109
109
  resetAIClients();
110
-
110
+
111
111
  // UI 모델 표시 업데이트
112
112
  uiEvents.emit('model:changed', { model: modelId });
113
113
 
114
114
  // 성공 메시지
115
- const modelInfo = MODELS[provider][modelId];
115
+ const modelInfo = AI_MODELS[modelId];
116
116
 
117
117
  let warning = null;
118
- if (!settings.OPENAI_API_KEY) {
118
+ if (!settings.API_KEY) {
119
119
  warning = {
120
- message: 'OpenAI API key is not configured.',
121
- hint: 'Set your API key with: `/apikey openai sk-proj-...`'
120
+ message: 'API key is not configured.',
121
+ hint: 'Set your API key with: `/apikey sk-proj-...`'
122
122
  };
123
123
  }
124
124
 
@@ -1,7 +1,7 @@
1
1
  import { uiEvents } from '../system/ui_events.js';
2
2
  import { loadSettings, saveSettings, SETTINGS_FILE } from '../util/config.js';
3
3
  import { resetAIClients } from '../system/ai_request.js';
4
- import { OPENAI_MODELS, getReasoningModels, supportsReasoningEffort, DEFAULT_OPENAI_MODEL } from '../config/openai_models.js';
4
+ import { AI_MODELS, getReasoningModels, supportsReasoningEffort, DEFAULT_MODEL } from '../config/ai_models.js';
5
5
 
6
6
  // reasoning_effort 값 검증 및 설명
7
7
  const EFFORT_LEVELS = {
@@ -34,14 +34,9 @@ const EFFORT_LEVELS = {
34
34
  // 현재 모델이 reasoning_effort를 지원하는지 확인
35
35
  async function checkModelSupport() {
36
36
  const settings = await loadSettings();
37
- const currentModel = settings?.OPENAI_MODEL || DEFAULT_OPENAI_MODEL;
38
- const provider = settings?.AI_PROVIDER || 'openai';
37
+ const currentModel = settings?.MODEL || DEFAULT_MODEL;
39
38
 
40
- if (provider !== 'openai') {
41
- return { supported: false, reason: 'Only OpenAI models support reasoning_effort.' };
42
- }
43
-
44
- const modelInfo = OPENAI_MODELS[currentModel];
39
+ const modelInfo = AI_MODELS[currentModel];
45
40
  if (!modelInfo || !modelInfo.supportsReasoning) {
46
41
  return { supported: false, reason: `Current model (${currentModel}) does not support reasoning_effort.` };
47
42
  }
@@ -52,21 +47,12 @@ async function checkModelSupport() {
52
47
  // 현재 설정 표시
53
48
  async function showCurrentSettings() {
54
49
  const settings = await loadSettings();
55
- const currentModel = settings?.OPENAI_MODEL || DEFAULT_OPENAI_MODEL;
56
- const provider = settings?.AI_PROVIDER || 'openai';
57
- const reasoningEffort = settings?.OPENAI_REASONING_EFFORT || 'medium';
50
+ const currentModel = settings?.MODEL || DEFAULT_MODEL;
51
+ const reasoningEffort = settings?.REASONING_EFFORT || 'medium';
58
52
 
59
53
  let message = 'Reasoning Effort Configuration\n\n';
60
54
 
61
- if (provider !== 'openai') {
62
- message += 'Current provider is not OpenAI.\n';
63
- message += `Current Provider: ${provider.toUpperCase()}\n`;
64
- message += '\nReasoning effort is only supported for OpenAI models.\n';
65
- message += 'Use `/model gpt-5` or `/model gpt-5-mini` to switch to an OpenAI model.\n';
66
- return message;
67
- }
68
-
69
- const modelInfo = OPENAI_MODELS[currentModel];
55
+ const modelInfo = AI_MODELS[currentModel];
70
56
 
71
57
  if (!modelInfo || !modelInfo.supportsReasoning) {
72
58
  message += `Current model (\`${currentModel}\`) does not support reasoning_effort.\n\n`;
@@ -90,7 +76,7 @@ async function showCurrentSettings() {
90
76
 
91
77
  // 모든 effort 레벨 표시
92
78
  function listEffortLevels(currentModel) {
93
- const modelInfo = OPENAI_MODELS[currentModel];
79
+ const modelInfo = AI_MODELS[currentModel];
94
80
 
95
81
  let message = 'Available Reasoning Effort Levels\n\n';
96
82
 
@@ -190,8 +176,8 @@ export default {
190
176
  const settings = await loadSettings();
191
177
 
192
178
  // reasoning_effort 업데이트
193
- settings.OPENAI_REASONING_EFFORT = effortLevel;
194
- process.env.OPENAI_REASONING_EFFORT = effortLevel;
179
+ settings.REASONING_EFFORT = effortLevel;
180
+ process.env.REASONING_EFFORT = effortLevel;
195
181
 
196
182
  // 설정 저장
197
183
  await saveSettings(settings);
@@ -0,0 +1,212 @@
1
+ /**
2
+ * AI 모델 설정
3
+ *
4
+ * 모든 AI 제조사의 모델 정보를 중앙 집중식으로 관리합니다.
5
+ * 새 모델 추가 시 이 파일만 수정하면 됩니다.
6
+ */
7
+
8
+ export const AI_MODELS = {
9
+ // ========================================
10
+ // Claude 시리즈
11
+ // ========================================
12
+ 'claude-sonnet-4-5-20250929': {
13
+ provider: 'claude',
14
+ name: 'Claude Sonnet 4.5',
15
+ contextWindow: 200000,
16
+ maxTokens: 64000,
17
+ },
18
+ 'claude-haiku-4-5-20251001': {
19
+ provider: 'claude',
20
+ name: 'Claude Haiku 4.5',
21
+ contextWindow: 200000,
22
+ maxTokens: 64000,
23
+ },
24
+ // 'claude-opus-4-1-20250805': {
25
+ // provider: 'claude',
26
+ // name: 'Claude Opus 4.1',
27
+ // contextWindow: 200000,
28
+ // maxTokens: 32000,
29
+ // supportsReasoning: true,
30
+ // },
31
+ // 'claude-sonnet-4-20250514': {
32
+ // provider: 'claude',
33
+ // name: 'Claude Sonnet 4',
34
+ // contextWindow: 200000,
35
+ // maxTokens: 64000,
36
+ // supportsReasoning: true,
37
+ // },
38
+ // 'claude-3-7-sonnet-20250219': {
39
+ // provider: 'claude',
40
+ // name: 'Claude Sonnet 3.7',
41
+ // contextWindow: 200000,
42
+ // maxTokens: 64000,
43
+ // supportsReasoning: true,
44
+ // },
45
+ // 'claude-opus-4-20250514': {
46
+ // provider: 'claude',
47
+ // name: 'Claude Opus 4',
48
+ // contextWindow: 200000,
49
+ // maxTokens: 32000,
50
+ // supportsReasoning: true,
51
+ // },
52
+ // 'claude-3-5-haiku-20241022': {
53
+ // provider: 'claude',
54
+ // name: 'Claude Haiku 3.5',
55
+ // contextWindow: 200000,
56
+ // maxTokens: 8000,
57
+ // },
58
+ 'claude-3-haiku-20240307': {
59
+ provider: 'claude',
60
+ name: 'Claude Haiku 3',
61
+ contextWindow: 200000,
62
+ maxTokens: 4096,
63
+ },
64
+
65
+ // ========================================
66
+ // Google Gemini 시리즈
67
+ // ========================================
68
+ // 'gemini-2.5-flash': {
69
+ // provider: 'gemini',
70
+ // name: 'Gemini 2.5 Flash',
71
+ // contextWindow: 1048576, // 1M tokens
72
+ // maxTokens: 65536, // 64K tokens
73
+ // supportsReasoning: true,
74
+ // },
75
+
76
+ // ========================================
77
+ // OpenAI GPT-5 시리즈
78
+ // ========================================
79
+ 'gpt-5': {
80
+ provider: 'openai',
81
+ name: 'GPT-5',
82
+ contextWindow: 400000, // 400K
83
+ maxTokens: 128000, // 128K
84
+ supportsReasoning: true,
85
+ reasoningSupport: {
86
+ minimal: true,
87
+ low: true,
88
+ medium: true,
89
+ high: true
90
+ }
91
+ },
92
+ 'gpt-5-mini': {
93
+ provider: 'openai',
94
+ name: 'GPT-5 Mini',
95
+ contextWindow: 400000, // 400K
96
+ maxTokens: 128000, // 128K
97
+ supportsReasoning: true,
98
+ reasoningSupport: {
99
+ minimal: true,
100
+ low: true,
101
+ medium: true,
102
+ high: true
103
+ }
104
+ },
105
+ 'gpt-5-nano': {
106
+ provider: 'openai',
107
+ name: 'GPT-5 Nano',
108
+ contextWindow: 400000, // 400K
109
+ maxTokens: 128000, // 128K
110
+ supportsReasoning: true,
111
+ reasoningSupport: {
112
+ minimal: true,
113
+ low: true,
114
+ medium: true,
115
+ high: true
116
+ }
117
+ },
118
+ 'gpt-5-codex': {
119
+ provider: 'openai',
120
+ name: 'GPT-5 Codex',
121
+ contextWindow: 400000, // 400K
122
+ maxTokens: 128000, // 128K
123
+ supportsReasoning: true,
124
+ reasoningSupport: {
125
+ minimal: true,
126
+ low: true,
127
+ medium: true,
128
+ high: true
129
+ }
130
+ }
131
+ };
132
+
133
+ /**
134
+ * 모델 ID로 모델 정보 가져오기
135
+ */
136
+ export function getModelInfo(modelId) {
137
+ return AI_MODELS[modelId] || null;
138
+ }
139
+
140
+ /**
141
+ * 모델 ID로 max_tokens 가져오기
142
+ */
143
+ export function getMaxTokens(modelId) {
144
+ const model = AI_MODELS[modelId];
145
+ return model ? model.maxTokens : 128000; // 기본값 128K
146
+ }
147
+
148
+ /**
149
+ * 모델 ID로 context window 크기 가져오기
150
+ */
151
+ export function getContextWindow(modelId) {
152
+ const model = AI_MODELS[modelId];
153
+ return model ? model.contextWindow : 400000; // 기본값 400K
154
+ }
155
+
156
+ /**
157
+ * 모든 모델 ID 목록 가져오기
158
+ */
159
+ export function getAllModelIds() {
160
+ return Object.keys(AI_MODELS);
161
+ }
162
+
163
+ /**
164
+ * 특정 제조사의 모델 ID 목록 가져오기
165
+ */
166
+ export function getModelsByProvider(provider) {
167
+ return Object.keys(AI_MODELS).filter(
168
+ modelId => AI_MODELS[modelId].provider === provider
169
+ );
170
+ }
171
+
172
+ /**
173
+ * GPT-5 시리즈 모델 ID 목록 (하위 호환성)
174
+ */
175
+ export function getGPT5Models() {
176
+ return getModelsByProvider('openai');
177
+ }
178
+
179
+ /**
180
+ * Reasoning 지원 모델 ID 목록
181
+ */
182
+ export function getReasoningModels() {
183
+ return Object.keys(AI_MODELS).filter(
184
+ modelId => AI_MODELS[modelId].supportsReasoning
185
+ );
186
+ }
187
+
188
+ /**
189
+ * 특정 모델이 특정 reasoning effort를 지원하는지 확인
190
+ */
191
+ export function supportsReasoningEffort(modelId, effort) {
192
+ const model = AI_MODELS[modelId];
193
+ if (!model || !model.supportsReasoning) {
194
+ return false;
195
+ }
196
+ return model.reasoningSupport?.[effort] || false;
197
+ }
198
+
199
+ /**
200
+ * 기본 권장 모델 ID
201
+ */
202
+ export const DEFAULT_MODEL = 'gpt-5-mini';
203
+
204
+ // ========================================
205
+ // 하위 호환성을 위한 별칭 (Deprecated)
206
+ // ========================================
207
+ export const OPENAI_MODELS = AI_MODELS;
208
+ export const getOpenAIModelInfo = getModelInfo;
209
+ export const getOpenAIMaxTokens = getMaxTokens;
210
+ export const getOpenAIContextWindow = getContextWindow;
211
+ export const getAllOpenAIModelIds = getAllModelIds;
212
+ export const DEFAULT_OPENAI_MODEL = DEFAULT_MODEL;
@@ -9,7 +9,7 @@
9
9
  * 'concise': 간결한 에러 메시지만 표시 (기본값)
10
10
  * 'verbose': 상세한 에러 정보 표시 (코드, 스택 트레이스, 전체 에러 객체 등)
11
11
  */
12
- export const ERROR_VERBOSITY = 'concise';
12
+ export const ERROR_VERBOSITY = 'verbose';
13
13
 
14
14
  export default {
15
15
  ERROR_VERBOSITY
@@ -980,10 +980,9 @@ export function App({ onSubmit, onClearScreen, onExit, commands = [], model, ver
980
980
  await saveSettings(settings);
981
981
 
982
982
  // 환경변수 업데이트
983
- process.env.AI_PROVIDER = settings.AI_PROVIDER;
984
- process.env.OPENAI_API_KEY = settings.OPENAI_API_KEY;
985
- process.env.OPENAI_MODEL = settings.OPENAI_MODEL;
986
- process.env.OPENAI_REASONING_EFFORT = settings.OPENAI_REASONING_EFFORT;
983
+ process.env.API_KEY = settings.API_KEY;
984
+ process.env.MODEL = settings.MODEL;
985
+ process.env.REASONING_EFFORT = settings.REASONING_EFFORT;
987
986
 
988
987
  // 클라이언트 리셋
989
988
  resetAIClients();
@@ -992,12 +991,8 @@ export function App({ onSubmit, onClearScreen, onExit, commands = [], model, ver
992
991
  const newModel = await getModelForProvider();
993
992
  setCurrentModel(newModel);
994
993
 
995
- // reasoning effort 업데이트 (OpenAI 모델인 경우)
996
- if (settings.AI_PROVIDER === 'openai') {
997
- setReasoningEffort(settings.OPENAI_REASONING_EFFORT);
998
- } else {
999
- setReasoningEffort(null);
1000
- }
994
+ // reasoning effort 업데이트
995
+ setReasoningEffort(settings.REASONING_EFFORT);
1001
996
 
1002
997
  // 설정 완료 메시지 추가
1003
998
  uiEvents.addSystemMessage('✓ Setup completed! New configuration will be used.');
@@ -82,39 +82,6 @@ export function CurrentModelView({ provider, modelId, modelInfo }) {
82
82
  React.createElement(Text, {
83
83
  color: 'white'
84
84
  }, modelInfo.name)
85
- ),
86
-
87
- React.createElement(Text, null),
88
-
89
- // Description
90
- React.createElement(Box, { flexDirection: 'column' },
91
- React.createElement(Text, {
92
- bold: true,
93
- color: theme.text.secondary
94
- }, 'Description:'),
95
- React.createElement(Text, {
96
- color: theme.text.secondary,
97
- dimColor: true
98
- }, ` ${modelInfo.description}`)
99
- ),
100
-
101
- modelInfo.pricing && React.createElement(React.Fragment, null,
102
- React.createElement(Text, null),
103
-
104
- // Pricing
105
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
106
- React.createElement(Text, {
107
- bold: true,
108
- color: theme.text.secondary
109
- }, 'Pricing:'),
110
- React.createElement(Text, {
111
- color: 'green'
112
- }, `💰 $${modelInfo.pricing.input}/$${modelInfo.pricing.output}`),
113
- React.createElement(Text, {
114
- dimColor: true,
115
- italic: true
116
- }, '(input/output per 1M tokens)')
117
- )
118
85
  )
119
86
  )
120
87
  ),
@@ -5,14 +5,14 @@
5
5
  import React from 'react';
6
6
  import { Box, Text } from 'ink';
7
7
  import { theme } from '../design/themeColors.js';
8
- import { OPENAI_MODELS } from '../../config/openai_models.js';
8
+ import { AI_MODELS } from '../../config/ai_models.js';
9
9
 
10
10
  export function Footer({ model = 'gpt-4', reasoningEffort = null, cwd = process.cwd() }) {
11
11
  const displayCwd = cwd.length > 40 ? '...' + cwd.slice(-37) : cwd;
12
12
 
13
- // Display reasoning effort for OpenAI models that support it
13
+ // Display reasoning effort for models that support it
14
14
  let displayModel = model;
15
- const modelInfo = OPENAI_MODELS[model];
15
+ const modelInfo = AI_MODELS[model];
16
16
  const supportsReasoningEffort = modelInfo?.supportsReasoning || false;
17
17
  if (reasoningEffort && supportsReasoningEffort) {
18
18
  displayModel = `${model} (${reasoningEffort})`;