aiexecode 1.0.90 → 1.0.92

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of aiexecode might be problematic. See the registry's security advisory for more details.

Files changed (50)
  1. package/README.md +1 -0
  2. package/index.js +13 -11
  3. package/mcp-agent-lib/init.sh +3 -0
  4. package/mcp-agent-lib/package-lock.json +14 -1
  5. package/mcp-agent-lib/package.json +4 -6
  6. package/mcp-agent-lib/sampleFastMCPClient/client.py +25 -0
  7. package/mcp-agent-lib/sampleFastMCPClient/run.sh +3 -0
  8. package/mcp-agent-lib/sampleFastMCPServer/run.sh +3 -0
  9. package/mcp-agent-lib/sampleFastMCPServer/server.py +12 -0
  10. package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/run.sh +3 -0
  11. package/mcp-agent-lib/sampleFastMCPServerElicitationRequest/server.py +43 -0
  12. package/mcp-agent-lib/sampleFastMCPServerRootsRequest/server.py +63 -0
  13. package/mcp-agent-lib/sampleMCPHost/index.js +182 -63
  14. package/mcp-agent-lib/sampleMCPHost/mcp_config.json +7 -1
  15. package/mcp-agent-lib/sampleMCPHostFeatures/elicitation.js +151 -0
  16. package/mcp-agent-lib/sampleMCPHostFeatures/index.js +166 -0
  17. package/mcp-agent-lib/sampleMCPHostFeatures/roots.js +197 -0
  18. package/mcp-agent-lib/src/mcp_client.js +129 -67
  19. package/mcp-agent-lib/src/mcp_message_logger.js +516 -0
  20. package/package.json +3 -1
  21. package/payload_viewer/out/404/index.html +1 -1
  22. package/payload_viewer/out/404.html +1 -1
  23. package/payload_viewer/out/index.html +1 -1
  24. package/payload_viewer/out/index.txt +1 -1
  25. package/src/LLMClient/client.js +992 -0
  26. package/src/LLMClient/converters/input-normalizer.js +238 -0
  27. package/src/LLMClient/converters/responses-to-claude.js +454 -0
  28. package/src/LLMClient/converters/responses-to-gemini.js +648 -0
  29. package/src/LLMClient/converters/responses-to-ollama.js +348 -0
  30. package/src/LLMClient/errors.js +372 -0
  31. package/src/LLMClient/index.js +31 -0
  32. package/src/commands/apikey.js +10 -22
  33. package/src/commands/model.js +28 -28
  34. package/src/commands/reasoning_effort.js +9 -23
  35. package/src/config/ai_models.js +212 -0
  36. package/src/config/feature_flags.js +1 -1
  37. package/src/frontend/App.js +5 -10
  38. package/src/frontend/components/CurrentModelView.js +0 -33
  39. package/src/frontend/components/Footer.js +3 -3
  40. package/src/frontend/components/ModelListView.js +30 -87
  41. package/src/frontend/components/ModelUpdatedView.js +7 -142
  42. package/src/frontend/components/SetupWizard.js +37 -32
  43. package/src/system/ai_request.js +57 -42
  44. package/src/util/config.js +26 -4
  45. package/src/util/setup_wizard.js +1 -6
  46. package/mcp-agent-lib/.claude/settings.local.json +0 -9
  47. package/src/config/openai_models.js +0 -152
  48. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_buildManifest.js +0 -0
  49. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_clientMiddlewareManifest.json +0 -0
  50. /package/payload_viewer/out/_next/static/{w4dMVYalgk7djrLxRxWiE → d0-fu2rgYnshgGFPxr1CR}/_ssgManifest.js +0 -0
@@ -6,101 +6,44 @@ import React from 'react';
6
6
  import { Box, Text } from 'ink';
7
7
  import { theme } from '../design/themeColors.js';
8
8
 
9
- export function ModelListView({ openaiModels }) {
9
+ export function ModelListView({ modelsByProvider }) {
10
10
  const sections = [
11
- // Header
12
- React.createElement(Box, {
11
+ React.createElement(Text, {
13
12
  key: 'header',
14
- borderStyle: 'double',
15
- borderColor: 'gray',
16
- paddingX: 2,
17
- paddingY: 0,
18
- justifyContent: 'center'
19
- },
20
- React.createElement(Text, {
21
- bold: true,
22
- color: 'whiteBright'
23
- }, 'Available AI Models')
24
- ),
25
-
26
- React.createElement(Text, { key: 'spacer1' }, null),
27
-
28
- // OpenAI Section
29
- React.createElement(Box, {
30
- key: 'openai-section',
31
- flexDirection: 'column',
32
- borderStyle: 'round',
33
- borderColor: 'gray',
34
- paddingX: 2,
35
- paddingY: 1
36
- },
37
- React.createElement(Box, { flexDirection: 'column' },
38
- React.createElement(Text, {
39
- bold: true,
40
- color: 'cyan'
41
- }, '🤖 OpenAI Models'),
42
- React.createElement(Text, {
43
- dimColor: true,
44
- italic: true
45
- }, 'GPT-5 Series (Latest generation, recommended)'),
46
- React.createElement(Text, null)
47
- ),
13
+ bold: true,
14
+ color: 'whiteBright'
15
+ }, 'Available AI Models'),
48
16
 
49
- openaiModels.map((model, index) =>
50
- React.createElement(Box, {
51
- key: model.id,
52
- flexDirection: 'column',
53
- marginBottom: index < openaiModels.length - 1 ? 1 : 0
54
- },
55
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
56
- React.createElement(Text, { color: 'green', bold: true }, '•'),
57
- React.createElement(Text, {
58
- color: 'white',
59
- bold: true
60
- }, model.id)
61
- ),
62
- React.createElement(Box, {
63
- flexDirection: 'column',
64
- marginLeft: 2,
65
- gap: 0
66
- },
67
- React.createElement(Text, {
68
- color: theme.text.secondary
69
- }, `${model.name} - ${model.description}`),
70
- React.createElement(Text, {
71
- dimColor: true,
72
- italic: true
73
- }, `💰 $${model.pricing.input}/$${model.pricing.output} (input/output per 1M tokens)`)
74
- )
75
- )
76
- )
77
- )
17
+ React.createElement(Text, { key: 'spacer1' }, null)
78
18
  ];
79
19
 
80
- sections.push(
81
- React.createElement(Text, { key: 'spacer4' }, null),
20
+ // 모든 모델을 하나의 리스트로 통합
21
+ const allModels = [];
22
+ Object.keys(modelsByProvider).forEach(provider => {
23
+ const models = modelsByProvider[provider];
24
+ models.forEach(model => {
25
+ allModels.push(model);
26
+ });
27
+ });
28
+
29
+ // 모델 리스트 추가
30
+ allModels.forEach((model, index) => {
31
+ sections.push(
32
+ React.createElement(Text, {
33
+ key: model.id,
34
+ color: 'white'
35
+ }, ` • ${model.id}`)
36
+ );
37
+ });
82
38
 
83
- // Footer
84
- React.createElement(Box, {
85
- key: 'footer',
86
- flexDirection: 'column',
87
- borderStyle: 'single',
88
- borderColor: 'gray',
89
- paddingX: 2,
90
- paddingY: 1
91
- },
92
- React.createElement(Text, { bold: true }, '📖 Usage'),
93
- React.createElement(Text, null, ' /model <model-id>'),
94
- React.createElement(Text, { dimColor: true }, ' Example: /model gpt-5'),
95
- React.createElement(Text, null),
96
- React.createElement(Text, { bold: true }, '🔗 More Info'),
97
- React.createElement(Text, { color: theme.text.link }, ' OpenAI: https://platform.openai.com/docs/pricing')
98
- )
39
+ sections.push(
40
+ React.createElement(Text, { key: 'spacer2' }, null),
41
+ React.createElement(Text, { key: 'usage', bold: true }, 'Usage:'),
42
+ React.createElement(Text, { key: 'usage-cmd' }, ' /model <model-id>'),
43
+ React.createElement(Text, { key: 'usage-example', dimColor: true }, ' Example: /model gpt-5')
99
44
  );
100
45
 
101
46
  return React.createElement(Box, {
102
- flexDirection: 'column',
103
- paddingX: 2,
104
- paddingY: 1
47
+ flexDirection: 'column'
105
48
  }, ...sections);
106
49
  }
@@ -7,151 +7,16 @@ import { Box, Text } from 'ink';
7
7
  import { theme } from '../design/themeColors.js';
8
8
 
9
9
  export function ModelUpdatedView({ provider, modelId, modelInfo, settingsFile, warning }) {
10
- const providerColors = {
11
- openai: 'cyan'
12
- };
13
-
14
- const providerEmojis = {
15
- openai: '🤖'
16
- };
17
-
18
10
  return React.createElement(Box, {
19
- flexDirection: 'column',
20
- paddingX: 2,
21
- paddingY: 1
11
+ flexDirection: 'column'
22
12
  },
23
- // Header
24
- React.createElement(Box, {
25
- borderStyle: 'double',
26
- borderColor: 'gray',
27
- paddingX: 2,
28
- paddingY: 0,
29
- justifyContent: 'center'
30
- },
31
- React.createElement(Text, {
32
- bold: true,
33
- color: 'whiteBright'
34
- }, '✓ Model Updated Successfully')
35
- ),
36
-
37
- React.createElement(Text, null),
38
-
39
- // Main Content
40
- React.createElement(Box, {
41
- flexDirection: 'column',
42
- borderStyle: 'round',
43
- borderColor: 'gray',
44
- paddingX: 2,
45
- paddingY: 1
46
- },
47
- // Provider
48
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
49
- React.createElement(Text, {
50
- bold: true,
51
- color: theme.text.secondary
52
- }, 'Provider:'),
53
- React.createElement(Text, {
54
- color: providerColors[provider] || 'white',
55
- bold: true
56
- }, `${providerEmojis[provider] || ''} ${provider.toUpperCase()}`)
57
- ),
58
-
59
- React.createElement(Text, null),
60
-
61
- // Model ID
62
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
63
- React.createElement(Text, {
64
- bold: true,
65
- color: theme.text.secondary
66
- }, 'Model ID:'),
67
- React.createElement(Text, {
68
- color: 'white',
69
- bold: true
70
- }, modelId)
71
- ),
72
-
73
- React.createElement(Text, null),
74
-
75
- // Name
76
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
77
- React.createElement(Text, {
78
- bold: true,
79
- color: theme.text.secondary
80
- }, 'Name:'),
81
- React.createElement(Text, {
82
- color: 'white'
83
- }, modelInfo.name)
84
- ),
85
-
86
- React.createElement(Text, null),
87
-
88
- // Description
89
- React.createElement(Box, { flexDirection: 'column' },
90
- React.createElement(Text, {
91
- bold: true,
92
- color: theme.text.secondary
93
- }, 'Description:'),
94
- React.createElement(Text, {
95
- color: theme.text.secondary,
96
- dimColor: true
97
- }, ` ${modelInfo.description}`)
98
- ),
99
-
100
- modelInfo.pricing && React.createElement(React.Fragment, null,
101
- React.createElement(Text, null),
102
-
103
- // Pricing
104
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
105
- React.createElement(Text, {
106
- bold: true,
107
- color: theme.text.secondary
108
- }, 'Pricing:'),
109
- React.createElement(Text, {
110
- color: 'green'
111
- }, `💰 $${modelInfo.pricing.input}/$${modelInfo.pricing.output}`),
112
- React.createElement(Text, {
113
- dimColor: true,
114
- italic: true
115
- }, '(input/output per 1M tokens)')
116
- )
117
- ),
118
-
119
- React.createElement(Text, null),
120
-
121
- // Saved to
122
- React.createElement(Box, { flexDirection: 'row', gap: 1 },
123
- React.createElement(Text, {
124
- dimColor: true,
125
- italic: true
126
- }, '📁 Saved to:'),
127
- React.createElement(Text, {
128
- dimColor: true,
129
- italic: true
130
- }, settingsFile)
131
- )
132
- ),
13
+ React.createElement(Text, {
14
+ color: 'green'
15
+ }, `Model updated: ${modelId}`),
133
16
 
134
17
  // Warning if API key not configured
135
- warning && React.createElement(React.Fragment, null,
136
- React.createElement(Text, null),
137
- React.createElement(Box, {
138
- flexDirection: 'column',
139
- borderStyle: 'round',
140
- borderColor: 'gray',
141
- paddingX: 2,
142
- paddingY: 1
143
- },
144
- React.createElement(Text, {
145
- bold: true,
146
- color: 'yellow'
147
- }, '⚠ Warning'),
148
- React.createElement(Text, null),
149
- React.createElement(Text, { color: theme.text.secondary }, warning.message),
150
- React.createElement(Text, {
151
- dimColor: true,
152
- italic: true
153
- }, warning.hint)
154
- )
155
- )
18
+ warning && React.createElement(Text, {
19
+ color: 'yellow'
20
+ }, `Warning: ${warning.message} ${warning.hint}`)
156
21
  );
157
22
  }
@@ -5,29 +5,28 @@
5
5
  import React, { useState, useRef } from 'react';
6
6
  import { Box, Text, useInput } from 'ink';
7
7
  import { theme } from '../design/themeColors.js';
8
- import { OPENAI_MODELS, getGPT5Models, DEFAULT_OPENAI_MODEL } from '../../config/openai_models.js';
8
+ import { AI_MODELS, getAllModelIds, DEFAULT_MODEL } from '../../config/ai_models.js';
9
9
 
10
10
  const STEPS = {
11
- OPENAI_KEY: 'openai_key',
12
- OPENAI_MODEL: 'openai_model',
13
- OPENAI_EFFORT: 'openai_effort'
11
+ API_KEY: 'api_key',
12
+ MODEL: 'model',
13
+ REASONING_EFFORT: 'reasoning_effort'
14
14
  };
15
15
 
16
16
  export function SetupWizard({ onComplete, onCancel }) {
17
- const [step, setStep] = useState(STEPS.OPENAI_KEY);
17
+ const [step, setStep] = useState(STEPS.API_KEY);
18
18
  const [selectedIndex, setSelectedIndex] = useState(0);
19
19
  const [textInput, setTextInput] = useState('');
20
20
 
21
21
  // settings를 ref로 관리하여 stale closure 문제 방지
22
22
  const settingsRef = useRef({
23
- AI_PROVIDER: 'openai',
24
- OPENAI_API_KEY: '',
25
- OPENAI_MODEL: DEFAULT_OPENAI_MODEL,
26
- OPENAI_REASONING_EFFORT: 'medium'
23
+ API_KEY: '',
24
+ MODEL: DEFAULT_MODEL,
25
+ REASONING_EFFORT: 'medium'
27
26
  });
28
27
 
29
28
  // 현재 스텝이 텍스트 입력인지 선택지인지 판단
30
- const isTextInputStep = step === STEPS.OPENAI_KEY;
29
+ const isTextInputStep = step === STEPS.API_KEY;
31
30
 
32
31
  const completeSetup = () => {
33
32
  if (onComplete) {
@@ -38,29 +37,35 @@ export function SetupWizard({ onComplete, onCancel }) {
38
37
 
39
38
  const handleStepComplete = () => {
40
39
  switch (step) {
41
- case STEPS.OPENAI_KEY:
40
+ case STEPS.API_KEY:
42
41
  if (!textInput.trim()) {
43
42
  return;
44
43
  }
45
- settingsRef.current.OPENAI_API_KEY = textInput.trim();
46
- setStep(STEPS.OPENAI_MODEL);
44
+ settingsRef.current.API_KEY = textInput.trim();
45
+ setStep(STEPS.MODEL);
47
46
  setTextInput('');
48
47
  setSelectedIndex(0);
49
48
  break;
50
49
 
51
- case STEPS.OPENAI_MODEL:
52
- const models = getGPT5Models();
50
+ case STEPS.MODEL:
51
+ const models = getAllModelIds();
53
52
  const selectedModel = models[selectedIndex];
54
- settingsRef.current.OPENAI_MODEL = selectedModel;
55
-
56
- // gpt-5 모델은 모두 reasoning effort 설정 필요
57
- setStep(STEPS.OPENAI_EFFORT);
58
- setSelectedIndex(2); // default to 'medium'
53
+ settingsRef.current.MODEL = selectedModel;
54
+
55
+ // reasoning을 지원하는 모델만 reasoning effort 설정
56
+ const modelInfo = AI_MODELS[selectedModel];
57
+ if (modelInfo && modelInfo.supportsReasoning) {
58
+ setStep(STEPS.REASONING_EFFORT);
59
+ setSelectedIndex(2); // default to 'medium'
60
+ } else {
61
+ // reasoning을 지원하지 않으면 바로 완료
62
+ completeSetup();
63
+ }
59
64
  break;
60
65
 
61
- case STEPS.OPENAI_EFFORT:
66
+ case STEPS.REASONING_EFFORT:
62
67
  const efforts = ['minimal', 'low', 'medium', 'high'];
63
- settingsRef.current.OPENAI_REASONING_EFFORT = efforts[selectedIndex];
68
+ settingsRef.current.REASONING_EFFORT = efforts[selectedIndex];
64
69
  // 완료
65
70
  completeSetup();
66
71
  break;
@@ -112,9 +117,9 @@ export function SetupWizard({ onComplete, onCancel }) {
112
117
 
113
118
  const getMaxIndexForStep = (currentStep) => {
114
119
  switch (currentStep) {
115
- case STEPS.OPENAI_MODEL:
116
- return getGPT5Models().length - 1;
117
- case STEPS.OPENAI_EFFORT:
120
+ case STEPS.MODEL:
121
+ return getAllModelIds().length - 1;
122
+ case STEPS.REASONING_EFFORT:
118
123
  return 3; // 4 options
119
124
  default:
120
125
  return 0;
@@ -137,10 +142,10 @@ export function SetupWizard({ onComplete, onCancel }) {
137
142
 
138
143
  const renderStep = () => {
139
144
  switch (step) {
140
- case STEPS.OPENAI_KEY:
145
+ case STEPS.API_KEY:
141
146
  return React.createElement(Box, { flexDirection: 'column' },
142
- React.createElement(Text, { bold: true, color: theme.text.accent }, '1. OpenAI API Key:'),
143
- React.createElement(Text, { color: theme.text.secondary }, ' Get your API key from: https://platform.openai.com/account/api-keys'),
147
+ React.createElement(Text, { bold: true, color: theme.text.accent }, '1. API Key:'),
148
+ React.createElement(Text, { color: theme.text.secondary }, ' Get your API key from: https://platform.openai.com/account/api-keys or https://console.anthropic.com/settings/keys'),
144
149
  React.createElement(Text, null),
145
150
  React.createElement(Box, {
146
151
  borderStyle: 'round',
@@ -153,13 +158,13 @@ export function SetupWizard({ onComplete, onCancel }) {
153
158
  React.createElement(Text, { dimColor: true }, 'Type your API key and press Enter')
154
159
  );
155
160
 
156
- case STEPS.OPENAI_MODEL:
161
+ case STEPS.MODEL:
157
162
  return React.createElement(Box, { flexDirection: 'column' },
158
163
  React.createElement(Text, { bold: true, color: theme.text.accent }, '2. Choose Model:'),
159
164
  React.createElement(Text, null),
160
165
  renderOptions(
161
- getGPT5Models().map(modelId => {
162
- const model = OPENAI_MODELS[modelId];
166
+ getAllModelIds().map(modelId => {
167
+ const model = AI_MODELS[modelId];
163
168
  return `${modelId} (${model.name})`;
164
169
  })
165
170
  ),
@@ -167,7 +172,7 @@ export function SetupWizard({ onComplete, onCancel }) {
167
172
  React.createElement(Text, { dimColor: true }, '↑↓: Navigate Enter: Confirm')
168
173
  );
169
174
 
170
- case STEPS.OPENAI_EFFORT:
175
+ case STEPS.REASONING_EFFORT:
171
176
  return React.createElement(Box, { flexDirection: 'column' },
172
177
  React.createElement(Text, { bold: true, color: theme.text.accent }, '3. Reasoning Effort:'),
173
178
  React.createElement(Text, null),
@@ -1,12 +1,12 @@
1
- import OpenAI from "openai";
2
1
  import dotenv from "dotenv";
3
2
  import path from 'path';
4
3
  import { safeWriteFile } from '../util/safe_fs.js';
5
4
  import { logger } from "./log.js";
6
- import { ensureConfigDirectory, loadSettings, SETTINGS_FILE } from "../util/config.js";
7
- import { getReasoningModels, supportsReasoningEffort, DEFAULT_OPENAI_MODEL } from "../config/openai_models.js";
5
+ import { ensureConfigDirectory, loadSettings, SETTINGS_FILE, PAYLOAD_LLM_LOG_DIR } from "../util/config.js";
6
+ import { getReasoningModels, supportsReasoningEffort, DEFAULT_MODEL, getModelInfo } from "../config/ai_models.js";
8
7
  import { createDebugLogger } from "../util/debug_log.js";
9
8
  import { formatReadFileStdout } from "../util/output_formatter.js";
9
+ import { UnifiedLLMClient } from "../LLMClient/index.js";
10
10
 
11
11
  const debugLog = createDebugLogger('ai_request.log', 'ai_request');
12
12
 
@@ -14,70 +14,81 @@ const debugLog = createDebugLogger('ai_request.log', 'ai_request');
14
14
  dotenv.config({ quiet: true });
15
15
 
16
16
  // AI 클라이언트를 초기화합니다.
17
- let openaiClient = null;
17
+ let llmClient = null;
18
18
  let currentAbortController = null;
19
19
 
20
- async function getOpenAIClient() {
21
- debugLog('[getOpenAIClient] Called');
22
- if (openaiClient) {
23
- debugLog('[getOpenAIClient] Using cached client');
24
- return openaiClient;
20
+ async function getLLMClient() {
21
+ debugLog('[getLLMClient] Called');
22
+ if (llmClient) {
23
+ debugLog('[getLLMClient] Using cached client');
24
+ return llmClient;
25
25
  }
26
26
 
27
- debugLog('[getOpenAIClient] Creating new client');
27
+ debugLog('[getLLMClient] Creating new client');
28
28
  // Settings 로드 및 환경변수 설정
29
29
  await ensureConfigDirectory();
30
30
  const settings = await loadSettings();
31
- debugLog(`[getOpenAIClient] Settings loaded: AI_PROVIDER=${settings?.AI_PROVIDER}, OPENAI_MODEL=${settings?.OPENAI_MODEL}, OPENAI_REASONING_EFFORT=${settings?.OPENAI_REASONING_EFFORT}`);
31
+ debugLog(`[getLLMClient] Settings loaded: MODEL=${settings?.MODEL}, REASONING_EFFORT=${settings?.REASONING_EFFORT}`);
32
32
 
33
- if (!process.env.OPENAI_API_KEY && settings?.OPENAI_API_KEY) {
34
- process.env.OPENAI_API_KEY = settings.OPENAI_API_KEY;
35
- debugLog('[getOpenAIClient] OPENAI_API_KEY loaded from settings');
33
+ if (!process.env.API_KEY && settings?.API_KEY) {
34
+ process.env.API_KEY = settings.API_KEY;
35
+ debugLog('[getLLMClient] API_KEY loaded from settings');
36
36
  }
37
37
 
38
38
  // 모델 설정도 환경변수에 로드
39
- if (!process.env.OPENAI_MODEL && settings?.OPENAI_MODEL) {
40
- process.env.OPENAI_MODEL = settings.OPENAI_MODEL;
41
- debugLog(`[getOpenAIClient] OPENAI_MODEL set to: ${settings.OPENAI_MODEL}`);
39
+ if (!process.env.MODEL && settings?.MODEL) {
40
+ process.env.MODEL = settings.MODEL;
41
+ debugLog(`[getLLMClient] MODEL set to: ${settings.MODEL}`);
42
42
  }
43
43
 
44
44
  // reasoning_effort 설정도 환경변수에 로드
45
- if (!process.env.OPENAI_REASONING_EFFORT && settings?.OPENAI_REASONING_EFFORT) {
46
- process.env.OPENAI_REASONING_EFFORT = settings.OPENAI_REASONING_EFFORT;
47
- debugLog(`[getOpenAIClient] OPENAI_REASONING_EFFORT set to: ${settings.OPENAI_REASONING_EFFORT}`);
45
+ if (!process.env.REASONING_EFFORT && settings?.REASONING_EFFORT) {
46
+ process.env.REASONING_EFFORT = settings.REASONING_EFFORT;
47
+ debugLog(`[getLLMClient] REASONING_EFFORT set to: ${settings.REASONING_EFFORT}`);
48
48
  }
49
49
 
50
- if (!process.env.OPENAI_API_KEY) {
51
- debugLog('[getOpenAIClient] ERROR: OPENAI_API_KEY not configured');
52
- throw new Error(`OPENAI_API_KEY is not configured. Please update ${SETTINGS_FILE}.`);
50
+ if (!process.env.API_KEY) {
51
+ debugLog('[getLLMClient] ERROR: API_KEY not configured');
52
+ throw new Error(`API_KEY is not configured. Please update ${SETTINGS_FILE}.`);
53
53
  }
54
54
 
55
- debugLog('[getOpenAIClient] Initializing OpenAI client with API key (first 10 chars): ' + process.env.OPENAI_API_KEY.substring(0, 10) + '...');
56
- openaiClient = new OpenAI({
57
- apiKey: process.env.OPENAI_API_KEY
55
+ const currentModel = process.env.MODEL || settings?.MODEL || DEFAULT_MODEL;
56
+ const modelInfo = getModelInfo(currentModel);
57
+ const provider = modelInfo?.provider || 'openai';
58
+
59
+ debugLog('[getLLMClient] Initializing UnifiedLLMClient with API key (first 10 chars): ' + process.env.API_KEY.substring(0, 10) + '...');
60
+ debugLog(`[getLLMClient] Model: ${currentModel}, Provider: ${provider}`);
61
+ llmClient = new UnifiedLLMClient({
62
+ apiKey: process.env.API_KEY,
63
+ model: currentModel,
64
+ provider: provider,
65
+ logDir: PAYLOAD_LLM_LOG_DIR
58
66
  });
59
67
 
60
- debugLog('[getOpenAIClient] Client created successfully');
61
- return openaiClient;
68
+ debugLog('[getLLMClient] Client created successfully');
69
+ return llmClient;
62
70
  }
63
71
 
64
72
  async function getCurrentProvider() {
65
- return 'openai';
73
+ const settings = await loadSettings();
74
+ const currentModel = process.env.MODEL || settings?.MODEL || DEFAULT_MODEL;
75
+ const modelInfo = getModelInfo(currentModel);
76
+ return modelInfo?.provider || 'openai';
66
77
  }
67
78
 
68
79
  // Provider에 맞는 모델 이름 가져오기
69
80
  async function getModelForProvider() {
70
81
  debugLog('[getModelForProvider] Called');
71
82
  const settings = await loadSettings();
72
- const model = process.env.OPENAI_MODEL || settings?.OPENAI_MODEL || DEFAULT_OPENAI_MODEL;
73
- debugLog(`[getModelForProvider] Model: ${model} (from: ${process.env.OPENAI_MODEL ? 'env' : settings?.OPENAI_MODEL ? 'settings' : 'default'})`);
83
+ const model = process.env.MODEL || settings?.MODEL || DEFAULT_MODEL;
84
+ debugLog(`[getModelForProvider] Model: ${model} (from: ${process.env.MODEL ? 'env' : settings?.MODEL ? 'settings' : 'default'})`);
74
85
  return model;
75
86
  }
76
87
 
77
88
  // 클라이언트 및 캐시 리셋 함수 (모델 변경 시 사용)
78
89
  export function resetAIClients() {
79
90
  debugLog('[resetAIClients] Resetting AI client cache');
80
- openaiClient = null;
91
+ llmClient = null;
81
92
  currentAbortController = null;
82
93
  debugLog('[resetAIClients] Client cache cleared');
83
94
  }
@@ -431,18 +442,18 @@ export async function request(taskName, requestPayload) {
431
442
  currentAbortController = new AbortController();
432
443
 
433
444
  try {
434
- // OpenAI API 사용
435
- debugLog('[request] Getting OpenAI client');
436
- const openai = await getOpenAIClient();
445
+ // UnifiedLLMClient 사용
446
+ debugLog('[request] Getting LLM client');
447
+ const client = await getLLMClient();
437
448
 
438
449
  // reasoning 설정 추가 (OpenAI 추론 모델용)
439
450
  const settings = await loadSettings();
440
- const reasoningEffort = settings?.OPENAI_REASONING_EFFORT || process.env.OPENAI_REASONING_EFFORT || 'medium';
451
+ const reasoningEffort = settings?.REASONING_EFFORT || process.env.REASONING_EFFORT || 'medium';
441
452
  debugLog(`[request] Reasoning effort: ${reasoningEffort}`);
442
453
 
443
454
  // reasoning을 지원하는 모델인지 확인
444
455
  const reasoningModels = getReasoningModels();
445
- const currentModel = payloadCopy.model || settings?.OPENAI_MODEL || DEFAULT_OPENAI_MODEL;
456
+ const currentModel = payloadCopy.model || settings?.MODEL || DEFAULT_MODEL;
446
457
  debugLog(`[request] Current model: ${currentModel}`);
447
458
 
448
459
  if (reasoningModels.some(m => currentModel.startsWith(m))) {
@@ -481,16 +492,20 @@ export async function request(taskName, requestPayload) {
481
492
  debugLog(`[request] Model does not support reasoning`);
482
493
  }
483
494
 
484
- originalRequest = JSON.parse(JSON.stringify(payloadCopy)); // 원본 OpenAI 요청
495
+ originalRequest = JSON.parse(JSON.stringify(payloadCopy)); // 원본 요청
485
496
  debugLog(`[request] Request prepared - logging to file`);
486
497
 
487
- // 로그는 원본 OpenAI 포맷으로 저장 (API 호출 전)
498
+ // 로그는 원본 포맷으로 저장 (API 호출 전)
488
499
  await logger(`${taskName}_REQ`, originalRequest, provider);
489
- debugLog(`[request] Request logged - calling OpenAI API`);
500
+ debugLog(`[request] Request logged - calling LLM API`);
490
501
 
491
- response = await openai.responses.create(originalRequest, {
502
+ // AbortController의 signal을 options로 전달
503
+ const requestOptions = {
492
504
  signal: currentAbortController.signal
493
- });
505
+ };
506
+ debugLog(`[request] Calling client.response with abort signal`);
507
+
508
+ response = await client.response(originalRequest, requestOptions);
494
509
  debugLog(`[request] Response received - id: ${response?.id}, status: ${response?.status}, output items: ${response?.output?.length || 0}`);
495
510
 
496
511
  // 원본 응답을 깊은 복사로 보존 (이후 수정으로부터 보호)
@@ -3,7 +3,7 @@ import { homedir } from 'os';
3
3
  import { join, dirname } from 'path';
4
4
  import { safeReadFile, safeWriteFile, safeMkdir, safeReaddir, safeStat, safeCopyFile } from './safe_fs.js';
5
5
  import { fileURLToPath } from 'url';
6
- import { DEFAULT_OPENAI_MODEL } from '../config/openai_models.js';
6
+ import { DEFAULT_MODEL } from '../config/ai_models.js';
7
7
 
8
8
  // Get project root directory (this file is in src/util/, so go up 2 levels)
9
9
  const __filename = fileURLToPath(import.meta.url);
@@ -38,12 +38,13 @@ export const CONFIG_DIR = join(getHomeDirectory(), '.aiexe');
38
38
  export const SETTINGS_FILE = join(CONFIG_DIR, 'settings.json');
39
39
  export const MCP_CONFIG_FILE = join(CONFIG_DIR, 'mcp_config.json');
40
40
  export const PAYLOAD_LOG_DIR = join(CONFIG_DIR, 'payload_log');
41
+ export const PAYLOAD_LLM_LOG_DIR = join(CONFIG_DIR, 'payload_LLM_log');
41
42
  export const DEBUG_LOG_DIR = join(CONFIG_DIR, 'debuglog');
42
43
  export const DEBUG_LOG_FILE = join(CONFIG_DIR, 'debug.txt'); // Deprecated: 호환성을 위해 유지
43
44
  const DEFAULT_SETTINGS = {
44
- OPENAI_API_KEY: '',
45
- OPENAI_MODEL: DEFAULT_OPENAI_MODEL,
46
- OPENAI_REASONING_EFFORT: 'medium', // 'minimal', 'low', 'medium', 'high'
45
+ API_KEY: '',
46
+ MODEL: DEFAULT_MODEL,
47
+ REASONING_EFFORT: 'medium', // 'minimal', 'low', 'medium', 'high'
47
48
  // 도구 활성화 옵션
48
49
  TOOLS_ENABLED: {
49
50
  edit_file_range: false, // 기본적으로 비활성화 (edit_file_replace 사용 권장)
@@ -126,3 +127,24 @@ export async function saveSettings(settings) {
126
127
  throw error;
127
128
  }
128
129
  }
130
+
131
+ /**
132
+ * API 키의 접두어를 기반으로 발급처를 판단합니다.
133
+ * @param {string} apiKey - API 키 문자열
134
+ * @returns {string|null} 발급처 이름 ("Google", "OpenAI", "Anthropic") 또는 null (알 수 없는 경우)
135
+ */
136
+ export function APIKeyIssuedFrom(apiKey) {
137
+ if (!apiKey || typeof apiKey !== 'string') {
138
+ return null;
139
+ }
140
+
141
+ if (apiKey.startsWith('AIzaSy')) {
142
+ return 'Google';
143
+ } else if (apiKey.startsWith('sk-proj-')) {
144
+ return 'OpenAI';
145
+ } else if (apiKey.startsWith('sk-ant-')) {
146
+ return 'Anthropic';
147
+ }
148
+
149
+ return null;
150
+ }