gitarsenal-cli 1.9.25 → 1.9.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.venv_status.json +1 -1
- package/package.json +1 -1
- package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
- package/python/command_manager.py +31 -48
- package/python/credentials_manager.py +45 -2
- package/python/fetch_modal_tokens.py +45 -31
- package/python/fix_modal_token.py +1 -1
- package/python/llm_debugging.py +481 -1695
- package/python/modal_container.py +103 -7
- package/python/requirements.txt +2 -1
- package/python/setup.py +2 -1
- package/python/test_modalSandboxScript.py +106 -64
package/python/llm_debugging.py
CHANGED
|
@@ -2,362 +2,189 @@ import os
|
|
|
2
2
|
import re
|
|
3
3
|
import json
|
|
4
4
|
import requests
|
|
5
|
-
import openai
|
|
6
|
-
import anthropic
|
|
7
5
|
import time
|
|
8
6
|
import getpass
|
|
7
|
+
from pathlib import Path
|
|
9
8
|
|
|
10
9
|
|
|
11
10
|
def get_stored_credentials():
|
|
12
11
|
"""Load stored credentials from ~/.gitarsenal/credentials.json"""
|
|
13
|
-
import json
|
|
14
|
-
from pathlib import Path
|
|
15
|
-
|
|
16
12
|
try:
|
|
17
13
|
credentials_file = Path.home() / ".gitarsenal" / "credentials.json"
|
|
18
14
|
if credentials_file.exists():
|
|
19
15
|
with open(credentials_file, 'r') as f:
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
else:
|
|
23
|
-
return {}
|
|
16
|
+
return json.load(f)
|
|
17
|
+
return {}
|
|
24
18
|
except Exception as e:
|
|
25
19
|
print(f"⚠️ Error loading stored credentials: {e}")
|
|
26
20
|
return {}
|
|
27
21
|
|
|
22
|
+
|
|
28
23
|
def generate_auth_context(stored_credentials):
|
|
29
|
-
"""Generate
|
|
24
|
+
"""Generate authentication context for the LLM prompt"""
|
|
30
25
|
if not stored_credentials:
|
|
31
26
|
return "No stored credentials available."
|
|
32
27
|
|
|
33
28
|
auth_context = "Available stored credentials (use actual values in commands):\n"
|
|
34
|
-
|
|
35
29
|
for key, value in stored_credentials.items():
|
|
36
|
-
# Mask the actual value for security in logs, but provide the real value
|
|
37
30
|
masked_value = value[:8] + "..." if len(value) > 8 else "***"
|
|
38
31
|
auth_context += f"- {key}: {masked_value} (actual value: {value})\n"
|
|
39
32
|
|
|
40
33
|
return auth_context
|
|
41
34
|
|
|
35
|
+
|
|
42
36
|
def get_current_debug_model():
|
|
43
37
|
"""Get the currently configured debugging model preference"""
|
|
44
|
-
return os.environ.get("GITARSENAL_DEBUG_MODEL", "
|
|
38
|
+
return os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
|
|
45
39
|
|
|
46
|
-
def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
|
|
47
|
-
"""Unified function to call LLM for debugging - routes to OpenAI, Anthropic, or OpenRouter based on configuration"""
|
|
48
|
-
current_model = get_current_debug_model()
|
|
49
|
-
|
|
50
|
-
print(f"🔍 DEBUG: Using {current_model.upper()} for debugging...")
|
|
51
|
-
|
|
52
|
-
if current_model == "anthropic":
|
|
53
|
-
# Try to get Anthropic API key if not provided
|
|
54
|
-
if not api_key:
|
|
55
|
-
# First try environment variable
|
|
56
|
-
api_key = os.environ.get("ANTHROPIC_API_KEY")
|
|
57
|
-
|
|
58
|
-
# If not in environment, try to fetch from server using fetch_modal_tokens
|
|
59
|
-
if not api_key:
|
|
60
|
-
try:
|
|
61
|
-
from fetch_modal_tokens import get_tokens
|
|
62
|
-
_, _, _, api_key = get_tokens()
|
|
63
|
-
except Exception as e:
|
|
64
|
-
print(f"⚠️ Error fetching Anthropic API key from server: {e}")
|
|
65
|
-
|
|
66
|
-
# Then try credentials manager
|
|
67
|
-
if not api_key:
|
|
68
|
-
try:
|
|
69
|
-
from credentials_manager import CredentialsManager
|
|
70
|
-
credentials_manager = CredentialsManager()
|
|
71
|
-
api_key = credentials_manager.get_anthropic_api_key()
|
|
72
|
-
except Exception as e:
|
|
73
|
-
print(f"⚠️ Error getting Anthropic API key from credentials manager: {e}")
|
|
74
|
-
|
|
75
|
-
return call_anthropic_for_debug(command, error_output, api_key, current_dir, sandbox)
|
|
76
|
-
elif current_model == "openrouter":
|
|
77
|
-
# Try to get OpenRouter API key if not provided
|
|
78
|
-
if not api_key:
|
|
79
|
-
# First try environment variable
|
|
80
|
-
api_key = os.environ.get("OPENROUTER_API_KEY")
|
|
81
|
-
|
|
82
|
-
# If not in environment, try to fetch from server using fetch_modal_tokens
|
|
83
|
-
if not api_key:
|
|
84
|
-
try:
|
|
85
|
-
from fetch_modal_tokens import get_tokens
|
|
86
|
-
# Assuming OpenRouter key is the 5th token in the tuple
|
|
87
|
-
tokens = get_tokens()
|
|
88
|
-
if len(tokens) >= 5:
|
|
89
|
-
api_key = tokens[4]
|
|
90
|
-
except Exception as e:
|
|
91
|
-
print(f"⚠️ Error fetching OpenRouter API key from server: {e}")
|
|
92
|
-
|
|
93
|
-
# Then try credentials manager
|
|
94
|
-
if not api_key:
|
|
95
|
-
try:
|
|
96
|
-
from credentials_manager import CredentialsManager
|
|
97
|
-
credentials_manager = CredentialsManager()
|
|
98
|
-
api_key = credentials_manager.get_openrouter_api_key()
|
|
99
|
-
except Exception as e:
|
|
100
|
-
print(f"⚠️ Error getting OpenRouter API key from credentials manager: {e}")
|
|
101
|
-
|
|
102
|
-
return call_openrouter_for_debug(command, error_output, api_key, current_dir, sandbox)
|
|
103
|
-
else:
|
|
104
|
-
# Default to OpenAI
|
|
105
|
-
# Try to get OpenAI API key if not provided
|
|
106
|
-
if not api_key:
|
|
107
|
-
# First try environment variable
|
|
108
|
-
api_key = os.environ.get("OPENAI_API_KEY")
|
|
109
|
-
|
|
110
|
-
# If not in environment, try to fetch from server using fetch_modal_tokens
|
|
111
|
-
if not api_key:
|
|
112
|
-
try:
|
|
113
|
-
from fetch_modal_tokens import get_tokens
|
|
114
|
-
_, _, api_key, _ = get_tokens()
|
|
115
|
-
except Exception as e:
|
|
116
|
-
print(f"⚠️ Error fetching OpenAI API key from server: {e}")
|
|
117
|
-
|
|
118
|
-
# Then try credentials manager
|
|
119
|
-
if not api_key:
|
|
120
|
-
try:
|
|
121
|
-
from credentials_manager import CredentialsManager
|
|
122
|
-
credentials_manager = CredentialsManager()
|
|
123
|
-
api_key = credentials_manager.get_openai_api_key()
|
|
124
|
-
except Exception as e:
|
|
125
|
-
print(f"⚠️ Error getting OpenAI API key from credentials manager: {e}")
|
|
126
|
-
|
|
127
|
-
return call_openai_for_debug(command, error_output, api_key, current_dir, sandbox)
|
|
128
40
|
|
|
129
|
-
def
|
|
130
|
-
"""
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
41
|
+
def _to_str(maybe_bytes):
|
|
42
|
+
"""Convert bytes to string safely"""
|
|
43
|
+
try:
|
|
44
|
+
return maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes
|
|
45
|
+
except UnicodeDecodeError:
|
|
46
|
+
if isinstance(maybe_bytes, (bytes, bytearray)):
|
|
47
|
+
return maybe_bytes.decode('utf-8', errors='replace')
|
|
48
|
+
return str(maybe_bytes)
|
|
49
|
+
except Exception:
|
|
50
|
+
return str(maybe_bytes)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def get_api_key(provider):
|
|
54
|
+
"""Get API key for the specified provider from multiple sources"""
|
|
55
|
+
env_var_map = {
|
|
56
|
+
"openai": "OPENAI_API_KEY",
|
|
57
|
+
"anthropic": "ANTHROPIC_API_KEY",
|
|
58
|
+
"openrouter": "OPENROUTER_API_KEY",
|
|
59
|
+
"groq": "GROQ_API_KEY"
|
|
60
|
+
}
|
|
136
61
|
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
if isinstance(maybe_bytes, (bytes, bytearray)):
|
|
144
|
-
return maybe_bytes.decode('utf-8', errors='replace')
|
|
145
|
-
else:
|
|
146
|
-
return str(maybe_bytes)
|
|
147
|
-
except Exception:
|
|
148
|
-
# Last resort fallback
|
|
149
|
-
return str(maybe_bytes)
|
|
150
|
-
|
|
151
|
-
# Skip debugging for certain commands that commonly return non-zero exit codes
|
|
152
|
-
# but aren't actually errors (like test commands)
|
|
153
|
-
if command.strip().startswith("test "):
|
|
154
|
-
print("🔍 Skipping debugging for test command - non-zero exit code is expected behavior")
|
|
155
|
-
return None
|
|
156
|
-
|
|
157
|
-
# Validate error_output - if it's empty, we can't debug effectively
|
|
158
|
-
if not error_output or not error_output.strip():
|
|
159
|
-
print("⚠️ Error output is empty. Cannot effectively debug the command.")
|
|
160
|
-
print("⚠️ Skipping OpenAI debugging due to lack of error information.")
|
|
161
|
-
return None
|
|
62
|
+
key_file_map = {
|
|
63
|
+
"openai": "openai_key",
|
|
64
|
+
"anthropic": "anthropic_key",
|
|
65
|
+
"openrouter": "openrouter_key",
|
|
66
|
+
"groq": "groq_key"
|
|
67
|
+
}
|
|
162
68
|
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
|
|
170
|
-
if api_key:
|
|
171
|
-
print(f"🔍 DEBUG: Environment API key value: {api_key}")
|
|
172
|
-
|
|
173
|
-
# If not in environment, try to fetch from server using fetch_modal_tokens
|
|
174
|
-
if not api_key:
|
|
175
|
-
try:
|
|
176
|
-
print("🔍 DEBUG: Trying to fetch API key from server...")
|
|
177
|
-
from fetch_modal_tokens import get_tokens
|
|
178
|
-
_, _, api_key, _ = get_tokens()
|
|
179
|
-
if api_key:
|
|
180
|
-
# Set in environment for this session
|
|
181
|
-
os.environ["OPENAI_API_KEY"] = api_key
|
|
182
|
-
else:
|
|
183
|
-
print("⚠️ Could not fetch OpenAI API key from server")
|
|
184
|
-
except Exception as e:
|
|
185
|
-
print(f"⚠️ Error fetching API key from server: {e}")
|
|
186
|
-
|
|
187
|
-
# Store the API key in a persistent file if found
|
|
188
|
-
if api_key:
|
|
189
|
-
try:
|
|
190
|
-
os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
|
|
191
|
-
with open(os.path.expanduser("~/.gitarsenal/openai_key"), "w") as f:
|
|
192
|
-
f.write(api_key)
|
|
193
|
-
print("✅ Saved OpenAI API key for future use")
|
|
194
|
-
except Exception as e:
|
|
195
|
-
print(f"⚠️ Could not save API key: {e}")
|
|
196
|
-
|
|
197
|
-
# Try to load from saved file if not in environment
|
|
198
|
-
if not api_key:
|
|
199
|
-
try:
|
|
200
|
-
key_file = os.path.expanduser("~/.gitarsenal/openai_key")
|
|
201
|
-
print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
|
|
202
|
-
if os.path.exists(key_file):
|
|
203
|
-
with open(key_file, "r") as f:
|
|
204
|
-
api_key = f.read().strip()
|
|
205
|
-
if api_key:
|
|
206
|
-
print("✅ Loaded OpenAI API key from saved file")
|
|
207
|
-
print(f"🔍 DEBUG: API key from file: {api_key}")
|
|
208
|
-
print(f"🔍 DEBUG: API key length: {len(api_key)}")
|
|
209
|
-
# Also set in environment for this session
|
|
210
|
-
os.environ["OPENAI_API_KEY"] = api_key
|
|
211
|
-
else:
|
|
212
|
-
print("🔍 DEBUG: Saved file exists but is empty")
|
|
213
|
-
else:
|
|
214
|
-
print("🔍 DEBUG: No saved API key file found")
|
|
215
|
-
except Exception as e:
|
|
216
|
-
print(f"⚠️ Could not load saved API key: {e}")
|
|
217
|
-
|
|
218
|
-
# Then try credentials manager
|
|
219
|
-
if not api_key:
|
|
220
|
-
print("🔍 DEBUG: Trying credentials manager...")
|
|
221
|
-
try:
|
|
222
|
-
from credentials_manager import CredentialsManager
|
|
223
|
-
credentials_manager = CredentialsManager()
|
|
224
|
-
api_key = credentials_manager.get_openai_api_key()
|
|
225
|
-
if api_key:
|
|
226
|
-
print(f"🔍 DEBUG: API key from credentials manager: Found")
|
|
227
|
-
print(f"🔍 DEBUG: Credentials manager API key value: {api_key}")
|
|
228
|
-
# Set in environment for this session
|
|
229
|
-
os.environ["OPENAI_API_KEY"] = api_key
|
|
230
|
-
else:
|
|
231
|
-
print(f"🔍 DEBUG: API key from credentials manager: Not found")
|
|
232
|
-
except ImportError as e:
|
|
233
|
-
print(f"🔍 DEBUG: Credentials manager not available: {e}")
|
|
234
|
-
# Fall back to direct input if credentials_manager is not available
|
|
235
|
-
pass
|
|
236
|
-
|
|
237
|
-
# Finally, prompt the user if still no API key
|
|
238
|
-
if not api_key:
|
|
239
|
-
print("🔍 DEBUG: No API key found in any source, prompting user...")
|
|
240
|
-
print("\n" + "="*60)
|
|
241
|
-
print("🔑 OPENAI API KEY REQUIRED FOR DEBUGGING")
|
|
242
|
-
print("="*60)
|
|
243
|
-
print("To debug failed commands, an OpenAI API key is needed.")
|
|
244
|
-
print("📝 Please paste your OpenAI API key below:")
|
|
245
|
-
print(" (Your input will be hidden for security)")
|
|
246
|
-
print("-" * 60)
|
|
247
|
-
|
|
248
|
-
try:
|
|
249
|
-
api_key = getpass.getpass("OpenAI API Key: ").strip()
|
|
250
|
-
if not api_key:
|
|
251
|
-
print("❌ No API key provided. Skipping debugging.")
|
|
252
|
-
return None
|
|
253
|
-
print("✅ API key received successfully!")
|
|
254
|
-
print(f"🔍 DEBUG: User-provided API key: {api_key}")
|
|
255
|
-
# Save the API key to environment for future use in this session
|
|
256
|
-
os.environ["OPENAI_API_KEY"] = api_key
|
|
257
|
-
except KeyboardInterrupt:
|
|
258
|
-
print("\n❌ API key input cancelled by user.")
|
|
259
|
-
return None
|
|
260
|
-
except Exception as e:
|
|
261
|
-
print(f"❌ Error getting API key: {e}")
|
|
262
|
-
return None
|
|
69
|
+
token_index_map = {
|
|
70
|
+
"openai": 2,
|
|
71
|
+
"anthropic": 3,
|
|
72
|
+
"openrouter": 4,
|
|
73
|
+
"groq": 5,
|
|
74
|
+
}
|
|
263
75
|
|
|
264
|
-
|
|
265
|
-
if not
|
|
266
|
-
print("❌ No OpenAI API key available. Cannot perform LLM debugging.")
|
|
267
|
-
print("💡 To enable LLM debugging, set the OPENAI_API_KEY environment variable")
|
|
76
|
+
env_var = env_var_map.get(provider)
|
|
77
|
+
if not env_var:
|
|
268
78
|
return None
|
|
269
79
|
|
|
270
|
-
#
|
|
80
|
+
# Try environment variable first
|
|
81
|
+
api_key = os.environ.get(env_var)
|
|
82
|
+
if api_key:
|
|
83
|
+
return api_key
|
|
271
84
|
|
|
272
|
-
#
|
|
273
|
-
|
|
85
|
+
# Try fetch from server
|
|
86
|
+
try:
|
|
87
|
+
from fetch_modal_tokens import get_tokens
|
|
88
|
+
tokens = get_tokens()
|
|
89
|
+
token_index = token_index_map.get(provider)
|
|
90
|
+
if token_index is not None and len(tokens) > token_index:
|
|
91
|
+
api_key = tokens[token_index]
|
|
92
|
+
if api_key:
|
|
93
|
+
os.environ[env_var] = api_key
|
|
94
|
+
return api_key
|
|
95
|
+
except Exception:
|
|
96
|
+
pass
|
|
97
|
+
|
|
98
|
+
# Try credentials manager
|
|
99
|
+
try:
|
|
100
|
+
from credentials_manager import CredentialsManager
|
|
101
|
+
credentials_manager = CredentialsManager()
|
|
102
|
+
method_name = f"get_{provider}_api_key"
|
|
103
|
+
if hasattr(credentials_manager, method_name):
|
|
104
|
+
api_key = getattr(credentials_manager, method_name)()
|
|
105
|
+
if api_key:
|
|
106
|
+
os.environ[env_var] = api_key
|
|
107
|
+
return api_key
|
|
108
|
+
except Exception:
|
|
109
|
+
pass
|
|
110
|
+
|
|
111
|
+
# Try saved file
|
|
112
|
+
try:
|
|
113
|
+
key_file = Path.home() / ".gitarsenal" / key_file_map[provider]
|
|
114
|
+
if key_file.exists():
|
|
115
|
+
api_key = key_file.read_text().strip()
|
|
116
|
+
if api_key:
|
|
117
|
+
os.environ[env_var] = api_key
|
|
118
|
+
return api_key
|
|
119
|
+
except Exception:
|
|
120
|
+
pass
|
|
121
|
+
|
|
122
|
+
return None
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def save_api_key(provider, api_key):
|
|
126
|
+
"""Save API key to persistent file"""
|
|
127
|
+
key_file_map = {
|
|
128
|
+
"openai": "openai_key",
|
|
129
|
+
"anthropic": "anthropic_key",
|
|
130
|
+
"openrouter": "openrouter_key",
|
|
131
|
+
"groq": "groq_key"
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
try:
|
|
135
|
+
gitarsenal_dir = Path.home() / ".gitarsenal"
|
|
136
|
+
gitarsenal_dir.mkdir(exist_ok=True)
|
|
137
|
+
key_file = gitarsenal_dir / key_file_map[provider]
|
|
138
|
+
key_file.write_text(api_key)
|
|
139
|
+
except Exception as e:
|
|
140
|
+
print(f"⚠️ Could not save {provider} API key: {e}")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def gather_context(sandbox, current_dir):
|
|
144
|
+
"""Gather system and directory context for debugging"""
|
|
274
145
|
system_info = ""
|
|
275
|
-
|
|
146
|
+
directory_context = ""
|
|
276
147
|
file_context = ""
|
|
277
148
|
|
|
278
|
-
if sandbox:
|
|
279
|
-
|
|
280
|
-
print("🔍 Getting system information for better debugging...")
|
|
281
|
-
|
|
282
|
-
# Get OS information
|
|
283
|
-
os_info_cmd = """
|
|
284
|
-
echo "OS Information:"
|
|
285
|
-
cat /etc/os-release 2>/dev/null || echo "OS release info not available"
|
|
286
|
-
echo -e "\nKernel Information:"
|
|
287
|
-
uname -a
|
|
288
|
-
echo -e "\nPython Information:"
|
|
289
|
-
python --version
|
|
290
|
-
pip --version
|
|
291
|
-
echo -e "\nPackage Manager:"
|
|
292
|
-
which apt 2>/dev/null && echo "apt available" || echo "apt not available"
|
|
293
|
-
which yum 2>/dev/null && echo "yum available" || echo "yum not available"
|
|
294
|
-
which dnf 2>/dev/null && echo "dnf available" || echo "dnf not available"
|
|
295
|
-
which apk 2>/dev/null && echo "apk available" || echo "apk not available"
|
|
296
|
-
echo -e "\nEnvironment Variables:"
|
|
297
|
-
env | grep -E "^(PATH|PYTHON|VIRTUAL_ENV|HOME|USER|SHELL|LANG)" || echo "No relevant env vars found"
|
|
298
|
-
"""
|
|
299
|
-
|
|
300
|
-
os_result = sandbox.exec("bash", "-c", os_info_cmd)
|
|
301
|
-
os_output = ""
|
|
302
|
-
for line in os_result.stdout:
|
|
303
|
-
os_output += _to_str(line)
|
|
304
|
-
os_result.wait()
|
|
305
|
-
|
|
306
|
-
system_info = f"""
|
|
307
|
-
System Information:
|
|
308
|
-
{os_output}
|
|
309
|
-
"""
|
|
310
|
-
print("✅ System information gathered successfully")
|
|
311
|
-
except Exception as e:
|
|
312
|
-
print(f"⚠️ Error getting system information: {e}")
|
|
313
|
-
system_info = "System information not available\n"
|
|
149
|
+
if not sandbox:
|
|
150
|
+
return system_info, directory_context, file_context
|
|
314
151
|
|
|
315
|
-
|
|
152
|
+
# Get system information
|
|
153
|
+
try:
|
|
154
|
+
os_info_cmd = """
|
|
155
|
+
echo "OS Information:"
|
|
156
|
+
cat /etc/os-release 2>/dev/null || echo "OS release info not available"
|
|
157
|
+
echo -e "\nKernel Information:"
|
|
158
|
+
uname -a
|
|
159
|
+
echo -e "\nPython Information:"
|
|
160
|
+
python --version
|
|
161
|
+
pip --version
|
|
162
|
+
"""
|
|
163
|
+
|
|
164
|
+
os_result = sandbox.exec("bash", "-c", os_info_cmd)
|
|
165
|
+
os_output = ""
|
|
166
|
+
for line in os_result.stdout:
|
|
167
|
+
os_output += _to_str(line)
|
|
168
|
+
os_result.wait()
|
|
169
|
+
|
|
170
|
+
system_info = f"\nSystem Information:\n{os_output}"
|
|
171
|
+
except Exception:
|
|
172
|
+
system_info = "System information not available\n"
|
|
173
|
+
|
|
174
|
+
# Get directory context
|
|
175
|
+
if current_dir:
|
|
316
176
|
try:
|
|
317
|
-
# print("🔍 Getting directory context for better debugging...")
|
|
318
|
-
|
|
319
|
-
# Get current directory contents
|
|
320
177
|
ls_result = sandbox.exec("bash", "-c", "ls -la")
|
|
321
178
|
ls_output = ""
|
|
322
179
|
for line in ls_result.stdout:
|
|
323
180
|
ls_output += _to_str(line)
|
|
324
181
|
ls_result.wait()
|
|
325
182
|
|
|
326
|
-
|
|
327
|
-
parent_result = sandbox.exec("bash", "-c", "ls -la ../")
|
|
328
|
-
parent_ls = ""
|
|
329
|
-
for line in parent_result.stdout:
|
|
330
|
-
parent_ls += _to_str(line)
|
|
331
|
-
parent_result.wait()
|
|
332
|
-
|
|
333
|
-
directory_context = f"""
|
|
334
|
-
Current directory contents:
|
|
335
|
-
{ls_output}
|
|
336
|
-
|
|
337
|
-
Parent directory contents:
|
|
338
|
-
{parent_ls}
|
|
339
|
-
"""
|
|
340
|
-
print("✅ Directory context gathered successfully")
|
|
183
|
+
directory_context = f"\nCurrent directory contents:\n{ls_output}"
|
|
341
184
|
|
|
342
|
-
# Check for
|
|
343
|
-
|
|
185
|
+
# Check for common config files
|
|
186
|
+
common_config_files = ["package.json", "requirements.txt", "pyproject.toml", "setup.py"]
|
|
344
187
|
relevant_files = []
|
|
345
|
-
error_files = re.findall(r'(?:No such file or directory|cannot open|not found): ([^\s:]+)', error_output)
|
|
346
|
-
if error_files:
|
|
347
|
-
for file_path in error_files:
|
|
348
|
-
# Clean up the file path
|
|
349
|
-
file_path = file_path.strip("'\"")
|
|
350
|
-
if not os.path.isabs(file_path):
|
|
351
|
-
file_path = os.path.join(current_dir, file_path)
|
|
352
|
-
|
|
353
|
-
# Try to get the parent directory if the file doesn't exist
|
|
354
|
-
if '/' in file_path:
|
|
355
|
-
parent_file_dir = os.path.dirname(file_path)
|
|
356
|
-
relevant_files.append(parent_file_dir)
|
|
357
|
-
|
|
358
|
-
# Look for package.json, requirements.txt, etc.
|
|
359
|
-
common_config_files = ["package.json", "requirements.txt", "pyproject.toml", "setup.py",
|
|
360
|
-
"Pipfile", "Dockerfile", "docker-compose.yml", "Makefile"]
|
|
361
188
|
|
|
362
189
|
for config_file in common_config_files:
|
|
363
190
|
check_cmd = f"test -f {current_dir}/{config_file}"
|
|
@@ -366,65 +193,32 @@ Parent directory contents:
|
|
|
366
193
|
if check_result.returncode == 0:
|
|
367
194
|
relevant_files.append(f"{current_dir}/{config_file}")
|
|
368
195
|
|
|
369
|
-
# Get content of relevant files
|
|
196
|
+
# Get content of relevant files (limit to 2)
|
|
370
197
|
if relevant_files:
|
|
371
198
|
file_context = "\nRelevant file contents:\n"
|
|
372
|
-
for file_path in relevant_files[:2]:
|
|
199
|
+
for file_path in relevant_files[:2]:
|
|
373
200
|
try:
|
|
374
|
-
|
|
375
|
-
|
|
376
|
-
|
|
201
|
+
cat_result = sandbox.exec("bash", "-c", f"cat {file_path}")
|
|
202
|
+
file_content = ""
|
|
203
|
+
for line in cat_result.stdout:
|
|
204
|
+
file_content += _to_str(line)
|
|
205
|
+
cat_result.wait()
|
|
377
206
|
|
|
378
|
-
if
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
cat_result.wait()
|
|
386
|
-
|
|
387
|
-
# Truncate if too long
|
|
388
|
-
if len(file_content) > 1000:
|
|
389
|
-
file_content = file_content[:1000] + "\n... (truncated)"
|
|
390
|
-
|
|
391
|
-
file_context += f"\n--- {file_path} ---\n{file_content}\n"
|
|
392
|
-
else:
|
|
393
|
-
# It's a directory, list its contents
|
|
394
|
-
ls_cmd = f"ls -la {file_path}"
|
|
395
|
-
ls_dir_result = sandbox.exec("bash", "-c", ls_cmd)
|
|
396
|
-
dir_content = ""
|
|
397
|
-
for line in ls_dir_result.stdout:
|
|
398
|
-
dir_content += _to_str(line)
|
|
399
|
-
ls_dir_result.wait()
|
|
400
|
-
|
|
401
|
-
file_context += f"\n--- Directory: {file_path} ---\n{dir_content}\n"
|
|
402
|
-
except Exception as e:
|
|
403
|
-
print(f"⚠️ Error getting content of {file_path}: {e}")
|
|
404
|
-
|
|
405
|
-
# print(f"✅ Additional file context gathered from {len(relevant_files)} relevant files")
|
|
406
|
-
|
|
407
|
-
except Exception as e:
|
|
408
|
-
print(f"⚠️ Error getting directory context: {e}")
|
|
207
|
+
if len(file_content) > 1000:
|
|
208
|
+
file_content = file_content[:1000] + "\n... (truncated)"
|
|
209
|
+
|
|
210
|
+
file_context += f"\n--- {file_path} ---\n{file_content}\n"
|
|
211
|
+
except Exception:
|
|
212
|
+
continue
|
|
213
|
+
except Exception:
|
|
409
214
|
directory_context = f"\nCurrent directory: {current_dir}\n"
|
|
410
215
|
|
|
411
|
-
|
|
412
|
-
headers = {
|
|
413
|
-
"Content-Type": "application/json",
|
|
414
|
-
"Authorization": f"Bearer {api_key}"
|
|
415
|
-
}
|
|
216
|
+
return system_info, directory_context, file_context
|
|
416
217
|
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
print("\n" + "="*60)
|
|
422
|
-
print("DEBUG: ERROR_OUTPUT SENT TO LLM:")
|
|
423
|
-
print("="*60)
|
|
424
|
-
print(f"{error_output}")
|
|
425
|
-
print("="*60 + "\n")
|
|
426
|
-
|
|
427
|
-
prompt = f"""
|
|
218
|
+
|
|
219
|
+
def create_debug_prompt(command, error_output, system_info, directory_context, file_context, auth_context):
|
|
220
|
+
"""Create the debugging prompt for LLM"""
|
|
221
|
+
return f"""
|
|
428
222
|
I'm trying to run the following command in a Linux environment:
|
|
429
223
|
|
|
430
224
|
```
|
|
@@ -465,1193 +259,354 @@ IMPORTANT GUIDELINES:
|
|
|
465
259
|
- Analyze the error to determine what type of authentication is needed
|
|
466
260
|
- ALWAYS use the actual credential values from the AVAILABLE CREDENTIALS section above (NOT placeholders)
|
|
467
261
|
- Look for the specific API key or token needed in the auth_context and use its exact value
|
|
468
|
-
- Common patterns:
|
|
469
|
-
* wandb errors: use wandb login with the actual WANDB_API_KEY value from auth_context
|
|
470
|
-
* huggingface errors: use huggingface-cli login with the actual HF_TOKEN or HUGGINGFACE_TOKEN value from auth_context
|
|
471
|
-
* github errors: configure git credentials with the actual GITHUB_TOKEN value from auth_context
|
|
472
|
-
* kaggle errors: create ~/.kaggle/kaggle.json with the actual KAGGLE_USERNAME and KAGGLE_KEY values from auth_context
|
|
473
|
-
* API errors: export the appropriate API key as environment variable using the actual value from auth_context
|
|
474
262
|
|
|
475
263
|
5. Environment variable exports:
|
|
476
264
|
- Use export commands for API keys that need to be in environment
|
|
477
265
|
- ALWAYS use the actual credential values from auth_context, never use placeholders like "YOUR_API_KEY"
|
|
478
|
-
- Example: export OPENAI_API_KEY="sk-..." (using the actual key from auth_context)
|
|
479
|
-
|
|
480
|
-
6. CRITICAL: When using any API key, token, or credential:
|
|
481
|
-
- Find the exact value in the AVAILABLE CREDENTIALS section
|
|
482
|
-
- Use that exact value in your command
|
|
483
|
-
- Do not use generic placeholders or dummy values
|
|
484
|
-
- The auth_context contains real, usable credentials
|
|
485
266
|
|
|
486
|
-
|
|
267
|
+
6. For Git SSH authentication failures:
|
|
487
268
|
- If the error contains "Host key verification failed" or "Could not read from remote repository"
|
|
488
269
|
- ALWAYS convert SSH URLs to HTTPS URLs for public repositories
|
|
489
270
|
- Replace git@github.com:username/repo.git with https://github.com/username/repo.git
|
|
490
|
-
- This works for public repositories without authentication
|
|
491
|
-
- Example: git clone https://github.com/xg-chu/ARTalk.git
|
|
492
271
|
|
|
493
272
|
Do not provide any explanations, just the exact command to run.
|
|
494
273
|
"""
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
def extract_command_from_response(response_text):
|
|
277
|
+
"""Extract the actual command from LLM response"""
|
|
278
|
+
fix_command = response_text.strip()
|
|
495
279
|
|
|
496
|
-
#
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
"gpt-4.1-mini", # First choice: GPT-4o (most widely available)
|
|
502
|
-
]
|
|
503
|
-
|
|
504
|
-
# Check if we have a preferred model in environment
|
|
505
|
-
preferred_model = os.environ.get("OPENAI_MODEL")
|
|
506
|
-
if preferred_model:
|
|
507
|
-
# Insert the preferred model at the beginning of the list
|
|
508
|
-
models_to_try.insert(0, preferred_model)
|
|
509
|
-
# print(f"✅ Using preferred model from environment: {preferred_model}")
|
|
510
|
-
|
|
511
|
-
# Remove duplicates while preserving order
|
|
512
|
-
models_to_try = list(dict.fromkeys(models_to_try))
|
|
513
|
-
# print(f"🔍 DEBUG: Models to try: {models_to_try}")
|
|
280
|
+
# Extract from code blocks
|
|
281
|
+
if "```" in fix_command:
|
|
282
|
+
code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
|
|
283
|
+
if code_blocks:
|
|
284
|
+
fix_command = code_blocks[0].strip()
|
|
514
285
|
|
|
515
|
-
#
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
# print(f"🔍 DEBUG: API key length: {len(api_key)}")
|
|
521
|
-
# print(f"🔍 DEBUG: API key starts with: {api_key[:10]}...")
|
|
522
|
-
|
|
523
|
-
payload = {
|
|
524
|
-
"model": model_name,
|
|
525
|
-
"messages": [
|
|
526
|
-
{"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue. Analyze the issue first, understand why it's happening, then provide the command to fix it. For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found. For missing packages, use appropriate package managers (pip, apt-get, npm). For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git). For authentication, suggest login commands with placeholders."},
|
|
527
|
-
{"role": "user", "content": prompt}
|
|
528
|
-
],
|
|
529
|
-
"temperature": 0.2,
|
|
530
|
-
"max_tokens": 300
|
|
531
|
-
}
|
|
286
|
+
# If multi-line, try to find the actual command
|
|
287
|
+
if len(fix_command.split('\n')) > 1:
|
|
288
|
+
command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
|
|
289
|
+
'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
|
|
290
|
+
'curl', 'wget', 'docker', 'make', 'conda', 'uv', 'poetry']
|
|
532
291
|
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
# Add specific handling for common errors
|
|
536
|
-
last_error = None
|
|
537
|
-
for attempt in range(retries + 1):
|
|
538
|
-
try:
|
|
539
|
-
if attempt > 0:
|
|
540
|
-
# Exponential backoff
|
|
541
|
-
wait_time = backoff_factor * (2 ** (attempt - 1))
|
|
542
|
-
print(f"⏱️ Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
|
|
543
|
-
time.sleep(wait_time)
|
|
544
|
-
|
|
545
|
-
print(f"🤖 Calling OpenAI with {model_name} model to debug the failed command...")
|
|
546
|
-
print(f"🔍 DEBUG: Making POST request to OpenAI API...")
|
|
547
|
-
response = requests.post(
|
|
548
|
-
"https://api.openai.com/v1/chat/completions",
|
|
549
|
-
headers=headers,
|
|
550
|
-
json=payload,
|
|
551
|
-
timeout=45 # Increased timeout for reliability
|
|
552
|
-
)
|
|
553
|
-
|
|
554
|
-
print(f"🔍 DEBUG: Response received, status code: {response.status_code}")
|
|
555
|
-
|
|
556
|
-
# Handle specific status codes
|
|
557
|
-
if response.status_code == 200:
|
|
558
|
-
print(f"🔍 DEBUG: Success! Response length: {len(response.text)}")
|
|
559
|
-
return response.json(), None
|
|
560
|
-
elif response.status_code == 401:
|
|
561
|
-
error_msg = "Authentication error: Invalid API key"
|
|
562
|
-
print(f"❌ {error_msg}")
|
|
563
|
-
print(f"🔍 DEBUG: Response text: {response.text}")
|
|
564
|
-
# Don't retry auth errors
|
|
565
|
-
return None, error_msg
|
|
566
|
-
elif response.status_code == 429:
|
|
567
|
-
error_msg = "Rate limit exceeded or quota reached"
|
|
568
|
-
print(f"⚠️ {error_msg}")
|
|
569
|
-
print(f"🔍 DEBUG: Response text: {response.text}")
|
|
570
|
-
# Always retry rate limit errors with increasing backoff
|
|
571
|
-
last_error = error_msg
|
|
572
|
-
continue
|
|
573
|
-
elif response.status_code == 500:
|
|
574
|
-
error_msg = "OpenAI server error"
|
|
575
|
-
print(f"⚠️ {error_msg}")
|
|
576
|
-
print(f"🔍 DEBUG: Response text: {response.text}")
|
|
577
|
-
# Retry server errors
|
|
578
|
-
last_error = error_msg
|
|
579
|
-
continue
|
|
580
|
-
else:
|
|
581
|
-
error_msg = f"Status code: {response.status_code}, Response: {response.text}"
|
|
582
|
-
print(f"⚠️ OpenAI API error: {error_msg}")
|
|
583
|
-
print(f"🔍 DEBUG: Full response text: {response.text}")
|
|
584
|
-
last_error = error_msg
|
|
585
|
-
# Only retry if we have attempts left
|
|
586
|
-
if attempt < retries:
|
|
587
|
-
continue
|
|
588
|
-
return None, error_msg
|
|
589
|
-
except requests.exceptions.Timeout:
|
|
590
|
-
error_msg = "Request timed out"
|
|
591
|
-
# print(f"⚠️ {error_msg}")
|
|
592
|
-
# print(f"🔍 DEBUG: Timeout after 45 seconds")
|
|
593
|
-
last_error = error_msg
|
|
594
|
-
# Always retry timeouts
|
|
595
|
-
continue
|
|
596
|
-
except requests.exceptions.ConnectionError:
|
|
597
|
-
error_msg = "Connection error"
|
|
598
|
-
print(f"⚠️ {error_msg}")
|
|
599
|
-
print(f"🔍 DEBUG: Connection failed to api.openai.com")
|
|
600
|
-
last_error = error_msg
|
|
601
|
-
# Always retry connection errors
|
|
602
|
-
continue
|
|
603
|
-
except Exception as e:
|
|
604
|
-
error_msg = str(e)
|
|
605
|
-
print(f"⚠️ Unexpected error: {error_msg}")
|
|
606
|
-
print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
|
|
607
|
-
print(f"🔍 DEBUG: Exception details: {str(e)}")
|
|
608
|
-
last_error = error_msg
|
|
609
|
-
# Only retry if we have attempts left
|
|
610
|
-
if attempt < retries:
|
|
611
|
-
continue
|
|
612
|
-
return None, error_msg
|
|
292
|
+
command_lines = [line.strip() for line in fix_command.split('\n')
|
|
293
|
+
if any(line.strip().startswith(prefix) for prefix in command_prefixes)]
|
|
613
294
|
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
# Try each model in sequence until one works
|
|
618
|
-
result = None
|
|
619
|
-
last_error = None
|
|
620
|
-
|
|
621
|
-
for model in models_to_try:
|
|
622
|
-
result, error = try_api_call(model)
|
|
623
|
-
if result:
|
|
624
|
-
# print(f"✅ Successfully got response from {model}")
|
|
625
|
-
break
|
|
295
|
+
if command_lines:
|
|
296
|
+
fix_command = command_lines[0]
|
|
626
297
|
else:
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
if not result:
|
|
631
|
-
print(f"❌ All model attempts failed. Last error: {last_error}")
|
|
632
|
-
return None
|
|
633
|
-
|
|
634
|
-
# Process the response
|
|
635
|
-
try:
|
|
636
|
-
print(f"🔍 DEBUG: Processing OpenAI response...")
|
|
637
|
-
# print(f"🔍 DEBUG: Response structure: {list(result.keys())}")
|
|
638
|
-
print(f"🔍 DEBUG: Choices count: {len(result.get('choices', []))}")
|
|
639
|
-
|
|
640
|
-
fix_command = result["choices"][0]["message"]["content"].strip()
|
|
641
|
-
print(f"🔍 DEBUG: Raw response content: {fix_command}")
|
|
642
|
-
|
|
643
|
-
# Save the original response for debugging
|
|
644
|
-
original_response = fix_command
|
|
645
|
-
|
|
646
|
-
# Extract just the command if it's wrapped in backticks or explanation
|
|
647
|
-
if "```" in fix_command:
|
|
648
|
-
# Extract content between backticks
|
|
649
|
-
import re
|
|
650
|
-
code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
|
|
651
|
-
if code_blocks:
|
|
652
|
-
fix_command = code_blocks[0].strip()
|
|
653
|
-
print(f"✅ Extracted command from code block: {fix_command}")
|
|
654
|
-
|
|
655
|
-
# If the response still has explanatory text, try to extract just the command
|
|
656
|
-
if len(fix_command.split('\n')) > 1:
|
|
657
|
-
# First try to find lines that look like commands (start with common command prefixes)
|
|
658
|
-
command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
|
|
659
|
-
'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
|
|
660
|
-
'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
|
|
661
|
-
'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']
|
|
662
|
-
|
|
663
|
-
# Check for lines that start with common command prefixes
|
|
298
|
+
# Try shell patterns
|
|
299
|
+
shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
|
|
664
300
|
command_lines = [line.strip() for line in fix_command.split('\n')
|
|
665
|
-
if any(line
|
|
301
|
+
if any(pattern in line for pattern in shell_patterns)]
|
|
666
302
|
|
|
667
303
|
if command_lines:
|
|
668
|
-
# Use the first command line found
|
|
669
304
|
fix_command = command_lines[0]
|
|
670
|
-
print(f"✅ Identified command by prefix: {fix_command}")
|
|
671
305
|
else:
|
|
672
|
-
#
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
fix_command = min(lines, key=len)
|
|
691
|
-
print(f"✅ Selected shortest line as command: {fix_command}")
|
|
692
|
-
|
|
693
|
-
# Clean up the command - remove any trailing periods or quotes
|
|
694
|
-
fix_command = fix_command.rstrip('.;"\'')
|
|
695
|
-
|
|
696
|
-
# Remove common prefixes that LLMs sometimes add
|
|
697
|
-
prefixes_to_remove = [
|
|
698
|
-
"Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
|
|
699
|
-
"You should run: ", "You can run: ", "You need to run: "
|
|
700
|
-
]
|
|
701
|
-
for prefix in prefixes_to_remove:
|
|
702
|
-
if fix_command.startswith(prefix):
|
|
703
|
-
fix_command = fix_command[len(prefix):].strip()
|
|
704
|
-
print(f"✅ Removed prefix: {prefix}")
|
|
705
|
-
break
|
|
706
|
-
|
|
707
|
-
# If the command is still multi-line or very long, it might not be a valid command
|
|
708
|
-
if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
|
|
709
|
-
print("⚠️ Extracted command appears invalid (multi-line or too long)")
|
|
710
|
-
print("🔍 Original response from LLM:")
|
|
711
|
-
print("-" * 60)
|
|
712
|
-
print(original_response)
|
|
713
|
-
print("-" * 60)
|
|
714
|
-
print("⚠️ Using best guess for command")
|
|
715
|
-
|
|
716
|
-
print(f"🔧 Suggested fix: {fix_command}")
|
|
717
|
-
print(f"🔍 DEBUG: Returning fix command: {fix_command}")
|
|
718
|
-
return fix_command
|
|
719
|
-
except Exception as e:
|
|
720
|
-
print(f"❌ Error processing OpenAI response: {e}")
|
|
721
|
-
print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
|
|
722
|
-
print(f"🔍 DEBUG: Exception details: {str(e)}")
|
|
723
|
-
return None
|
|
724
|
-
|
|
725
|
-
def call_openai_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
|
|
726
|
-
"""Call OpenAI to debug multiple failed commands and suggest fixes for all of them at once"""
|
|
727
|
-
print("\n🔍 DEBUG: Starting batch LLM debugging...")
|
|
728
|
-
print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")
|
|
729
|
-
|
|
730
|
-
if not failed_commands:
|
|
731
|
-
print("⚠️ No failed commands to analyze")
|
|
732
|
-
return []
|
|
733
|
-
|
|
734
|
-
if not api_key:
|
|
735
|
-
print("❌ No OpenAI API key provided for batch debugging")
|
|
736
|
-
return []
|
|
737
|
-
|
|
738
|
-
# Prepare context for batch analysis
|
|
739
|
-
context_parts = []
|
|
740
|
-
context_parts.append(f"Current directory: {current_dir}")
|
|
741
|
-
context_parts.append(f"Sandbox available: {sandbox is not None}")
|
|
742
|
-
|
|
743
|
-
# Add failed commands with their errors
|
|
744
|
-
for i, failed_cmd in enumerate(failed_commands, 1):
|
|
745
|
-
cmd_type = failed_cmd.get('type', 'main')
|
|
746
|
-
original_cmd = failed_cmd.get('original_command', '')
|
|
747
|
-
cmd_text = failed_cmd['command']
|
|
748
|
-
stderr = failed_cmd.get('stderr', '')
|
|
749
|
-
stdout = failed_cmd.get('stdout', '')
|
|
750
|
-
|
|
751
|
-
context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
|
|
752
|
-
context_parts.append(f"Command: {cmd_text}")
|
|
753
|
-
if original_cmd and original_cmd != cmd_text:
|
|
754
|
-
context_parts.append(f"Original Command: {original_cmd}")
|
|
755
|
-
if stderr:
|
|
756
|
-
context_parts.append(f"Error Output: {stderr}")
|
|
757
|
-
if stdout:
|
|
758
|
-
context_parts.append(f"Standard Output: {stdout}")
|
|
306
|
+
# Use shortest non-empty line
|
|
307
|
+
lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
|
|
308
|
+
if lines:
|
|
309
|
+
valid_lines = [line for line in lines if len(line) > 5]
|
|
310
|
+
fix_command = min(valid_lines, key=len) if valid_lines else min(lines, key=len)
|
|
311
|
+
|
|
312
|
+
# Clean up the command
|
|
313
|
+
fix_command = fix_command.rstrip('.;"\'')
|
|
314
|
+
|
|
315
|
+
# Remove common prefixes
|
|
316
|
+
prefixes_to_remove = [
|
|
317
|
+
"Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
|
|
318
|
+
"You should run: ", "You can run: ", "You need to run: "
|
|
319
|
+
]
|
|
320
|
+
for prefix in prefixes_to_remove:
|
|
321
|
+
if fix_command.startswith(prefix):
|
|
322
|
+
fix_command = fix_command[len(prefix):].strip()
|
|
323
|
+
break
|
|
759
324
|
|
|
760
|
-
|
|
761
|
-
prompt = f"""You are a debugging assistant analyzing multiple failed commands.
|
|
325
|
+
return fix_command
|
|
762
326
|
|
|
763
|
-
Context:
|
|
764
|
-
{chr(10).join(context_parts)}
|
|
765
|
-
|
|
766
|
-
Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:
|
|
767
327
|
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
328
|
+
def make_api_request(provider, api_key, prompt, retries=2):
|
|
329
|
+
"""Make API request to the specified provider"""
|
|
330
|
+
if provider == "openai":
|
|
331
|
+
return make_openai_request(api_key, prompt, retries)
|
|
332
|
+
elif provider == "anthropic":
|
|
333
|
+
return make_anthropic_request(api_key, prompt, retries)
|
|
334
|
+
elif provider == "openrouter":
|
|
335
|
+
return make_openrouter_request(api_key, prompt, retries)
|
|
336
|
+
elif provider == "groq":
|
|
337
|
+
return make_groq_request(api_key, prompt, retries)
|
|
338
|
+
else:
|
|
339
|
+
return None
|
|
778
340
|
|
|
779
|
-
Provide fixes for all {len(failed_commands)} failed commands:"""
|
|
780
341
|
|
|
781
|
-
|
|
342
|
+
def make_openai_request(api_key, prompt, retries=2):
|
|
343
|
+
"""Make request to OpenAI API"""
|
|
782
344
|
headers = {
|
|
783
|
-
"
|
|
784
|
-
"
|
|
345
|
+
"Content-Type": "application/json",
|
|
346
|
+
"Authorization": f"Bearer {api_key}"
|
|
785
347
|
}
|
|
786
348
|
|
|
787
349
|
payload = {
|
|
788
|
-
"model": "gpt-
|
|
350
|
+
"model": os.environ.get("OPENAI_MODEL", "gpt-5-mini"),
|
|
789
351
|
"messages": [
|
|
790
|
-
{"role": "system", "content": "You are a debugging assistant.
|
|
352
|
+
{"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue."},
|
|
791
353
|
{"role": "user", "content": prompt}
|
|
792
354
|
],
|
|
793
|
-
"temperature": 0.
|
|
794
|
-
"max_tokens":
|
|
355
|
+
"temperature": 0.2,
|
|
356
|
+
"max_tokens": 300
|
|
795
357
|
}
|
|
796
358
|
|
|
797
|
-
|
|
798
|
-
print(f"🤖 Calling OpenAI for batch debugging of {len(failed_commands)} commands...")
|
|
799
|
-
response = requests.post(
|
|
800
|
-
"https://api.openai.com/v1/chat/completions",
|
|
801
|
-
headers=headers,
|
|
802
|
-
json=payload,
|
|
803
|
-
timeout=60
|
|
804
|
-
)
|
|
805
|
-
|
|
806
|
-
if response.status_code == 200:
|
|
807
|
-
result = response.json()
|
|
808
|
-
content = result['choices'][0]['message']['content']
|
|
809
|
-
print(f"✅ Batch analysis completed")
|
|
810
|
-
|
|
811
|
-
# Parse the response to extract fix commands
|
|
812
|
-
fixes = []
|
|
813
|
-
for i in range(1, len(failed_commands) + 1):
|
|
814
|
-
fix_pattern = f"FIX_COMMAND_{i}: (.+)"
|
|
815
|
-
reason_pattern = f"REASON_{i}: (.+)"
|
|
816
|
-
|
|
817
|
-
fix_match = re.search(fix_pattern, content, re.MULTILINE)
|
|
818
|
-
reason_match = re.search(reason_pattern, content, re.MULTILINE)
|
|
819
|
-
|
|
820
|
-
if fix_match:
|
|
821
|
-
fix_command = fix_match.group(1).strip()
|
|
822
|
-
reason = reason_match.group(1).strip() if reason_match else "LLM suggested fix"
|
|
823
|
-
|
|
824
|
-
# Clean up the fix command
|
|
825
|
-
if fix_command.startswith('`') and fix_command.endswith('`'):
|
|
826
|
-
fix_command = fix_command[1:-1]
|
|
827
|
-
|
|
828
|
-
fixes.append({
|
|
829
|
-
'original_command': failed_commands[i-1]['command'],
|
|
830
|
-
'fix_command': fix_command,
|
|
831
|
-
'reason': reason,
|
|
832
|
-
'command_index': i-1
|
|
833
|
-
})
|
|
834
|
-
|
|
835
|
-
print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
|
|
836
|
-
return fixes
|
|
837
|
-
else:
|
|
838
|
-
print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
|
|
839
|
-
return []
|
|
840
|
-
|
|
841
|
-
except Exception as e:
|
|
842
|
-
print(f"❌ Error during batch debugging: {e}")
|
|
843
|
-
return []
|
|
844
|
-
|
|
845
|
-
def call_anthropic_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
|
|
846
|
-
"""Call Anthropic Claude to debug a failed command and suggest a fix"""
|
|
847
|
-
print("\n🔍 DEBUG: Starting Anthropic Claude debugging...")
|
|
848
|
-
print(f"🔍 DEBUG: Command: {command}")
|
|
849
|
-
print(f"🔍 DEBUG: Error output length: {len(error_output) if error_output else 0}")
|
|
850
|
-
print(f"🔍 DEBUG: Current directory: {current_dir}")
|
|
851
|
-
print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")
|
|
852
|
-
|
|
853
|
-
# Define _to_str function locally to avoid NameError
|
|
854
|
-
def _to_str(maybe_bytes):
|
|
359
|
+
for attempt in range(retries + 1):
|
|
855
360
|
try:
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
361
|
+
if attempt > 0:
|
|
362
|
+
time.sleep(1.5 * (2 ** (attempt - 1)))
|
|
363
|
+
|
|
364
|
+
response = requests.post(
|
|
365
|
+
"https://api.openai.com/v1/chat/completions",
|
|
366
|
+
headers=headers,
|
|
367
|
+
json=payload,
|
|
368
|
+
timeout=45
|
|
369
|
+
)
|
|
370
|
+
|
|
371
|
+
if response.status_code == 200:
|
|
372
|
+
result = response.json()
|
|
373
|
+
return result["choices"][0]["message"]["content"]
|
|
374
|
+
elif response.status_code == 401:
|
|
375
|
+
print("❌ Invalid OpenAI API key")
|
|
376
|
+
return None
|
|
377
|
+
elif response.status_code in [429, 500]:
|
|
378
|
+
continue # Retry
|
|
861
379
|
else:
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
print("🔍 Skipping debugging for test command - non-zero exit code is expected behavior")
|
|
871
|
-
return None
|
|
872
|
-
|
|
873
|
-
# Validate error_output - if it's empty, we can't debug effectively
|
|
874
|
-
if not error_output or not error_output.strip():
|
|
875
|
-
print("⚠️ Error output is empty. Cannot effectively debug the command.")
|
|
876
|
-
print("⚠️ Skipping Anthropic debugging due to lack of error information.")
|
|
877
|
-
return None
|
|
878
|
-
|
|
879
|
-
# Try to get API key from multiple sources
|
|
880
|
-
if not api_key:
|
|
881
|
-
print("🔍 DEBUG: No Anthropic API key provided, searching for one...")
|
|
882
|
-
|
|
883
|
-
# First try environment variable
|
|
884
|
-
api_key = os.environ.get("ANTHROPIC_API_KEY")
|
|
885
|
-
print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
|
|
886
|
-
if api_key:
|
|
887
|
-
print(f"🔍 DEBUG: Environment API key value: {api_key}")
|
|
888
|
-
|
|
889
|
-
# If not in environment, try to fetch from server using fetch_modal_tokens
|
|
890
|
-
if not api_key:
|
|
891
|
-
try:
|
|
892
|
-
print("🔍 DEBUG: Trying to fetch API key from server...")
|
|
893
|
-
from fetch_modal_tokens import get_tokens
|
|
894
|
-
_, _, _, api_key = get_tokens()
|
|
895
|
-
if api_key:
|
|
896
|
-
# Set in environment for this session
|
|
897
|
-
os.environ["ANTHROPIC_API_KEY"] = api_key
|
|
898
|
-
else:
|
|
899
|
-
print("⚠️ Could not fetch Anthropic API key from server")
|
|
900
|
-
except Exception as e:
|
|
901
|
-
print(f"⚠️ Error fetching API key from server: {e}")
|
|
902
|
-
|
|
903
|
-
# Then try credentials manager
|
|
904
|
-
if not api_key:
|
|
905
|
-
print("🔍 DEBUG: Trying credentials manager...")
|
|
906
|
-
try:
|
|
907
|
-
from credentials_manager import CredentialsManager
|
|
908
|
-
credentials_manager = CredentialsManager()
|
|
909
|
-
api_key = credentials_manager.get_anthropic_api_key()
|
|
910
|
-
if api_key:
|
|
911
|
-
print(f"🔍 DEBUG: API key from credentials manager: Found")
|
|
912
|
-
print(f"🔍 DEBUG: Credentials manager API key value: {api_key}")
|
|
913
|
-
# Set in environment for this session
|
|
914
|
-
os.environ["ANTHROPIC_API_KEY"] = api_key
|
|
915
|
-
else:
|
|
916
|
-
print("⚠️ Could not fetch Anthropic API key from credentials manager")
|
|
917
|
-
except Exception as e:
|
|
918
|
-
print(f"⚠️ Error fetching API key from credentials manager: {e}")
|
|
919
|
-
|
|
920
|
-
# Store the API key in a persistent file if found
|
|
921
|
-
if api_key:
|
|
922
|
-
try:
|
|
923
|
-
os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
|
|
924
|
-
with open(os.path.expanduser("~/.gitarsenal/anthropic_key"), "w") as f:
|
|
925
|
-
f.write(api_key)
|
|
926
|
-
print("✅ Saved Anthropic API key for future use")
|
|
927
|
-
except Exception as e:
|
|
928
|
-
print(f"⚠️ Could not save API key: {e}")
|
|
929
|
-
|
|
930
|
-
# Try to load from saved file if not in environment
|
|
931
|
-
if not api_key:
|
|
932
|
-
try:
|
|
933
|
-
key_file = os.path.expanduser("~/.gitarsenal/anthropic_key")
|
|
934
|
-
print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
|
|
935
|
-
if os.path.exists(key_file):
|
|
936
|
-
with open(key_file, "r") as f:
|
|
937
|
-
api_key = f.read().strip()
|
|
938
|
-
if api_key:
|
|
939
|
-
print("✅ Loaded Anthropic API key from saved file")
|
|
940
|
-
print(f"🔍 DEBUG: API key from file: {api_key}")
|
|
941
|
-
print(f"🔍 DEBUG: API key length: {len(api_key)}")
|
|
942
|
-
# Also set in environment for this session
|
|
943
|
-
os.environ["ANTHROPIC_API_KEY"] = api_key
|
|
944
|
-
else:
|
|
945
|
-
print("🔍 DEBUG: Saved file exists but is empty")
|
|
946
|
-
else:
|
|
947
|
-
print("🔍 DEBUG: No saved API key file found")
|
|
948
|
-
except Exception as e:
|
|
949
|
-
print(f"⚠️ Could not load saved API key: {e}")
|
|
950
|
-
|
|
951
|
-
if not api_key:
|
|
952
|
-
print("❌ No Anthropic API key available for debugging")
|
|
953
|
-
return None
|
|
380
|
+
print(f"⚠️ OpenAI API error: {response.status_code}")
|
|
381
|
+
return None
|
|
382
|
+
|
|
383
|
+
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
|
|
384
|
+
continue # Retry
|
|
385
|
+
except Exception as e:
|
|
386
|
+
print(f"⚠️ OpenAI request error: {e}")
|
|
387
|
+
return None
|
|
954
388
|
|
|
955
|
-
|
|
956
|
-
error_str = _to_str(error_output)
|
|
957
|
-
prompt = f"""You are a debugging assistant. Provide only the terminal command to fix the issue.
|
|
958
|
-
|
|
959
|
-
Context:
|
|
960
|
-
- Current directory: {current_dir}
|
|
961
|
-
- Sandbox available: {sandbox is not None}
|
|
962
|
-
- Failed command: {command}
|
|
963
|
-
- Error output: {error_str}
|
|
964
|
-
|
|
965
|
-
Analyze the issue first, understand why it's happening, then provide the command to fix it.
|
|
966
|
-
|
|
967
|
-
Guidelines:
|
|
968
|
-
- For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found
|
|
969
|
-
- For missing packages, use appropriate package managers (pip, apt-get, npm)
|
|
970
|
-
- For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git)
|
|
971
|
-
- For authentication, suggest login commands with placeholders
|
|
972
|
-
- For permission errors, suggest commands with sudo if appropriate
|
|
973
|
-
- For network issues, suggest retry commands or alternative URLs
|
|
389
|
+
return None
|
|
974
390
|
|
|
975
|
-
Return only the command to fix the issue, nothing else."""
|
|
976
391
|
|
|
977
|
-
|
|
392
|
+
def make_anthropic_request(api_key, prompt, retries=2):
|
|
393
|
+
"""Make request to Anthropic API"""
|
|
978
394
|
headers = {
|
|
979
395
|
"x-api-key": api_key,
|
|
980
396
|
"anthropic-version": "2023-06-01",
|
|
981
397
|
"content-type": "application/json"
|
|
982
398
|
}
|
|
983
399
|
|
|
984
|
-
|
|
985
|
-
|
|
400
|
+
payload = {
|
|
401
|
+
"model": "claude-sonnet-4-20250514",
|
|
402
|
+
"max_tokens": 300,
|
|
403
|
+
"messages": [{"role": "user", "content": prompt}]
|
|
404
|
+
}
|
|
986
405
|
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
print(f"
|
|
1009
|
-
|
|
1010
|
-
response = requests.post(
|
|
1011
|
-
"https://api.anthropic.com/v1/messages",
|
|
1012
|
-
headers=headers,
|
|
1013
|
-
json=payload,
|
|
1014
|
-
timeout=45 # Increased timeout for reliability
|
|
1015
|
-
)
|
|
1016
|
-
|
|
1017
|
-
print(f"🔍 DEBUG: Response received, status code: {response.status_code}")
|
|
406
|
+
for attempt in range(retries + 1):
|
|
407
|
+
try:
|
|
408
|
+
if attempt > 0:
|
|
409
|
+
time.sleep(1.5 * (2 ** (attempt - 1)))
|
|
410
|
+
|
|
411
|
+
response = requests.post(
|
|
412
|
+
"https://api.anthropic.com/v1/messages",
|
|
413
|
+
headers=headers,
|
|
414
|
+
json=payload,
|
|
415
|
+
timeout=45
|
|
416
|
+
)
|
|
417
|
+
|
|
418
|
+
if response.status_code == 200:
|
|
419
|
+
result = response.json()
|
|
420
|
+
return result["content"][0]["text"]
|
|
421
|
+
elif response.status_code == 401:
|
|
422
|
+
print("❌ Invalid Anthropic API key")
|
|
423
|
+
return None
|
|
424
|
+
elif response.status_code in [429, 500]:
|
|
425
|
+
continue # Retry
|
|
426
|
+
else:
|
|
427
|
+
print(f"⚠️ Anthropic API error: {response.status_code}")
|
|
428
|
+
return None
|
|
1018
429
|
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
error_msg = "Authentication error: Invalid API key"
|
|
1025
|
-
print(f"❌ {error_msg}")
|
|
1026
|
-
print(f"🔍 DEBUG: Response text: {response.text}")
|
|
1027
|
-
# Don't retry auth errors
|
|
1028
|
-
return None, error_msg
|
|
1029
|
-
elif response.status_code == 429:
|
|
1030
|
-
error_msg = "Rate limit exceeded or quota reached"
|
|
1031
|
-
print(f"⚠️ {error_msg}")
|
|
1032
|
-
print(f"🔍 DEBUG: Response text: {response.text}")
|
|
1033
|
-
# Always retry rate limit errors with increasing backoff
|
|
1034
|
-
last_error = error_msg
|
|
1035
|
-
continue
|
|
1036
|
-
elif response.status_code == 500:
|
|
1037
|
-
error_msg = "Anthropic server error"
|
|
1038
|
-
print(f"⚠️ {error_msg}")
|
|
1039
|
-
print(f"🔍 DEBUG: Response text: {response.text}")
|
|
1040
|
-
# Retry server errors
|
|
1041
|
-
last_error = error_msg
|
|
1042
|
-
continue
|
|
1043
|
-
else:
|
|
1044
|
-
error_msg = f"Status code: {response.status_code}, Response: {response.text}"
|
|
1045
|
-
print(f"⚠️ Anthropic API error: {error_msg}")
|
|
1046
|
-
print(f"🔍 DEBUG: Full response text: {response.text}")
|
|
1047
|
-
last_error = error_msg
|
|
1048
|
-
# Only retry if we have attempts left
|
|
1049
|
-
if attempt < retries:
|
|
1050
|
-
continue
|
|
1051
|
-
return None, error_msg
|
|
1052
|
-
except requests.exceptions.Timeout:
|
|
1053
|
-
error_msg = "Request timed out"
|
|
1054
|
-
last_error = error_msg
|
|
1055
|
-
# Always retry timeouts
|
|
1056
|
-
continue
|
|
1057
|
-
except requests.exceptions.ConnectionError:
|
|
1058
|
-
error_msg = "Connection error"
|
|
1059
|
-
print(f"⚠️ {error_msg}")
|
|
1060
|
-
print(f"🔍 DEBUG: Connection failed to api.anthropic.com")
|
|
1061
|
-
last_error = error_msg
|
|
1062
|
-
# Always retry connection errors
|
|
1063
|
-
continue
|
|
1064
|
-
except Exception as e:
|
|
1065
|
-
error_msg = str(e)
|
|
1066
|
-
print(f"⚠️ Unexpected error: {error_msg}")
|
|
1067
|
-
print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
|
|
1068
|
-
print(f"🔍 DEBUG: Exception details: {str(e)}")
|
|
1069
|
-
last_error = error_msg
|
|
1070
|
-
# Only retry if we have attempts left
|
|
1071
|
-
if attempt < retries:
|
|
1072
|
-
continue
|
|
1073
|
-
return None, error_msg
|
|
1074
|
-
|
|
1075
|
-
# If we get here, all retries failed
|
|
1076
|
-
return None, last_error
|
|
1077
|
-
|
|
1078
|
-
# Try each model in sequence until one works
|
|
1079
|
-
result = None
|
|
1080
|
-
last_error = None
|
|
430
|
+
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
|
|
431
|
+
continue # Retry
|
|
432
|
+
except Exception as e:
|
|
433
|
+
print(f"⚠️ Anthropic request error: {e}")
|
|
434
|
+
return None
|
|
1081
435
|
|
|
1082
|
-
|
|
1083
|
-
|
|
1084
|
-
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
436
|
+
return None
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
def make_openrouter_request(api_key, prompt, retries=2):
|
|
440
|
+
"""Make request to OpenRouter API"""
|
|
441
|
+
headers = {
|
|
442
|
+
"x-api-key": api_key,
|
|
443
|
+
"content-type": "application/json"
|
|
444
|
+
}
|
|
1089
445
|
|
|
1090
|
-
|
|
1091
|
-
|
|
1092
|
-
|
|
446
|
+
payload = {
|
|
447
|
+
"model": "openai/gpt-5-mini",
|
|
448
|
+
"max_tokens": 300,
|
|
449
|
+
"messages": [{"role": "user", "content": prompt}]
|
|
450
|
+
}
|
|
1093
451
|
|
|
1094
|
-
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
|
|
1106
|
-
|
|
1107
|
-
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
|
|
1114
|
-
# If the response still has explanatory text, try to extract just the command
|
|
1115
|
-
if len(fix_command.split('\n')) > 1:
|
|
1116
|
-
# First try to find lines that look like commands (start with common command prefixes)
|
|
1117
|
-
command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
|
|
1118
|
-
'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
|
|
1119
|
-
'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
|
|
1120
|
-
'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']
|
|
1121
|
-
|
|
1122
|
-
# Check for lines that start with common command prefixes
|
|
1123
|
-
command_lines = [line.strip() for line in fix_command.split('\n')
|
|
1124
|
-
if any(line.strip().startswith(prefix) for prefix in command_prefixes)]
|
|
1125
|
-
|
|
1126
|
-
if command_lines:
|
|
1127
|
-
# Use the first command line found
|
|
1128
|
-
fix_command = command_lines[0]
|
|
1129
|
-
print(f"✅ Identified command by prefix: {fix_command}")
|
|
452
|
+
for attempt in range(retries + 1):
|
|
453
|
+
try:
|
|
454
|
+
if attempt > 0:
|
|
455
|
+
time.sleep(1.5 * (2 ** (attempt - 1)))
|
|
456
|
+
|
|
457
|
+
response = requests.post(
|
|
458
|
+
"https://openrouter.ai/api/v1/chat/completions",
|
|
459
|
+
headers=headers,
|
|
460
|
+
json=payload,
|
|
461
|
+
timeout=45
|
|
462
|
+
)
|
|
463
|
+
|
|
464
|
+
if response.status_code == 200:
|
|
465
|
+
result = response.json()
|
|
466
|
+
return result["choices"][0]["message"]["content"]
|
|
467
|
+
elif response.status_code == 401:
|
|
468
|
+
print("❌ Invalid OpenRouter API key")
|
|
469
|
+
return None
|
|
470
|
+
elif response.status_code in [429, 500]:
|
|
471
|
+
continue # Retry
|
|
1130
472
|
else:
|
|
1131
|
-
|
|
1132
|
-
|
|
1133
|
-
command_lines = [line.strip() for line in fix_command.split('\n')
|
|
1134
|
-
if any(pattern in line for pattern in shell_patterns)]
|
|
473
|
+
print(f"⚠️ OpenRouter API error: {response.status_code}")
|
|
474
|
+
return None
|
|
1135
475
|
|
|
1136
|
-
|
|
1137
|
-
|
|
1138
|
-
|
|
1139
|
-
|
|
1140
|
-
|
|
1141
|
-
# Fall back to the shortest non-empty line as it's likely the command
|
|
1142
|
-
lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
|
|
1143
|
-
if lines:
|
|
1144
|
-
# Exclude very short lines that are likely not commands
|
|
1145
|
-
valid_lines = [line for line in lines if len(line) > 5]
|
|
1146
|
-
if valid_lines:
|
|
1147
|
-
fix_command = min(valid_lines, key=len)
|
|
1148
|
-
else:
|
|
1149
|
-
fix_command = min(lines, key=len)
|
|
1150
|
-
print(f"✅ Selected shortest line as command: {fix_command}")
|
|
1151
|
-
|
|
1152
|
-
# Clean up the command - remove any trailing periods or quotes
|
|
1153
|
-
fix_command = fix_command.rstrip('.;"\'')
|
|
1154
|
-
|
|
1155
|
-
# Remove common prefixes that LLMs sometimes add
|
|
1156
|
-
prefixes_to_remove = [
|
|
1157
|
-
"Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
|
|
1158
|
-
"You should run: ", "You can run: ", "You need to run: "
|
|
1159
|
-
]
|
|
1160
|
-
for prefix in prefixes_to_remove:
|
|
1161
|
-
if fix_command.startswith(prefix):
|
|
1162
|
-
fix_command = fix_command[len(prefix):].strip()
|
|
1163
|
-
print(f"✅ Removed prefix: {prefix}")
|
|
1164
|
-
break
|
|
1165
|
-
|
|
1166
|
-
# If the command is still multi-line or very long, it might not be a valid command
|
|
1167
|
-
if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
|
|
1168
|
-
print("⚠️ Extracted command appears invalid (multi-line or too long)")
|
|
1169
|
-
print("🔍 Original response from LLM:")
|
|
1170
|
-
print("-" * 60)
|
|
1171
|
-
print(original_response)
|
|
1172
|
-
print("-" * 60)
|
|
1173
|
-
print("⚠️ Using best guess for command")
|
|
1174
|
-
|
|
1175
|
-
print(f"🔧 Suggested fix: {fix_command}")
|
|
1176
|
-
print(f"🔍 DEBUG: Returning fix command: {fix_command}")
|
|
1177
|
-
return fix_command
|
|
1178
|
-
except Exception as e:
|
|
1179
|
-
print(f"❌ Error processing Anthropic response: {e}")
|
|
1180
|
-
print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
|
|
1181
|
-
print(f"🔍 DEBUG: Exception details: {str(e)}")
|
|
1182
|
-
return None
|
|
1183
|
-
|
|
1184
|
-
def call_openrouter_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
|
|
1185
|
-
"""Call OpenRouter to debug a failed command and suggest a fix"""
|
|
1186
|
-
print("\n🔍 DEBUG: Starting OpenRouter debugging...")
|
|
1187
|
-
print(f"🔍 DEBUG: Command: {command}")
|
|
1188
|
-
print(f"🔍 DEBUG: Error output length: {len(error_output) if error_output else 0}")
|
|
1189
|
-
print(f"🔍 DEBUG: Current directory: {current_dir}")
|
|
1190
|
-
print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")
|
|
476
|
+
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
|
|
477
|
+
continue # Retry
|
|
478
|
+
except Exception as e:
|
|
479
|
+
print(f"⚠️ OpenRouter request error: {e}")
|
|
480
|
+
return None
|
|
1191
481
|
|
|
1192
|
-
|
|
1193
|
-
|
|
482
|
+
return None
|
|
483
|
+
|
|
484
|
+
|
|
485
|
+
def make_groq_request(api_key, prompt, retries=2):
|
|
486
|
+
"""Make request to Groq API (OpenAI-compatible endpoint)"""
|
|
487
|
+
headers = {
|
|
488
|
+
"Content-Type": "application/json",
|
|
489
|
+
"Authorization": f"Bearer {api_key}"
|
|
490
|
+
}
|
|
491
|
+
|
|
492
|
+
payload = {
|
|
493
|
+
"model": os.environ.get("GROQ_MODEL", "openai/gpt-oss-20b"),
|
|
494
|
+
"messages": [
|
|
495
|
+
{"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue."},
|
|
496
|
+
{"role": "user", "content": prompt}
|
|
497
|
+
],
|
|
498
|
+
"temperature": 0.2,
|
|
499
|
+
"max_tokens": 300
|
|
500
|
+
}
|
|
501
|
+
|
|
502
|
+
endpoint = os.environ.get("GROQ_BASE_URL", "https://api.groq.com/openai/v1/chat/completions")
|
|
503
|
+
|
|
504
|
+
for attempt in range(retries + 1):
|
|
1194
505
|
try:
|
|
1195
|
-
|
|
1196
|
-
|
|
1197
|
-
|
|
1198
|
-
|
|
1199
|
-
|
|
506
|
+
if attempt > 0:
|
|
507
|
+
time.sleep(1.5 * (2 ** (attempt - 1)))
|
|
508
|
+
|
|
509
|
+
response = requests.post(
|
|
510
|
+
endpoint,
|
|
511
|
+
headers=headers,
|
|
512
|
+
json=payload,
|
|
513
|
+
timeout=45
|
|
514
|
+
)
|
|
515
|
+
|
|
516
|
+
if response.status_code == 200:
|
|
517
|
+
result = response.json()
|
|
518
|
+
return result["choices"][0]["message"]["content"]
|
|
519
|
+
elif response.status_code == 401:
|
|
520
|
+
print("❌ Invalid Groq API key")
|
|
521
|
+
return None
|
|
522
|
+
elif response.status_code in [429, 500]:
|
|
523
|
+
continue # Retry
|
|
1200
524
|
else:
|
|
1201
|
-
|
|
1202
|
-
|
|
1203
|
-
|
|
1204
|
-
|
|
1205
|
-
|
|
1206
|
-
|
|
1207
|
-
|
|
525
|
+
print(f"⚠️ Groq API error: {response.status_code}")
|
|
526
|
+
return None
|
|
527
|
+
|
|
528
|
+
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError):
|
|
529
|
+
continue # Retry
|
|
530
|
+
except Exception as e:
|
|
531
|
+
print(f"⚠️ Groq request error: {e}")
|
|
532
|
+
return None
|
|
533
|
+
|
|
534
|
+
return None
|
|
535
|
+
|
|
536
|
+
|
|
537
|
+
def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Route a failed shell command to the configured LLM backend and return a fix.

    Parameters
    ----------
    command : str
        The shell command that failed.
    error_output : str
        Captured error text from the failed command; must be non-empty.
    api_key : str, optional
        Explicit API key; when omitted it is resolved via ``get_api_key``.
    current_dir : str, optional
        Working directory passed along when gathering context.
    sandbox : object, optional
        Sandbox handle passed along when gathering context (may be None).
    use_web_search : bool, optional
        Accepted for interface compatibility; not consumed in this function.

    Returns
    -------
    str or None
        The suggested fix command, or None when debugging is skipped, no API
        key is available, or the model returned no usable response.
    """
    # `test` commands exit non-zero by design, so never try to "fix" them.
    if command.strip().startswith("test "):
        return None

    # With no error text there is nothing meaningful to send to the model.
    if not error_output or not error_output.strip():
        print("⚠️ Error output is empty. Cannot debug effectively.")
        return None

    current_model = get_current_debug_model()
    print(f"🔍 Using {current_model.upper()} for debugging...")

    # Resolve an API key for the selected backend unless one was supplied.
    if not api_key:
        api_key = get_api_key(current_model)
    if not api_key:
        print(f"❌ No {current_model} API key available. Cannot perform LLM debugging.")
        return None

    # Persist the working key so later sessions can reuse it.
    save_api_key(current_model, api_key)

    # Assemble environment, directory, file and credential context for the prompt.
    system_info, directory_context, file_context = gather_context(sandbox, current_dir)
    auth_context = generate_auth_context(get_stored_credentials())

    prompt = create_debug_prompt(command, error_output, system_info, directory_context, file_context, auth_context)

    # Echo exactly what error text is being sent, framed for readability.
    banner = "=" * 60
    print(f"\n{banner}")
    print("DEBUG: ERROR_OUTPUT SENT TO LLM:")
    print(f"{banner}")
    print(f"{error_output}")
    print(f"{banner}\n")

    print(f"🤖 Calling {current_model} to debug the failed command...")
    response_text = make_api_request(current_model, api_key, prompt)
    if not response_text:
        return None

    # Distill the model's reply down to a single runnable command.
    fix_command = extract_command_from_response(response_text)
    print(f"🔧 Suggested fix: {fix_command}")
    return fix_command
|
|
1595
590
|
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1599
|
-
print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")
|
|
1600
|
-
|
|
591
|
+
|
|
592
|
+
def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
|
|
593
|
+
"""Call LLM for batch debugging of multiple failed commands"""
|
|
1601
594
|
if not failed_commands:
|
|
1602
|
-
print("⚠️ No failed commands to analyze")
|
|
1603
595
|
return []
|
|
1604
596
|
|
|
597
|
+
current_model = get_current_debug_model()
|
|
598
|
+
|
|
599
|
+
# Get API key
|
|
1605
600
|
if not api_key:
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
if api_key:
|
|
1612
|
-
print(f"🔍 DEBUG: Environment API key value: {api_key}")
|
|
1613
|
-
|
|
1614
|
-
# If not in environment, try to fetch from server using fetch_modal_tokens
|
|
1615
|
-
if not api_key:
|
|
1616
|
-
try:
|
|
1617
|
-
print("🔍 DEBUG: Trying to fetch API key from server...")
|
|
1618
|
-
from fetch_modal_tokens import get_tokens
|
|
1619
|
-
_, _, _, api_key = get_tokens()
|
|
1620
|
-
if api_key:
|
|
1621
|
-
# Set in environment for this session
|
|
1622
|
-
os.environ["ANTHROPIC_API_KEY"] = api_key
|
|
1623
|
-
else:
|
|
1624
|
-
print("⚠️ Could not fetch Anthropic API key from server")
|
|
1625
|
-
except Exception as e:
|
|
1626
|
-
print(f"⚠️ Error fetching API key from server: {e}")
|
|
1627
|
-
|
|
1628
|
-
# Then try credentials manager
|
|
1629
|
-
if not api_key:
|
|
1630
|
-
print("🔍 DEBUG: Trying credentials manager...")
|
|
1631
|
-
try:
|
|
1632
|
-
from credentials_manager import CredentialsManager
|
|
1633
|
-
credentials_manager = CredentialsManager()
|
|
1634
|
-
api_key = credentials_manager.get_anthropic_api_key()
|
|
1635
|
-
if api_key:
|
|
1636
|
-
print(f"🔍 DEBUG: API key from credentials manager: Found")
|
|
1637
|
-
print(f"🔍 DEBUG: Credentials manager API key value: {api_key}")
|
|
1638
|
-
# Set in environment for this session
|
|
1639
|
-
os.environ["ANTHROPIC_API_KEY"] = api_key
|
|
1640
|
-
else:
|
|
1641
|
-
print("⚠️ Could not fetch Anthropic API key from credentials manager")
|
|
1642
|
-
except Exception as e:
|
|
1643
|
-
print(f"⚠️ Error fetching API key from credentials manager: {e}")
|
|
1644
|
-
|
|
1645
|
-
if not api_key:
|
|
1646
|
-
print("❌ No Anthropic API key available for batch debugging")
|
|
1647
|
-
return []
|
|
601
|
+
api_key = get_api_key(current_model)
|
|
602
|
+
|
|
603
|
+
if not api_key:
|
|
604
|
+
print(f"❌ No {current_model} API key available for batch debugging")
|
|
605
|
+
return []
|
|
1648
606
|
|
|
1649
607
|
# Prepare context for batch analysis
|
|
1650
|
-
context_parts = []
|
|
1651
|
-
context_parts.append(f"Current directory: {current_dir}")
|
|
1652
|
-
context_parts.append(f"Sandbox available: {sandbox is not None}")
|
|
608
|
+
context_parts = [f"Current directory: {current_dir}", f"Sandbox available: {sandbox is not None}"]
|
|
1653
609
|
|
|
1654
|
-
# Add failed commands with their errors
|
|
1655
610
|
for i, failed_cmd in enumerate(failed_commands, 1):
|
|
1656
611
|
cmd_type = failed_cmd.get('type', 'main')
|
|
1657
612
|
original_cmd = failed_cmd.get('original_command', '')
|
|
@@ -1668,7 +623,7 @@ def call_anthropic_for_batch_debug(failed_commands, api_key=None, current_dir=No
|
|
|
1668
623
|
if stdout:
|
|
1669
624
|
context_parts.append(f"Standard Output: {stdout}")
|
|
1670
625
|
|
|
1671
|
-
# Create
|
|
626
|
+
# Create batch prompt
|
|
1672
627
|
prompt = f"""You are a debugging assistant analyzing multiple failed commands.
|
|
1673
628
|
|
|
1674
629
|
Context:
|
|
@@ -1676,257 +631,88 @@ Context:
|
|
|
1676
631
|
|
|
1677
632
|
Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:
|
|
1678
633
|
|
|
1679
|
-
FIX_COMMAND_{i}: <the fix command>
|
|
1680
|
-
REASON_{i}: <brief explanation of why the original command failed and how the fix addresses it>
|
|
634
|
+
FIX_COMMAND_{{i}}: <the fix command>
|
|
635
|
+
REASON_{{i}}: <brief explanation of why the original command failed and how the fix addresses it>
|
|
1681
636
|
|
|
1682
637
|
Guidelines:
|
|
1683
638
|
- For file not found errors, first search for the file using 'find . -name filename -type f'
|
|
1684
639
|
- For missing packages, use appropriate package managers (pip, apt-get, npm)
|
|
1685
640
|
- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
|
|
1686
641
|
- For permission errors, suggest commands with sudo if appropriate
|
|
1687
|
-
- For network issues, suggest retry commands or alternative URLs
|
|
1688
642
|
- Keep each fix command simple and focused on the specific error
|
|
1689
643
|
|
|
1690
644
|
Provide fixes for all {len(failed_commands)} failed commands:"""
|
|
1691
|
-
|
|
1692
|
-
# Set up headers for Anthropic API
|
|
1693
|
-
headers = {
|
|
1694
|
-
"x-api-key": api_key,
|
|
1695
|
-
"anthropic-version": "2023-06-01",
|
|
1696
|
-
"content-type": "application/json"
|
|
1697
|
-
}
|
|
1698
645
|
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
"max_tokens": 1000,
|
|
1702
|
-
"messages": [
|
|
1703
|
-
{"role": "user", "content": prompt}
|
|
1704
|
-
]
|
|
1705
|
-
}
|
|
1706
|
-
|
|
1707
|
-
try:
|
|
1708
|
-
print(f"🤖 Calling Anthropic Claude for batch debugging of {len(failed_commands)} commands...")
|
|
1709
|
-
response = requests.post(
|
|
1710
|
-
"https://api.anthropic.com/v1/messages",
|
|
1711
|
-
headers=headers,
|
|
1712
|
-
json=payload,
|
|
1713
|
-
timeout=60
|
|
1714
|
-
)
|
|
1715
|
-
|
|
1716
|
-
if response.status_code == 200:
|
|
1717
|
-
result = response.json()
|
|
1718
|
-
content = result['content'][0]['text']
|
|
1719
|
-
print(f"✅ Batch analysis completed")
|
|
1720
|
-
|
|
1721
|
-
# Parse the response to extract fix commands
|
|
1722
|
-
fixes = []
|
|
1723
|
-
for i in range(1, len(failed_commands) + 1):
|
|
1724
|
-
fix_pattern = f"FIX_COMMAND_{i}: (.+)"
|
|
1725
|
-
reason_pattern = f"REASON_{i}: (.+)"
|
|
1726
|
-
|
|
1727
|
-
fix_match = re.search(fix_pattern, content, re.MULTILINE)
|
|
1728
|
-
reason_match = re.search(reason_pattern, content, re.MULTILINE)
|
|
1729
|
-
|
|
1730
|
-
if fix_match:
|
|
1731
|
-
fix_command = fix_match.group(1).strip()
|
|
1732
|
-
reason = reason_match.group(1).strip() if reason_match else "Anthropic Claude suggested fix"
|
|
1733
|
-
|
|
1734
|
-
# Clean up the fix command
|
|
1735
|
-
if fix_command.startswith('`') and fix_command.endswith('`'):
|
|
1736
|
-
fix_command = fix_command[1:-1]
|
|
1737
|
-
|
|
1738
|
-
fixes.append({
|
|
1739
|
-
'original_command': failed_commands[i-1]['command'],
|
|
1740
|
-
'fix_command': fix_command,
|
|
1741
|
-
'reason': reason,
|
|
1742
|
-
'command_index': i-1
|
|
1743
|
-
})
|
|
1744
|
-
|
|
1745
|
-
print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
|
|
1746
|
-
return fixes
|
|
1747
|
-
else:
|
|
1748
|
-
print(f"❌ Anthropic API error: {response.status_code} - {response.text}")
|
|
1749
|
-
return []
|
|
1750
|
-
|
|
1751
|
-
except Exception as e:
|
|
1752
|
-
print(f"❌ Error during batch debugging: {e}")
|
|
1753
|
-
return []
|
|
1754
|
-
|
|
1755
|
-
def call_openrouter_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
|
|
1756
|
-
"""Call OpenRouter to debug multiple failed commands and suggest fixes for all of them at once"""
|
|
1757
|
-
print("\n🔍 DEBUG: Starting batch OpenRouter debugging...")
|
|
1758
|
-
print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")
|
|
646
|
+
print(f"🤖 Calling {current_model} for batch debugging of {len(failed_commands)} commands...")
|
|
647
|
+
response_text = make_api_request(current_model, api_key, prompt)
|
|
1759
648
|
|
|
1760
|
-
if not
|
|
1761
|
-
print("⚠️ No failed commands to analyze")
|
|
649
|
+
if not response_text:
|
|
1762
650
|
return []
|
|
1763
651
|
|
|
1764
|
-
|
|
1765
|
-
|
|
652
|
+
# Parse the response to extract fix commands
|
|
653
|
+
fixes = []
|
|
654
|
+
for i in range(1, len(failed_commands) + 1):
|
|
655
|
+
fix_pattern = f"FIX_COMMAND_{i}: (.+)"
|
|
656
|
+
reason_pattern = f"REASON_{i}: (.+)"
|
|
1766
657
|
|
|
1767
|
-
|
|
1768
|
-
|
|
1769
|
-
print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
|
|
1770
|
-
if api_key:
|
|
1771
|
-
print(f"🔍 DEBUG: Environment API key value: {api_key}")
|
|
658
|
+
fix_match = re.search(fix_pattern, response_text, re.MULTILINE)
|
|
659
|
+
reason_match = re.search(reason_pattern, response_text, re.MULTILINE)
|
|
1772
660
|
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
|
|
1776
|
-
print("🔍 DEBUG: Trying to fetch API key from server...")
|
|
1777
|
-
from fetch_modal_tokens import get_tokens
|
|
1778
|
-
# Assuming OpenRouter key is the 5th token in the tuple
|
|
1779
|
-
tokens = get_tokens()
|
|
1780
|
-
if len(tokens) >= 5:
|
|
1781
|
-
api_key = tokens[4]
|
|
1782
|
-
except Exception as e:
|
|
1783
|
-
print(f"⚠️ Error fetching OpenRouter API key from server: {e}")
|
|
661
|
+
if fix_match:
|
|
662
|
+
fix_command = fix_match.group(1).strip()
|
|
663
|
+
reason = reason_match.group(1).strip() if reason_match else "LLM suggested fix"
|
|
1784
664
|
|
|
1785
|
-
#
|
|
1786
|
-
if
|
|
1787
|
-
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
if api_key:
|
|
1796
|
-
try:
|
|
1797
|
-
os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
|
|
1798
|
-
with open(os.path.expanduser("~/.gitarsenal/openrouter_key"), "w") as f:
|
|
1799
|
-
f.write(api_key)
|
|
1800
|
-
print("✅ Saved OpenRouter API key for future use")
|
|
1801
|
-
except Exception as e:
|
|
1802
|
-
print(f"⚠️ Could not save API key: {e}")
|
|
1803
|
-
|
|
1804
|
-
# Try to load from saved file if not in environment
|
|
1805
|
-
if not api_key:
|
|
1806
|
-
try:
|
|
1807
|
-
key_file = os.path.expanduser("~/.gitarsenal/openrouter_key")
|
|
1808
|
-
print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
|
|
1809
|
-
if os.path.exists(key_file):
|
|
1810
|
-
with open(key_file, "r") as f:
|
|
1811
|
-
api_key = f.read().strip()
|
|
1812
|
-
if api_key:
|
|
1813
|
-
print("✅ Loaded OpenRouter API key from saved file")
|
|
1814
|
-
print(f"🔍 DEBUG: API key from file: {api_key}")
|
|
1815
|
-
print(f"🔍 DEBUG: API key length: {len(api_key)}")
|
|
1816
|
-
# Also set in environment for this session
|
|
1817
|
-
os.environ["OPENROUTER_API_KEY"] = api_key
|
|
1818
|
-
else:
|
|
1819
|
-
print("🔍 DEBUG: Saved file exists but is empty")
|
|
1820
|
-
else:
|
|
1821
|
-
print("🔍 DEBUG: No saved API key file found")
|
|
1822
|
-
except Exception as e:
|
|
1823
|
-
print(f"⚠️ Could not load saved API key: {e}")
|
|
1824
|
-
|
|
1825
|
-
if not api_key:
|
|
1826
|
-
print("❌ No OpenRouter API key available for batch debugging")
|
|
1827
|
-
return []
|
|
1828
|
-
|
|
1829
|
-
# Prepare context for batch analysis
|
|
1830
|
-
context_parts = []
|
|
1831
|
-
context_parts.append(f"Current directory: {current_dir}")
|
|
1832
|
-
context_parts.append(f"Sandbox available: {sandbox is not None}")
|
|
1833
|
-
|
|
1834
|
-
# Add failed commands with their errors
|
|
1835
|
-
for i, failed_cmd in enumerate(failed_commands, 1):
|
|
1836
|
-
cmd_type = failed_cmd.get('type', 'main')
|
|
1837
|
-
original_cmd = failed_cmd.get('original_command', '')
|
|
1838
|
-
cmd_text = failed_cmd['command']
|
|
1839
|
-
stderr = failed_cmd.get('stderr', '')
|
|
1840
|
-
stdout = failed_cmd.get('stdout', '')
|
|
1841
|
-
|
|
1842
|
-
context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
|
|
1843
|
-
context_parts.append(f"Command: {cmd_text}")
|
|
1844
|
-
if original_cmd and original_cmd != cmd_text:
|
|
1845
|
-
context_parts.append(f"Original Command: {original_cmd}")
|
|
1846
|
-
if stderr:
|
|
1847
|
-
context_parts.append(f"Error Output: {stderr}")
|
|
1848
|
-
if stdout:
|
|
1849
|
-
context_parts.append(f"Standard Output: {stdout}")
|
|
665
|
+
# Clean up the fix command
|
|
666
|
+
if fix_command.startswith('`') and fix_command.endswith('`'):
|
|
667
|
+
fix_command = fix_command[1:-1]
|
|
668
|
+
|
|
669
|
+
fixes.append({
|
|
670
|
+
'original_command': failed_commands[i-1]['command'],
|
|
671
|
+
'fix_command': fix_command,
|
|
672
|
+
'reason': reason,
|
|
673
|
+
'command_index': i-1
|
|
674
|
+
})
|
|
1850
675
|
|
|
1851
|
-
|
|
1852
|
-
|
|
676
|
+
print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
|
|
677
|
+
return fixes
|
|
1853
678
|
|
|
1854
|
-
Context:
|
|
1855
|
-
{chr(10).join(context_parts)}
|
|
1856
679
|
|
|
1857
|
-
|
|
680
|
+
# Legacy function aliases for backward compatibility
|
|
681
|
+
def call_openai_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy OpenAI-specific entry point; delegates to the unified debugger.

    Kept for backward compatibility. The backend actually used is chosen by
    the unified function's configuration, not forced to OpenAI.
    ``use_web_search`` is accepted and forwarded so legacy callers gain the
    same capability as the unified API (previous versions dropped it).
    """
    return call_llm_for_debug(command, error_output, api_key, current_dir, sandbox, use_web_search)
|
|
1858
684
|
|
|
1859
|
-
FIX_COMMAND_{i}: <the fix command>
|
|
1860
|
-
REASON_{i}: <brief explanation of why the original command failed and how the fix addresses it>
|
|
1861
685
|
|
|
1862
|
-
|
|
1863
|
-
-
|
|
1864
|
-
|
|
1865
|
-
- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
|
|
1866
|
-
- For permission errors, suggest commands with sudo if appropriate
|
|
1867
|
-
- For network issues, suggest retry commands or alternative URLs
|
|
1868
|
-
- Keep each fix command simple and focused on the specific error
|
|
686
|
+
def call_anthropic_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy Anthropic-specific entry point; delegates to the unified debugger.

    Kept for backward compatibility. The backend actually used is chosen by
    the unified function's configuration, not forced to Anthropic.
    ``use_web_search`` is accepted and forwarded so legacy callers gain the
    same capability as the unified API (previous versions dropped it).
    """
    return call_llm_for_debug(command, error_output, api_key, current_dir, sandbox, use_web_search)
|
|
1869
689
|
|
|
1870
|
-
Provide fixes for all {len(failed_commands)} failed commands:"""
|
|
1871
690
|
|
|
1872
|
-
|
|
1873
|
-
|
|
1874
|
-
|
|
1875
|
-
|
|
1876
|
-
|
|
1877
|
-
|
|
1878
|
-
|
|
1879
|
-
|
|
1880
|
-
|
|
1881
|
-
|
|
1882
|
-
|
|
1883
|
-
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
|
|
1888
|
-
|
|
1889
|
-
|
|
1890
|
-
|
|
1891
|
-
|
|
1892
|
-
|
|
1893
|
-
|
|
1894
|
-
|
|
1895
|
-
|
|
1896
|
-
|
|
1897
|
-
|
|
1898
|
-
|
|
1899
|
-
|
|
1900
|
-
# Parse the response to extract fix commands
|
|
1901
|
-
fixes = []
|
|
1902
|
-
for i in range(1, len(failed_commands) + 1):
|
|
1903
|
-
fix_pattern = f"FIX_COMMAND_{i}: (.+)"
|
|
1904
|
-
reason_pattern = f"REASON_{i}: (.+)"
|
|
1905
|
-
|
|
1906
|
-
fix_match = re.search(fix_pattern, content, re.MULTILINE)
|
|
1907
|
-
reason_match = re.search(reason_pattern, content, re.MULTILINE)
|
|
1908
|
-
|
|
1909
|
-
if fix_match:
|
|
1910
|
-
fix_command = fix_match.group(1).strip()
|
|
1911
|
-
reason = reason_match.group(1).strip() if reason_match else "OpenRouter suggested fix"
|
|
1912
|
-
|
|
1913
|
-
# Clean up the fix command
|
|
1914
|
-
if fix_command.startswith('`') and fix_command.endswith('`'):
|
|
1915
|
-
fix_command = fix_command[1:-1]
|
|
1916
|
-
|
|
1917
|
-
fixes.append({
|
|
1918
|
-
'original_command': failed_commands[i-1]['command'],
|
|
1919
|
-
'fix_command': fix_command,
|
|
1920
|
-
'reason': reason,
|
|
1921
|
-
'command_index': i-1
|
|
1922
|
-
})
|
|
1923
|
-
|
|
1924
|
-
print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
|
|
1925
|
-
return fixes
|
|
1926
|
-
else:
|
|
1927
|
-
print(f"❌ OpenRouter API error: {response.status_code} - {response.text}")
|
|
1928
|
-
return []
|
|
1929
|
-
|
|
1930
|
-
except Exception as e:
|
|
1931
|
-
print(f"❌ Error during batch debugging: {e}")
|
|
1932
|
-
return []
|
|
691
|
+
def call_openrouter_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy OpenRouter-specific entry point; delegates to the unified debugger.

    Kept for backward compatibility. The backend actually used is chosen by
    the unified function's configuration, not forced to OpenRouter.
    ``use_web_search`` is accepted and forwarded so legacy callers gain the
    same capability as the unified API (previous versions dropped it).
    """
    return call_llm_for_debug(command, error_output, api_key, current_dir, sandbox, use_web_search)
|
|
694
|
+
|
|
695
|
+
|
|
696
|
+
def call_openai_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy OpenAI batch entry point; delegates to the unified batch debugger.

    Kept for backward compatibility. ``use_web_search`` is accepted and
    forwarded so legacy callers gain the same capability as the unified API
    (previous versions dropped it).
    """
    return call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox, use_web_search)
|
|
699
|
+
|
|
700
|
+
|
|
701
|
+
def call_anthropic_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy Anthropic batch entry point; delegates to the unified batch debugger.

    Kept for backward compatibility. ``use_web_search`` is accepted and
    forwarded so legacy callers gain the same capability as the unified API
    (previous versions dropped it).
    """
    return call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox, use_web_search)
|
|
704
|
+
|
|
705
|
+
|
|
706
|
+
def call_openrouter_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy OpenRouter batch entry point; delegates to the unified batch debugger.

    Kept for backward compatibility. ``use_web_search`` is accepted and
    forwarded so legacy callers gain the same capability as the unified API
    (previous versions dropped it).
    """
    return call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox, use_web_search)
|
|
709
|
+
|
|
710
|
+
|
|
711
|
+
def call_groq_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy Groq-specific entry point; delegates to the unified debugger.

    Kept for backward compatibility. The backend actually used is chosen by
    the unified function's configuration, not forced to Groq.
    ``use_web_search`` is accepted and forwarded so legacy callers gain the
    same capability as the unified API (previous versions dropped it).
    """
    return call_llm_for_debug(command, error_output, api_key, current_dir, sandbox, use_web_search)
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def call_groq_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
    """Legacy Groq batch entry point; delegates to the unified batch debugger.

    Kept for backward compatibility. ``use_web_search`` is accepted and
    forwarded so legacy callers gain the same capability as the unified API
    (previous versions dropped it).
    """
    return call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox, use_web_search)
|