gitarsenal-cli 1.9.21 → 1.9.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/.venv_status.json +1 -1
  2. package/package.json +1 -1
  3. package/python/__pycache__/auth_manager.cpython-313.pyc +0 -0
  4. package/python/__pycache__/command_manager.cpython-313.pyc +0 -0
  5. package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
  6. package/python/__pycache__/llm_debugging.cpython-313.pyc +0 -0
  7. package/python/__pycache__/modal_container.cpython-313.pyc +0 -0
  8. package/python/__pycache__/shell.cpython-313.pyc +0 -0
  9. package/python/api_integration.py +0 -0
  10. package/python/command_manager.py +613 -0
  11. package/python/credentials_manager.py +0 -0
  12. package/python/fetch_modal_tokens.py +0 -0
  13. package/python/fix_modal_token.py +0 -0
  14. package/python/fix_modal_token_advanced.py +0 -0
  15. package/python/gitarsenal.py +0 -0
  16. package/python/gitarsenal_proxy_client.py +0 -0
  17. package/python/llm_debugging.py +1369 -0
  18. package/python/modal_container.py +626 -0
  19. package/python/setup.py +15 -0
  20. package/python/setup_modal_token.py +0 -39
  21. package/python/shell.py +627 -0
  22. package/python/test_modalSandboxScript.py +75 -2639
  23. package/scripts/postinstall.js +22 -23
  24. package/python/__pycache__/credentials_manager.cpython-313.pyc +0 -0
  25. package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc +0 -0
  26. package/python/__pycache__/test_modalSandboxScript_stable.cpython-313.pyc +0 -0
  27. package/python/debug_delete.py +0 -167
  28. package/python/documentation.py +0 -76
  29. package/python/fix_setup_commands.py +0 -116
  30. package/python/modal_auth_patch.py +0 -178
  31. package/python/modal_proxy_service.py +0 -665
  32. package/python/modal_token_solution.py +0 -293
  33. package/python/test_dynamic_commands.py +0 -147
  34. package/test_modalSandboxScript.py +0 -5004
@@ -0,0 +1,1369 @@
1
+ import os
2
+ import re
3
+ import json
4
+ import requests
5
+ import openai
6
+ import anthropic
7
+
8
+
9
+ def get_stored_credentials():
10
+ """Load stored credentials from ~/.gitarsenal/credentials.json"""
11
+ import json
12
+ from pathlib import Path
13
+
14
+ try:
15
+ credentials_file = Path.home() / ".gitarsenal" / "credentials.json"
16
+ if credentials_file.exists():
17
+ with open(credentials_file, 'r') as f:
18
+ credentials = json.load(f)
19
+ return credentials
20
+ else:
21
+ return {}
22
+ except Exception as e:
23
+ print(f"⚠️ Error loading stored credentials: {e}")
24
+ return {}
25
+
26
+ def generate_auth_context(stored_credentials):
27
+ """Generate simple authentication context for the OpenAI prompt"""
28
+ if not stored_credentials:
29
+ return "No stored credentials available."
30
+
31
+ auth_context = "Available stored credentials (use actual values in commands):\n"
32
+
33
+ for key, value in stored_credentials.items():
34
+ # Mask the actual value for security in logs, but provide the real value
35
+ masked_value = value[:8] + "..." if len(value) > 8 else "***"
36
+ auth_context += f"- {key}: {masked_value} (actual value: {value})\n"
37
+
38
+ return auth_context
39
+
40
+ def get_current_debug_model():
41
+ """Get the currently configured debugging model preference"""
42
+ return os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
43
+
44
+ def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
45
+ """Unified function to call LLM for debugging - routes to OpenAI or Anthropic based on configuration"""
46
+ current_model = get_current_debug_model()
47
+
48
+ print(f"🔍 DEBUG: Using {current_model.upper()} for debugging...")
49
+
50
+ if current_model == "anthropic":
51
+ # Try to get Anthropic API key if not provided
52
+ if not api_key:
53
+ # First try environment variable
54
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
55
+
56
+ # If not in environment, try to fetch from server using fetch_modal_tokens
57
+ if not api_key:
58
+ try:
59
+ from fetch_modal_tokens import get_tokens
60
+ _, _, _, api_key = get_tokens()
61
+ except Exception as e:
62
+ print(f"⚠️ Error fetching Anthropic API key from server: {e}")
63
+
64
+ # Then try credentials manager
65
+ if not api_key:
66
+ try:
67
+ from credentials_manager import CredentialsManager
68
+ credentials_manager = CredentialsManager()
69
+ api_key = credentials_manager.get_anthropic_api_key()
70
+ except Exception as e:
71
+ print(f"⚠️ Error getting Anthropic API key from credentials manager: {e}")
72
+
73
+ return call_anthropic_for_debug(command, error_output, api_key, current_dir, sandbox)
74
+ else:
75
+ # Default to OpenAI
76
+ # Try to get OpenAI API key if not provided
77
+ if not api_key:
78
+ # First try environment variable
79
+ api_key = os.environ.get("OPENAI_API_KEY")
80
+
81
+ # If not in environment, try to fetch from server using fetch_modal_tokens
82
+ if not api_key:
83
+ try:
84
+ from fetch_modal_tokens import get_tokens
85
+ _, _, api_key, _ = get_tokens()
86
+ except Exception as e:
87
+ print(f"⚠️ Error fetching OpenAI API key from server: {e}")
88
+
89
+ # Then try credentials manager
90
+ if not api_key:
91
+ try:
92
+ from credentials_manager import CredentialsManager
93
+ credentials_manager = CredentialsManager()
94
+ api_key = credentials_manager.get_openai_api_key()
95
+ except Exception as e:
96
+ print(f"⚠️ Error getting OpenAI API key from credentials manager: {e}")
97
+
98
+ return call_openai_for_debug(command, error_output, api_key, current_dir, sandbox)
99
+
100
+ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
101
+ """Call OpenAI to debug a failed command and suggest a fix"""
102
+ print("\n🔍 DEBUG: Starting LLM debugging...")
103
+ print(f"🔍 DEBUG: Command: {command}")
104
+ print(f"🔍 DEBUG: Error output length: {len(error_output) if error_output else 0}")
105
+ print(f"🔍 DEBUG: Current directory: {current_dir}")
106
+ print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")
107
+
108
+ # Define _to_str function locally to avoid NameError
109
+ def _to_str(maybe_bytes):
110
+ try:
111
+ return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
112
+ except UnicodeDecodeError:
113
+ # Handle non-UTF-8 bytes by replacing invalid characters
114
+ if isinstance(maybe_bytes, (bytes, bytearray)):
115
+ return maybe_bytes.decode('utf-8', errors='replace')
116
+ else:
117
+ return str(maybe_bytes)
118
+ except Exception:
119
+ # Last resort fallback
120
+ return str(maybe_bytes)
121
+
122
+ # Skip debugging for certain commands that commonly return non-zero exit codes
123
+ # but aren't actually errors (like test commands)
124
+ if command.strip().startswith("test "):
125
+ print("🔍 Skipping debugging for test command - non-zero exit code is expected behavior")
126
+ return None
127
+
128
+ # Validate error_output - if it's empty, we can't debug effectively
129
+ if not error_output or not error_output.strip():
130
+ print("⚠️ Error output is empty. Cannot effectively debug the command.")
131
+ print("⚠️ Skipping OpenAI debugging due to lack of error information.")
132
+ return None
133
+
134
+ # Try to get API key from multiple sources
135
+ if not api_key:
136
+ print("🔍 DEBUG: No API key provided, searching for one...")
137
+
138
+ # First try environment variable
139
+ api_key = os.environ.get("OPENAI_API_KEY")
140
+ print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
141
+ if api_key:
142
+ print(f"🔍 DEBUG: Environment API key value: {api_key}")
143
+
144
+ # If not in environment, try to fetch from server using fetch_modal_tokens
145
+ if not api_key:
146
+ try:
147
+ print("🔍 DEBUG: Trying to fetch API key from server...")
148
+ from fetch_modal_tokens import get_tokens
149
+ _, _, api_key, _ = get_tokens()
150
+ if api_key:
151
+ # Set in environment for this session
152
+ os.environ["OPENAI_API_KEY"] = api_key
153
+ else:
154
+ print("⚠️ Could not fetch OpenAI API key from server")
155
+ except Exception as e:
156
+ print(f"⚠️ Error fetching API key from server: {e}")
157
+
158
+ # Store the API key in a persistent file if found
159
+ if api_key:
160
+ try:
161
+ os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
162
+ with open(os.path.expanduser("~/.gitarsenal/openai_key"), "w") as f:
163
+ f.write(api_key)
164
+ print("✅ Saved OpenAI API key for future use")
165
+ except Exception as e:
166
+ print(f"⚠️ Could not save API key: {e}")
167
+
168
+ # Try to load from saved file if not in environment
169
+ if not api_key:
170
+ try:
171
+ key_file = os.path.expanduser("~/.gitarsenal/openai_key")
172
+ print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
173
+ if os.path.exists(key_file):
174
+ with open(key_file, "r") as f:
175
+ api_key = f.read().strip()
176
+ if api_key:
177
+ print("✅ Loaded OpenAI API key from saved file")
178
+ print(f"🔍 DEBUG: API key from file: {api_key}")
179
+ print(f"🔍 DEBUG: API key length: {len(api_key)}")
180
+ # Also set in environment for this session
181
+ os.environ["OPENAI_API_KEY"] = api_key
182
+ else:
183
+ print("🔍 DEBUG: Saved file exists but is empty")
184
+ else:
185
+ print("🔍 DEBUG: No saved API key file found")
186
+ except Exception as e:
187
+ print(f"⚠️ Could not load saved API key: {e}")
188
+
189
+ # Then try credentials manager
190
+ if not api_key:
191
+ print("🔍 DEBUG: Trying credentials manager...")
192
+ try:
193
+ from credentials_manager import CredentialsManager
194
+ credentials_manager = CredentialsManager()
195
+ api_key = credentials_manager.get_openai_api_key()
196
+ if api_key:
197
+ print(f"🔍 DEBUG: API key from credentials manager: Found")
198
+ print(f"🔍 DEBUG: Credentials manager API key value: {api_key}")
199
+ # Set in environment for this session
200
+ os.environ["OPENAI_API_KEY"] = api_key
201
+ else:
202
+ print(f"🔍 DEBUG: API key from credentials manager: Not found")
203
+ except ImportError as e:
204
+ print(f"🔍 DEBUG: Credentials manager not available: {e}")
205
+ # Fall back to direct input if credentials_manager is not available
206
+ pass
207
+
208
+ # Finally, prompt the user if still no API key
209
+ if not api_key:
210
+ print("🔍 DEBUG: No API key found in any source, prompting user...")
211
+ print("\n" + "="*60)
212
+ print("🔑 OPENAI API KEY REQUIRED FOR DEBUGGING")
213
+ print("="*60)
214
+ print("To debug failed commands, an OpenAI API key is needed.")
215
+ print("📝 Please paste your OpenAI API key below:")
216
+ print(" (Your input will be hidden for security)")
217
+ print("-" * 60)
218
+
219
+ try:
220
+ api_key = getpass.getpass("OpenAI API Key: ").strip()
221
+ if not api_key:
222
+ print("❌ No API key provided. Skipping debugging.")
223
+ return None
224
+ print("✅ API key received successfully!")
225
+ print(f"🔍 DEBUG: User-provided API key: {api_key}")
226
+ # Save the API key to environment for future use in this session
227
+ os.environ["OPENAI_API_KEY"] = api_key
228
+ except KeyboardInterrupt:
229
+ print("\n❌ API key input cancelled by user.")
230
+ return None
231
+ except Exception as e:
232
+ print(f"❌ Error getting API key: {e}")
233
+ return None
234
+
235
+ # If we still don't have an API key, we can't proceed
236
+ if not api_key:
237
+ print("❌ No OpenAI API key available. Cannot perform LLM debugging.")
238
+ print("💡 To enable LLM debugging, set the OPENAI_API_KEY environment variable")
239
+ return None
240
+
241
+ # print(f"✅ OpenAI API key available (length: {len(api_key)})")
242
+
243
+ # Gather additional context to help with debugging
244
+ directory_context = ""
245
+ system_info = ""
246
+ command_history = ""
247
+ file_context = ""
248
+
249
+ if sandbox:
250
+ try:
251
+ print("🔍 Getting system information for better debugging...")
252
+
253
+ # Get OS information
254
+ os_info_cmd = """
255
+ echo "OS Information:"
256
+ cat /etc/os-release 2>/dev/null || echo "OS release info not available"
257
+ echo -e "\nKernel Information:"
258
+ uname -a
259
+ echo -e "\nPython Information:"
260
+ python --version
261
+ pip --version
262
+ echo -e "\nPackage Manager:"
263
+ which apt 2>/dev/null && echo "apt available" || echo "apt not available"
264
+ which yum 2>/dev/null && echo "yum available" || echo "yum not available"
265
+ which dnf 2>/dev/null && echo "dnf available" || echo "dnf not available"
266
+ which apk 2>/dev/null && echo "apk available" || echo "apk not available"
267
+ echo -e "\nEnvironment Variables:"
268
+ env | grep -E "^(PATH|PYTHON|VIRTUAL_ENV|HOME|USER|SHELL|LANG)" || echo "No relevant env vars found"
269
+ """
270
+
271
+ os_result = sandbox.exec("bash", "-c", os_info_cmd)
272
+ os_output = ""
273
+ for line in os_result.stdout:
274
+ os_output += _to_str(line)
275
+ os_result.wait()
276
+
277
+ system_info = f"""
278
+ System Information:
279
+ {os_output}
280
+ """
281
+ print("✅ System information gathered successfully")
282
+ except Exception as e:
283
+ print(f"⚠️ Error getting system information: {e}")
284
+ system_info = "System information not available\n"
285
+
286
+ if current_dir and sandbox:
287
+ try:
288
+ # print("🔍 Getting directory context for better debugging...")
289
+
290
+ # Get current directory contents
291
+ ls_result = sandbox.exec("bash", "-c", "ls -la")
292
+ ls_output = ""
293
+ for line in ls_result.stdout:
294
+ ls_output += _to_str(line)
295
+ ls_result.wait()
296
+
297
+ # Get parent directory contents
298
+ parent_result = sandbox.exec("bash", "-c", "ls -la ../")
299
+ parent_ls = ""
300
+ for line in parent_result.stdout:
301
+ parent_ls += _to_str(line)
302
+ parent_result.wait()
303
+
304
+ directory_context = f"""
305
+ Current directory contents:
306
+ {ls_output}
307
+
308
+ Parent directory contents:
309
+ {parent_ls}
310
+ """
311
+ print("✅ Directory context gathered successfully")
312
+
313
+ # Check for relevant files that might provide additional context
314
+ # For example, if error mentions a specific file, try to get its content
315
+ relevant_files = []
316
+ error_files = re.findall(r'(?:No such file or directory|cannot open|not found): ([^\s:]+)', error_output)
317
+ if error_files:
318
+ for file_path in error_files:
319
+ # Clean up the file path
320
+ file_path = file_path.strip("'\"")
321
+ if not os.path.isabs(file_path):
322
+ file_path = os.path.join(current_dir, file_path)
323
+
324
+ # Try to get the parent directory if the file doesn't exist
325
+ if '/' in file_path:
326
+ parent_file_dir = os.path.dirname(file_path)
327
+ relevant_files.append(parent_file_dir)
328
+
329
+ # Look for package.json, requirements.txt, etc.
330
+ common_config_files = ["package.json", "requirements.txt", "pyproject.toml", "setup.py",
331
+ "Pipfile", "Dockerfile", "docker-compose.yml", "Makefile"]
332
+
333
+ for config_file in common_config_files:
334
+ check_cmd = f"test -f {current_dir}/{config_file}"
335
+ check_result = sandbox.exec("bash", "-c", check_cmd)
336
+ check_result.wait()
337
+ if check_result.returncode == 0:
338
+ relevant_files.append(f"{current_dir}/{config_file}")
339
+
340
+ # Get content of relevant files
341
+ if relevant_files:
342
+ file_context = "\nRelevant file contents:\n"
343
+ for file_path in relevant_files[:2]: # Limit to 2 files to avoid too much context
344
+ try:
345
+ file_check_cmd = f"test -f {file_path}"
346
+ file_check = sandbox.exec("bash", "-c", file_check_cmd)
347
+ file_check.wait()
348
+
349
+ if file_check.returncode == 0:
350
+ # It's a file, get its content
351
+ cat_cmd = f"cat {file_path}"
352
+ cat_result = sandbox.exec("bash", "-c", cat_cmd)
353
+ file_content = ""
354
+ for line in cat_result.stdout:
355
+ file_content += _to_str(line)
356
+ cat_result.wait()
357
+
358
+ # Truncate if too long
359
+ if len(file_content) > 1000:
360
+ file_content = file_content[:1000] + "\n... (truncated)"
361
+
362
+ file_context += f"\n--- {file_path} ---\n{file_content}\n"
363
+ else:
364
+ # It's a directory, list its contents
365
+ ls_cmd = f"ls -la {file_path}"
366
+ ls_dir_result = sandbox.exec("bash", "-c", ls_cmd)
367
+ dir_content = ""
368
+ for line in ls_dir_result.stdout:
369
+ dir_content += _to_str(line)
370
+ ls_dir_result.wait()
371
+
372
+ file_context += f"\n--- Directory: {file_path} ---\n{dir_content}\n"
373
+ except Exception as e:
374
+ print(f"⚠️ Error getting content of {file_path}: {e}")
375
+
376
+ # print(f"✅ Additional file context gathered from {len(relevant_files)} relevant files")
377
+
378
+ except Exception as e:
379
+ print(f"⚠️ Error getting directory context: {e}")
380
+ directory_context = f"\nCurrent directory: {current_dir}\n"
381
+
382
+ # Prepare the API request
383
+ headers = {
384
+ "Content-Type": "application/json",
385
+ "Authorization": f"Bearer {api_key}"
386
+ }
387
+
388
+ stored_credentials = get_stored_credentials()
389
+ auth_context = generate_auth_context(stored_credentials)
390
+
391
+ # Create a prompt for the LLM
392
+ print("\n" + "="*60)
393
+ print("DEBUG: ERROR_OUTPUT SENT TO LLM:")
394
+ print("="*60)
395
+ print(f"{error_output}")
396
+ print("="*60 + "\n")
397
+
398
+ prompt = f"""
399
+ I'm trying to run the following command in a Linux environment:
400
+
401
+ ```
402
+ {command}
403
+ ```
404
+
405
+ But it failed with this error:
406
+
407
+ ```
408
+ {error_output}
409
+ ```
410
+ {system_info}
411
+ {directory_context}
412
+ {file_context}
413
+
414
+ AVAILABLE CREDENTIALS:
415
+ {auth_context}
416
+
417
+ Please analyze the error and provide ONLY a single terminal command that would fix the issue.
418
+ Consider the current directory, system information, directory contents, and available credentials carefully before suggesting a solution.
419
+
420
+ IMPORTANT GUIDELINES:
421
+ 1. For any commands that might ask for yes/no confirmation, use the appropriate non-interactive flag:
422
+ - For apt/apt-get: use -y or --yes
423
+ - For rm: use -f or --force
424
+
425
+ 2. If the error indicates a file is not found:
426
+ - FIRST try to search for the file using: find . -name "filename" -type f 2>/dev/null
427
+ - If found, navigate to that directory using: cd /path/to/directory
428
+ - If not found, then consider creating the file or installing missing packages
429
+
430
+ 3. For missing packages or dependencies:
431
+ - Use pip install for Python packages
432
+ - Use apt-get install -y for system packages
433
+ - Use npm install for Node.js packages
434
+
435
+ 4. For authentication issues:
436
+ - Analyze the error to determine what type of authentication is needed
437
+ - ALWAYS use the actual credential values from the AVAILABLE CREDENTIALS section above (NOT placeholders)
438
+ - Look for the specific API key or token needed in the auth_context and use its exact value
439
+ - Common patterns:
440
+ * wandb errors: use wandb login with the actual WANDB_API_KEY value from auth_context
441
+ * huggingface errors: use huggingface-cli login with the actual HF_TOKEN or HUGGINGFACE_TOKEN value from auth_context
442
+ * github errors: configure git credentials with the actual GITHUB_TOKEN value from auth_context
443
+ * kaggle errors: create ~/.kaggle/kaggle.json with the actual KAGGLE_USERNAME and KAGGLE_KEY values from auth_context
444
+ * API errors: export the appropriate API key as environment variable using the actual value from auth_context
445
+
446
+ 5. Environment variable exports:
447
+ - Use export commands for API keys that need to be in environment
448
+ - ALWAYS use the actual credential values from auth_context, never use placeholders like "YOUR_API_KEY"
449
+ - Example: export OPENAI_API_KEY="sk-..." (using the actual key from auth_context)
450
+
451
+ 6. CRITICAL: When using any API key, token, or credential:
452
+ - Find the exact value in the AVAILABLE CREDENTIALS section
453
+ - Use that exact value in your command
454
+ - Do not use generic placeholders or dummy values
455
+ - The auth_context contains real, usable credentials
456
+
457
+ 7. For Git SSH authentication failures:
458
+ - If the error contains "Host key verification failed" or "Could not read from remote repository"
459
+ - ALWAYS convert SSH URLs to HTTPS URLs for public repositories
460
+ - Replace git@github.com:username/repo.git with https://github.com/username/repo.git
461
+ - This works for public repositories without authentication
462
+ - Example: git clone https://github.com/xg-chu/ARTalk.git
463
+
464
+ Do not provide any explanations, just the exact command to run.
465
+ """
466
+
467
+ # Prepare the API request payload
468
+ # print("🔍 DEBUG: Preparing API request...")
469
+
470
+ # Try to use GPT-4 first, but fall back to other models if needed
471
+ models_to_try = [
472
+ "gpt-4o-mini", # First choice: GPT-4o (most widely available)
473
+ ]
474
+
475
+ # Check if we have a preferred model in environment
476
+ preferred_model = os.environ.get("OPENAI_MODEL")
477
+ if preferred_model:
478
+ # Insert the preferred model at the beginning of the list
479
+ models_to_try.insert(0, preferred_model)
480
+ # print(f"✅ Using preferred model from environment: {preferred_model}")
481
+
482
+ # Remove duplicates while preserving order
483
+ models_to_try = list(dict.fromkeys(models_to_try))
484
+ # print(f"🔍 DEBUG: Models to try: {models_to_try}")
485
+
486
+ # Function to make the API call with a specific model
487
+ def try_api_call(model_name, retries=2, backoff_factor=1.5):
488
+ # print(f"🔍 DEBUG: Attempting API call with model: {model_name}")
489
+ # print(f"🔍 DEBUG: API key available: {'Yes' if api_key else 'No'}")
490
+ # if api_key:
491
+ # print(f"🔍 DEBUG: API key length: {len(api_key)}")
492
+ # print(f"🔍 DEBUG: API key starts with: {api_key[:10]}...")
493
+
494
+ payload = {
495
+ "model": model_name,
496
+ "messages": [
497
+ {"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue. Analyze the issue first, understand why it's happening, then provide the command to fix it. For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found. For missing packages, use appropriate package managers (pip, apt-get, npm). For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git). For authentication, suggest login commands with placeholders."},
498
+ {"role": "user", "content": prompt}
499
+ ],
500
+ "temperature": 0.2,
501
+ "max_tokens": 300
502
+ }
503
+
504
+ print(f"🔍 DEBUG: Payload prepared, prompt length: {len(prompt)}")
505
+
506
+ # Add specific handling for common errors
507
+ last_error = None
508
+ for attempt in range(retries + 1):
509
+ try:
510
+ if attempt > 0:
511
+ # Exponential backoff
512
+ wait_time = backoff_factor * (2 ** (attempt - 1))
513
+ print(f"⏱️ Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
514
+ time.sleep(wait_time)
515
+
516
+ print(f"🤖 Calling OpenAI with {model_name} model to debug the failed command...")
517
+ print(f"🔍 DEBUG: Making POST request to OpenAI API...")
518
+ response = requests.post(
519
+ "https://api.openai.com/v1/chat/completions",
520
+ headers=headers,
521
+ json=payload,
522
+ timeout=45 # Increased timeout for reliability
523
+ )
524
+
525
+ print(f"🔍 DEBUG: Response received, status code: {response.status_code}")
526
+
527
+ # Handle specific status codes
528
+ if response.status_code == 200:
529
+ print(f"🔍 DEBUG: Success! Response length: {len(response.text)}")
530
+ return response.json(), None
531
+ elif response.status_code == 401:
532
+ error_msg = "Authentication error: Invalid API key"
533
+ print(f"❌ {error_msg}")
534
+ print(f"🔍 DEBUG: Response text: {response.text}")
535
+ # Don't retry auth errors
536
+ return None, error_msg
537
+ elif response.status_code == 429:
538
+ error_msg = "Rate limit exceeded or quota reached"
539
+ print(f"⚠️ {error_msg}")
540
+ print(f"🔍 DEBUG: Response text: {response.text}")
541
+ # Always retry rate limit errors with increasing backoff
542
+ last_error = error_msg
543
+ continue
544
+ elif response.status_code == 500:
545
+ error_msg = "OpenAI server error"
546
+ print(f"⚠️ {error_msg}")
547
+ print(f"🔍 DEBUG: Response text: {response.text}")
548
+ # Retry server errors
549
+ last_error = error_msg
550
+ continue
551
+ else:
552
+ error_msg = f"Status code: {response.status_code}, Response: {response.text}"
553
+ print(f"⚠️ OpenAI API error: {error_msg}")
554
+ print(f"🔍 DEBUG: Full response text: {response.text}")
555
+ last_error = error_msg
556
+ # Only retry if we have attempts left
557
+ if attempt < retries:
558
+ continue
559
+ return None, error_msg
560
+ except requests.exceptions.Timeout:
561
+ error_msg = "Request timed out"
562
+ # print(f"⚠️ {error_msg}")
563
+ # print(f"🔍 DEBUG: Timeout after 45 seconds")
564
+ last_error = error_msg
565
+ # Always retry timeouts
566
+ continue
567
+ except requests.exceptions.ConnectionError:
568
+ error_msg = "Connection error"
569
+ print(f"⚠️ {error_msg}")
570
+ print(f"🔍 DEBUG: Connection failed to api.openai.com")
571
+ last_error = error_msg
572
+ # Always retry connection errors
573
+ continue
574
+ except Exception as e:
575
+ error_msg = str(e)
576
+ print(f"⚠️ Unexpected error: {error_msg}")
577
+ print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
578
+ print(f"🔍 DEBUG: Exception details: {str(e)}")
579
+ last_error = error_msg
580
+ # Only retry if we have attempts left
581
+ if attempt < retries:
582
+ continue
583
+ return None, error_msg
584
+
585
+ # If we get here, all retries failed
586
+ return None, last_error
587
+
588
+ # Try each model in sequence until one works
589
+ result = None
590
+ last_error = None
591
+
592
+ for model in models_to_try:
593
+ result, error = try_api_call(model)
594
+ if result:
595
+ # print(f"✅ Successfully got response from {model}")
596
+ break
597
+ else:
598
+ print(f"⚠️ Failed to get response from {model}: {error}")
599
+ last_error = error
600
+
601
+ if not result:
602
+ print(f"❌ All model attempts failed. Last error: {last_error}")
603
+ return None
604
+
605
+ # Process the response
606
+ try:
607
+ print(f"🔍 DEBUG: Processing OpenAI response...")
608
+ # print(f"🔍 DEBUG: Response structure: {list(result.keys())}")
609
+ print(f"🔍 DEBUG: Choices count: {len(result.get('choices', []))}")
610
+
611
+ fix_command = result["choices"][0]["message"]["content"].strip()
612
+ print(f"🔍 DEBUG: Raw response content: {fix_command}")
613
+
614
+ # Save the original response for debugging
615
+ original_response = fix_command
616
+
617
+ # Extract just the command if it's wrapped in backticks or explanation
618
+ if "```" in fix_command:
619
+ # Extract content between backticks
620
+ import re
621
+ code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
622
+ if code_blocks:
623
+ fix_command = code_blocks[0].strip()
624
+ print(f"✅ Extracted command from code block: {fix_command}")
625
+
626
+ # If the response still has explanatory text, try to extract just the command
627
+ if len(fix_command.split('\n')) > 1:
628
+ # First try to find lines that look like commands (start with common command prefixes)
629
+ command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
630
+ 'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
631
+ 'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
632
+ 'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']
633
+
634
+ # Check for lines that start with common command prefixes
635
+ command_lines = [line.strip() for line in fix_command.split('\n')
636
+ if any(line.strip().startswith(prefix) for prefix in command_prefixes)]
637
+
638
+ if command_lines:
639
+ # Use the first command line found
640
+ fix_command = command_lines[0]
641
+ print(f"✅ Identified command by prefix: {fix_command}")
642
+ else:
643
+ # Try to find lines that look like commands (contain common shell patterns)
644
+ shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
645
+ command_lines = [line.strip() for line in fix_command.split('\n')
646
+ if any(pattern in line for pattern in shell_patterns)]
647
+
648
+ if command_lines:
649
+ # Use the first command line found
650
+ fix_command = command_lines[0]
651
+ print(f"✅ Identified command by shell pattern: {fix_command}")
652
+ else:
653
+ # Fall back to the shortest non-empty line as it's likely the command
654
+ lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
655
+ if lines:
656
+ # Exclude very short lines that are likely not commands
657
+ valid_lines = [line for line in lines if len(line) > 5]
658
+ if valid_lines:
659
+ fix_command = min(valid_lines, key=len)
660
+ else:
661
+ fix_command = min(lines, key=len)
662
+ print(f"✅ Selected shortest line as command: {fix_command}")
663
+
664
+ # Clean up the command - remove any trailing periods or quotes
665
+ fix_command = fix_command.rstrip('.;"\'')
666
+
667
+ # Remove common prefixes that LLMs sometimes add
668
+ prefixes_to_remove = [
669
+ "Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
670
+ "You should run: ", "You can run: ", "You need to run: "
671
+ ]
672
+ for prefix in prefixes_to_remove:
673
+ if fix_command.startswith(prefix):
674
+ fix_command = fix_command[len(prefix):].strip()
675
+ print(f"✅ Removed prefix: {prefix}")
676
+ break
677
+
678
+ # If the command is still multi-line or very long, it might not be a valid command
679
+ if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
680
+ print("⚠️ Extracted command appears invalid (multi-line or too long)")
681
+ print("🔍 Original response from LLM:")
682
+ print("-" * 60)
683
+ print(original_response)
684
+ print("-" * 60)
685
+ print("⚠️ Using best guess for command")
686
+
687
+ print(f"🔧 Suggested fix: {fix_command}")
688
+ print(f"🔍 DEBUG: Returning fix command: {fix_command}")
689
+ return fix_command
690
+ except Exception as e:
691
+ print(f"❌ Error processing OpenAI response: {e}")
692
+ print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
693
+ print(f"🔍 DEBUG: Exception details: {str(e)}")
694
+ return None
695
+
696
def call_openai_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
    """Call OpenAI to debug multiple failed commands and suggest fixes for all of them at once.

    Args:
        failed_commands: list of dicts, each with at least a 'command' key and
            optionally 'type', 'original_command', 'stderr', 'stdout'.
        api_key: OpenAI API key. Unlike the Anthropic variant, no fallback
            lookup is attempted here; without a key an empty list is returned.
        current_dir: working directory string reported in the prompt context.
        sandbox: sandbox object; only its presence is reported to the model.

    Returns:
        List of dicts with keys 'original_command', 'fix_command', 'reason'
        and 'command_index' — one per fix parsed from the model's reply.
        Empty list on any failure.
    """
    print("\n🔍 DEBUG: Starting batch LLM debugging...")
    print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")

    if not failed_commands:
        print("⚠️ No failed commands to analyze")
        return []

    if not api_key:
        print("❌ No OpenAI API key provided for batch debugging")
        return []

    # Prepare context for batch analysis
    context_parts = []
    context_parts.append(f"Current directory: {current_dir}")
    context_parts.append(f"Sandbox available: {sandbox is not None}")

    # Add failed commands with their errors
    for i, failed_cmd in enumerate(failed_commands, 1):
        cmd_type = failed_cmd.get('type', 'main')
        original_cmd = failed_cmd.get('original_command', '')
        cmd_text = failed_cmd['command']
        stderr = failed_cmd.get('stderr', '')
        stdout = failed_cmd.get('stdout', '')

        context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
        context_parts.append(f"Command: {cmd_text}")
        if original_cmd and original_cmd != cmd_text:
            context_parts.append(f"Original Command: {original_cmd}")
        if stderr:
            context_parts.append(f"Error Output: {stderr}")
        if stdout:
            context_parts.append(f"Standard Output: {stdout}")

    # BUG FIX: the response-format template previously interpolated the stale
    # loop variable `i` into the f-string (showing e.g. only "FIX_COMMAND_3"
    # for a 3-command batch). Build one explicit template pair per command so
    # the model is told the exact labels the parser below searches for.
    format_lines = []
    for idx in range(1, len(failed_commands) + 1):
        format_lines.append(f"FIX_COMMAND_{idx}: <the fix command>")
        format_lines.append(f"REASON_{idx}: <brief explanation of why the original command failed and how the fix addresses it>")

    # Create the prompt for batch analysis
    prompt = f"""You are a debugging assistant analyzing multiple failed commands.

Context:
{chr(10).join(context_parts)}

Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:

{chr(10).join(format_lines)}

Guidelines:
- For file not found errors, first search for the file using 'find . -name filename -type f'
- For missing packages, use appropriate package managers (pip, apt-get, npm)
- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
- For permission errors, suggest commands with sudo if appropriate
- For network issues, suggest retry commands or alternative URLs
- Keep each fix command simple and focused on the specific error

Provide fixes for all {len(failed_commands)} failed commands:"""

    # Make the API call
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "gpt-4o-mini",  # Use a more capable model for batch analysis
        "messages": [
            {"role": "system", "content": "You are a debugging assistant. Analyze failed commands and provide specific fix commands. Return only the fix commands and reasons in the specified format."},
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.1,
        "max_tokens": 1000
    }

    try:
        print(f"🤖 Calling OpenAI for batch debugging of {len(failed_commands)} commands...")
        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=60
        )

        if response.status_code == 200:
            result = response.json()
            content = result['choices'][0]['message']['content']
            print(f"✅ Batch analysis completed")

            # Parse the response to extract fix commands
            fixes = []
            for i in range(1, len(failed_commands) + 1):
                fix_pattern = f"FIX_COMMAND_{i}: (.+)"
                reason_pattern = f"REASON_{i}: (.+)"

                fix_match = re.search(fix_pattern, content, re.MULTILINE)
                reason_match = re.search(reason_pattern, content, re.MULTILINE)

                if fix_match:
                    fix_command = fix_match.group(1).strip()
                    reason = reason_match.group(1).strip() if reason_match else "LLM suggested fix"

                    # Clean up the fix command (strip surrounding backticks)
                    if fix_command.startswith('`') and fix_command.endswith('`'):
                        fix_command = fix_command[1:-1]

                    fixes.append({
                        'original_command': failed_commands[i-1]['command'],
                        'fix_command': fix_command,
                        'reason': reason,
                        'command_index': i-1
                    })

            print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
            return fixes
        else:
            print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
            return []

    except Exception as e:
        print(f"❌ Error during batch debugging: {e}")
        return []
815
+
816
def call_anthropic_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
    """Call Anthropic Claude to debug a failed command and suggest a fix.

    Args:
        command: the shell command that failed.
        error_output: captured error text (str or bytes); empty output aborts
            debugging since there is nothing to analyze.
        api_key: Anthropic API key. When None, the environment, the token
            server, the credentials manager and a saved key file are tried.
        current_dir: working directory string reported in the prompt context.
        sandbox: sandbox object; only its presence is reported to the model.

    Returns:
        The suggested fix command as a string, or None when debugging is
        skipped (test commands, empty error output, no key) or the API fails.
    """
    print("\n🔍 DEBUG: Starting Anthropic Claude debugging...")
    print(f"🔍 DEBUG: Command: {command}")
    print(f"🔍 DEBUG: Error output length: {len(error_output) if error_output else 0}")
    print(f"🔍 DEBUG: Current directory: {current_dir}")
    print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")

    # Define _to_str function locally to avoid NameError
    def _to_str(maybe_bytes):
        try:
            return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
        except UnicodeDecodeError:
            # Handle non-UTF-8 bytes by replacing invalid characters
            if isinstance(maybe_bytes, (bytes, bytearray)):
                return maybe_bytes.decode('utf-8', errors='replace')
            else:
                return str(maybe_bytes)
        except Exception:
            # Last resort fallback
            return str(maybe_bytes)

    def _mask_key(key):
        # SECURITY FIX: previous version printed full API keys to the console;
        # only ever show a short masked preview plus the length.
        return f"{key[:6]}...({len(key)} chars)" if key else "<empty>"

    # Skip debugging for certain commands that commonly return non-zero exit codes
    # but aren't actually errors (like test commands)
    if command.strip().startswith("test "):
        print("🔍 Skipping debugging for test command - non-zero exit code is expected behavior")
        return None

    # Validate error_output - if it's empty, we can't debug effectively
    if not error_output or not error_output.strip():
        print("⚠️ Error output is empty. Cannot effectively debug the command.")
        print("⚠️ Skipping Anthropic debugging due to lack of error information.")
        return None

    # Try to get API key from multiple sources
    if not api_key:
        print("🔍 DEBUG: No Anthropic API key provided, searching for one...")

        # First try environment variable
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
        if api_key:
            print(f"🔍 DEBUG: Environment API key: {_mask_key(api_key)}")

        # If not in environment, try to fetch from server using fetch_modal_tokens
        if not api_key:
            try:
                print("🔍 DEBUG: Trying to fetch API key from server...")
                from fetch_modal_tokens import get_tokens
                _, _, _, api_key = get_tokens()
                if api_key:
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from server")
            except Exception as e:
                print(f"⚠️ Error fetching API key from server: {e}")

        # Then try credentials manager
        if not api_key:
            print("🔍 DEBUG: Trying credentials manager...")
            try:
                from credentials_manager import CredentialsManager
                credentials_manager = CredentialsManager()
                api_key = credentials_manager.get_anthropic_api_key()
                if api_key:
                    print(f"🔍 DEBUG: API key from credentials manager: Found")
                    print(f"🔍 DEBUG: Credentials manager API key: {_mask_key(api_key)}")
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from credentials manager")
            except Exception as e:
                print(f"⚠️ Error fetching API key from credentials manager: {e}")

        # Store the API key in a persistent file if found
        if api_key:
            try:
                key_dir = os.path.expanduser("~/.gitarsenal")
                os.makedirs(key_dir, exist_ok=True)
                key_path = os.path.join(key_dir, "anthropic_key")
                with open(key_path, "w") as f:
                    f.write(api_key)
                # SECURITY FIX: restrict the saved secret to the owner only.
                os.chmod(key_path, 0o600)
                print("✅ Saved Anthropic API key for future use")
            except Exception as e:
                print(f"⚠️ Could not save API key: {e}")

    # Try to load from saved file if not in environment
    if not api_key:
        try:
            key_file = os.path.expanduser("~/.gitarsenal/anthropic_key")
            print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
            if os.path.exists(key_file):
                with open(key_file, "r") as f:
                    api_key = f.read().strip()
                if api_key:
                    print("✅ Loaded Anthropic API key from saved file")
                    print(f"🔍 DEBUG: API key from file: {_mask_key(api_key)}")
                    # Also set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("🔍 DEBUG: Saved file exists but is empty")
            else:
                print("🔍 DEBUG: No saved API key file found")
        except Exception as e:
            print(f"⚠️ Could not load saved API key: {e}")

    if not api_key:
        print("❌ No Anthropic API key available for debugging")
        return None

    # Prepare the prompt for debugging
    error_str = _to_str(error_output)
    prompt = f"""You are a debugging assistant. Provide only the terminal command to fix the issue.

Context:
- Current directory: {current_dir}
- Sandbox available: {sandbox is not None}
- Failed command: {command}
- Error output: {error_str}

Analyze the issue first, understand why it's happening, then provide the command to fix it.

Guidelines:
- For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found
- For missing packages, use appropriate package managers (pip, apt-get, npm)
- For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git)
- For authentication, suggest login commands with placeholders
- For permission errors, suggest commands with sudo if appropriate
- For network issues, suggest retry commands or alternative URLs

Return only the command to fix the issue, nothing else."""

    # Set up headers for Anthropic API
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json"
    }

    # Models to try in order of preference.
    # BUG FIX: "claude-4-sonnet" is not a valid Anthropic API model id, so every
    # request failed; use the same model id as the batch debugging helper.
    models_to_try = ["claude-3-5-sonnet-20241022"]

    def try_api_call(model_name, retries=2, backoff_factor=1.5):
        # Returns (result_json, None) on success, (None, error_message) on failure.
        payload = {
            "model": model_name,
            "max_tokens": 300,
            "messages": [
                {"role": "user", "content": prompt}
            ]
        }

        print(f"🔍 DEBUG: Payload prepared, prompt length: {len(prompt)}")

        # Add specific handling for common errors
        last_error = None
        for attempt in range(retries + 1):
            try:
                if attempt > 0:
                    # Exponential backoff
                    wait_time = backoff_factor * (2 ** (attempt - 1))
                    print(f"⏱️ Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
                    time.sleep(wait_time)

                print(f"🤖 Calling Anthropic Claude with {model_name} model to debug the failed command...")
                print(f"🔍 DEBUG: Making POST request to Anthropic API...")
                response = requests.post(
                    "https://api.anthropic.com/v1/messages",
                    headers=headers,
                    json=payload,
                    timeout=45  # Increased timeout for reliability
                )

                print(f"🔍 DEBUG: Response received, status code: {response.status_code}")

                # Handle specific status codes
                if response.status_code == 200:
                    print(f"🔍 DEBUG: Success! Response length: {len(response.text)}")
                    return response.json(), None
                elif response.status_code == 401:
                    error_msg = "Authentication error: Invalid API key"
                    print(f"❌ {error_msg}")
                    print(f"🔍 DEBUG: Response text: {response.text}")
                    # Don't retry auth errors
                    return None, error_msg
                elif response.status_code == 429:
                    error_msg = "Rate limit exceeded or quota reached"
                    print(f"⚠️ {error_msg}")
                    print(f"🔍 DEBUG: Response text: {response.text}")
                    # Always retry rate limit errors with increasing backoff
                    last_error = error_msg
                    continue
                elif response.status_code == 500:
                    error_msg = "Anthropic server error"
                    print(f"⚠️ {error_msg}")
                    print(f"🔍 DEBUG: Response text: {response.text}")
                    # Retry server errors
                    last_error = error_msg
                    continue
                else:
                    error_msg = f"Status code: {response.status_code}, Response: {response.text}"
                    print(f"⚠️ Anthropic API error: {error_msg}")
                    print(f"🔍 DEBUG: Full response text: {response.text}")
                    last_error = error_msg
                    # Only retry if we have attempts left
                    if attempt < retries:
                        continue
                    return None, error_msg
            except requests.exceptions.Timeout:
                error_msg = "Request timed out"
                last_error = error_msg
                # Always retry timeouts
                continue
            except requests.exceptions.ConnectionError:
                error_msg = "Connection error"
                print(f"⚠️ {error_msg}")
                print(f"🔍 DEBUG: Connection failed to api.anthropic.com")
                last_error = error_msg
                # Always retry connection errors
                continue
            except Exception as e:
                error_msg = str(e)
                print(f"⚠️ Unexpected error: {error_msg}")
                print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
                print(f"🔍 DEBUG: Exception details: {str(e)}")
                last_error = error_msg
                # Only retry if we have attempts left
                if attempt < retries:
                    continue
                return None, error_msg

        # If we get here, all retries failed
        return None, last_error

    # Try each model in sequence until one works
    result = None
    last_error = None

    for model in models_to_try:
        result, error = try_api_call(model)
        if result:
            break
        else:
            print(f"⚠️ Failed to get response from {model}: {error}")
            last_error = error

    if not result:
        print(f"❌ All model attempts failed. Last error: {last_error}")
        return None

    # Process the response
    try:
        print(f"🔍 DEBUG: Processing Anthropic response...")
        print(f"🔍 DEBUG: Choices count: {len(result.get('content', []))}")

        fix_command = result["content"][0]["text"].strip()
        print(f"🔍 DEBUG: Raw response content: {fix_command}")

        # Save the original response for debugging
        original_response = fix_command

        # Extract just the command if it's wrapped in backticks or explanation
        if "```" in fix_command:
            # Extract content between backticks
            import re
            code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
            if code_blocks:
                fix_command = code_blocks[0].strip()
                print(f"✅ Extracted command from code block: {fix_command}")

        # If the response still has explanatory text, try to extract just the command
        if len(fix_command.split('\n')) > 1:
            # First try to find lines that look like commands (start with common command prefixes)
            command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
                                'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
                                'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
                                'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']

            # Check for lines that start with common command prefixes
            command_lines = [line.strip() for line in fix_command.split('\n')
                             if any(line.strip().startswith(prefix) for prefix in command_prefixes)]

            if command_lines:
                # Use the first command line found
                fix_command = command_lines[0]
                print(f"✅ Identified command by prefix: {fix_command}")
            else:
                # Try to find lines that look like commands (contain common shell patterns)
                shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
                command_lines = [line.strip() for line in fix_command.split('\n')
                                 if any(pattern in line for pattern in shell_patterns)]

                if command_lines:
                    # Use the first command line found
                    fix_command = command_lines[0]
                    print(f"✅ Identified command by shell pattern: {fix_command}")
                else:
                    # Fall back to the shortest non-empty line as it's likely the command
                    lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
                    if lines:
                        # Exclude very short lines that are likely not commands
                        valid_lines = [line for line in lines if len(line) > 5]
                        if valid_lines:
                            fix_command = min(valid_lines, key=len)
                        else:
                            fix_command = min(lines, key=len)
                        print(f"✅ Selected shortest line as command: {fix_command}")

        # Clean up the command - remove any trailing periods or quotes
        fix_command = fix_command.rstrip('.;"\'')

        # Remove common prefixes that LLMs sometimes add
        prefixes_to_remove = [
            "Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
            "You should run: ", "You can run: ", "You need to run: "
        ]
        for prefix in prefixes_to_remove:
            if fix_command.startswith(prefix):
                fix_command = fix_command[len(prefix):].strip()
                print(f"✅ Removed prefix: {prefix}")
                break

        # If the command is still multi-line or very long, it might not be a valid command
        if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
            print("⚠️ Extracted command appears invalid (multi-line or too long)")
            print("🔍 Original response from LLM:")
            print("-" * 60)
            print(original_response)
            print("-" * 60)
            print("⚠️ Using best guess for command")

        print(f"🔧 Suggested fix: {fix_command}")
        print(f"🔍 DEBUG: Returning fix command: {fix_command}")
        return fix_command
    except Exception as e:
        print(f"❌ Error processing Anthropic response: {e}")
        print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
        print(f"🔍 DEBUG: Exception details: {str(e)}")
        return None
1154
+
1155
+
1156
def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
    """Unified function to call LLM for batch debugging - routes to OpenAI or Anthropic based on configuration"""
    current_model = get_current_debug_model()

    print(f"🔍 DEBUG: Using {current_model.upper()} for batch debugging...")

    use_anthropic = (current_model == "anthropic")
    provider = "Anthropic" if use_anthropic else "OpenAI"
    key = api_key

    # Fallback 1: provider-specific environment variable.
    if not key:
        key = os.environ.get("ANTHROPIC_API_KEY" if use_anthropic else "OPENAI_API_KEY")

    # Fallback 2: token-fetch service (get_tokens returns the OpenAI key in
    # slot 3 and the Anthropic key in slot 4).
    if not key:
        try:
            from fetch_modal_tokens import get_tokens
            _, _, openai_key, anthropic_key = get_tokens()
            key = anthropic_key if use_anthropic else openai_key
        except Exception as e:
            print(f"⚠️ Error fetching {provider} API key from server: {e}")

    # Fallback 3: local credentials manager.
    if not key:
        try:
            from credentials_manager import CredentialsManager
            manager = CredentialsManager()
            key = (manager.get_anthropic_api_key() if use_anthropic
                   else manager.get_openai_api_key())
        except Exception as e:
            print(f"⚠️ Error getting {provider} API key from credentials manager: {e}")

    # Dispatch to the provider-specific batch debugger.
    if use_anthropic:
        return call_anthropic_for_batch_debug(failed_commands, key, current_dir, sandbox)
    return call_openai_for_batch_debug(failed_commands, key, current_dir, sandbox)
1211
+
1212
def call_anthropic_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
    """Call Anthropic Claude to debug multiple failed commands and suggest fixes for all of them at once.

    Args:
        failed_commands: list of dicts, each with at least a 'command' key and
            optionally 'type', 'original_command', 'stderr', 'stdout'.
        api_key: Anthropic API key. When None, the environment, the token
            server and the credentials manager are tried in that order.
        current_dir: working directory string reported in the prompt context.
        sandbox: sandbox object; only its presence is reported to the model.

    Returns:
        List of dicts with keys 'original_command', 'fix_command', 'reason'
        and 'command_index' — one per fix parsed from the model's reply.
        Empty list on any failure.
    """
    print("\n🔍 DEBUG: Starting batch Anthropic Claude debugging...")
    print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")

    if not failed_commands:
        print("⚠️ No failed commands to analyze")
        return []

    if not api_key:
        print("🔍 DEBUG: No Anthropic API key provided, searching for one...")

        # First try environment variable.
        # SECURITY FIX: the key value itself is no longer echoed to the console.
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")

        # If not in environment, try to fetch from server using fetch_modal_tokens
        if not api_key:
            try:
                print("🔍 DEBUG: Trying to fetch API key from server...")
                from fetch_modal_tokens import get_tokens
                _, _, _, api_key = get_tokens()
                if api_key:
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from server")
            except Exception as e:
                print(f"⚠️ Error fetching API key from server: {e}")

        # Then try credentials manager
        if not api_key:
            print("🔍 DEBUG: Trying credentials manager...")
            try:
                from credentials_manager import CredentialsManager
                credentials_manager = CredentialsManager()
                api_key = credentials_manager.get_anthropic_api_key()
                if api_key:
                    print(f"🔍 DEBUG: API key from credentials manager: Found")
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from credentials manager")
            except Exception as e:
                print(f"⚠️ Error fetching API key from credentials manager: {e}")

    if not api_key:
        print("❌ No Anthropic API key available for batch debugging")
        return []

    # Prepare context for batch analysis
    context_parts = []
    context_parts.append(f"Current directory: {current_dir}")
    context_parts.append(f"Sandbox available: {sandbox is not None}")

    # Add failed commands with their errors
    for i, failed_cmd in enumerate(failed_commands, 1):
        cmd_type = failed_cmd.get('type', 'main')
        original_cmd = failed_cmd.get('original_command', '')
        cmd_text = failed_cmd['command']
        stderr = failed_cmd.get('stderr', '')
        stdout = failed_cmd.get('stdout', '')

        context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
        context_parts.append(f"Command: {cmd_text}")
        if original_cmd and original_cmd != cmd_text:
            context_parts.append(f"Original Command: {original_cmd}")
        if stderr:
            context_parts.append(f"Error Output: {stderr}")
        if stdout:
            context_parts.append(f"Standard Output: {stdout}")

    # BUG FIX: the response-format template previously interpolated the stale
    # loop variable `i` into the f-string (showing e.g. only "FIX_COMMAND_3"
    # for a 3-command batch). Build one explicit template pair per command so
    # the model is told the exact labels the parser below searches for.
    format_lines = []
    for idx in range(1, len(failed_commands) + 1):
        format_lines.append(f"FIX_COMMAND_{idx}: <the fix command>")
        format_lines.append(f"REASON_{idx}: <brief explanation of why the original command failed and how the fix addresses it>")

    # Create the prompt for batch analysis
    prompt = f"""You are a debugging assistant analyzing multiple failed commands.

Context:
{chr(10).join(context_parts)}

Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:

{chr(10).join(format_lines)}

Guidelines:
- For file not found errors, first search for the file using 'find . -name filename -type f'
- For missing packages, use appropriate package managers (pip, apt-get, npm)
- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
- For permission errors, suggest commands with sudo if appropriate
- For network issues, suggest retry commands or alternative URLs
- Keep each fix command simple and focused on the specific error

Provide fixes for all {len(failed_commands)} failed commands:"""

    # Set up headers for Anthropic API
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json"
    }

    payload = {
        "model": "claude-3-5-sonnet-20241022",  # Use a more capable model for batch analysis
        "max_tokens": 1000,
        "messages": [
            {"role": "user", "content": prompt}
        ]
    }

    try:
        print(f"🤖 Calling Anthropic Claude for batch debugging of {len(failed_commands)} commands...")
        response = requests.post(
            "https://api.anthropic.com/v1/messages",
            headers=headers,
            json=payload,
            timeout=60
        )

        if response.status_code == 200:
            result = response.json()
            content = result['content'][0]['text']
            print(f"✅ Batch analysis completed")

            # Parse the response to extract fix commands
            fixes = []
            for i in range(1, len(failed_commands) + 1):
                fix_pattern = f"FIX_COMMAND_{i}: (.+)"
                reason_pattern = f"REASON_{i}: (.+)"

                fix_match = re.search(fix_pattern, content, re.MULTILINE)
                reason_match = re.search(reason_pattern, content, re.MULTILINE)

                if fix_match:
                    fix_command = fix_match.group(1).strip()
                    reason = reason_match.group(1).strip() if reason_match else "Anthropic Claude suggested fix"

                    # Clean up the fix command (strip surrounding backticks)
                    if fix_command.startswith('`') and fix_command.endswith('`'):
                        fix_command = fix_command[1:-1]

                    fixes.append({
                        'original_command': failed_commands[i-1]['command'],
                        'fix_command': fix_command,
                        'reason': reason,
                        'command_index': i-1
                    })

            print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
            return fixes
        else:
            print(f"❌ Anthropic API error: {response.status_code} - {response.text}")
            return []

    except Exception as e:
        print(f"❌ Error during batch debugging: {e}")
        return []