gitarsenal-cli 1.9.18 → 1.9.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -899,8 +899,8 @@ class CommandListManager:
899
899
 
900
900
  print(f"šŸ” Analyzing {len(failed_commands)} failed commands with LLM...")
901
901
 
902
- # Use batch debugging for efficiency
903
- fixes = call_openai_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
902
+ # Use unified batch debugging for efficiency
903
+ fixes = call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
904
904
 
905
905
  # Add the fixes to the command list
906
906
  added_fixes = []
@@ -1250,7 +1250,7 @@ class CommandListManager:
1250
1250
  # Import the fetch_modal_tokens module
1251
1251
  # print("šŸ”„ Fetching tokens from proxy server...")
1252
1252
  from fetch_modal_tokens import get_tokens
1253
- token_id, token_secret, openai_api_key = get_tokens()
1253
+ token_id, token_secret, openai_api_key, _ = get_tokens()
1254
1254
 
1255
1255
  # Check if we got valid tokens
1256
1256
  if token_id is None or token_secret is None:
@@ -1349,7 +1349,7 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
1349
1349
  try:
1350
1350
  print("šŸ” DEBUG: Trying to fetch API key from server...")
1351
1351
  from fetch_modal_tokens import get_tokens
1352
- _, _, api_key = get_tokens()
1352
+ _, _, api_key, _ = get_tokens()
1353
1353
  if api_key:
1354
1354
  # Set in environment for this session
1355
1355
  os.environ["OPENAI_API_KEY"] = api_key
@@ -2016,6 +2016,674 @@ Provide fixes for all {len(failed_commands)} failed commands:"""
2016
2016
  print(f"āŒ Error during batch debugging: {e}")
2017
2017
  return []
2018
2018
 
2019
def call_anthropic_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
    """Call Anthropic Claude to debug a failed command and suggest a fix.

    Args:
        command: The shell command that failed.
        error_output: Captured error text (str or bytes) from the failed command.
        api_key: Optional Anthropic API key. When absent it is resolved from the
            ANTHROPIC_API_KEY env var, the token server, the credentials
            manager, and finally a saved key file, in that order.
        current_dir: Working directory the command ran in (context for the LLM).
        sandbox: Optional sandbox object; only its presence is reported.

    Returns:
        The suggested fix command as a string, or None when debugging is
        skipped (test commands, empty error output, no key) or fails.
    """
    print("\n🔍 DEBUG: Starting Anthropic Claude debugging...")
    print(f"🔍 DEBUG: Command: {command}")
    print(f"🔍 DEBUG: Error output length: {len(error_output) if error_output else 0}")
    print(f"🔍 DEBUG: Current directory: {current_dir}")
    print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")

    def _to_str(maybe_bytes):
        # Local tolerant bytes->str conversion; never raises on bad encodings.
        try:
            return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
        except UnicodeDecodeError:
            if isinstance(maybe_bytes, (bytes, bytearray)):
                return maybe_bytes.decode('utf-8', errors='replace')
            return str(maybe_bytes)
        except Exception:
            return str(maybe_bytes)

    # `test ...` commands use the exit code as their answer; a non-zero exit
    # is expected behavior, not an error worth debugging.
    if command.strip().startswith("test "):
        print("🔍 Skipping debugging for test command - non-zero exit code is expected behavior")
        return None

    # Without any error text there is nothing useful to send to the model.
    if not error_output or not error_output.strip():
        print("⚠️ Error output is empty. Cannot effectively debug the command.")
        print("⚠️ Skipping Anthropic debugging due to lack of error information.")
        return None

    # Resolve an API key: env var -> token server -> credentials manager.
    # SECURITY FIX: never print raw API key values (the original leaked them
    # to stdout/logs); only report whether a key was found.
    if not api_key:
        print("🔍 DEBUG: No Anthropic API key provided, searching for one...")

        api_key = os.environ.get("ANTHROPIC_API_KEY")
        print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")

        if not api_key:
            try:
                print("🔍 DEBUG: Trying to fetch API key from server...")
                from fetch_modal_tokens import get_tokens
                _, _, _, api_key = get_tokens()
                if api_key:
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from server")
            except Exception as e:
                print(f"⚠️ Error fetching API key from server: {e}")

        if not api_key:
            print("🔍 DEBUG: Trying credentials manager...")
            try:
                from credentials_manager import CredentialsManager
                credentials_manager = CredentialsManager()
                api_key = credentials_manager.get_anthropic_api_key()
                if api_key:
                    print("🔍 DEBUG: API key from credentials manager: Found")
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from credentials manager")
            except Exception as e:
                print(f"⚠️ Error fetching API key from credentials manager: {e}")

        # Persist a freshly discovered key so later runs can skip the lookup.
        if api_key:
            try:
                os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
                with open(os.path.expanduser("~/.gitarsenal/anthropic_key"), "w") as f:
                    f.write(api_key)
                print("✅ Saved Anthropic API key for future use")
            except Exception as e:
                print(f"⚠️ Could not save API key: {e}")

    # Last resort: a key saved by a previous run.
    if not api_key:
        try:
            key_file = os.path.expanduser("~/.gitarsenal/anthropic_key")
            print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
            if os.path.exists(key_file):
                with open(key_file, "r") as f:
                    api_key = f.read().strip()
                if api_key:
                    print("✅ Loaded Anthropic API key from saved file")
                    # Also set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("🔍 DEBUG: Saved file exists but is empty")
            else:
                print("🔍 DEBUG: No saved API key file found")
        except Exception as e:
            print(f"⚠️ Could not load saved API key: {e}")

    if not api_key:
        print("❌ No Anthropic API key available for debugging")
        return None

    # Build the debugging prompt from the failure context.
    error_str = _to_str(error_output)
    prompt = f"""You are a debugging assistant. Provide only the terminal command to fix the issue.

Context:
- Current directory: {current_dir}
- Sandbox available: {sandbox is not None}
- Failed command: {command}
- Error output: {error_str}

Analyze the issue first, understand why it's happening, then provide the command to fix it.

Guidelines:
- For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found
- For missing packages, use appropriate package managers (pip, apt-get, npm)
- For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git)
- For authentication, suggest login commands with placeholders
- For permission errors, suggest commands with sudo if appropriate
- For network issues, suggest retry commands or alternative URLs

Return only the command to fix the issue, nothing else."""

    # Headers required by the Anthropic Messages API.
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json"
    }

    # FIX: "claude-4-sonnet" is not a valid Anthropic model ID and every call
    # would 404. Use the same model the batch debugger uses.
    models_to_try = ["claude-3-5-sonnet-20241022"]

    def try_api_call(model_name, retries=2, backoff_factor=1.5):
        # Returns (parsed_json, None) on success or (None, error_message) on
        # failure, retrying transient errors with exponential backoff.
        payload = {
            "model": model_name,
            "max_tokens": 300,
            "messages": [
                {"role": "user", "content": prompt}
            ]
        }

        print(f"🔍 DEBUG: Payload prepared, prompt length: {len(prompt)}")

        last_error = None
        for attempt in range(retries + 1):
            try:
                if attempt > 0:
                    # Exponential backoff between retries.
                    wait_time = backoff_factor * (2 ** (attempt - 1))
                    print(f"⏱️ Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
                    time.sleep(wait_time)

                print(f"🤖 Calling Anthropic Claude with {model_name} model to debug the failed command...")
                print("🔍 DEBUG: Making POST request to Anthropic API...")
                response = requests.post(
                    "https://api.anthropic.com/v1/messages",
                    headers=headers,
                    json=payload,
                    timeout=45  # generous timeout for reliability
                )

                print(f"🔍 DEBUG: Response received, status code: {response.status_code}")

                if response.status_code == 200:
                    print(f"🔍 DEBUG: Success! Response length: {len(response.text)}")
                    return response.json(), None
                elif response.status_code == 401:
                    # Auth errors will not recover on retry.
                    error_msg = "Authentication error: Invalid API key"
                    print(f"❌ {error_msg}")
                    print(f"🔍 DEBUG: Response text: {response.text}")
                    return None, error_msg
                elif response.status_code == 429:
                    # Rate limits are retried with increasing backoff.
                    error_msg = "Rate limit exceeded or quota reached"
                    print(f"⚠️ {error_msg}")
                    print(f"🔍 DEBUG: Response text: {response.text}")
                    last_error = error_msg
                    continue
                elif response.status_code == 500:
                    # Server errors are retried.
                    error_msg = "Anthropic server error"
                    print(f"⚠️ {error_msg}")
                    print(f"🔍 DEBUG: Response text: {response.text}")
                    last_error = error_msg
                    continue
                else:
                    error_msg = f"Status code: {response.status_code}, Response: {response.text}"
                    print(f"⚠️ Anthropic API error: {error_msg}")
                    last_error = error_msg
                    if attempt < retries:
                        continue
                    return None, error_msg
            except requests.exceptions.Timeout:
                last_error = "Request timed out"
                continue
            except requests.exceptions.ConnectionError:
                last_error = "Connection error"
                print(f"⚠️ {last_error}")
                print("🔍 DEBUG: Connection failed to api.anthropic.com")
                continue
            except Exception as e:
                error_msg = str(e)
                print(f"⚠️ Unexpected error: {error_msg}")
                print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
                last_error = error_msg
                if attempt < retries:
                    continue
                return None, error_msg

        # All retries exhausted.
        return None, last_error

    # Try each model in sequence until one answers.
    result = None
    last_error = None
    for model in models_to_try:
        result, error = try_api_call(model)
        if result:
            break
        print(f"⚠️ Failed to get response from {model}: {error}")
        last_error = error

    if not result:
        print(f"❌ All model attempts failed. Last error: {last_error}")
        return None

    # Extract a single runnable command from the model's reply.
    try:
        print("🔍 DEBUG: Processing Anthropic response...")
        print(f"🔍 DEBUG: Content blocks: {len(result.get('content', []))}")

        fix_command = result["content"][0]["text"].strip()
        print(f"🔍 DEBUG: Raw response content: {fix_command}")

        # Keep the untouched reply for diagnostics.
        original_response = fix_command

        # Prefer the contents of a fenced code block if one is present.
        if "```" in fix_command:
            import re
            code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
            if code_blocks:
                fix_command = code_blocks[0].strip()
                print(f"✅ Extracted command from code block: {fix_command}")

        # Multi-line replies usually mix prose with the command; try to pick
        # out the single command line by common prefixes, then shell syntax,
        # then fall back to the shortest plausible line.
        if len(fix_command.split('\n')) > 1:
            command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
                                'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
                                'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
                                'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']

            command_lines = [line.strip() for line in fix_command.split('\n')
                             if any(line.strip().startswith(prefix) for prefix in command_prefixes)]

            if command_lines:
                fix_command = command_lines[0]
                print(f"✅ Identified command by prefix: {fix_command}")
            else:
                shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
                command_lines = [line.strip() for line in fix_command.split('\n')
                                 if any(pattern in line for pattern in shell_patterns)]

                if command_lines:
                    fix_command = command_lines[0]
                    print(f"✅ Identified command by shell pattern: {fix_command}")
                else:
                    lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
                    if lines:
                        # Exclude very short lines that are likely not commands.
                        valid_lines = [line for line in lines if len(line) > 5]
                        fix_command = min(valid_lines, key=len) if valid_lines else min(lines, key=len)
                        print(f"✅ Selected shortest line as command: {fix_command}")

        # Trim trailing punctuation/quotes and leading LLM boilerplate.
        fix_command = fix_command.rstrip('.;"\'')

        prefixes_to_remove = [
            "Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
            "You should run: ", "You can run: ", "You need to run: "
        ]
        for prefix in prefixes_to_remove:
            if fix_command.startswith(prefix):
                fix_command = fix_command[len(prefix):].strip()
                print(f"✅ Removed prefix: {prefix}")
                break

        # A still-multi-line or very long result is probably not a command;
        # surface the raw reply but return the best guess anyway.
        if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
            print("⚠️ Extracted command appears invalid (multi-line or too long)")
            print("🔍 Original response from LLM:")
            print("-" * 60)
            print(original_response)
            print("-" * 60)
            print("⚠️ Using best guess for command")

        print(f"🔧 Suggested fix: {fix_command}")
        return fix_command
    except Exception as e:
        print(f"❌ Error processing Anthropic response: {e}")
        print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
        return None
def switch_to_anthropic_models():
    """Switch the debugging system to use Anthropic Claude models instead of OpenAI"""
    print("\n🔄 Switching to Anthropic Claude models for debugging...")

    # Record the provider preference for the unified dispatchers.
    os.environ["GITARSENAL_DEBUG_MODEL"] = "anthropic"

    # Resolve and cache an Anthropic key via the credentials manager.
    try:
        from credentials_manager import CredentialsManager
        key = CredentialsManager().get_anthropic_api_key()
    except Exception as e:
        print(f"❌ Error configuring Anthropic: {e}")
        return False

    if not key:
        print("⚠️ No Anthropic API key found")
        print("💡 You can set your Anthropic API key using:")
        print(" export ANTHROPIC_API_KEY='your-key'")
        print(" Or run the credentials manager to set it up")
        return False

    os.environ["ANTHROPIC_API_KEY"] = key
    print("✅ Anthropic API key configured")
    print("✅ Debugging will now use Anthropic Claude models")
    return True
def switch_to_openai_models():
    """Switch the debugging system to use OpenAI models (default)"""
    print("\n🔄 Switching to OpenAI models for debugging...")

    # Record the provider preference for the unified dispatchers.
    os.environ["GITARSENAL_DEBUG_MODEL"] = "openai"

    # Resolve and cache an OpenAI key via the credentials manager.
    try:
        from credentials_manager import CredentialsManager
        key = CredentialsManager().get_openai_api_key()
    except Exception as e:
        print(f"❌ Error configuring OpenAI: {e}")
        return False

    if not key:
        print("⚠️ No OpenAI API key found")
        print("💡 You can set your OpenAI API key using:")
        print(" export OPENAI_API_KEY='your-key'")
        print(" Or run the credentials manager to set it up")
        return False

    os.environ["OPENAI_API_KEY"] = key
    print("✅ OpenAI API key configured")
    print("✅ Debugging will now use OpenAI models")
    return True
def get_current_debug_model():
    """Get the currently configured debugging model preference"""
    # Falls back to "openai" when no preference has been recorded.
    preference = os.environ.get("GITARSENAL_DEBUG_MODEL")
    return preference if preference is not None else "openai"
def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
    """Unified function to call LLM for debugging - routes to OpenAI or Anthropic based on configuration"""
    provider = get_current_debug_model()
    print(f"🔍 DEBUG: Using {provider.upper()} for debugging...")

    use_anthropic = provider == "anthropic"
    label = "Anthropic" if use_anthropic else "OpenAI"

    # Key resolution order: explicit argument -> environment -> token server
    # -> credentials manager. Any lookup failure falls through to the next.
    if not api_key:
        api_key = os.environ.get("ANTHROPIC_API_KEY" if use_anthropic else "OPENAI_API_KEY")

    if not api_key:
        try:
            from fetch_modal_tokens import get_tokens
            tokens = get_tokens()
            # get_tokens() -> (token_id, token_secret, openai_key, anthropic_key)
            api_key = tokens[3] if use_anthropic else tokens[2]
        except Exception as e:
            print(f"⚠️ Error fetching {label} API key from server: {e}")

    if not api_key:
        try:
            from credentials_manager import CredentialsManager
            manager = CredentialsManager()
            api_key = (manager.get_anthropic_api_key() if use_anthropic
                       else manager.get_openai_api_key())
        except Exception as e:
            print(f"⚠️ Error getting {label} API key from credentials manager: {e}")

    # Dispatch to the provider-specific debugger.
    if use_anthropic:
        return call_anthropic_for_debug(command, error_output, api_key, current_dir, sandbox)
    return call_openai_for_debug(command, error_output, api_key, current_dir, sandbox)
def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
    """Unified function to call LLM for batch debugging - routes to OpenAI or Anthropic based on configuration"""
    provider = get_current_debug_model()
    print(f"🔍 DEBUG: Using {provider.upper()} for batch debugging...")

    use_anthropic = provider == "anthropic"
    label = "Anthropic" if use_anthropic else "OpenAI"

    # Key resolution order: explicit argument -> environment -> token server
    # -> credentials manager. Any lookup failure falls through to the next.
    if not api_key:
        api_key = os.environ.get("ANTHROPIC_API_KEY" if use_anthropic else "OPENAI_API_KEY")

    if not api_key:
        try:
            from fetch_modal_tokens import get_tokens
            tokens = get_tokens()
            # get_tokens() -> (token_id, token_secret, openai_key, anthropic_key)
            api_key = tokens[3] if use_anthropic else tokens[2]
        except Exception as e:
            print(f"⚠️ Error fetching {label} API key from server: {e}")

    if not api_key:
        try:
            from credentials_manager import CredentialsManager
            manager = CredentialsManager()
            api_key = (manager.get_anthropic_api_key() if use_anthropic
                       else manager.get_openai_api_key())
        except Exception as e:
            print(f"⚠️ Error getting {label} API key from credentials manager: {e}")

    # Dispatch to the provider-specific batch debugger.
    if use_anthropic:
        return call_anthropic_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
    return call_openai_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
def call_anthropic_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
    """Call Anthropic Claude to debug multiple failed commands and suggest fixes for all of them at once.

    Args:
        failed_commands: List of dicts describing failures. Each must contain
            'command' and may contain 'type', 'original_command', 'stderr',
            'stdout'.
        api_key: Optional Anthropic API key; resolved from the environment,
            the token server, or the credentials manager when absent.
        current_dir: Working directory context included in the prompt.
        sandbox: Optional sandbox object; only its presence is reported.

    Returns:
        List of fix dicts with keys 'original_command', 'fix_command',
        'reason' and 'command_index'; an empty list on any failure.
    """
    import re  # local import so the response parser never hits a NameError

    print("\n🔍 DEBUG: Starting batch Anthropic Claude debugging...")
    print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")

    if not failed_commands:
        print("⚠️ No failed commands to analyze")
        return []

    # Resolve an API key: env var -> token server -> credentials manager.
    # SECURITY FIX: never print raw API key values (the original leaked them
    # to stdout/logs); only report whether a key was found.
    if not api_key:
        print("🔍 DEBUG: No Anthropic API key provided, searching for one...")

        api_key = os.environ.get("ANTHROPIC_API_KEY")
        print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")

        if not api_key:
            try:
                print("🔍 DEBUG: Trying to fetch API key from server...")
                from fetch_modal_tokens import get_tokens
                _, _, _, api_key = get_tokens()
                if api_key:
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from server")
            except Exception as e:
                print(f"⚠️ Error fetching API key from server: {e}")

        if not api_key:
            print("🔍 DEBUG: Trying credentials manager...")
            try:
                from credentials_manager import CredentialsManager
                credentials_manager = CredentialsManager()
                api_key = credentials_manager.get_anthropic_api_key()
                if api_key:
                    print("🔍 DEBUG: API key from credentials manager: Found")
                    # Set in environment for this session
                    os.environ["ANTHROPIC_API_KEY"] = api_key
                else:
                    print("⚠️ Could not fetch Anthropic API key from credentials manager")
            except Exception as e:
                print(f"⚠️ Error fetching API key from credentials manager: {e}")

    if not api_key:
        print("❌ No Anthropic API key available for batch debugging")
        return []

    # Describe every failure for the model.
    context_parts = []
    context_parts.append(f"Current directory: {current_dir}")
    context_parts.append(f"Sandbox available: {sandbox is not None}")

    for i, failed_cmd in enumerate(failed_commands, 1):
        cmd_type = failed_cmd.get('type', 'main')
        original_cmd = failed_cmd.get('original_command', '')
        cmd_text = failed_cmd['command']
        stderr = failed_cmd.get('stderr', '')
        stdout = failed_cmd.get('stdout', '')

        context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
        context_parts.append(f"Command: {cmd_text}")
        if original_cmd and original_cmd != cmd_text:
            context_parts.append(f"Original Command: {original_cmd}")
        if stderr:
            context_parts.append(f"Error Output: {stderr}")
        if stdout:
            context_parts.append(f"Standard Output: {stdout}")

    # BUG FIX: the original template was an f-string containing
    # "FIX_COMMAND_{i}" / "REASON_{i}", so {i} interpolated the *last* value
    # of the loop variable above instead of describing a per-command
    # placeholder. The placeholder is now spelled out literally as N.
    prompt = f"""You are a debugging assistant analyzing multiple failed commands.

Context:
{chr(10).join(context_parts)}

Please analyze each failed command and provide a fix command for each one. For failed command number N (N from 1 to {len(failed_commands)}), respond with:

FIX_COMMAND_N: <the fix command>
REASON_N: <brief explanation of why the original command failed and how the fix addresses it>

Guidelines:
- For file not found errors, first search for the file using 'find . -name filename -type f'
- For missing packages, use appropriate package managers (pip, apt-get, npm)
- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
- For permission errors, suggest commands with sudo if appropriate
- For network issues, suggest retry commands or alternative URLs
- Keep each fix command simple and focused on the specific error

Provide fixes for all {len(failed_commands)} failed commands:"""

    # Headers required by the Anthropic Messages API.
    headers = {
        "x-api-key": api_key,
        "anthropic-version": "2023-06-01",
        "content-type": "application/json"
    }

    payload = {
        "model": "claude-3-5-sonnet-20241022",  # more capable model for batch analysis
        "max_tokens": 1000,
        "messages": [
            {"role": "user", "content": prompt}
        ]
    }

    try:
        print(f"🤖 Calling Anthropic Claude for batch debugging of {len(failed_commands)} commands...")
        response = requests.post(
            "https://api.anthropic.com/v1/messages",
            headers=headers,
            json=payload,
            timeout=60
        )

        if response.status_code != 200:
            print(f"❌ Anthropic API error: {response.status_code} - {response.text}")
            return []

        result = response.json()
        content = result['content'][0]['text']
        print("✅ Batch analysis completed")

        # Pull FIX_COMMAND_n / REASON_n pairs out of the reply, one per
        # submitted failure.
        fixes = []
        for i in range(1, len(failed_commands) + 1):
            fix_match = re.search(f"FIX_COMMAND_{i}: (.+)", content, re.MULTILINE)
            reason_match = re.search(f"REASON_{i}: (.+)", content, re.MULTILINE)

            if fix_match:
                fix_command = fix_match.group(1).strip()
                reason = reason_match.group(1).strip() if reason_match else "Anthropic Claude suggested fix"

                # Strip surrounding backticks, if present.
                if fix_command.startswith('`') and fix_command.endswith('`'):
                    fix_command = fix_command[1:-1]

                fixes.append({
                    'original_command': failed_commands[i-1]['command'],
                    'fix_command': fix_command,
                    'reason': reason,
                    'command_index': i-1
                })

        print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
        return fixes

    except Exception as e:
        print(f"❌ Error during batch debugging: {e}")
        return []
+
2019
2687
  def generate_random_password(length=16):
2020
2688
  """Generate a random password for SSH access"""
2021
2689
  alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
@@ -2418,13 +3086,12 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
2418
3086
  if not success:
2419
3087
  print(f"āš ļø Command failed, attempting LLM debugging...")
2420
3088
 
2421
- # Call OpenAI for debugging
3089
+ # Call LLM for debugging
2422
3090
  try:
2423
3091
  current_dir = shell.get_cwd()
2424
- api_key = os.environ.get("OPENAI_API_KEY")
2425
3092
 
2426
- # Use existing call_openai_for_debug function
2427
- fix_command = call_openai_for_debug(cmd_text, stderr, api_key=api_key, current_dir=current_dir, sandbox=shell)
3093
+ # Use unified LLM debugging function
3094
+ fix_command = call_llm_for_debug(cmd_text, stderr, current_dir=current_dir, sandbox=shell)
2428
3095
 
2429
3096
  if fix_command:
2430
3097
  print(f"šŸ”§ OpenAI suggested fix command: {fix_command}")