gitarsenal-cli 1.9.24 → 1.9.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.venv_status.json CHANGED
@@ -1 +1 @@
1
- {"created":"2025-08-07T09:48:26.761Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
1
+ {"created":"2025-08-07T10:49:35.026Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitarsenal-cli",
3
- "version": "1.9.24",
3
+ "version": "1.9.25",
4
4
  "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -4,6 +4,8 @@ import json
4
4
  import requests
5
5
  import openai
6
6
  import anthropic
7
+ import time
8
+ import getpass
7
9
 
8
10
 
9
11
  def get_stored_credentials():
@@ -39,10 +41,10 @@ def generate_auth_context(stored_credentials):
39
41
 
40
42
  def get_current_debug_model():
41
43
  """Get the currently configured debugging model preference"""
42
- return os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
44
+ return os.environ.get("GITARSENAL_DEBUG_MODEL", "anthropic")
43
45
 
44
46
  def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
45
- """Unified function to call LLM for debugging - routes to OpenAI or Anthropic based on configuration"""
47
+ """Unified function to call LLM for debugging - routes to OpenAI, Anthropic, or OpenRouter based on configuration"""
46
48
  current_model = get_current_debug_model()
47
49
 
48
50
  print(f"šŸ” DEBUG: Using {current_model.upper()} for debugging...")
@@ -71,6 +73,33 @@ def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sa
71
73
  print(f"āš ļø Error getting Anthropic API key from credentials manager: {e}")
72
74
 
73
75
  return call_anthropic_for_debug(command, error_output, api_key, current_dir, sandbox)
76
+ elif current_model == "openrouter":
77
+ # Try to get OpenRouter API key if not provided
78
+ if not api_key:
79
+ # First try environment variable
80
+ api_key = os.environ.get("OPENROUTER_API_KEY")
81
+
82
+ # If not in environment, try to fetch from server using fetch_modal_tokens
83
+ if not api_key:
84
+ try:
85
+ from fetch_modal_tokens import get_tokens
86
+ # Assuming OpenRouter key is the 5th token in the tuple
87
+ tokens = get_tokens()
88
+ if len(tokens) >= 5:
89
+ api_key = tokens[4]
90
+ except Exception as e:
91
+ print(f"āš ļø Error fetching OpenRouter API key from server: {e}")
92
+
93
+ # Then try credentials manager
94
+ if not api_key:
95
+ try:
96
+ from credentials_manager import CredentialsManager
97
+ credentials_manager = CredentialsManager()
98
+ api_key = credentials_manager.get_openrouter_api_key()
99
+ except Exception as e:
100
+ print(f"āš ļø Error getting OpenRouter API key from credentials manager: {e}")
101
+
102
+ return call_openrouter_for_debug(command, error_output, api_key, current_dir, sandbox)
74
103
  else:
75
104
  # Default to OpenAI
76
105
  # Try to get OpenAI API key if not provided
@@ -469,7 +498,7 @@ Do not provide any explanations, just the exact command to run.
469
498
 
470
499
  # Try to use GPT-4 first, but fall back to other models if needed
471
500
  models_to_try = [
472
- "gpt-4o-mini", # First choice: GPT-4o (most widely available)
501
+ "gpt-4.1-mini", # First choice: GPT-4.1 mini (fast and widely available)
473
502
  ]
474
503
 
475
504
  # Check if we have a preferred model in environment
@@ -953,7 +982,7 @@ Return only the command to fix the issue, nothing else."""
953
982
  }
954
983
 
955
984
  # Models to try in order of preference
956
- models_to_try = ["claude-4-sonnet"]
985
+ models_to_try = ["claude-sonnet-4-20250514"]
957
986
 
958
987
  def try_api_call(model_name, retries=2, backoff_factor=1.5):
959
988
  payload = {
@@ -1152,6 +1181,334 @@ Return only the command to fix the issue, nothing else."""
1152
1181
  print(f"šŸ” DEBUG: Exception details: {str(e)}")
1153
1182
  return None
1154
1183
 
1184
+ def call_openrouter_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
1185
+ """Call OpenRouter to debug a failed command and suggest a fix"""
1186
+ print("\nšŸ” DEBUG: Starting OpenRouter debugging...")
1187
+ print(f"šŸ” DEBUG: Command: {command}")
1188
+ print(f"šŸ” DEBUG: Error output length: {len(error_output) if error_output else 0}")
1189
+ print(f"šŸ” DEBUG: Current directory: {current_dir}")
1190
+ print(f"šŸ” DEBUG: Sandbox available: {sandbox is not None}")
1191
+
1192
+ # Define _to_str function locally to avoid NameError
1193
+ def _to_str(maybe_bytes):
1194
+ try:
1195
+ return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
1196
+ except UnicodeDecodeError:
1197
+ # Handle non-UTF-8 bytes by replacing invalid characters
1198
+ if isinstance(maybe_bytes, (bytes, bytearray)):
1199
+ return maybe_bytes.decode('utf-8', errors='replace')
1200
+ else:
1201
+ return str(maybe_bytes)
1202
+ except Exception:
1203
+ # Last resort fallback
1204
+ return str(maybe_bytes)
1205
+
1206
+ # Skip debugging for certain commands that commonly return non-zero exit codes
1207
+ # but aren't actually errors (like test commands)
1208
+ if command.strip().startswith("test "):
1209
+ print("šŸ” Skipping debugging for test command - non-zero exit code is expected behavior")
1210
+ return None
1211
+
1212
+ # Validate error_output - if it's empty, we can't debug effectively
1213
+ if not error_output or not error_output.strip():
1214
+ print("āš ļø Error output is empty. Cannot effectively debug the command.")
1215
+ print("āš ļø Skipping OpenRouter debugging due to lack of error information.")
1216
+ return None
1217
+
1218
+ # Try to get API key from multiple sources
1219
+ if not api_key:
1220
+ print("šŸ” DEBUG: No OpenRouter API key provided, searching for one...")
1221
+
1222
+ # First try environment variable
1223
+ api_key = os.environ.get("OPENROUTER_API_KEY")
1224
+ print(f"šŸ” DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
1225
+ if api_key:
1226
+ print(f"šŸ” DEBUG: Environment API key value: {api_key}")
1227
+
1228
+ # If not in environment, try to fetch from server using fetch_modal_tokens
1229
+ if not api_key:
1230
+ try:
1231
+ print("šŸ” DEBUG: Trying to fetch API key from server...")
1232
+ from fetch_modal_tokens import get_tokens
1233
+ # Assuming OpenRouter key is the 5th token in the tuple
1234
+ tokens = get_tokens()
1235
+ if len(tokens) >= 5:
1236
+ api_key = tokens[4]
1237
+ except Exception as e:
1238
+ print(f"āš ļø Error fetching OpenRouter API key from server: {e}")
1239
+
1240
+ # Then try credentials manager
1241
+ if not api_key:
1242
+ try:
1243
+ from credentials_manager import CredentialsManager
1244
+ credentials_manager = CredentialsManager()
1245
+ api_key = credentials_manager.get_openrouter_api_key()
1246
+ except Exception as e:
1247
+ print(f"āš ļø Error getting OpenRouter API key from credentials manager: {e}")
1248
+
1249
+ # Store the API key in a persistent file if found
1250
+ if api_key:
1251
+ try:
1252
+ os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
1253
+ with open(os.path.expanduser("~/.gitarsenal/openrouter_key"), "w") as f:
1254
+ f.write(api_key)
1255
+ print("āœ… Saved OpenRouter API key for future use")
1256
+ except Exception as e:
1257
+ print(f"āš ļø Could not save API key: {e}")
1258
+
1259
+ # Try to load from saved file if not in environment
1260
+ if not api_key:
1261
+ try:
1262
+ key_file = os.path.expanduser("~/.gitarsenal/openrouter_key")
1263
+ print(f"šŸ” DEBUG: Checking for saved API key at: {key_file}")
1264
+ if os.path.exists(key_file):
1265
+ with open(key_file, "r") as f:
1266
+ api_key = f.read().strip()
1267
+ if api_key:
1268
+ print("āœ… Loaded OpenRouter API key from saved file")
1269
+ print(f"šŸ” DEBUG: API key from file: {api_key}")
1270
+ print(f"šŸ” DEBUG: API key length: {len(api_key)}")
1271
+ # Also set in environment for this session
1272
+ os.environ["OPENROUTER_API_KEY"] = api_key
1273
+ else:
1274
+ print("šŸ” DEBUG: Saved file exists but is empty")
1275
+ else:
1276
+ print("šŸ” DEBUG: No saved API key file found")
1277
+ except Exception as e:
1278
+ print(f"āš ļø Could not load saved API key: {e}")
1279
+
1280
+ if not api_key:
1281
+ print("āŒ No OpenRouter API key available for debugging")
1282
+ return None
1283
+
1284
+ # Prepare the prompt for debugging
1285
+ error_str = _to_str(error_output)
1286
+ prompt = f"""You are a debugging assistant. Provide only the terminal command to fix the issue.
1287
+
1288
+ Context:
1289
+ - Current directory: {current_dir}
1290
+ - Sandbox available: {sandbox is not None}
1291
+ - Failed command: {command}
1292
+ - Error output: {error_str}
1293
+
1294
+ Analyze the issue first, understand why it's happening, then provide the command to fix it.
1295
+
1296
+ Guidelines:
1297
+ - For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found
1298
+ - For missing packages, use appropriate package managers (pip, apt-get, npm)
1299
+ - For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git)
1300
+ - For authentication, suggest login commands with placeholders
1301
+ - For permission errors, suggest commands with sudo if appropriate
1302
+ - For network issues, suggest retry commands or alternative URLs
1303
+
1304
+ Return only the command to fix the issue, nothing else."""
1305
+
1306
+ # Set up headers for OpenRouter API
1307
+ headers = {
1308
+ "x-api-key": api_key,
1309
+ "content-type": "application/json"
1310
+ }
1311
+
1312
+ # Models to try in order of preference
1313
+ models_to_try = ["openrouter/openai-gpt-4o-mini"]
1314
+
1315
+ def try_api_call(model_name, retries=2, backoff_factor=1.5):
1316
+ payload = {
1317
+ "model": model_name,
1318
+ "max_tokens": 300,
1319
+ "messages": [
1320
+ {"role": "user", "content": prompt}
1321
+ ]
1322
+ }
1323
+
1324
+ print(f"šŸ” DEBUG: Payload prepared, prompt length: {len(prompt)}")
1325
+
1326
+ # Add specific handling for common errors
1327
+ last_error = None
1328
+ for attempt in range(retries + 1):
1329
+ try:
1330
+ if attempt > 0:
1331
+ # Exponential backoff
1332
+ wait_time = backoff_factor * (2 ** (attempt - 1))
1333
+ print(f"ā±ļø Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
1334
+ time.sleep(wait_time)
1335
+
1336
+ print(f"šŸ¤– Calling OpenRouter with {model_name} model to debug the failed command...")
1337
+ print(f"šŸ” DEBUG: Making POST request to OpenRouter API...")
1338
+ response = requests.post(
1339
+ "https://openrouter.ai/api/v1/chat/completions", # OpenRouter API endpoint
1340
+ headers=headers,
1341
+ json=payload,
1342
+ timeout=45 # Increased timeout for reliability
1343
+ )
1344
+
1345
+ print(f"šŸ” DEBUG: Response received, status code: {response.status_code}")
1346
+
1347
+ # Handle specific status codes
1348
+ if response.status_code == 200:
1349
+ print(f"šŸ” DEBUG: Success! Response length: {len(response.text)}")
1350
+ return response.json(), None
1351
+ elif response.status_code == 401:
1352
+ error_msg = "Authentication error: Invalid API key"
1353
+ print(f"āŒ {error_msg}")
1354
+ print(f"šŸ” DEBUG: Response text: {response.text}")
1355
+ # Don't retry auth errors
1356
+ return None, error_msg
1357
+ elif response.status_code == 429:
1358
+ error_msg = "Rate limit exceeded or quota reached"
1359
+ print(f"āš ļø {error_msg}")
1360
+ print(f"šŸ” DEBUG: Response text: {response.text}")
1361
+ # Always retry rate limit errors with increasing backoff
1362
+ last_error = error_msg
1363
+ continue
1364
+ elif response.status_code == 500:
1365
+ error_msg = "OpenRouter server error"
1366
+ print(f"āš ļø {error_msg}")
1367
+ print(f"šŸ” DEBUG: Response text: {response.text}")
1368
+ # Retry server errors
1369
+ last_error = error_msg
1370
+ continue
1371
+ else:
1372
+ error_msg = f"Status code: {response.status_code}, Response: {response.text}"
1373
+ print(f"āš ļø OpenRouter API error: {error_msg}")
1374
+ print(f"šŸ” DEBUG: Full response text: {response.text}")
1375
+ last_error = error_msg
1376
+ # Only retry if we have attempts left
1377
+ if attempt < retries:
1378
+ continue
1379
+ return None, error_msg
1380
+ except requests.exceptions.Timeout:
1381
+ error_msg = "Request timed out"
1382
+ last_error = error_msg
1383
+ # Always retry timeouts
1384
+ continue
1385
+ except requests.exceptions.ConnectionError:
1386
+ error_msg = "Connection error"
1387
+ print(f"āš ļø {error_msg}")
1388
+ print(f"šŸ” DEBUG: Connection failed to openrouter.ai")
1389
+ last_error = error_msg
1390
+ # Always retry connection errors
1391
+ continue
1392
+ except Exception as e:
1393
+ error_msg = str(e)
1394
+ print(f"āš ļø Unexpected error: {error_msg}")
1395
+ print(f"šŸ” DEBUG: Exception type: {type(e).__name__}")
1396
+ print(f"šŸ” DEBUG: Exception details: {str(e)}")
1397
+ last_error = error_msg
1398
+ # Only retry if we have attempts left
1399
+ if attempt < retries:
1400
+ continue
1401
+ return None, error_msg
1402
+
1403
+ # If we get here, all retries failed
1404
+ return None, last_error
1405
+
1406
+ # Try each model in sequence until one works
1407
+ result = None
1408
+ last_error = None
1409
+
1410
+ for model in models_to_try:
1411
+ result, error = try_api_call(model)
1412
+ if result:
1413
+ break
1414
+ else:
1415
+ print(f"āš ļø Failed to get response from {model}: {error}")
1416
+ last_error = error
1417
+
1418
+ if not result:
1419
+ print(f"āŒ All model attempts failed. Last error: {last_error}")
1420
+ return None
1421
+
1422
+ # Process the response
1423
+ try:
1424
+ print(f"šŸ” DEBUG: Processing OpenRouter response...")
1425
+ print(f"šŸ” DEBUG: Choices count: {len(result.get('choices', []))}")
1426
+
1427
+ fix_command = result["choices"][0]["message"]["content"].strip()
1428
+ print(f"šŸ” DEBUG: Raw response content: {fix_command}")
1429
+
1430
+ # Save the original response for debugging
1431
+ original_response = fix_command
1432
+
1433
+ # Extract just the command if it's wrapped in backticks or explanation
1434
+ if "```" in fix_command:
1435
+ # Extract content between backticks
1436
+ import re
1437
+ code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
1438
+ if code_blocks:
1439
+ fix_command = code_blocks[0].strip()
1440
+ print(f"āœ… Extracted command from code block: {fix_command}")
1441
+
1442
+ # If the response still has explanatory text, try to extract just the command
1443
+ if len(fix_command.split('\n')) > 1:
1444
+ # First try to find lines that look like commands (start with common command prefixes)
1445
+ command_prefixes = ['sudo', 'apt', 'pip', 'npm', 'yarn', 'git', 'cd', 'mv', 'cp', 'rm', 'mkdir', 'touch',
1446
+ 'chmod', 'chown', 'echo', 'cat', 'python', 'python3', 'node', 'export',
1447
+ 'curl', 'wget', 'docker', 'make', 'gcc', 'g++', 'javac', 'java',
1448
+ 'conda', 'uv', 'poetry', 'nvm', 'rbenv', 'pyenv', 'rustup']
1449
+
1450
+ # Check for lines that start with common command prefixes
1451
+ command_lines = [line.strip() for line in fix_command.split('\n')
1452
+ if any(line.strip().startswith(prefix) for prefix in command_prefixes)]
1453
+
1454
+ if command_lines:
1455
+ # Use the first command line found
1456
+ fix_command = command_lines[0]
1457
+ print(f"āœ… Identified command by prefix: {fix_command}")
1458
+ else:
1459
+ # Try to find lines that look like commands (contain common shell patterns)
1460
+ shell_patterns = [' | ', ' > ', ' >> ', ' && ', ' || ', ' ; ', '$(', '`', ' -y ', ' --yes ']
1461
+ command_lines = [line.strip() for line in fix_command.split('\n')
1462
+ if any(pattern in line for pattern in shell_patterns)]
1463
+
1464
+ if command_lines:
1465
+ # Use the first command line found
1466
+ fix_command = command_lines[0]
1467
+ print(f"āœ… Identified command by shell pattern: {fix_command}")
1468
+ else:
1469
+ # Fall back to the shortest non-empty line as it's likely the command
1470
+ lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
1471
+ if lines:
1472
+ # Exclude very short lines that are likely not commands
1473
+ valid_lines = [line for line in lines if len(line) > 5]
1474
+ if valid_lines:
1475
+ fix_command = min(valid_lines, key=len)
1476
+ else:
1477
+ fix_command = min(lines, key=len)
1478
+ print(f"āœ… Selected shortest line as command: {fix_command}")
1479
+
1480
+ # Clean up the command - remove any trailing periods or quotes
1481
+ fix_command = fix_command.rstrip('.;"\'')
1482
+
1483
+ # Remove common prefixes that LLMs sometimes add
1484
+ prefixes_to_remove = [
1485
+ "Run: ", "Execute: ", "Try: ", "Command: ", "Fix: ", "Solution: ",
1486
+ "You should run: ", "You can run: ", "You need to run: "
1487
+ ]
1488
+ for prefix in prefixes_to_remove:
1489
+ if fix_command.startswith(prefix):
1490
+ fix_command = fix_command[len(prefix):].strip()
1491
+ print(f"āœ… Removed prefix: {prefix}")
1492
+ break
1493
+
1494
+ # If the command is still multi-line or very long, it might not be a valid command
1495
+ if len(fix_command.split('\n')) > 1 or len(fix_command) > 500:
1496
+ print("āš ļø Extracted command appears invalid (multi-line or too long)")
1497
+ print("šŸ” Original response from LLM:")
1498
+ print("-" * 60)
1499
+ print(original_response)
1500
+ print("-" * 60)
1501
+ print("āš ļø Using best guess for command")
1502
+
1503
+ print(f"šŸ”§ Suggested fix: {fix_command}")
1504
+ print(f"šŸ” DEBUG: Returning fix command: {fix_command}")
1505
+ return fix_command
1506
+ except Exception as e:
1507
+ print(f"āŒ Error processing OpenRouter response: {e}")
1508
+ print(f"šŸ” DEBUG: Exception type: {type(e).__name__}")
1509
+ print(f"šŸ” DEBUG: Exception details: {str(e)}")
1510
+ return None
1511
+
1155
1512
 
1156
1513
  def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
1157
1514
  """Unified function to call LLM for batch debugging - routes to OpenAI or Anthropic based on configuration"""
@@ -1183,6 +1540,33 @@ def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sa
1183
1540
  print(f"āš ļø Error getting Anthropic API key from credentials manager: {e}")
1184
1541
 
1185
1542
  return call_anthropic_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
1543
+ elif current_model == "openrouter":
1544
+ # Try to get OpenRouter API key if not provided
1545
+ if not api_key:
1546
+ # First try environment variable
1547
+ api_key = os.environ.get("OPENROUTER_API_KEY")
1548
+
1549
+ # If not in environment, try to fetch from server using fetch_modal_tokens
1550
+ if not api_key:
1551
+ try:
1552
+ from fetch_modal_tokens import get_tokens
1553
+ # Assuming OpenRouter key is the 5th token in the tuple
1554
+ tokens = get_tokens()
1555
+ if len(tokens) >= 5:
1556
+ api_key = tokens[4]
1557
+ except Exception as e:
1558
+ print(f"āš ļø Error fetching OpenRouter API key from server: {e}")
1559
+
1560
+ # Then try credentials manager
1561
+ if not api_key:
1562
+ try:
1563
+ from credentials_manager import CredentialsManager
1564
+ credentials_manager = CredentialsManager()
1565
+ api_key = credentials_manager.get_openrouter_api_key()
1566
+ except Exception as e:
1567
+ print(f"āš ļø Error getting OpenRouter API key from credentials manager: {e}")
1568
+
1569
+ return call_openrouter_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
1186
1570
  else:
1187
1571
  # Default to OpenAI
1188
1572
  # Try to get OpenAI API key if not provided
@@ -1364,6 +1748,185 @@ Provide fixes for all {len(failed_commands)} failed commands:"""
1364
1748
  print(f"āŒ Anthropic API error: {response.status_code} - {response.text}")
1365
1749
  return []
1366
1750
 
1751
+ except Exception as e:
1752
+ print(f"āŒ Error during batch debugging: {e}")
1753
+ return []
1754
+
1755
+ def call_openrouter_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
1756
+ """Call OpenRouter to debug multiple failed commands and suggest fixes for all of them at once"""
1757
+ print("\nšŸ” DEBUG: Starting batch OpenRouter debugging...")
1758
+ print(f"šŸ” DEBUG: Analyzing {len(failed_commands)} failed commands")
1759
+
1760
+ if not failed_commands:
1761
+ print("āš ļø No failed commands to analyze")
1762
+ return []
1763
+
1764
+ if not api_key:
1765
+ print("šŸ” DEBUG: No OpenRouter API key provided, searching for one...")
1766
+
1767
+ # First try environment variable
1768
+ api_key = os.environ.get("OPENROUTER_API_KEY")
1769
+ print(f"šŸ” DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
1770
+ if api_key:
1771
+ print(f"šŸ” DEBUG: Environment API key value: {api_key}")
1772
+
1773
+ # If not in environment, try to fetch from server using fetch_modal_tokens
1774
+ if not api_key:
1775
+ try:
1776
+ print("šŸ” DEBUG: Trying to fetch API key from server...")
1777
+ from fetch_modal_tokens import get_tokens
1778
+ # Assuming OpenRouter key is the 5th token in the tuple
1779
+ tokens = get_tokens()
1780
+ if len(tokens) >= 5:
1781
+ api_key = tokens[4]
1782
+ except Exception as e:
1783
+ print(f"āš ļø Error fetching OpenRouter API key from server: {e}")
1784
+
1785
+ # Then try credentials manager
1786
+ if not api_key:
1787
+ try:
1788
+ from credentials_manager import CredentialsManager
1789
+ credentials_manager = CredentialsManager()
1790
+ api_key = credentials_manager.get_openrouter_api_key()
1791
+ except Exception as e:
1792
+ print(f"āš ļø Error getting OpenRouter API key from credentials manager: {e}")
1793
+
1794
+ # Store the API key in a persistent file if found
1795
+ if api_key:
1796
+ try:
1797
+ os.makedirs(os.path.expanduser("~/.gitarsenal"), exist_ok=True)
1798
+ with open(os.path.expanduser("~/.gitarsenal/openrouter_key"), "w") as f:
1799
+ f.write(api_key)
1800
+ print("āœ… Saved OpenRouter API key for future use")
1801
+ except Exception as e:
1802
+ print(f"āš ļø Could not save API key: {e}")
1803
+
1804
+ # Try to load from saved file if not in environment
1805
+ if not api_key:
1806
+ try:
1807
+ key_file = os.path.expanduser("~/.gitarsenal/openrouter_key")
1808
+ print(f"šŸ” DEBUG: Checking for saved API key at: {key_file}")
1809
+ if os.path.exists(key_file):
1810
+ with open(key_file, "r") as f:
1811
+ api_key = f.read().strip()
1812
+ if api_key:
1813
+ print("āœ… Loaded OpenRouter API key from saved file")
1814
+ print(f"šŸ” DEBUG: API key from file: {api_key}")
1815
+ print(f"šŸ” DEBUG: API key length: {len(api_key)}")
1816
+ # Also set in environment for this session
1817
+ os.environ["OPENROUTER_API_KEY"] = api_key
1818
+ else:
1819
+ print("šŸ” DEBUG: Saved file exists but is empty")
1820
+ else:
1821
+ print("šŸ” DEBUG: No saved API key file found")
1822
+ except Exception as e:
1823
+ print(f"āš ļø Could not load saved API key: {e}")
1824
+
1825
+ if not api_key:
1826
+ print("āŒ No OpenRouter API key available for batch debugging")
1827
+ return []
1828
+
1829
+ # Prepare context for batch analysis
1830
+ context_parts = []
1831
+ context_parts.append(f"Current directory: {current_dir}")
1832
+ context_parts.append(f"Sandbox available: {sandbox is not None}")
1833
+
1834
+ # Add failed commands with their errors
1835
+ for i, failed_cmd in enumerate(failed_commands, 1):
1836
+ cmd_type = failed_cmd.get('type', 'main')
1837
+ original_cmd = failed_cmd.get('original_command', '')
1838
+ cmd_text = failed_cmd['command']
1839
+ stderr = failed_cmd.get('stderr', '')
1840
+ stdout = failed_cmd.get('stdout', '')
1841
+
1842
+ context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
1843
+ context_parts.append(f"Command: {cmd_text}")
1844
+ if original_cmd and original_cmd != cmd_text:
1845
+ context_parts.append(f"Original Command: {original_cmd}")
1846
+ if stderr:
1847
+ context_parts.append(f"Error Output: {stderr}")
1848
+ if stdout:
1849
+ context_parts.append(f"Standard Output: {stdout}")
1850
+
1851
+ # Create the prompt for batch analysis
1852
+ prompt = f"""You are a debugging assistant analyzing multiple failed commands.
1853
+
1854
+ Context:
1855
+ {chr(10).join(context_parts)}
1856
+
1857
+ Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:
1858
+
1859
+ FIX_COMMAND_{i}: <the fix command>
1860
+ REASON_{i}: <brief explanation of why the original command failed and how the fix addresses it>
1861
+
1862
+ Guidelines:
1863
+ - For file not found errors, first search for the file using 'find . -name filename -type f'
1864
+ - For missing packages, use appropriate package managers (pip, apt-get, npm)
1865
+ - For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
1866
+ - For permission errors, suggest commands with sudo if appropriate
1867
+ - For network issues, suggest retry commands or alternative URLs
1868
+ - Keep each fix command simple and focused on the specific error
1869
+
1870
+ Provide fixes for all {len(failed_commands)} failed commands:"""
1871
+
1872
+ # Set up headers for OpenRouter API
1873
+ headers = {
1874
+ "x-api-key": api_key,
1875
+ "content-type": "application/json"
1876
+ }
1877
+
1878
+ payload = {
1879
+ "model": "openrouter/openai-gpt-4o-mini", # Small, fast model used for batch analysis
1880
+ "max_tokens": 1000,
1881
+ "messages": [
1882
+ {"role": "user", "content": prompt}
1883
+ ]
1884
+ }
1885
+
1886
+ try:
1887
+ print(f"šŸ¤– Calling OpenRouter for batch debugging of {len(failed_commands)} commands...")
1888
+ response = requests.post(
1889
+ "https://openrouter.ai/api/v1/chat/completions", # OpenRouter API endpoint
1890
+ headers=headers,
1891
+ json=payload,
1892
+ timeout=60
1893
+ )
1894
+
1895
+ if response.status_code == 200:
1896
+ result = response.json()
1897
+ content = result['choices'][0]['message']['content']
1898
+ print(f"āœ… Batch analysis completed")
1899
+
1900
+ # Parse the response to extract fix commands
1901
+ fixes = []
1902
+ for i in range(1, len(failed_commands) + 1):
1903
+ fix_pattern = f"FIX_COMMAND_{i}: (.+)"
1904
+ reason_pattern = f"REASON_{i}: (.+)"
1905
+
1906
+ fix_match = re.search(fix_pattern, content, re.MULTILINE)
1907
+ reason_match = re.search(reason_pattern, content, re.MULTILINE)
1908
+
1909
+ if fix_match:
1910
+ fix_command = fix_match.group(1).strip()
1911
+ reason = reason_match.group(1).strip() if reason_match else "OpenRouter suggested fix"
1912
+
1913
+ # Clean up the fix command
1914
+ if fix_command.startswith('`') and fix_command.endswith('`'):
1915
+ fix_command = fix_command[1:-1]
1916
+
1917
+ fixes.append({
1918
+ 'original_command': failed_commands[i-1]['command'],
1919
+ 'fix_command': fix_command,
1920
+ 'reason': reason,
1921
+ 'command_index': i-1
1922
+ })
1923
+
1924
+ print(f"šŸ”§ Generated {len(fixes)} fix commands from batch analysis")
1925
+ return fixes
1926
+ else:
1927
+ print(f"āŒ OpenRouter API error: {response.status_code} - {response.text}")
1928
+ return []
1929
+
1367
1930
  except Exception as e:
1368
1931
  print(f"āŒ Error during batch debugging: {e}")
1369
1932
  return []
@@ -381,7 +381,7 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
381
381
  try:
382
382
  from command_manager import CommandListManager
383
383
  from shell import PersistentShell
384
- from llm_debugging import get_stored_credentials, generate_auth_context, call_llm_for_debug, call_llm_for_batch_debug, call_anthropic_for_debug, call_openai_for_debug, call_openai_for_batch_debug, call_anthropic_for_batch_debug, get_current_debug_model
384
+ from llm_debugging import get_stored_credentials, generate_auth_context, call_llm_for_debug, call_llm_for_batch_debug, call_anthropic_for_debug, call_openai_for_debug, call_openai_for_batch_debug, call_anthropic_for_batch_debug, call_openrouter_for_debug, call_openrouter_for_batch_debug, get_current_debug_model
385
385
 
386
386
  print("āœ… Successfully imported CommandListManager, PersistentShell, and all llm_debugging functions from mounted modules")
387
387
  except ImportError as e:
@@ -1867,7 +1867,7 @@ Return only the JSON array, no other text.
1867
1867
  client = openai.OpenAI(api_key=api_key)
1868
1868
 
1869
1869
  response = client.chat.completions.create(
1870
- model="gpt-3.5-turbo",
1870
+ model="gpt-4.1-mini",
1871
1871
  messages=[
1872
1872
  {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
1873
1873
  {"role": "user", "content": prompt}