gitarsenal-cli 1.9.23 → 1.9.24
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
package/.venv_status.json
CHANGED

@@ -1 +1 @@
-{"created":"2025-08-
+{"created":"2025-08-07T09:48:26.761Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
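The status file is a small JSON record written when the bundled uv virtual environment is created: a creation timestamp, the Python packages installed into it, and the uv version used. A minimal sketch of how a consumer might read it (the helper name and default path are illustrative, not part of the package):

import json
from pathlib import Path

def read_venv_status(path: str = ".venv_status.json"):
    """Return the recorded venv status, or None if the file is missing or unreadable."""
    status_file = Path(path)
    if not status_file.exists():
        return None
    try:
        with open(status_file, "r") as f:
            status = json.load(f)
    except json.JSONDecodeError:
        return None
    # Fields observed in the diff: "created", "packages", "uv_version"
    return {
        "created": status.get("created"),
        "packages": status.get("packages", []),
        "uv_version": status.get("uv_version"),
    }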
package/package.json
CHANGED

Binary file

@@ -3,7 +3,15 @@ import time
 import requests
 import re
 import json
-
+
+# Import the LLM debugging function
+try:
+    from llm_debugging import call_llm_for_batch_debug
+except ImportError:
+    # Fallback: define a simple version if the import fails
+    def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
+        print("⚠️ LLM batch debugging not available")
+        return []

 class CommandListManager:
     """Manages a dynamic list of setup commands with status tracking and LLM-suggested fixes."""
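The guarded import above means CommandListManager always has a call_llm_for_batch_debug symbol: the real implementation when llm_debugging.py is importable, otherwise a stub that reports the feature as unavailable and returns no fixes. A hedged usage sketch (the failed-command entry below is fabricated; its keys mirror those read by the batch debugger in llm_debugging.py):

# Hypothetical caller; each entry uses the keys the batch debugger inspects.
failed = [
    {
        "command": "pip install torhc",
        "type": "main",
        "stderr": "ERROR: No matching distribution found for torhc",
        "stdout": "",
    }
]

# With the real module this may return LLM-suggested fixes; with the fallback
# stub it prints a warning and returns an empty list.
fixes = call_llm_for_batch_debug(failed, api_key=None, current_dir="/workspace/repo")
for fix in fixes:
    print(fix["fix_command"], "-", fix["reason"])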
package/python/llm_debugging.py
CHANGED

@@ -5,6 +5,98 @@ import requests
 import openai
 import anthropic

+
+def get_stored_credentials():
+    """Load stored credentials from ~/.gitarsenal/credentials.json"""
+    import json
+    from pathlib import Path
+
+    try:
+        credentials_file = Path.home() / ".gitarsenal" / "credentials.json"
+        if credentials_file.exists():
+            with open(credentials_file, 'r') as f:
+                credentials = json.load(f)
+            return credentials
+        else:
+            return {}
+    except Exception as e:
+        print(f"⚠️ Error loading stored credentials: {e}")
+        return {}
+
+def generate_auth_context(stored_credentials):
+    """Generate simple authentication context for the OpenAI prompt"""
+    if not stored_credentials:
+        return "No stored credentials available."
+
+    auth_context = "Available stored credentials (use actual values in commands):\n"
+
+    for key, value in stored_credentials.items():
+        # Mask the actual value for security in logs, but provide the real value
+        masked_value = value[:8] + "..." if len(value) > 8 else "***"
+        auth_context += f"- {key}: {masked_value} (actual value: {value})\n"
+
+    return auth_context
+
+def get_current_debug_model():
+    """Get the currently configured debugging model preference"""
+    return os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
+
+def call_llm_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
+    """Unified function to call LLM for debugging - routes to OpenAI or Anthropic based on configuration"""
+    current_model = get_current_debug_model()
+
+    print(f"🔍 DEBUG: Using {current_model.upper()} for debugging...")
+
+    if current_model == "anthropic":
+        # Try to get Anthropic API key if not provided
+        if not api_key:
+            # First try environment variable
+            api_key = os.environ.get("ANTHROPIC_API_KEY")
+
+            # If not in environment, try to fetch from server using fetch_modal_tokens
+            if not api_key:
+                try:
+                    from fetch_modal_tokens import get_tokens
+                    _, _, _, api_key = get_tokens()
+                except Exception as e:
+                    print(f"⚠️ Error fetching Anthropic API key from server: {e}")
+
+            # Then try credentials manager
+            if not api_key:
+                try:
+                    from credentials_manager import CredentialsManager
+                    credentials_manager = CredentialsManager()
+                    api_key = credentials_manager.get_anthropic_api_key()
+                except Exception as e:
+                    print(f"⚠️ Error getting Anthropic API key from credentials manager: {e}")
+
+        return call_anthropic_for_debug(command, error_output, api_key, current_dir, sandbox)
+    else:
+        # Default to OpenAI
+        # Try to get OpenAI API key if not provided
+        if not api_key:
+            # First try environment variable
+            api_key = os.environ.get("OPENAI_API_KEY")
+
+            # If not in environment, try to fetch from server using fetch_modal_tokens
+            if not api_key:
+                try:
+                    from fetch_modal_tokens import get_tokens
+                    _, _, api_key, _ = get_tokens()
+                except Exception as e:
+                    print(f"⚠️ Error fetching OpenAI API key from server: {e}")
+
+            # Then try credentials manager
+            if not api_key:
+                try:
+                    from credentials_manager import CredentialsManager
+                    credentials_manager = CredentialsManager()
+                    api_key = credentials_manager.get_openai_api_key()
+                except Exception as e:
+                    print(f"⚠️ Error getting OpenAI API key from credentials manager: {e}")
+
+        return call_openai_for_debug(command, error_output, api_key, current_dir, sandbox)
+
 def call_openai_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
     """Call OpenAI to debug a failed command and suggest a fix"""
     print("\n🔍 DEBUG: Starting LLM debugging...")

@@ -1058,4 +1150,220 @@ Return only the command to fix the issue, nothing else."""
         print(f"❌ Error processing Anthropic response: {e}")
         print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
         print(f"🔍 DEBUG: Exception details: {str(e)}")
-        return None
+        return None
+
+
+def call_llm_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
+    """Unified function to call LLM for batch debugging - routes to OpenAI or Anthropic based on configuration"""
+    current_model = get_current_debug_model()
+
+    print(f"🔍 DEBUG: Using {current_model.upper()} for batch debugging...")
+
+    if current_model == "anthropic":
+        # Try to get Anthropic API key if not provided
+        if not api_key:
+            # First try environment variable
+            api_key = os.environ.get("ANTHROPIC_API_KEY")
+
+            # If not in environment, try to fetch from server using fetch_modal_tokens
+            if not api_key:
+                try:
+                    from fetch_modal_tokens import get_tokens
+                    _, _, _, api_key = get_tokens()
+                except Exception as e:
+                    print(f"⚠️ Error fetching Anthropic API key from server: {e}")
+
+            # Then try credentials manager
+            if not api_key:
+                try:
+                    from credentials_manager import CredentialsManager
+                    credentials_manager = CredentialsManager()
+                    api_key = credentials_manager.get_anthropic_api_key()
+                except Exception as e:
+                    print(f"⚠️ Error getting Anthropic API key from credentials manager: {e}")
+
+        return call_anthropic_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
+    else:
+        # Default to OpenAI
+        # Try to get OpenAI API key if not provided
+        if not api_key:
+            # First try environment variable
+            api_key = os.environ.get("OPENAI_API_KEY")
+
+            # If not in environment, try to fetch from server using fetch_modal_tokens
+            if not api_key:
+                try:
+                    from fetch_modal_tokens import get_tokens
+                    _, _, api_key, _ = get_tokens()
+                except Exception as e:
+                    print(f"⚠️ Error fetching OpenAI API key from server: {e}")
+
+            # Then try credentials manager
+            if not api_key:
+                try:
+                    from credentials_manager import CredentialsManager
+                    credentials_manager = CredentialsManager()
+                    api_key = credentials_manager.get_openai_api_key()
+                except Exception as e:
+                    print(f"⚠️ Error getting OpenAI API key from credentials manager: {e}")
+
+        return call_openai_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
+
+def call_anthropic_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
+    """Call Anthropic Claude to debug multiple failed commands and suggest fixes for all of them at once"""
+    print("\n🔍 DEBUG: Starting batch Anthropic Claude debugging...")
+    print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")
+
+    if not failed_commands:
+        print("⚠️ No failed commands to analyze")
+        return []
+
+    if not api_key:
+        print("🔍 DEBUG: No Anthropic API key provided, searching for one...")
+
+        # First try environment variable
+        api_key = os.environ.get("ANTHROPIC_API_KEY")
+        print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
+        if api_key:
+            print(f"🔍 DEBUG: Environment API key value: {api_key}")
+
+        # If not in environment, try to fetch from server using fetch_modal_tokens
+        if not api_key:
+            try:
+                print("🔍 DEBUG: Trying to fetch API key from server...")
+                from fetch_modal_tokens import get_tokens
+                _, _, _, api_key = get_tokens()
+                if api_key:
+                    # Set in environment for this session
+                    os.environ["ANTHROPIC_API_KEY"] = api_key
+                else:
+                    print("⚠️ Could not fetch Anthropic API key from server")
+            except Exception as e:
+                print(f"⚠️ Error fetching API key from server: {e}")
+
+        # Then try credentials manager
+        if not api_key:
+            print("🔍 DEBUG: Trying credentials manager...")
+            try:
+                from credentials_manager import CredentialsManager
+                credentials_manager = CredentialsManager()
+                api_key = credentials_manager.get_anthropic_api_key()
+                if api_key:
+                    print(f"🔍 DEBUG: API key from credentials manager: Found")
+                    print(f"🔍 DEBUG: Credentials manager API key value: {api_key}")
+                    # Set in environment for this session
+                    os.environ["ANTHROPIC_API_KEY"] = api_key
+                else:
+                    print("⚠️ Could not fetch Anthropic API key from credentials manager")
+            except Exception as e:
+                print(f"⚠️ Error fetching API key from credentials manager: {e}")
+
+    if not api_key:
+        print("❌ No Anthropic API key available for batch debugging")
+        return []
+
+    # Prepare context for batch analysis
+    context_parts = []
+    context_parts.append(f"Current directory: {current_dir}")
+    context_parts.append(f"Sandbox available: {sandbox is not None}")
+
+    # Add failed commands with their errors
+    for i, failed_cmd in enumerate(failed_commands, 1):
+        cmd_type = failed_cmd.get('type', 'main')
+        original_cmd = failed_cmd.get('original_command', '')
+        cmd_text = failed_cmd['command']
+        stderr = failed_cmd.get('stderr', '')
+        stdout = failed_cmd.get('stdout', '')
+
+        context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
+        context_parts.append(f"Command: {cmd_text}")
+        if original_cmd and original_cmd != cmd_text:
+            context_parts.append(f"Original Command: {original_cmd}")
+        if stderr:
+            context_parts.append(f"Error Output: {stderr}")
+        if stdout:
+            context_parts.append(f"Standard Output: {stdout}")
+
+    # Create the prompt for batch analysis
+    prompt = f"""You are a debugging assistant analyzing multiple failed commands.
+
+Context:
+{chr(10).join(context_parts)}
+
+Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:
+
+FIX_COMMAND_{i}: <the fix command>
+REASON_{i}: <brief explanation of why the original command failed and how the fix addresses it>
+
+Guidelines:
+- For file not found errors, first search for the file using 'find . -name filename -type f'
+- For missing packages, use appropriate package managers (pip, apt-get, npm)
+- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
+- For permission errors, suggest commands with sudo if appropriate
+- For network issues, suggest retry commands or alternative URLs
+- Keep each fix command simple and focused on the specific error
+
+Provide fixes for all {len(failed_commands)} failed commands:"""
+
+    # Set up headers for Anthropic API
+    headers = {
+        "x-api-key": api_key,
+        "anthropic-version": "2023-06-01",
+        "content-type": "application/json"
+    }
+
+    payload = {
+        "model": "claude-3-5-sonnet-20241022",  # Use a more capable model for batch analysis
+        "max_tokens": 1000,
+        "messages": [
+            {"role": "user", "content": prompt}
+        ]
+    }
+
+    try:
+        print(f"🤖 Calling Anthropic Claude for batch debugging of {len(failed_commands)} commands...")
+        response = requests.post(
+            "https://api.anthropic.com/v1/messages",
+            headers=headers,
+            json=payload,
+            timeout=60
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            content = result['content'][0]['text']
+            print(f"✅ Batch analysis completed")
+
+            # Parse the response to extract fix commands
+            fixes = []
+            for i in range(1, len(failed_commands) + 1):
+                fix_pattern = f"FIX_COMMAND_{i}: (.+)"
+                reason_pattern = f"REASON_{i}: (.+)"
+
+                fix_match = re.search(fix_pattern, content, re.MULTILINE)
+                reason_match = re.search(reason_pattern, content, re.MULTILINE)
+
+                if fix_match:
+                    fix_command = fix_match.group(1).strip()
+                    reason = reason_match.group(1).strip() if reason_match else "Anthropic Claude suggested fix"
+
+                    # Clean up the fix command
+                    if fix_command.startswith('`') and fix_command.endswith('`'):
+                        fix_command = fix_command[1:-1]
+
+                    fixes.append({
+                        'original_command': failed_commands[i-1]['command'],
+                        'fix_command': fix_command,
+                        'reason': reason,
+                        'command_index': i-1
+                    })
+
+            print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
+            return fixes
+        else:
+            print(f"❌ Anthropic API error: {response.status_code} - {response.text}")
+            return []
+
+    except Exception as e:
+        print(f"❌ Error during batch debugging: {e}")
+        return []
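call_anthropic_for_batch_debug asks Claude to answer in a line-oriented FIX_COMMAND_n / REASON_n format and then recovers the fixes with regular expressions. A small self-contained sketch of that parsing step, using a fabricated reply (the reply text and commands below are illustrative only):

import re

# Fabricated model reply in the format the batch prompt requests.
content = """FIX_COMMAND_1: pip install requests
REASON_1: The requests package was not installed in the environment.
FIX_COMMAND_2: git clone https://github.com/example/repo.git
REASON_2: SSH authentication failed, so the HTTPS clone URL is used instead."""

failed_commands = [
    {"command": "python app.py"},
    {"command": "git clone git@github.com:example/repo.git"},
]

fixes = []
for i in range(1, len(failed_commands) + 1):
    fix_match = re.search(f"FIX_COMMAND_{i}: (.+)", content, re.MULTILINE)
    reason_match = re.search(f"REASON_{i}: (.+)", content, re.MULTILINE)
    if fix_match:
        fixes.append({
            "original_command": failed_commands[i - 1]["command"],
            "fix_command": fix_match.group(1).strip(),
            "reason": reason_match.group(1).strip() if reason_match else "suggested fix",
            "command_index": i - 1,
        })

print(fixes)  # one fix dict per failed command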
package/python/shell.py
CHANGED

@@ -5,6 +5,23 @@ import time
 import uuid
 import re

+def get_stored_credentials():
+    """Load stored credentials from ~/.gitarsenal/credentials.json"""
+    import json
+    from pathlib import Path
+
+    try:
+        credentials_file = Path.home() / ".gitarsenal" / "credentials.json"
+        if credentials_file.exists():
+            with open(credentials_file, 'r') as f:
+                credentials = json.load(f)
+            return credentials
+        else:
+            return {}
+    except Exception as e:
+        print(f"⚠️ Error loading stored credentials: {e}")
+        return {}
+
 class PersistentShell:
     """A persistent bash shell using subprocess.Popen for executing commands with state persistence."""

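shell.py now carries its own copy of get_stored_credentials, identical to the one added to llm_debugging.py, so the persistent shell can read stored credentials without importing the debugging module. A brief usage sketch, assuming the credentials file holds flat string key/value pairs (key names below are examples, not part of the package):

# Example ~/.gitarsenal/credentials.json (illustrative keys and values):
# {"OPENAI_API_KEY": "sk-aaaaaaaaaaaa", "HF_TOKEN": "hf_bbbbbbbbbbbb"}

creds = get_stored_credentials()  # returns {} when the file is absent or unreadable
for key, value in creds.items():
    # Same masking rule generate_auth_context applies in llm_debugging.py
    masked = value[:8] + "..." if len(value) > 8 else "***"
    print(f"{key}: {masked}")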