gitarsenal-cli 1.9.49 → 1.9.51
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.venv_status.json +1 -1
- package/package.json +1 -1
- package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
- package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc +0 -0
- package/python/command_manager.py +4 -4
- package/python/fetch_modal_tokens.py +44 -22
- package/python/llm_debugging.py +3 -3
- package/python/test_modalSandboxScript.py +9 -7
package/.venv_status.json
CHANGED
@@ -1 +1 @@
-{"created":"2025-08-
+{"created":"2025-08-12T11:49:41.397Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/package.json
CHANGED

package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc
Binary file

package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc
Binary file
package/python/command_manager.py
CHANGED
@@ -406,12 +406,12 @@ class CommandListManager:
 RUN: <reason>
 """
 
-preferred =
+preferred = os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
 providers = get_provider_rotation_order(preferred)
 
 for provider in providers:
-#
-provider_key =
+# Always fetch provider-specific key; don't reuse a different provider's key
+provider_key = get_api_key(provider)
 if not provider_key:
 print(f"⚠️ No {provider} API key available for skip analysis. Trying next provider...")
 continue
@@ -494,7 +494,7 @@ class CommandListManager:
 get_provider_rotation_order,
 )
 # Get API key if not provided
-preferred =
+preferred = os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
 providers = get_provider_rotation_order(preferred)
 
 # Get all commands for context
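
Both command_manager.py hunks follow the same pattern: read the preferred provider from GITARSENAL_DEBUG_MODEL (defaulting to "openai"), build a rotation order, and fetch a key per provider before calling it. Below is a minimal sketch of that loop; get_api_key and call_provider are hypothetical stand-ins, since only get_provider_rotation_order and the environment-variable default appear in this diff.

import os

def get_provider_rotation_order(preferred=None):
    # Mirrors llm_debugging.get_provider_rotation_order as shown later in this diff.
    default_order = ["openai", "anthropic", "groq", "openrouter"]
    if preferred and preferred in default_order:
        return [preferred] + [p for p in default_order if p != preferred]
    return default_order

def run_with_provider_fallback(prompt, get_api_key, call_provider):
    # get_api_key(provider) -> str | None and call_provider(provider, key, prompt)
    # stand in for the CLI's own helpers.
    preferred = os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
    for provider in get_provider_rotation_order(preferred):
        provider_key = get_api_key(provider)
        if not provider_key:
            print(f"⚠️ No {provider} API key available. Trying next provider...")
            continue
        return call_provider(provider, provider_key, prompt)
    return None
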
package/python/fetch_modal_tokens.py
CHANGED
@@ -17,7 +17,7 @@ def fetch_default_tokens_from_gitarsenal():
 Fetch default Modal tokens and OpenAI API key from gitarsenal.dev API.
 
 Returns:
-tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key) if successful, (None, None, None, None, None) otherwise
+tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key) if successful, (None, None, None, None, None, None) otherwise
 """
 endpoint = "https://gitarsenal.dev/api/credentials"
 
@@ -40,30 +40,31 @@ def fetch_default_tokens_from_gitarsenal():
 token_secret = data.get("modalTokenSecret")
 openai_api_key = data.get("openaiApiKey")
 anthropic_api_key = data.get("anthropicApiKey")
+openrouter_api_key = data.get("openrouterApiKey")
 groq_api_key = data.get("groqApiKey")
 
 if token_id and token_secret:
 # print("✅ Successfully fetched default tokens from gitarsenal.dev")
-return token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key
+return token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key
 else:
 print("❌ Modal tokens not found in gitarsenal.dev response")
-return None, None, None, None, None
+return None, None, None, None, None, None
 except json.JSONDecodeError:
 print("❌ Invalid JSON response from gitarsenal.dev")
-return None, None, None, None, None
+return None, None, None, None, None, None
 else:
 print(f"❌ Failed to fetch from gitarsenal.dev: {response.status_code} - {response.text[:200]}")
-return None, None, None, None, None
+return None, None, None, None, None, None
 
 except requests.exceptions.Timeout:
 print("❌ Request timeout when fetching from gitarsenal.dev")
-return None, None, None, None, None
+return None, None, None, None, None, None
 except requests.exceptions.ConnectionError:
 print("❌ Connection failed to gitarsenal.dev")
-return None, None, None, None, None
+return None, None, None, None, None, None
 except requests.exceptions.RequestException as e:
 print(f"❌ Request failed to gitarsenal.dev: {e}")
-return None, None, None, None, None
+return None, None, None, None, None, None
 
 def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
 """
@@ -74,7 +75,7 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
 api_key: API key for authentication
 
 Returns:
-tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key) if successful, (None, None, None, None, None) otherwise
+tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key) if successful, (None, None, None, None, None, None) otherwise
 """
 # Use environment variables if not provided
 if not proxy_url:
@@ -91,12 +92,12 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
 if not proxy_url:
 # print("❌ No proxy URL provided or found in environment")
 # print("💡 Set MODAL_PROXY_URL environment variable or use --proxy-url argument")
-return None, None, None, None, None
+return None, None, None, None, None, None
 
 if not api_key:
 print("❌ No API key provided or found in environment")
 print("💡 Set MODAL_PROXY_API_KEY environment variable or use --proxy-api-key argument")
-return None, None, None, None, None
+return None, None, None, None, None, None
 
 # Ensure the URL ends with a slash
 if not proxy_url.endswith("/"):
@@ -120,19 +121,20 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
 token_secret = data.get("token_secret")
 openai_api_key = data.get("openai_api_key")
 anthropic_api_key = data.get("anthropic_api_key")
+openrouter_api_key = data.get("openrouter_api_key")
 groq_api_key = data.get("groq_api_key")
 if token_id and token_secret:
 print("✅ Successfully fetched tokens from proxy server")
-return token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key
+return token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key
 else:
 print("❌ Tokens not found in response")
-return None, None, None, None, None
+return None, None, None, None, None, None
 else:
 print(f"❌ Failed to fetch tokens: {response.status_code} - {response.text}")
-return None, None, None, None, None
+return None, None, None, None, None, None
 except Exception as e:
 print(f"❌ Error fetching tokens: {e}")
-return None, None, None, None, None
+return None, None, None, None, None, None
 
 def get_tokens():
 """
@@ -140,21 +142,21 @@ def get_tokens():
 Also sets the tokens in environment variables.
 
 Returns:
-tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key)
+tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key)
 """
 # Try to fetch from the proxy server
-token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = fetch_tokens_from_proxy()
+token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key = fetch_tokens_from_proxy()
 
 # If we couldn't fetch from the server, try to get default tokens from gitarsenal.dev
 if not token_id or not token_secret:
 # print("⚠️ Proxy server failed, trying to fetch default tokens from gitarsenal.dev")
-token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = fetch_default_tokens_from_gitarsenal()
+token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key = fetch_default_tokens_from_gitarsenal()
 
 # If we still don't have tokens, we can't proceed
 if not token_id or not token_secret:
 print("❌ Failed to fetch tokens from both proxy server and gitarsenal.dev")
 print("💡 Please check your network connection and API endpoints")
-return None, None, None, None, None
+return None, None, None, None, None, None
 
 # Set the tokens in environment variables
 os.environ["MODAL_TOKEN_ID"] = token_id
@@ -169,11 +171,15 @@ def get_tokens():
 if anthropic_api_key:
 os.environ["ANTHROPIC_API_KEY"] = anthropic_api_key
 
+# Set OpenRouter API key if available
+if openrouter_api_key:
+os.environ["OPENROUTER_API_KEY"] = openrouter_api_key
+
 # Set Groq API key if available
 if groq_api_key:
 os.environ["GROQ_API_KEY"] = groq_api_key
 
-return token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key
+return token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key
 
 if __name__ == "__main__":
 # Parse command-line arguments if run directly
@@ -194,19 +200,21 @@ if __name__ == "__main__":
 print(f"✅ Set MODAL_PROXY_API_KEY from command line")
 
 # Get tokens
-token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = get_tokens()
+token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key = get_tokens()
 print(f"Token ID: {token_id}")
 print(f"Token Secret: {token_secret}")
 print(f"OpenAI API Key: {openai_api_key[:5] + '...' if openai_api_key else None}")
 print(f"Anthropic API Key: {anthropic_api_key[:5] + '...' if anthropic_api_key else None}")
+print(f"OpenRouter API Key: {openrouter_api_key[:5] + '...' if openrouter_api_key else None}")
 print(f"Groq API Key: {groq_api_key[:5] + '...' if groq_api_key else None}")
 
 # Check if tokens are set in environment variables
-print(f"\n🔍 DEBUG: Checking environment variables")
+# print(f"\n🔍 DEBUG: Checking environment variables")
 print(f"🔍 MODAL_TOKEN_ID exists: {'Yes' if os.environ.get('MODAL_TOKEN_ID') else 'No'}")
 print(f"🔍 MODAL_TOKEN_SECRET exists: {'Yes' if os.environ.get('MODAL_TOKEN_SECRET') else 'No'}")
 print(f"🔍 OPENAI_API_KEY exists: {'Yes' if os.environ.get('OPENAI_API_KEY') else 'No'}")
 print(f"🔍 ANTHROPIC_API_KEY exists: {'Yes' if os.environ.get('ANTHROPIC_API_KEY') else 'No'}")
+print(f"🔍 OPENROUTER_API_KEY exists: {'Yes' if os.environ.get('OPENROUTER_API_KEY') else 'No'}")
 print(f"🔍 GROQ_API_KEY exists: {'Yes' if os.environ.get('GROQ_API_KEY') else 'No'}")
 if os.environ.get('MODAL_TOKEN_ID'):
 print(f"🔍 MODAL_TOKEN_ID length: {len(os.environ.get('MODAL_TOKEN_ID'))}")
@@ -216,6 +224,8 @@ if __name__ == "__main__":
 print(f"🔍 OPENAI_API_KEY length: {len(os.environ.get('OPENAI_API_KEY'))}")
 if os.environ.get('ANTHROPIC_API_KEY'):
 print(f"🔍 ANTHROPIC_API_KEY length: {len(os.environ.get('ANTHROPIC_API_KEY'))}")
+if os.environ.get('OPENROUTER_API_KEY'):
+print(f"🔍 OPENROUTER_API_KEY length: {len(os.environ.get('OPENROUTER_API_KEY'))}")
 if os.environ.get('GROQ_API_KEY'):
 print(f"🔍 GROQ_API_KEY length: {len(os.environ.get('GROQ_API_KEY'))}")
 
@@ -227,6 +237,7 @@ if __name__ == "__main__":
 "token_secret": token_secret,
 "openai_api_key": openai_api_key,
 "anthropic_api_key": anthropic_api_key,
+"openrouter_api_key": openrouter_api_key,
 "groq_api_key": groq_api_key
 }, f)
 print(f"\n✅ Tokens written to {tokens_file}")
@@ -283,6 +294,17 @@ if __name__ == "__main__":
 f.write(env_content)
 print(f"✅ Updated Anthropic API key in {env_file}")
 
+# Update or add OPENROUTER_API_KEY
+if openrouter_api_key:
+if "OPENROUTER_API_KEY" in env_content:
+import re
+env_content = re.sub(r'OPENROUTER_API_KEY=.*\n', f'OPENROUTER_API_KEY={openrouter_api_key}\n', env_content)
+else:
+env_content += f'\nOPENROUTER_API_KEY={openrouter_api_key}\n'
+with open(env_file, 'w') as f:
+f.write(env_content)
+print(f"✅ Updated OpenRouter API key in {env_file}")
+
 # Update or add GROQ_API_KEY
 if groq_api_key:
 if "GROQ_API_KEY" in env_content:
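
Every return path in fetch_modal_tokens.py now yields a 6-tuple, with the OpenRouter key inserted before the Groq key, so existing 5-value unpacks will raise ValueError. A minimal caller sketch, assuming fetch_modal_tokens is importable from the package's python/ directory:

import os
from fetch_modal_tokens import get_tokens

# get_tokens() also exports whatever it finds into environment variables.
token_id, token_secret, openai_key, anthropic_key, openrouter_key, groq_key = get_tokens()

if not (token_id and token_secret):
    raise SystemExit("❌ Could not obtain Modal tokens from the proxy or gitarsenal.dev")

if openrouter_key:
    # OPENROUTER_API_KEY was set by get_tokens(); show only a prefix, never the full key.
    print(f"OpenRouter key loaded: {os.environ['OPENROUTER_API_KEY'][:5]}...")
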
package/python/llm_debugging.py
CHANGED
@@ -35,7 +35,7 @@ def generate_auth_context(stored_credentials):
 
 def get_current_debug_model():
 """Get the currently configured debugging model preference"""
-return os.environ.get("GITARSENAL_DEBUG_MODEL", "
+return os.environ.get("GITARSENAL_DEBUG_MODEL", "openai")
 
 
 def _to_str(maybe_bytes):
@@ -347,7 +347,7 @@ def make_openai_request(api_key, prompt, retries=2):
 }
 
 payload = {
-"model": "gpt-
+"model": "gpt-4.1",
 "messages": [
 {"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue."},
 {"role": "user", "content": prompt}
@@ -536,7 +536,7 @@ def make_groq_request(api_key, prompt, retries=2):
 
 def get_provider_rotation_order(preferred=None):
 """Return provider rotation order starting with preferred if valid."""
-default_order = ["
+default_order = ["openai", "anthropic", "groq", "openrouter"]
 if preferred and preferred in default_order:
 return [preferred] + [p for p in default_order if p != preferred]
 return default_order
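
The rotation helper keeps a simple contract; the sketch below spells out the expected orderings, assuming llm_debugging is importable directly (the logic is exactly the function shown above):

import os
from llm_debugging import get_current_debug_model, get_provider_rotation_order

os.environ.pop("GITARSENAL_DEBUG_MODEL", None)
assert get_current_debug_model() == "openai"  # new default preference
assert get_provider_rotation_order() == ["openai", "anthropic", "groq", "openrouter"]
assert get_provider_rotation_order("groq") == ["groq", "openai", "anthropic", "openrouter"]
assert get_provider_rotation_order("mistral") == ["openai", "anthropic", "groq", "openrouter"]  # unknown -> default order
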
package/python/test_modalSandboxScript.py
CHANGED
@@ -46,7 +46,7 @@ if args.proxy_api_key:
 # Import the fetch_modal_tokens module
 # print("🔄 Fetching tokens from proxy server...")
 from fetch_modal_tokens import get_tokens
-token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = get_tokens()
+token_id, token_secret, openai_api_key, anthropic_api_key, openrouter_api_key, groq_api_key = get_tokens()
 
 # Check if we got valid tokens
 if token_id is None or token_secret is None:
@@ -139,12 +139,14 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
 print(f"🔍 token exists: {'Yes' if modal_token else 'No'}")
 print(f"🔍 openai_api_key exists: {'Yes' if openai_api_key else 'No'}")
 if modal_token_id:
-print(f"🔍 token length: {len(modal_token_id)}")
+# print(f"🔍 token length: {len(modal_token_id)}")
+pass
 if modal_token:
-print(f"🔍 token length: {len(modal_token)}")
+# print(f"🔍 token length: {len(modal_token)}")
+pass
 if openai_api_key:
-print(f"🔍 openai_api_key length: {len(openai_api_key)}")
-
+# print(f"🔍 openai_api_key length: {len(openai_api_key)}")
+pass
 # Try to access Modal token to check authentication
 try:
 # Check if token is set in environment
@@ -1840,7 +1842,7 @@ Return only the JSON array, no other text.
 client = openai.OpenAI(api_key=api_key)
 
 response = client.chat.completions.create(
-model="gpt-
+model="gpt-4.1", # Fixed: using valid OpenAI model
 messages=[
 {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
 {"role": "user", "content": prompt}
@@ -2208,7 +2210,7 @@ if __name__ == "__main__":
 if args.gpu:
 gpu_type = args.gpu
 # Validate the provided GPU type
-valid_gpus = ['T4', 'L4', 'A10G', 'A100-
+valid_gpus = ['T4', 'L4', 'A10G', 'A100-40GB', 'A100-80GB', 'L40S', 'H100', 'H200', 'B200']
 if gpu_type not in valid_gpus:
 print(f"⚠️ Warning: '{gpu_type}' is not in the list of known GPU types.")
 print(f"Available GPU types: {', '.join(valid_gpus)}")