gitarsenal-cli 1.9.25 → 1.9.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.venv_status.json +1 -1
- package/package.json +1 -1
- package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
- package/python/command_manager.py +31 -48
- package/python/credentials_manager.py +45 -2
- package/python/fetch_modal_tokens.py +45 -31
- package/python/fix_modal_token.py +1 -1
- package/python/llm_debugging.py +481 -1695
- package/python/modal_container.py +103 -7
- package/python/requirements.txt +2 -1
- package/python/setup.py +2 -1
- package/python/test_modalSandboxScript.py +106 -64
package/.venv_status.json
CHANGED

@@ -1 +1 @@
-{"created":"2025-08-
+{"created":"2025-08-08T04:25:52.914Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/package.json
CHANGED

package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc
CHANGED
Binary file

package/python/command_manager.py
CHANGED
@@ -255,7 +255,7 @@ class CommandListManager:
 
         return all_commands
 
-    def analyze_failed_commands_with_llm(self, api_key=None, current_dir=None, sandbox=None):
+    def analyze_failed_commands_with_llm(self, api_key=None, current_dir=None, sandbox=None, use_web_search=False):
         """Analyze all failed commands using LLM and add suggested fixes."""
         failed_commands = self.get_failed_commands_for_llm()
 
@@ -266,7 +266,7 @@ class CommandListManager:
         print(f"🔍 Analyzing {len(failed_commands)} failed commands with LLM...")
 
         # Use unified batch debugging for efficiency
-        fixes = call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
+        fixes = call_llm_for_batch_debug(failed_commands, api_key, current_dir, sandbox, use_web_search)
 
         # Add the fixes to the command list
         added_fixes = []
@@ -296,18 +296,17 @@ class CommandListManager:
             tuple: (should_skip, reason)
         """
         try:
+            # Import required helpers once for this function scope
+            from llm_debugging import get_current_debug_model, get_api_key, make_api_request
+
             # Get API key if not provided
             if not api_key:
-
-
-
-                key_file = os.path.expanduser("~/.gitarsenal/openai_key")
-                if os.path.exists(key_file):
-                    with open(key_file, "r") as f:
-                        api_key = f.read().strip()
+                # Use the same API key retrieval logic as the debugging functions
+                current_model = get_current_debug_model()
+                api_key = get_api_key(current_model)
 
             if not api_key:
-                print("⚠️ No
+                print(f"⚠️ No {current_model} API key available for command list analysis")
                 return False, "No API key available"
 
             # Get all commands for context
@@ -342,23 +341,15 @@ class CommandListManager:
             RUN: <reason>
             """
 
-
-            import openai
-            client = openai.OpenAI(api_key=api_key)
+            current_model = get_current_debug_model()
 
-            print("🔍 Analyzing if original command should be skipped...")
+            print(f"🔍 Analyzing if original command should be skipped using {current_model}...")
 
-            response = client.chat.completions.create(
-                model="gpt-3.5-turbo",
-                messages=[
-                    {"role": "system", "content": "You are a helpful assistant that analyzes command execution."},
-                    {"role": "user", "content": prompt}
-                ],
-                max_tokens=100,
-                temperature=0.3
-            )
+            response_text = make_api_request(current_model, api_key, prompt)
 
-
+            if not response_text:
+                print(f"⚠️ Failed to get response from {current_model}")
+                return False, f"Failed to get response from {current_model}"
 
             # Parse the response
             if response_text.startswith("SKIP:"):
@@ -421,18 +412,16 @@ class CommandListManager:
             bool: True if the list was updated, False otherwise
         """
         try:
+            from llm_debugging import get_current_debug_model, get_api_key, make_api_request
             # Get API key if not provided
             if not api_key:
-
-
-
-                key_file = os.path.expanduser("~/.gitarsenal/openai_key")
-                if os.path.exists(key_file):
-                    with open(key_file, "r") as f:
-                        api_key = f.read().strip()
+                # Use the same API key retrieval logic as the debugging functions
+                from llm_debugging import get_current_debug_model, get_api_key
+                current_model = get_current_debug_model()
+                api_key = get_api_key(current_model)
 
             if not api_key:
-                print("⚠️ No
+                print(f"⚠️ No {current_model} API key available for command list analysis")
                 return False
 
             # Get all commands for context
@@ -486,24 +475,18 @@ class CommandListManager:
             Only include commands that need changes (SKIP, MODIFY, ADD_AFTER), not KEEP actions.
             """
 
-            #
-            import
+            # Use the unified LLM API call
+            from llm_debugging import make_api_request
             import json
-
-
-            print("🔍 Analyzing command list for optimizations...")
-
-            response = client.chat.completions.create(
-                model="gpt-4o-mini",  # Use a more capable model for this complex task
-                messages=[
-                    {"role": "system", "content": "You are a helpful assistant that analyzes and optimizes command lists."},
-                    {"role": "user", "content": prompt}
-                ],
-                max_tokens=1000,
-                temperature=0.2
-            )
+            current_model = get_current_debug_model()
 
-
+            print(f"🔍 Analyzing command list for optimizations using {current_model}...")
+
+            response_text = make_api_request(current_model, api_key, prompt)
+
+            if not response_text:
+                print(f"⚠️ Failed to get response from {current_model}")
+                return False
 
             # Extract JSON from the response
             try:
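The command_manager.py changes above drop the direct `openai` client calls in favor of the provider-agnostic helpers imported from `llm_debugging`. Below is a minimal sketch of that call path, assuming `get_current_debug_model`, `get_api_key`, and `make_api_request` keep the shapes the diff implies (model name in; key and prompt in; response text or None out); it is an illustration of the pattern, not code from the package.

# Sketch only: mirrors the pattern introduced in command_manager.py above.
# Assumes llm_debugging exposes these helpers with the signatures seen in the diff.
from llm_debugging import get_current_debug_model, get_api_key, make_api_request

def ask_llm(prompt):
    current_model = get_current_debug_model()   # e.g. an OpenAI, Anthropic, or Groq model
    api_key = get_api_key(current_model)        # same key retrieval as the debugging functions
    if not api_key:
        print(f"⚠️ No {current_model} API key available")
        return None

    response_text = make_api_request(current_model, api_key, prompt)
    if not response_text:
        print(f"⚠️ Failed to get response from {current_model}")
        return None
    return response_text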
package/python/credentials_manager.py
CHANGED

@@ -145,7 +145,7 @@ class CredentialsManager:
         # First try to fetch from server using fetch_modal_tokens (GitArsenal's key)
         try:
             from fetch_modal_tokens import get_tokens
-            _, _, api_key, _ = get_tokens()
+            _, _, api_key, _, _ = get_tokens()
             if api_key and validate_openai_key(api_key):
                 # Set in environment for future use
                 os.environ["OPENAI_API_KEY"] = api_key
@@ -240,6 +240,48 @@ class CredentialsManager:
 
         prompt = "An Anthropic API key is required.\nYou can get your API key from: https://console.anthropic.com/"
         return self.get_credential("anthropic_api_key", prompt, is_password=True, validate_func=validate_anthropic_key)
+
+    def get_groq_api_key(self):
+        """Get Groq API key with validation"""
+        def validate_groq_key(key):
+            # Groq keys are non-empty; basic length check
+            return bool(key) and len(key) > 20
+
+        # First check stored credentials
+        credentials = self.load_credentials()
+        if "groq_api_key" in credentials:
+            stored_key = credentials["groq_api_key"]
+            if validate_groq_key(stored_key):
+                return stored_key
+
+        # Then check environment variable
+        env_key = os.environ.get("GROQ_API_KEY")
+        if env_key and validate_groq_key(env_key):
+            return env_key
+
+        prompt = "A Groq API key is required for Groq models.\nYou can get your key from: https://console.groq.com/keys"
+        return self.get_credential("groq_api_key", prompt, is_password=True, validate_func=validate_groq_key)
+
+    def get_exa_api_key(self):
+        """Get Exa API key with validation"""
+        def validate_exa_key(key):
+            # Exa API keys are typically 32+ characters
+            return len(key) >= 32
+
+        # First check stored credentials
+        credentials = self.load_credentials()
+        if "exa_api_key" in credentials:
+            stored_key = credentials["exa_api_key"]
+            if validate_exa_key(stored_key):
+                return stored_key
+
+        # Then check environment variable
+        env_key = os.environ.get("EXA_API_KEY")
+        if env_key and validate_exa_key(env_key):
+            return env_key
+
+        prompt = "An Exa API key is required for web search functionality.\nYou can get your API key from: https://exa.ai/"
+        return self.get_credential("exa_api_key", prompt, is_password=True, validate_func=validate_exa_key)
 
     def clear_credential(self, key):
         """Remove a specific credential"""
@@ -285,7 +327,8 @@ class CredentialsManager:
             "WANDB_API_KEY",
             "MODAL_TOKEN_ID",
             "MODAL_TOKEN",
-            "MODAL_TOKEN_SECRET"
+            "MODAL_TOKEN_SECRET",
+            "GROQ_API_KEY"
         ]
 
         for var in security_vars:
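The new `get_groq_api_key` and `get_exa_api_key` accessors follow the same lookup order as the existing credential getters: stored credentials first, then the environment variable, then an interactive prompt via `get_credential`. A short usage sketch follows; the no-argument constructor is an assumption (it is not shown in this diff).

# Sketch only: exercises the accessors added in this version.
# CredentialsManager() with no arguments is assumed, not confirmed by the diff.
from credentials_manager import CredentialsManager

manager = CredentialsManager()
groq_key = manager.get_groq_api_key()   # stored credentials -> GROQ_API_KEY env var -> prompt
exa_key = manager.get_exa_api_key()     # stored credentials -> EXA_API_KEY env var -> prompt

if groq_key:
    print(f"Groq key loaded ({len(groq_key)} chars)")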
package/python/fetch_modal_tokens.py
CHANGED

@@ -17,7 +17,7 @@ def fetch_default_tokens_from_gitarsenal():
     Fetch default Modal tokens and OpenAI API key from gitarsenal.dev API.
 
     Returns:
-        tuple: (token_id, token_secret, openai_api_key) if successful, (None, None, None) otherwise
+        tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key) if successful, (None, None, None, None, None) otherwise
     """
     endpoint = "https://gitarsenal.dev/api/credentials"
 
@@ -40,29 +40,30 @@ def fetch_default_tokens_from_gitarsenal():
                 token_secret = data.get("modalTokenSecret")
                 openai_api_key = data.get("openaiApiKey")
                 anthropic_api_key = data.get("anthropicApiKey")
+                groq_api_key = data.get("groqApiKey")
 
                 if token_id and token_secret:
                     # print("✅ Successfully fetched default tokens from gitarsenal.dev")
-                    return token_id, token_secret, openai_api_key, anthropic_api_key
+                    return token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key
                 else:
                     print("❌ Modal tokens not found in gitarsenal.dev response")
-                    return None, None, None, None
+                    return None, None, None, None, None
             except json.JSONDecodeError:
                 print("❌ Invalid JSON response from gitarsenal.dev")
-                return None, None, None, None
+                return None, None, None, None, None
         else:
             print(f"❌ Failed to fetch from gitarsenal.dev: {response.status_code} - {response.text[:200]}")
-            return None, None, None, None
+            return None, None, None, None, None
 
     except requests.exceptions.Timeout:
         print("❌ Request timeout when fetching from gitarsenal.dev")
-        return None, None, None, None
+        return None, None, None, None, None
     except requests.exceptions.ConnectionError:
         print("❌ Connection failed to gitarsenal.dev")
-        return None, None, None, None
+        return None, None, None, None, None
     except requests.exceptions.RequestException as e:
         print(f"❌ Request failed to gitarsenal.dev: {e}")
-        return None, None, None, None
+        return None, None, None, None, None
 
 def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
     """
@@ -73,7 +74,7 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
         api_key: API key for authentication
 
     Returns:
-        tuple: (token_id, token_secret, openai_api_key) if successful, (None, None, None) otherwise
+        tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key) if successful, (None, None, None, None, None) otherwise
     """
     # Use environment variables if not provided
     if not proxy_url:
@@ -90,12 +91,12 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
     if not proxy_url:
         # print("❌ No proxy URL provided or found in environment")
         print("💡 Set MODAL_PROXY_URL environment variable or use --proxy-url argument")
-        return None, None, None, None
+        return None, None, None, None, None
 
     if not api_key:
         print("❌ No API key provided or found in environment")
         print("💡 Set MODAL_PROXY_API_KEY environment variable or use --proxy-api-key argument")
-        return None, None, None, None
+        return None, None, None, None, None
 
     # Ensure the URL ends with a slash
     if not proxy_url.endswith("/"):
@@ -119,48 +120,41 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
             token_secret = data.get("token_secret")
             openai_api_key = data.get("openai_api_key")
             anthropic_api_key = data.get("anthropic_api_key")
-
+            groq_api_key = data.get("groq_api_key")
             if token_id and token_secret:
                 print("✅ Successfully fetched tokens from proxy server")
-                return token_id, token_secret, openai_api_key, anthropic_api_key
+                return token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key
             else:
                 print("❌ Tokens not found in response")
-                return None, None, None, None
+                return None, None, None, None, None
         else:
             print(f"❌ Failed to fetch tokens: {response.status_code} - {response.text}")
-            return None, None, None, None
+            return None, None, None, None, None
     except Exception as e:
         print(f"❌ Error fetching tokens: {e}")
-        return None, None, None, None
+        return None, None, None, None, None
 
 def get_tokens():
     """
-    Get Modal tokens, OpenAI API key, and
+    Get Modal tokens, OpenAI API key, Anthropic API key, and Groq API key, trying to fetch from the proxy server first.
     Also sets the tokens in environment variables.
 
     Returns:
-        tuple: (token_id, token_secret, openai_api_key, anthropic_api_key)
+        tuple: (token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key)
     """
     # Try to fetch from the proxy server
-    token_id, token_secret, openai_api_key, anthropic_api_key = fetch_tokens_from_proxy()
+    token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = fetch_tokens_from_proxy()
 
     # If we couldn't fetch from the server, try to get default tokens from gitarsenal.dev
     if not token_id or not token_secret:
        # print("⚠️ Proxy server failed, trying to fetch default tokens from gitarsenal.dev")
-        token_id, token_secret, openai_api_key, anthropic_api_key = fetch_default_tokens_from_gitarsenal()
+        token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = fetch_default_tokens_from_gitarsenal()
 
     # If we still don't have tokens, we can't proceed
     if not token_id or not token_secret:
         print("❌ Failed to fetch tokens from both proxy server and gitarsenal.dev")
         print("💡 Please check your network connection and API endpoints")
-        return None, None, None, None
-
-        # Debug print the full token values
-        # print("\n🔍 DEBUG: FULL TOKEN VALUES:")
-        # print(f"🔍 DEBUG: MODAL_TOKEN_ID: {token_id}")
-        # print(f"🔍 DEBUG: MODAL_TOKEN_SECRET: {token_secret}")
-        # print(f"🔍 DEBUG: OPENAI_API_KEY: {openai_api_key}")
-        # print("🔍 DEBUG: END OF TOKEN VALUES\n")
+        return None, None, None, None, None
 
     # Set the tokens in environment variables
     os.environ["MODAL_TOKEN_ID"] = token_id
@@ -175,7 +169,11 @@ def get_tokens():
     if anthropic_api_key:
         os.environ["ANTHROPIC_API_KEY"] = anthropic_api_key
 
-
+    # Set Groq API key if available
+    if groq_api_key:
+        os.environ["GROQ_API_KEY"] = groq_api_key
+
+    return token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key
 
 if __name__ == "__main__":
     # Parse command-line arguments if run directly
@@ -196,11 +194,12 @@ if __name__ == "__main__":
         print(f"✅ Set MODAL_PROXY_API_KEY from command line")
 
     # Get tokens
-    token_id, token_secret, openai_api_key, anthropic_api_key = get_tokens()
+    token_id, token_secret, openai_api_key, anthropic_api_key, groq_api_key = get_tokens()
     print(f"Token ID: {token_id}")
     print(f"Token Secret: {token_secret}")
     print(f"OpenAI API Key: {openai_api_key[:5] + '...' if openai_api_key else None}")
     print(f"Anthropic API Key: {anthropic_api_key[:5] + '...' if anthropic_api_key else None}")
+    print(f"Groq API Key: {groq_api_key[:5] + '...' if groq_api_key else None}")
 
     # Check if tokens are set in environment variables
     print(f"\n🔍 DEBUG: Checking environment variables")
@@ -208,6 +207,7 @@ if __name__ == "__main__":
     print(f"🔍 MODAL_TOKEN_SECRET exists: {'Yes' if os.environ.get('MODAL_TOKEN_SECRET') else 'No'}")
     print(f"🔍 OPENAI_API_KEY exists: {'Yes' if os.environ.get('OPENAI_API_KEY') else 'No'}")
     print(f"🔍 ANTHROPIC_API_KEY exists: {'Yes' if os.environ.get('ANTHROPIC_API_KEY') else 'No'}")
+    print(f"🔍 GROQ_API_KEY exists: {'Yes' if os.environ.get('GROQ_API_KEY') else 'No'}")
     if os.environ.get('MODAL_TOKEN_ID'):
         print(f"🔍 MODAL_TOKEN_ID length: {len(os.environ.get('MODAL_TOKEN_ID'))}")
     if os.environ.get('MODAL_TOKEN_SECRET'):
@@ -216,6 +216,8 @@ if __name__ == "__main__":
         print(f"🔍 OPENAI_API_KEY length: {len(os.environ.get('OPENAI_API_KEY'))}")
     if os.environ.get('ANTHROPIC_API_KEY'):
         print(f"🔍 ANTHROPIC_API_KEY length: {len(os.environ.get('ANTHROPIC_API_KEY'))}")
+    if os.environ.get('GROQ_API_KEY'):
+        print(f"🔍 GROQ_API_KEY length: {len(os.environ.get('GROQ_API_KEY'))}")
 
     # Write the tokens to a file for use by other scripts
     tokens_file = Path(__file__).parent / "modal_tokens.json"
@@ -224,7 +226,8 @@ if __name__ == "__main__":
             "token_id": token_id,
             "token_secret": token_secret,
             "openai_api_key": openai_api_key,
-            "anthropic_api_key": anthropic_api_key
+            "anthropic_api_key": anthropic_api_key,
+            "groq_api_key": groq_api_key
         }, f)
     print(f"\n✅ Tokens written to {tokens_file}")
 
@@ -279,6 +282,17 @@ if __name__ == "__main__":
         with open(env_file, 'w') as f:
             f.write(env_content)
         print(f"✅ Updated Anthropic API key in {env_file}")
+
+    # Update or add GROQ_API_KEY
+    if groq_api_key:
+        if "GROQ_API_KEY" in env_content:
+            import re
+            env_content = re.sub(r'GROQ_API_KEY=.*\n', f'GROQ_API_KEY={groq_api_key}\n', env_content)
+        else:
+            env_content += f'\nGROQ_API_KEY={groq_api_key}\n'
+        with open(env_file, 'w') as f:
+            f.write(env_content)
+        print(f"✅ Updated Groq API key in {env_file}")
 
     # Try to use the Modal CLI to set the token
     try:
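Every return path in fetch_modal_tokens.py now yields a five-element tuple (the Groq key is appended after the Anthropic key), so any caller that unpacks the result needs a fifth target, as credentials_manager.py above and fix_modal_token.py below were updated to do. A hedged sketch of a compatible caller, based only on the contract shown in this diff:

# Sketch only: shows the five-value unpacking the new get_tokens() contract requires.
from fetch_modal_tokens import get_tokens

token_id, token_secret, openai_key, anthropic_key, groq_key = get_tokens()
if token_id is None or token_secret is None:
    raise SystemExit("❌ Could not fetch Modal tokens")

# Per the diff above, get_tokens() also exports whatever it found into the
# corresponding environment variables (MODAL_TOKEN_ID, MODAL_TOKEN_SECRET,
# OPENAI_API_KEY, ANTHROPIC_API_KEY, GROQ_API_KEY).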
package/python/fix_modal_token.py
CHANGED

@@ -42,7 +42,7 @@ except Exception as e:
 try:
     # First, try to import the fetch_modal_tokens module
     from fetch_modal_tokens import get_tokens
-    TOKEN_ID, TOKEN_SECRET, _, _ = get_tokens()
+    TOKEN_ID, TOKEN_SECRET, _, _, _ = get_tokens()
 
     # Check if we got valid tokens
     if TOKEN_ID is None or TOKEN_SECRET is None: