gitarsenal-cli 1.5.4 → 1.5.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/python/test_modalSandboxScript.py +3 -7
- package/install.sh +0 -30
- package/python/MODAL_PROXY_README.md +0 -145
- package/python/README.md +0 -68
- package/python/fix_commands.py +0 -71
- package/python/fixed_function.py +0 -58
- package/python/manage_credentials.py +0 -119
- package/python/modal_logs_patch.py +0 -86
- package/python/patch_modal_script.py +0 -75
- package/python/run_with_modal_token.py +0 -47
- package/python/test_import.py +0 -55
- package/python/test_llm_debug.py +0 -120
- package/python/test_modalSandboxScript.py.bak +0 -3672
- package/python/test_modal_auth.py +0 -90
- package/python/test_token_cleanup.py +0 -256
- package/python/verify_env_vars.py +0 -64
@@ -1,3672 +0,0 @@
|
|
1
|
-
import os
|
2
|
-
import sys
|
3
|
-
import time
|
4
|
-
import subprocess
|
5
|
-
import json
|
6
|
-
import re
|
7
|
-
import datetime
|
8
|
-
import getpass
|
9
|
-
import requests
|
10
|
-
import secrets
|
11
|
-
import string
|
12
|
-
import argparse
|
13
|
-
from pathlib import Path
|
14
|
-
|
15
|
-
# Parse command-line arguments
|
16
|
-
parser = argparse.ArgumentParser(description='Launch a Modal sandbox')
|
17
|
-
parser.add_argument('--proxy-url', help='URL of the proxy server')
|
18
|
-
parser.add_argument('--proxy-api-key', help='API key for the proxy server')
|
19
|
-
parser.add_argument('--gpu', default='A10G', help='GPU type to use')
|
20
|
-
parser.add_argument('--repo-url', help='Repository URL')
|
21
|
-
parser.add_argument('--volume-name', help='Volume name')
|
22
|
-
parser.add_argument('--use-api', action='store_true', help='Use API to fetch setup commands')
|
23
|
-
|
24
|
-
# Parse only known args to avoid conflicts with other arguments
|
25
|
-
args, unknown = parser.parse_known_args()
|
26
|
-
|
27
|
-
# Set proxy URL and API key in environment variables if provided
|
28
|
-
if args.proxy_url:
|
29
|
-
os.environ["MODAL_PROXY_URL"] = args.proxy_url
|
30
|
-
# print(f"✅ Set MODAL_PROXY_URL from command line")
|
31
|
-
|
32
|
-
if args.proxy_api_key:
|
33
|
-
os.environ["MODAL_PROXY_API_KEY"] = args.proxy_api_key
|
34
|
-
# print(f"✅ Set MODAL_PROXY_API_KEY from command line")
|
35
|
-
|
36
|
-
# First, try to fetch tokens from the proxy server
|
37
|
-
try:
|
38
|
-
# Import the fetch_modal_tokens module
|
39
|
-
# print("🔄 Fetching tokens from proxy server...")
|
40
|
-
from fetch_modal_tokens import get_tokens
|
41
|
-
token_id, token_secret = get_tokens()
|
42
|
-
# print(f"✅ Tokens fetched successfully")
|
43
|
-
|
44
|
-
# Explicitly set the environment variables again to be sure
|
45
|
-
os.environ["MODAL_TOKEN_ID"] = token_id
|
46
|
-
os.environ["MODAL_TOKEN_SECRET"] = token_secret
|
47
|
-
|
48
|
-
# Also set the old environment variable for backward compatibility
|
49
|
-
os.environ["MODAL_TOKEN"] = token_id
|
50
|
-
|
51
|
-
# Set token variables for later use
|
52
|
-
token = token_id # For backward compatibility
|
53
|
-
except Exception as e:
|
54
|
-
print(f"⚠️ Error fetching Modal tokens: {e}")
|
55
|
-
|
56
|
-
# Apply the comprehensive Modal token solution as fallback
|
57
|
-
try:
|
58
|
-
# Import the comprehensive solution module
|
59
|
-
# print("🔄 Applying comprehensive Modal token solution...")
|
60
|
-
import modal_token_solution
|
61
|
-
# print("✅ Comprehensive Modal token solution applied")
|
62
|
-
|
63
|
-
# Set token variables for later use
|
64
|
-
token = modal_token_solution.TOKEN_ID # For backward compatibility
|
65
|
-
except Exception as e:
|
66
|
-
# print(f"⚠️ Error applying comprehensive Modal token solution: {e}")
|
67
|
-
|
68
|
-
# Fall back to the authentication patch
|
69
|
-
try:
|
70
|
-
# Import the patch module
|
71
|
-
# print("🔄 Falling back to Modal authentication patch...")
|
72
|
-
import modal_auth_patch
|
73
|
-
# print("✅ Modal authentication patch applied")
|
74
|
-
|
75
|
-
# Set token variables for later use
|
76
|
-
token = modal_auth_patch.TOKEN_ID # For backward compatibility
|
77
|
-
except Exception as e:
|
78
|
-
# print(f"⚠️ Error applying Modal authentication patch: {e}")
|
79
|
-
|
80
|
-
# Fall back to fix_modal_token.py
|
81
|
-
try:
|
82
|
-
# Execute the fix_modal_token.py script
|
83
|
-
print("🔄 Falling back to fix_modal_token.py...")
|
84
|
-
result = subprocess.run(
|
85
|
-
["python", os.path.join(os.path.dirname(__file__), "fix_modal_token.py")],
|
86
|
-
capture_output=True,
|
87
|
-
text=True
|
88
|
-
)
|
89
|
-
|
90
|
-
# Print the output but hide sensitive information
|
91
|
-
output_lines = result.stdout.split('\n')
|
92
|
-
for line in output_lines:
|
93
|
-
if 'TOKEN_ID' in line or 'TOKEN_SECRET' in line or 'token_id' in line or 'token_secret' in line:
|
94
|
-
# Hide the actual token values
|
95
|
-
if '=' in line:
|
96
|
-
parts = line.split('=', 1)
|
97
|
-
if len(parts) == 2:
|
98
|
-
print(f"{parts[0]}= [HIDDEN]")
|
99
|
-
else:
|
100
|
-
print(line.replace('ak-sLhYqCjkvixiYcb9LAuCHp', '[HIDDEN]').replace('as-fPzD0Zm0dl6IFAEkhaH9pq', '[HIDDEN]'))
|
101
|
-
else:
|
102
|
-
print(line)
|
103
|
-
|
104
|
-
if result.returncode != 0:
|
105
|
-
print(f"⚠️ Warning: fix_modal_token.py exited with code {result.returncode}")
|
106
|
-
if result.stderr:
|
107
|
-
print(f"Error: {result.stderr}")
|
108
|
-
|
109
|
-
# Set token variables for later use
|
110
|
-
token = "ak-sLhYqCjkvixiYcb9LAuCHp" # Default token ID
|
111
|
-
except Exception as e:
|
112
|
-
print(f"⚠️ Error running fix_modal_token.py: {e}")
|
113
|
-
|
114
|
-
# Last resort: use hardcoded tokens
|
115
|
-
token = "ak-sLhYqCjkvixiYcb9LAuCHp" # Default token ID
|
116
|
-
|
117
|
-
# Print debug info
|
118
|
-
# print(f"🔍 DEBUG: Checking environment variables")
|
119
|
-
# print(f"🔍 Token ID exists: {'Yes' if os.environ.get('MODAL_TOKEN_ID') else 'No'}")
|
120
|
-
# print(f"🔍 Token secret exists: {'Yes' if os.environ.get('MODAL_TOKEN_SECRET') else 'No'}")
|
121
|
-
# print(f"🔍 Token exists: {'Yes' if os.environ.get('MODAL_TOKEN') else 'No'}")
|
122
|
-
if os.environ.get('MODAL_TOKEN_ID'):
|
123
|
-
print(f"🔍 Token ID length: {len(os.environ.get('MODAL_TOKEN_ID'))}")
|
124
|
-
if os.environ.get('MODAL_TOKEN_SECRET'):
|
125
|
-
print(f"🔍 Token secret length: {len(os.environ.get('MODAL_TOKEN_SECRET'))}")
|
126
|
-
if os.environ.get('MODAL_TOKEN'):
|
127
|
-
print(f"🔍 Token length: {len(os.environ.get('MODAL_TOKEN'))}")
|
128
|
-
# print(f"✅ Token setup completed")
|
129
|
-
|
130
|
-
# Import modal after token setup
|
131
|
-
import modal
|
132
|
-
|
133
|
-
def handle_interactive_input(prompt, is_password=False):
|
134
|
-
"""Handle interactive input from the user with optional password masking"""
|
135
|
-
print("\n" + "="*60)
|
136
|
-
print(f"{prompt}")
|
137
|
-
print("="*60)
|
138
|
-
|
139
|
-
try:
|
140
|
-
if is_password:
|
141
|
-
user_input = getpass.getpass("Input (hidden): ").strip()
|
142
|
-
else:
|
143
|
-
user_input = input("Input: ").strip()
|
144
|
-
|
145
|
-
if not user_input:
|
146
|
-
print("❌ No input provided.")
|
147
|
-
return None
|
148
|
-
print("✅ Input received successfully!")
|
149
|
-
return user_input
|
150
|
-
except KeyboardInterrupt:
|
151
|
-
print("\n❌ Input cancelled by user.")
|
152
|
-
return None
|
153
|
-
except Exception as e:
|
154
|
-
print(f"❌ Error getting input: {e}")
|
155
|
-
return None
|
156
|
-
|
157
|
-
def handle_wandb_login(sandbox, current_dir):
|
158
|
-
"""Handle Weights & Biases login with proper API key input"""
|
159
|
-
# Define _to_str function locally to avoid NameError
|
160
|
-
def _to_str(maybe_bytes):
|
161
|
-
try:
|
162
|
-
return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
|
163
|
-
except UnicodeDecodeError:
|
164
|
-
# Handle non-UTF-8 bytes by replacing invalid characters
|
165
|
-
if isinstance(maybe_bytes, (bytes, bytearray)):
|
166
|
-
return maybe_bytes.decode('utf-8', errors='replace')
|
167
|
-
else:
|
168
|
-
return str(maybe_bytes)
|
169
|
-
except Exception:
|
170
|
-
# Last resort fallback
|
171
|
-
return str(maybe_bytes)
|
172
|
-
|
173
|
-
print("\n🔑 WEIGHTS & BIASES LOGIN")
|
174
|
-
print("="*60)
|
175
|
-
print("Setting up Weights & Biases credentials")
|
176
|
-
print("You can get your API key from: https://wandb.ai/authorize")
|
177
|
-
|
178
|
-
# Try to use credentials manager first
|
179
|
-
api_key = None
|
180
|
-
try:
|
181
|
-
from credentials_manager import CredentialsManager
|
182
|
-
credentials_manager = CredentialsManager()
|
183
|
-
api_key = credentials_manager.get_wandb_api_key()
|
184
|
-
except ImportError:
|
185
|
-
# Fall back to direct input if credentials_manager is not available
|
186
|
-
pass
|
187
|
-
|
188
|
-
# If credentials manager didn't provide a key, use direct input
|
189
|
-
if not api_key:
|
190
|
-
# Get API key from user
|
191
|
-
api_key = handle_interactive_input(
|
192
|
-
"🔑 WEIGHTS & BIASES API KEY REQUIRED\n" +
|
193
|
-
"Please paste your W&B API key below:\n" +
|
194
|
-
"(Your API key should be 40 characters long)",
|
195
|
-
is_password=True
|
196
|
-
)
|
197
|
-
|
198
|
-
if not api_key:
|
199
|
-
print("❌ No API key provided. Cannot continue with W&B login.")
|
200
|
-
return False, "", "No W&B API key provided"
|
201
|
-
|
202
|
-
# Validate API key length
|
203
|
-
if len(api_key) != 40:
|
204
|
-
print(f"⚠️ Warning: API key should be 40 characters long, yours was {len(api_key)}")
|
205
|
-
confirm = handle_interactive_input("Continue anyway? (yes/no)")
|
206
|
-
if not confirm or confirm.lower() not in ["yes", "y"]:
|
207
|
-
print("❌ W&B login cancelled.")
|
208
|
-
return False, "", "W&B login cancelled"
|
209
|
-
|
210
|
-
# Use non-interactive login
|
211
|
-
cmd = f"wandb login {api_key}"
|
212
|
-
print(f"🔄 Running non-interactive login command")
|
213
|
-
|
214
|
-
# Execute the command
|
215
|
-
result = sandbox.exec("bash", "-c", f"cd {current_dir} && {cmd}")
|
216
|
-
|
217
|
-
# Collect output
|
218
|
-
stdout_lines = []
|
219
|
-
stderr_lines = []
|
220
|
-
|
221
|
-
for line in result.stdout:
|
222
|
-
line_str = _to_str(line)
|
223
|
-
stdout_lines.append(line_str)
|
224
|
-
sys.stdout.write(line_str)
|
225
|
-
sys.stdout.flush()
|
226
|
-
|
227
|
-
for line in result.stderr:
|
228
|
-
line_str = _to_str(line)
|
229
|
-
stderr_lines.append(line_str)
|
230
|
-
sys.stderr.write(line_str)
|
231
|
-
sys.stderr.flush()
|
232
|
-
|
233
|
-
result.wait()
|
234
|
-
exit_code = result.returncode
|
235
|
-
|
236
|
-
stdout_buffer = ''.join(stdout_lines)
|
237
|
-
stderr_buffer = ''.join(stderr_lines)
|
238
|
-
|
239
|
-
if exit_code == 0:
|
240
|
-
print("✅ Weights & Biases login successful")
|
241
|
-
# Also set the environment variable for this session
|
242
|
-
os.environ["WANDB_API_KEY"] = api_key
|
243
|
-
print("✅ WANDB_API_KEY environment variable set")
|
244
|
-
else:
|
245
|
-
print(f"❌ Weights & Biases login failed with exit code {exit_code}")
|
246
|
-
if stderr_buffer:
|
247
|
-
print(f"Error: {stderr_buffer}")
|
248
|
-
|
249
|
-
return exit_code == 0, stdout_buffer, stderr_buffer
|
250
|
-
|
251
|
-
def handle_huggingface_login(sandbox, current_dir):
|
252
|
-
"""Handle Hugging Face login with proper token input"""
|
253
|
-
# Define _to_str function locally to avoid NameError
|
254
|
-
def _to_str(maybe_bytes):
|
255
|
-
try:
|
256
|
-
return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
|
257
|
-
except UnicodeDecodeError:
|
258
|
-
# Handle non-UTF-8 bytes by replacing invalid characters
|
259
|
-
if isinstance(maybe_bytes, (bytes, bytearray)):
|
260
|
-
return maybe_bytes.decode('utf-8', errors='replace')
|
261
|
-
else:
|
262
|
-
return str(maybe_bytes)
|
263
|
-
except Exception:
|
264
|
-
# Last resort fallback
|
265
|
-
return str(maybe_bytes)
|
266
|
-
|
267
|
-
print("\n🔑 HUGGING FACE LOGIN")
|
268
|
-
print("="*60)
|
269
|
-
print("Setting up Hugging Face credentials")
|
270
|
-
|
271
|
-
# Get token from user
|
272
|
-
token = prompt_for_hf_token()
|
273
|
-
if not token:
|
274
|
-
print("❌ No token provided. Cannot continue with Hugging Face login.")
|
275
|
-
return False, "", "No Hugging Face token provided"
|
276
|
-
|
277
|
-
# Use non-interactive login
|
278
|
-
cmd = f"huggingface-cli login --token {token} --add-to-git-credential"
|
279
|
-
print(f"🔄 Running non-interactive login command")
|
280
|
-
|
281
|
-
# Execute the command
|
282
|
-
result = sandbox.exec("bash", "-c", f"cd {current_dir} && {cmd}")
|
283
|
-
|
284
|
-
# Collect output
|
285
|
-
stdout_lines = []
|
286
|
-
stderr_lines = []
|
287
|
-
|
288
|
-
for line in result.stdout:
|
289
|
-
line_str = _to_str(line)
|
290
|
-
stdout_lines.append(line_str)
|
291
|
-
sys.stdout.write(line_str)
|
292
|
-
sys.stdout.flush()
|
293
|
-
|
294
|
-
for line in result.stderr:
|
295
|
-
line_str = _to_str(line)
|
296
|
-
stderr_lines.append(line_str)
|
297
|
-
sys.stderr.write(line_str)
|
298
|
-
sys.stderr.flush()
|
299
|
-
|
300
|
-
result.wait()
|
301
|
-
exit_code = result.returncode
|
302
|
-
|
303
|
-
stdout_buffer = ''.join(stdout_lines)
|
304
|
-
stderr_buffer = ''.join(stderr_lines)
|
305
|
-
|
306
|
-
if exit_code == 0:
|
307
|
-
print("✅ Hugging Face login successful")
|
308
|
-
# Also set the environment variable for this session
|
309
|
-
os.environ["HF_TOKEN"] = token
|
310
|
-
print("✅ HF_TOKEN environment variable set")
|
311
|
-
else:
|
312
|
-
print(f"❌ Hugging Face login failed with exit code {exit_code}")
|
313
|
-
if stderr_buffer:
|
314
|
-
print(f"Error: {stderr_buffer}")
|
315
|
-
|
316
|
-
return exit_code == 0, stdout_buffer, stderr_buffer
|
317
|
-
|
318
|
-
def handle_interactive_command(cmd, sandbox, current_dir):
|
319
|
-
"""Handle interactive commands by prompting the user for input"""
|
320
|
-
print(f"⚠️ Interactive command detected: {cmd}")
|
321
|
-
print("⚠️ Some prompts may not be visible. If the command appears stuck, it may be waiting for input.")
|
322
|
-
|
323
|
-
# This is a placeholder for more sophisticated interactive command handling
|
324
|
-
# In a real implementation, you would need to handle specific interactive commands differently
|
325
|
-
return None
|
326
|
-
|
327
|
-
def call_openai_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
|
328
|
-
"""Call OpenAI to debug a failed command and suggest a fix"""
|
329
|
-
# Define _to_str function locally to avoid NameError
|
330
|
-
def _to_str(maybe_bytes):
|
331
|
-
try:
|
332
|
-
return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
|
333
|
-
except UnicodeDecodeError:
|
334
|
-
# Handle non-UTF-8 bytes by replacing invalid characters
|
335
|
-
if isinstance(maybe_bytes, (bytes, bytearray)):
|
336
|
-
return maybe_bytes.decode('utf-8', errors='replace')
|
337
|
-
else:
|
338
|
-
return str(maybe_bytes)
|
339
|
-
except Exception:
|
340
|
-
# Last resort fallback
|
341
|
-
return str(maybe_bytes)
|
342
|
-
|
343
|
-
# Skip debugging for certain commands that commonly return non-zero exit codes
|
344
|
-
# but aren't actually errors (like test commands)
|
345
|
-
if command.strip().startswith("test "):
|
346
|
-
print("🔍 Skipping debugging for test command - non-zero exit code is expected behavior")
|
347
|
-
return None
|
348
|
-
|
349
|
-
# Validate error_output - if it's empty, we can't debug effectively
|
350
|
-
if not error_output or not error_output.strip():
|
351
|
-
print("⚠️ Error output is empty. Cannot effectively debug the command.")
|
352
|
-
print("⚠️ Skipping OpenAI debugging due to lack of error information.")
|
353
|
-
return None
|
354
|
-
|
355
|
-
if not api_key:
|
356
|
-
# Try to get API key from environment
|
357
|
-
api_key = os.environ.get("OPENAI_API_KEY")
|
358
|
-
|
359
|
-
if not api_key:
|
360
|
-
# Use the CredentialsManager to get the API key
|
361
|
-
try:
|
362
|
-
from credentials_manager import CredentialsManager
|
363
|
-
credentials_manager = CredentialsManager()
|
364
|
-
api_key = credentials_manager.get_openai_api_key()
|
365
|
-
if not api_key:
|
366
|
-
print("❌ No API key provided. Skipping debugging.")
|
367
|
-
return None
|
368
|
-
except ImportError:
|
369
|
-
# Fall back to direct input if credentials_manager module is not available
|
370
|
-
print("\n" + "="*60)
|
371
|
-
print("🔑 OPENAI API KEY REQUIRED FOR DEBUGGING")
|
372
|
-
print("="*60)
|
373
|
-
print("To debug failed commands, an OpenAI API key is needed.")
|
374
|
-
print("📝 Please paste your OpenAI API key below:")
|
375
|
-
print(" (Your input will be hidden for security)")
|
376
|
-
print("-" * 60)
|
377
|
-
|
378
|
-
try:
|
379
|
-
api_key = getpass.getpass("OpenAI API Key: ").strip()
|
380
|
-
if not api_key:
|
381
|
-
print("❌ No API key provided. Skipping debugging.")
|
382
|
-
return None
|
383
|
-
print("✅ API key received successfully!")
|
384
|
-
except KeyboardInterrupt:
|
385
|
-
print("\n❌ API key input cancelled by user.")
|
386
|
-
return None
|
387
|
-
except Exception as e:
|
388
|
-
print(f"❌ Error getting API key: {e}")
|
389
|
-
return None
|
390
|
-
|
391
|
-
# Get current directory context
|
392
|
-
directory_context = ""
|
393
|
-
system_info = ""
|
394
|
-
|
395
|
-
if sandbox:
|
396
|
-
try:
|
397
|
-
print("🔍 Getting system information for better debugging...")
|
398
|
-
|
399
|
-
# Get OS information
|
400
|
-
os_info_cmd = """
|
401
|
-
echo "OS Information:"
|
402
|
-
cat /etc/os-release 2>/dev/null || echo "OS release info not available"
|
403
|
-
echo -e "\nKernel Information:"
|
404
|
-
uname -a
|
405
|
-
echo -e "\nPython Information:"
|
406
|
-
python --version
|
407
|
-
echo -e "\nPackage Manager:"
|
408
|
-
which apt 2>/dev/null && echo "apt available" || echo "apt not available"
|
409
|
-
which yum 2>/dev/null && echo "yum available" || echo "yum not available"
|
410
|
-
which dnf 2>/dev/null && echo "dnf available" || echo "dnf not available"
|
411
|
-
which apk 2>/dev/null && echo "apk available" || echo "apk not available"
|
412
|
-
echo -e "\nEnvironment Variables:"
|
413
|
-
env | grep -E "^(PATH|PYTHON|VIRTUAL_ENV|HOME|USER|SHELL|LANG)" || echo "No relevant env vars found"
|
414
|
-
"""
|
415
|
-
|
416
|
-
os_result = sandbox.exec("bash", "-c", os_info_cmd)
|
417
|
-
os_output = ""
|
418
|
-
for line in os_result.stdout:
|
419
|
-
os_output += _to_str(line)
|
420
|
-
os_result.wait()
|
421
|
-
|
422
|
-
system_info = f"""
|
423
|
-
System Information:
|
424
|
-
{os_output}
|
425
|
-
"""
|
426
|
-
print("✅ System information gathered successfully")
|
427
|
-
except Exception as e:
|
428
|
-
print(f"⚠️ Error getting system information: {e}")
|
429
|
-
system_info = "System information not available\n"
|
430
|
-
|
431
|
-
if current_dir and sandbox:
|
432
|
-
try:
|
433
|
-
print("🔍 Getting directory context for better debugging...")
|
434
|
-
|
435
|
-
# Get current directory contents
|
436
|
-
ls_result = sandbox.exec("bash", "-c", f"cd {current_dir} && ls -la")
|
437
|
-
ls_output = ""
|
438
|
-
for line in ls_result.stdout:
|
439
|
-
ls_output += _to_str(line)
|
440
|
-
ls_result.wait()
|
441
|
-
|
442
|
-
# Get parent directory contents if this isn't root
|
443
|
-
parent_context = ""
|
444
|
-
if current_dir != "/" and "/" in current_dir:
|
445
|
-
parent_dir = os.path.dirname(current_dir)
|
446
|
-
parent_result = sandbox.exec("bash", "-c", f"cd {parent_dir} && ls -la")
|
447
|
-
parent_ls = ""
|
448
|
-
for line in parent_result.stdout:
|
449
|
-
parent_ls += _to_str(line)
|
450
|
-
parent_result.wait()
|
451
|
-
parent_context = f"\nParent directory ({parent_dir}) contents:\n{parent_ls}"
|
452
|
-
|
453
|
-
directory_context = f"""
|
454
|
-
Current directory: {current_dir}
|
455
|
-
|
456
|
-
Directory contents:
|
457
|
-
{ls_output}
|
458
|
-
{parent_context}
|
459
|
-
"""
|
460
|
-
print("✅ Directory context gathered successfully")
|
461
|
-
except Exception as e:
|
462
|
-
print(f"⚠️ Error getting directory context: {e}")
|
463
|
-
directory_context = f"\nCurrent directory: {current_dir}\n"
|
464
|
-
|
465
|
-
# Prepare the API request
|
466
|
-
headers = {
|
467
|
-
"Content-Type": "application/json",
|
468
|
-
"Authorization": f"Bearer {api_key}"
|
469
|
-
}
|
470
|
-
|
471
|
-
# Create a prompt for the LLM
|
472
|
-
print("\n" + "="*60)
|
473
|
-
print("DEBUG: ERROR_OUTPUT SENT TO LLM:")
|
474
|
-
print("="*60)
|
475
|
-
print(f"{error_output}")
|
476
|
-
print("="*60 + "\n")
|
477
|
-
|
478
|
-
prompt = f"""
|
479
|
-
I'm trying to run the following command in a Linux environment:
|
480
|
-
|
481
|
-
```
|
482
|
-
{command}
|
483
|
-
```
|
484
|
-
|
485
|
-
But it failed with this error:
|
486
|
-
|
487
|
-
```
|
488
|
-
{error_output}
|
489
|
-
```
|
490
|
-
{system_info}
|
491
|
-
{directory_context}
|
492
|
-
Please analyze the error and provide ONLY a single terminal command that would fix the issue.
|
493
|
-
Consider the current directory, system information, and directory contents carefully before suggesting a solution.
|
494
|
-
|
495
|
-
IMPORTANT: For any commands that might ask for yes/no confirmation, use the appropriate non-interactive flag:
|
496
|
-
- For apt/apt-get: use -y or --yes
|
497
|
-
- For pip: use --no-input
|
498
|
-
- For rm: use -f or --force
|
499
|
-
- For other commands: check their documentation for the appropriate non-interactive flag
|
500
|
-
|
501
|
-
Do not provide any explanations, just the exact command to run.
|
502
|
-
"""
|
503
|
-
|
504
|
-
# Prepare the API request payload
|
505
|
-
payload = {
|
506
|
-
"model": "gpt-4.1",
|
507
|
-
"messages": [
|
508
|
-
{"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue, analyze the issue first understand why its happening and then provide the command to fix the issue. If you see missing pytest errors, suggest 'pip install pytest'. For wandb login issues, suggest 'wandb login YOUR_API_KEY' and the system will handle prompting for the actual key."},
|
509
|
-
{"role": "user", "content": prompt}
|
510
|
-
],
|
511
|
-
"temperature": 0.2,
|
512
|
-
"max_tokens": 300
|
513
|
-
}
|
514
|
-
|
515
|
-
try:
|
516
|
-
print("🤖 Calling OpenAI to debug the failed command...")
|
517
|
-
response = requests.post(
|
518
|
-
"https://api.openai.com/v1/chat/completions",
|
519
|
-
headers=headers,
|
520
|
-
json=payload,
|
521
|
-
timeout=30
|
522
|
-
)
|
523
|
-
|
524
|
-
if response.status_code == 200:
|
525
|
-
result = response.json()
|
526
|
-
fix_command = result["choices"][0]["message"]["content"].strip()
|
527
|
-
|
528
|
-
# Extract just the command if it's wrapped in backticks or explanation
|
529
|
-
if "```" in fix_command:
|
530
|
-
# Extract content between backticks
|
531
|
-
import re
|
532
|
-
code_blocks = re.findall(r'```(?:bash|sh)?\s*(.*?)\s*```', fix_command, re.DOTALL)
|
533
|
-
if code_blocks:
|
534
|
-
fix_command = code_blocks[0].strip()
|
535
|
-
|
536
|
-
# If the response still has explanatory text, try to extract just the command
|
537
|
-
if len(fix_command.split('\n')) > 1:
|
538
|
-
# Take the shortest non-empty line as it's likely the command
|
539
|
-
lines = [line.strip() for line in fix_command.split('\n') if line.strip()]
|
540
|
-
if lines:
|
541
|
-
fix_command = min(lines, key=len)
|
542
|
-
|
543
|
-
print(f"🔧 Suggested fix: {fix_command}")
|
544
|
-
return fix_command
|
545
|
-
else:
|
546
|
-
print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
|
547
|
-
return None
|
548
|
-
except Exception as e:
|
549
|
-
print(f"❌ Error calling OpenAI API: {e}")
|
550
|
-
return None
|
551
|
-
|
552
|
-
def prompt_for_hf_token():
|
553
|
-
"""Prompt user for Hugging Face token when needed"""
|
554
|
-
# Try to use credentials manager first
|
555
|
-
try:
|
556
|
-
from credentials_manager import CredentialsManager
|
557
|
-
credentials_manager = CredentialsManager()
|
558
|
-
token = credentials_manager.get_huggingface_token()
|
559
|
-
if token:
|
560
|
-
return token
|
561
|
-
except ImportError:
|
562
|
-
# Fall back to direct input if credentials_manager is not available
|
563
|
-
pass
|
564
|
-
|
565
|
-
# Traditional direct input method as fallback
|
566
|
-
print("\n" + "="*60)
|
567
|
-
print("🔑 HUGGING FACE TOKEN REQUIRED")
|
568
|
-
print("="*60)
|
569
|
-
print("The training script requires a valid Hugging Face token.")
|
570
|
-
print("You can get your token from: https://huggingface.co/settings/tokens")
|
571
|
-
print("📝 Please paste your Hugging Face token below:")
|
572
|
-
print(" (Your input will be hidden for security)")
|
573
|
-
print("-" * 60)
|
574
|
-
|
575
|
-
try:
|
576
|
-
token = getpass.getpass("HF Token: ").strip()
|
577
|
-
if not token:
|
578
|
-
print("❌ No token provided.")
|
579
|
-
return None
|
580
|
-
print("✅ Token received successfully!")
|
581
|
-
return token
|
582
|
-
except KeyboardInterrupt:
|
583
|
-
print("\n❌ Token input cancelled by user.")
|
584
|
-
return None
|
585
|
-
except Exception as e:
|
586
|
-
print(f"❌ Error getting token: {e}")
|
587
|
-
return None
|
588
|
-
|
589
|
-
def create_modal_sandbox(gpu_type, repo_url=None, repo_name=None, setup_commands=None, volume_name=None):
|
590
|
-
# Import the credentials manager if available
|
591
|
-
try:
|
592
|
-
from credentials_manager import CredentialsManager
|
593
|
-
credentials_manager = CredentialsManager()
|
594
|
-
except ImportError:
|
595
|
-
credentials_manager = None
|
596
|
-
print("⚠️ Credentials manager not found, will use environment variables or prompt for credentials")
|
597
|
-
|
598
|
-
# Check if Modal is authenticated
|
599
|
-
try:
|
600
|
-
# Try to import modal first to check if it's installed
|
601
|
-
import modal
|
602
|
-
|
603
|
-
# Try to access Modal token to check authentication
|
604
|
-
try:
|
605
|
-
# This will raise an exception if not authenticated
|
606
|
-
modal.config.get_current_workspace_name()
|
607
|
-
print("✅ Authentication verified")
|
608
|
-
except modal.exception.AuthError:
|
609
|
-
print("\n" + "="*80)
|
610
|
-
print("🔑 AUTHENTICATION REQUIRED")
|
611
|
-
print("="*80)
|
612
|
-
print("GitArsenal requires authentication to create cloud environments.")
|
613
|
-
|
614
|
-
# Try to get token from credentials manager
|
615
|
-
modal_token = None
|
616
|
-
if credentials_manager:
|
617
|
-
try:
|
618
|
-
modal_token = credentials_manager.get_modal_token()
|
619
|
-
if modal_token:
|
620
|
-
# Set the token in the environment
|
621
|
-
os.environ["MODAL_TOKEN_ID"] = modal_token
|
622
|
-
print("✅ Modal token set from credentials manager")
|
623
|
-
|
624
|
-
# Try to authenticate with the token
|
625
|
-
try:
|
626
|
-
import subprocess
|
627
|
-
token_result = subprocess.run(
|
628
|
-
["modal", "token", "set", "--from-env"],
|
629
|
-
capture_output=True, text=True
|
630
|
-
)
|
631
|
-
if token_result.returncode == 0:
|
632
|
-
print("✅ Successfully authenticated with Modal")
|
633
|
-
else:
|
634
|
-
print(f"⚠️ Failed to authenticate with Modal: {token_result.stderr}")
|
635
|
-
print("\nPlease authenticate manually:")
|
636
|
-
print("1. Run 'modal token new' to get a new token")
|
637
|
-
print("2. Then restart this command")
|
638
|
-
return None
|
639
|
-
except Exception as e:
|
640
|
-
print(f"⚠️ Error setting Modal token: {e}")
|
641
|
-
return None
|
642
|
-
except Exception as e:
|
643
|
-
print(f"⚠️ Error getting Modal token: {e}")
|
644
|
-
|
645
|
-
if not modal_token:
|
646
|
-
print("\nTo authenticate with Modal, you need to:")
|
647
|
-
print("1. Create a Modal account at https://modal.com if you don't have one")
|
648
|
-
print("2. Run the following command to get a token:")
|
649
|
-
print(" modal token new")
|
650
|
-
print("3. Then set up your credentials in GitArsenal:")
|
651
|
-
print(" ./gitarsenal.py credentials set modal_token")
|
652
|
-
print("\nAfter completing these steps, try your command again.")
|
653
|
-
print("="*80)
|
654
|
-
return None
|
655
|
-
except ImportError:
|
656
|
-
print("\n" + "="*80)
|
657
|
-
print("❌ MODAL PACKAGE NOT INSTALLED")
|
658
|
-
print("="*80)
|
659
|
-
print("GitArsenal requires the Modal package to be installed.")
|
660
|
-
print("\nTo install Modal, run:")
|
661
|
-
print(" pip install modal")
|
662
|
-
print("\nAfter installation, authenticate with Modal:")
|
663
|
-
print("1. Run 'modal token new'")
|
664
|
-
print("2. Then run './gitarsenal.py credentials set modal_token'")
|
665
|
-
print("="*80)
|
666
|
-
return None
|
667
|
-
except Exception as e:
|
668
|
-
print(f"⚠️ Error checking Modal authentication: {e}")
|
669
|
-
print("Continuing anyway, but Modal operations may fail")
|
670
|
-
|
671
|
-
# Execution history for tracking all commands and their results in this session
|
672
|
-
execution_history = []
|
673
|
-
|
674
|
-
# Track session start time
|
675
|
-
session_start = datetime.datetime.now().isoformat()
|
676
|
-
|
677
|
-
# Track previous errors to detect repeated failures
|
678
|
-
previous_errors = {}
|
679
|
-
|
680
|
-
# Track Python version management
|
681
|
-
conda_installed = False
|
682
|
-
python_version_switched = False
|
683
|
-
current_python_version = None
|
684
|
-
|
685
|
-
# Generate a unique app name with timestamp to avoid conflicts
|
686
|
-
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
687
|
-
app_name = f"sandbox-{timestamp}"
|
688
|
-
|
689
|
-
gpu_configs = {
|
690
|
-
'T4': {'gpu': 'T4', 'memory': 16},
|
691
|
-
'L4': {'gpu': 'L4', 'memory': 24},
|
692
|
-
'A10G': {'gpu': 'A10G', 'memory': 24},
|
693
|
-
'A100-40GB': {'gpu': 'A100-SXM4-40GB', 'memory': 40},
|
694
|
-
'A100-80GB': {'gpu': 'A100-80GB', 'memory': 80},
|
695
|
-
'L40S': {'gpu': 'L40S', 'memory': 48},
|
696
|
-
'H100': {'gpu': 'H100', 'memory': 80},
|
697
|
-
'H200': {'gpu': 'H200', 'memory': 141},
|
698
|
-
'B200': {'gpu': 'B200', 'memory': 96}
|
699
|
-
}
|
700
|
-
|
701
|
-
if gpu_type not in gpu_configs:
|
702
|
-
print(f"⚠️ Unknown GPU type: {gpu_type}. Using A10G as default.")
|
703
|
-
gpu_type = 'A10G'
|
704
|
-
|
705
|
-
gpu_spec = gpu_configs[gpu_type]
|
706
|
-
print(f"🚀 Creating sandbox with {gpu_spec['gpu']} GPU ({gpu_spec['memory']}GB VRAM)")
|
707
|
-
|
708
|
-
# Initialize uv_path variable
|
709
|
-
uv_path = ""
|
710
|
-
|
711
|
-
# Setup volume if specified
|
712
|
-
volume = None
|
713
|
-
volume_mount_path = "/persistent"
|
714
|
-
|
715
|
-
if volume_name:
|
716
|
-
print(f"📦 Setting up volume: {volume_name}")
|
717
|
-
try:
|
718
|
-
# Try to get existing volume or create new one
|
719
|
-
volume = modal.Volume.from_name(volume_name, create_if_missing=True)
|
720
|
-
print(f"✅ Volume '{volume_name}' ready for use")
|
721
|
-
except Exception as e:
|
722
|
-
print(f"⚠️ Could not setup volume '{volume_name}': {e}")
|
723
|
-
print("⚠️ Continuing without persistent volume")
|
724
|
-
volume = None
|
725
|
-
else:
|
726
|
-
# Create a default volume for this session
|
727
|
-
default_volume_name = f"sandbox-vol-{timestamp}"
|
728
|
-
print(f"📦 Creating default volume: {default_volume_name}")
|
729
|
-
try:
|
730
|
-
volume = modal.Volume.from_name(default_volume_name, create_if_missing=True)
|
731
|
-
volume_name = default_volume_name
|
732
|
-
print(f"✅ Default volume '{default_volume_name}' created")
|
733
|
-
except Exception as e:
|
734
|
-
print(f"⚠️ Could not create default volume: {e}")
|
735
|
-
print("⚠️ Continuing without persistent volume")
|
736
|
-
volume = None
|
737
|
-
|
738
|
-
# Enable output for image building
|
739
|
-
with modal.enable_output():
|
740
|
-
# Create a Modal app and sandbox
|
741
|
-
print(f"🚀 Creating sandbox with GPU: {gpu_type.lower()} (App: {app_name})...")
|
742
|
-
# Always use lookup with create_if_missing=True to properly initialize the app
|
743
|
-
app = modal.App.lookup(app_name, create_if_missing=True)
|
744
|
-
print(f"Created app: {app_name}")
|
745
|
-
|
746
|
-
# Create the sandbox with increased timeout for long-running operations
|
747
|
-
print("⏱️ Setting 30-minute timeout for long-running installations...")
|
748
|
-
|
749
|
-
# Setup volume mount if available
|
750
|
-
volumes = {}
|
751
|
-
if volume:
|
752
|
-
volumes[volume_mount_path] = volume
|
753
|
-
print(f"📦 Mounting volume '{volume_name}' at {volume_mount_path}")
|
754
|
-
|
755
|
-
cuda_image = modal.Image.from_registry("nvidia/cuda:12.8.1-devel-ubuntu24.04", add_python="3.12")
|
756
|
-
|
757
|
-
sandbox = modal.Sandbox.create(
|
758
|
-
"sleep", "infinity",
|
759
|
-
app=app,
|
760
|
-
gpu=gpu_type.lower(),
|
761
|
-
image=cuda_image,
|
762
|
-
timeout=3600, # 40 minutes instead of 15 minutes
|
763
|
-
volumes=volumes if volumes else None
|
764
|
-
)
|
765
|
-
|
766
|
-
# Get the sandbox ID for reference
|
767
|
-
sandbox_id = sandbox.object_id
|
768
|
-
print(f"📋 Sandbox ID: {sandbox_id}")
|
769
|
-
|
770
|
-
# Wait a moment for the container to be registered
|
771
|
-
print("⏳ Waiting for container to be registered...")
|
772
|
-
time.sleep(5) # Increased wait time
|
773
|
-
|
774
|
-
# Function to extract container ID from text output
|
775
|
-
def extract_container_id_from_text(output):
|
776
|
-
print("Extracting container ID from text output...")
|
777
|
-
|
778
|
-
# First, try to find lines with the app name
|
779
|
-
lines = output.split('\n')
|
780
|
-
app_lines = [line for line in lines if app_name in line]
|
781
|
-
|
782
|
-
if app_lines:
|
783
|
-
# Get the first line with the app name
|
784
|
-
app_line = app_lines[0]
|
785
|
-
print(f"Found line with app name: {app_line}")
|
786
|
-
|
787
|
-
# Try to extract the container ID
|
788
|
-
if '│' in app_line:
|
789
|
-
parts = app_line.split('│')
|
790
|
-
if len(parts) >= 2:
|
791
|
-
container_id_part = parts[1].strip()
|
792
|
-
if container_id_part.startswith('ta-'):
|
793
|
-
return container_id_part
|
794
|
-
|
795
|
-
# If that didn't work, try regex pattern matching
|
796
|
-
container_matches = re.findall(r'ta-[A-Z0-9]+', output)
|
797
|
-
if container_matches:
|
798
|
-
return container_matches[0]
|
799
|
-
|
800
|
-
return None
|
801
|
-
|
802
|
-
# Get the container ID using multiple approaches
|
803
|
-
print("📋 Getting container ID...")
|
804
|
-
container_id = None
|
805
|
-
|
806
|
-
# Approach 1: Use modal container list --json
|
807
|
-
try:
|
808
|
-
print("Trying JSON approach...")
|
809
|
-
result = subprocess.run(["modal", "container", "list", "--json"], capture_output=True, text=True)
|
810
|
-
output = result.stdout
|
811
|
-
print(f"JSON output: {output}")
|
812
|
-
|
813
|
-
import json
|
814
|
-
try:
|
815
|
-
containers = json.loads(output)
|
816
|
-
print(f"Parsed JSON: {containers}")
|
817
|
-
if containers and isinstance(containers, list) and len(containers) > 0:
|
818
|
-
# The container ID is in the "Container ID" field, not "id"
|
819
|
-
container_id = containers[0].get("Container ID")
|
820
|
-
if container_id:
|
821
|
-
print(f"📋 Found container ID from JSON: {container_id}")
|
822
|
-
else:
|
823
|
-
# Try lowercase keys as a fallback
|
824
|
-
container_id = containers[0].get("container_id") or containers[0].get("container id")
|
825
|
-
if container_id:
|
826
|
-
print(f"📋 Found container ID from JSON with lowercase keys: {container_id}")
|
827
|
-
except json.JSONDecodeError as json_err:
|
828
|
-
print(f"JSON parse error: {json_err}")
|
829
|
-
except Exception as e:
|
830
|
-
print(f"Error with JSON approach: {e}")
|
831
|
-
|
832
|
-
# Approach 2: Use modal container list with text parsing
|
833
|
-
if not container_id:
|
834
|
-
try:
|
835
|
-
print("Trying text output approach...")
|
836
|
-
result = subprocess.run(["modal", "container", "list"], capture_output=True, text=True)
|
837
|
-
output = result.stdout
|
838
|
-
print("Modal container list output:")
|
839
|
-
print(output)
|
840
|
-
|
841
|
-
container_id = extract_container_id_from_text(output)
|
842
|
-
if container_id:
|
843
|
-
print(f"📋 Found container ID from text: {container_id}")
|
844
|
-
except Exception as e:
|
845
|
-
print(f"Error with text approach: {e}")
|
846
|
-
|
847
|
-
# Approach 3: Use shell command to get first container
|
848
|
-
if not container_id:
|
849
|
-
try:
|
850
|
-
print("Trying shell command approach...")
|
851
|
-
cmd = "modal container list | grep -v Container | grep -v '─' | head -1 | awk '{print $1}'"
|
852
|
-
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
|
853
|
-
output = result.stdout.strip()
|
854
|
-
print(f"Shell command output: {output}")
|
855
|
-
|
856
|
-
if output and output.startswith('ta-'):
|
857
|
-
container_id = output
|
858
|
-
print(f"📋 Found container ID from shell command: {container_id}")
|
859
|
-
except Exception as e:
|
860
|
-
print(f"Error with shell command approach: {e}")
|
861
|
-
|
862
|
-
# Approach 4: Get all containers and find the one with our app
|
863
|
-
if not container_id:
|
864
|
-
try:
|
865
|
-
print("Trying app matching approach...")
|
866
|
-
cmd = "modal container list"
|
867
|
-
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
|
868
|
-
output = result.stdout
|
869
|
-
|
870
|
-
# Look for our app name in the output
|
871
|
-
if app_name in output:
|
872
|
-
print(f"Found {app_name} in container list")
|
873
|
-
# Try to get the container ID from the same line
|
874
|
-
lines = output.split('\n')
|
875
|
-
for line in lines:
|
876
|
-
if app_name in line:
|
877
|
-
print(f"Found line: {line}")
|
878
|
-
# Try to extract the first column
|
879
|
-
if '│' in line:
|
880
|
-
container_id_part = line.split('│')[1].strip()
|
881
|
-
if container_id_part.startswith('ta-'):
|
882
|
-
container_id = container_id_part
|
883
|
-
print(f"📋 Found container ID from app matching: {container_id}")
|
884
|
-
break
|
885
|
-
except Exception as e:
|
886
|
-
print(f"Error with app matching approach: {e}")
|
887
|
-
|
888
|
-
# Final fallback: Use sandbox ID to create a container ID
|
889
|
-
if not container_id:
|
890
|
-
print("⚠️ All approaches failed to find container ID")
|
891
|
-
# Use sandbox ID as container prefix
|
892
|
-
short_id = sandbox_id.split('-')[1][:8] if '-' in sandbox_id else sandbox_id[:8]
|
893
|
-
container_id = f"ta-{short_id.upper()}"
|
894
|
-
print(f"📋 Using derived container ID: {container_id}")
|
895
|
-
|
896
|
-
# Ensure we have a non-None container ID
|
897
|
-
if not container_id:
|
898
|
-
print("⚠️ Critical error: Failed to determine container ID")
|
899
|
-
print("⚠️ Using a placeholder container ID")
|
900
|
-
container_id = "ta-UNKNOWN"
|
901
|
-
|
902
|
-
# Try to verify the container ID exists
|
903
|
-
print("🔍 Verifying container ID...")
|
904
|
-
verify_cmd = f"modal container logs {container_id} --tail 1 2>/dev/null || echo 'Container not found'"
|
905
|
-
verify_result = subprocess.run(verify_cmd, shell=True, capture_output=True, text=True)
|
906
|
-
if "Container not found" in verify_result.stdout:
|
907
|
-
print(f"⚠️ Container ID verification failed: {container_id}")
|
908
|
-
|
909
|
-
# Last resort: Try to find any valid container
|
910
|
-
print("🔍 Looking for any valid container as last resort...")
|
911
|
-
list_cmd = "modal container list | grep -v Container | grep -v '─' | grep -v '┏' | grep -v '┃' | head -1"
|
912
|
-
list_result = subprocess.run(list_cmd, shell=True, capture_output=True, text=True)
|
913
|
-
if list_result.stdout.strip():
|
914
|
-
print(f"Found container line: {list_result.stdout.strip()}")
|
915
|
-
# Try to extract the ID from the first column
|
916
|
-
container_line = list_result.stdout.strip()
|
917
|
-
if '│' in container_line:
|
918
|
-
possible_id = container_line.split('│')[1].strip()
|
919
|
-
if possible_id.startswith('ta-'):
|
920
|
-
container_id = possible_id
|
921
|
-
print(f"📋 Using container ID from list as last resort: {container_id}")
|
922
|
-
|
923
|
-
# Verify this container
|
924
|
-
verify_cmd = f"modal container logs {container_id} --tail 1 2>/dev/null || echo 'Container not found'"
|
925
|
-
verify_result = subprocess.run(verify_cmd, shell=True, capture_output=True, text=True)
|
926
|
-
if "Container not found" not in verify_result.stdout:
|
927
|
-
print(f"✅ Last resort container ID verified: {container_id}")
|
928
|
-
else:
|
929
|
-
print("⚠️ Last resort container ID also failed verification")
|
930
|
-
|
931
|
-
print("⚠️ Container connection may fail. You may need to connect manually.")
|
932
|
-
else:
|
933
|
-
print(f"✅ Container ID verified: {container_id}")
|
934
|
-
|
935
|
-
# Function to convert bytes to string
|
936
|
-
def _to_str(maybe_bytes):
|
937
|
-
try:
|
938
|
-
return (maybe_bytes.decode('utf-8') if isinstance(maybe_bytes, (bytes, bytearray)) else maybe_bytes)
|
939
|
-
except UnicodeDecodeError:
|
940
|
-
# Handle non-UTF-8 bytes by replacing invalid characters
|
941
|
-
if isinstance(maybe_bytes, (bytes, bytearray)):
|
942
|
-
return maybe_bytes.decode('utf-8', errors='replace')
|
943
|
-
else:
|
944
|
-
return str(maybe_bytes)
|
945
|
-
except Exception:
|
946
|
-
# Last resort fallback
|
947
|
-
return str(maybe_bytes)
|
948
|
-
|
949
|
-
# Skip the persistent shell approach for now due to async stream complexity
|
950
|
-
print("🔍 async streams require complex async handling")
|
951
|
-
print("🔄 Switching to individual command execution approach for reliability...")
|
952
|
-
|
953
|
-
# Initialize state tracking variables
|
954
|
-
current_dir = "/"
|
955
|
-
execution_history = []
|
956
|
-
|
957
|
-
# Function to run commands using individual sandbox.exec calls
|
958
|
-
def run_command(cmd, show_output=True, retry_count=0, max_retries=3, debug_with_llm=True, timeout=600):
|
959
|
-
"""
|
960
|
-
Execute a command in the sandbox with error handling and automatic retries.
|
961
|
-
|
962
|
-
When a command fails and is fixed by the LLM debugging system, the retry count
|
963
|
-
is reset to 0, so successful fixes don't count against the maximum retry limit.
|
964
|
-
This ensures that a command that's been fixed gets a fresh set of retry attempts.
|
965
|
-
"""
|
966
|
-
# Use the outer scope variables
|
967
|
-
nonlocal current_dir, execution_history, sandbox, previous_errors
|
968
|
-
nonlocal conda_installed, python_version_switched, current_python_version
|
969
|
-
|
970
|
-
# Record command start time
|
971
|
-
command_start_time = datetime.datetime.now().isoformat()
|
972
|
-
start_time = time.time()
|
973
|
-
|
974
|
-
# Prevent infinite retry loops
|
975
|
-
if retry_count >= max_retries:
|
976
|
-
print(f"⚠️ Maximum retry count ({max_retries}) reached. Stopping retries.")
|
977
|
-
return False, "", f"Maximum retry count ({max_retries}) reached"
|
978
|
-
|
979
|
-
# Special handling for cd commands to prevent common navigation errors
|
980
|
-
if cmd.strip().startswith("cd "):
|
981
|
-
# Extract the target directory from the cd command
|
982
|
-
cd_parts = cmd.split(None, 1)
|
983
|
-
if len(cd_parts) >= 2:
|
984
|
-
target_dir = cd_parts[1].strip().strip('"\'')
|
985
|
-
|
986
|
-
# Check if this is a repo name that matches the end of current_dir
|
987
|
-
# This prevents errors like "cd repo-name" when already in "/root/repo-name"
|
988
|
-
# BUT we need to be careful about nested directories like /root/litex/litex
|
989
|
-
if (target_dir != "/" and target_dir != "." and target_dir != ".." and
|
990
|
-
not target_dir.startswith("/") and not target_dir.startswith("./") and
|
991
|
-
not target_dir.startswith("../") and current_dir.endswith("/" + target_dir)):
|
992
|
-
|
993
|
-
# Advanced check: analyze directory contents to determine if navigation makes sense
|
994
|
-
print(f"🔍 Analyzing directory contents to determine navigation necessity...")
|
995
|
-
|
996
|
-
# Get current directory contents
|
997
|
-
current_contents_cmd = "ls -la"
|
998
|
-
current_result = sandbox.exec("bash", "-c", current_contents_cmd)
|
999
|
-
current_result.wait()
|
1000
|
-
current_contents = _to_str(current_result.stdout) if current_result.stdout else ""
|
1001
|
-
|
1002
|
-
# Check if target directory exists
|
1003
|
-
test_cmd = f"test -d \"{target_dir}\""
|
1004
|
-
test_result = sandbox.exec("bash", "-c", test_cmd)
|
1005
|
-
test_result.wait()
|
1006
|
-
|
1007
|
-
if test_result.returncode == 0:
|
1008
|
-
# Target directory exists, get its contents
|
1009
|
-
target_contents_cmd = f"ls -la \"{target_dir}\""
|
1010
|
-
target_result = sandbox.exec("bash", "-c", target_contents_cmd)
|
1011
|
-
target_result.wait()
|
1012
|
-
target_contents = _to_str(target_result.stdout) if target_result.stdout else ""
|
1013
|
-
|
1014
|
-
try:
|
1015
|
-
# Call LLM for analysis with the dedicated function
|
1016
|
-
llm_response = analyze_directory_navigation_with_llm(current_dir, target_dir, current_contents, target_contents, api_key)
|
1017
|
-
|
1018
|
-
# Extract decision from LLM response
|
1019
|
-
if llm_response and "NAVIGATE" in llm_response.upper():
|
1020
|
-
print(f"🤖 LLM Analysis: Navigation makes sense - contents are different")
|
1021
|
-
print(f"📂 Current: {current_dir}")
|
1022
|
-
print(f"🎯 Target: {target_dir}")
|
1023
|
-
print(f"🔄 Proceeding with navigation...")
|
1024
|
-
else:
|
1025
|
-
print(f"🤖 LLM Analysis: Navigation is redundant - contents are similar")
|
1026
|
-
print(f"⚠️ Detected redundant directory navigation: {cmd}")
|
1027
|
-
print(f"📂 Already in the correct directory: {current_dir}")
|
1028
|
-
print(f"✅ Skipping unnecessary navigation command")
|
1029
|
-
return True, f"Already in directory {current_dir}", ""
|
1030
|
-
|
1031
|
-
except Exception as e:
|
1032
|
-
print(f"⚠️ LLM analysis failed: {e}")
|
1033
|
-
print(f"🔄 Falling back to simple directory existence check...")
|
1034
|
-
# Fallback to simple check
|
1035
|
-
print(f"🔍 Detected nested directory '{target_dir}' exists in current location")
|
1036
|
-
print(f"📂 Current: {current_dir}")
|
1037
|
-
print(f"🎯 Target: {target_dir}")
|
1038
|
-
print(f"🔄 Proceeding with navigation to nested directory...")
|
1039
|
-
else:
|
1040
|
-
# No nested directory exists, so this is truly redundant
|
1041
|
-
print(f"⚠️ Detected redundant directory navigation: {cmd}")
|
1042
|
-
print(f"📂 Already in the correct directory: {current_dir}")
|
1043
|
-
print(f"✅ Skipping unnecessary navigation command")
|
1044
|
-
return True, f"Already in directory {current_dir}", ""
|
1045
|
-
|
1046
|
-
# Remove any parenthetical text that could cause syntax errors in bash
|
1047
|
-
if '(' in cmd:
|
1048
|
-
original_cmd = cmd
|
1049
|
-
cmd = re.sub(r'\([^)]*\)', '', cmd).strip()
|
1050
|
-
print(f"🔄 Removing parenthetical text:")
|
1051
|
-
print(f" Original: {original_cmd}")
|
1052
|
-
print(f" Cleaned: {cmd}")
|
1053
|
-
|
1054
|
-
# Convert pip install commands to use uv for faster installation
|
1055
|
-
original_cmd = cmd
|
1056
|
-
if 'uv_path' in globals() and uv_path and ('pip install' in cmd or 'pip3 install' in cmd) and not cmd.startswith(uv_path):
|
1057
|
-
# Replace pip/pip3 install with uv pip install, but only if not already using uv
|
1058
|
-
cmd = cmd.replace('pip install', f'{uv_path} pip install')
|
1059
|
-
cmd = cmd.replace('pip3 install', f'{uv_path} pip install')
|
1060
|
-
print(f"🚀 Converting to uv for faster installation:")
|
1061
|
-
print(f" Original: {original_cmd}")
|
1062
|
-
print(f" Converted: {cmd}")
|
1063
|
-
|
1064
|
-
print(f"\n▶ {cmd}\n")
|
1065
|
-
|
1066
|
-
# Check if this is a potentially long-running command
|
1067
|
-
long_running_patterns = [
|
1068
|
-
'pip install', 'apt install', 'yum install',
|
1069
|
-
'wget', 'curl', 'git clone', 'npm install', 'yarn install',
|
1070
|
-
'cmake', 'make', 'gcc', 'g++', 'python setup.py'
|
1071
|
-
]
|
1072
|
-
|
1073
|
-
is_long_running = any(pattern in cmd.lower() for pattern in long_running_patterns)
|
1074
|
-
if is_long_running:
|
1075
|
-
print(f"⏱️ Detected potentially long-running command. This may take several minutes...")
|
1076
|
-
print(f"📦 Large packages (like PyTorch) can take 5-10 minutes to download and install.")
|
1077
|
-
print(f"🔄 The container has a 30-minute timeout to accommodate this.")
|
1078
|
-
|
1079
|
-
# Use the original command without modification for interactivity
|
1080
|
-
cmd_to_execute = cmd
|
1081
|
-
|
1082
|
-
# Special handling for huggingface-cli login command
|
1083
|
-
if "huggingface-cli login" in cmd_to_execute:
|
1084
|
-
print("🔍 Detected huggingface-cli login command")
|
1085
|
-
print("🔄 Using non-interactive login approach with token instead")
|
1086
|
-
|
1087
|
-
# Check if the command already has a token
|
1088
|
-
if "--token" in cmd_to_execute:
|
1089
|
-
print("✅ Command already includes token parameter")
|
1090
|
-
else:
|
1091
|
-
# Prompt for HF token
|
1092
|
-
hf_token = prompt_for_hf_token()
|
1093
|
-
if hf_token:
|
1094
|
-
# Replace with non-interactive command
|
1095
|
-
cmd_to_execute = f"huggingface-cli login --token {hf_token} --add-to-git-credential"
|
1096
|
-
print(f"🔄 Using non-interactive command: {cmd_to_execute}")
|
1097
|
-
else:
|
1098
|
-
print("❌ No token provided. Cannot continue with Hugging Face login.")
|
1099
|
-
return False, "", "No Hugging Face token provided"
|
1100
|
-
|
1101
|
-
# Special handling for wandb login command
|
1102
|
-
elif "wandb login" in cmd_to_execute and "YOUR_API_KEY" not in cmd_to_execute:
|
1103
|
-
print("🔍 Detected Weights & Biases login command")
|
1104
|
-
print("🔄 Using API key approach for non-interactive login")
|
1105
|
-
|
1106
|
-
# Check if the command already includes an API key
|
1107
|
-
has_api_key = False
|
1108
|
-
cmd_parts = cmd_to_execute.split()
|
1109
|
-
for part in cmd_parts:
|
1110
|
-
if part != "wandb" and part != "login" and not part.startswith("-"):
|
1111
|
-
has_api_key = True
|
1112
|
-
break
|
1113
|
-
|
1114
|
-
if not has_api_key:
|
1115
|
-
# Prompt for W&B API key
|
1116
|
-
print("\n" + "="*60)
|
1117
|
-
print("🔑 WEIGHTS & BIASES API KEY REQUIRED")
|
1118
|
-
print("="*60)
|
1119
|
-
print("You can get your API key from: https://wandb.ai/authorize")
|
1120
|
-
print("📝 Please paste your W&B API key below:")
|
1121
|
-
print(" (Your input will be hidden for security)")
|
1122
|
-
print("-" * 60)
|
1123
|
-
|
1124
|
-
try:
|
1125
|
-
api_key = getpass.getpass("W&B API Key: ").strip()
|
1126
|
-
if not api_key:
|
1127
|
-
print("❌ No API key provided. Cannot continue with W&B login.")
|
1128
|
-
return False, "", "No W&B API key provided"
|
1129
|
-
|
1130
|
-
# Validate API key length (typically 40 characters)
|
1131
|
-
if len(api_key) != 40:
|
1132
|
-
print(f"⚠️ Warning: API key should be 40 characters long, yours was {len(api_key)}")
|
1133
|
-
confirm = input("Continue anyway? (yes/no): ").strip().lower()
|
1134
|
-
if confirm not in ["yes", "y"]:
|
1135
|
-
print("❌ W&B login cancelled.")
|
1136
|
-
return False, "", "W&B login cancelled"
|
1137
|
-
|
1138
|
-
print("✅ API key received successfully!")
|
1139
|
-
|
1140
|
-
# Replace with non-interactive command
|
1141
|
-
cmd_to_execute = f"wandb login {api_key}"
|
1142
|
-
print(f"🔄 Using non-interactive command: wandb login [API_KEY_HIDDEN]")
|
1143
|
-
except KeyboardInterrupt:
|
1144
|
-
print("\n❌ API key input cancelled by user.")
|
1145
|
-
return False, "", "W&B API key input cancelled"
|
1146
|
-
except Exception as e:
|
1147
|
-
print(f"❌ Error getting API key: {e}")
|
1148
|
-
return False, "", f"Error getting W&B API key: {e}"
|
1149
|
-
|
1150
|
-
# Validate the command before execution
|
1151
|
-
if not cmd_to_execute or cmd_to_execute.strip() == "":
|
1152
|
-
print("⚠️ Empty command detected, skipping execution")
|
1153
|
-
return False, "", "Empty command"
|
1154
|
-
|
1155
|
-
# Sanitize command to prevent issues with special characters
|
1156
|
-
# Remove any null bytes or other problematic characters
|
1157
|
-
cmd_to_execute = cmd_to_execute.replace('\x00', '').strip()
|
1158
|
-
|
1159
|
-
if len(cmd_to_execute) > 10000: # Prevent extremely long commands
|
1160
|
-
print("⚠️ Command too long, truncating")
|
1161
|
-
cmd_to_execute = cmd_to_execute[:10000]
|
1162
|
-
|
1163
|
-
# Prepare the command with environment variables and error handling
|
1164
|
-
full_command = f"""
|
1165
|
-
# Change to current directory
|
1166
|
-
cd "{current_dir}"
|
1167
|
-
|
1168
|
-
# Execute the command
|
1169
|
-
{cmd_to_execute}
|
1170
|
-
"""
|
1171
|
-
|
1172
|
-
# Execute the command using sandbox.exec
|
1173
|
-
try:
|
1174
|
-
print(f"🔄 Executing command in directory: {current_dir}")
|
1175
|
-
|
1176
|
-
# Use sandbox.exec for individual command execution
|
1177
|
-
result = sandbox.exec("bash", "-c", full_command.strip())
|
1178
|
-
|
1179
|
-
# Collect output in real-time - Modal streams are already set up for line-by-line streaming
|
1180
|
-
stdout_lines = []
|
1181
|
-
stderr_lines = []
|
1182
|
-
|
1183
|
-
# Process output streams in real-time - Modal handles this natively
|
1184
|
-
# We don't need to use threading here as Modal's streams are designed to be consumed directly
|
1185
|
-
if show_output:
|
1186
|
-
print("\n--- Command Output ---")
|
1187
|
-
|
1188
|
-
# Track if we've shown timeout warnings
|
1189
|
-
timeout_warnings = set()
|
1190
|
-
last_output_time = time.time()
|
1191
|
-
|
1192
|
-
# Read stdout in real-time
|
1193
|
-
for line in result.stdout:
|
1194
|
-
# Check for timeout
|
1195
|
-
current_time = time.time()
|
1196
|
-
elapsed = current_time - start_time
|
1197
|
-
time_since_output = current_time - last_output_time
|
1198
|
-
|
1199
|
-
# Show timeout warning every 30 seconds if no output for 30+ seconds
|
1200
|
-
if time_since_output > 30 and int(time_since_output) // 30 not in timeout_warnings:
|
1201
|
-
warning_time = int(time_since_output) // 30 * 30
|
1202
|
-
timeout_warnings.add(int(time_since_output) // 30)
|
1203
|
-
print(f"Still running after {int(elapsed)} seconds...")
|
1204
|
-
|
1205
|
-
# If total time exceeds timeout, break
|
1206
|
-
if elapsed > timeout:
|
1207
|
-
print(f"⚠️ Command timed out after {timeout} seconds")
|
1208
|
-
# Force terminate the command
|
1209
|
-
try:
|
1210
|
-
result.terminate()
|
1211
|
-
except:
|
1212
|
-
pass
|
1213
|
-
return False, "Command timed out", f"Command execution exceeded timeout of {timeout} seconds"
|
1214
|
-
|
1215
|
-
# Process the line
|
1216
|
-
line_str = _to_str(line)
|
1217
|
-
stdout_lines.append(line_str)
|
1218
|
-
if show_output:
|
1219
|
-
# Print immediately with flush to ensure real-time display
|
1220
|
-
print(line_str, end="", flush=True)
|
1221
|
-
|
1222
|
-
# Update last output time
|
1223
|
-
last_output_time = time.time()
|
1224
|
-
|
1225
|
-
# Read stderr in real-time
|
1226
|
-
for line in result.stderr:
|
1227
|
-
# Check for timeout
|
1228
|
-
current_time = time.time()
|
1229
|
-
elapsed = current_time - start_time
|
1230
|
-
time_since_output = current_time - last_output_time
|
1231
|
-
|
1232
|
-
# Show timeout warning every 30 seconds if no output for 30+ seconds
|
1233
|
-
if time_since_output > 30 and int(time_since_output) // 30 not in timeout_warnings:
|
1234
|
-
warning_time = int(time_since_output) // 30 * 30
|
1235
|
-
timeout_warnings.add(int(time_since_output) // 30)
|
1236
|
-
print(f"Still running after {int(elapsed)} seconds...")
|
1237
|
-
|
1238
|
-
# If total time exceeds timeout, break
|
1239
|
-
if elapsed > timeout:
|
1240
|
-
print(f"⚠️ Command timed out after {timeout} seconds")
|
1241
|
-
# Force terminate the command
|
1242
|
-
try:
|
1243
|
-
result.terminate()
|
1244
|
-
except:
|
1245
|
-
pass
|
1246
|
-
return False, "Command timed out", f"Command execution exceeded timeout of {timeout} seconds"
|
1247
|
-
|
1248
|
-
# Process the line
|
1249
|
-
line_str = _to_str(line)
|
1250
|
-
stderr_lines.append(line_str)
|
1251
|
-
if show_output:
|
1252
|
-
# Print immediately with flush to ensure real-time display
|
1253
|
-
print(line_str, end="", file=sys.stderr, flush=True)
|
1254
|
-
|
1255
|
-
# Update last output time
|
1256
|
-
last_output_time = time.time()
|
1257
|
-
|
1258
|
-
if show_output:
|
1259
|
-
print("--- End Output ---\n")
|
1260
|
-
|
1261
|
-
stdout_buffer = ''.join(stdout_lines)
|
1262
|
-
stderr_buffer = ''.join(stderr_lines)
|
1263
|
-
|
1264
|
-
# Wait for the process to complete before accessing returncode
|
1265
|
-
result.wait()
|
1266
|
-
exit_code = result.returncode
|
1267
|
-
|
1268
|
-
except Exception as e:
|
1269
|
-
print(f"❌ Error executing command: {e}")
|
1270
|
-
return False, "", str(e)
|
1271
|
-
|
1272
|
-
# Record command completion time
|
1273
|
-
command_end_time = datetime.datetime.now().isoformat()
|
1274
|
-
|
1275
|
-
# Calculate duration in seconds
|
1276
|
-
start_dt = datetime.datetime.fromisoformat(command_start_time)
|
1277
|
-
end_dt = datetime.datetime.fromisoformat(command_end_time)
|
1278
|
-
duration = (end_dt - start_dt).total_seconds()
|
1279
|
-
|
1280
|
-
# Record this command execution in history
|
1281
|
-
execution_record = {
|
1282
|
-
"command": cmd_to_execute,
|
1283
|
-
"original_command": cmd if cmd != cmd_to_execute else None,
|
1284
|
-
"start_time": command_start_time,
|
1285
|
-
"end_time": command_end_time,
|
1286
|
-
"duration_seconds": duration,
|
1287
|
-
"exit_code": exit_code,
|
1288
|
-
"stdout": stdout_buffer,
|
1289
|
-
"stderr": stderr_buffer,
|
1290
|
-
"directory": current_dir
|
1291
|
-
}
|
1292
|
-
execution_history.append(execution_record)
|
1293
|
-
|
1294
|
-
# Update current directory if this was a cd command and it succeeded
|
1295
|
-
if cmd_to_execute.strip().startswith("cd ") and exit_code == 0:
|
1296
|
-
# Extract the target directory from the cd command
|
1297
|
-
cd_parts = cmd_to_execute.split(None, 1)
|
1298
|
-
if len(cd_parts) >= 2:
|
1299
|
-
target_dir = cd_parts[1].strip('"\'')
|
1300
|
-
|
1301
|
-
# Store the previous directory for logging
|
1302
|
-
previous_dir = current_dir
|
1303
|
-
|
1304
|
-
# Handle different types of paths
|
1305
|
-
if target_dir.startswith('/'):
|
1306
|
-
# Absolute path
|
1307
|
-
current_dir = target_dir
|
1308
|
-
elif target_dir == '..':
|
1309
|
-
# Parent directory
|
1310
|
-
current_dir = '/'.join(current_dir.rstrip('/').split('/')[:-1]) or '/'
|
1311
|
-
elif target_dir == '.':
|
1312
|
-
# Current directory - no change
|
1313
|
-
pass
|
1314
|
-
else:
|
1315
|
-
# Relative path - handle special case where target is already at the end of current_dir
|
1316
|
-
if current_dir.endswith('/' + target_dir):
|
1317
|
-
print(f"📂 Already in directory {current_dir}, no change needed")
|
1318
|
-
else:
|
1319
|
-
current_dir = f"{current_dir.rstrip('/')}/{target_dir}"
|
1320
|
-
|
1321
|
-
print(f"📂 Updated current directory: {previous_dir} -> {current_dir}")
|
1322
|
-
execution_record["new_current_dir"] = current_dir
|
1323
|
-
|
1324
|
-
# Verify the directory actually exists
|
1325
|
-
verify_cmd = f"test -d \"{current_dir}\""
|
1326
|
-
verify_result = sandbox.exec("bash", "-c", verify_cmd)
|
1327
|
-
verify_result.wait()
|
1328
|
-
|
1329
|
-
if verify_result.returncode != 0:
|
1330
|
-
print(f"⚠️ Warning: Directory {current_dir} does not exist")
|
1331
|
-
print(f"⚠️ Reverting to previous directory: {previous_dir}")
|
1332
|
-
current_dir = previous_dir
|
1333
|
-
execution_record["new_current_dir"] = current_dir
|
1334
|
-
|
1335
|
-
# Check for errors and handle Hugging Face token issues
|
1336
|
-
if exit_code != 0:
|
1337
|
-
# Check for specific Hugging Face token errors
|
1338
|
-
hf_token_error_patterns = [
|
1339
|
-
"Token is required",
|
1340
|
-
"LocalTokenNotFoundError",
|
1341
|
-
"Invalid user token",
|
1342
|
-
"401 Client Error: Unauthorized",
|
1343
|
-
"Invalid credentials in Authorization header",
|
1344
|
-
"HF_TOKEN environment variable is invalid"
|
1345
|
-
]
|
1346
|
-
|
1347
|
-
is_hf_token_error = any(pattern in stderr_buffer for pattern in hf_token_error_patterns)
|
1348
|
-
|
1349
|
-
if is_hf_token_error:
|
1350
|
-
print(f"🔑 Detected Hugging Face token authentication error!")
|
1351
|
-
print(f"🔍 Error details: {stderr_buffer}")
|
1352
|
-
|
1353
|
-
# Prompt for the real token
|
1354
|
-
real_token = prompt_for_hf_token()
|
1355
|
-
|
1356
|
-
if real_token:
|
1357
|
-
print(f"🔄 Setting HF_TOKEN and retrying command...")
|
1358
|
-
|
1359
|
-
# Retry with the token set
|
1360
|
-
token_command = f"export HF_TOKEN='{real_token}'; {cmd_to_execute}"
|
1361
|
-
return run_command(token_command, show_output, retry_count + 1, max_retries)
|
1362
|
-
else:
|
1363
|
-
print("❌ No token provided. Cannot continue with Hugging Face operations.")
|
1364
|
-
return False, stdout_buffer, "No Hugging Face token provided"
|
1365
|
-
|
1366
|
-
# Check for "No such file or directory" errors with cd commands
|
1367
|
-
if "cd " in cmd_to_execute and "No such file or directory" in stderr_buffer:
|
1368
|
-
print("⚠️ Directory navigation error detected")
|
1369
|
-
|
1370
|
-
# Extract the target directory from the cd command
|
1371
|
-
cd_parts = cmd_to_execute.split(None, 1)
|
1372
|
-
if len(cd_parts) >= 2:
|
1373
|
-
target_dir = cd_parts[1].strip('"\'')
|
1374
|
-
|
1375
|
-
# Check if this might be a repository name that's already in the path
|
1376
|
-
if not target_dir.startswith('/') and '/' + target_dir in current_dir:
|
1377
|
-
print(f"🔍 The directory '{target_dir}' appears to be part of the current path: {current_dir}")
|
1378
|
-
print(f"⚠️ This is likely a redundant navigation attempt")
|
1379
|
-
|
1380
|
-
# If we're already in a directory that ends with the target, consider it a success
|
1381
|
-
if current_dir.endswith('/' + target_dir):
|
1382
|
-
print(f"✅ Already in the correct directory: {current_dir}")
|
1383
|
-
return True, f"Already in directory {current_dir}", ""
|
1384
|
-
|
1385
|
-
print(f"⚠️ Command failed with exit code {exit_code}")
|
1386
|
-
if stderr_buffer.strip():
|
1387
|
-
print(f"Error output: {stderr_buffer}")
|
1388
|
-
|
1389
|
-
# If command failed and we're debugging with LLM
|
1390
|
-
if debug_with_llm:
|
1391
|
-
print("🔍 Attempting to debug the failed command with OpenAI...")
|
1392
|
-
|
1393
|
-
# Check if the command is a hanging huggingface-cli login
|
1394
|
-
if "huggingface-cli login" in cmd_to_execute and not stderr_buffer.strip():
|
1395
|
-
print("🔍 Detected hanging huggingface-cli login command")
|
1396
|
-
print("🔄 Using non-interactive login approach with HF_TOKEN instead")
|
1397
|
-
|
1398
|
-
# Prompt for HF token
|
1399
|
-
hf_token = prompt_for_hf_token()
|
1400
|
-
if hf_token:
|
1401
|
-
# Set the token as environment variable and create .huggingface folder
|
1402
|
-
print("✅ Token received, setting up non-interactive authentication")
|
1403
|
-
setup_commands = [
|
1404
|
-
"mkdir -p ~/.huggingface",
|
1405
|
-
f"echo '{hf_token}' > ~/.huggingface/token",
|
1406
|
-
f"export HF_TOKEN='{hf_token}'",
|
1407
|
-
"echo 'HF_TOKEN and token file have been set up'"
|
1408
|
-
]
|
1409
|
-
|
1410
|
-
for setup_cmd in setup_commands:
|
1411
|
-
setup_success, setup_stdout, _ = run_command(setup_cmd, show_output=True, debug_with_llm=False)
|
1412
|
-
if not setup_success:
|
1413
|
-
print(f"⚠️ Setup command failed: {setup_cmd}")
|
1414
|
-
|
1415
|
-
print("✅ Hugging Face authentication set up non-interactively")
|
1416
|
-
return True, "Hugging Face authentication set up successfully", ""
|
1417
|
-
else:
|
1418
|
-
print("❌ No token provided. Cannot set up Hugging Face authentication.")
|
1419
|
-
return False, "", "No Hugging Face token provided"
|
1420
|
-
|
1421
|
-
# Check if the error is related to missing pytest
|
1422
|
-
if "ModuleNotFoundError: No module named 'pytest'" in stderr_buffer or "ImportError: No module named pytest" in stderr_buffer:
|
1423
|
-
print("🔍 Detected missing pytest module, installing it automatically...")
|
1424
|
-
pytest_install_success, _, _ = run_command("pip install pytest", show_output=True, debug_with_llm=False)
|
1425
|
-
if pytest_install_success:
|
1426
|
-
print("✅ Successfully installed pytest, retrying original command...")
|
1427
|
-
return run_command(cmd, show_output, retry_count + 1, max_retries)
|
1428
|
-
|
1429
|
-
# Check for Python version-specific errors
|
1430
|
-
python_version_errors = [
|
1431
|
-
# Python 3.13 distutils issue
|
1432
|
-
("ModuleNotFoundError: No module named 'distutils'", "3.13"),
|
1433
|
-
# Add more version-specific error patterns here
|
1434
|
-
("ImportError: cannot import name 'soft_unicode' from 'markupsafe'", None),
|
1435
|
-
("AttributeError: module 'setuptools.dist' has no attribute 'check_specifier'", None)
|
1436
|
-
]
|
1437
|
-
|
1438
|
-
# Check if any of the error patterns match
|
1439
|
-
for error_pattern, problematic_version in python_version_errors:
|
1440
|
-
if error_pattern in stderr_buffer:
|
1441
|
-
print(f"🔍 Detected Python version-specific error: {error_pattern}")
|
1442
|
-
|
1443
|
-
# Get current Python version if not already known
|
1444
|
-
if not current_python_version:
|
1445
|
-
version_cmd = "python --version"
|
1446
|
-
version_success, version_stdout, _ = run_command(version_cmd, show_output=False, debug_with_llm=False)
|
1447
|
-
if version_success:
|
1448
|
-
current_python_version = version_stdout.strip()
|
1449
|
-
print(f"🐍 Current Python version: {current_python_version}")
|
1450
|
-
|
1451
|
-
# Check if we've already tried switching Python versions
|
1452
|
-
if python_version_switched:
|
1453
|
-
print("⚠️ Already attempted to switch Python versions once, not trying again")
|
1454
|
-
break
|
1455
|
-
|
1456
|
-
print("🔄 Attempting to fix by switching Python version...")
|
1457
|
-
|
1458
|
-
# Install conda if not already installed
|
1459
|
-
if not conda_installed:
|
1460
|
-
print("📦 Installing Miniconda to manage Python versions...")
|
1461
|
-
conda_install_cmds = [
|
1462
|
-
"apt-get update -y",
|
1463
|
-
"apt-get install -y wget bzip2",
|
1464
|
-
"wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh",
|
1465
|
-
"bash /tmp/miniconda.sh -b -p /opt/conda",
|
1466
|
-
"rm /tmp/miniconda.sh",
|
1467
|
-
"echo 'export PATH=/opt/conda/bin:$PATH' >> ~/.bashrc",
|
1468
|
-
"export PATH=/opt/conda/bin:$PATH",
|
1469
|
-
"conda init bash",
|
1470
|
-
"source ~/.bashrc",
|
1471
|
-
"conda activate base"
|
1472
|
-
]
|
1473
|
-
|
1474
|
-
for conda_cmd in conda_install_cmds:
|
1475
|
-
print(f"🔄 Running: {conda_cmd}")
|
1476
|
-
conda_success, _, _ = run_command(conda_cmd, show_output=True, debug_with_llm=False)
|
1477
|
-
if not conda_success:
|
1478
|
-
print("⚠️ Failed to install conda, continuing with system Python")
|
1479
|
-
break
|
1480
|
-
|
1481
|
-
# Check if conda was successfully installed
|
1482
|
-
conda_check_cmd = "conda --version"
|
1483
|
-
conda_check_success, conda_check_stdout, _ = run_command(conda_check_cmd, show_output=True, debug_with_llm=False)
|
1484
|
-
conda_installed = conda_check_success
|
1485
|
-
|
1486
|
-
if conda_installed:
|
1487
|
-
print(f"✅ Successfully installed conda: {conda_check_stdout.strip()}")
|
1488
|
-
else:
|
1489
|
-
print("⚠️ Failed to verify conda installation")
|
1490
|
-
break
|
1491
|
-
|
1492
|
-
# Determine target Python version
|
1493
|
-
target_version = "3.10" # Default to a stable version
|
1494
|
-
if problematic_version == "3.13":
|
1495
|
-
# If we're on 3.13 and having issues, go to 3.10
|
1496
|
-
target_version = "3.10"
|
1497
|
-
elif "3.13" in str(current_python_version):
|
1498
|
-
# If we're on 3.13 for any other error, try 3.10
|
1499
|
-
target_version = "3.10"
|
1500
|
-
elif "3.10" in str(current_python_version):
|
1501
|
-
# If we're on 3.10 and having issues, try 3.9
|
1502
|
-
target_version = "3.9"
|
1503
|
-
|
1504
|
-
print(f"🐍 Switching from {current_python_version} to Python {target_version}...")
|
1505
|
-
|
1506
|
-
# Create and activate a conda environment with the target Python version
|
1507
|
-
conda_cmds = [
|
1508
|
-
f"conda create -y -n py{target_version} python={target_version}",
|
1509
|
-
f"echo 'conda activate py{target_version}' >> ~/.bashrc",
|
1510
|
-
f"conda init bash",
|
1511
|
-
f"source ~/.bashrc",
|
1512
|
-
f"conda activate py{target_version}"
|
1513
|
-
]
|
1514
|
-
|
1515
|
-
for conda_cmd in conda_cmds:
|
1516
|
-
print(f"🔄 Running: {conda_cmd}")
|
1517
|
-
conda_success, _, _ = run_command(conda_cmd, show_output=True, debug_with_llm=False)
|
1518
|
-
if not conda_success:
|
1519
|
-
print(f"⚠️ Failed to run conda command: {conda_cmd}")
|
1520
|
-
|
1521
|
-
# Verify Python version changed
|
1522
|
-
verify_cmd = "python --version"
|
1523
|
-
verify_success, verify_stdout, _ = run_command(verify_cmd, show_output=True, debug_with_llm=False)
|
1524
|
-
|
1525
|
-
if verify_success and target_version in verify_stdout:
|
1526
|
-
print(f"✅ Successfully switched to Python {verify_stdout.strip()}")
|
1527
|
-
python_version_switched = True
|
1528
|
-
current_python_version = verify_stdout.strip()
|
1529
|
-
|
1530
|
-
# Reinstall pip and setuptools in the new environment
|
1531
|
-
print("📦 Installing pip and setuptools in new environment...")
|
1532
|
-
run_command("pip install --upgrade pip setuptools wheel", show_output=True, debug_with_llm=False)
|
1533
|
-
|
1534
|
-
# Retry the original command with the new Python version
|
1535
|
-
print(f"🔄 Retrying original command with Python {target_version}...")
|
1536
|
-
# Reset the retry counter since we've made a significant change
|
1537
|
-
return run_command(cmd, show_output, 0, max_retries)
|
1538
|
-
else:
|
1539
|
-
print("⚠️ Failed to switch Python version, continuing with current version")
|
1540
|
-
|
1541
|
-
break
|
1542
|
-
|
1543
|
-
# Check if stderr is empty, try to use stdout as fallback
|
1544
|
-
debug_output = stderr_buffer
|
1545
|
-
if not debug_output or not debug_output.strip():
|
1546
|
-
print("⚠️ stderr is empty, checking if stdout contains error information...")
|
1547
|
-
if stdout_buffer and stdout_buffer.strip():
|
1548
|
-
print("✅ Using stdout for debugging as stderr is empty")
|
1549
|
-
debug_output = stdout_buffer
|
1550
|
-
else:
|
1551
|
-
print("⚠️ Both stderr and stdout are empty. Limited debugging information available.")
|
1552
|
-
debug_output = f"Command failed with exit code {exit_code}, but no error output was captured."
|
1553
|
-
|
1554
|
-
# Print debug output for verification
|
1555
|
-
print(f"🔍 Debug output to be sent to OpenAI ({len(debug_output)} chars):")
|
1556
|
-
print("="*60)
|
1557
|
-
print(debug_output if debug_output else "[EMPTY]")
|
1558
|
-
print("="*60)
|
1559
|
-
|
1560
|
-
fix_command = call_openai_for_debug(cmd_to_execute, debug_output, current_dir=current_dir, sandbox=sandbox)
|
1561
|
-
|
1562
|
-
if fix_command:
|
1563
|
-
print(f"🔧 OpenAI suggested fix command: {fix_command}")
|
1564
|
-
|
1565
|
-
# Check if the suggested command is "wandb login YOUR_API_KEY" or similar
|
1566
|
-
if "wandb login" in fix_command and ("YOUR_API_KEY" in fix_command or "[your_api_key]" in fix_command):
|
1567
|
-
print("🔍 Detected placeholder API key in suggested command")
|
1568
|
-
print("🔄 Prompting for actual W&B API key instead")
|
1569
|
-
|
1570
|
-
# Prompt for W&B API key
|
1571
|
-
print("\n" + "="*60)
|
1572
|
-
print("🔑 WEIGHTS & BIASES API KEY REQUIRED")
|
1573
|
-
print("="*60)
|
1574
|
-
print("You can get your API key from: https://wandb.ai/authorize")
|
1575
|
-
print("📝 Please paste your W&B API key below:")
|
1576
|
-
print(" (Your input will be hidden for security)")
|
1577
|
-
print("-" * 60)
|
1578
|
-
|
1579
|
-
try:
|
1580
|
-
api_key = getpass.getpass("W&B API Key: ").strip()
|
1581
|
-
if api_key:
|
1582
|
-
# Replace placeholder with actual API key
|
1583
|
-
fix_command = f"wandb login {api_key}"
|
1584
|
-
print(f"🔄 Using actual API key: wandb login [API_KEY_HIDDEN]")
|
1585
|
-
else:
|
1586
|
-
print("❌ No API key provided. Cannot continue with W&B login.")
|
1587
|
-
return False, stdout_buffer, stderr_buffer
|
1588
|
-
except Exception as e:
|
1589
|
-
print(f"❌ Error getting API key: {e}")
|
1590
|
-
return False, stdout_buffer, stderr_buffer
|
1591
|
-
|
1592
|
-
# Special handling for cd commands to prevent directory navigation loops
|
1593
|
-
if fix_command.strip().startswith("cd "):
|
1594
|
-
# Extract the target directory from the cd command
|
1595
|
-
cd_parts = fix_command.split(None, 1)
|
1596
|
-
if len(cd_parts) >= 2:
|
1597
|
-
target_dir = cd_parts[1].strip('"\'')
|
1598
|
-
|
1599
|
-
# Check if this is trying to navigate to a directory we're already in
|
1600
|
-
if target_dir.endswith(current_dir.split('/')[-1]) or current_dir.endswith('/' + target_dir):
|
1601
|
-
print(f"⚠️ Detected potential directory navigation loop")
|
1602
|
-
print(f"🔍 Current directory: {current_dir}")
|
1603
|
-
print(f"🔍 Suggested navigation: {target_dir}")
|
1604
|
-
|
1605
|
-
# Check if we're already in the target directory or a directory that contains it
|
1606
|
-
if current_dir.endswith('/' + target_dir) or ('/' + target_dir + '/' in current_dir):
|
1607
|
-
print(f"✅ Already in or past the target directory")
|
1608
|
-
print(f"🔄 Skipping redundant navigation and retrying the original command")
|
1609
|
-
return run_command(cmd, show_output, retry_count + 1, max_retries)
|
1610
|
-
|
1611
|
-
# Automatically run the fix command without asking for permission
|
1612
|
-
print(f"🔄 Running suggested fix command: {fix_command}")
|
1613
|
-
# Run the fix command with debugging disabled to prevent infinite loop
|
1614
|
-
fix_success, fix_stdout, fix_stderr = run_command(fix_command, show_output=True, debug_with_llm=False)
|
1615
|
-
|
1616
|
-
if fix_success:
|
1617
|
-
print("✅ Fix command succeeded!")
|
1618
|
-
# Retry the original command with reset retry count
|
1619
|
-
print(f"🔄 Retrying original command: {cmd}")
|
1620
|
-
|
1621
|
-
# Create a key for tracking this error
|
1622
|
-
error_key = f"{cmd}:{stderr_buffer[:100]}"
|
1623
|
-
|
1624
|
-
# Check if we've seen this error before
|
1625
|
-
if error_key in previous_errors:
|
1626
|
-
# We've seen this error before, don't reset the retry count
|
1627
|
-
previous_errors[error_key] += 1
|
1628
|
-
print(f"⚠️ Same error encountered {previous_errors[error_key]} times. Not resetting retry count.")
|
1629
|
-
return run_command(cmd, show_output, retry_count + 1, max_retries)
|
1630
|
-
else:
|
1631
|
-
# First time seeing this error, track it and reset retry count
|
1632
|
-
previous_errors[error_key] = 1
|
1633
|
-
print(f"🔄 Resetting retry count to 0 after successful fix")
|
1634
|
-
return run_command(cmd, show_output, 0, max_retries) # Reset retry count to 0
|
1635
|
-
else:
|
1636
|
-
print("❌ Fix command failed.")
|
1637
|
-
return False, stdout_buffer, stderr_buffer
|
1638
|
-
|
1639
|
-
return exit_code == 0, stdout_buffer, stderr_buffer
|
1640
|
-
|
1641
|
-
# Initialize the environment with basic commands
|
1642
|
-
print("🔄 Initializing environment...")
|
1643
|
-
init_commands = [
|
1644
|
-
"export PS1='$ '", # Set a simple prompt
|
1645
|
-
"export TERM=xterm-256color", # Set terminal type
|
1646
|
-
"source ~/.bashrc 2>/dev/null || true" # Source bashrc if available
|
1647
|
-
]
|
1648
|
-
|
1649
|
-
# Add volume-specific initialization if volume is available
|
1650
|
-
if volume:
|
1651
|
-
volume_commands = [
|
1652
|
-
f"mkdir -p {volume_mount_path}/venvs", # Create virtual environments directory
|
1653
|
-
f"mkdir -p {volume_mount_path}/cache", # Create cache directory
|
1654
|
-
f"export PIP_CACHE_DIR={volume_mount_path}/cache/pip", # Pip cache
|
1655
|
-
f"export UV_CACHE_DIR={volume_mount_path}/cache/uv", # UV cache
|
1656
|
-
]
|
1657
|
-
init_commands.extend(volume_commands)
|
1658
|
-
print(f"📦 Setting up persistent storage directories in {volume_mount_path}")
|
1659
|
-
|
1660
|
-
# Run initialization commands
|
1661
|
-
for i, init_cmd in enumerate(init_commands, 1):
|
1662
|
-
print(f"📋 Running init command {i}/{len(init_commands)}: {init_cmd}")
|
1663
|
-
success, stdout, stderr = run_command(init_cmd, show_output=False)
|
1664
|
-
if not success:
|
1665
|
-
print(f"⚠️ Init command failed: {stderr}")
|
1666
|
-
|
1667
|
-
print("✅ Environment initialization completed")
|
1668
|
-
|
1669
|
-
print("📦 Installing basic tools...")
|
1670
|
-
run_command("apt-get update && apt-get install -y git curl wget")
|
1671
|
-
|
1672
|
-
print("📦 Installing uv with pip...")
|
1673
|
-
run_command("pip install uv")
|
1674
|
-
|
1675
|
-
# Set uv path to system installation
|
1676
|
-
uv_path = "uv"
|
1677
|
-
|
1678
|
-
# Test if uv is available and working
|
1679
|
-
test_uv_cmd = f"{uv_path} --version || echo 'uv not found'"
|
1680
|
-
test_success, test_stdout, test_stderr = run_command(test_uv_cmd)
|
1681
|
-
if not test_success or 'uv not found' in test_stdout:
|
1682
|
-
print("⚠️ uv installation not found in system path, trying alternative installation...")
|
1683
|
-
# Try alternative installation method
|
1684
|
-
print("📦 Installing uv using the official installer...")
|
1685
|
-
run_command("curl -LsSf https://astral.sh/uv/install.sh | sh")
|
1686
|
-
run_command("source $HOME/.local/bin/env")
|
1687
|
-
run_command('export PATH="$HOME/.local/bin:$PATH"')
|
1688
|
-
|
1689
|
-
# Update path to the local installation
|
1690
|
-
uv_path = "$HOME/.local/bin/uv"
|
1691
|
-
|
1692
|
-
# Test again
|
1693
|
-
test_uv_cmd = f"{uv_path} --version || echo 'uv not found'"
|
1694
|
-
test_success, test_stdout, test_stderr = run_command(test_uv_cmd)
|
1695
|
-
if not test_success or 'uv not found' in test_stdout:
|
1696
|
-
print("⚠️ uv installation still failed, using standard pip")
|
1697
|
-
uv_path = ""
|
1698
|
-
else:
|
1699
|
-
print(f"✅ uv installed successfully via alternative method: {test_stdout.strip()}")
|
1700
|
-
else:
|
1701
|
-
print(f"✅ uv installed successfully via pip: {test_stdout.strip()}")
|
1702
|
-
|
1703
|
-
# Initialize repo_clone_dir for use throughout the function
|
1704
|
-
repo_clone_dir = "/root" # Always use home directory for repositories
|
1705
|
-
|
1706
|
-
# Clone repository if URL is provided
|
1707
|
-
if repo_url:
|
1708
|
-
try:
|
1709
|
-
# Extract repo name from URL
|
1710
|
-
repo_name_from_url = repo_name or repo_url.split('/')[-1].replace('.git', '')
|
1711
|
-
|
1712
|
-
print(f"📥 Cloning repository in Modal container: {repo_url}")
|
1713
|
-
|
1714
|
-
# Determine the best location for the repository
|
1715
|
-
repo_clone_dir = "/root" # Always use home directory for repositories
|
1716
|
-
print(f"📦 Using home directory for repository: {repo_clone_dir}")
|
1717
|
-
|
1718
|
-
# Ensure we're in the home directory and update current directory tracking
|
1719
|
-
cd_success, cd_stdout, cd_stderr = run_command(f"cd {repo_clone_dir}", show_output=False)
|
1720
|
-
if cd_success:
|
1721
|
-
current_dir = repo_clone_dir
|
1722
|
-
print(f"📂 Successfully changed to: {repo_clone_dir}")
|
1723
|
-
else:
|
1724
|
-
print(f"⚠️ Failed to change to {repo_clone_dir}: {cd_stderr}")
|
1725
|
-
current_dir = "/"
|
1726
|
-
|
1727
|
-
# First, list current directory contents for debugging
|
1728
|
-
print("📂 Current directory contents before cloning:")
|
1729
|
-
run_command("pwd && ls -la", show_output=True)
|
1730
|
-
|
1731
|
-
# Check if repository already exists in current location
|
1732
|
-
print(f"🔍 Checking if {repo_name_from_url} directory exists...")
|
1733
|
-
|
1734
|
-
# First ensure we're in the right directory and check with absolute path
|
1735
|
-
check_cmd = f"cd {repo_clone_dir} && test -d {repo_name_from_url}"
|
1736
|
-
success, stdout, stderr = run_command(check_cmd, show_output=False, retry_count=0, max_retries=0)
|
1737
|
-
|
1738
|
-
# The directory exists if the test command succeeds (exit code 0)
|
1739
|
-
repo_exists = success
|
1740
|
-
print(f"📂 Repository check result: exists={repo_exists} (exit code: {0 if success else 1})")
|
1741
|
-
print(f"📂 Checking in directory: {repo_clone_dir}/{repo_name_from_url}")
|
1742
|
-
|
1743
|
-
if repo_exists:
|
1744
|
-
print(f"📂 Repository directory already exists: {repo_name_from_url}")
|
1745
|
-
# Check if it's actually a git repository - disable retries to avoid bad debugging
|
1746
|
-
git_check_cmd = f"cd {repo_clone_dir}/{repo_name_from_url} && git status"
|
1747
|
-
git_check_success, git_stdout, git_stderr = run_command(git_check_cmd, show_output=False, retry_count=0, max_retries=0)
|
1748
|
-
if git_check_success:
|
1749
|
-
print(f"✅ Valid git repository found, using existing: {repo_name_from_url}")
|
1750
|
-
else:
|
1751
|
-
print(f"⚠️ Directory exists but is not a valid git repository, removing and re-cloning...")
|
1752
|
-
remove_cmd = f"cd {repo_clone_dir} && rm -rf {repo_name_from_url}"
|
1753
|
-
run_command(remove_cmd, show_output=False)
|
1754
|
-
repo_exists = False
|
1755
|
-
|
1756
|
-
if not repo_exists:
|
1757
|
-
print(f"📥 Repository does not exist, proceeding with clone...")
|
1758
|
-
print(f"📥 Cloning repository: {repo_url}")
|
1759
|
-
print(f"📥 Repository name will be: {repo_name_from_url}")
|
1760
|
-
print(f"📥 Clone location: {repo_clone_dir}")
|
1761
|
-
|
1762
|
-
# Ensure we're in the right directory before cloning
|
1763
|
-
run_command(f"cd {repo_clone_dir}", show_output=False)
|
1764
|
-
|
1765
|
-
# Execute the git clone command with verbose output - use absolute path, disable retries
|
1766
|
-
clone_cmd = f"cd {repo_clone_dir} && git clone {repo_url}"
|
1767
|
-
clone_success, clone_stdout, clone_stderr = run_command(clone_cmd, show_output=True, retry_count=0, max_retries=0)
|
1768
|
-
|
1769
|
-
print(f"📥 Clone command completed. Success: {clone_success}")
|
1770
|
-
if clone_stdout.strip():
|
1771
|
-
print(f"📥 Clone stdout: {clone_stdout.strip()}")
|
1772
|
-
if clone_stderr.strip():
|
1773
|
-
print(f"📥 Clone stderr: {clone_stderr.strip()}")
|
1774
|
-
|
1775
|
-
if not clone_success:
|
1776
|
-
print(f"❌ Failed to clone repository: {clone_stderr}")
|
1777
|
-
print("🔄 Trying alternative clone methods...")
|
1778
|
-
|
1779
|
-
# Try with different git options - use absolute path, disable retries
|
1780
|
-
print("🔄 Attempting shallow clone...")
|
1781
|
-
shallow_clone_cmd = f"cd {repo_clone_dir} && git clone --depth 1 {repo_url}"
|
1782
|
-
clone_success, clone_stdout, clone_stderr = run_command(shallow_clone_cmd, show_output=True, retry_count=0, max_retries=0)
|
1783
|
-
|
1784
|
-
print(f"📥 Shallow clone command completed. Success: {clone_success}")
|
1785
|
-
if clone_stdout.strip():
|
1786
|
-
print(f"📥 Shallow clone stdout: {clone_stdout.strip()}")
|
1787
|
-
if clone_stderr.strip():
|
1788
|
-
print(f"📥 Shallow clone stderr: {clone_stderr.strip()}")
|
1789
|
-
|
1790
|
-
if not clone_success:
|
1791
|
-
print(f"❌ Alternative clone also failed: {clone_stderr}")
|
1792
|
-
print("⚠️ Continuing without repository...")
|
1793
|
-
repo_name_from_url = None
|
1794
|
-
else:
|
1795
|
-
print(f"✅ Repository cloned successfully with shallow clone")
|
1796
|
-
else:
|
1797
|
-
print(f"✅ Repository cloned successfully")
|
1798
|
-
else:
|
1799
|
-
print(f"📂 Repository already exists, skipping clone")
|
1800
|
-
|
1801
|
-
# Verify repository directory exists and change to it
|
1802
|
-
if repo_name_from_url:
|
1803
|
-
print("📂 Verifying repository directory...")
|
1804
|
-
|
1805
|
-
# List available directories for debugging
|
1806
|
-
print("📂 Available directories after cloning:")
|
1807
|
-
run_command("ls -la", show_output=True)
|
1808
|
-
|
1809
|
-
# Check if the repository directory exists using simple test
|
1810
|
-
check_success, _, _ = run_command(f"test -d {repo_name_from_url}", show_output=False)
|
1811
|
-
|
1812
|
-
if check_success:
|
1813
|
-
print(f"📂 Repository directory confirmed: {repo_name_from_url}")
|
1814
|
-
# Change to the repository directory
|
1815
|
-
cd_success, cd_stdout, cd_stderr = run_command(f"cd {repo_name_from_url}")
|
1816
|
-
if cd_success:
|
1817
|
-
print(f"📂 Successfully changed to repository directory: {repo_name_from_url}")
|
1818
|
-
repo_dir_name = f"{repo_clone_dir}/{repo_name_from_url}" if repo_clone_dir != "/" else repo_name_from_url
|
1819
|
-
else:
|
1820
|
-
print(f"⚠️ Failed to change to repository directory: {cd_stderr}")
|
1821
|
-
repo_dir_name = repo_clone_dir
|
1822
|
-
else:
|
1823
|
-
print(f"⚠️ Repository directory not found after cloning: {repo_name_from_url}")
|
1824
|
-
print("🔍 Looking for alternative directories...")
|
1825
|
-
|
1826
|
-
# Look for any git repositories
|
1827
|
-
search_success, search_stdout, search_stderr = run_command("find . -maxdepth 1 -type d -name '.git' -exec dirname {} \\;", show_output=False)
|
1828
|
-
|
1829
|
-
if search_success and search_stdout.strip():
|
1830
|
-
found_dirs = [d.replace('./', '') for d in search_stdout.strip().split('\n') if d.strip() and d != '.']
|
1831
|
-
if found_dirs:
|
1832
|
-
repo_dir_name = f"{repo_clone_dir}/{found_dirs[0]}" if repo_clone_dir != "/" else found_dirs[0]
|
1833
|
-
print(f"📂 Found git repository: {repo_dir_name}")
|
1834
|
-
run_command(f"cd {found_dirs[0]}")
|
1835
|
-
else:
|
1836
|
-
repo_dir_name = repo_clone_dir
|
1837
|
-
print("📂 Using current directory")
|
1838
|
-
else:
|
1839
|
-
repo_dir_name = repo_clone_dir
|
1840
|
-
print("📂 Using current directory")
|
1841
|
-
else:
|
1842
|
-
repo_dir_name = repo_clone_dir
|
1843
|
-
print("📂 No valid repository, using current directory")
|
1844
|
-
|
1845
|
-
# Show final directory status
|
1846
|
-
print("📂 Final directory status:")
|
1847
|
-
run_command("pwd && ls -la", show_output=True)
|
1848
|
-
|
1849
|
-
except Exception as e:
|
1850
|
-
print(f"❌ Error during repository cloning: {e}")
|
1851
|
-
print(f"❌ Exception type: {type(e).__name__}")
|
1852
|
-
print("⚠️ Continuing without repository...")
|
1853
|
-
repo_dir_name = repo_clone_dir
|
1854
|
-
run_command("pwd && ls -la", show_output=True)
|
1855
|
-
else:
|
1856
|
-
repo_dir_name = repo_clone_dir
|
1857
|
-
print("📂 No repository URL provided, using current directory")
|
1858
|
-
run_command("pwd && ls -la", show_output=True)
|
1859
|
-
|
1860
|
-
# Run setup commands if provided - now we're already in the repository directory
|
1861
|
-
if setup_commands:
|
1862
|
-
print("⚙️ Running user setup commands in Modal container...")
|
1863
|
-
|
1864
|
-
# Check if git clone is already in the setup commands
|
1865
|
-
has_git_clone = any('git clone' in cmd for cmd in setup_commands)
|
1866
|
-
|
1867
|
-
# Only add git clone if:
|
1868
|
-
# 1. No git clone in setup commands AND
|
1869
|
-
# 2. We have a repo URL AND
|
1870
|
-
# 3. Repository was NOT already cloned successfully
|
1871
|
-
if not has_git_clone and repo_url and not repo_exists:
|
1872
|
-
print("📥 Git clone not found in setup commands and repository not yet cloned, adding it...")
|
1873
|
-
clone_cmd = f"git clone {repo_url}"
|
1874
|
-
setup_commands = [clone_cmd] + setup_commands
|
1875
|
-
print(f"📥 Added git clone command: {clone_cmd}")
|
1876
|
-
elif has_git_clone and repo_exists:
|
1877
|
-
print("⚠️ Repository already cloned successfully, removing duplicate git clone from setup commands...")
|
1878
|
-
# Remove git clone commands since repository is already cloned
|
1879
|
-
setup_commands = [cmd for cmd in setup_commands if 'git clone' not in cmd]
|
1880
|
-
print(f"📥 Removed duplicate git clone commands")
|
1881
|
-
elif repo_exists:
|
1882
|
-
print("📂 Repository already cloned successfully, skipping git clone in setup commands")
|
1883
|
-
|
1884
|
-
# Print all commands that will be executed
|
1885
|
-
print("📋 Setup commands to execute in container:")
|
1886
|
-
for i, cmd in enumerate(setup_commands, 1):
|
1887
|
-
print(f" {i}. {cmd}")
|
1888
|
-
|
1889
|
-
print(f"\n🚀 Executing commands in container directory: {repo_dir_name}")
|
1890
|
-
|
1891
|
-
# Ensure we start in the /root directory and reset current_dir
|
1892
|
-
current_dir = "/root"
|
1893
|
-
print(f"📂 Resetting working directory to: {current_dir}")
|
1894
|
-
|
1895
|
-
# Verify we can access /root directory
|
1896
|
-
verify_success, verify_output, _ = run_command("pwd", show_output=True)
|
1897
|
-
if verify_success:
|
1898
|
-
print(f"✅ Current directory verified: {verify_output.strip()}")
|
1899
|
-
|
1900
|
-
# Execute each command individually in the repository directory within the container
|
1901
|
-
for i, cmd in enumerate(setup_commands, 1):
|
1902
|
-
print(f"\n📋 Executing command {i}/{len(setup_commands)} in container: {cmd}")
|
1903
|
-
|
1904
|
-
# If this is a cd command, just run it directly
|
1905
|
-
if cmd.strip().startswith('cd '):
|
1906
|
-
# Execute the command directly (we're already in the right directory)
|
1907
|
-
success, stdout, stderr = run_command(cmd)
|
1908
|
-
continue
|
1909
|
-
|
1910
|
-
# For git clone commands, handle as before
|
1911
|
-
if 'git clone' in cmd:
|
1912
|
-
# Execute the command directly
|
1913
|
-
success, stdout, stderr = run_command(cmd)
|
1914
|
-
|
1915
|
-
if success:
|
1916
|
-
print(f"✅ Command executed successfully in container: {cmd}")
|
1917
|
-
if stdout.strip():
|
1918
|
-
print(f"📄 Output: {stdout.strip()}")
|
1919
|
-
|
1920
|
-
# Handle repository directory change as before
|
1921
|
-
print("📂 Git clone detected, attempting to change to repository directory...")
|
1922
|
-
# Extract repository name from the clone command
|
1923
|
-
parts = cmd.split()
|
1924
|
-
if len(parts) >= 3:
|
1925
|
-
clone_url = parts[2] # git clone <url>
|
1926
|
-
target_dir = clone_url.split('/')[-1].replace('.git', '')
|
1927
|
-
|
1928
|
-
# Check if we're already in the target directory
|
1929
|
-
if current_dir.endswith(f"/{target_dir}") or current_dir == f"/{target_dir}":
|
1930
|
-
print(f"📂 Already in target directory: {current_dir}")
|
1931
|
-
else:
|
1932
|
-
# The repository should now be at current_dir/target_dir
|
1933
|
-
repo_full_path = f"{current_dir.rstrip('/')}/{target_dir}"
|
1934
|
-
|
1935
|
-
# Check if directory exists using absolute path
|
1936
|
-
dir_check_success, _, _ = run_command(f"test -d '{repo_full_path}'", show_output=False)
|
1937
|
-
if dir_check_success:
|
1938
|
-
current_dir = repo_full_path
|
1939
|
-
print(f"📂 Successfully changed current directory to: {current_dir}")
|
1940
|
-
# Verify the change worked
|
1941
|
-
verify_success, verify_output, _ = run_command("pwd", show_output=True)
|
1942
|
-
if verify_success:
|
1943
|
-
print(f"✅ Directory change verified: {verify_output.strip()}")
|
1944
|
-
# List contents to confirm we're in the right place
|
1945
|
-
run_command("ls -la", show_output=True)
|
1946
|
-
|
1947
|
-
# Initialize git submodules if they exist
|
1948
|
-
print("📦 Checking for git submodules...")
|
1949
|
-
submodule_check_success, _, _ = run_command("test -f .gitmodules", show_output=False)
|
1950
|
-
if submodule_check_success:
|
1951
|
-
print("📦 Git submodules found, initializing...")
|
1952
|
-
run_command("git submodule update --init --recursive", show_output=True)
|
1953
|
-
print("✅ Git submodules initialized")
|
1954
|
-
else:
|
1955
|
-
print("📦 No git submodules found")
|
1956
|
-
else:
|
1957
|
-
print("⚠️ Directory change verification failed")
|
1958
|
-
else:
|
1959
|
-
print(f"⚠️ Repository directory {repo_full_path} not found after clone")
|
1960
|
-
print("🔍 Checking what was actually created:")
|
1961
|
-
run_command("find . -maxdepth 2 -name '*.git' -type d", show_output=True)
|
1962
|
-
run_command("ls -la", show_output=True)
|
1963
|
-
else:
|
1964
|
-
# For Python commands, make sure we're in the correct directory first
|
1965
|
-
if cmd.startswith('python '):
|
1966
|
-
# Fix the directory path issue - ensure we're in the correct repository directory
|
1967
|
-
# Check if we're in a nested directory that matches the repo name
|
1968
|
-
repo_dir_parts = current_dir.split('/')
|
1969
|
-
if len(repo_dir_parts) >= 2 and repo_dir_parts[-1] == repo_dir_parts[-2]:
|
1970
|
-
# We're in a nested directory like /root/nanoGPT/nanoGPT
|
1971
|
-
# Move up one level to /root/nanoGPT
|
1972
|
-
print(f"⚠️ Detected nested directory structure: {current_dir}")
|
1973
|
-
parent_dir = '/'.join(repo_dir_parts[:-1])
|
1974
|
-
print(f"🔄 Moving to parent directory: {parent_dir}")
|
1975
|
-
cd_success, _, _ = run_command(f"cd {parent_dir}", show_output=False)
|
1976
|
-
if cd_success:
|
1977
|
-
current_dir = parent_dir
|
1978
|
-
print(f"📂 Updated current directory to: {current_dir}")
|
1979
|
-
|
1980
|
-
# Execute the command directly (we're already in the right directory)
|
1981
|
-
success, stdout, stderr = run_command(cmd)
|
1982
|
-
|
1983
|
-
if success:
|
1984
|
-
print(f"✅ Command executed successfully in container: {cmd}")
|
1985
|
-
if stdout.strip():
|
1986
|
-
print(f"📄 Output: {stdout.strip()}")
|
1987
|
-
else:
|
1988
|
-
print(f"❌ Command failed in container: {cmd}")
|
1989
|
-
print(f"❌ Error: {stderr}")
|
1990
|
-
# Continue with next command even if this one failed
|
1991
|
-
|
1992
|
-
# Show final status of the repository directory in container
|
1993
|
-
print(f"\n📂 Final directory contents in container ({repo_dir_name}):")
|
1994
|
-
run_command("pwd && ls -la")
|
1995
|
-
|
1996
|
-
else:
|
1997
|
-
print("⚠️ No setup commands provided.")
|
1998
|
-
|
1999
|
-
# If no setup commands but we have a repo URL, at least try to clone it
|
2000
|
-
if repo_url and not repo_exists:
|
2001
|
-
print("📥 No setup commands provided, but cloning repository anyway...")
|
2002
|
-
clone_success, _, _ = run_command(f"git clone {repo_url}", show_output=True)
|
2003
|
-
if clone_success:
|
2004
|
-
print(f"✅ Repository cloned successfully")
|
2005
|
-
# Try to change to the repository directory
|
2006
|
-
if repo_name_from_url:
|
2007
|
-
run_command(f"cd {repo_name_from_url}")
|
2008
|
-
print("📂 Final directory status after clone:")
|
2009
|
-
run_command("pwd && ls -la", show_output=True)
|
2010
|
-
|
2011
|
-
# Write container ID to file for future reference
|
2012
|
-
with open(os.path.expanduser("~/.modal_last_container_id"), "w") as f:
|
2013
|
-
f.write(container_id)
|
2014
|
-
|
2015
|
-
# Print connection instructions
|
2016
|
-
print(f"✅ Sandbox created successfully!")
|
2017
|
-
print(f"📋 Sandbox ID: {sandbox_id}")
|
2018
|
-
print(f"📋 Container ID: {container_id}")
|
2019
|
-
if volume:
|
2020
|
-
print(f"📦 Volume: {volume_name} (mounted at {volume_mount_path})")
|
2021
|
-
print(f"💾 Persistent storage available for pip and uv caches")
|
2022
|
-
print(f"📂 Repositories will be cloned in home directory (/root) for faster access")
|
2023
|
-
print("🔗 To connect to this container, run:")
|
2024
|
-
print(f"modal container exec --pty {container_id} bash")
|
2025
|
-
print("⏳ Sandbox will remain running until you terminate it with:")
|
2026
|
-
print(f"modal sandbox terminate {sandbox_id}")
|
2027
|
-
|
2028
|
-
# Try to open a new terminal window and connect to the container
|
2029
|
-
if container_id:
|
2030
|
-
print("🖥️ Attempting to open new terminal window...")
|
2031
|
-
# Use osascript to open a new terminal with the modal shell command
|
2032
|
-
terminal_script = f'''
|
2033
|
-
tell application "Terminal"
|
2034
|
-
do script "modal shell {container_id}"
|
2035
|
-
activate
|
2036
|
-
end tell
|
2037
|
-
'''
|
2038
|
-
|
2039
|
-
try:
|
2040
|
-
result = subprocess.run(['osascript', '-e', terminal_script],
|
2041
|
-
capture_output=True, text=True, timeout=30)
|
2042
|
-
if result.returncode == 0:
|
2043
|
-
print("✅ New terminal window opened successfully")
|
2044
|
-
else:
|
2045
|
-
print(f"⚠️ Failed to open terminal window: {result.stderr}")
|
2046
|
-
|
2047
|
-
# Try alternative approach with iTerm2 if Terminal failed
|
2048
|
-
print("🔄 Trying with iTerm2 instead...")
|
2049
|
-
iterm_script = f'''
|
2050
|
-
tell application "iTerm"
|
2051
|
-
create window with default profile
|
2052
|
-
tell current session of current window
|
2053
|
-
write text "modal shell {container_id}"
|
2054
|
-
end tell
|
2055
|
-
end tell
|
2056
|
-
'''
|
2057
|
-
|
2058
|
-
try:
|
2059
|
-
iterm_result = subprocess.run(['osascript', '-e', iterm_script],
|
2060
|
-
capture_output=True, text=True, timeout=30)
|
2061
|
-
if iterm_result.returncode == 0:
|
2062
|
-
print("✅ New iTerm2 window opened successfully")
|
2063
|
-
else:
|
2064
|
-
print(f"⚠️ Failed to open iTerm2 window: {iterm_result.stderr}")
|
2065
|
-
print("📝 You can manually connect using:")
|
2066
|
-
print(f" modal shell {container_id}")
|
2067
|
-
except Exception as e:
|
2068
|
-
print(f"⚠️ Error opening iTerm2: {e}")
|
2069
|
-
print("📝 You can manually connect using:")
|
2070
|
-
print(f" modal shell {container_id}")
|
2071
|
-
except subprocess.TimeoutExpired:
|
2072
|
-
print("⚠️ Terminal opening timed out")
|
2073
|
-
except Exception as e:
|
2074
|
-
print(f"⚠️ Error opening terminal: {e}")
|
2075
|
-
print("📝 You can manually connect using:")
|
2076
|
-
print(f" modal shell {container_id}")
|
2077
|
-
|
2078
|
-
# Also provide manual connection instructions
|
2079
|
-
print("\n" + "="*60)
|
2080
|
-
print("🚀 SANDBOX READY!")
|
2081
|
-
print("="*60)
|
2082
|
-
print(f"📋 Sandbox ID: {sandbox_id}")
|
2083
|
-
print(f"🆔 Container ID: {container_id}")
|
2084
|
-
if volume:
|
2085
|
-
print(f"💾 Volume: {volume_name} mounted at {volume_mount_path}")
|
2086
|
-
print("📁 Persistent storage available for caches and repositories")
|
2087
|
-
print("\n🔗 To connect to your container, run:")
|
2088
|
-
print(f" modal shell {container_id}")
|
2089
|
-
print("="*60)
|
2090
|
-
else:
|
2091
|
-
print("❌ No container ID available for connection")
|
2092
|
-
|
2093
|
-
return {
|
2094
|
-
"run_command": run_command,
|
2095
|
-
"current_dir": current_dir,
|
2096
|
-
"execution_history": execution_history,
|
2097
|
-
"container_id": container_id,
|
2098
|
-
"sandbox_id": sandbox_id
|
2099
|
-
}
|
2100
|
-
|
2101
|
-
|
2102
|
-
def handle_interactive_input(prompt, is_password=False):
|
2103
|
-
"""Handle interactive input from the user with optional password masking"""
|
2104
|
-
print("\n" + "="*60)
|
2105
|
-
print(f"{prompt}")
|
2106
|
-
print("="*60)
|
2107
|
-
|
2108
|
-
try:
|
2109
|
-
if is_password:
|
2110
|
-
user_input = getpass.getpass("Input (hidden): ").strip()
|
2111
|
-
else:
|
2112
|
-
user_input = input("Input: ").strip()
|
2113
|
-
|
2114
|
-
if not user_input:
|
2115
|
-
print("❌ No input provided.")
|
2116
|
-
return None
|
2117
|
-
print("✅ Input received successfully!")
|
2118
|
-
return user_input
|
2119
|
-
except KeyboardInterrupt:
|
2120
|
-
print("\n❌ Input cancelled by user.")
|
2121
|
-
return None
|
2122
|
-
except Exception as e:
|
2123
|
-
print(f"❌ Error getting input: {e}")
|
2124
|
-
return None
|
2125
|
-
|
2126
|
-
def generate_random_password(length=16):
|
2127
|
-
"""Generate a random password for SSH access"""
|
2128
|
-
alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
|
2129
|
-
password = ''.join(secrets.choice(alphabet) for i in range(length))
|
2130
|
-
return password
|
2131
|
-
|
2132
|
-
# First, add the standalone ssh_container function at the module level, before the create_modal_ssh_container function
|
2133
|
-
|
2134
|
-
# Define a module-level ssh container function
|
2135
|
-
ssh_app = modal.App("ssh-container-app")
|
2136
|
-
|
2137
|
-
@ssh_app.function(
|
2138
|
-
image=modal.Image.debian_slim()
|
2139
|
-
.apt_install(
|
2140
|
-
"openssh-server", "sudo", "curl", "wget", "vim", "htop", "git",
|
2141
|
-
"python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
|
2142
|
-
"gpg", "ca-certificates", "software-properties-common"
|
2143
|
-
)
|
2144
|
-
.pip_install("uv", "modal") # Fast Python package installer and Modal
|
2145
|
-
.run_commands(
|
2146
|
-
# Create SSH directory
|
2147
|
-
"mkdir -p /var/run/sshd",
|
2148
|
-
"mkdir -p /root/.ssh",
|
2149
|
-
"chmod 700 /root/.ssh",
|
2150
|
-
|
2151
|
-
# Configure SSH server
|
2152
|
-
"sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config",
|
2153
|
-
"sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config",
|
2154
|
-
"sed -i 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /etc/ssh/sshd_config",
|
2155
|
-
|
2156
|
-
# SSH keep-alive settings
|
2157
|
-
"echo 'ClientAliveInterval 60' >> /etc/ssh/sshd_config",
|
2158
|
-
"echo 'ClientAliveCountMax 3' >> /etc/ssh/sshd_config",
|
2159
|
-
|
2160
|
-
# Generate SSH host keys
|
2161
|
-
"ssh-keygen -A",
|
2162
|
-
|
2163
|
-
# Install Modal CLI
|
2164
|
-
"pip install modal",
|
2165
|
-
|
2166
|
-
# Set up a nice bash prompt
|
2167
|
-
"echo 'export PS1=\"\\[\\e[1;32m\\]modal:\\[\\e[1;34m\\]\\w\\[\\e[0m\\]$ \"' >> /root/.bashrc",
|
2168
|
-
),
|
2169
|
-
timeout=3600, # Default 1 hour timeout
|
2170
|
-
gpu="a10g", # Default GPU - this will be overridden when called
|
2171
|
-
cpu=2,
|
2172
|
-
memory=8192,
|
2173
|
-
serialized=True,
|
2174
|
-
)
|
2175
|
-
def ssh_container_function(ssh_password, repo_url=None, repo_name=None, setup_commands=None):
|
2176
|
-
import subprocess
|
2177
|
-
import time
|
2178
|
-
import os
|
2179
|
-
|
2180
|
-
# Set root password
|
2181
|
-
subprocess.run(["bash", "-c", f"echo 'root:{ssh_password}' | chpasswd"], check=True)
|
2182
|
-
|
2183
|
-
# Start SSH service
|
2184
|
-
subprocess.run(["service", "ssh", "start"], check=True)
|
2185
|
-
|
2186
|
-
# Setup environment
|
2187
|
-
os.environ['PS1'] = r'\[\e[1;32m\]modal:\[\e[1;34m\]\w\[\e[0m\]$ '
|
2188
|
-
|
2189
|
-
# Clone repository if provided
|
2190
|
-
if repo_url:
|
2191
|
-
repo_name_from_url = repo_name or repo_url.split('/')[-1].replace('.git', '')
|
2192
|
-
print(f"📥 Cloning repository: {repo_url}")
|
2193
|
-
|
2194
|
-
try:
|
2195
|
-
subprocess.run(["git", "clone", repo_url], check=True, cwd="/root")
|
2196
|
-
print(f"✅ Repository cloned successfully: {repo_name_from_url}")
|
2197
|
-
|
2198
|
-
# Change to repository directory
|
2199
|
-
repo_dir = f"/root/{repo_name_from_url}"
|
2200
|
-
if os.path.exists(repo_dir):
|
2201
|
-
os.chdir(repo_dir)
|
2202
|
-
print(f"📂 Changed to repository directory: {repo_dir}")
|
2203
|
-
|
2204
|
-
except subprocess.CalledProcessError as e:
|
2205
|
-
print(f"❌ Failed to clone repository: {e}")
|
2206
|
-
|
2207
|
-
# Run setup commands if provided
|
2208
|
-
if setup_commands:
|
2209
|
-
print(f"⚙️ Running {len(setup_commands)} setup commands...")
|
2210
|
-
for i, cmd in enumerate(setup_commands, 1):
|
2211
|
-
print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
|
2212
|
-
try:
|
2213
|
-
result = subprocess.run(cmd, shell=True, check=True,
|
2214
|
-
capture_output=True, text=True)
|
2215
|
-
if result.stdout:
|
2216
|
-
print(f"✅ Output: {result.stdout}")
|
2217
|
-
except subprocess.CalledProcessError as e:
|
2218
|
-
print(f"❌ Command failed: {e}")
|
2219
|
-
if e.stderr:
|
2220
|
-
print(f"❌ Error: {e.stderr}")
|
2221
|
-
|
2222
|
-
# Get container info
|
2223
|
-
print("🔍 Container started successfully!")
|
2224
|
-
print(f"🆔 Container ID: {os.environ.get('MODAL_TASK_ID', 'unknown')}")
|
2225
|
-
|
2226
|
-
# Keep the container running
|
2227
|
-
while True:
|
2228
|
-
time.sleep(30)
|
2229
|
-
# Check if SSH service is still running
|
2230
|
-
try:
|
2231
|
-
subprocess.run(["service", "ssh", "status"], check=True,
|
2232
|
-
capture_output=True)
|
2233
|
-
except subprocess.CalledProcessError:
|
2234
|
-
print("⚠️ SSH service stopped, restarting...")
|
2235
|
-
subprocess.run(["service", "ssh", "start"], check=True)
|
2236
|
-
|
2237
|
-
# Now modify the create_modal_ssh_container function to use the standalone ssh_container_function
|
2238
|
-
|
2239
|
-
def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_commands=None,
|
2240
|
-
volume_name=None, timeout_minutes=60, ssh_password=None):
|
2241
|
-
"""Create a Modal SSH container with GPU support and tunneling"""
|
2242
|
-
|
2243
|
-
# Check if Modal is authenticated
|
2244
|
-
try:
|
2245
|
-
# Print all environment variables for debugging
|
2246
|
-
print("🔍 DEBUG: Checking environment variables")
|
2247
|
-
modal_token_id = os.environ.get("MODAL_TOKEN_ID")
|
2248
|
-
modal_token = os.environ.get("MODAL_TOKEN")
|
2249
|
-
print(f"🔍 token exists: {'Yes' if modal_token_id else 'No'}")
|
2250
|
-
print(f"🔍 token exists: {'Yes' if modal_token else 'No'}")
|
2251
|
-
if modal_token_id:
|
2252
|
-
print(f"🔍 token length: {len(modal_token_id)}")
|
2253
|
-
if modal_token:
|
2254
|
-
print(f"🔍 token length: {len(modal_token)}")
|
2255
|
-
|
2256
|
-
# Try to access Modal token to check authentication
|
2257
|
-
try:
|
2258
|
-
# Check if token is set in environment
|
2259
|
-
modal_token_id = os.environ.get("MODAL_TOKEN_ID")
|
2260
|
-
if not modal_token_id:
|
2261
|
-
print("⚠️ MODAL_TOKEN_ID not found in environment.")
|
2262
|
-
# Try to get from MODAL_TOKEN
|
2263
|
-
modal_token = os.environ.get("MODAL_TOKEN")
|
2264
|
-
if modal_token:
|
2265
|
-
print("✅ Found token in environment variable")
|
2266
|
-
os.environ["MODAL_TOKEN_ID"] = modal_token
|
2267
|
-
modal_token_id = modal_token
|
2268
|
-
print(f"✅ Set token (length: {len(modal_token)})")
|
2269
|
-
|
2270
|
-
if modal_token_id:
|
2271
|
-
print(f"✅ token found (length: {len(modal_token_id)})")
|
2272
|
-
|
2273
|
-
# Use the comprehensive fix_modal_token script
|
2274
|
-
try:
|
2275
|
-
# Execute the fix_modal_token.py script
|
2276
|
-
import subprocess
|
2277
|
-
print(f"🔄 Running fix_modal_token.py to set up Modal token...")
|
2278
|
-
result = subprocess.run(
|
2279
|
-
["python", os.path.join(os.path.dirname(__file__), "fix_modal_token.py")],
|
2280
|
-
capture_output=True,
|
2281
|
-
text=True
|
2282
|
-
)
|
2283
|
-
|
2284
|
-
# Print the output
|
2285
|
-
print(result.stdout)
|
2286
|
-
|
2287
|
-
if result.returncode != 0:
|
2288
|
-
print(f"⚠️ Warning: fix_modal_token.py exited with code {result.returncode}")
|
2289
|
-
if result.stderr:
|
2290
|
-
print(f"Error: {result.stderr}")
|
2291
|
-
|
2292
|
-
print(f"✅ token setup completed")
|
2293
|
-
except Exception as e:
|
2294
|
-
print(f"⚠️ Error running fix_modal_token.py: {e}")
|
2295
|
-
else:
|
2296
|
-
print("❌ No token found in environment variables")
|
2297
|
-
# Try to get from file as a last resort
|
2298
|
-
try:
|
2299
|
-
home_dir = os.path.expanduser("~")
|
2300
|
-
modal_dir = os.path.join(home_dir, ".modal")
|
2301
|
-
token_file = os.path.join(modal_dir, "token.json")
|
2302
|
-
if os.path.exists(token_file):
|
2303
|
-
print(f"🔍 Found Modal token file at {token_file}")
|
2304
|
-
with open(token_file, 'r') as f:
|
2305
|
-
import json
|
2306
|
-
token_data = json.load(f)
|
2307
|
-
if "token_id" in token_data:
|
2308
|
-
modal_token_id = token_data["token_id"]
|
2309
|
-
os.environ["MODAL_TOKEN_ID"] = modal_token_id
|
2310
|
-
os.environ["MODAL_TOKEN"] = modal_token_id
|
2311
|
-
print(f"✅ Loaded token from file (length: {len(modal_token_id)})")
|
2312
|
-
else:
|
2313
|
-
print("❌ Token file does not contain token_id")
|
2314
|
-
else:
|
2315
|
-
print("❌ token file not found")
|
2316
|
-
except Exception as e:
|
2317
|
-
print(f"❌ Error loading token from file: {e}")
|
2318
|
-
|
2319
|
-
if not os.environ.get("MODAL_TOKEN_ID"):
|
2320
|
-
print("❌ Could not find Modal token in any location")
|
2321
|
-
return None
|
2322
|
-
|
2323
|
-
except Exception as e:
|
2324
|
-
print(f"⚠️ Error checking Modal token: {e}")
|
2325
|
-
# Try to use the token from environment
|
2326
|
-
modal_token_id = os.environ.get("MODAL_TOKEN_ID")
|
2327
|
-
modal_token = os.environ.get("MODAL_TOKEN")
|
2328
|
-
if modal_token_id:
|
2329
|
-
print(f"🔄 Using token from environment (length: {len(modal_token_id)})")
|
2330
|
-
elif modal_token:
|
2331
|
-
print(f"🔄 Using token from environment (length: {len(modal_token)})")
|
2332
|
-
os.environ["MODAL_TOKEN_ID"] = modal_token
|
2333
|
-
modal_token_id = modal_token
|
2334
|
-
else:
|
2335
|
-
print("❌ No Modal token available. Cannot proceed.")
|
2336
|
-
return None
|
2337
|
-
|
2338
|
-
# Set it in both environment variables
|
2339
|
-
os.environ["MODAL_TOKEN_ID"] = modal_token_id
|
2340
|
-
os.environ["MODAL_TOKEN"] = modal_token_id
|
2341
|
-
print("✅ Set both token and id environment variables")
|
2342
|
-
except Exception as e:
|
2343
|
-
print(f"⚠️ Error checking Modal authentication: {e}")
|
2344
|
-
print("Continuing anyway, but Modal operations may fail")
|
2345
|
-
|
2346
|
-
# Generate a unique app name with timestamp to avoid conflicts
|
2347
|
-
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
2348
|
-
app_name = f"ssh-container-{timestamp}"
|
2349
|
-
|
2350
|
-
gpu_configs = {
|
2351
|
-
'T4': {'gpu': 't4', 'memory': 16},
|
2352
|
-
'L4': {'gpu': 'l4', 'memory': 24},
|
2353
|
-
'A10G': {'gpu': 'a10g', 'memory': 24},
|
2354
|
-
'A100-40GB': {'gpu': 'a100', 'memory': 40},
|
2355
|
-
'A100-80GB': {'gpu': 'a100-80gb', 'memory': 80},
|
2356
|
-
'L40S': {'gpu': 'l40s', 'memory': 48},
|
2357
|
-
'H100': {'gpu': 'h100', 'memory': 80},
|
2358
|
-
'H200': {'gpu': 'h200', 'memory': 141},
|
2359
|
-
'B200': {'gpu': 'b200', 'memory': 96}
|
2360
|
-
}
|
2361
|
-
|
2362
|
-
if gpu_type not in gpu_configs:
|
2363
|
-
print(f"⚠️ Unknown GPU type: {gpu_type}. Using A10G as default.")
|
2364
|
-
gpu_type = 'A10G'
|
2365
|
-
|
2366
|
-
gpu_spec = gpu_configs[gpu_type]
|
2367
|
-
print(f"🚀 Creating SSH container with {gpu_spec['gpu']} GPU ({gpu_spec['memory']}GB VRAM)")
|
2368
|
-
|
2369
|
-
# Generate or use provided SSH password
|
2370
|
-
if not ssh_password:
|
2371
|
-
ssh_password = generate_random_password()
|
2372
|
-
print(f"🔐 Generated SSH password: {ssh_password}")
|
2373
|
-
|
2374
|
-
# Setup volume if specified
|
2375
|
-
volume = None
|
2376
|
-
volume_mount_path = "/persistent"
|
2377
|
-
|
2378
|
-
if volume_name:
|
2379
|
-
print(f"📦 Setting up volume: {volume_name}")
|
2380
|
-
try:
|
2381
|
-
volume = modal.Volume.from_name(volume_name, create_if_missing=True)
|
2382
|
-
print(f"✅ Volume '{volume_name}' ready for use")
|
2383
|
-
except Exception as e:
|
2384
|
-
print(f"⚠️ Could not setup volume '{volume_name}': {e}")
|
2385
|
-
print("⚠️ Continuing without persistent volume")
|
2386
|
-
volume = None
|
2387
|
-
else:
|
2388
|
-
# Create a default volume for this session
|
2389
|
-
default_volume_name = f"ssh-vol-{timestamp}"
|
2390
|
-
print(f"📦 Creating default volume: {default_volume_name}")
|
2391
|
-
-        try:
-            volume = modal.Volume.from_name(default_volume_name, create_if_missing=True)
-            volume_name = default_volume_name
-            print(f"✅ Default volume '{default_volume_name}' created")
-        except Exception as e:
-            print(f"⚠️ Could not create default volume: {e}")
-            print("⚠️ Continuing without persistent volume")
-            volume = None
-
-    # Print debug info for authentication
-    print("🔍 Modal authentication debug info:")
-    modal_token = os.environ.get("MODAL_TOKEN_ID")
-    print(f" - token in env: {'Yes' if modal_token else 'No'}")
-    print(f" - Token length: {len(modal_token) if modal_token else 'N/A'}")
-
-    # Verify we can create a Modal app
-    try:
-        print("🔍 Testing app creation...")
-        app = modal.App(app_name)
-        print("✅ Created app successfully")
-    except Exception as e:
-        print(f"❌ Error creating app: {e}")
-        return None
-
-    # Create SSH-enabled image
-    try:
-        print("📦 Building SSH-enabled image...")
-        ssh_image = (
-            modal.Image.debian_slim()
-            .apt_install(
-                "openssh-server", "sudo", "curl", "wget", "vim", "htop", "git",
-                "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
-                "gpg", "ca-certificates", "software-properties-common"
-            )
-            .pip_install("uv", "modal")  # Fast Python package installer and Modal
-            .run_commands(
-                # Create SSH directory
-                "mkdir -p /var/run/sshd",
-                "mkdir -p /root/.ssh",
-                "chmod 700 /root/.ssh",
-
-                # Configure SSH server
-                "sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config",
-                "sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config",
-                "sed -i 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /etc/ssh/sshd_config",
-
-                # SSH keep-alive settings
-                "echo 'ClientAliveInterval 60' >> /etc/ssh/sshd_config",
-                "echo 'ClientAliveCountMax 3' >> /etc/ssh/sshd_config",
-
-                # Generate SSH host keys
-                "ssh-keygen -A",
-
-                # Set up a nice bash prompt
-                "echo 'export PS1=\"\\[\\e[1;32m\\]modal:\\[\\e[1;34m\\]\\w\\[\\e[0m\\]$ \"' >> /root/.bashrc",
-            )
-        )
-        print("✅ SSH image built successfully")
-    except Exception as e:
-        print(f"❌ Error building SSH image: {e}")
-        return None
-
-    # Configure volumes if available
-    volumes_config = {}
-    if volume:
-        volumes_config[volume_mount_path] = volume
-
-    # Define the SSH container function
-    @app.function(
-        image=ssh_image,
-        timeout=timeout_minutes * 60,  # Convert to seconds
-        gpu=gpu_spec['gpu'],
-        cpu=2,
-        memory=8192,
-        serialized=True,
-        volumes=volumes_config if volumes_config else None,
-    )
-    def ssh_container_function():
-        """Start SSH container with password authentication and optional setup."""
-        import subprocess
-        import time
-        import os
-
-        # Set root password
-        subprocess.run(["bash", "-c", f"echo 'root:{ssh_password}' | chpasswd"], check=True)
-
-        # Start SSH service
-        subprocess.run(["service", "ssh", "start"], check=True)
-
-        # Clone repository if provided
-        if repo_url:
-            repo_name_from_url = repo_name or repo_url.split('/')[-1].replace('.git', '')
-            print(f"📥 Cloning repository: {repo_url}")
-
-            try:
-                subprocess.run(["git", "clone", repo_url], check=True, cwd="/root")
-                print(f"✅ Repository cloned successfully: {repo_name_from_url}")
-
-                # Change to repository directory
-                repo_dir = f"/root/{repo_name_from_url}"
-                if os.path.exists(repo_dir):
-                    os.chdir(repo_dir)
-                    print(f"📂 Changed to repository directory: {repo_dir}")
-
-            except subprocess.CalledProcessError as e:
-                print(f"❌ Failed to clone repository: {e}")
-
-        # Run setup commands if provided
-        if setup_commands:
-            print(f"⚙️ Running {len(setup_commands)} setup commands...")
-            for i, cmd in enumerate(setup_commands, 1):
-                print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
-                try:
-                    result = subprocess.run(cmd, shell=True, check=True,
-                                            capture_output=True, text=True)
-                    if result.stdout:
-                        print(f"✅ Output: {result.stdout}")
-                except subprocess.CalledProcessError as e:
-                    print(f"❌ Command failed: {e}")
-                    if e.stderr:
-                        print(f"❌ Error: {e.stderr}")
-
-        # Create SSH tunnel
-        with modal.forward(22, unencrypted=True) as tunnel:
-            host, port = tunnel.tcp_socket
-
-            print("\n" + "=" * 80)
-            print("🎉 SSH CONTAINER IS READY!")
-            print("=" * 80)
-            print(f"🌐 SSH Host: {host}")
-            print(f"🔌 SSH Port: {port}")
-            print(f"👤 Username: root")
-            print(f"🔐 Password: {ssh_password}")
-            print()
-            print("🔗 CONNECT USING THIS COMMAND:")
-            print(f"ssh -p {port} root@{host}")
-            print("=" * 80)
-
-            # Keep the container running
-            while True:
-                time.sleep(30)
-                # Check if SSH service is still running
-                try:
-                    subprocess.run(["service", "ssh", "status"], check=True,
-                                   capture_output=True)
-                except subprocess.CalledProcessError:
-                    print("⚠️ SSH service stopped, restarting...")
-                    subprocess.run(["service", "ssh", "start"], check=True)
-
-    # Run the container
-    try:
-        print("⏳ Starting container... This may take 1-2 minutes...")
-
-        # Start the container in a new thread to avoid blocking
-        with modal.enable_output():
-            with app.run():
-                ssh_container_function.remote()
-
-        # Clean up Modal token after container is successfully created
-        cleanup_modal_token()
-
-        return {
-            "app_name": app_name,
-            "ssh_password": ssh_password,
-            "volume_name": volume_name
-        }
-    except Exception as e:
-        print(f"❌ Error running container: {e}")
-        return None
-
-def is_local_server_running(url, timeout=2):
-    """Check if a local server is running by attempting a connection."""
-    import requests
-    try:
-        response = requests.get(url, timeout=timeout)
-        return True
-    except requests.exceptions.RequestException:
-        return False
-
-def fetch_setup_commands_from_api(repo_url):
-    """Fetch setup commands from the GitIngest API using real repository analysis."""
-    import tempfile
-    import subprocess
-    import os
-    import shutil
-    import json
-    import time
-    import requests
-
-    # Define API endpoints to try in order - using only online endpoints
-    api_endpoints = [
-        "https://www.gitarsenal.dev/api/analyze-with-gitingest"  # Working endpoint with www prefix
-    ]
-
-    print(f"🔍 Fetching setup commands from API for repository: {repo_url}")
-
-    # Check if gitingest command line tool is available - try multiple possible command names
-    has_gitingest_cli = False
-    gitingest_cmd_name = None
-
-    # Try the standard command name first
-    try:
-        print(f"🔍 Checking for GitIngest CLI tool...")
-        result = subprocess.run(["gitingest", "--help"], check=True, capture_output=True, text=True)
-        has_gitingest_cli = True
-        gitingest_cmd_name = "gitingest"
-        print(f"✅ GitIngest CLI tool found")
-    except (subprocess.SubprocessError, FileNotFoundError) as e:
-        print(f" - GitIngest command not found: {str(e)}")
-
-    # Create a temporary directory for output
-    temp_dir = tempfile.mkdtemp(prefix="repo_analysis_")
-    output_file = os.path.join(temp_dir, "digest.json")
-
-    # Create a directory to save GitIngest results
-    save_dir = os.path.join(os.path.expanduser("~"), "gitarsenal_results")
-    os.makedirs(save_dir, exist_ok=True)
-    timestamp = time.strftime("%Y%m%d_%H%M%S")
-    repo_name = repo_url.split("/")[-1].replace(".git", "")
-    save_file = os.path.join(save_dir, f"gitingest_{repo_name}_{timestamp}.txt")
-
-    try:
-        if has_gitingest_cli:
-            # Use gitingest CLI tool to analyze the repository directly from URL
-            print(f"🔎 Running GitIngest analysis on {repo_url}...")
-
-            # Based on the help output, the correct format is:
-            # gitingest [OPTIONS] [SOURCE]
-            # With options:
-            # -o, --output TEXT Output file path
-            # --format TEXT Output format (json)
-
-            # Run gitingest command with proper parameters
-            gitingest_run_cmd = [
-                gitingest_cmd_name,
-                repo_url,
-                "-o", output_file,  # Use -o for output file
-            ]
-
-            print(f"🔄 Executing: {' '.join(gitingest_run_cmd)}")
-
-            result = subprocess.run(gitingest_run_cmd, capture_output=True, text=True)
-
-            if result.returncode != 0:
-                print(f"⚠️ GitIngest CLI failed with exit code {result.returncode}")
-                print(f"⚠️ Error output: {result.stderr}")
-                print("Falling back to basic analysis")
-                gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
-            else:
-                print(f"✅ GitIngest analysis completed successfully")
-
-                # Read the output file - note that the default format might not be JSON
-                try:
-                    # First try to parse as JSON
-                    try:
-                        with open(output_file, 'r', encoding='utf-8') as f:
-                            content = f.read()
-
-                        # Save the GitIngest output to the results directory
-                        with open(save_file, 'w', encoding='utf-8') as save_f:
-                            save_f.write(content)
-                        print(f"📁 GitIngest output saved to: {save_file}")
-
-                        try:
-                            gitingest_data = json.loads(content)
-                            print(f"✅ GitIngest data loaded as JSON from {output_file}")
-                        except json.JSONDecodeError:
-                            # If not JSON, convert the text output to a basic structure
-                            print(f"⚠️ GitIngest output is not in JSON format, converting text to structure")
-
-                            # Process the text to extract useful information
-                            import re
-
-                            # Try to identify language
-                            language_match = re.search(r"(?i)language[s]?:?\s*(\w+)", content)
-                            detected_language = language_match.group(1) if language_match else "Unknown"
-
-                            # Try to identify technologies with stronger evidence requirements
-                            tech_patterns = {
-                                "python": r"(?i)(python|\.py\b|pip\b|requirements\.txt|setup\.py)",
-                                "javascript": r"(?i)(javascript|\.js\b|node|npm|yarn|package\.json)",
-                                "typescript": r"(?i)(typescript|\.ts\b|tsc\b|tsconfig\.json)",
-                                "go": r"(?i)(\bgo\b|golang|\.go\b|go\.mod|go\.sum)",
-                                "rust": r"(?i)(rust|\.rs\b|cargo|Cargo\.toml)",
-                                "java": r"(?i)(java\b|\.java\b|maven|gradle|pom\.xml)",
-                                "c++": r"(?i)(c\+\+|\.cpp\b|\.hpp\b|cmake\b|CMakeLists\.txt)",
-                                "pytorch": r"(?i)(pytorch|torch\b|nn\.Module)",
-                                "tensorflow": r"(?i)(tensorflow|tf\.|keras\b)",
-                            }
-
-                            # Count occurrences to filter out false positives
-                            tech_counts = {}
-                            for tech, pattern in tech_patterns.items():
-                                matches = re.findall(pattern, content)
-                                if matches:
-                                    tech_counts[tech] = len(matches)
-
-                            # Filter technologies based on threshold
-                            thresholds = {
-                                "javascript": 3,  # Higher threshold for JavaScript
-                                "go": 3,  # Higher threshold for Go
-                                "default": 2  # Default threshold
-                            }
-
-                            detected_technologies = []
-                            for tech, count in tech_counts.items():
-                                threshold = thresholds.get(tech, thresholds["default"])
-                                if count >= threshold:
-                                    detected_technologies.append(tech)
-                                    print(f"📊 Detected {tech} with confidence score {count}")
-
-                            # Create a structured representation
-                            gitingest_data = {
-                                "system_info": {
-                                    "detected_language": detected_language,
-                                    "detected_technologies": detected_technologies,
-                                },
-                                "repository_analysis": {
-                                    "summary": content[:5000],  # First 5000 chars as summary
-                                    "content_preview": content[:10000]  # First 10000 chars as preview
-                                },
-                                "success": True
-                            }
-
-                            # Save the processed data
-                            processed_file = os.path.join(save_dir, f"gitingest_processed_{repo_name}_{timestamp}.json")
-                            with open(processed_file, 'w', encoding='utf-8') as proc_f:
-                                json.dump(gitingest_data, proc_f, indent=2)
-                            print(f"📁 Processed GitIngest data saved to: {processed_file}")
-                    except FileNotFoundError:
-                        print(f"⚠️ Output file not found at {output_file}")
-                        gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
-                except Exception as e:
-                    print(f"⚠️ Error reading GitIngest output: {e}")
-                    gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
-        else:
-            # Fall back to basic analysis if gitingest CLI is not available
-            gitingest_data = generate_basic_repo_analysis_from_url(repo_url)
-
-        # Prepare the request payload with GitIngest data
-        payload = {
-            "repoUrl": repo_url,
-            "gitingestData": gitingest_data,
-            "userRequest": "Setup and run the repository"
-        }
-
-        print(f"📤 API Request payload prepared (GitIngest data size: {len(json.dumps(gitingest_data))} bytes)")
-
-        # Try each endpoint in sequence until one succeeds
-        response = None
-        for api_url in api_endpoints:
-            # Use the retry mechanism for more reliable requests
-            response = make_api_request_with_retry(
-                url=api_url,
-                payload=payload,
-                max_retries=2,
-                timeout=180  # 3 minute timeout
-            )
-
-            # If we got a response and it's successful, break out of the loop
-            if response and response.status_code == 200:
-                print(f"✅ Successful response from {api_url}")
-                break
-
-            if response:
-                print(f"⚠️ Endpoint {api_url} returned status code {response.status_code}, trying next endpoint...")
-            else:
-                print(f"⚠️ Failed to connect to {api_url}, trying next endpoint...")
-
-        # If we've tried all endpoints and still don't have a response, use fallback
-        if response is None:
-            print("❌ All API endpoints failed")
-            return generate_fallback_commands(gitingest_data)
-
-        # Continue with the response we got from the successful endpoint
-        if not response:
-            print("❌ No valid response received from any endpoint")
-            return generate_fallback_commands(gitingest_data)
-
-        try:
-            print(f"📥 API Response status code: {response.status_code}")
-
-            if response.status_code == 200:
-                try:
-                    data = response.json()
-                    print(f"📄 API Response data received")
-                    print(f"📄 Response size: {len(response.text)} bytes")
-                    print(f"📄 Response URL: {response.url}")
-
-                    # Extract setup commands from the response
-                    if "setupInstructions" in data and "commands" in data["setupInstructions"]:
-                        commands = data["setupInstructions"]["commands"]
-                        print(f"✅ Successfully fetched {len(commands)} setup commands from API")
-
-                        # Print the original commands for reference
-                        print("📋 Original commands from API:")
-                        for i, cmd in enumerate(commands, 1):
-                            print(f" {i}. {cmd}")
-
-                        # Fix the commands by removing placeholders and comments
-                        fixed_commands = fix_setup_commands(commands)
-
-                        # If we have a temp_dir with the cloned repo, try to find the entry point
-                        # and replace any placeholder entry points
-                        for i, cmd in enumerate(fixed_commands):
-                            if "python main.py" in cmd or "python3 main.py" in cmd:
-                                try:
-                                    entry_point = find_entry_point(temp_dir)
-                                    if entry_point and entry_point != "main.py":
-                                        fixed_commands[i] = cmd.replace("main.py", entry_point)
-                                        print(f"🔄 Replaced main.py with detected entry point: {entry_point}")
-                                except Exception as e:
-                                    print(f"⚠️ Error finding entry point: {e}")
-
-                        # Print the fixed commands
-                        print("\n📋 Fixed commands:")
-                        for i, cmd in enumerate(fixed_commands, 1):
-                            print(f" {i}. {cmd}")
-
-                        return fixed_commands
-                    else:
-                        print("⚠️ API response did not contain setupInstructions.commands field")
-                        print("📋 Available fields in response:")
-                        for key in data.keys():
-                            print(f" - {key}")
-                        # Return fallback commands
-                        return generate_fallback_commands(gitingest_data)
-                except json.JSONDecodeError as e:
-                    print(f"❌ Failed to parse API response as JSON: {e}")
-                    print(f"Raw response: {response.text[:500]}...")
-                    # Return fallback commands
-                    return generate_fallback_commands(gitingest_data)
-            elif response.status_code == 504:
-                print(f"❌ API request timed out (504 Gateway Timeout)")
-                print("⚠️ The server took too long to respond. Using fallback commands instead.")
-                # Return fallback commands
-                return generate_fallback_commands(gitingest_data)
-            else:
-                print(f"❌ API request failed with status code: {response.status_code}")
-                print(f"❌ Response URL: {response.url}")
-                print(f"❌ Response headers: {dict(response.headers)}")
-                print(f"❌ Error response: {response.text[:500]}...")
-                # Return fallback commands
-                return generate_fallback_commands(gitingest_data)
-        except Exception as e:
-            print(f"❌ Error processing API response: {str(e)}")
-            print("⚠️ Using fallback commands instead")
-            # Return fallback commands
-            return generate_fallback_commands(gitingest_data)
-    except Exception as e:
-        print(f"❌ Error fetching setup commands from API: {e}")
-        import traceback
-        traceback.print_exc()
-        # Return fallback commands
-        return generate_fallback_commands(None)
-    finally:
-        # Clean up the temporary directory
-        print(f"🧹 Cleaning up temporary directory...")
-        shutil.rmtree(temp_dir, ignore_errors=True)
-
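# A minimal sketch (not from the file above) of the GitIngest CLI call that
# fetch_setup_commands_from_api performs before posting to the analyze endpoint;
# the repository URL and output path below are hypothetical examples.
def _example_run_gitingest(repo_url="https://github.com/username/repo.git"):
    import os
    import subprocess
    import tempfile
    out_file = os.path.join(tempfile.mkdtemp(prefix="repo_analysis_"), "digest.json")
    try:
        # gitingest [OPTIONS] [SOURCE]; -o selects the output file, as the function above does
        result = subprocess.run(["gitingest", repo_url, "-o", out_file],
                                capture_output=True, text=True)
    except FileNotFoundError:
        return None  # CLI not installed; the script falls back to basic analysis
    return out_file if result.returncode == 0 else None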
-def generate_fallback_commands(gitingest_data):
-    """Generate fallback setup commands based on repository analysis"""
-    print("\n" + "="*80)
-    print("📋 GENERATING FALLBACK SETUP COMMANDS")
-    print("="*80)
-    print("Using basic repository analysis to generate setup commands")
-
-    # Default commands that work for most repositories
-    default_commands = [
-        "apt-get update -y",
-        "apt-get install -y git curl wget",
-        "pip install --upgrade pip setuptools wheel"
-    ]
-
-    # If we don't have any analysis data, return default commands
-    if not gitingest_data:
-        print("⚠️ No repository analysis data available. Using default commands.")
-        return default_commands
-
-    # Extract language and technologies information
-    detected_language = gitingest_data.get("system_info", {}).get("detected_language", "Unknown")
-    detected_technologies = gitingest_data.get("system_info", {}).get("detected_technologies", [])
-    primary_package_manager = gitingest_data.get("system_info", {}).get("primary_package_manager", "Unknown")
-
-    # Add language-specific commands
-    language_commands = []
-
-    print(f"📋 Detected primary language: {detected_language}")
-    print(f"📋 Detected technologies: {', '.join(detected_technologies) if detected_technologies else 'None'}")
-    print(f"📋 Detected package manager: {primary_package_manager}")
-
-    # Python-specific commands
-    if detected_language == "Python" or primary_package_manager == "pip":
-        print("📦 Adding Python-specific setup commands")
-
-        # Check for requirements.txt
-        requirements_check = [
-            "if [ -f requirements.txt ]; then",
-            " echo 'Installing from requirements.txt'",
-            " pip install -r requirements.txt",
-            "elif [ -f setup.py ]; then",
-            " echo 'Installing from setup.py'",
-            " pip install -e .",
-            "fi"
-        ]
-        language_commands.extend(requirements_check)
-
-        # Add common Python packages
-        language_commands.append("pip install pytest numpy pandas matplotlib")
-
-    # JavaScript/Node.js specific commands
-    elif detected_language in ["JavaScript", "TypeScript"] or primary_package_manager in ["npm", "yarn", "pnpm"]:
-        print("📦 Adding JavaScript/Node.js-specific setup commands")
-
-        # Install Node.js if not available
-        language_commands.append("apt-get install -y nodejs npm")
-
-        # Check for package.json
-        package_json_check = [
-            "if [ -f package.json ]; then",
-            " echo 'Installing from package.json'",
-            " npm install",
-            "fi"
-        ]
-        language_commands.extend(package_json_check)
-
-    # Java specific commands
-    elif detected_language == "Java" or primary_package_manager in ["maven", "gradle"]:
-        print("📦 Adding Java-specific setup commands")
-
-        language_commands.append("apt-get install -y openjdk-11-jdk maven gradle")
-
-        # Check for Maven or Gradle
-        build_check = [
-            "if [ -f pom.xml ]; then",
-            " echo 'Building with Maven'",
-            " mvn clean install -DskipTests",
-            "elif [ -f build.gradle ]; then",
-            " echo 'Building with Gradle'",
-            " gradle build --no-daemon",
-            "fi"
-        ]
-        language_commands.extend(build_check)
-
-    # Go specific commands
-    elif detected_language == "Go" or primary_package_manager == "go":
-        print("📦 Adding Go-specific setup commands")
-
-        language_commands.append("apt-get install -y golang-go")
-        language_commands.append("go mod tidy")
-
-    # Rust specific commands
-    elif detected_language == "Rust" or primary_package_manager == "cargo":
-        print("📦 Adding Rust-specific setup commands")
-
-        language_commands.append("curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y")
-        language_commands.append("source $HOME/.cargo/env")
-        language_commands.append("cargo build")
-
-    # Combine all commands
-    all_commands = default_commands + language_commands
-
-    # Fix the commands
-    fixed_commands = fix_setup_commands(all_commands)
-
-    print("\n📋 Generated fallback setup commands:")
-    for i, cmd in enumerate(fixed_commands, 1):
-        print(f" {i}. {cmd}")
-
-    return fixed_commands
-
-def generate_basic_repo_analysis_from_url(repo_url):
-    """Generate basic repository analysis data from a repository URL."""
-    import tempfile
-    import subprocess
-    import os
-    import shutil
-
-    # Create a temporary directory for cloning
-    temp_dir = tempfile.mkdtemp(prefix="repo_basic_analysis_")
-
-    try:
-        print(f"📥 Cloning repository to {temp_dir} for basic analysis...")
-        clone_result = subprocess.run(
-            ["git", "clone", "--depth", "1", repo_url, temp_dir],
-            capture_output=True,
-            text=True
-        )
-
-        if clone_result.returncode != 0:
-            print(f"❌ Failed to clone repository: {clone_result.stderr}")
-            return {
-                "system_info": {
-                    "platform": "linux",
-                    "python_version": "3.10",
-                    "detected_language": "Unknown",
-                    "detected_technologies": [],
-                    "file_count": 0,
-                    "repo_stars": 0,
-                    "repo_forks": 0,
-                    "primary_package_manager": "Unknown",
-                    "complexity_level": "low"
-                },
-                "repository_analysis": {
-                    "summary": f"Repository analysis for {repo_url}",
-                    "tree": "Failed to clone repository",
-                    "content_preview": "No content available"
-                },
-                "success": False
-            }
-
-        print(f"✅ Repository cloned successfully for basic analysis")
-
-        # Use the existing generate_basic_repo_analysis function
-        return generate_basic_repo_analysis(temp_dir)
-    finally:
-        # Clean up the temporary directory
-        print(f"🧹 Cleaning up temporary directory for basic analysis...")
-        shutil.rmtree(temp_dir, ignore_errors=True)
-
-def generate_basic_repo_analysis(repo_dir):
-    """Generate basic repository analysis when GitIngest is not available."""
-    import os
-    import subprocess
-
-    # Detect language and technologies based on file extensions
-    file_extensions = {}
-    file_count = 0
-
-    for root, _, files in os.walk(repo_dir):
-        for file in files:
-            file_count += 1
-            ext = os.path.splitext(file)[1].lower()
-            if ext:
-                file_extensions[ext] = file_extensions.get(ext, 0) + 1
-
-    # Determine primary language
-    language_map = {
-        '.py': 'Python',
-        '.js': 'JavaScript',
-        '.ts': 'TypeScript',
-        '.jsx': 'JavaScript',
-        '.tsx': 'TypeScript',
-        '.java': 'Java',
-        '.cpp': 'C++',
-        '.c': 'C',
-        '.go': 'Go',
-        '.rs': 'Rust',
-        '.rb': 'Ruby',
-        '.php': 'PHP',
-        '.swift': 'Swift',
-        '.kt': 'Kotlin',
-        '.cs': 'C#'
-    }
-
-    # Count files by language
-    language_counts = {}
-    for ext, count in file_extensions.items():
-        if ext in language_map:
-            lang = language_map[ext]
-            language_counts[lang] = language_counts.get(lang, 0) + count
-
-    # Determine primary language
-    primary_language = max(language_counts.items(), key=lambda x: x[1])[0] if language_counts else "Unknown"
-
-    # Detect package managers
-    package_managers = []
-    package_files = {
-        'requirements.txt': 'pip',
-        'setup.py': 'pip',
-        'pyproject.toml': 'pip',
-        'package.json': 'npm',
-        'yarn.lock': 'yarn',
-        'pnpm-lock.yaml': 'pnpm',
-        'Cargo.toml': 'cargo',
-        'go.mod': 'go',
-        'Gemfile': 'bundler',
-        'pom.xml': 'maven',
-        'build.gradle': 'gradle',
-        'composer.json': 'composer'
-    }
-
-    for file, manager in package_files.items():
-        if os.path.exists(os.path.join(repo_dir, file)):
-            package_managers.append(manager)
-
-    primary_package_manager = package_managers[0] if package_managers else "Unknown"
-
-    # Get README content
-    readme_content = ""
-    for readme_name in ['README.md', 'README', 'README.txt', 'readme.md']:
-        readme_path = os.path.join(repo_dir, readme_name)
-        if os.path.exists(readme_path):
-            with open(readme_path, 'r', encoding='utf-8', errors='ignore') as f:
-                readme_content = f.read()
-            break
-
-    # Try to get repository info
-    repo_info = {}
-    try:
-        # Get remote origin URL
-        cmd = ["git", "config", "--get", "remote.origin.url"]
-        result = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True)
-        if result.returncode == 0:
-            repo_info["url"] = result.stdout.strip()
-
-        # Get commit count as a proxy for activity
-        cmd = ["git", "rev-list", "--count", "HEAD"]
-        result = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True)
-        if result.returncode == 0:
-            repo_info["commit_count"] = int(result.stdout.strip())
-    except Exception:
-        pass
-
-    # Build the analysis data
-    return {
-        "system_info": {
-            "platform": "linux",  # Assuming Linux for container environment
-            "python_version": "3.10",  # Common Python version
-            "detected_language": primary_language,
-            "detected_technologies": list(language_counts.keys()),
-            "file_count": file_count,
-            "repo_stars": repo_info.get("stars", 0),
-            "repo_forks": repo_info.get("forks", 0),
-            "primary_package_manager": primary_package_manager,
-            "complexity_level": "medium"  # Default assumption
-        },
-        "repository_analysis": {
-            "summary": f"Repository analysis for {repo_dir}",
-            "readme_content": readme_content[:5000] if readme_content else "No README found",
-            "package_managers": package_managers,
-            "file_extensions": list(file_extensions.keys())
-        },
-        "success": True
-    }
-
-def get_setup_commands_from_local_api(repo_url, gitingest_data):
-    """Try to get setup commands from the API."""
-    # Use only online endpoints
-    api_endpoints = [
-        "https://www.gitarsenal.dev/api/analyze-with-gitingest"  # Working endpoint with www prefix
-    ]
-
-    # Prepare the request payload
-    payload = {
-        "repoUrl": repo_url,
-        "gitingestData": gitingest_data,
-        "userRequest": "Setup and run the repository"
-    }
-
-    # Try each API endpoint
-    for api_url in api_endpoints:
-        # Use the retry mechanism for more reliable requests
-        response = make_api_request_with_retry(
-            url=api_url,
-            payload=payload,
-            max_retries=2,
-            timeout=180  # 3 minute timeout
-        )
-
-        if response and response.status_code == 200:
-            try:
-                data = response.json()
-                print(f"📄 Response size: {len(response.text)} bytes")
-                print(f"📄 Response URL: {response.url}")
-                if "setupInstructions" in data and "commands" in data["setupInstructions"]:
-                    commands = data["setupInstructions"]["commands"]
-                    print(f"✅ Successfully fetched {len(commands)} setup commands from API at {api_url}")
-
-                    # Print the original commands
-                    print("📋 Original commands from API:")
-                    for i, cmd in enumerate(commands, 1):
-                        print(f" {i}. {cmd}")
-
-                    # Fix the commands
-                    fixed_commands = fix_setup_commands(commands)
-
-                    # Print the fixed commands
-                    print("\n📋 Fixed commands:")
-                    for i, cmd in enumerate(fixed_commands, 1):
-                        print(f" {i}. {cmd}")
-
-                    return fixed_commands
-                else:
-                    print("⚠️ API response did not contain setupInstructions.commands field")
-            except json.JSONDecodeError:
-                print(f"❌ Failed to parse API response as JSON")
-        elif response:
-            print(f"❌ API request failed with status code: {response.status_code}")
-        else:
-            print(f"❌ Failed to connect to {api_url}")
-
-    print("❌ All API endpoints failed")
-    return None
-    print(f"❌ Failed to connect to {api_url}")
-
-    print("❌ All API endpoints failed")
-    return None
-
-# Define a function to create and return a properly configured ssh container function
-def create_ssh_container_function(gpu_type="a10g", timeout_minutes=60, volume=None, volume_mount_path="/persistent"):
-    # Create a new app for this specific container
-    app_name = f"ssh-container-{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
-    ssh_app = modal.App.lookup(app_name, create_if_missing=True)
-
-    # Create SSH-enabled image
-    ssh_image = (
-        modal.Image.debian_slim()
-        .apt_install(
-            "openssh-server", "sudo", "curl", "wget", "vim", "htop", "git",
-            "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
-            "gpg", "ca-certificates", "software-properties-common"
-        )
-        .pip_install("uv", "modal")  # Fast Python package installer and Modal
-        .run_commands(
-            # Create SSH directory
-            "mkdir -p /var/run/sshd",
-            "mkdir -p /root/.ssh",
-            "chmod 700 /root/.ssh",
-
-            # Configure SSH server
-            "sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config",
-            "sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config",
-            "sed -i 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /etc/ssh/sshd_config",
-
-            # SSH keep-alive settings
-            "echo 'ClientAliveInterval 60' >> /etc/ssh/sshd_config",
-            "echo 'ClientAliveCountMax 3' >> /etc/ssh/sshd_config",
-
-            # Generate SSH host keys
-            "ssh-keygen -A",
-
-            # Set up a nice bash prompt
-            "echo 'export PS1=\"\\[\\e[1;32m\\]modal:\\[\\e[1;34m\\]\\w\\[\\e[0m\\]$ \"' >> /root/.bashrc",
-        )
-    )
-
-    # Setup volume mount if available
-    volumes = {}
-    if volume:
-        volumes[volume_mount_path] = volume
-
-    # Define the function with the specific configuration
-    @ssh_app.function(
-        image=ssh_image,
-        timeout=timeout_minutes * 60,  # Convert to seconds
-        gpu=gpu_type,
-        cpu=2,
-        memory=8192,
-        serialized=True,
-        volumes=volumes if volumes else None,
-    )
-    def ssh_container(ssh_password, repo_url=None, repo_name=None, setup_commands=None):
-        import subprocess
-        import time
-        import os
-
-        # Set root password
-        subprocess.run(["bash", "-c", f"echo 'root:{ssh_password}' | chpasswd"], check=True)
-
-        # Start SSH service
-        subprocess.run(["service", "ssh", "start"], check=True)
-
-        # Setup environment
-        os.environ['PS1'] = r'\[\e[1;32m\]modal:\[\e[1;34m\]\w\[\e[0m\]$ '
-
-        # Clone repository if provided
-        if repo_url:
-            repo_name_from_url = repo_name or repo_url.split('/')[-1].replace('.git', '')
-            print(f"📥 Cloning repository: {repo_url}")
-
-            try:
-                subprocess.run(["git", "clone", repo_url], check=True, cwd="/root")
-                print(f"✅ Repository cloned successfully: {repo_name_from_url}")
-
-                # Change to repository directory
-                repo_dir = f"/root/{repo_name_from_url}"
-                if os.path.exists(repo_dir):
-                    os.chdir(repo_dir)
-                    print(f"📂 Changed to repository directory: {repo_dir}")
-
-            except subprocess.CalledProcessError as e:
-                print(f"❌ Failed to clone repository: {e}")
-
-        # Run setup commands if provided
-        if setup_commands:
-            print(f"⚙️ Running {len(setup_commands)} setup commands...")
-            for i, cmd in enumerate(setup_commands, 1):
-                print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
-                try:
-                    result = subprocess.run(cmd, shell=True, check=True,
-                                            capture_output=True, text=True)
-                    if result.stdout:
-                        print(f"✅ Output: {result.stdout}")
-                except subprocess.CalledProcessError as e:
-                    print(f"❌ Command failed: {e}")
-                    if e.stderr:
-                        print(f"❌ Error: {e.stderr}")
-
-        # Get container info
-        print("🔍 Container started successfully!")
-        print(f"🆔 Container ID: {os.environ.get('MODAL_TASK_ID', 'unknown')}")
-
-        # Keep the container running
-        while True:
-            time.sleep(30)
-            # Check if SSH service is still running
-            try:
-                subprocess.run(["service", "ssh", "status"], check=True,
-                               capture_output=True)
-            except subprocess.CalledProcessError:
-                print("⚠️ SSH service stopped, restarting...")
-                subprocess.run(["service", "ssh", "start"], check=True)
-
-    # Return the configured function
-    return ssh_container, app_name
-
-def fix_setup_commands(commands):
-    """Fix setup commands by removing placeholders and comments."""
-    fixed_commands = []
-
-    for cmd in commands:
-        # Remove placeholders like "(or the appropriate entry point...)"
-        cmd = re.sub(r'\([^)]*\)', '', cmd).strip()
-
-        # Skip empty commands or pure comments
-        if not cmd or cmd.startswith('#'):
-            continue
-
-        # Remove trailing comments
-        cmd = re.sub(r'#.*$', '', cmd).strip()
-
-        if cmd:
-            fixed_commands.append(cmd)
-
-    return fixed_commands
-
-def find_entry_point(repo_dir):
-    """Find the entry point script for a repository."""
-    # Common entry point files to check
-    common_entry_points = [
-        "main.py", "app.py", "run.py", "train.py", "start.py",
-        "server.py", "cli.py", "demo.py", "example.py"
-    ]
-
-    # Check if any of the common entry points exist
-    for entry_point in common_entry_points:
-        if os.path.exists(os.path.join(repo_dir, entry_point)):
-            return entry_point
-
-    # Look for Python files in the root directory
-    python_files = [f for f in os.listdir(repo_dir) if f.endswith('.py')]
-    if python_files:
-        # Prioritize files with main function or if_name_main pattern
-        for py_file in python_files:
-            file_path = os.path.join(repo_dir, py_file)
-            try:
-                with open(file_path, 'r') as f:
-                    content = f.read()
-                    if "def main" in content or "if __name__ == '__main__'" in content or 'if __name__ == "__main__"' in content:
-                        return py_file
-            except:
-                pass
-
-        # If no main function found, return the first Python file
-        return python_files[0]
-
-    return None
-
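# A minimal sketch (not from the file above) showing how fix_setup_commands and
# find_entry_point are meant to combine when cleaning API-provided commands;
# the command strings and repository path below are hypothetical examples.
def _example_clean_commands(repo_dir="/root/repo"):
    raw = [
        "# install dependencies first",
        "pip install -r requirements.txt  # core deps",
        "python main.py (or the appropriate entry point)",
    ]
    cmds = fix_setup_commands(raw)
    # -> ['pip install -r requirements.txt', 'python main.py']
    entry = find_entry_point(repo_dir)
    if entry and entry != "main.py":
        # Swap the placeholder entry point for the detected one, as done above
        cmds = [c.replace("main.py", entry) for c in cmds]
    return cmds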
-def analyze_directory_navigation_with_llm(current_dir, target_dir, current_contents, target_contents, api_key=None):
-    """Use LLM to analyze if directory navigation makes sense"""
-    if not api_key:
-        # Try to get API key from environment
-        api_key = os.environ.get("OPENAI_API_KEY")
-
-    if not api_key:
-        print("⚠️ No OpenAI API key available for directory analysis")
-        return None
-
-    # Create analysis prompt
-    analysis_prompt = f"""
-I'm trying to determine if a 'cd {target_dir}' command makes sense.
-
-CURRENT DIRECTORY: {current_dir}
-Current directory contents:
-{current_contents}
-
-TARGET DIRECTORY: {target_dir}
-Target directory contents:
-{target_contents}
-
-Please analyze if navigating to the target directory makes sense by considering:
-1. Are the contents significantly different?
-2. Does the target directory contain important files (like source code, config files, etc.)?
-3. Is this likely a nested project directory or just a duplicate?
-4. Would navigating provide access to different functionality or files?
-
-Respond with only 'NAVIGATE' if navigation makes sense, or 'SKIP' if it's redundant.
-"""
-
-    # Prepare the API request
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {api_key}"
-    }
-
-    payload = {
-        "model": "gpt-4",
-        "messages": [
-            {"role": "system", "content": "You are a directory navigation assistant. Analyze if navigating to a target directory makes sense based on the contents of both directories. Respond with only 'NAVIGATE' or 'SKIP'."},
-            {"role": "user", "content": analysis_prompt}
-        ],
-        "temperature": 0.1,
-        "max_tokens": 50
-    }
-
-    try:
-        print("🤖 Calling OpenAI for directory navigation analysis...")
-        response = requests.post(
-            "https://api.openai.com/v1/chat/completions",
-            headers=headers,
-            json=payload,
-            timeout=30
-        )
-
-        if response.status_code == 200:
-            result = response.json()
-            llm_response = result["choices"][0]["message"]["content"].strip()
-            print(f"🤖 LLM Response: {llm_response}")
-            return llm_response
-        else:
-            print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
-            return None
-    except Exception as e:
-        print(f"❌ Error calling OpenAI API: {e}")
-        return None
-
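# A minimal sketch (not from the file above) of how analyze_directory_navigation_with_llm
# could gate a `cd` during setup; the directory listings shown are hypothetical.
def _example_should_navigate():
    decision = analyze_directory_navigation_with_llm(
        current_dir="/root/repo",
        target_dir="/root/repo/src",
        current_contents="README.md  setup.py  src/",
        target_contents="main.py  utils.py  model.py",
    )
    # The helper returns 'NAVIGATE', 'SKIP', or None when no API key is configured.
    return decision == "NAVIGATE"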
-def cleanup_modal_token():
-    """Delete token files and environment variables after SSH container is started"""
-    print("🧹 Cleaning up tokens for security...")
-
-    try:
-        # Remove token from environment variables
-        if "MODAL_TOKEN_ID" in os.environ:
-            del os.environ["MODAL_TOKEN_ID"]
-            # print("✅ Removed token ID from environment")
-
-        if "MODAL_TOKEN" in os.environ:
-            del os.environ["MODAL_TOKEN"]
-            # print("✅ Removed token from environment")
-
-        if "MODAL_TOKEN_SECRET" in os.environ:
-            del os.environ["MODAL_TOKEN_SECRET"]
-            # print("✅ Removed token secret from environment")
-
-        # Delete ~/.modal.toml file
-        home_dir = os.path.expanduser("~")
-        modal_toml = os.path.join(home_dir, ".modal.toml")
-        if os.path.exists(modal_toml):
-            os.remove(modal_toml)
-            # print(f"✅ Deleted token file at {modal_toml}")
-
-        # print("✅ Token cleanup completed successfully")
-    except Exception as e:
-        print(f"❌ Error during token cleanup: {e}")
-
-def show_usage_examples():
-    """Display usage examples for the script."""
-    print("Usage Examples\n")
-
-    print("Basic Container Creation")
-    print("┌────────────────────────────────────────────────────────────────────────┐")
-    print("│ gitarsenal --gpu A10G --repo-url https://github.com/username/repo.git │")
-    print("└────────────────────────────────────────────────────────────────────────┘\n")
-
-    print("With Setup Commands")
-    print("┌────────────────────────────────────────────────────────────────────────────────────────────────────┐")
-    print("│ gitarsenal --gpu A100 --repo-url https://github.com/username/repo.git \\ │")
-    print("│ --setup-commands \"pip install -r requirements.txt\" \"python setup.py install\" │")
-    print("└────────────────────────────────────────────────────────────────────────────────────────────────────┘\n")
-
-    print("With Persistent Storage")
-    print("┌────────────────────────────────────────────────────────────────────────────────────┐")
-    print("│ gitarsenal --gpu A10G --repo-url https://github.com/username/repo.git \\ │")
-    print("│ --volume-name my-persistent-volume │")
-    print("└────────────────────────────────────────────────────────────────────────────────────┘\n")
-
-    print("Available GPU Options:")
-    print(" T4, L4, A10G, A100-40GB, A100-80GB, L40S, H100, H200, B200")
-
-def make_api_request_with_retry(url, payload, max_retries=2, timeout=180):
-    """Make an API request with retry mechanism."""
-    import requests
-    import time
-
-    for attempt in range(max_retries + 1):
-        try:
-            if attempt > 0:
-                print(f"🔄 Retry attempt {attempt}/{max_retries}...")
-
-            print(f"🌐 Making POST request to: {url}")
-            print(f"⏳ Waiting up to {timeout//60} minutes for response...")
-
-            # Set allow_redirects=True to follow redirects automatically
-            response = requests.post(
-                url,
-                json=payload,
-                timeout=timeout,
-                allow_redirects=True,
-                headers={
-                    'Content-Type': 'application/json',
-                    'User-Agent': 'GitArsenal-CLI/1.0'
-                }
-            )
-
-            # Print redirect info if any
-            if response.history:
-                print(f"✅ Request was redirected {len(response.history)} times")
-                for resp in response.history:
-                    print(f" - Redirect: {resp.status_code} from {resp.url}")
-                print(f"✅ Final URL: {response.url}")
-
-            return response
-        except requests.exceptions.RequestException as e:
-            if attempt < max_retries:
-                retry_delay = 2 ** attempt  # Exponential backoff
-                print(f"⚠️ Request failed: {str(e)}")
-                print(f"⏳ Waiting {retry_delay} seconds before retrying...")
-                time.sleep(retry_delay)
-            else:
-                print(f"❌ All retry attempts failed: {str(e)}")
-                return None
-
-    return None
-
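# A minimal sketch (not from the file above) of how make_api_request_with_retry is
# driven by the fetch helpers; the endpoint and payload mirror the ones used in this
# script, while the response handling shown here is only illustrative.
def _example_fetch_commands(repo_url="https://github.com/username/repo.git"):
    payload = {
        "repoUrl": repo_url,
        "gitingestData": {"system_info": {"detected_language": "Python"}},
        "userRequest": "Setup and run the repository",
    }
    response = make_api_request_with_retry(
        url="https://www.gitarsenal.dev/api/analyze-with-gitingest",
        payload=payload,
        max_retries=2,   # with exponential backoff: 1s, then 2s between attempts
        timeout=180,
    )
    if response is not None and response.status_code == 200:
        return response.json().get("setupInstructions", {}).get("commands", [])
    return []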
-if __name__ == "__main__":
-    # Parse command line arguments when script is run directly
-    import argparse
-    import sys
-
-    parser = argparse.ArgumentParser(description='Create a Modal SSH container with GPU')
-    parser.add_argument('--gpu', type=str, default='A10G', help='GPU type (default: A10G)')
-    parser.add_argument('--repo-url', type=str, help='Repository URL to clone')
-    parser.add_argument('--repo-name', type=str, help='Repository name override')
-    parser.add_argument('--setup-commands', type=str, nargs='+', help='Setup commands to run (deprecated)')
-    parser.add_argument('--setup-commands-json', type=str, help='Setup commands as JSON array')
-    parser.add_argument('--commands-file', type=str, help='Path to file containing setup commands (one per line)')
-    parser.add_argument('--setup-script', type=str, help='Path to bash script containing setup commands')
-    parser.add_argument('--working-dir', type=str, help='Working directory for the setup script')
-    parser.add_argument('--volume-name', type=str, help='Name of the Modal volume for persistent storage')
-    parser.add_argument('--timeout', type=int, default=60, help='Container timeout in minutes (default: 60)')
-    parser.add_argument('--ssh-password', type=str, help='SSH password (random if not provided)')
-    parser.add_argument('--use-api', action='store_true', help='Fetch setup commands from API')
-    parser.add_argument('--show-examples', action='store_true', help='Show usage examples')
-
-    args = parser.parse_args()
-
-    # If no arguments or only --show-examples is provided, show usage examples
-    if len(sys.argv) == 1 or args.show_examples:
-        show_usage_examples()
-        sys.exit(0)
-
-    try:
-        # Get setup commands from file if specified
-        setup_commands = args.setup_commands or []
-
-        # If --use-api flag is set and repo_url is provided, fetch setup commands from API
-
-        # If --use-api flag is set and repo_url is provided, fetch setup commands from API
-        if args.use_api and args.repo_url:
-            print("🔄 Using API to fetch setup commands")
-            api_commands = fetch_setup_commands_from_api(args.repo_url)
-            if api_commands:
-                setup_commands = api_commands
-                print(f"📋 Using {len(setup_commands)} commands from API")
-            else:
-                print("⚠️ Failed to get commands from API, no fallback commands will be used")
-                # Do not fall back to basic setup commands
-                setup_commands = []
-
-        # Parse setup commands from JSON if provided
-        if args.setup_commands_json:
-            try:
-                json_commands = json.loads(args.setup_commands_json)
-                if isinstance(json_commands, list):
-                    setup_commands = json_commands
-                    print(f"📋 Parsed {len(setup_commands)} commands from JSON:")
-                    for i, cmd in enumerate(setup_commands, 1):
-                        print(f" {i}. {cmd}")
-                else:
-                    print(f"⚠️ Invalid JSON format for setup commands: not a list")
-            except json.JSONDecodeError as e:
-                print(f"⚠️ Error parsing JSON setup commands: {e}")
-                print(f"Received JSON string: {args.setup_commands_json}")
-
-        # Print received setup commands for debugging
-        if setup_commands:
-            print(f"📋 Using {len(setup_commands)} setup commands:")
-            for i, cmd in enumerate(setup_commands, 1):
-                print(f" {i}. {cmd}")
-
-        # Load commands from file if specified
-        if args.commands_file and os.path.exists(args.commands_file):
-            try:
-                with open(args.commands_file, 'r') as f:
-                    # Check if the file contains JSON or line-by-line commands
-                    content = f.read().strip()
-
-                    if content.startswith('[') and content.endswith(']'):
-                        # JSON format
-                        try:
-                            json_commands = json.loads(content)
-                            if isinstance(json_commands, list):
-                                setup_commands.extend(json_commands)
-                                print(f"📋 Loaded {len(json_commands)} commands from JSON file {args.commands_file}")
-                            else:
-                                print(f"⚠️ Invalid JSON format in commands file: not a list")
-                        except json.JSONDecodeError as json_err:
-                            print(f"⚠️ Error parsing JSON commands file: {json_err}")
-                            # Fall back to line-by-line parsing
-                            file_commands = [line.strip() for line in content.split('\n') if line.strip()]
-                            setup_commands.extend(file_commands)
-                            print(f"📋 Loaded {len(file_commands)} commands from file (line-by-line fallback)")
-                    else:
-                        # Line-by-line format
-                        file_commands = [line.strip() for line in content.split('\n') if line.strip()]
-                        setup_commands.extend(file_commands)
-                        print(f"📋 Loaded {len(file_commands)} commands from file (line-by-line format)")
-            except Exception as e:
-                print(f"⚠️ Error loading commands from file: {e}")
-
-        # Load commands from setup script if specified
-        if args.setup_script and os.path.exists(args.setup_script):
-            try:
-                with open(args.setup_script, 'r') as f:
-                    script_content = f.read().strip()
-                    # Convert script to individual commands
-                    script_commands = [line.strip() for line in script_content.split('\n')
-                                       if line.strip() and not line.strip().startswith('#')]
-                    setup_commands.extend(script_commands)
-                    print(f"📋 Loaded {len(script_commands)} commands from script {args.setup_script}")
-            except Exception as e:
-                print(f"⚠️ Error loading commands from script: {e}")
-
-        # Create the container with the specified options
-        if args.ssh_password:
-            print(f"🔑 Using provided SSH password")
-            ssh_password = args.ssh_password
-        else:
-            ssh_password = generate_random_password()
-            print(f"🔑 Generated random SSH password: {ssh_password}")
-
-        # Extract repository name from URL if not provided
-        repo_name = args.repo_name
-        if not repo_name and args.repo_url:
-            # Try to extract repo name from URL
-            url_parts = args.repo_url.rstrip('/').split('/')
-            if url_parts:
-                repo_name = url_parts[-1]
-                if repo_name.endswith('.git'):
-                    repo_name = repo_name[:-4]
-
-        # Create the container
-        create_modal_ssh_container(
-            gpu_type=args.gpu,
-            repo_url=args.repo_url,
-            repo_name=repo_name,
-            setup_commands=setup_commands,
-            volume_name=args.volume_name,
-            timeout_minutes=args.timeout,
-            ssh_password=ssh_password
-        )
-    except KeyboardInterrupt:
-        # print("\n\n🛑 Execution interrupted")
-        # print("🧹 Cleaning up resources...")
-        cleanup_modal_token()
-        sys.exit(1)
-    except Exception as e:
-        # print(f"\n❌ Error: {e}")
-        # print("🧹 Cleaning up resources...")
-        cleanup_modal_token()
-        sys.exit(1)