gitarsenal-cli 1.7.6 → 1.7.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ascii_banner.txt +12 -12
- package/bin/gitarsenal.js +8 -6
- package/package.json +1 -1
- package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc +0 -0
- package/python/test_dynamic_commands.py +147 -0
- package/python/test_modalSandboxScript.py +612 -254
- package/test_modalSandboxScript.py +612 -254
- package/test_credentials_integration.py +0 -108
@@ -14,6 +14,7 @@ import threading
 import uuid
 import signal
 from pathlib import Path
+import modal
 
 # Parse command-line arguments
 parser = argparse.ArgumentParser()
@@ -85,7 +86,6 @@ class PersistentShell:
         self._send_command_raw("cd " + self.working_dir)  # Change to working directory
         time.sleep(0.5)  # Let initial commands settle
 
-        print("✅ Persistent shell started successfully")
 
     def _read_stdout(self):
         """Read stdout in a separate thread."""
@@ -306,12 +306,13 @@ class PersistentShell:
 
         if success:
             if stdout_text:
-                print(f"✅ Output: {stdout_text}")
+                print("")
+                # print(f"✅ Output: {stdout_text}")
             # Track virtual environment activation
             if command.strip().startswith("source ") and "/bin/activate" in command:
                 venv_path = command.replace("source ", "").replace("/bin/activate", "").strip()
                 self.virtual_env_path = venv_path
-                print(f"✅ Virtual environment activated: {venv_path}")
+                # print(f"✅ Virtual environment activated: {venv_path}")
         else:
             print(f"❌ Command failed with exit code: {exit_code}")
             if stderr_text:
@@ -410,123 +411,296 @@ class PersistentShell:
         print("✅ Shell cleanup completed")
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+class CommandListManager:
+    """Manages a dynamic list of setup commands with status tracking and LLM-suggested fixes."""
+
+    def __init__(self, initial_commands=None):
+        self.commands = []
+        self.executed_commands = []
+        self.failed_commands = []
+        self.suggested_fixes = []
+        self.current_index = 0
+        self.total_commands = 0
+
+        if initial_commands:
+            self.add_commands(initial_commands)
+
+    def add_commands(self, commands):
+        """Add new commands to the list."""
+        if isinstance(commands, str):
+            commands = [commands]
+
+        added_count = 0
+        for cmd in commands:
+            if cmd and cmd.strip():
+                self.commands.append({
+                    'command': cmd.strip(),
+                    'status': 'pending',
+                    'index': len(self.commands),
+                    'stdout': '',
+                    'stderr': '',
+                    'execution_time': None,
+                    'fix_attempts': 0,
+                    'max_fix_attempts': 3
+                })
+                added_count += 1
+
+        self.total_commands = len(self.commands)
+        if added_count > 0:
+            print(f"📋 Added {added_count} commands to list. Total: {self.total_commands}")
+
+    def add_command_dynamically(self, command, priority='normal'):
+        """Add a single command dynamically during execution."""
+        if not command or not command.strip():
+            return False
+
+        new_command = {
+            'command': command.strip(),
+            'status': 'pending',
+            'index': len(self.commands),
+            'stdout': '',
+            'stderr': '',
+            'execution_time': None,
+            'fix_attempts': 0,
+            'max_fix_attempts': 3,
+            'priority': priority
+        }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if priority == 'high':
+            # Insert at the beginning of pending commands
+            self.commands.insert(self.current_index, new_command)
+            # Update indices for all commands after insertion
+            for i in range(self.current_index + 1, len(self.commands)):
+                self.commands[i]['index'] = i
+        else:
+            # Add to the end
+            self.commands.append(new_command)
+
+        self.total_commands = len(self.commands)
+        print(f"📋 Added dynamic command: {command.strip()}")
+        return True
+
+    def add_suggested_fix(self, original_command, fix_command, reason=""):
+        """Add a LLM-suggested fix for a failed command."""
+        fix_entry = {
+            'original_command': original_command,
+            'fix_command': fix_command,
+            'reason': reason,
+            'status': 'pending',
+            'index': len(self.suggested_fixes),
+            'stdout': '',
+            'stderr': '',
+            'execution_time': None
+        }
+        self.suggested_fixes.append(fix_entry)
+        print(f"🔧 Added suggested fix: {fix_command}")
+        return len(self.suggested_fixes) - 1
+
+    def get_next_command(self):
+        """Get the next pending command to execute."""
+        # First, try to get a pending command from the main list
+        for i in range(self.current_index, len(self.commands)):
+            if self.commands[i]['status'] == 'pending':
+                return self.commands[i], 'main'
+
+        # If no pending commands in main list, check suggested fixes
+        for fix in self.suggested_fixes:
+            if fix['status'] == 'pending':
+                return fix, 'fix'
+
+        return None, None
+
+    def mark_command_executed(self, command_index, command_type='main', success=True, stdout='', stderr='', execution_time=None):
+        """Mark a command as executed with results."""
+        if command_type == 'main':
+            if 0 <= command_index < len(self.commands):
+                self.commands[command_index].update({
+                    'status': 'success' if success else 'failed',
+                    'stdout': stdout,
+                    'stderr': stderr,
+                    'execution_time': execution_time
+                })
 
-
-
-
-
-
-
-        parts = line.split('=', 1)
-        if len(parts) == 2:
-            print(f"{parts[0]}= [HIDDEN]")
-        else:
-            print(line.replace('ak-sLhYqCjkvixiYcb9LAuCHp', '[HIDDEN]').replace('as-fPzD0Zm0dl6IFAEkhaH9pq', '[HIDDEN]'))
-    else:
-        print(line)
+                if success:
+                    self.executed_commands.append(self.commands[command_index])
+                    print(f"✅ Command {command_index + 1}/{self.total_commands} completed successfully")
+                else:
+                    self.failed_commands.append(self.commands[command_index])
+                    print(f"❌ Command {command_index + 1}/{self.total_commands} failed")
 
-
-
-
-
+            self.current_index = max(self.current_index, command_index + 1)
+
+        elif command_type == 'fix':
+            if 0 <= command_index < len(self.suggested_fixes):
+                self.suggested_fixes[command_index].update({
+                    'status': 'success' if success else 'failed',
+                    'stdout': stdout,
+                    'stderr': stderr,
+                    'execution_time': execution_time
+                })
 
-
-
-
-
-
+                if success:
+                    print(f"✅ Fix command {command_index + 1} completed successfully")
+                else:
+                    print(f"❌ Fix command {command_index + 1} failed")
+
+    def get_status_summary(self):
+        """Get a summary of command execution status."""
+        total_main = len(self.commands)
+        total_fixes = len(self.suggested_fixes)
+        executed_main = len([c for c in self.commands if c['status'] == 'success'])
+        failed_main = len([c for c in self.commands if c['status'] == 'failed'])
+        pending_main = len([c for c in self.commands if c['status'] == 'pending'])
+        executed_fixes = len([f for f in self.suggested_fixes if f['status'] == 'success'])
+        failed_fixes = len([f for f in self.suggested_fixes if f['status'] == 'failed'])
+
+        return {
+            'total_main_commands': total_main,
+            'executed_main_commands': executed_main,
+            'failed_main_commands': failed_main,
+            'pending_main_commands': pending_main,
+            'total_fix_commands': total_fixes,
+            'executed_fix_commands': executed_fixes,
+            'failed_fix_commands': failed_fixes,
+            'progress_percentage': (executed_main / total_main * 100) if total_main > 0 else 0
+        }
+
+    def print_status(self):
+        """Print current status of all commands."""
+        summary = self.get_status_summary()
+
+        print("\n" + "="*60)
+        print("📋 COMMAND EXECUTION STATUS")
+        print("="*60)
+
+        # Main commands status
+        print(f"📋 Main Commands: {summary['executed_main_commands']}/{summary['total_main_commands']} completed")
+        print(f"   ✅ Successful: {summary['executed_main_commands']}")
+        print(f"   ❌ Failed: {summary['failed_main_commands']}")
+        print(f"   ⏳ Pending: {summary['pending_main_commands']}")
+
+        # Fix commands status
+        if summary['total_fix_commands'] > 0:
+            print(f"🔧 Fix Commands: {summary['executed_fix_commands']}/{summary['total_fix_commands']} completed")
+            print(f"   ✅ Successful: {summary['executed_fix_commands']}")
+            print(f"   ❌ Failed: {summary['failed_fix_commands']}")
+
+        # Progress bar
+        progress = summary['progress_percentage']
+        bar_length = 30
+        filled_length = int(bar_length * progress / 100)
+        bar = '█' * filled_length + '░' * (bar_length - filled_length)
+        print(f"📊 Progress: [{bar}] {progress:.1f}%")
+
+        # Show current command if any
+        next_cmd, cmd_type = self.get_next_command()
+        if next_cmd:
+            cmd_type_str = "main" if cmd_type == 'main' else "fix"
+            cmd_text = next_cmd.get('command', next_cmd.get('fix_command', 'Unknown command'))
+            print(f"🔄 Current: {cmd_type_str} command - {cmd_text[:50]}...")
+
+        print("="*60)
+
+    def get_failed_commands_for_llm(self):
+        """Get failed commands for LLM analysis."""
+        failed_commands = []
+
+        # Get failed main commands
+        for cmd in self.commands:
+            if cmd['status'] == 'failed':
+                failed_commands.append({
+                    'command': cmd['command'],
+                    'stderr': cmd['stderr'],
+                    'stdout': cmd['stdout'],
+                    'type': 'main'
+                })
+
+        # Get failed fix commands
+        for fix in self.suggested_fixes:
+            if fix['status'] == 'failed':
+                failed_commands.append({
+                    'command': fix['fix_command'],
+                    'stderr': fix['stderr'],
+                    'stdout': fix['stdout'],
+                    'type': 'fix',
+                    'original_command': fix['original_command']
+                })
+
+        return failed_commands
+
+    def has_pending_commands(self):
+        """Check if there are any pending commands."""
+        return any(cmd['status'] == 'pending' for cmd in self.commands) or \
+               any(fix['status'] == 'pending' for fix in self.suggested_fixes)
+
+    def get_all_commands(self):
+        """Get all commands (main + fixes) in execution order."""
+        all_commands = []
+
+        # Add main commands
+        for cmd in self.commands:
+            all_commands.append({
+                **cmd,
+                'type': 'main'
+            })
+
+        # Add fix commands
+        for fix in self.suggested_fixes:
+            all_commands.append({
+                **fix,
+                'type': 'fix'
+            })
+
+        return all_commands
+
+    def analyze_failed_commands_with_llm(self, api_key=None, current_dir=None, sandbox=None):
+        """Analyze all failed commands using LLM and add suggested fixes."""
+        failed_commands = self.get_failed_commands_for_llm()
+
+        if not failed_commands:
+            print("✅ No failed commands to analyze")
+            return []
+
+        print(f"🔍 Analyzing {len(failed_commands)} failed commands with LLM...")
+
+        # Use batch debugging for efficiency
+        fixes = call_openai_for_batch_debug(failed_commands, api_key, current_dir, sandbox)
+
+        # Add the fixes to the command list
+        added_fixes = []
+        for fix in fixes:
+            fix_index = self.add_suggested_fix(
+                fix['original_command'],
+                fix['fix_command'],
+                fix['reason']
+            )
+            added_fixes.append(fix_index)
+
+        print(f"🔧 Added {len(added_fixes)} LLM-suggested fixes to command list")
+        return added_fixes
 
-if os.environ.get('MODAL_TOKEN_ID'):
-    print(f"🔍 Token ID length: {len(os.environ.get('MODAL_TOKEN_ID'))}")
-if os.environ.get('MODAL_TOKEN_SECRET'):
-    print(f"🔍 Token secret length: {len(os.environ.get('MODAL_TOKEN_SECRET'))}")
-if os.environ.get('MODAL_TOKEN'):
-    print(f"🔍 Token length: {len(os.environ.get('MODAL_TOKEN'))}")
 
-# Import
-
+# Import the fetch_modal_tokens module
+# print("🔄 Fetching tokens from proxy server...")
+from fetch_modal_tokens import get_tokens
+token_id, token_secret, openai_api_key = get_tokens()
+
+# Check if we got valid tokens
+if token_id is None or token_secret is None:
+    raise ValueError("Could not get valid tokens")
+
+print(f"✅ Tokens fetched successfully")
+
+# Explicitly set the environment variables again to be sure
+os.environ["MODAL_TOKEN_ID"] = token_id
+os.environ["MODAL_TOKEN_SECRET"] = token_secret
+os.environ["OPENAI_API_KEY"] = openai_api_key
+# Also set the old environment variable for backward compatibility
+os.environ["MODAL_TOKEN"] = token_id
+
+# Set token variables for later use
+token = token_id  # For backward compatibility
 
-def handle_interactive_input(prompt, is_password=False):
-    """Handle interactive input from the user with optional password masking"""
-    print("\n" + "="*60)
-    print(f"{prompt}")
-    print("="*60)
-
-    try:
-        if is_password:
-            user_input = getpass.getpass("Input (hidden): ").strip()
-        else:
-            user_input = input("Input: ").strip()
-
-        if not user_input:
-            print("❌ No input provided.")
-            return None
-        print("✅ Input received successfully!")
-        return user_input
-    except KeyboardInterrupt:
-        print("\n❌ Input cancelled by user.")
-        return None
-    except Exception as e:
-        print(f"❌ Error getting input: {e}")
-        return None
 
 def get_stored_credentials():
     """Load stored credentials from ~/.gitarsenal/credentials.json"""
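The CommandListManager added above is the centerpiece of this release: every setup command lives in a status-tracked list ('pending', 'success', 'failed'), and LLM-suggested fixes are queued behind the main commands. A minimal sketch of the intended call sequence, using only the methods introduced in this hunk (the command strings and the run_command helper are illustrative placeholders, not part of the package):

    mgr = CommandListManager([
        "git clone https://github.com/username/repo.git",
        "pip install -r requirements.txt",
    ])

    while mgr.has_pending_commands():
        entry, entry_type = mgr.get_next_command()
        if entry is None:
            break
        # 'main' entries carry 'command'; 'fix' entries carry 'fix_command'.
        cmd = entry.get('command', entry.get('fix_command'))
        success, stdout, stderr = run_command(cmd)  # stand-in for PersistentShell.execute
        mgr.mark_command_executed(entry['index'], entry_type, success, stdout, stderr)

    mgr.print_status()

Note that mark_command_executed advances current_index only for 'main' entries, so get_next_command's two-pass scan drains the main list before turning to the suggested fixes.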
@@ -747,7 +921,7 @@ System Information:
 
     if current_dir and sandbox:
         try:
-            print("🔍 Getting directory context for better debugging...")
+            # print("🔍 Getting directory context for better debugging...")
 
             # Get current directory contents
             ls_result = sandbox.exec("bash", "-c", "ls -la")
@@ -835,7 +1009,7 @@ Parent directory contents:
                 except Exception as e:
                     print(f"⚠️ Error getting content of {file_path}: {e}")
 
-            print(f"✅ Additional file context gathered from {len(relevant_files)} relevant files")
+            # print(f"✅ Additional file context gathered from {len(relevant_files)} relevant files")
 
         except Exception as e:
             print(f"⚠️ Error getting directory context: {e}")
@@ -849,12 +1023,6 @@ Parent directory contents:
 
     stored_credentials = get_stored_credentials()
     auth_context = generate_auth_context(stored_credentials)
-
-
-    print("DEBUG: AUTH_CONTEXT SENT TO LLM:")
-    print("="*60)
-    print(auth_context)
-    print("="*60 + "\n")
 
     # Create a prompt for the LLM
     print("\n" + "="*60)
@@ -923,11 +1091,18 @@ IMPORTANT GUIDELINES:
 - Do not use generic placeholders or dummy values
 - The auth_context contains real, usable credentials
 
+7. For Git SSH authentication failures:
+   - If the error contains "Host key verification failed" or "Could not read from remote repository"
+   - ALWAYS convert SSH URLs to HTTPS URLs for public repositories
+   - Replace git@github.com:username/repo.git with https://github.com/username/repo.git
+   - This works for public repositories without authentication
+   - Example: git clone https://github.com/xg-chu/ARTalk.git
+
 Do not provide any explanations, just the exact command to run.
 """
 
     # Prepare the API request payload
-    print("🔍 DEBUG: Preparing API request...")
+    # print("🔍 DEBUG: Preparing API request...")
 
     # Try to use GPT-4 first, but fall back to other models if needed
     models_to_try = [
@@ -939,16 +1114,16 @@ Do not provide any explanations, just the exact command to run.
     if preferred_model:
         # Insert the preferred model at the beginning of the list
        models_to_try.insert(0, preferred_model)
-        print(f"✅ Using preferred model from environment: {preferred_model}")
+        # print(f"✅ Using preferred model from environment: {preferred_model}")
 
     # Remove duplicates while preserving order
     models_to_try = list(dict.fromkeys(models_to_try))
-    print(f"🔍 DEBUG: Models to try: {models_to_try}")
+    # print(f"🔍 DEBUG: Models to try: {models_to_try}")
 
     # Function to make the API call with a specific model
     def try_api_call(model_name, retries=2, backoff_factor=1.5):
-        print(f"🔍 DEBUG: Attempting API call with model: {model_name}")
-        print(f"🔍 DEBUG: API key available: {'Yes' if api_key else 'No'}")
+        # print(f"🔍 DEBUG: Attempting API call with model: {model_name}")
+        # print(f"🔍 DEBUG: API key available: {'Yes' if api_key else 'No'}")
         # if api_key:
         #     print(f"🔍 DEBUG: API key length: {len(api_key)}")
         #     print(f"🔍 DEBUG: API key starts with: {api_key[:10]}...")
@@ -956,14 +1131,14 @@ Do not provide any explanations, just the exact command to run.
         payload = {
             "model": model_name,
             "messages": [
-                {"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue. Analyze the issue first, understand why it's happening, then provide the command to fix it. For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found. For missing packages, use appropriate package managers (pip, apt-get, npm). For authentication, suggest login commands with placeholders."},
+                {"role": "system", "content": "You are a debugging assistant. Provide only the terminal command to fix the issue. Analyze the issue first, understand why it's happening, then provide the command to fix it. For file not found errors, first search for the file using 'find . -name filename -type f' and navigate to the directory if found. For missing packages, use appropriate package managers (pip, apt-get, npm). For Git SSH authentication failures, always convert SSH URLs to HTTPS URLs (git@github.com:user/repo.git -> https://github.com/user/repo.git). For authentication, suggest login commands with placeholders."},
                 {"role": "user", "content": prompt}
             ],
             "temperature": 0.2,
             "max_tokens": 300
         }
 
-        print(f"🔍 DEBUG: Payload prepared, prompt length: {len(prompt)}")
+        # print(f"🔍 DEBUG: Payload prepared, prompt length: {len(prompt)}")
 
         # Add specific handling for common errors
         last_error = None
@@ -975,8 +1150,8 @@ Do not provide any explanations, just the exact command to run.
                     print(f"⏱️ Retrying in {wait_time:.1f} seconds... (attempt {attempt+1}/{retries+1})")
                     time.sleep(wait_time)
 
-                print(f"🤖 Calling OpenAI with {model_name} model to debug the failed command...")
-                print(f"🔍 DEBUG: Making POST request to OpenAI API...")
+                # print(f"🤖 Calling OpenAI with {model_name} model to debug the failed command...")
+                # print(f"🔍 DEBUG: Making POST request to OpenAI API...")
                 response = requests.post(
                     "https://api.openai.com/v1/chat/completions",
                     headers=headers,
@@ -984,45 +1159,12 @@ Do not provide any explanations, just the exact command to run.
                    timeout=45  # Increased timeout for reliability
                 )
 
-                print(f"🔍 DEBUG: Response received, status code: {response.status_code}")
+                # print(f"🔍 DEBUG: Response received, status code: {response.status_code}")
 
-                # Handle specific status codes
-                if response.status_code == 200:
-                    print(f"🔍 DEBUG: Success! Response length: {len(response.text)}")
-                    return response.json(), None
-                elif response.status_code == 401:
-                    error_msg = "Authentication error: Invalid API key"
-                    print(f"❌ {error_msg}")
-                    print(f"🔍 DEBUG: Response text: {response.text}")
-                    # Don't retry auth errors
-                    return None, error_msg
-                elif response.status_code == 429:
-                    error_msg = "Rate limit exceeded or quota reached"
-                    print(f"⚠️ {error_msg}")
-                    print(f"🔍 DEBUG: Response text: {response.text}")
-                    # Always retry rate limit errors with increasing backoff
-                    last_error = error_msg
-                    continue
-                elif response.status_code == 500:
-                    error_msg = "OpenAI server error"
-                    print(f"⚠️ {error_msg}")
-                    print(f"🔍 DEBUG: Response text: {response.text}")
-                    # Retry server errors
-                    last_error = error_msg
-                    continue
-                else:
-                    error_msg = f"Status code: {response.status_code}, Response: {response.text}"
-                    print(f"⚠️ OpenAI API error: {error_msg}")
-                    print(f"🔍 DEBUG: Full response text: {response.text}")
-                    last_error = error_msg
-                    # Only retry if we have attempts left
-                    if attempt < retries:
-                        continue
-                    return None, error_msg
             except requests.exceptions.Timeout:
                 error_msg = "Request timed out"
-                print(f"⚠️ {error_msg}")
-                print(f"🔍 DEBUG: Timeout after 45 seconds")
+                # print(f"⚠️ {error_msg}")
+                # print(f"🔍 DEBUG: Timeout after 45 seconds")
                 last_error = error_msg
                 # Always retry timeouts
                 continue
@@ -1155,6 +1297,126 @@ Do not provide any explanations, just the exact command to run.
         print(f"🔍 DEBUG: Exception details: {str(e)}")
         return None
 
+def call_openai_for_batch_debug(failed_commands, api_key=None, current_dir=None, sandbox=None):
+    """Call OpenAI to debug multiple failed commands and suggest fixes for all of them at once"""
+    print("\n🔍 DEBUG: Starting batch LLM debugging...")
+    print(f"🔍 DEBUG: Analyzing {len(failed_commands)} failed commands")
+
+    if not failed_commands:
+        print("⚠️ No failed commands to analyze")
+        return []
+
+    if not api_key:
+        print("❌ No OpenAI API key provided for batch debugging")
+        return []
+
+    # Prepare context for batch analysis
+    context_parts = []
+    context_parts.append(f"Current directory: {current_dir}")
+    context_parts.append(f"Sandbox available: {sandbox is not None}")
+
+    # Add failed commands with their errors
+    for i, failed_cmd in enumerate(failed_commands, 1):
+        cmd_type = failed_cmd.get('type', 'main')
+        original_cmd = failed_cmd.get('original_command', '')
+        cmd_text = failed_cmd['command']
+        stderr = failed_cmd.get('stderr', '')
+        stdout = failed_cmd.get('stdout', '')
+
+        context_parts.append(f"\n--- Failed Command {i} ({cmd_type}) ---")
+        context_parts.append(f"Command: {cmd_text}")
+        if original_cmd and original_cmd != cmd_text:
+            context_parts.append(f"Original Command: {original_cmd}")
+        if stderr:
+            context_parts.append(f"Error Output: {stderr}")
+        if stdout:
+            context_parts.append(f"Standard Output: {stdout}")
+
+    # Create the prompt for batch analysis
+    prompt = f"""You are a debugging assistant analyzing multiple failed commands.
+
+Context:
+{chr(10).join(context_parts)}
+
+Please analyze each failed command and provide a fix command for each one. For each failed command, respond with:
+
+FIX_COMMAND_{i}: <the fix command>
+REASON_{i}: <brief explanation of why the original command failed and how the fix addresses it>
+
+Guidelines:
+- For file not found errors, first search for the file using 'find . -name filename -type f'
+- For missing packages, use appropriate package managers (pip, apt-get, npm)
+- For Git SSH authentication failures, convert SSH URLs to HTTPS URLs
+- For permission errors, suggest commands with sudo if appropriate
+- For network issues, suggest retry commands or alternative URLs
+- Keep each fix command simple and focused on the specific error
+
+Provide fixes for all {len(failed_commands)} failed commands:"""
+
+    # Make the API call
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json"
+    }
+
+    payload = {
+        "model": "gpt-4o-mini",  # Use a more capable model for batch analysis
+        "messages": [
+            {"role": "system", "content": "You are a debugging assistant. Analyze failed commands and provide specific fix commands. Return only the fix commands and reasons in the specified format."},
+            {"role": "user", "content": prompt}
+        ],
+        "temperature": 0.1,
+        "max_tokens": 1000
+    }
+
+    try:
+        print(f"🤖 Calling OpenAI for batch debugging of {len(failed_commands)} commands...")
+        response = requests.post(
+            "https://api.openai.com/v1/chat/completions",
+            headers=headers,
+            json=payload,
+            timeout=60
+        )
+
+        if response.status_code == 200:
+            result = response.json()
+            content = result['choices'][0]['message']['content']
+            print(f"✅ Batch analysis completed")
+
+            # Parse the response to extract fix commands
+            fixes = []
+            for i in range(1, len(failed_commands) + 1):
+                fix_pattern = f"FIX_COMMAND_{i}: (.+)"
+                reason_pattern = f"REASON_{i}: (.+)"
+
+                fix_match = re.search(fix_pattern, content, re.MULTILINE)
+                reason_match = re.search(reason_pattern, content, re.MULTILINE)
+
+                if fix_match:
+                    fix_command = fix_match.group(1).strip()
+                    reason = reason_match.group(1).strip() if reason_match else "LLM suggested fix"
+
+                    # Clean up the fix command
+                    if fix_command.startswith('`') and fix_command.endswith('`'):
+                        fix_command = fix_command[1:-1]
+
+                    fixes.append({
+                        'original_command': failed_commands[i-1]['command'],
+                        'fix_command': fix_command,
+                        'reason': reason,
+                        'command_index': i-1
+                    })
+
+            print(f"🔧 Generated {len(fixes)} fix commands from batch analysis")
+            return fixes
+        else:
+            print(f"❌ OpenAI API error: {response.status_code} - {response.text}")
+            return []
+
+    except Exception as e:
+        print(f"❌ Error during batch debugging: {e}")
+        return []
+
 def prompt_for_hf_token():
     """Prompt user for Hugging Face token when needed"""
     # Try to use credentials manager first
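The new call_openai_for_batch_debug builds one prompt covering every failure and then re-parses the reply with per-index regexes. A sketch of the parsing contract it expects, with a hand-written reply standing in for the API response (the sample strings are illustrative):

    import re

    content = (
        "FIX_COMMAND_1: pip install requests\n"
        "REASON_1: The 'requests' module was not installed.\n"
        "FIX_COMMAND_2: git clone https://github.com/username/repo.git\n"
        "REASON_2: SSH auth failed; HTTPS works for public repositories.\n"
    )

    for i in range(1, 3):
        fix_match = re.search(f"FIX_COMMAND_{i}: (.+)", content, re.MULTILINE)
        reason_match = re.search(f"REASON_{i}: (.+)", content, re.MULTILINE)
        if fix_match:
            fix_command = fix_match.group(1).strip()
            reason = reason_match.group(1).strip() if reason_match else "LLM suggested fix"
            print(fix_command, "|", reason)

One caveat worth flagging: the prompt template is an f-string, so the literal "FIX_COMMAND_{i}" inside it is interpolated with the leftover value of i from the preceding enumerate loop; the model therefore sees only the final index (e.g. FIX_COMMAND_3) rather than a generic placeholder.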
@@ -1209,6 +1471,8 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
     # If GPU type is not specified, prompt for it
     if not gpu_type:
         gpu_type = prompt_for_gpu()
+    else:
+        print(f"✅ Using provided GPU type: {gpu_type}")
 
     # If repo URL is not specified, prompt for it
     if not repo_url:
@@ -1400,15 +1664,6 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
     modal_token = os.environ.get("MODAL_TOKEN_ID")
     print(f" - token in env: {'Yes' if modal_token else 'No'}")
     print(f" - Token length: {len(modal_token) if modal_token else 'N/A'}")
-
-    # Verify we can create a Modal app
-    try:
-        print("🔍 Testing app creation...")
-        app = modal.App(app_name)
-        print("✅ Created app successfully")
-    except Exception as e:
-        print(f"❌ Error creating app: {e}")
-        return None
 
     # Create SSH-enabled image
     try:
@@ -1416,14 +1671,14 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
 
         # Use a more stable CUDA base image and avoid problematic packages
         ssh_image = (
-            # modal.Image.from_registry("nvidia/cuda:12.4.0-
+            # modal.Image.from_registry("nvidia/cuda:12.4.0-devel-ubuntu22.04", add_python="3.11")
             modal.Image.debian_slim()
             .apt_install(
                 "openssh-server", "sudo", "curl", "wget", "vim", "htop", "git",
                 "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
                 "gpg", "ca-certificates", "software-properties-common"
             )
-            .
+            .uv_pip_install("uv", "modal", "requests", "openai")  # Remove problematic CUDA packages
             .run_commands(
                 # Create SSH directory
                 "mkdir -p /var/run/sshd",
@@ -1468,8 +1723,7 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
     # Define the SSH container function (remove image from decorator)
     @app.function(
         timeout=timeout_minutes * 60,  # Convert to seconds
-        gpu=
-        cpu=2,
+        gpu="A10G",
         serialized=True,
         volumes=volumes_config if volumes_config else None,
     )
@@ -1526,9 +1780,12 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
         # Start SSH service
         subprocess.run(["service", "ssh", "start"], check=True)
 
-        # Run setup commands if provided using PersistentShell
+        # Run setup commands if provided using PersistentShell and CommandListManager
         if setup_commands:
-            print(f"⚙️ Running {len(setup_commands)} setup commands with
+            print(f"⚙️ Running {len(setup_commands)} setup commands with dynamic command list...")
+
+            # Create command list manager
+            cmd_manager = CommandListManager(setup_commands)
 
             # Create persistent shell instance starting in /root
             shell = PersistentShell(working_dir="/root", timeout=120)
@@ -1537,55 +1794,136 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
                 # Start the persistent shell
                 shell.start()
 
-                # Execute
-
-
+                # Execute commands using the command list manager
+                while cmd_manager.has_pending_commands():
+                    # Get next command to execute
+                    next_cmd, cmd_type = cmd_manager.get_next_command()
 
-
+                    if not next_cmd:
+                        break
 
-
-
+                    # Print status before executing
+                    cmd_manager.print_status()
+
+                    # Execute the command
+                    if cmd_type == 'main':
+                        cmd_text = next_cmd['command']
+                        cmd_index = next_cmd['index']
+                        print(f"📋 Executing main command {cmd_index + 1}/{cmd_manager.total_commands}: {cmd_text}")
 
-
-
-
-
-
-
-
+                        start_time = time.time()
+                        success, stdout, stderr = shell.execute(cmd_text, timeout=120)
+                        execution_time = time.time() - start_time
+
+                        # Mark command as executed
+                        cmd_manager.mark_command_executed(
+                            cmd_index, 'main', success, stdout, stderr, execution_time
+                        )
+
+                        if not success:
+                            print(f"⚠️ Command failed, attempting LLM debugging...")
 
-
-
+                            # Call OpenAI for debugging
+                            try:
+                                current_dir = shell.get_cwd()
+                                api_key = os.environ.get("OPENAI_API_KEY")
 
-                                #
-
-                                fix_success, fix_stdout, fix_stderr = shell.execute(fix_command, timeout=120)
+                                # Use existing call_openai_for_debug function
+                                fix_command = call_openai_for_debug(cmd_text, stderr, api_key=api_key, current_dir=current_dir, sandbox=shell)
 
-                                if
-                                    print(f"
+                                if fix_command:
+                                    print(f"🔧 OpenAI suggested fix command: {fix_command}")
+
+                                    # Add the fix to the command list manager
+                                    fix_index = cmd_manager.add_suggested_fix(cmd_text, fix_command, "LLM suggested fix")
 
-                                    #
-                                    print(f"🔄
-
+                                    # Execute the fix command
+                                    print(f"🔄 Running suggested fix command: {fix_command}")
+                                    fix_start_time = time.time()
+                                    fix_success, fix_stdout, fix_stderr = shell.execute(fix_command, timeout=120)
+                                    fix_execution_time = time.time() - fix_start_time
 
-
-
+                                    # Mark fix command as executed
+                                    cmd_manager.mark_command_executed(
+                                        fix_index, 'fix', fix_success, fix_stdout, fix_stderr, fix_execution_time
+                                    )
+
+                                    if fix_success:
+                                        print(f"✅ Fix command succeeded")
+
+                                        # Retry the original command
+                                        print(f"🔄 Retrying original command: {cmd_text}")
+                                        retry_start_time = time.time()
+                                        retry_success, retry_stdout, retry_stderr = shell.execute(cmd_text, timeout=120)
+                                        retry_execution_time = time.time() - retry_start_time
+
+                                        # Update the original command status
+                                        cmd_manager.mark_command_executed(
+                                            cmd_index, 'main', retry_success, retry_stdout, retry_stderr, retry_execution_time
+                                        )
+
+                                        if retry_success:
+                                            print(f"✅ Original command succeeded after fix!")
+                                        else:
+                                            print(f"⚠️ Original command still failed after fix, continuing...")
                                     else:
-                                        print(f"
+                                        print(f"❌ Fix command failed: {fix_stderr}")
+                                        print(f"⚠️ Continuing with remaining commands...")
                                 else:
-                                    print(
+                                    print("❌ No fix suggested by OpenAI")
                                     print(f"⚠️ Continuing with remaining commands...")
-
-
+
+                            except Exception as debug_e:
+                                print(f"❌ LLM debugging failed: {debug_e}")
                                 print(f"⚠️ Continuing with remaining commands...")
-
-
-
-
-
-
+
+                    elif cmd_type == 'fix':
+                        cmd_text = next_cmd['fix_command']
+                        cmd_index = next_cmd['index']
+                        print(f"🔧 Executing fix command {cmd_index + 1}: {cmd_text}")
+
+                        start_time = time.time()
+                        success, stdout, stderr = shell.execute(cmd_text, timeout=120)
+                        execution_time = time.time() - start_time
+
+                        # Mark fix command as executed
+                        cmd_manager.mark_command_executed(
+                            cmd_index, 'fix', success, stdout, stderr, execution_time
+                        )
 
-
+                # After all commands are processed, do a final batch analysis of any remaining failed commands
+                failed_commands = cmd_manager.get_failed_commands_for_llm()
+                if failed_commands:
+                    print(f"\n🔍 Final batch analysis of {len(failed_commands)} failed commands...")
+                    current_dir = shell.get_cwd()
+                    api_key = os.environ.get("OPENAI_API_KEY")
+
+                    # Use batch analysis to get additional fixes
+                    additional_fixes = cmd_manager.analyze_failed_commands_with_llm(api_key, current_dir, shell)
+
+                    if additional_fixes:
+                        print(f"🔧 Executing {len(additional_fixes)} additional fix commands...")
+
+                        # Execute the additional fix commands
+                        for fix_index in additional_fixes:
+                            fix_cmd = cmd_manager.suggested_fixes[fix_index]
+                            cmd_text = fix_cmd['fix_command']
+                            print(f"🔧 Executing additional fix: {cmd_text}")
+
+                            start_time = time.time()
+                            success, stdout, stderr = shell.execute(cmd_text, timeout=120)
+                            execution_time = time.time() - start_time
+
+                            # Mark fix command as executed
+                            cmd_manager.mark_command_executed(
+                                fix_index, 'fix', success, stdout, stderr, execution_time
+                            )
+
+                # Print final status
+                print("\n" + "="*60)
+                print("🎉 SETUP COMMANDS EXECUTION COMPLETED")
+                print("="*60)
+                cmd_manager.print_status()
 
             except Exception as e:
                 print(f"❌ Error during setup command execution: {e}")
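The loop above gives each failure a three-step lifecycle: mark it failed, run the LLM's fix, then retry the original command and overwrite its status. The same transitions expressed directly against the manager API (the command strings and outputs are illustrative):

    mgr = CommandListManager(["make build"])
    entry, _ = mgr.get_next_command()

    # 1. The command fails.
    mgr.mark_command_executed(entry['index'], 'main', False, '', 'make: not found')

    # 2. A suggested fix is queued and executed.
    fix_index = mgr.add_suggested_fix("make build", "apt-get install -y make", "make was missing")
    mgr.mark_command_executed(fix_index, 'fix', True, '', '')

    # 3. The retry succeeds and replaces the failed status.
    mgr.mark_command_executed(entry['index'], 'main', True, '', '')

One wrinkle: step 1 already appended the entry to failed_commands, and the successful retry appends the same dict to executed_commands without removing it from failed_commands, so after a recovered failure both lists reference the command.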
@@ -2260,35 +2598,6 @@ def find_entry_point(repo_dir):
 
     return None
 
-def cleanup_modal_token():
-    """Delete token files and environment variables after SSH container is started"""
-    print("🧹 Cleaning up tokens for security...")
-
-    try:
-        # Remove token from environment variables
-        if "MODAL_TOKEN_ID" in os.environ:
-            del os.environ["MODAL_TOKEN_ID"]
-            # print("✅ Removed token ID from environment")
-
-        if "MODAL_TOKEN" in os.environ:
-            del os.environ["MODAL_TOKEN"]
-            # print("✅ Removed token from environment")
-
-        if "MODAL_TOKEN_SECRET" in os.environ:
-            del os.environ["MODAL_TOKEN_SECRET"]
-            # print("✅ Removed token secret from environment")
-
-        # Delete ~/.modal.toml file
-        home_dir = os.path.expanduser("~")
-        modal_toml = os.path.join(home_dir, ".modal.toml")
-        if os.path.exists(modal_toml):
-            os.remove(modal_toml)
-            # print(f"✅ Deleted token file at {modal_toml}")
-
-        # print("✅ Token cleanup completed successfully")
-    except Exception as e:
-        print(f"❌ Error during token cleanup: {e}")
-
 def cleanup_security_tokens():
     """Delete all security tokens and API keys after SSH container is started"""
     print("🧹 Cleaning up security tokens and API keys...")
@@ -2368,6 +2677,17 @@ def show_usage_examples():
 
     print("Available GPU Options:")
     print("  T4, L4, A10G, A100-40GB, A100-80GB, L40S, H100, H200, B200")
+    print()
+    print("GPU Selection Behavior:")
+    print("  • With --gpu: Uses specified GPU without prompting")
+    print("  • Without --gpu: Shows interactive GPU selection menu")
+    print()
+    print("Examples:")
+    print("  # Uses A10G without prompting:")
+    print("  gitarsenal --gpu A10G --repo-url https://github.com/username/repo.git")
+    print()
+    print("  # Shows interactive GPU selection menu:")
+    print("  gitarsenal --repo-url https://github.com/username/repo.git")
 
 def make_api_request_with_retry(url, payload, max_retries=2, timeout=180):
     """Make an API request with retry mechanism."""
@@ -2719,6 +3039,10 @@ def prompt_for_gpu():
     import tty
     import termios
 
+    print("\n🔧 GPU Selection Required")
+    print("No GPU type was specified with --gpu flag.")
+    print("Please select a GPU type for your container:")
+
     # Define available GPU types and their specifications
     gpu_specs = {
         'T4': {'gpu': 'T4', 'memory': '16GB'},
@@ -2814,20 +3138,41 @@ def prompt_for_gpu():
         print("\n🛑 Selection cancelled.")
         sys.exit(1)
     except Exception as e:
-        print(f"\n❌ Error: {e}")
+        print(f"\n❌ Error with interactive menu: {e}")
+        print("🔄 Falling back to simple text input...")
         # Fall back to simple input method
         try:
+            print("\n📊 Available GPU Options:")
+            for i, gpu_type in enumerate(options, 1):
+                specs = gpu_specs[gpu_type]
+                print(f"  {i}. {gpu_type} ({specs['memory']})")
+            print(f"  Default: A10G")
+
             choice = input("\n🔍 Select GPU type (number or name, default is A10G): ").strip()
             if not choice:
+                print("✅ Using default GPU: A10G")
                 return "A10G"
             if choice.isdigit():
                 index = int(choice) - 1
                 if 0 <= index < len(options):
-
+                    selected = options[index]
+                    print(f"✅ Selected GPU: {selected}")
+                    return selected
+                else:
+                    print(f"⚠️ Invalid number. Using default: A10G")
+                    return "A10G"
             elif choice in options:
+                print(f"✅ Selected GPU: {choice}")
                 return choice
-
-
+            else:
+                print(f"⚠️ Invalid choice '{choice}'. Using default: A10G")
+                return "A10G"
+        except KeyboardInterrupt:
+            print("\n🛑 Selection cancelled.")
+            sys.exit(1)
+        except Exception as fallback_error:
+            print(f"❌ Error in fallback input: {fallback_error}")
+            print("✅ Using default GPU: A10G")
             return "A10G"
 
 # Replace the existing GPU argument parsing in the main section
@@ -2837,7 +3182,7 @@ if __name__ == "__main__":
     import sys
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('--gpu', type=str, help='GPU type (e.g., A10G, T4, A100-80GB)')
+    parser.add_argument('--gpu', type=str, help='GPU type (e.g., A10G, T4, A100-80GB). If not provided, will prompt for GPU selection.')
    parser.add_argument('--repo-url', type=str, help='Repository URL to clone')
    parser.add_argument('--repo-name', type=str, help='Repository name override')
    parser.add_argument('--setup-commands', type=str, nargs='+', help='Setup commands to run (deprecated)')
@@ -2899,9 +3244,22 @@ if __name__ == "__main__":
    print("------------------------")
    print("\n✔ Dependencies checked")
 
-    #
-
-
+    # Use provided GPU argument or prompt for selection
+    if args.gpu:
+        gpu_type = args.gpu
+        # Validate the provided GPU type
+        valid_gpus = ['T4', 'L4', 'A10G', 'A100-40', 'A100-80', 'L40S', 'H100', 'H200', 'B200']
+        if gpu_type not in valid_gpus:
+            print(f"⚠️ Warning: '{gpu_type}' is not in the list of known GPU types.")
+            print(f"Available GPU types: {', '.join(valid_gpus)}")
+            print(f"Proceeding with '{gpu_type}' anyway...")
+        else:
+            print(f"✅ Using specified GPU: {gpu_type}")
+    else:
+        print("\n📋 No GPU type specified with --gpu flag.")
+        print("🔄 Prompting for GPU selection...")
+        gpu_type = prompt_for_gpu()
+        args.gpu = gpu_type
 
    # Display configuration after GPU selection
    print("\n📋 Container Configuration:")
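A final consistency note on the validation block above: valid_gpus spells the A100 entries 'A100-40' and 'A100-80', while the usage text and the --gpu help string advertise 'A100-40GB' and 'A100-80GB', so the documented names take the warning path:

    valid_gpus = ['T4', 'L4', 'A10G', 'A100-40', 'A100-80', 'L40S', 'H100', 'H200', 'B200']
    print('A100-80GB' in valid_gpus)  # False — triggers the "not in the list of known GPU types" warning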