gitarsenal-cli 1.4.10 → 1.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gitarsenal-cli",
3
- "version": "1.4.10",
3
+ "version": "1.5.1",
4
4
  "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -21,15 +21,16 @@
21
21
  "author": "",
22
22
  "license": "MIT",
23
23
  "dependencies": {
24
+ "boxen": "^5.1.2",
24
25
  "chalk": "^4.1.2",
25
- "inquirer": "^8.2.4",
26
- "ora": "^5.4.1",
27
26
  "commander": "^9.4.1",
28
- "which": "^3.0.0",
29
- "fs-extra": "^11.1.0",
30
27
  "execa": "^5.1.1",
31
- "boxen": "^5.1.2",
32
- "update-notifier": "^5.1.0"
28
+ "fs-extra": "^11.1.0",
29
+ "g": "^2.0.1",
30
+ "inquirer": "^8.2.4",
31
+ "ora": "^5.4.1",
32
+ "update-notifier": "^5.1.0",
33
+ "which": "^3.0.0"
33
34
  },
34
35
  "engines": {
35
36
  "node": ">=14.0.0"
@@ -1,8 +1,8 @@
1
1
  #!/usr/bin/env python3
2
2
  """
3
- Fetch Modal Tokens
3
+ Fetch Modal Tokens and OpenAI API Key
4
4
 
5
- This script fetches Modal tokens from the proxy server.
5
+ This script fetches Modal tokens and OpenAI API key from the proxy server.
6
6
  """
7
7
 
8
8
  import os
@@ -14,10 +14,10 @@ from pathlib import Path
14
14
 
15
15
  def fetch_default_tokens_from_gitarsenal():
16
16
  """
17
- Fetch default Modal tokens from gitarsenal.dev API.
17
+ Fetch default Modal tokens and OpenAI API key from gitarsenal.dev API.
18
18
 
19
19
  Returns:
20
- tuple: (token_id, token_secret) if successful, (None, None) otherwise
20
+ tuple: (token_id, token_secret, openai_api_key) if successful, (None, None, None) otherwise
21
21
  """
22
22
  endpoint = "https://gitarsenal.dev/api/credentials"
23
23
 
@@ -38,40 +38,41 @@ def fetch_default_tokens_from_gitarsenal():
38
38
  data = response.json()
39
39
  token_id = data.get("modalTokenId")
40
40
  token_secret = data.get("modalTokenSecret")
41
+ openai_api_key = data.get("openaiApiKey")
41
42
 
42
43
  if token_id and token_secret:
43
44
  print("✅ Successfully fetched default tokens from gitarsenal.dev")
44
- return token_id, token_secret
45
+ return token_id, token_secret, openai_api_key
45
46
  else:
46
47
  print("❌ Modal tokens not found in gitarsenal.dev response")
47
- return None, None
48
+ return None, None, None
48
49
  except json.JSONDecodeError:
49
50
  print("❌ Invalid JSON response from gitarsenal.dev")
50
- return None, None
51
+ return None, None, None
51
52
  else:
52
53
  print(f"❌ Failed to fetch from gitarsenal.dev: {response.status_code} - {response.text[:200]}")
53
- return None, None
54
+ return None, None, None
54
55
 
55
56
  except requests.exceptions.Timeout:
56
57
  print("❌ Request timeout when fetching from gitarsenal.dev")
57
- return None, None
58
+ return None, None, None
58
59
  except requests.exceptions.ConnectionError:
59
60
  print("❌ Connection failed to gitarsenal.dev")
60
- return None, None
61
+ return None, None, None
61
62
  except requests.exceptions.RequestException as e:
62
63
  print(f"❌ Request failed to gitarsenal.dev: {e}")
63
- return None, None
64
+ return None, None, None
64
65
 
65
66
  def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
66
67
  """
67
- Fetch Modal tokens from the proxy server.
68
+ Fetch Modal tokens and OpenAI API key from the proxy server.
68
69
 
69
70
  Args:
70
71
  proxy_url: URL of the proxy server
71
72
  api_key: API key for authentication
72
73
 
73
74
  Returns:
74
- tuple: (token_id, token_secret) if successful, (None, None) otherwise
75
+ tuple: (token_id, token_secret, openai_api_key) if successful, (None, None, None) otherwise
75
76
  """
76
77
  # Use environment variables if not provided
77
78
  if not proxy_url:
@@ -88,12 +89,12 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
88
89
  if not proxy_url:
89
90
  # print("❌ No proxy URL provided or found in environment")
90
91
  print("💡 Set MODAL_PROXY_URL environment variable or use --proxy-url argument")
91
- return None, None
92
+ return None, None, None
92
93
 
93
94
  if not api_key:
94
95
  print("❌ No API key provided or found in environment")
95
96
  print("💡 Set MODAL_PROXY_API_KEY environment variable or use --proxy-api-key argument")
96
- return None, None
97
+ return None, None, None
97
98
 
98
99
  # Ensure the URL ends with a slash
99
100
  if not proxy_url.endswith("/"):
@@ -115,54 +116,60 @@ def fetch_tokens_from_proxy(proxy_url=None, api_key=None):
115
116
  data = response.json()
116
117
  token_id = data.get("token_id")
117
118
  token_secret = data.get("token_secret")
119
+ openai_api_key = data.get("openai_api_key")
118
120
 
119
121
  if token_id and token_secret:
120
122
  print("✅ Successfully fetched tokens from proxy server")
121
- return token_id, token_secret
123
+ return token_id, token_secret, openai_api_key
122
124
  else:
123
125
  print("❌ Tokens not found in response")
124
- return None, None
126
+ return None, None, None
125
127
  else:
126
128
  print(f"❌ Failed to fetch tokens: {response.status_code} - {response.text}")
127
- return None, None
129
+ return None, None, None
128
130
  except Exception as e:
129
131
  print(f"❌ Error fetching tokens: {e}")
130
- return None, None
132
+ return None, None, None
131
133
 
132
134
  def get_tokens():
133
135
  """
134
- Get Modal tokens, trying to fetch from the proxy server first.
136
+ Get Modal tokens and OpenAI API key, trying to fetch from the proxy server first.
135
137
  Also sets the tokens in environment variables.
136
138
 
137
139
  Returns:
138
- tuple: (token_id, token_secret)
140
+ tuple: (token_id, token_secret, openai_api_key)
139
141
  """
140
142
  # Try to fetch from the proxy server
141
- token_id, token_secret = fetch_tokens_from_proxy()
143
+ token_id, token_secret, openai_api_key = fetch_tokens_from_proxy()
142
144
 
143
145
  # If we couldn't fetch from the server, try to get default tokens from gitarsenal.dev
144
146
  if not token_id or not token_secret:
145
147
  print("⚠️ Proxy server failed, trying to fetch default tokens from gitarsenal.dev")
146
- token_id, token_secret = fetch_default_tokens_from_gitarsenal()
148
+ token_id, token_secret, openai_api_key = fetch_default_tokens_from_gitarsenal()
147
149
 
148
150
  # If we still don't have tokens, we can't proceed
149
151
  if not token_id or not token_secret:
150
152
  print("❌ Failed to fetch tokens from both proxy server and gitarsenal.dev")
151
153
  print("💡 Please check your network connection and API endpoints")
152
- return None, None
154
+ return None, None, None
153
155
 
154
156
  # Set the tokens in environment variables
155
157
  os.environ["MODAL_TOKEN_ID"] = token_id
156
158
  os.environ["MODAL_TOKEN_SECRET"] = token_secret
157
159
  # print(f"✅ Set MODAL_TOKEN_ID and MODAL_TOKEN_SECRET environment variables")
158
160
 
159
- return token_id, token_secret
161
+ # Set OpenAI API key if available
162
+ if openai_api_key:
163
+ os.environ["OPENAI_API_KEY"] = openai_api_key
164
+ print(f"✅ Set OPENAI_API_KEY environment variable")
165
+
166
+ return token_id, token_secret, openai_api_key
160
167
 
161
168
  if __name__ == "__main__":
162
169
  # Parse command-line arguments if run directly
163
170
  import argparse
164
171
 
165
- parser = argparse.ArgumentParser(description='Fetch Modal tokens from the proxy server')
172
+ parser = argparse.ArgumentParser(description='Fetch Modal tokens and OpenAI API key from the proxy server')
166
173
  parser.add_argument('--proxy-url', help='URL of the proxy server')
167
174
  parser.add_argument('--proxy-api-key', help='API key for the proxy server')
168
175
  args = parser.parse_args()
@@ -177,25 +184,30 @@ if __name__ == "__main__":
177
184
  print(f"✅ Set MODAL_PROXY_API_KEY from command line")
178
185
 
179
186
  # Get tokens
180
- token_id, token_secret = get_tokens()
187
+ token_id, token_secret, openai_api_key = get_tokens()
181
188
  print(f"Token ID: {token_id}")
182
189
  print(f"Token Secret: {token_secret}")
190
+ print(f"OpenAI API Key: {openai_api_key[:5] + '...' if openai_api_key else None}")
183
191
 
184
192
  # Check if tokens are set in environment variables
185
193
  print(f"\n🔍 DEBUG: Checking environment variables")
186
194
  print(f"🔍 MODAL_TOKEN_ID exists: {'Yes' if os.environ.get('MODAL_TOKEN_ID') else 'No'}")
187
195
  print(f"🔍 MODAL_TOKEN_SECRET exists: {'Yes' if os.environ.get('MODAL_TOKEN_SECRET') else 'No'}")
196
+ print(f"🔍 OPENAI_API_KEY exists: {'Yes' if os.environ.get('OPENAI_API_KEY') else 'No'}")
188
197
  if os.environ.get('MODAL_TOKEN_ID'):
189
198
  print(f"🔍 MODAL_TOKEN_ID length: {len(os.environ.get('MODAL_TOKEN_ID'))}")
190
199
  if os.environ.get('MODAL_TOKEN_SECRET'):
191
200
  print(f"🔍 MODAL_TOKEN_SECRET length: {len(os.environ.get('MODAL_TOKEN_SECRET'))}")
201
+ if os.environ.get('OPENAI_API_KEY'):
202
+ print(f"🔍 OPENAI_API_KEY length: {len(os.environ.get('OPENAI_API_KEY'))}")
192
203
 
193
204
  # Write the tokens to a file for use by other scripts
194
205
  tokens_file = Path(__file__).parent / "modal_tokens.json"
195
206
  with open(tokens_file, 'w') as f:
196
207
  json.dump({
197
208
  "token_id": token_id,
198
- "token_secret": token_secret
209
+ "token_secret": token_secret,
210
+ "openai_api_key": openai_api_key
199
211
  }, f)
200
212
  print(f"\n✅ Tokens written to {tokens_file}")
201
213
 
@@ -216,6 +228,27 @@ if __name__ == "__main__":
216
228
  f.write(f"token_secret = {token_secret}\n")
217
229
  print(f"✅ Created .modalconfig file at {modalconfig_file}")
218
230
 
231
+ # Create or update .env file with OpenAI API key
232
+ env_file = Path.home() / ".env"
233
+ env_content = ""
234
+ if env_file.exists():
235
+ with open(env_file, 'r') as f:
236
+ env_content = f.read()
237
+
238
+ # Update or add OPENAI_API_KEY
239
+ if openai_api_key:
240
+ if "OPENAI_API_KEY" in env_content:
241
+ # Replace existing key
242
+ import re
243
+ env_content = re.sub(r'OPENAI_API_KEY=.*\n', f'OPENAI_API_KEY={openai_api_key}\n', env_content)
244
+ else:
245
+ # Add new key
246
+ env_content += f'\nOPENAI_API_KEY={openai_api_key}\n'
247
+
248
+ with open(env_file, 'w') as f:
249
+ f.write(env_content)
250
+ print(f"✅ Updated OpenAI API key in {env_file}")
251
+
219
252
  # Try to use the Modal CLI to set the token
220
253
  try:
221
254
  print(f"\n🔄 Setting token via Modal CLI...")
@@ -0,0 +1,71 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Fix commands for Modal SSH container
4
+ This script fixes common issues with commands in the Modal SSH container
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import subprocess
10
+
11
def fix_command(cmd):
    """Rewrite a shell command so it runs under a plain POSIX sh shell.

    Two fixes are applied:
      1. The bash-only ``source`` builtin is replaced with the portable
         ``.`` command.
      2. A bare ``uv pip install`` with no venv mentioned anywhere in the
         command gets venv creation + activation prepended.

    Args:
        cmd: Original shell command string.

    Returns:
        The possibly-rewritten command string.
    """
    import re

    # Replace the word 'source' with '.' for sh shell.
    # A word-boundary match avoids corrupting substrings such as
    # 'resources' or 'sourceforge', which a plain str.replace() would hit.
    if re.search(r'\bsource\b', cmd):
        cmd = re.sub(r'\bsource\b', '.', cmd)
        print(f"✅ Fixed: Replaced 'source' with '.' for sh shell")

    # A combined 'uv venv ... && source .../activate' command is already
    # covered by the substitution above; nothing extra to do for it.

    # Fix uv pip install without an active venv: create and activate one first.
    if 'uv pip install' in cmd and 'venv' not in cmd:
        cmd = f"uv venv .venv && . .venv/bin/activate && {cmd}"
        print(f"✅ Fixed: Added venv creation and activation before pip install")

    return cmd
31
+
32
def run_fixed_command(cmd):
    """Repair *cmd* via fix_command(), execute it, and report the outcome.

    Returns:
        bool: True when the process exited with status 0.
    """
    repaired = fix_command(cmd)
    print(f"\n🔧 Running fixed command: {repaired}")

    # Execute through the shell, capturing both streams as text.
    completed = subprocess.run(repaired, shell=True, text=True, capture_output=True)

    # Echo whatever the process produced, stream by stream.
    for label, stream in (("Output", completed.stdout), ("Error", completed.stderr)):
        if stream:
            print(f"\n--- {label} ---\n{stream}")

    return completed.returncode == 0
48
+
49
def main():
    """CLI entry point: repair and run the command given on the command line.

    Returns:
        int: 0 on success, 1 on usage error or command failure.
    """
    if len(sys.argv) < 2:
        print("Usage: python fix_commands.py 'command to fix'")
        return 1

    # Everything after the script name is treated as one shell command.
    command = ' '.join(sys.argv[1:])
    print(f"🔍 Original command: {command}")

    ok = run_fixed_command(command)

    print("\n✅ Command executed successfully" if ok else "\n❌ Command failed")

    return 0 if ok else 1
69
+
70
+ if __name__ == "__main__":
71
+ sys.exit(main())
@@ -35,7 +35,7 @@ try:
35
35
  # Fall back to the basic implementation
36
36
  print("🔄 Falling back to basic implementation")
37
37
  except Exception as e:
38
- print(f"❌ Error running advanced Modal token fixer: {e}")
38
+ # print(f"❌ Error running advanced Modal token fixer: {e}")
39
39
  print("🔄 Falling back to basic implementation")
40
40
 
41
41
  # Try to get tokens from the proxy server
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Fix setup commands for open-r1 repository
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import subprocess
9
+
10
# Original problematic commands.
# These use the bash-only `source` builtin (fails under plain sh) and run
# `uv pip install` without re-activating the venv in each step.
original_commands = [
    "git clone https://github.com/huggingface/open-r1.git",
    "cd open-r1",
    "uv venv openr1 --python 3.11 && source openr1/bin/activate && uv pip install --upgrade pip",
    "uv pip install vllm==0.8.5.post1",
    "uv pip install setuptools && uv pip install flash-attn --no-build-isolation"
]

# Fixed commands: same steps with `source` replaced by the portable `.` and
# an explicit activation prefixed to every install step (each entry runs in
# its own subprocess, so activation does not carry over between entries).
fixed_commands = [
    "git clone https://github.com/huggingface/open-r1.git",
    "cd open-r1",
    "uv venv openr1 --python 3.11 && . openr1/bin/activate && uv pip install --upgrade pip",
    ". openr1/bin/activate && uv pip install vllm==0.8.5.post1",
    ". openr1/bin/activate && uv pip install setuptools && uv pip install flash-attn --no-build-isolation"
]
27
+
28
def run_command(cmd, cwd=None):
    """Execute *cmd* through the shell and report what happened.

    Args:
        cmd: Shell command line to run.
        cwd: Optional working directory for the subprocess.

    Returns:
        tuple: (success, stdout, stderr) where success is True iff the
        process exited with status 0.
    """
    print(f"\n▶ {cmd}")

    try:
        proc = subprocess.run(
            cmd,
            shell=True,
            text=True,
            capture_output=True,
            cwd=cwd,
        )

        # Echo captured streams so the caller sees what happened.
        if proc.stdout:
            print(proc.stdout)
        if proc.stderr:
            print(f"Error: {proc.stderr}")

        return proc.returncode == 0, proc.stdout, proc.stderr
    except Exception as e:
        # Launch failure (not a nonzero exit): report and signal failure.
        print(f"Error executing command: {e}")
        return False, "", str(e)
54
+
55
def main():
    """Run the fixed open-r1 setup commands in order.

    Changes into an ``open-r1`` checkout when one exists inside the current
    directory, then executes each entry of ``fixed_commands``, skipping
    steps that are already done (existing clone, already in target dir).
    On a non-``cd`` failure the user is asked whether to continue.

    Returns:
        int: 0 on completion, 1 when the user aborts after a failure.
    """
    print("🔧 Fixing setup commands for open-r1 repository")

    # Check if we're in the right directory
    cwd = os.getcwd()
    print(f"📂 Current directory: {cwd}")

    # Check if we need to change directory
    if not cwd.endswith('open-r1'):
        parent_dir = cwd  # NOTE(review): assigned but never used afterwards
        # Check if open-r1 exists in the current directory
        if os.path.exists(os.path.join(cwd, 'open-r1')):
            print(f"📂 Found open-r1 directory, changing to it")
            os.chdir(os.path.join(cwd, 'open-r1'))
            cwd = os.getcwd()
            print(f"📂 New current directory: {cwd}")

    # Run the fixed commands
    for i, cmd in enumerate(fixed_commands):
        print(f"\n📋 Running command {i+1}/{len(fixed_commands)}: {cmd}")

        # Skip git clone if the directory already exists
        if cmd.startswith("git clone") and os.path.exists("open-r1"):
            print("✅ Repository already cloned, skipping")
            continue

        # Skip cd if we're already in the right directory
        if cmd.startswith("cd "):
            target_dir = cmd.split(" ", 1)[1]
            if cwd.endswith(target_dir):
                print(f"✅ Already in {target_dir}, skipping")
                continue

        # Run the command
        success, stdout, stderr = run_command(cmd)

        # Check if the command succeeded
        if not success:
            print(f"❌ Command failed: {cmd}")
            print(f"❌ Error: {stderr}")

            # If this is a cd command, try to continue — a `cd` run in a
            # subprocess cannot change this process's directory anyway.
            if cmd.startswith("cd "):
                print("⚠️ Directory change failed, but continuing with next command")
                continue

            # For other commands, ask if the user wants to continue
            try:
                choice = input("Continue with next command? (y/n): ").strip().lower()
                if choice != 'y':
                    print("🛑 Stopping execution")
                    return 1
            except:
                # Bare except keeps EOF/Ctrl-D on stdin from crashing the loop.
                print("🛑 Stopping execution due to error")
                return 1

    print("\n✅ All commands executed")
    return 0
114
+
115
+ if __name__ == "__main__":
116
+ sys.exit(main())
@@ -0,0 +1,75 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Patch the test_modalSandboxScript.py file to fix issues with LLM debugging
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import re
9
+
10
def read_file(filename):
    """Return the entire text content of *filename*."""
    with open(filename, 'r') as handle:
        return handle.read()
14
+
15
def write_file(filename, content):
    """Overwrite *filename* with the text *content*."""
    with open(filename, 'w') as handle:
        handle.write(content)
19
+
20
def patch_file(filename):
    """Patch *filename* in place to fix LLM-debugging command issues.

    A backup copy is written to ``<filename>.bak`` first, then two textual
    fixes are applied:
      1. ``source`` is replaced with the portable ``.`` inside
         ``uv venv ... activate`` command strings;
      2. a venv activation is prepended to ``uv pip install`` commands.

    Args:
        filename: Path of the script to patch.

    Returns:
        bool: True on completion (I/O errors propagate as exceptions).
    """
    # The registry-rendered original printed the literal "(unknown)" here;
    # the filename is restored into these messages and the backup name.
    print(f"🔧 Patching {filename}...")

    # Read the file
    content = read_file(filename)

    # Make a backup next to the original before modifying anything.
    backup_filename = f"{filename}.bak"
    write_file(backup_filename, content)
    print(f"✅ Created backup: {backup_filename}")

    # Fix 1: Replace 'source' with '.' in venv-activation commands.
    pattern1 = r'(uv venv.*?)source(.*?activate)'
    replacement1 = r'\1.\2'
    content = re.sub(pattern1, replacement1, content)
    print("✅ Fixed 'source' commands")

    # Fix 2: Add environment activation before uv pip commands.
    # NOTE(review): the lookahead here inspects only the text AFTER the
    # lazy match, so already-activated commands may still get a prefix —
    # confirm against real input before relying on this substitution.
    pattern2 = r'(uv pip install.*?)(?!\. .*?activate)'
    replacement2 = r'. openr1/bin/activate && \1'
    content = re.sub(pattern2, replacement2, content)
    print("✅ Added environment activation before pip commands")

    # Write the patched file
    write_file(filename, content)
    print(f"✅ Patched file written: {filename}")

    return True
49
+
50
def main():
    """Patch the target script named on the command line (or the default).

    Returns:
        int: 0 when patching succeeded, 1 when the file is missing or
        patch_file() reported failure.
    """
    # Get the file to patch
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        # Default to the test_modalSandboxScript.py in the current directory
        filename = "test_modalSandboxScript.py"

    # Check if the file exists.  The registry-rendered original printed the
    # literal "(unknown)" here; the actual filename is restored.
    if not os.path.exists(filename):
        print(f"❌ File not found: {filename}")
        return 1

    # Patch the file
    success = patch_file(filename)

    if success:
        print("\n✅ Patching completed successfully")
        return 0
    else:
        print("\n❌ Patching failed")
        return 1
73
+
74
+ if __name__ == "__main__":
75
+ sys.exit(main())
@@ -0,0 +1,120 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script to verify LLM debugging functionality
4
+ """
5
+
6
+ import os
7
+ import sys
8
+ import requests
9
+ import getpass
10
+
11
def test_openai_connection():
    """Smoke-test connectivity to the OpenAI chat-completions API.

    Reads the API key from the OPENAI_API_KEY environment variable and,
    when present, issues a minimal gpt-4o-mini chat request.

    Returns:
        bool: True when the API answered 200 OK, False otherwise
        (missing key, non-200 status, or any request exception).
    """
    print("🔍 Testing OpenAI API connection...")

    # Try to get API key — environment variable only, no other sources.
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        print("❌ No OPENAI_API_KEY environment variable found")
        print("💡 Please set your OpenAI API key:")
        print(" export OPENAI_API_KEY='your-api-key-here'")
        return False

    print(f"✅ Found API key (length: {len(api_key)})")

    # Test API connection with a bearer-token header.
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    # Minimal one-message payload; small max_tokens keeps the call cheap.
    payload = {
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "user", "content": "Say 'Hello, LLM debugging is working!'"}
        ],
        "max_tokens": 50
    }

    try:
        print("🤖 Testing API call...")
        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=30
        )

        if response.status_code == 200:
            result = response.json()
            message = result["choices"][0]["message"]["content"]
            print(f"✅ API test successful: {message}")
            return True
        else:
            print(f"❌ API test failed with status code: {response.status_code}")
            print(f"Response: {response.text}")
            return False

    except Exception as e:
        # Broad catch is deliberate: any failure mode means "not working".
        print(f"❌ API test failed with exception: {e}")
        return False
61
+
62
def test_llm_debug_function():
    """Exercise call_openai_for_debug() from the sibling main script.

    Imports test_modalSandboxScript at call time (presumably located next
    to this script — hence the sys.path insert) and feeds it a
    deliberately failing `ls` command.

    Returns:
        bool: True when the function produced a suggestion, False on a
        None result or any import/runtime error.
    """
    print("\n🔍 Testing LLM debug function...")

    # Import the function from the main script
    try:
        # Add the current directory to Python path so the sibling module resolves.
        sys.path.insert(0, os.path.dirname(__file__))

        # Import the function
        from test_modalSandboxScript import call_openai_for_debug

        # Test with a simple command failure
        test_command = "ls /nonexistent/directory"
        test_error = "ls: cannot access '/nonexistent/directory': No such file or directory"

        print(f"🧪 Testing with command: {test_command}")
        print(f"🧪 Error: {test_error}")

        result = call_openai_for_debug(test_command, test_error)

        if result:
            print(f"✅ LLM debug function returned: {result}")
            return True
        else:
            # None means the helper could not produce a fix suggestion.
            print("❌ LLM debug function returned None")
            return False

    except Exception as e:
        print(f"❌ Error testing LLM debug function: {e}")
        return False
93
+
94
if __name__ == "__main__":
    # Entry point: run both checks and print a pass/fail summary.
    print("🧪 Testing LLM debugging functionality...")
    print("=" * 60)

    # Test 1: OpenAI API connection
    api_ok = test_openai_connection()

    # Test 2: LLM debug function — only attempted when the API works,
    # since it depends on the same key and endpoint.
    if api_ok:
        debug_ok = test_llm_debug_function()
    else:
        debug_ok = False

    print("\n" + "=" * 60)
    print("📊 Test Results:")
    print(f" OpenAI API Connection: {'✅ PASS' if api_ok else '❌ FAIL'}")
    print(f" LLM Debug Function: {'✅ PASS' if debug_ok else '❌ FAIL'}")

    if api_ok and debug_ok:
        print("\n🎉 All tests passed! LLM debugging should work.")
    else:
        # Give the user actionable remediation steps for the common case.
        print("\n⚠️ Some tests failed. LLM debugging may not work properly.")
        if not api_ok:
            print("💡 To fix OpenAI API issues:")
            print(" 1. Get an API key from https://platform.openai.com/api-keys")
            print(" 2. Set it as environment variable: export OPENAI_API_KEY='your-key'")
            print(" 3. Run this test again")
@@ -38,7 +38,7 @@ try:
38
38
  # Import the fetch_modal_tokens module
39
39
  # print("🔄 Fetching tokens from proxy server...")
40
40
  from fetch_modal_tokens import get_tokens
41
- token_id, token_secret = get_tokens()
41
+ token_id, token_secret, openai_api_key = get_tokens()
42
42
 
43
43
  # Check if we got valid tokens
44
44
  if token_id is None or token_secret is None:
@@ -49,7 +49,7 @@ try:
49
49
  # Explicitly set the environment variables again to be sure
50
50
  os.environ["MODAL_TOKEN_ID"] = token_id
51
51
  os.environ["MODAL_TOKEN_SECRET"] = token_secret
52
-
52
+ os.environ["OPENAI_API_KEY"] = openai_api_key
53
53
  # Also set the old environment variable for backward compatibility
54
54
  os.environ["MODAL_TOKEN"] = token_id
55
55
 
@@ -331,6 +331,12 @@ def handle_interactive_command(cmd, sandbox, current_dir):
331
331
 
332
332
  def call_openai_for_debug(command, error_output, api_key=None, current_dir=None, sandbox=None):
333
333
  """Call OpenAI to debug a failed command and suggest a fix"""
334
+ print("\n🔍 DEBUG: Starting OpenAI LLM debugging...")
335
+ print(f"🔍 DEBUG: Command: {command}")
336
+ print(f"🔍 DEBUG: Error output length: {len(error_output) if error_output else 0}")
337
+ print(f"🔍 DEBUG: Current directory: {current_dir}")
338
+ print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")
339
+
334
340
  # Define _to_str function locally to avoid NameError
335
341
  def _to_str(maybe_bytes):
336
342
  try:
@@ -359,8 +365,11 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
359
365
 
360
366
  # Try to get API key from multiple sources
361
367
  if not api_key:
368
+ print("🔍 DEBUG: No API key provided, searching for one...")
369
+
362
370
  # First try environment variable
363
371
  api_key = os.environ.get("OPENAI_API_KEY")
372
+ print(f"🔍 DEBUG: API key from environment: {'Found' if api_key else 'Not found'}")
364
373
 
365
374
  # Store the API key in a persistent file if found
366
375
  if api_key:
@@ -376,28 +385,38 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
376
385
  if not api_key:
377
386
  try:
378
387
  key_file = os.path.expanduser("~/.gitarsenal/openai_key")
388
+ print(f"🔍 DEBUG: Checking for saved API key at: {key_file}")
379
389
  if os.path.exists(key_file):
380
390
  with open(key_file, "r") as f:
381
391
  api_key = f.read().strip()
382
392
  if api_key:
383
393
  print("✅ Loaded OpenAI API key from saved file")
394
+ print(f"🔍 DEBUG: API key length: {len(api_key)}")
384
395
  # Also set in environment for this session
385
396
  os.environ["OPENAI_API_KEY"] = api_key
397
+ else:
398
+ print("🔍 DEBUG: Saved file exists but is empty")
399
+ else:
400
+ print("🔍 DEBUG: No saved API key file found")
386
401
  except Exception as e:
387
402
  print(f"⚠️ Could not load saved API key: {e}")
388
403
 
389
404
  # Then try credentials manager
390
405
  if not api_key:
406
+ print("🔍 DEBUG: Trying credentials manager...")
391
407
  try:
392
408
  from credentials_manager import CredentialsManager
393
409
  credentials_manager = CredentialsManager()
394
410
  api_key = credentials_manager.get_openai_api_key()
395
- except ImportError:
411
+ print(f"🔍 DEBUG: API key from credentials manager: {'Found' if api_key else 'Not found'}")
412
+ except ImportError as e:
413
+ print(f"🔍 DEBUG: Credentials manager not available: {e}")
396
414
  # Fall back to direct input if credentials_manager is not available
397
415
  pass
398
416
 
399
417
  # Finally, prompt the user if still no API key
400
418
  if not api_key:
419
+ print("🔍 DEBUG: No API key found in any source, prompting user...")
401
420
  print("\n" + "="*60)
402
421
  print("🔑 OPENAI API KEY REQUIRED FOR DEBUGGING")
403
422
  print("="*60)
@@ -421,6 +440,14 @@ def call_openai_for_debug(command, error_output, api_key=None, current_dir=None,
421
440
  print(f"❌ Error getting API key: {e}")
422
441
  return None
423
442
 
443
+ # If we still don't have an API key, we can't proceed
444
+ if not api_key:
445
+ print("❌ No OpenAI API key available. Cannot perform LLM debugging.")
446
+ print("💡 To enable LLM debugging, set the OPENAI_API_KEY environment variable")
447
+ return None
448
+
449
+ print(f"✅ OpenAI API key available (length: {len(api_key)})")
450
+
424
451
  # Gather additional context to help with debugging
425
452
  directory_context = ""
426
453
  system_info = ""
@@ -606,6 +633,8 @@ Do not provide any explanations, just the exact command to run.
606
633
  """
607
634
 
608
635
  # Prepare the API request payload
636
+ print("🔍 DEBUG: Preparing API request...")
637
+
609
638
  # Try to use GPT-4 first, but fall back to other models if needed
610
639
  models_to_try = [
611
640
  "gpt-4o-mini", # First choice: GPT-4o (most widely available)
@@ -620,9 +649,16 @@ Do not provide any explanations, just the exact command to run.
620
649
 
621
650
  # Remove duplicates while preserving order
622
651
  models_to_try = list(dict.fromkeys(models_to_try))
652
+ print(f"🔍 DEBUG: Models to try: {models_to_try}")
623
653
 
624
654
  # Function to make the API call with a specific model
625
655
  def try_api_call(model_name, retries=2, backoff_factor=1.5):
656
+ print(f"🔍 DEBUG: Attempting API call with model: {model_name}")
657
+ print(f"🔍 DEBUG: API key available: {'Yes' if api_key else 'No'}")
658
+ if api_key:
659
+ print(f"🔍 DEBUG: API key length: {len(api_key)}")
660
+ print(f"🔍 DEBUG: API key starts with: {api_key[:10]}...")
661
+
626
662
  payload = {
627
663
  "model": model_name,
628
664
  "messages": [
@@ -633,6 +669,8 @@ Do not provide any explanations, just the exact command to run.
633
669
  "max_tokens": 300
634
670
  }
635
671
 
672
+ print(f"🔍 DEBUG: Payload prepared, prompt length: {len(prompt)}")
673
+
636
674
  # Add specific handling for common errors
637
675
  last_error = None
638
676
  for attempt in range(retries + 1):
@@ -644,6 +682,7 @@ Do not provide any explanations, just the exact command to run.
644
682
  time.sleep(wait_time)
645
683
 
646
684
  print(f"🤖 Calling OpenAI with {model_name} model to debug the failed command...")
685
+ print(f"🔍 DEBUG: Making POST request to OpenAI API...")
647
686
  response = requests.post(
648
687
  "https://api.openai.com/v1/chat/completions",
649
688
  headers=headers,
@@ -651,29 +690,36 @@ Do not provide any explanations, just the exact command to run.
651
690
  timeout=45 # Increased timeout for reliability
652
691
  )
653
692
 
693
+ print(f"🔍 DEBUG: Response received, status code: {response.status_code}")
694
+
654
695
  # Handle specific status codes
655
696
  if response.status_code == 200:
697
+ print(f"🔍 DEBUG: Success! Response length: {len(response.text)}")
656
698
  return response.json(), None
657
699
  elif response.status_code == 401:
658
700
  error_msg = "Authentication error: Invalid API key"
659
701
  print(f"❌ {error_msg}")
702
+ print(f"🔍 DEBUG: Response text: {response.text}")
660
703
  # Don't retry auth errors
661
704
  return None, error_msg
662
705
  elif response.status_code == 429:
663
706
  error_msg = "Rate limit exceeded or quota reached"
664
707
  print(f"⚠️ {error_msg}")
708
+ print(f"🔍 DEBUG: Response text: {response.text}")
665
709
  # Always retry rate limit errors with increasing backoff
666
710
  last_error = error_msg
667
711
  continue
668
712
  elif response.status_code == 500:
669
713
  error_msg = "OpenAI server error"
670
714
  print(f"⚠️ {error_msg}")
715
+ print(f"🔍 DEBUG: Response text: {response.text}")
671
716
  # Retry server errors
672
717
  last_error = error_msg
673
718
  continue
674
719
  else:
675
720
  error_msg = f"Status code: {response.status_code}, Response: {response.text}"
676
721
  print(f"⚠️ OpenAI API error: {error_msg}")
722
+ print(f"🔍 DEBUG: Full response text: {response.text}")
677
723
  last_error = error_msg
678
724
  # Only retry if we have attempts left
679
725
  if attempt < retries:
@@ -682,18 +728,22 @@ Do not provide any explanations, just the exact command to run.
682
728
  except requests.exceptions.Timeout:
683
729
  error_msg = "Request timed out"
684
730
  print(f"⚠️ {error_msg}")
731
+ print(f"🔍 DEBUG: Timeout after 45 seconds")
685
732
  last_error = error_msg
686
733
  # Always retry timeouts
687
734
  continue
688
735
  except requests.exceptions.ConnectionError:
689
736
  error_msg = "Connection error"
690
737
  print(f"⚠️ {error_msg}")
738
+ print(f"🔍 DEBUG: Connection failed to api.openai.com")
691
739
  last_error = error_msg
692
740
  # Always retry connection errors
693
741
  continue
694
742
  except Exception as e:
695
743
  error_msg = str(e)
696
744
  print(f"⚠️ Unexpected error: {error_msg}")
745
+ print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
746
+ print(f"🔍 DEBUG: Exception details: {str(e)}")
697
747
  last_error = error_msg
698
748
  # Only retry if we have attempts left
699
749
  if attempt < retries:
@@ -722,7 +772,12 @@ Do not provide any explanations, just the exact command to run.
722
772
 
723
773
  # Process the response
724
774
  try:
775
+ print(f"🔍 DEBUG: Processing OpenAI response...")
776
+ print(f"🔍 DEBUG: Response structure: {list(result.keys())}")
777
+ print(f"🔍 DEBUG: Choices count: {len(result.get('choices', []))}")
778
+
725
779
  fix_command = result["choices"][0]["message"]["content"].strip()
780
+ print(f"🔍 DEBUG: Raw response content: {fix_command}")
726
781
 
727
782
  # Save the original response for debugging
728
783
  original_response = fix_command
@@ -798,9 +853,12 @@ Do not provide any explanations, just the exact command to run.
798
853
  print("⚠️ Using best guess for command")
799
854
 
800
855
  print(f"🔧 Suggested fix: {fix_command}")
856
+ print(f"🔍 DEBUG: Returning fix command: {fix_command}")
801
857
  return fix_command
802
858
  except Exception as e:
803
859
  print(f"❌ Error processing OpenAI response: {e}")
860
+ print(f"🔍 DEBUG: Exception type: {type(e).__name__}")
861
+ print(f"🔍 DEBUG: Exception details: {str(e)}")
804
862
  return None
805
863
 
806
864
  def prompt_for_hf_token():
@@ -1646,6 +1704,11 @@ cd "{current_dir}"
1646
1704
  # If command failed and we're debugging with LLM
1647
1705
  if debug_with_llm:
1648
1706
  print("🔍 Attempting to debug the failed command with OpenAI...")
1707
+ print(f"🔍 DEBUG: Command that failed: {cmd_to_execute}")
1708
+ print(f"🔍 DEBUG: Exit code: {exit_code}")
1709
+ print(f"🔍 DEBUG: stderr length: {len(stderr_buffer)}")
1710
+ print(f"🔍 DEBUG: stdout length: {len(stdout_buffer)}")
1711
+
1649
1712
  # Ensure we have a non-empty error message to debug
1650
1713
  if not stderr_buffer.strip() and stdout_buffer.strip():
1651
1714
  print("⚠️ stderr is empty but stdout contains content, using stdout for debugging")
@@ -1679,158 +1742,6 @@ cd "{current_dir}"
1679
1742
  print("❌ No token provided. Cannot set up Hugging Face authentication.")
1680
1743
  return False, "", "No Hugging Face token provided"
1681
1744
 
1682
- # Check if the error is related to missing pytest
1683
- if "ModuleNotFoundError: No module named 'pytest'" in stderr_buffer or "ImportError: No module named pytest" in stderr_buffer:
1684
- print("🔍 Detected missing pytest module, installing it automatically...")
1685
- pytest_install_success, _, _ = run_command("pip install pytest", show_output=True, debug_with_llm=False)
1686
- if pytest_install_success:
1687
- print("✅ Successfully installed pytest, retrying original command...")
1688
- return run_command(cmd, show_output, retry_count + 1, max_retries)
1689
-
1690
- # Check for common errors that we can fix automatically
1691
- common_errors = [
1692
- # Source command not found in sh shell
1693
- {
1694
- "pattern": "source: not found",
1695
- "fix": lambda cmd: cmd.replace("source ", ". ")
1696
- },
1697
- # No virtual environment found for uv
1698
- {
1699
- "pattern": "No virtual environment found; run `uv venv`",
1700
- "fix": lambda cmd: "uv venv .venv && . .venv/bin/activate && " + cmd
1701
- }
1702
- ]
1703
-
1704
- # Check if any of the common errors match and apply automatic fix
1705
- for error_info in common_errors:
1706
- if error_info["pattern"] in stderr_buffer:
1707
- print(f"🔍 Detected common error: {error_info['pattern']}")
1708
- fix_func = error_info["fix"]
1709
- fixed_cmd = fix_func(cmd_to_execute)
1710
- print(f"🔧 Applying automatic fix: {fixed_cmd}")
1711
-
1712
- # Run the fixed command
1713
- fix_success, fix_stdout, fix_stderr = run_command(fixed_cmd, show_output=True, debug_with_llm=False)
1714
- if fix_success:
1715
- print("✅ Automatic fix succeeded!")
1716
- return True, fix_stdout, ""
1717
- else:
1718
- print("❌ Automatic fix failed, continuing with LLM debugging")
1719
-
1720
- # Check for Python version-specific errors
1721
- python_version_errors = [
1722
- # Python 3.13 distutils issue
1723
- ("ModuleNotFoundError: No module named 'distutils'", "3.13"),
1724
- # Add more version-specific error patterns here
1725
- ("ImportError: cannot import name 'soft_unicode' from 'markupsafe'", None),
1726
- ("AttributeError: module 'setuptools.dist' has no attribute 'check_specifier'", None)
1727
- ]
1728
-
1729
- # Check if any of the error patterns match
1730
- for error_pattern, problematic_version in python_version_errors:
1731
- if error_pattern in stderr_buffer:
1732
- print(f"🔍 Detected Python version-specific error: {error_pattern}")
1733
-
1734
- # Get current Python version if not already known
1735
- if not current_python_version:
1736
- version_cmd = "python --version"
1737
- version_success, version_stdout, _ = run_command(version_cmd, show_output=False, debug_with_llm=False)
1738
- if version_success:
1739
- current_python_version = version_stdout.strip()
1740
- print(f"🐍 Current Python version: {current_python_version}")
1741
-
1742
- # Check if we've already tried switching Python versions
1743
- if python_version_switched:
1744
- print("⚠️ Already attempted to switch Python versions once, not trying again")
1745
- break
1746
-
1747
- print("🔄 Attempting to fix by switching Python version...")
1748
-
1749
- # Install conda if not already installed
1750
- if not conda_installed:
1751
- print("📦 Installing Miniconda to manage Python versions...")
1752
- conda_install_cmds = [
1753
- "apt-get update -y",
1754
- "apt-get install -y wget bzip2",
1755
- "wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh",
1756
- "bash /tmp/miniconda.sh -b -p /opt/conda",
1757
- "rm /tmp/miniconda.sh",
1758
- "echo 'export PATH=/opt/conda/bin:$PATH' >> ~/.bashrc",
1759
- "export PATH=/opt/conda/bin:$PATH",
1760
- "conda init bash",
1761
- "source ~/.bashrc",
1762
- "conda activate base"
1763
- ]
1764
-
1765
- for conda_cmd in conda_install_cmds:
1766
- print(f"🔄 Running: {conda_cmd}")
1767
- conda_success, _, _ = run_command(conda_cmd, show_output=True, debug_with_llm=False)
1768
- if not conda_success:
1769
- print("⚠️ Failed to install conda, continuing with system Python")
1770
- break
1771
-
1772
- # Check if conda was successfully installed
1773
- conda_check_cmd = "conda --version"
1774
- conda_check_success, conda_check_stdout, _ = run_command(conda_check_cmd, show_output=True, debug_with_llm=False)
1775
- conda_installed = conda_check_success
1776
-
1777
- if conda_installed:
1778
- print(f"✅ Successfully installed conda: {conda_check_stdout.strip()}")
1779
- else:
1780
- print("⚠️ Failed to verify conda installation")
1781
- break
1782
-
1783
- # Determine target Python version
1784
- target_version = "3.10" # Default to a stable version
1785
- if problematic_version == "3.13":
1786
- # If we're on 3.13 and having issues, go to 3.10
1787
- target_version = "3.10"
1788
- elif "3.13" in str(current_python_version):
1789
- # If we're on 3.13 for any other error, try 3.10
1790
- target_version = "3.10"
1791
- elif "3.10" in str(current_python_version):
1792
- # If we're on 3.10 and having issues, try 3.9
1793
- target_version = "3.9"
1794
-
1795
- print(f"🐍 Switching from {current_python_version} to Python {target_version}...")
1796
-
1797
- # Create and activate a conda environment with the target Python version
1798
- conda_cmds = [
1799
- f"conda create -y -n py{target_version} python={target_version}",
1800
- f"echo 'conda activate py{target_version}' >> ~/.bashrc",
1801
- f"conda init bash",
1802
- f"source ~/.bashrc",
1803
- f"conda activate py{target_version}"
1804
- ]
1805
-
1806
- for conda_cmd in conda_cmds:
1807
- print(f"🔄 Running: {conda_cmd}")
1808
- conda_success, _, _ = run_command(conda_cmd, show_output=True, debug_with_llm=False)
1809
- if not conda_success:
1810
- print(f"⚠️ Failed to run conda command: {conda_cmd}")
1811
-
1812
- # Verify Python version changed
1813
- verify_cmd = "python --version"
1814
- verify_success, verify_stdout, _ = run_command(verify_cmd, show_output=True, debug_with_llm=False)
1815
-
1816
- if verify_success and target_version in verify_stdout:
1817
- print(f"✅ Successfully switched to Python {verify_stdout.strip()}")
1818
- python_version_switched = True
1819
- current_python_version = verify_stdout.strip()
1820
-
1821
- # Reinstall pip and setuptools in the new environment
1822
- print("📦 Installing pip and setuptools in new environment...")
1823
- run_command("pip install --upgrade pip setuptools wheel", show_output=True, debug_with_llm=False)
1824
-
1825
- # Retry the original command with the new Python version
1826
- print(f"🔄 Retrying original command with Python {target_version}...")
1827
- # Reset the retry counter since we've made a significant change
1828
- return run_command(cmd, show_output, 0, max_retries)
1829
- else:
1830
- print("⚠️ Failed to switch Python version, continuing with current version")
1831
-
1832
- break
1833
-
1834
1745
  # Check if stderr is empty, try to use stdout as fallback
1835
1746
  debug_output = stderr_buffer
1836
1747
  if not debug_output or not debug_output.strip():
@@ -1848,8 +1759,17 @@ cd "{current_dir}"
1848
1759
  print(debug_output if debug_output else "[EMPTY]")
1849
1760
  print("="*60)
1850
1761
 
1762
+ print(f"🔍 DEBUG: About to call call_openai_for_debug...")
1763
+ print(f"🔍 DEBUG: Command: {cmd_to_execute}")
1764
+ print(f"🔍 DEBUG: Debug output length: {len(debug_output)}")
1765
+ print(f"🔍 DEBUG: Current directory: {current_dir}")
1766
+ print(f"🔍 DEBUG: Sandbox available: {sandbox is not None}")
1767
+ print(f"🔍 DEBUG: Debug output preview: {debug_output[:200]}...")
1768
+
1851
1769
  fix_command = call_openai_for_debug(cmd_to_execute, debug_output, current_dir=current_dir, sandbox=sandbox)
1852
1770
 
1771
+ print(f"🔍 DEBUG: call_openai_for_debug returned: {fix_command}")
1772
+
1853
1773
  if fix_command:
1854
1774
  print(f"🔧 OpenAI suggested fix command: {fix_command}")
1855
1775
 
@@ -2487,7 +2407,7 @@ ssh_app = modal.App("ssh-container-app")
2487
2407
  "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
2488
2408
  "gpg", "ca-certificates", "software-properties-common"
2489
2409
  )
2490
- .pip_install("uv", "modal") # Fast Python package installer and Modal
2410
+ .pip_install("uv", "modal", "requests", "openai") # Fast Python package installer and Modal
2491
2411
  .run_commands(
2492
2412
  # Create SSH directory
2493
2413
  "mkdir -p /var/run/sshd",
@@ -2553,17 +2473,116 @@ def ssh_container_function(ssh_password, repo_url=None, repo_name=None, setup_co
2553
2473
  # Run setup commands if provided
2554
2474
  if setup_commands:
2555
2475
  print(f"⚙️ Running {len(setup_commands)} setup commands...")
2556
- for i, cmd in enumerate(setup_commands, 1):
2557
- print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
2476
+
2477
+ # First, let's check the current directory structure
2478
+ print("🔍 Checking current directory structure before running setup commands...")
2479
+ try:
2480
+ result = subprocess.run("pwd && ls -la", shell=True, check=True,
2481
+ capture_output=True, text=True)
2482
+ print(f"📂 Current directory: {result.stdout}")
2483
+ except subprocess.CalledProcessError as e:
2484
+ print(f"⚠️ Could not check directory structure: {e}")
2485
+
2486
+ # Define a simple run_command function for SSH container
2487
+ def run_command_with_llm_debug(cmd, show_output=True, retry_count=0, max_retries=3):
2488
+ """Execute a command with LLM debugging enabled"""
2489
+ print(f"🔧 Executing: {cmd}")
2558
2490
  try:
2559
- result = subprocess.run(cmd, shell=True, check=True,
2560
- capture_output=True, text=True)
2561
- if result.stdout:
2491
+ # Handle special case for source command which doesn't work with subprocess.run
2492
+ if cmd.strip().startswith("source ") or " source " in cmd:
2493
+ print("⚠️ Detected 'source' command which doesn't work with subprocess.run")
2494
+ print("🔄 Converting to bash -c with dot (.) instead of source")
2495
+ # Replace source with . (dot) which is the same as source but works in sh
2496
+ modified_cmd = cmd.replace("source ", ". ")
2497
+ # Wrap in bash -c to ensure it runs in bash
2498
+ bash_cmd = f"bash -c '{modified_cmd}'"
2499
+ print(f"🔄 Modified command: {bash_cmd}")
2500
+ result = subprocess.run(bash_cmd, shell=True, check=True,
2501
+ capture_output=True, text=True)
2502
+ else:
2503
+ result = subprocess.run(cmd, shell=True, check=True,
2504
+ capture_output=True, text=True)
2505
+
2506
+ if result.stdout and show_output:
2562
2507
  print(f"✅ Output: {result.stdout}")
2508
+ return True, result.stdout, ""
2563
2509
  except subprocess.CalledProcessError as e:
2510
+ error_output = e.stderr if e.stderr else str(e)
2564
2511
  print(f"❌ Command failed: {e}")
2565
- if e.stderr:
2566
- print(f"❌ Error: {e.stderr}")
2512
+ print(f"❌ Error: {error_output}")
2513
+
2514
+ # Call OpenAI for debugging
2515
+ print("🔍 Attempting to debug the failed command with OpenAI...")
2516
+ try:
2517
+ # Get the current directory for context
2518
+ current_dir = os.getcwd()
2519
+
2520
+ # Call OpenAI for debugging
2521
+ print(f"🔍 DEBUG: About to call call_openai_for_debug...")
2522
+ print(f"🔍 DEBUG: Command: {cmd}")
2523
+ print(f"🔍 DEBUG: Error output length: {len(error_output)}")
2524
+ print(f"🔍 DEBUG: Current directory: {current_dir}")
2525
+
2526
+ fix_command = call_openai_for_debug(cmd, error_output, current_dir=current_dir)
2527
+
2528
+ print(f"🔍 DEBUG: call_openai_for_debug returned: {fix_command}")
2529
+
2530
+ if fix_command:
2531
+ print(f"🔧 OpenAI suggested fix command: {fix_command}")
2532
+
2533
+ # Run the fix command
2534
+ print(f"🔄 Running suggested fix command: {fix_command}")
2535
+ try:
2536
+ fix_result = subprocess.run(fix_command, shell=True, check=True,
2537
+ capture_output=True, text=True)
2538
+ if fix_result.stdout:
2539
+ print(f"✅ Fix command output: {fix_result.stdout}")
2540
+
2541
+ # Retry the original command
2542
+ print(f"🔄 Retrying original command: {cmd}")
2543
+ return run_command_with_llm_debug(cmd, show_output, retry_count + 1, max_retries)
2544
+ except subprocess.CalledProcessError as fix_e:
2545
+ print(f"❌ Fix command also failed: {fix_e}")
2546
+ return False, "", error_output
2547
+ else:
2548
+ print("❌ No fix suggested by OpenAI")
2549
+ return False, "", error_output
2550
+
2551
+ except Exception as debug_e:
2552
+ print(f"❌ LLM debugging failed: {debug_e}")
2553
+ return False, "", error_output
2554
+
2555
+ for i, cmd in enumerate(setup_commands, 1):
2556
+ print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
2557
+
2558
+ # Check if this is a cd command and if the directory exists
2559
+ if cmd.strip().startswith("cd "):
2560
+ cd_parts = cmd.split(None, 1)
2561
+ if len(cd_parts) >= 2:
2562
+ target_dir = cd_parts[1].strip('"\'')
2563
+ print(f"🔍 Checking if directory exists: {target_dir}")
2564
+ try:
2565
+ check_result = subprocess.run(f"test -d '{target_dir}'", shell=True,
2566
+ capture_output=True, text=True)
2567
+ if check_result.returncode != 0:
2568
+ print(f"⚠️ Directory does not exist: {target_dir}")
2569
+ print(f"🔍 Current directory contents:")
2570
+ subprocess.run("pwd && ls -la", shell=True, check=False)
2571
+
2572
+ # Try to find similar directories
2573
+ print(f"🔍 Looking for similar directories...")
2574
+ subprocess.run("find . -type d -name '*llama*' -o -name '*nano*' 2>/dev/null | head -10", shell=True, check=False)
2575
+ except Exception as e:
2576
+ print(f"⚠️ Could not check directory: {e}")
2577
+
2578
+ success, stdout, stderr = run_command_with_llm_debug(cmd, show_output=True)
2579
+ if not success:
2580
+ print(f"⚠️ Command {i} failed, but continuing with remaining commands...")
2581
+
2582
+ # If this was a cd command that failed, try to understand the directory structure
2583
+ if cmd.strip().startswith("cd ") and "No such file or directory" in stderr:
2584
+ print(f"🔍 Analyzing directory structure after failed cd command...")
2585
+ subprocess.run("pwd && ls -la && echo '--- Parent directory ---' && ls -la ..", shell=True, check=False)
2567
2586
 
2568
2587
  # Get container info
2569
2588
  print("🔍 Container started successfully!")
@@ -2798,7 +2817,7 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
2798
2817
  "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
2799
2818
  "gpg", "ca-certificates", "software-properties-common"
2800
2819
  )
2801
- .pip_install("uv", "modal") # Fast Python package installer and Modal
2820
+ .pip_install("uv", "modal", "requests", "openai") # Fast Python package installer and Modal
2802
2821
  .run_commands(
2803
2822
  # Create SSH directory
2804
2823
  "mkdir -p /var/run/sshd",
@@ -2874,17 +2893,108 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
2874
2893
  # Run setup commands if provided
2875
2894
  if setup_commands:
2876
2895
  print(f"⚙️ Running {len(setup_commands)} setup commands...")
2877
- for i, cmd in enumerate(setup_commands, 1):
2878
- print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
2896
+
2897
+ # Define a helper function for running commands with LLM debugging
2898
+ def run_command_with_basic_error_handling(cmd, show_output=True, retry_count=0, max_retries=3):
2899
+ """Execute a command with LLM debugging enabled"""
2900
+ print(f"🔧 Executing: {cmd}")
2879
2901
  try:
2880
- result = subprocess.run(cmd, shell=True, check=True,
2881
- capture_output=True, text=True)
2882
- if result.stdout:
2902
+ # Handle special case for source command which doesn't work with subprocess.run
2903
+ if cmd.strip().startswith("source ") or " source " in cmd:
2904
+ print("⚠️ Detected 'source' command which doesn't work with subprocess.run")
2905
+ print("🔄 Converting to bash -c with dot (.) instead of source")
2906
+ # Replace source with . (dot) which is the same as source but works in sh
2907
+ modified_cmd = cmd.replace("source ", ". ")
2908
+ # Wrap in bash -c to ensure it runs in bash
2909
+ bash_cmd = f"bash -c '{modified_cmd}'"
2910
+ print(f"🔄 Modified command: {bash_cmd}")
2911
+ result = subprocess.run(bash_cmd, shell=True, check=True,
2912
+ capture_output=True, text=True)
2913
+ else:
2914
+ result = subprocess.run(cmd, shell=True, check=True,
2915
+ capture_output=True, text=True)
2916
+
2917
+ if result.stdout and show_output:
2883
2918
  print(f"✅ Output: {result.stdout}")
2919
+ return True, result.stdout, ""
2884
2920
  except subprocess.CalledProcessError as e:
2921
+ error_output = e.stderr if e.stderr else str(e)
2885
2922
  print(f"❌ Command failed: {e}")
2886
- if e.stderr:
2887
- print(f"❌ Error: {e.stderr}")
2923
+ print(f"❌ Error: {error_output}")
2924
+
2925
+ # Call OpenAI for debugging
2926
+ print("🔍 Attempting to debug the failed command with OpenAI...")
2927
+ try:
2928
+ # Get the current directory for context
2929
+ current_dir = os.getcwd()
2930
+
2931
+ # Call OpenAI for debugging
2932
+ print(f"🔍 DEBUG: About to call call_openai_for_debug...")
2933
+ print(f"🔍 DEBUG: Command: {cmd}")
2934
+ print(f"🔍 DEBUG: Error output length: {len(error_output)}")
2935
+ print(f"🔍 DEBUG: Current directory: {current_dir}")
2936
+
2937
+ fix_command = call_openai_for_debug(cmd, error_output, current_dir=current_dir)
2938
+
2939
+ print(f"🔍 DEBUG: call_openai_for_debug returned: {fix_command}")
2940
+
2941
+ if fix_command:
2942
+ print(f"🔧 OpenAI suggested fix command: {fix_command}")
2943
+
2944
+ # Run the fix command
2945
+ print(f"🔄 Running suggested fix command: {fix_command}")
2946
+ try:
2947
+ fix_result = subprocess.run(fix_command, shell=True, check=True,
2948
+ capture_output=True, text=True)
2949
+ if fix_result.stdout:
2950
+ print(f"✅ Fix command output: {fix_result.stdout}")
2951
+
2952
+ # Retry the original command
2953
+ print(f"🔄 Retrying original command: {cmd}")
2954
+ return run_command_with_basic_error_handling(cmd, show_output, retry_count + 1, max_retries)
2955
+ except subprocess.CalledProcessError as fix_e:
2956
+ print(f"❌ Fix command also failed: {fix_e}")
2957
+ return False, "", error_output
2958
+ else:
2959
+ print("❌ No fix suggested by OpenAI")
2960
+ return False, "", error_output
2961
+
2962
+ except Exception as debug_e:
2963
+ print(f"❌ LLM debugging failed: {debug_e}")
2964
+ return False, "", error_output
2965
+
2966
+ # Run each setup command
2967
+ for i, cmd in enumerate(setup_commands, 1):
2968
+ print(f"📋 Executing command {i}/{len(setup_commands)}: {cmd}")
2969
+
2970
+ # Check if this is a cd command and if the directory exists
2971
+ if cmd.strip().startswith("cd "):
2972
+ cd_parts = cmd.split(None, 1)
2973
+ if len(cd_parts) >= 2:
2974
+ target_dir = cd_parts[1].strip('"\'')
2975
+ print(f"🔍 Checking if directory exists: {target_dir}")
2976
+ try:
2977
+ check_result = subprocess.run(f"test -d '{target_dir}'", shell=True,
2978
+ capture_output=True, text=True)
2979
+ if check_result.returncode != 0:
2980
+ print(f"⚠️ Directory does not exist: {target_dir}")
2981
+ print(f"🔍 Current directory contents:")
2982
+ subprocess.run("pwd && ls -la", shell=True, check=False)
2983
+
2984
+ # Try to find similar directories
2985
+ print(f"🔍 Looking for similar directories...")
2986
+ subprocess.run("find . -type d -name '*llama*' -o -name '*nano*' 2>/dev/null | head -10", shell=True, check=False)
2987
+ except Exception as e:
2988
+ print(f"⚠️ Could not check directory: {e}")
2989
+
2990
+ success, stdout, stderr = run_command_with_basic_error_handling(cmd, show_output=True)
2991
+ if not success:
2992
+ print(f"⚠️ Command {i} failed, but continuing with remaining commands...")
2993
+
2994
+ # If this was a cd command that failed, try to understand the directory structure
2995
+ if cmd.strip().startswith("cd ") and "No such file or directory" in stderr:
2996
+ print(f"🔍 Analyzing directory structure after failed cd command...")
2997
+ subprocess.run("pwd && ls -la && echo '--- Parent directory ---' && ls -la ..", shell=True, check=False)
2888
2998
 
2889
2999
  # Create SSH tunnel
2890
3000
  with modal.forward(22, unencrypted=True) as tunnel:
@@ -3573,7 +3683,7 @@ def create_ssh_container_function(gpu_type="a10g", timeout_minutes=60, volume=No
3573
3683
  "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
3574
3684
  "gpg", "ca-certificates", "software-properties-common"
3575
3685
  )
3576
- .pip_install("uv", "modal") # Fast Python package installer and Modal
3686
+ .pip_install("uv", "modal", "requests", "openai") # Fast Python package installer and Modal
3577
3687
  .run_commands(
3578
3688
  # Create SSH directory
3579
3689
  "mkdir -p /var/run/sshd",