gitarsenal-cli 1.9.21 → 1.9.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/.venv_status.json +1 -1
  2. package/package.json +1 -1
  3. package/python/__pycache__/auth_manager.cpython-313.pyc +0 -0
  4. package/python/__pycache__/command_manager.cpython-313.pyc +0 -0
  5. package/python/__pycache__/fetch_modal_tokens.cpython-313.pyc +0 -0
  6. package/python/__pycache__/llm_debugging.cpython-313.pyc +0 -0
  7. package/python/__pycache__/modal_container.cpython-313.pyc +0 -0
  8. package/python/__pycache__/shell.cpython-313.pyc +0 -0
  9. package/python/api_integration.py +0 -0
  10. package/python/command_manager.py +613 -0
  11. package/python/credentials_manager.py +0 -0
  12. package/python/fetch_modal_tokens.py +0 -0
  13. package/python/fix_modal_token.py +0 -0
  14. package/python/fix_modal_token_advanced.py +0 -0
  15. package/python/gitarsenal.py +0 -0
  16. package/python/gitarsenal_proxy_client.py +0 -0
  17. package/python/llm_debugging.py +1369 -0
  18. package/python/modal_container.py +626 -0
  19. package/python/setup.py +15 -0
  20. package/python/setup_modal_token.py +0 -39
  21. package/python/shell.py +627 -0
  22. package/python/test_modalSandboxScript.py +75 -2639
  23. package/scripts/postinstall.js +22 -23
  24. package/python/__pycache__/credentials_manager.cpython-313.pyc +0 -0
  25. package/python/__pycache__/test_modalSandboxScript.cpython-313.pyc +0 -0
  26. package/python/__pycache__/test_modalSandboxScript_stable.cpython-313.pyc +0 -0
  27. package/python/debug_delete.py +0 -167
  28. package/python/documentation.py +0 -76
  29. package/python/fix_setup_commands.py +0 -116
  30. package/python/modal_auth_patch.py +0 -178
  31. package/python/modal_proxy_service.py +0 -665
  32. package/python/modal_token_solution.py +0 -293
  33. package/python/test_dynamic_commands.py +0 -147
  34. package/test_modalSandboxScript.py +0 -5004
@@ -0,0 +1,626 @@
import datetime
import json
import os
import subprocess
import threading
import time

import modal
7
+
8
+ # Now modify the create_modal_ssh_container function to use the PersistentShell
9
+ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_commands=None,
10
+ volume_name=None, timeout_minutes=60, ssh_password=None, interactive=False):
11
+ """Create a Modal SSH container with GPU support and tunneling"""
12
+
13
+ # Use interactive mode if specified
14
+ if interactive:
15
+ # If GPU type is not specified, use default
16
+ if not gpu_type:
17
+ gpu_type = "A10G"
18
+ print(f"✅ Using default GPU type: {gpu_type}")
19
+ else:
20
+ print(f"✅ Using provided GPU type: {gpu_type}")
21
+
22
+ # If repo URL is not specified, prompt for it
23
+ if not repo_url:
24
+ try:
25
+ repo_url = input("? Enter GitHub repository URL: ").strip()
26
+ if not repo_url:
27
+ print("❌ Repository URL is required.")
28
+ return None
29
+ except KeyboardInterrupt:
30
+ print("\n🛑 Setup cancelled.")
31
+ return None
32
+
33
+ # If volume name is not specified, ask about persistent volume
34
+ if not volume_name:
35
+ try:
36
+ use_volume = input("? Use persistent volume for faster installs? (Y/n): ").strip().lower()
37
+ if use_volume in ('', 'y', 'yes'):
38
+ volume_name = input("? Enter volume name: ").strip()
39
+ if not volume_name:
40
+ volume_name = "gitarsenal-volume"
41
+ print(f"Using default volume name: {volume_name}")
42
+ except KeyboardInterrupt:
43
+ print("\n🛑 Setup cancelled.")
44
+ return None
45
+
46
+ # Check if Modal is authenticated
47
+ try:
48
+ # Print all environment variables for debugging
49
+ print("🔍 DEBUG: Checking environment variables")
50
+ modal_token_id = os.environ.get("MODAL_TOKEN_ID")
51
+ modal_token = os.environ.get("MODAL_TOKEN")
52
+ openai_api_key = os.environ.get("OPENAI_API_KEY")
53
+ print(f"🔍 token exists: {'Yes' if modal_token_id else 'No'}")
54
+ print(f"🔍 token exists: {'Yes' if modal_token else 'No'}")
55
+ print(f"🔍 openai_api_key exists: {'Yes' if openai_api_key else 'No'}")
56
+ if modal_token_id:
57
+ print(f"🔍 token length: {len(modal_token_id)}")
58
+ if modal_token:
59
+ print(f"🔍 token length: {len(modal_token)}")
60
+ if openai_api_key:
61
+ print(f"🔍 openai_api_key length: {len(openai_api_key)}")
62
+
63
+ # Try to access Modal token to check authentication
64
+ try:
65
+ # Check if token is set in environment
66
+ modal_token_id = os.environ.get("MODAL_TOKEN_ID")
67
+ if not modal_token_id:
68
+ print("⚠️ MODAL_TOKEN_ID not found in environment.")
69
+ # Try to get from MODAL_TOKEN
70
+ modal_token = os.environ.get("MODAL_TOKEN")
71
+ if modal_token:
72
+ print("✅ Found token in environment variable")
73
+ os.environ["MODAL_TOKEN_ID"] = modal_token
74
+ modal_token_id = modal_token
75
+ print(f"✅ Set token (length: {len(modal_token)})")
76
+
77
+ if modal_token_id:
78
+ print(f"✅ token found (length: {len(modal_token_id)})")
79
+
80
+ # Use the comprehensive fix_modal_token script
81
+ try:
82
+ # Execute the fix_modal_token.py script
83
+ import subprocess
84
+ print(f"🔄 Running fix_modal_token.py to set up Modal token...")
85
+ result = subprocess.run(
86
+ ["python", os.path.join(os.path.dirname(__file__), "fix_modal_token.py")],
87
+ capture_output=True,
88
+ text=True
89
+ )
90
+
91
+ # Print the output
92
+ print(result.stdout)
93
+
94
+ if result.returncode != 0:
95
+ print(f"⚠️ Warning: fix_modal_token.py exited with code {result.returncode}")
96
+ if result.stderr:
97
+ print(f"Error: {result.stderr}")
98
+
99
+ print(f"✅ token setup completed")
100
+ except Exception as e:
101
+ print(f"⚠️ Error running fix_modal_token.py: {e}")
102
+ else:
103
+ print("❌ No token found in environment variables")
104
+ # Try to get from file as a last resort
105
+ try:
106
+ home_dir = os.path.expanduser("~")
107
+ modal_dir = os.path.join(home_dir, ".modal")
108
+ token_file = os.path.join(modal_dir, "token.json")
109
+ if os.path.exists(token_file):
110
+ print(f"🔍 Found Modal token file at {token_file}")
111
+ with open(token_file, 'r') as f:
112
+ import json
113
+ token_data = json.load(f)
114
+ if "token_id" in token_data:
115
+ modal_token_id = token_data["token_id"]
116
+ os.environ["MODAL_TOKEN_ID"] = modal_token_id
117
+ os.environ["MODAL_TOKEN"] = modal_token_id
118
+ print(f"✅ Loaded token from file (length: {len(modal_token_id)})")
119
+ else:
120
+ print("❌ Token file does not contain token_id")
121
+ else:
122
+ print("❌ token file not found")
123
+ except Exception as e:
124
+ print(f"❌ Error loading token from file: {e}")
125
+
126
+ if not os.environ.get("MODAL_TOKEN_ID"):
127
+ print("❌ Could not find Modal token in any location")
128
+ return None
129
+
130
+ except Exception as e:
131
+ print(f"⚠️ Error checking Modal token: {e}")
132
+ # Try to use the token from environment
133
+ modal_token_id = os.environ.get("MODAL_TOKEN_ID")
134
+ modal_token = os.environ.get("MODAL_TOKEN")
135
+ if modal_token_id:
136
+ print(f"🔄 Using token from environment (length: {len(modal_token_id)})")
137
+ elif modal_token:
138
+ print(f"🔄 Using token from environment (length: {len(modal_token)})")
139
+ os.environ["MODAL_TOKEN_ID"] = modal_token
140
+ modal_token_id = modal_token
141
+ else:
142
+ print("❌ No Modal token available. Cannot proceed.")
143
+ return None
144
+
145
+ # Set it in both environment variables
146
+ os.environ["MODAL_TOKEN_ID"] = modal_token_id
147
+ os.environ["MODAL_TOKEN"] = modal_token_id
148
+ print("✅ Set both token and id environment variables")
149
+ except Exception as e:
150
+ print(f"⚠️ Error checking Modal authentication: {e}")
151
+ print("Continuing anyway, but Modal operations may fail")
152
+
153
+ # Generate a unique app name with timestamp to avoid conflicts
154
+ timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
155
+ app_name = f"ssh-container-{timestamp}"
156
+
157
+ gpu_configs = {
158
+ 'T4': {'gpu': 't4', 'memory': 16},
159
+ 'L4': {'gpu': 'l4', 'memory': 24},
160
+ 'A10G': {'gpu': 'a10g', 'memory': 24},
161
+ 'A100-40GB': {'gpu': 'a100', 'memory': 40},
162
+ 'A100-80GB': {'gpu': 'a100-80gb', 'memory': 80},
163
+ 'L40S': {'gpu': 'l40s', 'memory': 48},
164
+ 'H100': {'gpu': 'h100', 'memory': 80},
165
+ 'H200': {'gpu': 'h200', 'memory': 141},
166
+ 'B200': {'gpu': 'b200', 'memory': 96}
167
+ }
168
+
169
+ if gpu_type not in gpu_configs:
170
+ print(f"⚠️ Unknown GPU type: {gpu_type}. Using A10G as default.")
171
+ gpu_type = 'A10G'
172
+
173
+ gpu_spec = gpu_configs[gpu_type]
174
+ print(f"🚀 Creating SSH container with {gpu_spec['gpu']} GPU ({gpu_spec['memory']}GB VRAM)")
175
+
176
+ # Generate or use provided SSH password
177
+ if not ssh_password:
178
+ ssh_password = generate_random_password()
179
+ print(f"🔐 Generated SSH password: {ssh_password}")
180
+
181
+ # Setup volume if specified
182
+ volume = None
183
+ volume_mount_path = "/persistent"
184
+
185
+ if volume_name:
186
+ print(f"📦 Setting up volume: {volume_name}")
187
+ try:
188
+ volume = modal.Volume.from_name(volume_name, create_if_missing=True)
189
+ print(f"✅ Volume '{volume_name}' ready for use")
190
+ except Exception as e:
191
+ print(f"⚠️ Could not setup volume '{volume_name}': {e}")
192
+ print("⚠️ Continuing without persistent volume")
193
+ volume = None
194
+ else:
195
+ # Create a default volume for this session
196
+ default_volume_name = f"ssh-vol-{timestamp}"
197
+ print(f"📦 Creating default volume: {default_volume_name}")
198
+ try:
199
+ volume = modal.Volume.from_name(default_volume_name, create_if_missing=True)
200
+ volume_name = default_volume_name
201
+ print(f"✅ Default volume '{default_volume_name}' created")
202
+ except Exception as e:
203
+ print(f"⚠️ Could not create default volume: {e}")
204
+ print("⚠️ Continuing without persistent volume")
205
+ volume = None
206
+
207
+ # Print debug info for authentication
208
+ print("🔍 Modal authentication debug info:")
209
+ modal_token = os.environ.get("MODAL_TOKEN_ID")
210
+ print(f" - token in env: {'Yes' if modal_token else 'No'}")
211
+ print(f" - Token length: {len(modal_token) if modal_token else 'N/A'}")
212
+
213
+ # Create SSH-enabled image
214
+ try:
215
+ print("📦 Building SSH-enabled image...")
216
+
217
+ # Use a more stable CUDA base image and avoid problematic packages
218
+ ssh_image = (
219
+ # modal.Image.from_registry("nvidia/cuda:12.4.0-devel-ubuntu22.04", add_python="3.11")
220
+ modal.Image.debian_slim()
221
+ .apt_install(
222
+ "openssh-server", "sudo", "curl", "wget", "vim", "htop", "git",
223
+ "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
224
+ "gpg", "ca-certificates", "software-properties-common"
225
+ )
226
+ .uv_pip_install("uv", "modal", "requests", "openai") # Remove problematic CUDA packages
227
+ .run_commands(
228
+ # Create SSH directory
229
+ "mkdir -p /var/run/sshd",
230
+ "mkdir -p /root/.ssh",
231
+ "chmod 700 /root/.ssh",
232
+
233
+ # Configure SSH server
234
+ "sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config",
235
+ "sed -i 's/#PasswordAuthentication yes/PasswordAuthentication yes/' /etc/ssh/sshd_config",
236
+ "sed -i 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /etc/ssh/sshd_config",
237
+
238
+ # SSH keep-alive settings
239
+ "echo 'ClientAliveInterval 60' >> /etc/ssh/sshd_config",
240
+ "echo 'ClientAliveCountMax 3' >> /etc/ssh/sshd_config",
241
+
242
+ # Generate SSH host keys
243
+ "ssh-keygen -A",
244
+
245
+ # Set up a nice bash prompt
246
+ "echo 'export PS1=\"\\[\\e[1;32m\\]modal:\\[\\e[1;34m\\]\\w\\[\\e[0m\\]$ \"' >> /root/.bashrc",
247
+ )
248
+ )
249
+ print("✅ SSH image built successfully")
250
+ except Exception as e:
251
+ print(f"❌ Error building SSH image: {e}")
252
+ return None
253
+
254
+ # Configure volumes if available
255
+ volumes_config = {}
256
+ if volume:
257
+ volumes_config[volume_mount_path] = volume
258
+
259
+ # Create app with image passed directly (THIS IS THE KEY CHANGE)
260
+ try:
261
+ print("🔍 Testing app creation...")
262
+ app = modal.App(app_name, image=ssh_image) # Pass image here
263
+ print("✅ Created app successfully")
264
+ except Exception as e:
265
+ print(f"❌ Error creating app: {e}")
266
+ return None
267
+
268
+ # Define the SSH container function (remove image from decorator)
269
+ @app.function(
270
+ timeout=timeout_minutes * 60, # Convert to seconds
271
+ gpu="A10G",
272
+ serialized=True,
273
+ volumes=volumes_config if volumes_config else None,
274
+ )
275
+ def ssh_container_function(ssh_password=None, repo_url=None, repo_name=None, setup_commands=None, openai_api_key=None, stored_credentials=None):
276
+ """Start SSH container with password authentication and optional setup."""
277
+ import subprocess
278
+ import time
279
+ import os
280
+ import json
281
+
282
+ # Set root password
283
+ subprocess.run(["bash", "-c", f"echo 'root:{ssh_password}' | chpasswd"], check=True)
284
+
285
+ # Set OpenAI API key if provided
286
+ if openai_api_key:
287
+ os.environ['OPENAI_API_KEY'] = openai_api_key
288
+ # print(f"✅ Set OpenAI API key in container environment (length: {len(openai_api_key)})")
289
+ else:
290
+ print("⚠️ No OpenAI API key provided to container")
291
+
292
+ # Set up stored credentials in container environment
293
+ if stored_credentials:
294
+ print(f"🔐 Setting up {len(stored_credentials)} stored credentials in container...")
295
+ for key, value in stored_credentials.items():
296
+ # Set each credential as an environment variable
297
+ env_var_name = key.upper().replace('-', '_').replace(' ', '_')
298
+ os.environ[env_var_name] = value
299
+ print(f"✅ Set {env_var_name} in container environment")
300
+
301
+ # Also save credentials to a file in the container for easy access
302
+ try:
303
+ credentials_dir = "/root/.gitarsenal"
304
+ os.makedirs(credentials_dir, exist_ok=True)
305
+ credentials_file = os.path.join(credentials_dir, "credentials.json")
306
+ with open(credentials_file, 'w') as f:
307
+ json.dump(stored_credentials, f, indent=2)
308
+ print(f"✅ Saved credentials to {credentials_file}")
309
+
310
+ # Print available credentials for user reference
311
+ print("\n🔐 AVAILABLE CREDENTIALS IN CONTAINER:")
312
+ print("="*50)
313
+ for key, value in stored_credentials.items():
314
+ masked_value = value[:8] + "..." if len(value) > 8 else "***"
315
+ env_var_name = key.upper().replace('-', '_').replace(' ', '_')
316
+ print(f" {key} -> {env_var_name} = {masked_value}")
317
+ print("="*50)
318
+ print("💡 These credentials are available as environment variables and in /root/.gitarsenal/credentials.json")
319
+
320
+ except Exception as e:
321
+ print(f"⚠️ Could not save credentials file: {e}")
322
+ else:
323
+ print("⚠️ No stored credentials provided to container")
324
+
325
+ # Start SSH service
326
+ subprocess.run(["service", "ssh", "start"], check=True)
327
+
328
+ # Preprocess setup commands using LLM to inject credentials
329
+ if setup_commands:
330
+ print(f"🔧 Preprocessing {len(setup_commands)} setup commands with LLM to inject credentials...")
331
+ api_key = os.environ.get("OPENAI_API_KEY")
332
+ processed_commands = preprocess_commands_with_llm(setup_commands, stored_credentials, api_key)
333
+ print(f"⚙️ Running {len(processed_commands)} preprocessed setup commands with dynamic command list...")
334
+
335
+ # Create command list manager with processed commands
336
+ cmd_manager = CommandListManager(processed_commands)
337
+
338
+ # Create persistent shell instance starting in /root
339
+ shell = PersistentShell(working_dir="/root", timeout=300)
340
+
341
+ try:
342
+ # Start the persistent shell
343
+ shell.start()
344
+
345
+ # Track how many commands have been executed since last analysis
346
+ commands_since_analysis = 0
347
+
348
+ # Execute commands using the command list manager
349
+ while cmd_manager.has_pending_commands():
350
+ # Get next command to execute
351
+ next_cmd, cmd_type = cmd_manager.get_next_command()
352
+
353
+ if not next_cmd:
354
+ break
355
+
356
+ # Print status before executing
357
+ cmd_manager.print_status()
358
+
359
+ # Periodically analyze and update the command list
360
+ if commands_since_analysis >= 3 and cmd_type == 'main':
361
+ print("\n🔍 Periodic command list analysis...")
362
+ api_key = os.environ.get("OPENAI_API_KEY")
363
+ cmd_manager.update_command_list_with_llm(api_key)
364
+ commands_since_analysis = 0
365
+
366
+ # Execute the command
367
+ if cmd_type == 'main':
368
+ cmd_text = next_cmd['command']
369
+ cmd_index = next_cmd['index']
370
+ print(f"📋 Executing main command {cmd_index + 1}/{cmd_manager.total_commands}: {cmd_text}")
371
+
372
+ start_time = time.time()
373
+ success, stdout, stderr = shell.execute(cmd_text, timeout=300)
374
+ execution_time = time.time() - start_time
375
+
376
+ # Check if the command was aborted due to waiting for input and an alternative was suggested
377
+ if not success and "Command aborted - requires user input" in stderr and shell.suggested_alternative:
378
+ alternative_cmd = shell.suggested_alternative
379
+ print(f"🔄 Command aborted due to input requirement. Adding suggested alternative: {alternative_cmd}")
380
+
381
+ # Add the alternative command with high priority
382
+ cmd_manager.add_command_dynamically(alternative_cmd, priority='high')
383
+
384
+ # Clear the suggested alternative
385
+ shell.suggested_alternative = None
386
+ # Check if the command should be removed as suggested by LLM
387
+ elif not success and stderr.startswith("Command removed -"):
388
+ reason = stderr.replace("Command removed -", "").strip()
389
+ print(f"🚫 Removed command as suggested by LLM: {reason}")
390
+ # We don't need to do anything else, just mark it as executed and move on
391
+
392
+ # Mark command as executed
393
+ cmd_manager.mark_command_executed(
394
+ cmd_index, 'main', success, stdout, stderr, execution_time
395
+ )
396
+
397
+ # Increment counter for periodic analysis
398
+ commands_since_analysis += 1
399
+
400
+ if not success:
401
+ print(f"⚠️ Command failed, attempting LLM debugging...")
402
+
403
+ # Call LLM for debugging
404
+ try:
405
+ current_dir = shell.get_cwd()
406
+
407
+ # Use unified LLM debugging function
408
+ fix_command = call_llm_for_debug(cmd_text, stderr, current_dir=current_dir, sandbox=shell)
409
+
410
+ if fix_command:
411
+ print(f"🔧 OpenAI suggested fix command: {fix_command}")
412
+
413
+ # Add the fix to the command list manager
414
+ fix_index = cmd_manager.add_suggested_fix(cmd_text, fix_command, "LLM suggested fix")
415
+
416
+ # Execute the fix command
417
+ print(f"🔄 Running suggested fix command: {fix_command}")
418
+ fix_start_time = time.time()
419
+ fix_success, fix_stdout, fix_stderr = shell.execute(fix_command, timeout=300)
420
+ fix_execution_time = time.time() - fix_start_time
421
+
422
+ # Mark fix command as executed
423
+ cmd_manager.mark_command_executed(
424
+ fix_index, 'fix', fix_success, fix_stdout, fix_stderr, fix_execution_time
425
+ )
426
+
427
+ if fix_success:
428
+ print(f"✅ Fix command succeeded")
429
+
430
+ # Check if we should skip the original command
431
+ api_key = os.environ.get("OPENAI_API_KEY")
432
+ should_skip, skip_reason = cmd_manager.should_skip_original_command(
433
+ cmd_text, fix_command, fix_stdout, fix_stderr, api_key
434
+ )
435
+
436
+ if should_skip:
437
+ print(f"🔄 Skipping original command: {skip_reason}")
438
+
439
+ # Mark the original command as successful without running it
440
+ cmd_manager.mark_command_executed(
441
+ cmd_index, 'main', True,
442
+ f"Command skipped after successful fix: {skip_reason}",
443
+ "", time.time() - start_time
444
+ )
445
+
446
+ print(f"✅ Original command marked as successful (skipped)")
447
+
448
+ # After a successful fix and skipping the original command,
449
+ # analyze and update the entire command list
450
+ print("\n🔍 Analyzing and updating remaining commands based on fix results...")
451
+ cmd_manager.update_command_list_with_llm(api_key)
452
+ else:
453
+ # Retry the original command
454
+ print(f"🔄 Retrying original command: {cmd_text}")
455
+ retry_start_time = time.time()
456
+ retry_success, retry_stdout, retry_stderr = shell.execute(cmd_text, timeout=300)
457
+ retry_execution_time = time.time() - retry_start_time
458
+
459
+ # Update the original command status
460
+ cmd_manager.mark_command_executed(
461
+ cmd_index, 'main', retry_success, retry_stdout, retry_stderr, retry_execution_time
462
+ )
463
+
464
+ if retry_success:
465
+ print(f"✅ Original command succeeded after fix!")
466
+
467
+ # After a successful fix and successful retry,
468
+ # analyze and update the entire command list
469
+ print("\n🔍 Analyzing and updating remaining commands based on fix results...")
470
+ cmd_manager.update_command_list_with_llm(api_key)
471
+ else:
472
+ print(f"⚠️ Original command still failed after fix, continuing...")
473
+ else:
474
+ print(f"❌ Fix command failed: {fix_stderr}")
475
+ print(f"⚠️ Continuing with remaining commands...")
476
+ else:
477
+ print("❌ No fix suggested by OpenAI")
478
+ print(f"⚠️ Continuing with remaining commands...")
479
+
480
+ except Exception as debug_e:
481
+ print(f"❌ LLM debugging failed: {debug_e}")
482
+ print(f"⚠️ Continuing with remaining commands...")
483
+
484
+ elif cmd_type == 'fix':
485
+ cmd_text = next_cmd['fix_command']
486
+ cmd_index = next_cmd['index']
487
+ print(f"🔧 Executing fix command {cmd_index + 1}: {cmd_text}")
488
+
489
+ start_time = time.time()
490
+ success, stdout, stderr = shell.execute(cmd_text, timeout=300)
491
+ execution_time = time.time() - start_time
492
+
493
+ # Check if the fix command was aborted due to waiting for input and an alternative was suggested
494
+ if not success and "Command aborted - requires user input" in stderr and shell.suggested_alternative:
495
+ alternative_cmd = shell.suggested_alternative
496
+ print(f"🔄 Fix command aborted due to input requirement. Adding suggested alternative: {alternative_cmd}")
497
+
498
+ # Add the alternative command with high priority
499
+ cmd_manager.add_command_dynamically(alternative_cmd, priority='high')
500
+
501
+ # Clear the suggested alternative
502
+ shell.suggested_alternative = None
503
+ # Check if the fix command should be removed as suggested by LLM
504
+ elif not success and stderr.startswith("Command removed -"):
505
+ reason = stderr.replace("Command removed -", "").strip()
506
+ print(f"🚫 Removed fix command as suggested by LLM: {reason}")
507
+ # We don't need to do anything else, just mark it as executed and move on
508
+
509
+ # Mark fix command as executed
510
+ cmd_manager.mark_command_executed(
511
+ cmd_index, 'fix', success, stdout, stderr, execution_time
512
+ )
513
+
514
+ # After all commands are processed, do a final batch analysis of any remaining failed commands
515
+ failed_commands = cmd_manager.get_failed_commands_for_llm()
516
+ if failed_commands:
517
+ print(f"\n🔍 Final batch analysis of {len(failed_commands)} failed commands...")
518
+ current_dir = shell.get_cwd()
519
+ api_key = os.environ.get("OPENAI_API_KEY")
520
+
521
+ # Use batch analysis to get additional fixes
522
+ additional_fixes = cmd_manager.analyze_failed_commands_with_llm(api_key, current_dir, shell)
523
+
524
+ if additional_fixes:
525
+ print(f"🔧 Executing {len(additional_fixes)} additional fix commands...")
526
+
527
+ # Execute the additional fix commands
528
+ for fix_index in additional_fixes:
529
+ fix_cmd = cmd_manager.suggested_fixes[fix_index]
530
+ cmd_text = fix_cmd['fix_command']
531
+ print(f"🔧 Executing additional fix: {cmd_text}")
532
+
533
+ start_time = time.time()
534
+ success, stdout, stderr = shell.execute(cmd_text, timeout=300)
535
+ execution_time = time.time() - start_time
536
+
537
+ # Check if the fix command was aborted due to waiting for input and an alternative was suggested
538
+ if not success and "Command aborted - requires user input" in stderr and shell.suggested_alternative:
539
+ alternative_cmd = shell.suggested_alternative
540
+ print(f"🔄 Additional fix command aborted due to input requirement. Adding suggested alternative: {alternative_cmd}")
541
+
542
+ # Add the alternative command with high priority
543
+ cmd_manager.add_command_dynamically(alternative_cmd, priority='high')
544
+
545
+ # Clear the suggested alternative
546
+ shell.suggested_alternative = None
547
+ # Check if the additional fix command should be removed as suggested by LLM
548
+ elif not success and stderr.startswith("Command removed -"):
549
+ reason = stderr.replace("Command removed -", "").strip()
550
+ print(f"🚫 Removed additional fix command as suggested by LLM: {reason}")
551
+ # We don't need to do anything else, just mark it as executed and move on
552
+
553
+ # Mark fix command as executed
554
+ cmd_manager.mark_command_executed(
555
+ fix_index, 'fix', success, stdout, stderr, execution_time
556
+ )
557
+
558
+ # Print final status
559
+ print("\n" + "="*60)
560
+ print("🎉 SETUP COMMANDS EXECUTION COMPLETED")
561
+ print("="*60)
562
+ cmd_manager.print_status()
563
+
564
+ except Exception as e:
565
+ print(f"❌ Error during setup command execution: {e}")
566
+ finally:
567
+ # Clean up the shell
568
+ shell.cleanup()
569
+
570
+ # Create SSH tunnel
571
+ with modal.forward(22, unencrypted=True) as tunnel:
572
+ host, port = tunnel.tcp_socket
573
+
574
+ print("\n" + "=" * 80)
575
+ print("🎉 SSH CONTAINER IS READY!")
576
+ print("=" * 80)
577
+ print(f"🌐 SSH Host: {host}")
578
+ print(f"🔌 SSH Port: {port}")
579
+ print(f"👤 Username: root")
580
+ print(f"🔐 Password: {ssh_password}")
581
+ print()
582
+ print("🔗 CONNECT USING THIS COMMAND:")
583
+ print(f"ssh -p {port} root@{host}")
584
+ print("=" * 80)
585
+
586
+ # Keep the container running
587
+ while True:
588
+ time.sleep(30)
589
+ # Check if SSH service is still running
590
+ try:
591
+ subprocess.run(["service", "ssh", "status"], check=True,
592
+ capture_output=True)
593
+ except subprocess.CalledProcessError:
594
+ print("⚠️ SSH service stopped, restarting...")
595
+ subprocess.run(["service", "ssh", "start"], check=True)
596
+
597
+ # Run the container
598
+ try:
599
+ print("⏳ Starting container... This may take 1-2 minutes...")
600
+
601
+ # Start the container in a new thread to avoid blocking
602
+ with modal.enable_output():
603
+ with app.run():
604
+ # Get the API key from environment
605
+ api_key = os.environ.get("OPENAI_API_KEY")
606
+
607
+ # Get stored credentials from local file
608
+ stored_credentials = get_stored_credentials()
609
+ if stored_credentials:
610
+ print(f"🔐 Found {len(stored_credentials)} stored credentials to send to container")
611
+ else:
612
+ print("⚠️ No stored credentials found")
613
+
614
+ ssh_container_function.remote(ssh_password, repo_url, repo_name, setup_commands, api_key, stored_credentials)
615
+
616
+ # Clean up Modal token after container is successfully created
617
+ cleanup_modal_token()
618
+
619
+ return {
620
+ "app_name": app_name,
621
+ "ssh_password": ssh_password,
622
+ "volume_name": volume_name
623
+ }
624
+ except Exception as e:
625
+ print(f"❌ Error running container: {e}")
626
+ return None
@@ -0,0 +1,15 @@
1
from setuptools import setup, find_packages

# Packaging metadata for the GitArsenal Python helpers that ship inside the
# gitarsenal-cli npm package and run inside Modal containers.
setup(
    name="gitarsenal-python",
    version="0.1.0",
    description="GitArsenal Python utilities for Modal containers",
    packages=find_packages(),  # discover every package directory next to this file
    install_requires=[
        # Runtime dependencies: Modal for sandboxes/containers, requests for
        # HTTP, openai/anthropic for the LLM-assisted debugging helpers.
        "modal",
        "requests",
        "openai",
        "anthropic"
    ],
    python_requires=">=3.8",
)