gitarsenal-cli 1.5.7 → 1.5.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "gitarsenal-cli",
-  "version": "1.5.7",
+  "version": "1.5.8",
   "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
   "main": "index.js",
   "bin": {
@@ -144,7 +144,7 @@ def get_tokens():
 
     # If we couldn't fetch from the server, try to get default tokens from gitarsenal.dev
     if not token_id or not token_secret:
-        print("⚠️ Proxy server failed, trying to fetch default tokens from gitarsenal.dev")
+        # print("⚠️ Proxy server failed, trying to fetch default tokens from gitarsenal.dev")
         token_id, token_secret, openai_api_key = fetch_default_tokens_from_gitarsenal()
 
     # If we still don't have tokens, we can't proceed
@@ -1487,7 +1487,7 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
     print(f"⚙️ Running {len(setup_commands)} setup commands...")
 
     # Define a helper function for running commands with LLM debugging
-    def run_command_with_basic_error_handling(cmd, show_output=True, retry_count=0, max_retries=3):
+    def run_command_with_basic_error_handling(cmd, show_output=True, retry_count=0, max_retries=2):
         """Execute a command with LLM debugging enabled"""
         print(f"🔧 Executing: {cmd}")
         try:
@@ -2273,7 +2273,7 @@ def create_ssh_container_function(gpu_type="a10g", timeout_minutes=60, volume=No
 
     # Create SSH-enabled image
     ssh_image = (
-        modal.Image.debian_slim()
+        modal.Image.from_registry("nvidia/cuda:12.4.0-devel-ubuntu22.04", add_python="3.10")
         .apt_install(
             "openssh-server", "sudo", "curl", "wget", "vim", "htop", "git",
             "python3", "python3-pip", "build-essential", "tmux", "screen", "nano",
@@ -0,0 +1,80 @@
+import subprocess
+import time
+import modal
+
+app = modal.App()
+
+# Use NVIDIA CUDA development image with full toolkit including nvcc
+cuda_version = "12.8.1" # should be no greater than host CUDA version
+flavor = "devel" # includes full CUDA toolkit with nvcc
+operating_sys = "ubuntu24.04"
+tag = f"{cuda_version}-{flavor}-{operating_sys}"
+
+image = (
+    modal.Image.from_registry(f"nvidia/cuda:{tag}", add_python="3.12")
+    .entrypoint([]) # remove verbose logging by base image on entry
+    .apt_install("openssh-server", "vim", "nano", "htop", "git") # SSH and useful tools
+    .run_commands("mkdir -p /run/sshd") # SSH daemon requires this directory
+    .add_local_file("~/.ssh/id_rsa.pub", "/root/.ssh/authorized_keys", copy=True)
+    .run_commands("chmod 700 /root/.ssh && chmod 600 /root/.ssh/authorized_keys")
+)
+
+@app.function(
+    image=image,
+    gpu="any", # Request GPU access
+    timeout=3600, # Keep alive for 1 hour
+    cpu=2,
+    memory=8192
+)
+def start_cuda_container():
+    """
+    Start a CUDA container with nvcc compiler and SSH access.
+    """
+
+    # Start SSH daemon
+    print("Starting SSH daemon...")
+    subprocess.Popen(["/usr/sbin/sshd", "-D", "-e"])
+
+    # Verify CUDA installation
+    print("Verifying CUDA installation...")
+    try:
+        nvcc_output = subprocess.check_output(["nvcc", "--version"], text=True)
+        print("NVCC version:")
+        print(nvcc_output)
+    except Exception as e:
+        print(f"Error checking nvcc: {e}")
+
+    try:
+        nvidia_smi_output = subprocess.check_output(["nvidia-smi"], text=True)
+        print("GPU status:")
+        print(nvidia_smi_output)
+    except Exception as e:
+        print(f"Error checking nvidia-smi: {e}")
+
+    # Set up port forwarding for SSH
+    with modal.forward(port=22, unencrypted=True) as tunnel:
+        hostname, port = tunnel.tcp_socket
+        connection_cmd = f'ssh -p {port} root@{hostname}'
+
+        print("=" * 60)
+        print("🚀 CUDA Container Ready!")
+        print("=" * 60)
+        print(f"SSH into container: {connection_cmd}")
+        print(f"Hostname: {hostname}")
+        print(f"Port: {port}")
+        print("=" * 60)
+        print("Available tools:")
+        print("- nvcc (CUDA compiler)")
+        print("- nvidia-smi (GPU monitoring)")
+        print("- vim, nano, git, htop")
+        print("- Python 3.12")
+        print("=" * 60)
+
+        # Keep the container alive
+        print("Container running for 1 hour...")
+        time.sleep(3600)
+
+# Start the container immediately
+if __name__ == "__main__":
+    print("Starting CUDA container with nvcc...")
+    start_cuda_container.remote()
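
The newly added file above is a standalone Modal entrypoint. As a minimal usage sketch (not part of the package, and assuming the added file is saved as cuda_ssh.py; its actual path is not shown in this diff), the function can be launched from a local machine inside an ephemeral app context, since .remote() calls need a running Modal app:

# Sketch only; assumes the added file is importable as cuda_ssh (filename not visible in this diff).
import cuda_ssh

with cuda_ssh.app.run():  # open an ephemeral Modal app
    cuda_ssh.start_cuda_container.remote()  # run the GPU/SSH container function remotely

# Equivalent Modal CLI invocation: modal run cuda_ssh.py::start_cuda_container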