gitarsenal-cli 1.9.34 → 1.9.36

package/.venv_status.json CHANGED
@@ -1 +1 @@
- {"created":"2025-08-10T17:58:47.793Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
+ {"created":"2025-08-11T10:04:42.375Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/bin/gitarsenal.js CHANGED
@@ -108,7 +108,13 @@ function activateVirtualEnvironment() {
  async function previewRecommendations(repoUrl) {
  const spinner = ora('Analyzing repository for GPU/Torch/CUDA recommendations...').start();
  try {
- const apiUrl = process.env.GITARSENAL_API_URL || 'https://www.gitarsenal.dev/api/gitingest-setup-commands';
+ const envUrl = process.env.GITARSENAL_API_URL;
+ const endpoints = envUrl
+ ? [envUrl]
+ : [
+ 'https://www.gitarsenal.dev/api/best_gpu'
+ ];
+
  const payload = {
  repoUrl,
  // Minimal GitIngest data to allow backend to run LLM analysis
@@ -130,32 +136,61 @@ async function previewRecommendations(repoUrl) {
  content_preview: ''
  },
  success: true
- }
+ },
+ // Hint server to do lightweight preview if supported
+ preview: true
  };
 
- // Use global fetch (Node 18+) and follow redirects
- const res = await fetch(apiUrl, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json',
- 'User-Agent': 'GitArsenal-CLI/1.0'
- },
- body: JSON.stringify(payload),
- redirect: 'follow'
- });
+ // Increase timeout to allow server to compute recommendations before GPU selection
+ const previewTimeoutMs = Number(process.env.GITARSENAL_PREVIEW_TIMEOUT_MS || 90000);
 
- spinner.stop();
+ const fetchWithTimeout = async (url, body, timeoutMs = previewTimeoutMs) => {
+ const controller = new AbortController();
+ const id = setTimeout(() => controller.abort(), timeoutMs);
+ try {
+ const res = await fetch(url, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'User-Agent': 'GitArsenal-CLI/1.0'
+ },
+ body: JSON.stringify(body),
+ redirect: 'follow',
+ signal: controller.signal
+ });
+ clearTimeout(id);
+ return res;
+ } catch (e) {
+ clearTimeout(id);
+ throw e;
+ }
+ };
 
- if (!res.ok) {
- const text = await res.text().catch(() => '');
- console.log(chalk.yellow(`⚠️ Preview request failed (${res.status}).`));
- if (text) console.log(chalk.gray(`Response: ${text.slice(0, 500)}`));
- return null;
+ let data = null;
+ let lastErrorText = '';
+
+ for (const url of endpoints) {
+ try {
+ spinner.text = `Analyzing (preview): ${url}`;
+ const res = await fetchWithTimeout(url, payload, previewTimeoutMs);
+ if (!res.ok) {
+ const text = await res.text().catch(() => '');
+ lastErrorText = `${res.status} ${text.slice(0, 300)}`;
+ continue;
+ }
+ data = await res.json().catch(() => null);
+ if (data) break;
+ } catch (err) {
+ lastErrorText = err && err.message ? err.message : 'request failed';
+ continue;
+ }
  }
 
- const data = await res.json().catch(() => null);
+ spinner.stop();
+
  if (!data) {
- console.log(chalk.yellow('⚠️ Could not parse recommendations preview response.'));
+ console.log(chalk.yellow('⚠️ Preview unavailable (timeout or server error).'));
+ if (lastErrorText) console.log(chalk.gray(`Reason: ${lastErrorText}`));
  return null;
  }
 
@@ -261,6 +296,66 @@ function printGpuTorchCudaSummary(result) {
  } catch {}
  }
 
+ // Full fetch to get both setup commands and recommendations in one request
+ async function fetchFullSetupAndRecs(repoUrl) {
+ const envUrl = process.env.GITARSENAL_API_URL;
+ const endpoints = envUrl ? [envUrl] : ['https://www.gitarsenal.dev/api/gitingest-setup-commands'];
+ const payload = {
+ repoUrl,
+ gitingestData: {
+ system_info: {
+ platform: process.platform,
+ python_version: process.version,
+ detected_language: 'Unknown',
+ detected_technologies: [],
+ file_count: 0,
+ repo_stars: 0,
+ repo_forks: 0,
+ primary_package_manager: 'Unknown',
+ complexity_level: 'Unknown'
+ },
+ repository_analysis: {
+ summary: `Repository: ${repoUrl}`,
+ tree: '',
+ content_preview: ''
+ },
+ success: true
+ }
+ };
+ const timeoutMs = Number(process.env.GITARSENAL_FULL_TIMEOUT_MS || 180000);
+
+ const fetchWithTimeout = async (url, body, timeout) => {
+ const controller = new AbortController();
+ const id = setTimeout(() => controller.abort(), timeout);
+ try {
+ const res = await fetch(url, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json', 'User-Agent': 'GitArsenal-CLI/1.0' },
+ body: JSON.stringify(body),
+ redirect: 'follow',
+ signal: controller.signal
+ });
+ clearTimeout(id);
+ return res;
+ } catch (e) {
+ clearTimeout(id);
+ throw e;
+ }
+ };
+
+ for (const url of endpoints) {
+ try {
+ const res = await fetchWithTimeout(url, payload, timeoutMs);
+ if (!res.ok) continue;
+ const data = await res.json().catch(() => null);
+ if (data) return data;
+ } catch (_e) {
+ continue;
+ }
+ }
+ return null;
+ }
+
  // Function to send user data to web application
  async function sendUserData(userId, userName) {
  try {
@@ -563,9 +658,19 @@ async function runContainerCommand(options) {
  repoUrl = answers.repoUrl;
  }
 
- // NEW: Preview CUDA/Torch/GPU recommendations before choosing GPU (only if auto-detect enabled)
+ // Attempt full fetch first to get both commands and recommendations; fallback to preview on failure
  if (useApi && repoUrl) {
- await previewRecommendations(repoUrl);
+ const fullData = await fetchFullSetupAndRecs(repoUrl).catch(() => null);
+ if (fullData) {
+ printGpuTorchCudaSummary(fullData);
+ if (Array.isArray(fullData.commands) && fullData.commands.length) {
+ setupCommands = fullData.commands;
+ // Disable auto-detection since we already have commands
+ useApi = false;
+ }
+ } else {
+ await previewRecommendations(repoUrl);
+ }
  }
 
  // Prompt for GPU type if not specified
@@ -948,4 +1053,4 @@ async function handleKeysDelete(options) {
  console.error(chalk.red(`Error: ${error.message}`));
  process.exit(1);
  }
- }
+ }
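
Both the reworked previewRecommendations and the new fetchFullSetupAndRecs in this file follow the same shape: POST the payload to each candidate endpoint in turn, bound every attempt with an AbortController-driven timeout, and keep the last error text for the warning printed when nothing succeeds. A minimal standalone sketch of that pattern, assuming Node 18+ global fetch (the helper name and return shape below are illustrative, not part of the package):

    // Illustrative sketch only; not gitarsenal-cli code.
    async function postJsonWithFallback(endpoints, payload, timeoutMs = 90000) {
      let lastError = '';
      for (const url of endpoints) {
        const controller = new AbortController();
        const timer = setTimeout(() => controller.abort(), timeoutMs);
        try {
          const res = await fetch(url, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload),
            redirect: 'follow',
            signal: controller.signal // aborts this attempt when the timer fires
          });
          if (!res.ok) {
            lastError = `HTTP ${res.status}`;
          } else {
            const data = await res.json().catch(() => null);
            if (data) return { data, error: null }; // first parseable response wins
            lastError = 'unparseable JSON response';
          }
        } catch (err) {
          lastError = err && err.message ? err.message : 'request failed';
        } finally {
          clearTimeout(timer); // never leave the timer pending
        }
        // any failure falls through to the next endpoint
      }
      return { data: null, error: lastError };
    }

The 90-second default mirrors GITARSENAL_PREVIEW_TIMEOUT_MS above; the full fetch uses the longer GITARSENAL_FULL_TIMEOUT_MS default (180 s) because the server has to produce setup commands as well as recommendations.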
package/lib/sandbox.js CHANGED
@@ -113,6 +113,8 @@ async function runContainer(options) {
  const tempCommandsFile = path.join(os.tmpdir(), `gitarsenal-commands-${Date.now()}.txt`);
  fs.writeFileSync(tempCommandsFile, setupCommands.join('\n'));
  args.push('--commands-file', tempCommandsFile);
+ // Ensure Python skips auto-detection via GitIngest when commands are provided
+ args.push('--no-gitingest');
  }
 
  // Log the command being executed
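
The new flag pairs with the existing --commands-file handoff shown in this hunk: when the JS wrapper already has setup commands, it writes them to a temp file and tells the Python sandbox script to skip its own GitIngest auto-detection. A hedged sketch of that argument assembly (the helper name is illustrative; the surrounding spawn call sits outside this hunk):

    // Illustrative sketch only; not gitarsenal-cli code.
    const fs = require('fs');
    const os = require('os');
    const path = require('path');

    function buildSandboxArgs(baseArgs, setupCommands) {
      const args = [...baseArgs];
      if (Array.isArray(setupCommands) && setupCommands.length) {
        // Persist the known commands so the Python side can replay them verbatim...
        const tempCommandsFile = path.join(os.tmpdir(), `gitarsenal-commands-${Date.now()}.txt`);
        fs.writeFileSync(tempCommandsFile, setupCommands.join('\n'));
        args.push('--commands-file', tempCommandsFile);
        // ...and suppress the redundant GitIngest analysis on that side.
        args.push('--no-gitingest');
      }
      return args;
    }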
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "gitarsenal-cli",
- "version": "1.9.34",
+ "version": "1.9.36",
  "description": "CLI tool for creating Modal sandboxes with GitHub repositories",
  "main": "index.js",
  "bin": {
@@ -914,68 +914,6 @@ def fetch_setup_commands_from_api(repo_url):
  print(f"📄 Response size: {len(response.text)} bytes")
  print(f"📄 Response URL: {response.url}")
 
- # NEW: Print a concise summary of only GPU, Torch, and CUDA recommendations
- try:
- print("\n📊 API RESULT SUMMARY (GPU/Torch/CUDA)")
- print("────────────────────────────────────────────────────────")
-
- # CUDA Recommendation
- cuda = data.get("cudaRecommendation")
- if cuda:
- print("🎯 CUDA Recommendation")
- print(" - CUDA: ", cuda.get("recommendedCudaVersion", "Unknown"))
- ct = cuda.get("compatibleTorchVersions") or []
- if ct:
- print(" - Torch Compatibility: ", ", ".join(ct))
- if cuda.get("dockerImage"):
- print(" - Docker Image: ", cuda.get("dockerImage"))
- install_cmds = cuda.get("installCommands") or []
- if install_cmds:
- print(" - Install Commands:")
- for c in install_cmds:
- print(f" $ {c}")
- if cuda.get("notes"):
- print(" - Notes: ", cuda.get("notes"))
-
- # Torch Recommendation
- torch = data.get("torchRecommendation")
- if torch:
- print("\n🔥 PyTorch Recommendation")
- print(" - Torch: ", torch.get("recommendedTorchVersion", "Unknown"))
- print(" - CUDA Variant: ", torch.get("cudaVariant", "Unknown"))
- if torch.get("pipInstallCommand"):
- print(" - Install:")
- print(f" $ {torch.get('pipInstallCommand')}")
- extras = torch.get("extraPackages") or []
- if extras:
- print(" - Extra Packages: ", ", ".join(extras))
- if torch.get("notes"):
- print(" - Notes: ", torch.get("notes"))
-
- # GPU Recommendation
- gpu = data.get("gpuRecommendation")
- if gpu:
- print("\n🖥️ GPU Recommendation")
- if gpu.get("minimumVramGb") is not None:
- print(f" - Min VRAM: {gpu.get('minimumVramGb')} GB")
- if gpu.get("recommendedVramGb") is not None:
- print(f" - Recommended VRAM: {gpu.get('recommendedVramGb')} GB")
- if gpu.get("minComputeCapability"):
- print(f" - Min Compute Capability: {gpu.get('minComputeCapability')}")
- rec_models = gpu.get("recommendedModels") or []
- if rec_models:
- print(" - Recommended Models: ", ", ".join(rec_models))
- budget = gpu.get("budgetOptions") or []
- if budget:
- print(" - Budget Options: ", ", ".join(budget))
- clouds = gpu.get("cloudInstances") or []
- if clouds:
- print(" - Cloud Instances: ", ", ".join(clouds))
- if gpu.get("notes"):
- print(" - Notes: ", gpu.get("notes"))
- except Exception as summary_err:
- print(f"⚠️ Failed to print API summary: {summary_err}")
-
  # Extract setup commands from the response
  if "setupInstructions" in data and "commands" in data["setupInstructions"]:
  commands = data["setupInstructions"]["commands"]
@@ -1009,16 +947,6 @@ def fetch_setup_commands_from_api(repo_url):
  return fixed_commands
  else:
  print("⚠️ API response did not contain setupInstructions.commands field")
- # If top-level commands exist (newer API), use them
- if "commands" in data and isinstance(data["commands"], list):
- commands = data["commands"]
- print(f"✅ Found top-level commands array with {len(commands)} entries")
- fixed_commands = fix_setup_commands(commands)
- print("\n📋 Fixed commands:")
- for i, cmd in enumerate(fixed_commands, 1):
- print(f" {i}. {cmd}")
- return fixed_commands
-
  print("📋 Available fields in response:")
  for key in data.keys():
  print(f" - {key}")
@@ -1692,68 +1620,6 @@ def get_setup_commands_from_gitingest(repo_url):
  try:
  result = response.json()
 
- # NEW: Print a concise summary of only GPU, Torch, and CUDA recommendations
- try:
- print("\n📊 API RESULT SUMMARY (GPU/Torch/CUDA)")
- print("────────────────────────────────────────────────────────")
-
- # CUDA Recommendation
- cuda = result.get("cudaRecommendation")
- if cuda:
- print("🎯 CUDA Recommendation")
- print(" - CUDA: ", cuda.get("recommendedCudaVersion", "Unknown"))
- ct = cuda.get("compatibleTorchVersions") or []
- if ct:
- print(" - Torch Compatibility: ", ", ".join(ct))
- if cuda.get("dockerImage"):
- print(" - Docker Image: ", cuda.get("dockerImage"))
- install_cmds = cuda.get("installCommands") or []
- if install_cmds:
- print(" - Install Commands:")
- for c in install_cmds:
- print(f" $ {c}")
- if cuda.get("notes"):
- print(" - Notes: ", cuda.get("notes"))
-
- # Torch Recommendation
- torch = result.get("torchRecommendation")
- if torch:
- print("\n🔥 PyTorch Recommendation")
- print(" - Torch: ", torch.get("recommendedTorchVersion", "Unknown"))
- print(" - CUDA Variant: ", torch.get("cudaVariant", "Unknown"))
- if torch.get("pipInstallCommand"):
- print(" - Install:")
- print(f" $ {torch.get('pipInstallCommand')}")
- extras = torch.get("extraPackages") or []
- if extras:
- print(" - Extra Packages: ", ", ".join(extras))
- if torch.get("notes"):
- print(" - Notes: ", torch.get("notes"))
-
- # GPU Recommendation
- gpu = result.get("gpuRecommendation")
- if gpu:
- print("\n🖥️ GPU Recommendation")
- if gpu.get("minimumVramGb") is not None:
- print(f" - Min VRAM: {gpu.get('minimumVramGb')} GB")
- if gpu.get("recommendedVramGb") is not None:
- print(f" - Recommended VRAM: {gpu.get('recommendedVramGb')} GB")
- if gpu.get("minComputeCapability"):
- print(f" - Min Compute Capability: {gpu.get('minComputeCapability')}")
- rec_models = gpu.get("recommendedModels") or []
- if rec_models:
- print(" - Recommended Models: ", ", ".join(rec_models))
- budget = gpu.get("budgetOptions") or []
- if budget:
- print(" - Budget Options: ", ", ".join(budget))
- clouds = gpu.get("cloudInstances") or []
- if clouds:
- print(" - Cloud Instances: ", ", ".join(clouds))
- if gpu.get("notes"):
- print(" - Notes: ", gpu.get("notes"))
- except Exception as summary_err:
- print(f"⚠️ Failed to print API summary: {summary_err}")
-
  # Check if we have commands in the response
  commands = None
 
@@ -2534,5 +2400,4 @@ if __name__ == "__main__":
  except Exception as e:
  # print(f"\n❌ Error: {e}")
  # print("🧹 Cleaning up resources...")
- cleanup_modal_token()
-
+ cleanup_modal_token()