gitarsenal-cli 1.9.35 → 1.9.37
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.venv_status.json
CHANGED
@@ -1 +1 @@
-{"created":"2025-08-
+{"created":"2025-08-11T11:05:53.789Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
package/bin/gitarsenal.js
CHANGED
@@ -112,7 +112,7 @@ async function previewRecommendations(repoUrl) {
   const endpoints = envUrl
     ? [envUrl]
     : [
-        'https://www.gitarsenal.dev/api/
+        'https://www.gitarsenal.dev/api/best_gpu'
       ];
 
   const payload = {
@@ -136,10 +136,15 @@ async function previewRecommendations(repoUrl) {
       content_preview: ''
     },
     success: true
-    }
+    },
+    // Hint server to do lightweight preview if supported
+    preview: true
   };
 
-
+  // Increase timeout to allow server to compute recommendations before GPU selection
+  const previewTimeoutMs = Number(process.env.GITARSENAL_PREVIEW_TIMEOUT_MS || 90000);
+
+  const fetchWithTimeout = async (url, body, timeoutMs = previewTimeoutMs) => {
     const controller = new AbortController();
     const id = setTimeout(() => controller.abort(), timeoutMs);
     try {
@@ -167,7 +172,7 @@ async function previewRecommendations(repoUrl) {
   for (const url of endpoints) {
     try {
       spinner.text = `Analyzing (preview): ${url}`;
-      const res = await fetchWithTimeout(url, payload,
+      const res = await fetchWithTimeout(url, payload, previewTimeoutMs);
       if (!res.ok) {
         const text = await res.text().catch(() => '');
         lastErrorText = `${res.status} ${text.slice(0, 300)}`;
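The preview path above threads one configurable timeout (GITARSENAL_PREVIEW_TIMEOUT_MS, default 90000 ms) through every request via AbortController. A minimal standalone sketch of the same pattern follows; postWithTimeout and DEFAULT_TIMEOUT_MS are illustrative names, not part of the package:

    // Sketch: POST with a hard timeout via AbortController (Node 18+ global fetch).
    const DEFAULT_TIMEOUT_MS = Number(process.env.GITARSENAL_PREVIEW_TIMEOUT_MS || 90000);

    async function postWithTimeout(url, body, timeoutMs = DEFAULT_TIMEOUT_MS) {
      const controller = new AbortController();
      const timer = setTimeout(() => controller.abort(), timeoutMs);
      try {
        // fetch rejects with an AbortError once the timer fires
        return await fetch(url, {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify(body),
          signal: controller.signal
        });
      } finally {
        clearTimeout(timer); // clear whether the request resolved or threw
      }
    }

The helper in the diff clears the timer in both the success and error paths; the finally block here is equivalent.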
@@ -291,6 +296,66 @@ function printGpuTorchCudaSummary(result) {
   } catch {}
 }
 
+// Full fetch to get both setup commands and recommendations in one request
+async function fetchFullSetupAndRecs(repoUrl) {
+  const envUrl = process.env.GITARSENAL_API_URL;
+  const endpoints = envUrl ? [envUrl] : ['https://www.gitarsenal.dev/api/gitingest-setup-commands'];
+  const payload = {
+    repoUrl,
+    gitingestData: {
+      system_info: {
+        platform: process.platform,
+        python_version: process.version,
+        detected_language: 'Unknown',
+        detected_technologies: [],
+        file_count: 0,
+        repo_stars: 0,
+        repo_forks: 0,
+        primary_package_manager: 'Unknown',
+        complexity_level: 'Unknown'
+      },
+      repository_analysis: {
+        summary: `Repository: ${repoUrl}`,
+        tree: '',
+        content_preview: ''
+      },
+      success: true
+    }
+  };
+  const timeoutMs = Number(process.env.GITARSENAL_FULL_TIMEOUT_MS || 180000);
+
+  const fetchWithTimeout = async (url, body, timeout) => {
+    const controller = new AbortController();
+    const id = setTimeout(() => controller.abort(), timeout);
+    try {
+      const res = await fetch(url, {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json', 'User-Agent': 'GitArsenal-CLI/1.0' },
+        body: JSON.stringify(body),
+        redirect: 'follow',
+        signal: controller.signal
+      });
+      clearTimeout(id);
+      return res;
+    } catch (e) {
+      clearTimeout(id);
+      throw e;
+    }
+  };
+
+  for (const url of endpoints) {
+    try {
+      const res = await fetchWithTimeout(url, payload, timeoutMs);
+      if (!res.ok) continue;
+      const data = await res.json().catch(() => null);
+      if (data) return data;
+    } catch (_e) {
+      continue;
+    }
+  }
+  return null;
+}
+
 // Function to send user data to web application
 async function sendUserData(userId, userName) {
   try {
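Note that fetchFullSetupAndRecs never throws: every endpoint failure is swallowed with continue and the function resolves to null, so callers can treat it as a best-effort fast path. A sketch of that calling contract — getSetupPlan is an illustrative wrapper, not package API:

    // Sketch: use the full fetch as a best-effort fast path.
    async function getSetupPlan(repoUrl) {
      const full = await fetchFullSetupAndRecs(repoUrl); // resolves null on any failure
      if (full && Array.isArray(full.commands) && full.commands.length) {
        return { commands: full.commands, source: 'full' };
      }
      await previewRecommendations(repoUrl); // recommendations only; commands resolved later
      return { commands: null, source: 'preview' };
    }

The actual wiring appears in the runContainerCommand hunk below, which additionally adds a belt-and-braces .catch(() => null).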
@@ -593,9 +658,19 @@ async function runContainerCommand(options) {
     repoUrl = answers.repoUrl;
   }
 
-  //
+  // Attempt full fetch first to get both commands and recommendations; fallback to preview on failure
   if (useApi && repoUrl) {
-    await
+    const fullData = await fetchFullSetupAndRecs(repoUrl).catch(() => null);
+    if (fullData) {
+      printGpuTorchCudaSummary(fullData);
+      if (Array.isArray(fullData.commands) && fullData.commands.length) {
+        setupCommands = fullData.commands;
+        // Disable auto-detection since we already have commands
+        useApi = false;
+      }
+    } else {
+      await previewRecommendations(repoUrl);
+    }
   }
 
   // Prompt for GPU type if not specified
@@ -978,4 +1053,4 @@ async function handleKeysDelete(options) {
     console.error(chalk.red(`Error: ${error.message}`));
     process.exit(1);
   }
-}
+}
package/config.json
CHANGED
package/lib/sandbox.js
CHANGED
@@ -113,6 +113,8 @@ async function runContainer(options) {
     const tempCommandsFile = path.join(os.tmpdir(), `gitarsenal-commands-${Date.now()}.txt`);
     fs.writeFileSync(tempCommandsFile, setupCommands.join('\n'));
     args.push('--commands-file', tempCommandsFile);
+    // Ensure Python skips auto-detection via GitIngest when commands are provided
+    args.push('--no-gitingest');
   }
 
   // Log the command being executed
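Passing --no-gitingest alongside --commands-file keeps the Node and Python sides from running the same repository analysis twice. A self-contained sketch of the resulting argument assembly; the surrounding if block is taken from the diff, while setupCommands here is a placeholder input and the eventual spawn of the Python entrypoint is assumed:

    // Sketch: argument assembly when setup commands were already fetched upstream.
    const os = require('os');
    const fs = require('fs');
    const path = require('path');

    const setupCommands = ['pip install -r requirements.txt']; // placeholder input
    const args = [];
    if (setupCommands.length) {
      const tempCommandsFile = path.join(os.tmpdir(), `gitarsenal-commands-${Date.now()}.txt`);
      fs.writeFileSync(tempCommandsFile, setupCommands.join('\n'));
      args.push('--commands-file', tempCommandsFile);
      // Tell the Python side to skip its own GitIngest pass
      args.push('--no-gitingest');
    }
    console.log(args);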
package/package.json
CHANGED
Binary file

@@ -126,7 +126,7 @@ def create_modal_ssh_container(gpu_type, repo_url=None, repo_name=None, setup_co
         print(f"Using default volume name: {volume_name}")
     except KeyboardInterrupt:
         print("\nSetup cancelled.")
-
+        sys.exit(1)
 
     # Check if Modal is authenticated
     try:
@@ -914,68 +914,6 @@ def fetch_setup_commands_from_api(repo_url):
         print(f"Response size: {len(response.text)} bytes")
         print(f"Response URL: {response.url}")
 
-        # NEW: Print a concise summary of only GPU, Torch, and CUDA recommendations
-        try:
-            print("\nAPI RESULT SUMMARY (GPU/Torch/CUDA)")
-            print("────────────────────────────────────────────────────────")
-
-            # CUDA Recommendation
-            cuda = data.get("cudaRecommendation")
-            if cuda:
-                print("🎯 CUDA Recommendation")
-                print(" - CUDA: ", cuda.get("recommendedCudaVersion", "Unknown"))
-                ct = cuda.get("compatibleTorchVersions") or []
-                if ct:
-                    print(" - Torch Compatibility: ", ", ".join(ct))
-                if cuda.get("dockerImage"):
-                    print(" - Docker Image: ", cuda.get("dockerImage"))
-                install_cmds = cuda.get("installCommands") or []
-                if install_cmds:
-                    print(" - Install Commands:")
-                    for c in install_cmds:
-                        print(f" $ {c}")
-                if cuda.get("notes"):
-                    print(" - Notes: ", cuda.get("notes"))
-
-            # Torch Recommendation
-            torch = data.get("torchRecommendation")
-            if torch:
-                print("\n🔥 PyTorch Recommendation")
-                print(" - Torch: ", torch.get("recommendedTorchVersion", "Unknown"))
-                print(" - CUDA Variant: ", torch.get("cudaVariant", "Unknown"))
-                if torch.get("pipInstallCommand"):
-                    print(" - Install:")
-                    print(f" $ {torch.get('pipInstallCommand')}")
-                extras = torch.get("extraPackages") or []
-                if extras:
-                    print(" - Extra Packages: ", ", ".join(extras))
-                if torch.get("notes"):
-                    print(" - Notes: ", torch.get("notes"))
-
-            # GPU Recommendation
-            gpu = data.get("gpuRecommendation")
-            if gpu:
-                print("\n🖥️ GPU Recommendation")
-                if gpu.get("minimumVramGb") is not None:
-                    print(f" - Min VRAM: {gpu.get('minimumVramGb')} GB")
-                if gpu.get("recommendedVramGb") is not None:
-                    print(f" - Recommended VRAM: {gpu.get('recommendedVramGb')} GB")
-                if gpu.get("minComputeCapability"):
-                    print(f" - Min Compute Capability: {gpu.get('minComputeCapability')}")
-                rec_models = gpu.get("recommendedModels") or []
-                if rec_models:
-                    print(" - Recommended Models: ", ", ".join(rec_models))
-                budget = gpu.get("budgetOptions") or []
-                if budget:
-                    print(" - Budget Options: ", ", ".join(budget))
-                clouds = gpu.get("cloudInstances") or []
-                if clouds:
-                    print(" - Cloud Instances: ", ", ".join(clouds))
-                if gpu.get("notes"):
-                    print(" - Notes: ", gpu.get("notes"))
-        except Exception as summary_err:
-            print(f"⚠️ Failed to print API summary: {summary_err}")
-
         # Extract setup commands from the response
         if "setupInstructions" in data and "commands" in data["setupInstructions"]:
             commands = data["setupInstructions"]["commands"]
@@ -1009,16 +947,6 @@ def fetch_setup_commands_from_api(repo_url):
             return fixed_commands
         else:
             print("⚠️ API response did not contain setupInstructions.commands field")
-            # If top-level commands exist (newer API), use them
-            if "commands" in data and isinstance(data["commands"], list):
-                commands = data["commands"]
-                print(f"✅ Found top-level commands array with {len(commands)} entries")
-                fixed_commands = fix_setup_commands(commands)
-                print("\nFixed commands:")
-                for i, cmd in enumerate(fixed_commands, 1):
-                    print(f" {i}. {cmd}")
-                return fixed_commands
-
             print("Available fields in response:")
             for key in data.keys():
                 print(f" - {key}")
@@ -1668,9 +1596,14 @@ def get_setup_commands_from_gitingest(repo_url):
         try:
             print(f"Trying API endpoint: {api_url}")
 
+            # Load stored credentials
+            stored_credentials = get_stored_credentials()
+
             payload = {
                 "repoUrl": repo_url,
-                "gitingestData": gitingest_data
+                "gitingestData": gitingest_data,
+                "storedCredentials": stored_credentials,  # Add back stored credentials
+                "preview": False
             }
 
             # Use the retry mechanism for more reliable requests
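Stored credentials ride along with the request again (the inline comment reads "Add back stored credentials"), and an explicit preview flag distinguishes this full call (preview: False) from the lightweight preview in bin/gitarsenal.js (preview: true). A JavaScript sketch of the shared request body; field names come from the diff, the concrete values are placeholders:

    // Sketch: the JSON body both call sites now send.
    const payload = {
      repoUrl: 'https://github.com/user/repo',
      gitingestData: { system_info: {}, repository_analysis: {}, success: true },
      storedCredentials: {}, // previously saved keys/tokens, re-added in this release
      preview: false         // the preview path sends true
    };
    console.log(JSON.stringify(payload, null, 2));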
@@ -1692,68 +1625,6 @@ def get_setup_commands_from_gitingest(repo_url):
         try:
             result = response.json()
 
-            # NEW: Print a concise summary of only GPU, Torch, and CUDA recommendations
-            try:
-                print("\nAPI RESULT SUMMARY (GPU/Torch/CUDA)")
-                print("────────────────────────────────────────────────────────")
-
-                # CUDA Recommendation
-                cuda = result.get("cudaRecommendation")
-                if cuda:
-                    print("🎯 CUDA Recommendation")
-                    print(" - CUDA: ", cuda.get("recommendedCudaVersion", "Unknown"))
-                    ct = cuda.get("compatibleTorchVersions") or []
-                    if ct:
-                        print(" - Torch Compatibility: ", ", ".join(ct))
-                    if cuda.get("dockerImage"):
-                        print(" - Docker Image: ", cuda.get("dockerImage"))
-                    install_cmds = cuda.get("installCommands") or []
-                    if install_cmds:
-                        print(" - Install Commands:")
-                        for c in install_cmds:
-                            print(f" $ {c}")
-                    if cuda.get("notes"):
-                        print(" - Notes: ", cuda.get("notes"))
-
-                # Torch Recommendation
-                torch = result.get("torchRecommendation")
-                if torch:
-                    print("\n🔥 PyTorch Recommendation")
-                    print(" - Torch: ", torch.get("recommendedTorchVersion", "Unknown"))
-                    print(" - CUDA Variant: ", torch.get("cudaVariant", "Unknown"))
-                    if torch.get("pipInstallCommand"):
-                        print(" - Install:")
-                        print(f" $ {torch.get('pipInstallCommand')}")
-                    extras = torch.get("extraPackages") or []
-                    if extras:
-                        print(" - Extra Packages: ", ", ".join(extras))
-                    if torch.get("notes"):
-                        print(" - Notes: ", torch.get("notes"))
-
-                # GPU Recommendation
-                gpu = result.get("gpuRecommendation")
-                if gpu:
-                    print("\n🖥️ GPU Recommendation")
-                    if gpu.get("minimumVramGb") is not None:
-                        print(f" - Min VRAM: {gpu.get('minimumVramGb')} GB")
-                    if gpu.get("recommendedVramGb") is not None:
-                        print(f" - Recommended VRAM: {gpu.get('recommendedVramGb')} GB")
-                    if gpu.get("minComputeCapability"):
-                        print(f" - Min Compute Capability: {gpu.get('minComputeCapability')}")
-                    rec_models = gpu.get("recommendedModels") or []
-                    if rec_models:
-                        print(" - Recommended Models: ", ", ".join(rec_models))
-                    budget = gpu.get("budgetOptions") or []
-                    if budget:
-                        print(" - Budget Options: ", ", ".join(budget))
-                    clouds = gpu.get("cloudInstances") or []
-                    if clouds:
-                        print(" - Cloud Instances: ", ", ".join(clouds))
-                    if gpu.get("notes"):
-                        print(" - Notes: ", gpu.get("notes"))
-            except Exception as summary_err:
-                print(f"⚠️ Failed to print API summary: {summary_err}")
-
             # Check if we have commands in the response
             commands = None
 
@@ -1961,7 +1832,7 @@ Return only the JSON array, no other text.
         client = openai.OpenAI(api_key=api_key)
 
         response = client.chat.completions.create(
-            model="gpt-
+            model="gpt-4o-mini",  # Fixed: using valid OpenAI model
             messages=[
                 {"role": "system", "content": "You are a command preprocessing assistant that modifies setup commands to use available credentials and make them non-interactive."},
                 {"role": "user", "content": prompt}
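The 1.9.35 model argument appears truncated in this render (model="gpt-); 1.9.37 pins gpt-4o-mini, which the inline comment flags as the fix. For reference, a sketch of the equivalent call through the official openai npm package — illustrative only, since the package makes this call from Python:

    // Sketch (openai npm package, ESM): the same chat-completions call, JS flavor.
    import OpenAI from 'openai';

    const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
    const response = await client.chat.completions.create({
      model: 'gpt-4o-mini', // must be a valid model id
      messages: [
        { role: 'system', content: 'You are a command preprocessing assistant.' },
        { role: 'user', content: 'Rewrite these setup commands to be non-interactive.' }
      ]
    });
    console.log(response.choices[0].message.content);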
@@ -2534,5 +2405,4 @@ if __name__ == "__main__":
     except Exception as e:
         # print(f"\n❌ Error: {e}")
         # print("🧹 Cleaning up resources...")
-        cleanup_modal_token()
-
+        cleanup_modal_token()