gitarsenal-cli 1.9.31 → 1.9.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.venv_status.json +1 -1
- package/package.json +1 -1
- package/python/test_modalSandboxScript.py +134 -0
package/.venv_status.json
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"created":"2025-08-
|
|
1
|
+
{"created":"2025-08-10T17:44:29.632Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
|
package/package.json
CHANGED
|
@@ -914,6 +914,68 @@ def fetch_setup_commands_from_api(repo_url):
|
|
|
914
914
|
print(f"š Response size: {len(response.text)} bytes")
|
|
915
915
|
print(f"š Response URL: {response.url}")
|
|
916
916
|
|
|
917
|
+
# NEW: Print a concise summary of only GPU, Torch, and CUDA recommendations
|
|
918
|
+
try:
|
|
919
|
+
print("\nš API RESULT SUMMARY (GPU/Torch/CUDA)")
|
|
920
|
+
print("āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā")
|
|
921
|
+
|
|
922
|
+
# CUDA Recommendation
|
|
923
|
+
cuda = data.get("cudaRecommendation")
|
|
924
|
+
if cuda:
|
|
925
|
+
print("šÆ CUDA Recommendation")
|
|
926
|
+
print(" - CUDA: ", cuda.get("recommendedCudaVersion", "Unknown"))
|
|
927
|
+
ct = cuda.get("compatibleTorchVersions") or []
|
|
928
|
+
if ct:
|
|
929
|
+
print(" - Torch Compatibility: ", ", ".join(ct))
|
|
930
|
+
if cuda.get("dockerImage"):
|
|
931
|
+
print(" - Docker Image: ", cuda.get("dockerImage"))
|
|
932
|
+
install_cmds = cuda.get("installCommands") or []
|
|
933
|
+
if install_cmds:
|
|
934
|
+
print(" - Install Commands:")
|
|
935
|
+
for c in install_cmds:
|
|
936
|
+
print(f" $ {c}")
|
|
937
|
+
if cuda.get("notes"):
|
|
938
|
+
print(" - Notes: ", cuda.get("notes"))
|
|
939
|
+
|
|
940
|
+
# Torch Recommendation
|
|
941
|
+
torch = data.get("torchRecommendation")
|
|
942
|
+
if torch:
|
|
943
|
+
print("\nš„ PyTorch Recommendation")
|
|
944
|
+
print(" - Torch: ", torch.get("recommendedTorchVersion", "Unknown"))
|
|
945
|
+
print(" - CUDA Variant: ", torch.get("cudaVariant", "Unknown"))
|
|
946
|
+
if torch.get("pipInstallCommand"):
|
|
947
|
+
print(" - Install:")
|
|
948
|
+
print(f" $ {torch.get('pipInstallCommand')}")
|
|
949
|
+
extras = torch.get("extraPackages") or []
|
|
950
|
+
if extras:
|
|
951
|
+
print(" - Extra Packages: ", ", ".join(extras))
|
|
952
|
+
if torch.get("notes"):
|
|
953
|
+
print(" - Notes: ", torch.get("notes"))
|
|
954
|
+
|
|
955
|
+
# GPU Recommendation
|
|
956
|
+
gpu = data.get("gpuRecommendation")
|
|
957
|
+
if gpu:
|
|
958
|
+
print("\nš„ļø GPU Recommendation")
|
|
959
|
+
if gpu.get("minimumVramGb") is not None:
|
|
960
|
+
print(f" - Min VRAM: {gpu.get('minimumVramGb')} GB")
|
|
961
|
+
if gpu.get("recommendedVramGb") is not None:
|
|
962
|
+
print(f" - Recommended VRAM: {gpu.get('recommendedVramGb')} GB")
|
|
963
|
+
if gpu.get("minComputeCapability"):
|
|
964
|
+
print(f" - Min Compute Capability: {gpu.get('minComputeCapability')}")
|
|
965
|
+
rec_models = gpu.get("recommendedModels") or []
|
|
966
|
+
if rec_models:
|
|
967
|
+
print(" - Recommended Models: ", ", ".join(rec_models))
|
|
968
|
+
budget = gpu.get("budgetOptions") or []
|
|
969
|
+
if budget:
|
|
970
|
+
print(" - Budget Options: ", ", ".join(budget))
|
|
971
|
+
clouds = gpu.get("cloudInstances") or []
|
|
972
|
+
if clouds:
|
|
973
|
+
print(" - Cloud Instances: ", ", ".join(clouds))
|
|
974
|
+
if gpu.get("notes"):
|
|
975
|
+
print(" - Notes: ", gpu.get("notes"))
|
|
976
|
+
except Exception as summary_err:
|
|
977
|
+
print(f"⚠️ Failed to print API summary: {summary_err}")
|
|
978
|
+
|
|
917
979
|
# Extract setup commands from the response
|
|
918
980
|
if "setupInstructions" in data and "commands" in data["setupInstructions"]:
|
|
919
981
|
commands = data["setupInstructions"]["commands"]
|
|
@@ -947,6 +1009,16 @@ def fetch_setup_commands_from_api(repo_url):
|
|
|
947
1009
|
return fixed_commands
|
|
948
1010
|
else:
|
|
949
1011
|
print("⚠️ API response did not contain setupInstructions.commands field")
|
|
1012
|
+
# If top-level commands exist (newer API), use them
|
|
1013
|
+
if "commands" in data and isinstance(data["commands"], list):
|
|
1014
|
+
commands = data["commands"]
|
|
1015
|
+
print(f"✅ Found top-level commands array with {len(commands)} entries")
|
|
1016
|
+
fixed_commands = fix_setup_commands(commands)
|
|
1017
|
+
print("\nš Fixed commands:")
|
|
1018
|
+
for i, cmd in enumerate(fixed_commands, 1):
|
|
1019
|
+
print(f" {i}. {cmd}")
|
|
1020
|
+
return fixed_commands
|
|
1021
|
+
|
|
950
1022
|
print("š Available fields in response:")
|
|
951
1023
|
for key in data.keys():
|
|
952
1024
|
print(f" - {key}")
|
|
@@ -1620,6 +1692,68 @@ def get_setup_commands_from_gitingest(repo_url):
|
|
|
1620
1692
|
try:
|
|
1621
1693
|
result = response.json()
|
|
1622
1694
|
|
|
1695
|
+
# NEW: Print a concise summary of only GPU, Torch, and CUDA recommendations
|
|
1696
|
+
try:
|
|
1697
|
+
print("\nš API RESULT SUMMARY (GPU/Torch/CUDA)")
|
|
1698
|
+
print("āāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāāā")
|
|
1699
|
+
|
|
1700
|
+
# CUDA Recommendation
|
|
1701
|
+
cuda = result.get("cudaRecommendation")
|
|
1702
|
+
if cuda:
|
|
1703
|
+
print("šÆ CUDA Recommendation")
|
|
1704
|
+
print(" - CUDA: ", cuda.get("recommendedCudaVersion", "Unknown"))
|
|
1705
|
+
ct = cuda.get("compatibleTorchVersions") or []
|
|
1706
|
+
if ct:
|
|
1707
|
+
print(" - Torch Compatibility: ", ", ".join(ct))
|
|
1708
|
+
if cuda.get("dockerImage"):
|
|
1709
|
+
print(" - Docker Image: ", cuda.get("dockerImage"))
|
|
1710
|
+
install_cmds = cuda.get("installCommands") or []
|
|
1711
|
+
if install_cmds:
|
|
1712
|
+
print(" - Install Commands:")
|
|
1713
|
+
for c in install_cmds:
|
|
1714
|
+
print(f" $ {c}")
|
|
1715
|
+
if cuda.get("notes"):
|
|
1716
|
+
print(" - Notes: ", cuda.get("notes"))
|
|
1717
|
+
|
|
1718
|
+
# Torch Recommendation
|
|
1719
|
+
torch = result.get("torchRecommendation")
|
|
1720
|
+
if torch:
|
|
1721
|
+
print("\nš„ PyTorch Recommendation")
|
|
1722
|
+
print(" - Torch: ", torch.get("recommendedTorchVersion", "Unknown"))
|
|
1723
|
+
print(" - CUDA Variant: ", torch.get("cudaVariant", "Unknown"))
|
|
1724
|
+
if torch.get("pipInstallCommand"):
|
|
1725
|
+
print(" - Install:")
|
|
1726
|
+
print(f" $ {torch.get('pipInstallCommand')}")
|
|
1727
|
+
extras = torch.get("extraPackages") or []
|
|
1728
|
+
if extras:
|
|
1729
|
+
print(" - Extra Packages: ", ", ".join(extras))
|
|
1730
|
+
if torch.get("notes"):
|
|
1731
|
+
print(" - Notes: ", torch.get("notes"))
|
|
1732
|
+
|
|
1733
|
+
# GPU Recommendation
|
|
1734
|
+
gpu = result.get("gpuRecommendation")
|
|
1735
|
+
if gpu:
|
|
1736
|
+
print("\nš„ļø GPU Recommendation")
|
|
1737
|
+
if gpu.get("minimumVramGb") is not None:
|
|
1738
|
+
print(f" - Min VRAM: {gpu.get('minimumVramGb')} GB")
|
|
1739
|
+
if gpu.get("recommendedVramGb") is not None:
|
|
1740
|
+
print(f" - Recommended VRAM: {gpu.get('recommendedVramGb')} GB")
|
|
1741
|
+
if gpu.get("minComputeCapability"):
|
|
1742
|
+
print(f" - Min Compute Capability: {gpu.get('minComputeCapability')}")
|
|
1743
|
+
rec_models = gpu.get("recommendedModels") or []
|
|
1744
|
+
if rec_models:
|
|
1745
|
+
print(" - Recommended Models: ", ", ".join(rec_models))
|
|
1746
|
+
budget = gpu.get("budgetOptions") or []
|
|
1747
|
+
if budget:
|
|
1748
|
+
print(" - Budget Options: ", ", ".join(budget))
|
|
1749
|
+
clouds = gpu.get("cloudInstances") or []
|
|
1750
|
+
if clouds:
|
|
1751
|
+
print(" - Cloud Instances: ", ", ".join(clouds))
|
|
1752
|
+
if gpu.get("notes"):
|
|
1753
|
+
print(" - Notes: ", gpu.get("notes"))
|
|
1754
|
+
except Exception as summary_err:
|
|
1755
|
+
print(f"⚠️ Failed to print API summary: {summary_err}")
|
|
1756
|
+
|
|
1623
1757
|
# Check if we have commands in the response
|
|
1624
1758
|
commands = None
|
|
1625
1759
|
|