gitarsenal-cli 1.9.87 → 1.9.89
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/.venv_status.json
CHANGED
@@ -1 +1 @@
-{"created":"2025-08-
+{"created":"2025-08-21T07:38:12.600Z","packages":["modal","gitingest","requests","anthropic"],"uv_version":"uv 0.8.4 (Homebrew 2025-07-30)"}
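For context, `.venv_status.json` records when the CLI's cached virtual environment was created, which packages it expects, and the `uv` version used. Below is a minimal sketch of how such a status file could be read and checked; the `check_venv_status` helper and the required-package list are illustrative, not part of the package.

```python
import json
from pathlib import Path

# Illustrative helper: verify a cached venv status file lists the packages we need.
def check_venv_status(status_path=".venv_status.json",
                      required=("modal", "gitingest", "requests", "anthropic")):
    path = Path(status_path)
    if not path.exists():
        return False  # no status file: the venv has not been provisioned yet
    status = json.loads(path.read_text())
    installed = set(status.get("packages", []))
    missing = [pkg for pkg in required if pkg not in installed]
    if missing:
        print(f"Missing packages: {missing}")
        return False
    print(f"venv created {status.get('created')} with {status.get('uv_version')}")
    return True

if __name__ == "__main__":
    check_venv_status()
```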
package/bin/gitarsenal.js
CHANGED
@@ -1240,7 +1240,7 @@ async function runContainerCommand(options) {
   }
 
   // Prompt for GPU count if not already specified via CLI
-  if (!options.gpuCount
+  if (!options.gpuCount) {
     const gpuCountAnswers = await inquirer.prompt([
       {
         type: 'list',
@@ -57,6 +57,9 @@ Usage notes:
 - VERY IMPORTANT: You MUST avoid using search commands like `find` and `grep`. Instead use Grep, Glob, or Task to search. You MUST avoid read tools like `cat`, `head`, `tail`, and `ls`, and use Read and LS to read files.
 - If you _still_ need to run `grep`, STOP. ALWAYS USE ripgrep at `rg` first, which all Claude Code users have pre-installed.
 - IMPORTANT: Always use `uv pip` instead of just `pip` when running pip commands. For example, use `uv pip install package_name` instead of `pip install package_name`.
+- Create and activate a virtual environment using `uv venv` before running any Python package installations or commands using uv.
+- Add "--privileged" flag to Docker run command to resolve overlayfs and iptables permission issues during container initialization.
+- For repository setup workflows: Print all commands needed for clone/setup/run process at start, then execute them sequentially. Print completion count (e.g., "Executed 5/7 commands"). Assume Linux OS for all commands.
 - TESTING MODE: For ML training and inference commands, always use minimal/quick parameters to enable fast testing:
   - FIRST: Read and examine the training script/config files to understand which parameters actually control training speed and duration
   - Add `--max_steps 10` or `--epochs 1` for training commands
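The new `uv venv` note above describes a create-then-install workflow. A minimal sketch of that sequence driven from Python via `subprocess` follows; the package list is an example, and the actual CLI may wire this up differently.

```python
import subprocess

# Create a local virtual environment with uv, then install into it with `uv pip`.
# uv discovers the .venv in the working directory, so no explicit activation is needed here.
def provision_venv(packages):
    subprocess.run(["uv", "venv", ".venv"], check=True)
    subprocess.run(["uv", "pip", "install", *packages], check=True)

if __name__ == "__main__":
    provision_venv(["requests", "anthropic"])
```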
package/package.json
CHANGED
@@ -74,13 +74,24 @@ def select_base_image_from_analysis(analysis_data):
     recommended_cuda = cuda_rec.get('recommendedCudaVersion', '')
     docker_image = cuda_rec.get('dockerImage', '')
 
+    # Handle null values from analysis
+    if recommended_cuda == 'null' or recommended_cuda is None:
+        recommended_cuda = ''
+    if docker_image == 'null' or docker_image is None:
+        docker_image = ''
+
+    # Check if analysis returned no useful CUDA information
+    if not recommended_cuda and not docker_image:
+        print("⚠️ Repository analysis did not detect specific CUDA requirements")
+        print("🐳 Will use default CUDA 12.4.1 image for broad compatibility")
+
     print(f"🔍 CUDA Analysis Results:")
-    print(f" - Recommended CUDA Version: {recommended_cuda}")
-    print(f" - Recommended Docker Image: {docker_image}")
+    print(f" - Recommended CUDA Version: {recommended_cuda or 'None'}")
+    print(f" - Recommended Docker Image: {docker_image or 'None'}")
     print(f" - Full CUDA Recommendation: {cuda_rec}")
 
     # If a specific docker image is recommended, use it
-    if docker_image:
+    if docker_image and docker_image.strip():
        print(f"🔍 Validating recommended Docker image: {docker_image}")
        # Validate that the recommended Docker image follows expected patterns
        is_valid_image = False
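The added block above normalizes the literal string 'null' and Python `None` to empty strings before any downstream checks. A standalone sketch of that pattern, using a hypothetical `normalize_analysis_field` helper:

```python
# Illustrative helper mirroring the diff's normalization of analysis fields.
def normalize_analysis_field(value):
    """Treat None and the literal string 'null' as 'no recommendation'."""
    if value is None or value == 'null':
        return ''
    return value

cuda_rec = {"recommendedCudaVersion": "null", "dockerImage": None}
recommended_cuda = normalize_analysis_field(cuda_rec.get("recommendedCudaVersion", ""))
docker_image = normalize_analysis_field(cuda_rec.get("dockerImage", ""))

if not recommended_cuda and not docker_image:
    print("No CUDA requirements detected; fall back to the default CUDA 12.4.1 image")
```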
@@ -147,7 +158,7 @@ def select_base_image_from_analysis(analysis_data):
     }
 
     # Extract major.minor version from recommended CUDA version
-    if recommended_cuda:
+    if recommended_cuda and recommended_cuda.strip():
         # Handle versions like "12.4", "CUDA 12.4", "12.4.0", "11.8-runtime", etc.
         import re
         version_match = re.search(r'(\d+\.\d+)', recommended_cuda)
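The regex above pulls the major.minor version out of free-form recommendations. A quick check of how `re.search(r'(\d+\.\d+)', ...)` behaves on the formats mentioned in the diff's comment:

```python
import re

# Formats mentioned in the diff's comment: "12.4", "CUDA 12.4", "12.4.0", "11.8-runtime"
for recommended_cuda in ["12.4", "CUDA 12.4", "12.4.0", "11.8-runtime"]:
    match = re.search(r'(\d+\.\d+)', recommended_cuda)
    cuda_version = match.group(1) if match else None
    print(f"{recommended_cuda!r} -> {cuda_version}")
# '12.4' -> '12.4', 'CUDA 12.4' -> '12.4', '12.4.0' -> '12.4', '11.8-runtime' -> '11.8'
```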
@@ -161,7 +172,7 @@ def select_base_image_from_analysis(analysis_data):
             return selected_image, "3.11"
 
     # Try with -runtime suffix if original recommended image was runtime
-    if 'runtime' in docker_image.lower() or 'runtime' in recommended_cuda.lower():
+    if (docker_image and 'runtime' in docker_image.lower()) or 'runtime' in recommended_cuda.lower():
         runtime_key = f"{cuda_version}-runtime"
         if runtime_key in cuda_image_mapping:
             selected_image = cuda_image_mapping[runtime_key]
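The guard on `docker_image` matters because, after the null-handling change, the field may now be an empty string rather than a populated image name. A small sketch of the runtime-key lookup under that reading; the mapping keys and image tags here are illustrative, not necessarily the ones the CLI ships with.

```python
# Illustrative mapping from CUDA "major.minor[-runtime]" keys to base images.
cuda_image_mapping = {
    "12.4": "nvidia/cuda:12.4.1-devel-ubuntu22.04",
    "12.4-runtime": "nvidia/cuda:12.4.1-runtime-ubuntu22.04",
    "11.8": "nvidia/cuda:11.8.0-devel-ubuntu22.04",
    "11.8-runtime": "nvidia/cuda:11.8.0-runtime-ubuntu22.04",
}

cuda_version = "11.8"
docker_image = ""  # analysis gave no explicit image
recommended_cuda = "11.8-runtime"

# Prefer the -runtime variant when the recommendation mentions runtime,
# guarding the docker_image check because it may be an empty string.
if (docker_image and 'runtime' in docker_image.lower()) or 'runtime' in recommended_cuda.lower():
    runtime_key = f"{cuda_version}-runtime"
    selected_image = cuda_image_mapping.get(runtime_key, cuda_image_mapping[cuda_version])
else:
    selected_image = cuda_image_mapping[cuda_version]

print(selected_image)  # nvidia/cuda:11.8.0-runtime-ubuntu22.04
```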
@@ -1054,7 +1065,7 @@ if __name__ == "__main__":
     parser.add_argument('--yes', action='store_true', help='Automatically confirm prompts (non-interactive)')
 
     parser.add_argument('--gpu', default='A10G', help='GPU type to use')
-    parser.add_argument('--gpu-count', type=int, default=
+    parser.add_argument('--gpu-count', type=int, default=None, help='Number of GPUs to use (default: 1)')
     parser.add_argument('--repo-url', help='Repository URL')
 
     # Authentication-related arguments
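Switching `--gpu-count` to `default=None` lets the script tell "flag omitted" apart from an explicit value, and only then apply the fallback of 1 or prompt interactively. A minimal sketch of that pattern in isolation:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default='A10G', help='GPU type to use')
# default=None so we can distinguish "not specified" from an explicit --gpu-count 1
parser.add_argument('--gpu-count', type=int, default=None, help='Number of GPUs to use (default: 1)')
args = parser.parse_args([])  # parse an empty argv for demonstration

gpu_count = args.gpu_count if args.gpu_count is not None else 1
print(f"GPU configuration: {gpu_count}x {args.gpu}")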
@@ -1208,7 +1219,7 @@ if __name__ == "__main__":
     # Display configuration after GPU selection
     print("\n📋 Container Configuration:")
     print(f"Repository URL: {args.repo_url or 'Not specified'}")
-    gpu_count =
+    gpu_count = args.gpu_count if args.gpu_count is not None else 1
     if gpu_count > 1:
         print(f"GPU Type: {gpu_count}x {gpu_type}")
     else:
@@ -1263,8 +1274,8 @@ if __name__ == "__main__":
         sys.exit(1)
 
     # Ask about GPU count if not specified
-    gpu_count = getattr(args, 'gpu_count',
-    if
+    gpu_count = getattr(args, 'gpu_count', None)
+    if gpu_count is None:
         try:
             gpu_count_input = input("? How many GPUs do you need? (1-8, default: 1): ").strip()
             if gpu_count_input:
@@ -1276,6 +1287,9 @@ if __name__ == "__main__":
                 except ValueError:
                     print("⚠️ Invalid GPU count. Using default: 1")
                     gpu_count = 1
+            else:
+                # User pressed enter without input, use default
+                gpu_count = 1
         except KeyboardInterrupt:
             print("\n🛑 Setup cancelled.")
             sys.exit(1)
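The new `else:` branch covers the case where the user just presses Enter at the GPU-count prompt. A self-contained sketch of that prompt flow; the nesting is inferred from the hunks, and the real script's surrounding logic may differ.

```python
import sys

def ask_gpu_count(default=1):
    # Prompt for a GPU count, falling back to the default on empty or invalid input.
    try:
        raw = input("? How many GPUs do you need? (1-8, default: 1): ").strip()
        if raw:
            try:
                count = int(raw)
                if not 1 <= count <= 8:
                    print("⚠️ GPU count must be between 1 and 8. Using default: 1")
                    count = default
            except ValueError:
                print("⚠️ Invalid GPU count. Using default: 1")
                count = default
        else:
            # User pressed enter without input, use default
            count = default
    except KeyboardInterrupt:
        print("\n🛑 Setup cancelled.")
        sys.exit(1)
    return count

if __name__ == "__main__":
    print(f"Using {ask_gpu_count()} GPU(s)")
```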
@@ -1382,7 +1396,7 @@ if __name__ == "__main__":
         timeout_minutes=args.timeout,
         ssh_password=ssh_password,
         interactive=args.interactive,
-        gpu_count=
+        gpu_count=args.gpu_count if args.gpu_count is not None else 1,
         analysis_data=analysis_data,  # Pass parsed analysis_data instead of raw args.analysis_data
     )