@mariozechner/pi 0.1.2 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -3
- package/package.json +4 -4
- package/{pi → pi.js} +13 -0
package/README.md
CHANGED
|
@@ -5,12 +5,12 @@ Quickly deploy LLMs on GPU pods from [Prime Intellect](https://www.primeintellec
|
|
|
5
5
|
## Installation
|
|
6
6
|
|
|
7
7
|
```bash
|
|
8
|
-
npm install -g @
|
|
8
|
+
npm install -g @mariozechner/pi
|
|
9
9
|
```
|
|
10
10
|
|
|
11
11
|
Or run directly with npx:
|
|
12
12
|
```bash
|
|
13
|
-
npx @
|
|
13
|
+
npx @mariozechner/pi
|
|
14
14
|
```
|
|
15
15
|
|
|
16
16
|
## What This Is
|
|
@@ -314,4 +314,4 @@ Remember: Tool calling is still an evolving feature in the LLM ecosystem. What w
|
|
|
314
314
|
- **Connection Refused**: Check pod is running and port is correct
|
|
315
315
|
- **HF Token Issues**: Ensure HF_TOKEN is set before running setup
|
|
316
316
|
- **Access Denied**: Some models (like Llama, Mistral) require completing an access request on HuggingFace first. Visit the model page and click "Request access"
|
|
317
|
-
- **Tool Calling Errors**: See the Tool Calling section above - consider disabling it or using a different model
|
|
317
|
+
- **Tool Calling Errors**: See the Tool Calling section above - consider disabling it or using a different model
|
package/package.json
CHANGED
|
@@ -1,10 +1,10 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@mariozechner/pi",
|
|
3
|
-
"version": "0.1.2",
|
|
3
|
+
"version": "0.1.4",
|
|
4
4
|
"description": "CLI tool for managing vLLM deployments on GPU pods from Prime Intellect, Vast.ai, etc.",
|
|
5
|
-
"main": "pi",
|
|
5
|
+
"main": "pi.js",
|
|
6
6
|
"bin": {
|
|
7
|
-
"pi": "pi"
|
|
7
|
+
"pi": "pi.js"
|
|
8
8
|
},
|
|
9
9
|
"scripts": {
|
|
10
10
|
"test": "echo \"Error: no test specified\" && exit 1"
|
|
@@ -33,7 +33,7 @@
|
|
|
33
33
|
},
|
|
34
34
|
"preferGlobal": true,
|
|
35
35
|
"files": [
|
|
36
|
-
"pi",
|
|
36
|
+
"pi.js",
|
|
37
37
|
"pod_setup.sh",
|
|
38
38
|
"vllm_manager.py",
|
|
39
39
|
"README.md",
|
package/{pi → pi.js}
RENAMED
|
@@ -769,6 +769,19 @@ class PrimeIntellectCLI {
|
|
|
769
769
|
async run() {
|
|
770
770
|
const [,, command, ...args] = process.argv;
|
|
771
771
|
|
|
772
|
+
// Handle --version flag
|
|
773
|
+
if (command === '--version' || command === '-v') {
|
|
774
|
+
const packageJsonPath = path.join(__dirname, 'package.json');
|
|
775
|
+
try {
|
|
776
|
+
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
|
|
777
|
+
console.log(packageJson.version);
|
|
778
|
+
} catch (error) {
|
|
779
|
+
console.error('Error reading version:', error.message);
|
|
780
|
+
process.exit(1);
|
|
781
|
+
}
|
|
782
|
+
return;
|
|
783
|
+
}
|
|
784
|
+
|
|
772
785
|
switch (command) {
|
|
773
786
|
case 'setup': {
|
|
774
787
|
if (args.length < 2) {
|