easytyga 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +107 -0
- package/bin/cli.js +89 -0
- package/package.json +32 -0
- package/src/detect.js +42 -0
- package/src/tunnel.js +239 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Joe Wee (jyswee)
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
# easytyga
|
|
2
|
+
|
|
3
|
+
**Tunnel your local AI to the internet. Secure. One command.**
|
|
4
|
+
|
|
5
|
+
Expose your local Ollama, vLLM, or any HTTP service to the internet with API key authentication. No port forwarding, no static IP, no nginx configs.
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npx easytyga
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
```
|
|
12
|
+
┌─────────────────────────────────────┐
|
|
13
|
+
│ easytyga v1.0.0 │
|
|
14
|
+
│ Tunnel your local AI to the web │
|
|
15
|
+
└─────────────────────────────────────┘
|
|
16
|
+
|
|
17
|
+
GPU: NVIDIA GeForce RTX 4090
|
|
18
|
+
Target: http://localhost:11434
|
|
19
|
+
Relay: wss://relay.easytyga.com/ws
|
|
20
|
+
|
|
21
|
+
┌─────────────────────────────────────┐
|
|
22
|
+
│ Tunnel active │
|
|
23
|
+
└─────────────────────────────────────┘
|
|
24
|
+
|
|
25
|
+
Public URL: https://abc123.easytyga.com
|
|
26
|
+
API Key: et_a1b2c3d4e5f6...
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
That's it. Your local AI is now accessible from anywhere, secured with an API key.
|
|
30
|
+
|
|
31
|
+
## Why?
|
|
32
|
+
|
|
33
|
+
Ollama has **no built-in authentication** and **no remote access**. If you want to use your GPU from your phone, office, or a cloud app, you need to cobble together nginx + Cloudflare tunnels + basic auth.
|
|
34
|
+
|
|
35
|
+
easytyga solves this in one command:
|
|
36
|
+
- **Secure tunnel** -- no port forwarding, works behind any NAT/firewall
|
|
37
|
+
- **API key auth** -- auto-generated, every request authenticated
|
|
38
|
+
- **Auto-detects your GPU** -- knows what hardware you're running
|
|
39
|
+
- **Works with anything** -- Ollama, vLLM, LocalAI, ComfyUI, or any HTTP service
|
|
40
|
+
|
|
41
|
+
## Usage
|
|
42
|
+
|
|
43
|
+
```bash
|
|
44
|
+
# Tunnel local Ollama (default port 11434)
|
|
45
|
+
npx easytyga
|
|
46
|
+
|
|
47
|
+
# Tunnel a different service
|
|
48
|
+
npx easytyga --target http://localhost:8080
|
|
49
|
+
|
|
50
|
+
# Use a specific key
|
|
51
|
+
npx easytyga --key et_abc123...
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
## Options
|
|
55
|
+
|
|
56
|
+
| Flag | Description |
|
|
57
|
+
|------|-------------|
|
|
58
|
+
| `--target <url>` | Local endpoint (default: `http://localhost:11434`) |
|
|
59
|
+
| `--server <url>` | Relay server URL |
|
|
60
|
+
| `--key <key>` | Connection key |
|
|
61
|
+
| `--help` | Show help |
|
|
62
|
+
|
|
63
|
+
## How it works
|
|
64
|
+
|
|
65
|
+
1. easytyga connects to the relay server via WebSocket
|
|
66
|
+
2. The relay assigns you a public URL with API key auth
|
|
67
|
+
3. Incoming requests are forwarded through the tunnel to your local service
|
|
68
|
+
4. Responses stream back to the caller
|
|
69
|
+
|
|
70
|
+
Your IP stays private. No ports to open. Works from any network.
|
|
71
|
+
|
|
72
|
+
## Features
|
|
73
|
+
|
|
74
|
+
- **One command** -- `npx easytyga`, no install, no config files
|
|
75
|
+
- **API key auth** -- every tunnel gets a unique key, no anonymous access
|
|
76
|
+
- **GPU detection** -- auto-detects NVIDIA, AMD, and Apple Silicon
|
|
77
|
+
- **Model detection** -- discovers installed Ollama models automatically
|
|
78
|
+
- **Auto-reconnect** -- exponential backoff, set and forget
|
|
79
|
+
- **Streaming** -- full support for streaming responses (chat, generate)
|
|
80
|
+
- **Any HTTP service** -- not locked to Ollama, tunnel anything
|
|
81
|
+
|
|
82
|
+
## Integrations
|
|
83
|
+
|
|
84
|
+
easytyga supports plugins for third-party services:
|
|
85
|
+
|
|
86
|
+
- **[gpusmarket.com](https://gpusmarket.com)** -- List your GPU on the peer-to-peer rental marketplace and earn money (`--list`)
|
|
87
|
+
- **[agenticmemory.ai](https://agenticmemory.ai)** -- Add persistent conversation memory to your AI (`--memory`)
|
|
88
|
+
|
|
89
|
+
See [easytyga.com](https://easytyga.com) for documentation on integrations and self-hosted relay servers.
|
|
90
|
+
|
|
91
|
+
## License
|
|
92
|
+
|
|
93
|
+
MIT
|
|
94
|
+
|
|
95
|
+
---
|
|
96
|
+
|
|
97
|
+
## Trademarks
|
|
98
|
+
|
|
99
|
+
"easytyga" and the easytyga logo are trademarks of Joe Wee. The name and branding may not be used in derivative works without written permission.
|
|
100
|
+
|
|
101
|
+
This software is provided under the MIT license -- you are free to use, modify, and distribute the code, but the easytyga name, branding, and relay infrastructure remain the property of the author.
|
|
102
|
+
|
|
103
|
+
Ollama is a trademark of Ollama, Inc. vLLM is a trademark of vLLM contributors. NVIDIA, GeForce, and RTX are trademarks of NVIDIA Corporation. AMD and Radeon are trademarks of AMD. Apple and Apple Silicon are trademarks of Apple Inc. All other trademarks are the property of their respective owners. This project is not affiliated with or endorsed by any of these companies.
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
Built by [Joe Wee](https://github.com/jyswee)
|
package/bin/cli.js
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* easytyga - Tunnel your local AI to the internet. Secure. One command.
|
|
4
|
+
*
|
|
5
|
+
* Usage:
|
|
6
|
+
* npx easytyga # Free tunnel with auto-generated auth
|
|
7
|
+
* npx easytyga --target http://...:11434 # Custom local endpoint
|
|
8
|
+
* npx easytyga --list # List your GPU on the marketplace
|
|
9
|
+
* npx easytyga --memory # Add persistent memory
|
|
10
|
+
* npx easytyga --key et_abc123... # Use existing key
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
const { createTunnel } = require('../src/tunnel');
|
|
14
|
+
const { detectGpu } = require('../src/detect');
|
|
15
|
+
|
|
16
|
+
// -- CLI args --
// Raw argv with the node binary and script path stripped off.
const args = process.argv.slice(2);

/**
 * Value supplied after `--name`, or `fallback` when the flag is absent
 * or has no following value.
 */
function getArg(name, fallback) {
  const at = args.indexOf(`--${name}`);
  if (at === -1) return fallback;
  const value = args[at + 1];
  return value ? value : fallback;
}

/** True when the bare flag `--name` was passed. */
function hasFlag(name) {
  return args.indexOf(`--${name}`) !== -1;
}
|
|
26
|
+
|
|
27
|
+
// Print usage and exit before any GPU detection or network activity.
// NOTE(review): hasFlag prepends '--', so this matches `--help` and `--h`
// but NOT the conventional single-dash `-h` — confirm that is intended.
if (hasFlag('help') || hasFlag('h')) {
  console.log(`
  easytyga - Tunnel your local AI to the internet

  Usage:
    npx easytyga                          Free tunnel with API key auth
    npx easytyga --list                   List your GPU on the marketplace
    npx easytyga --memory                 Add persistent conversation memory
    npx easytyga --key et_...             Use existing key

  Options:
    --target <url>   Local endpoint (default: http://localhost:11434)
    --server <url>   Relay server (default: wss://relay.easytyga.com/ws)
    --key <key>      Connection key
    --list           Register on the GPU marketplace
    --memory         Enable persistent memory
    --help           Show this help

  Examples:
    npx easytyga                          Tunnel local Ollama
    npx easytyga --target http://...:8080 Tunnel any HTTP service
    npx easytyga --list --memory          Marketplace + persistent memory

  https://easytyga.com
`);
  process.exit(0);
}
|
|
54
|
+
|
|
55
|
+
// Local endpoint to expose. `--ollama` is also accepted as an alias for
// `--target`; a trailing slash is stripped so later URL joins stay clean.
const targetUrl = (getArg('target', '') || getArg('ollama', '') || 'http://localhost:11434').replace(/\/$/, '');
// Relay to connect through (override with --server for a self-hosted relay).
const serverUrl = getArg('server', 'wss://relay.easytyga.com/ws');
// Connection key: --key wins, then EASYTYGA_KEY env var, else '' (anonymous).
const key = getArg('key', '') || process.env.EASYTYGA_KEY || '';
const listMode = hasFlag('list');     // register on the GPU marketplace
const memoryMode = hasFlag('memory'); // enable the persistent-memory plugin
|
|
60
|
+
|
|
61
|
+
// -- Main --
// Orchestrates startup: banner, best-effort GPU detection, configuration
// echo, then hands control to the tunnel client. Exits 1 on setup failure.
(async () => {
  console.log('');
  console.log(' easytyga v1.0.0');
  console.log(' Tunnel your local AI to the web');
  console.log('');

  // detectGpu resolves to null when no GPU could be identified.
  const gpu = await detectGpu();
  if (gpu) console.log(` GPU: ${gpu}`);
  console.log(` Target: ${targetUrl}`);
  console.log(` Relay: ${serverUrl}`);
  if (listMode) console.log(' Mode: Marketplace');
  if (memoryMode) console.log(' Memory: Enabled');
  console.log('');

  try {
    // NOTE(review): the tunnel API names the target "ollamaUrl" even though
    // any HTTP service is accepted — kept for interface compatibility.
    await createTunnel({
      ollamaUrl: targetUrl,
      serverUrl,
      key,
      listMode,
      memoryMode,
      gpu,
    });
  } catch (err) {
    console.error(` Error: ${err.message}`);
    process.exit(1);
  }
})();
|
package/package.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "easytyga",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Tunnel your local AI to the internet. Secure. One command. Works with Ollama, vLLM, and any HTTP service.",
|
|
5
|
+
"bin": {
|
|
6
|
+
"easytyga": "bin/cli.js"
|
|
7
|
+
},
|
|
8
|
+
"keywords": [
|
|
9
|
+
"tunnel",
|
|
10
|
+
"ollama",
|
|
11
|
+
"ngrok",
|
|
12
|
+
"reverse-proxy",
|
|
13
|
+
"llm",
|
|
14
|
+
"gpu",
|
|
15
|
+
"ai",
|
|
16
|
+
"inference",
|
|
17
|
+
"remote-access",
|
|
18
|
+
"api-gateway",
|
|
19
|
+
"vllm",
|
|
20
|
+
"local-ai"
|
|
21
|
+
],
|
|
22
|
+
"author": "Joe Wee <jyswee@gmail.com>",
|
|
23
|
+
"license": "MIT",
|
|
24
|
+
"repository": {
|
|
25
|
+
"type": "git",
|
|
26
|
+
"url": "https://github.com/jyswee/easytyga"
|
|
27
|
+
},
|
|
28
|
+
"homepage": "https://easytyga.com",
|
|
29
|
+
"dependencies": {
|
|
30
|
+
"ws": "^8.16.0"
|
|
31
|
+
}
|
|
32
|
+
}
|
package/src/detect.js
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* GPU detection - tries to identify the local GPU model.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
const { execSync } = require('child_process');
|
|
6
|
+
|
|
7
|
+
/**
 * Best-effort detection of the local GPU model name.
 *
 * Probes, in order: nvidia-smi (NVIDIA anywhere), system_profiler
 * (Apple Silicon on macOS), and lspci (generic Linux). Each probe is
 * capped at 5 seconds and any failure simply falls through to the next.
 *
 * @returns {Promise<string|null>} GPU name, or null when nothing matched.
 */
async function detectGpu() {
  // Run a command and return trimmed stdout, or null on any failure.
  const run = (cmd, useShell) => {
    try {
      return execSync(cmd, {
        timeout: 5000,
        stdio: ['pipe', 'pipe', 'pipe'],
        ...(useShell ? { shell: true } : {}),
      }).toString().trim();
    } catch {
      return null;
    }
  };

  // NVIDIA first: nvidia-smi prints one GPU name per line.
  const nv = run('nvidia-smi --query-gpu=name --format=csv,noheader', false);
  if (nv) return nv.split('\n')[0].trim();

  // macOS: system_profiler reports the chip name for Apple Silicon.
  const mac = run('system_profiler SPDisplaysDataType 2>/dev/null | grep "Chip\\|Chipset"', true);
  if (mac) {
    const m = mac.match(/Apple (M\d[\w\s]*)/i);
    if (m) return m[1].trim();
  }

  // Linux fallback: first VGA/3D/display device from lspci; the bracketed
  // marketing name is extracted for NVIDIA and AMD devices.
  const pci = run("lspci | grep -i 'vga\\|3d\\|display' | head -1", true);
  if (pci) {
    const m = pci.match(/NVIDIA.*\[(.*?)\]/i) || pci.match(/AMD.*\[(.*?)\]/i);
    if (m) return m[1].trim();
  }

  return null;
}
|
|
41
|
+
|
|
42
|
+
module.exports = { detectGpu };
|
package/src/tunnel.js
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Core tunnel client - connects to a WebSocket relay server and forwards requests.
|
|
3
|
+
* Handles auth, request forwarding, streaming, and auto-reconnect.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
const http = require('http');
|
|
7
|
+
const https = require('https');
|
|
8
|
+
const WebSocket = require('ws');
|
|
9
|
+
const crypto = require('crypto');
|
|
10
|
+
|
|
11
|
+
// ── Protocol helpers ──
|
|
12
|
+
|
|
13
|
+
// Decode a base64 payload from the relay into a Buffer; an absent or
// empty payload yields an empty Buffer.
function decodeBody(b64) {
  if (!b64) return Buffer.alloc(0);
  return Buffer.from(b64, 'base64');
}
|
|
16
|
+
|
|
17
|
+
// Encode a chunk as base64 for the WebSocket protocol; '' when the
// chunk is missing or empty.
function encodeBody(buf) {
  if (!buf || buf.length === 0) return '';
  return Buffer.from(buf).toString('base64');
}
|
|
20
|
+
|
|
21
|
+
// Serialize and send one protocol message. Failures (closed socket,
// unserializable object) are deliberately swallowed — the 'close'
// handler owns reconnection, so a dropped frame is not fatal here.
function wsSend(ws, obj) {
  try {
    const payload = JSON.stringify(obj);
    ws.send(payload);
  } catch {
    // best-effort delivery
  }
}
|
|
24
|
+
|
|
25
|
+
// ── Verify Ollama is reachable ──
|
|
26
|
+
|
|
27
|
+
/**
 * Probe the local Ollama server via GET /api/version.
 *
 * Resolves with the reported version string when the body parses as
 * JSON with a truthy `version`, `true` when the endpoint answered but
 * the body was not usable, and `false` when unreachable or timed out.
 * Never rejects.
 */
async function checkOllama(ollamaUrl) {
  return new Promise((resolve) => {
    const endpoint = new URL('/api/version', ollamaUrl);
    const client = endpoint.protocol === 'https:' ? https : http;

    const req = client.get(endpoint, { timeout: 5000 }, (res) => {
      const chunks = [];
      res.on('data', (chunk) => chunks.push(chunk));
      res.on('end', () => {
        try {
          const info = JSON.parse(chunks.join(''));
          resolve(info.version || true);
        } catch {
          // Reachable but not Ollama-shaped JSON: still counts as "up".
          resolve(true);
        }
      });
    });

    req.on('error', () => resolve(false));
    req.on('timeout', () => {
      req.destroy();
      resolve(false);
    });
  });
}
|
|
47
|
+
|
|
48
|
+
// ── Get available models ──
|
|
49
|
+
|
|
50
|
+
/**
 * List models installed on the local Ollama server via GET /api/tags.
 *
 * Resolves with an array of model names; any error, timeout, or
 * unparsable body resolves to []. Never rejects.
 */
async function getModels(ollamaUrl) {
  return new Promise((resolve) => {
    const endpoint = new URL('/api/tags', ollamaUrl);
    const client = endpoint.protocol === 'https:' ? https : http;

    const req = client.get(endpoint, { timeout: 5000 }, (res) => {
      const chunks = [];
      res.on('data', (chunk) => chunks.push(chunk));
      res.on('end', () => {
        try {
          const payload = JSON.parse(chunks.join(''));
          resolve((payload.models || []).map((m) => m.name));
        } catch {
          resolve([]);
        }
      });
    });

    req.on('error', () => resolve([]));
    req.on('timeout', () => {
      req.destroy();
      resolve([]);
    });
  });
}
|
|
70
|
+
|
|
71
|
+
// ── Forward a request to local Ollama ──
|
|
72
|
+
|
|
73
|
+
/**
 * Forward one tunneled request (msg: {id, method, path, body}) to the
 * local service and stream the reply back over the WebSocket as
 * response-start / response-data / response-end frames, or a single
 * response-error frame on failure.
 */
function forwardToOllama(ws, msg, ollamaUrl) {
  const target = new URL(msg.path, ollamaUrl);
  const client = target.protocol === 'https:' ? https : http;
  const payload = decodeBody(msg.body);

  const requestHeaders = { 'Content-Type': 'application/json' };
  if (payload.length > 0) requestHeaders['Content-Length'] = payload.length;

  const req = client.request(
    {
      method: msg.method || 'POST',
      hostname: target.hostname,
      port: target.port,
      path: target.pathname + target.search,
      headers: requestHeaders,
      timeout: 120000, // generous: model loads and long generations
    },
    (res) => {
      // Relay the response headers, minus transfer-encoding — chunking is
      // hop-by-hop and must not be replayed on the relay side.
      const responseHeaders = {};
      for (const [name, value] of Object.entries(res.headers)) {
        if (name.toLowerCase() !== 'transfer-encoding') responseHeaders[name] = value;
      }
      wsSend(ws, { id: msg.id, type: 'response-start', status: res.statusCode, headers: responseHeaders });

      // Stream each body chunk as it arrives (supports streaming chat/generate).
      res.on('data', (chunk) => wsSend(ws, { id: msg.id, type: 'response-data', chunk: encodeBody(chunk) }));
      res.on('end', () => wsSend(ws, { id: msg.id, type: 'response-end' }));
    }
  );

  req.on('error', (err) => wsSend(ws, { id: msg.id, type: 'response-error', error: err.message }));
  req.on('timeout', () => {
    req.destroy();
    wsSend(ws, { id: msg.id, type: 'response-error', error: 'Request timed out' });
  });

  if (payload.length > 0) req.write(payload);
  req.end();
}
|
|
118
|
+
|
|
119
|
+
// ── Main tunnel function ──
|
|
120
|
+
|
|
121
|
+
/**
 * Core tunnel lifecycle: verify the local service, gather its models,
 * then hold a WebSocket session to the relay alive (with exponential
 * backoff reconnect), forwarding every incoming request locally.
 *
 * @param {object} opts
 * @param {string} opts.ollamaUrl    Local HTTP endpoint to expose.
 * @param {string} opts.serverUrl    Relay WebSocket URL.
 * @param {string} opts.key          Existing connection key; '' => anonymous.
 * @param {boolean} opts.listMode    Register the GPU on the marketplace.
 * @param {boolean} opts.memoryMode  Enable the persistent-memory plugin.
 * @param {string|null} opts.gpu     Detected GPU name, if any.
 * @throws {Error} when the local service cannot be reached.
 */
async function createTunnel(opts) {
  const { ollamaUrl, serverUrl, key, listMode, memoryMode, gpu } = opts;

  // Check Ollama first so we fail fast with an actionable message.
  const version = await checkOllama(ollamaUrl);
  if (!version) {
    throw new Error(`Cannot reach Ollama at ${ollamaUrl}\nMake sure Ollama is running: ollama serve`);
  }
  console.log(` Ollama ${typeof version === 'string' ? `v${version}` : ''} reachable`);

  // Get models (best-effort; empty for non-Ollama targets).
  const models = await getModels(ollamaUrl);
  if (models.length > 0) {
    console.log(` Models: ${models.slice(0, 5).join(', ')}${models.length > 5 ? ` (+${models.length - 5} more)` : ''}`);
  }

  // Generate a tunnel key if none provided.
  // Fix: use the documented `et_` prefix — the README, the --help text and
  // the auth-fail guidance all present keys as `et_...`; the previous `ot_`
  // prefix was inconsistent with that documented format.
  const tunnelKey = key || `et_${crypto.randomBytes(16).toString('hex')}`;
  const isAnonymous = !key;

  console.log('');

  // Connect with auto-reconnect: delay doubles per failure, capped.
  let reconnectDelay = 2000;
  const MAX_RECONNECT = 30000;
  let requestCount = 0;

  function connect() {
    const ws = new WebSocket(serverUrl);

    // Identify ourselves as soon as the socket opens.
    ws.on('open', () => {
      wsSend(ws, {
        type: 'auth',
        key: tunnelKey,
        anonymous: isAnonymous,
        gpu: gpu || undefined,
        models: models,
        list: listMode || false,
        memory: memoryMode || false,
      });
    });

    ws.on('message', (data) => {
      let msg;
      try { msg = JSON.parse(data); } catch { return; } // ignore malformed frames

      if (msg.type === 'auth-ok') {
        reconnectDelay = 2000; // healthy session: reset the backoff
        console.log(' ┌─────────────────────────────────────┐');
        console.log(' │ Tunnel active                       │');
        console.log(' └─────────────────────────────────────┘');
        if (msg.publicUrl) {
          console.log('');
          console.log(` Public URL: ${msg.publicUrl}`);
        }
        if (msg.apiKey) {
          console.log(` API Key: ${msg.apiKey}`);
        }
        if (msg.listingTitle) {
          console.log(` Listing: ${msg.listingTitle}`);
        }
        if (listMode) {
          console.log('');
          console.log(' Your GPU is listed on the marketplace.');
          console.log(' Renters can find and use your GPU now.');
        }
        if (memoryMode) {
          console.log('');
          console.log(' Persistent memory enabled.');
          console.log(' Conversations persist across sessions.');
        }
        console.log('');
        console.log(' Press Ctrl+C to disconnect.');
        console.log('');
        return;
      }

      if (msg.type === 'auth-fail') {
        console.error(` Authentication failed: ${msg.error}`);
        if (isAnonymous) {
          console.error(' The relay server may not support anonymous tunnels.');
          console.error(' Get a key at https://easytyga.com');
        }
        process.exit(1);
      }

      if (msg.type === 'request' && msg.id) {
        requestCount++;
        // \r keeps the counter on a single, in-place status line.
        process.stdout.write(`\r Requests forwarded: ${requestCount}`);
        forwardToOllama(ws, msg, ollamaUrl);
        return;
      }
    });

    ws.on('close', () => {
      console.log(`\n Disconnected. Reconnecting in ${reconnectDelay / 1000}s...`);
      setTimeout(connect, reconnectDelay);
      reconnectDelay = Math.min(reconnectDelay * 2, MAX_RECONNECT);
    });

    ws.on('error', (err) => {
      // ECONNREFUSED is routine while the relay is down; the close handler
      // already announces the reconnect, so stay quiet for that case.
      if (err.code !== 'ECONNREFUSED') {
        console.error(` WebSocket error: ${err.message}`);
      }
    });
  }

  connect();

  // Graceful shutdown
  process.on('SIGINT', () => {
    console.log('\n\n Tunnel closed. Goodbye!');
    if (listMode) console.log(' Your listing has been set to offline.');
    process.exit(0);
  });
  process.on('SIGTERM', () => { process.exit(0); });
}
|
|
238
|
+
|
|
239
|
+
module.exports = { createTunnel, checkOllama, getModels };
|