lcluster 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +173 -0
- package/built-in-templates/default.yml +14 -0
- package/built-in-templates/high-memory.yml +14 -0
- package/built-in-templates/minimal.yml +14 -0
- package/generate_logos.js +39 -0
- package/package.json +52 -0
- package/src/alerts/desktop.js +15 -0
- package/src/alerts/discord.js +53 -0
- package/src/alerts/index.js +50 -0
- package/src/alerts/sound.js +5 -0
- package/src/cli.js +25 -0
- package/src/core/config.js +17 -0
- package/src/core/events.js +5 -0
- package/src/core/healthcheck.js +126 -0
- package/src/core/loadbalancer.js +26 -0
- package/src/core/logBuffer.js +35 -0
- package/src/core/registry.js +59 -0
- package/src/gateway/guildMap.js +6 -0
- package/src/gateway/proxy.js +30 -0
- package/src/gateway/router.js +6 -0
- package/src/gateway/server.js +35 -0
- package/src/gateway/sessionMap.js +6 -0
- package/src/gateway/v4/rest.js +79 -0
- package/src/gateway/v4/websocket.js +95 -0
- package/src/main.js +133 -0
- package/src/spawner/docker.js +55 -0
- package/src/spawner/process.js +43 -0
- package/src/system/detect.js +74 -0
- package/src/system/systemd.js +33 -0
- package/src/templates/manager.js +66 -0
- package/src/templates/validator.js +36 -0
- package/src/tui/components/Border.jsx +27 -0
- package/src/tui/components/KeyHints.jsx +20 -0
- package/src/tui/components/MiniBar.jsx +22 -0
- package/src/tui/components/NodeCard.jsx +65 -0
- package/src/tui/components/NodeList.jsx +60 -0
- package/src/tui/components/StatPanel.jsx +64 -0
- package/src/tui/components/StatusDot.jsx +22 -0
- package/src/tui/index.jsx +69 -0
- package/src/tui/init/DiscordAlerts.jsx +122 -0
- package/src/tui/init/Done.jsx +77 -0
- package/src/tui/init/GatewaySetup.jsx +59 -0
- package/src/tui/init/NodeSetup.jsx +158 -0
- package/src/tui/init/ThemePicker.jsx +51 -0
- package/src/tui/init/Welcome.jsx +57 -0
- package/src/tui/init/index.jsx +78 -0
- package/src/tui/screens/Dashboard.jsx +51 -0
- package/src/tui/screens/Logs.jsx +59 -0
- package/src/tui/screens/NodeDetail.jsx +57 -0
- package/src/tui/screens/Settings.jsx +154 -0
- package/src/tui/screens/Templates.jsx +42 -0
- package/src/tui/theme/amber.js +16 -0
- package/src/tui/theme/cyberpunk.js +15 -0
- package/src/tui/theme/hacker.js +15 -0
- package/src/tui/theme/index.js +42 -0
- package/src/tui/theme/minimal.js +16 -0
- package/src/tui/theme/neon.js +16 -0
- package/src/tui/theme/ocean.js +15 -0
package/README.md
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
<img src="docs/public/logo.svg" height="80" alt="lcluster logo" />
|
|
3
|
+
<h1>lcluster</h1>
|
|
4
|
+
<p>A powerful Lavalink cluster manager for your terminal.</p>
|
|
5
|
+
|
|
6
|
+
<p>
|
|
7
|
+
<img src="https://img.shields.io/badge/version-1.0.0-blue" />
|
|
8
|
+
<img src="https://img.shields.io/badge/node-%3E%3D18.0.0-green" />
|
|
9
|
+
<img src="https://img.shields.io/badge/license-GPLv3-purple" />
|
|
10
|
+
<img src="https://img.shields.io/badge/built%20with-Claude%20AI-orange" />
|
|
11
|
+
</p>
|
|
12
|
+
</div>
|
|
13
|
+
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
## What is lcluster
|
|
17
|
+
|
|
18
|
+
**lcluster** is a terminal-based Lavalink cluster manager built for developers and server administrators. Run multiple Lavalink nodes — via Docker or Java process — and manage them all natively from one beautiful, fully-responsive, full-screen TUI dashboard.
|
|
19
|
+
|
|
20
|
+
Your Discord bot connects to one single gateway address. **lcluster** powerfully handles all routing, load balancing, session tracking, and seamless failover behind the scenes. Your bot never knows there is a cluster — it just sees one highly available, extremely robust Lavalink node.
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
24
|
+
## Features
|
|
25
|
+
|
|
26
|
+
- 🖥️ **Full-Screen Terminal Dashboard**: Native visual management built on top of modern React/Ink.
|
|
27
|
+
- ⬡ **Single Gateway Endpoint**: Connect your bot to one port, let lcluster handle the rest.
|
|
28
|
+
- 🐋 **Multi-Environment Spawning**: Natively spins up Docker containers or standalone Java processes.
|
|
29
|
+
- 🔌 **External Node Auto-Connect**: Bring your own existing Lavalink node and monitor it seamlessly.
|
|
30
|
+
- 🔀 **Intelligent Load Balancing**: Chooses nodes based on `least players`, `lowest CPU`, or `round robin`.
|
|
31
|
+
- ♻️ **Automatic Failover & Migration**: Seamless session migration if a node crashes.
|
|
32
|
+
- 🔔 **Discord Webhook Alerts**: Real-time integration and monitoring piped directly to your Discord channel.
|
|
33
|
+
- 🖥️ **Systemd Auto-Start**: Built-in Ubuntu/Linux daemon installation.
|
|
34
|
+
- 🎨 **Adaptive Themes**: 3 gorgeous built-in TUI themes — Cyberpunk Neon, Clean Minimal, and Retro Amber.
|
|
35
|
+
- ⚡ **Extremely Lightweight**: Tiny memory footprint natively optimizing underlying resources.
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## Requirements
|
|
40
|
+
|
|
41
|
+
- Node.js 18 or higher
|
|
42
|
+
- Java 17 or higher (for process mode nodes)
|
|
43
|
+
- Docker (optional, for Docker mode nodes)
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
## Installation
|
|
48
|
+
|
|
49
|
+
```bash
|
|
50
|
+
npm install -g lcluster
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
Or install from source:
|
|
54
|
+
|
|
55
|
+
```bash
|
|
56
|
+
git clone https://github.com/yourname/lcluster.git
|
|
57
|
+
cd lcluster
|
|
58
|
+
npm install
|
|
59
|
+
npm link
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
---
|
|
63
|
+
|
|
64
|
+
## Quick Start
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
# Run the setup wizard
|
|
68
|
+
lcluster init
|
|
69
|
+
|
|
70
|
+
# Open the dashboard
|
|
71
|
+
lcluster
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
---
|
|
75
|
+
|
|
76
|
+
## CLI Commands
|
|
77
|
+
|
|
78
|
+
| Command | Description |
|
|
79
|
+
|---|---|
|
|
80
|
+
| `lcluster` | Open the TUI dashboard |
|
|
81
|
+
| `lcluster init` | Run the setup wizard |
|
|
82
|
+
| `lcluster ps` | List all nodes in terminal |
|
|
83
|
+
| `lcluster start <name>` | Start a node |
|
|
84
|
+
| `lcluster stop <name>` | Stop a node |
|
|
85
|
+
| `lcluster restart <name>` | Restart a node |
|
|
86
|
+
| `lcluster logs <name>` | Tail logs for a node |
|
|
87
|
+
|
|
88
|
+
---
|
|
89
|
+
|
|
90
|
+
## Connecting Your Bot
|
|
91
|
+
|
|
92
|
+
Point your Lavalink client at lcluster instead of a raw Lavalink node.
|
|
93
|
+
No changes needed in your bot code — lcluster speaks standard Lavalink v4.
|
|
94
|
+
|
|
95
|
+
```js
|
|
96
|
+
// Riffy example
|
|
97
|
+
const nodes = [{
|
|
98
|
+
host: "localhost",
|
|
99
|
+
port: 2333, // your lcluster gateway port
|
|
100
|
+
password: "yourpassword",
|
|
101
|
+
secure: false
|
|
102
|
+
}]
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
Works with Riffy, Shoukaku, Moonlink, Vulkava, and any other
|
|
106
|
+
Lavalink v4 compatible client.
|
|
107
|
+
|
|
108
|
+
---
|
|
109
|
+
|
|
110
|
+
## TUI Dashboard Layout
|
|
111
|
+
|
|
112
|
+
```
|
|
113
|
+
╔═════════════════════════════════════════════════════════════════════╗
|
|
114
|
+
║ ⬡ lcluster v1.0.0 ● 2 online ⚠ 1 warn gateway :2333 ● ║
|
|
115
|
+
╚═════════════════════════════════════════════════════════════════════╝
|
|
116
|
+
|
|
117
|
+
┌─ nodes (3/5) ──────────────────────────────────── [↑↓ scroll] ─┐
|
|
118
|
+
│ │
|
|
119
|
+
│ ● node-main ↑ 2d 4h 12m │
|
|
120
|
+
│ default.yml · process ● online │
|
|
121
|
+
│ ♪ 12 ⚡ 18ms CPU ▓▓░░░░░░ 34% MEM ▓▓░░░░░░ 29% │
|
|
122
|
+
│ │
|
|
123
|
+
│ ▶ ● node-docker ↑ 6h 12m │
|
|
124
|
+
│ high-mem.yml · docker 🐋 ● online │
|
|
125
|
+
│ ♪ 7 ⚡ 22ms CPU ▓▓▓▓░░░░ 58% MEM ▓▓▓▓▓░░░ 71% │
|
|
126
|
+
│ │
|
|
127
|
+
│ ⚠ node-backup ↑ 1d 2h │
|
|
128
|
+
│ minimal.yml · process ⚠ degraded │
|
|
129
|
+
│ ♪ 0 ⚡ 140ms CPU ▓░░░░░░░ 12% MEM ▓░░░░░░░ 18% │
|
|
130
|
+
│ │
|
|
131
|
+
└──────────────────────────────────────────────────────────────────┘
|
|
132
|
+
|
|
133
|
+
[↑↓] navigate [enter] manage [n] new [t] templates [q] quit
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## Configuration
|
|
139
|
+
|
|
140
|
+
All local data, nodes, templates, and configurations are securely stored within `~/.lcluster/`:
|
|
141
|
+
|
|
142
|
+
- `~/.lcluster/config.yml`: Global settings, themes, alerts, and gateway credentials.
|
|
143
|
+
- `~/.lcluster/nodes.json`: Node registry mapping.
|
|
144
|
+
- `~/.lcluster/templates/`: Local customized `application.yml` configs for dynamic deployment.
|
|
145
|
+
- `~/.lcluster/nodes/`: Operating directory holding the `Lavalink.jar` and `application.yml` for process nodes.
|
|
146
|
+
|
|
147
|
+
---
|
|
148
|
+
|
|
149
|
+
## Documentation
|
|
150
|
+
|
|
151
|
+
Full architectural guides, TUI maps, setup instructions, and deployment strategies are absolutely free and publicly available at:
|
|
152
|
+
|
|
153
|
+
🔗 **[https://lcluster.dev](https://lcluster.dev)**
|
|
154
|
+
|
|
155
|
+
---
|
|
156
|
+
|
|
157
|
+
## Roadmap
|
|
158
|
+
|
|
159
|
+
- [x] v1.0.0 — Core cluster manager, TUI dashboard, gateway, alerts
|
|
160
|
+
- [ ] v1.0.1 — Custom Discord bot integration with token support
|
|
161
|
+
|
|
162
|
+
---
|
|
163
|
+
|
|
164
|
+
## Credits
|
|
165
|
+
|
|
166
|
+
lcluster was designed and built by **Ram Krishna** with architecture,
|
|
167
|
+
planning, and code assistance from **Claude**, an AI built by Anthropic.
|
|
168
|
+
|
|
169
|
+
---
|
|
170
|
+
|
|
171
|
+
## License
|
|
172
|
+
|
|
173
|
+
GPL-3.0
|
|
import fs from 'node:fs';

/**
 * Render the lcluster logo as a standalone SVG string: a central hexagon
 * connected to three outer node dots, with the project name underneath.
 *
 * @param {string} bgColor - Background and hexagon fill colour.
 * @param {string} fgColor - Stroke, node-dot and text colour.
 * @param {boolean} [isLight=false] - Reserved flag; currently unused.
 * @returns {string} Complete SVG markup.
 */
const renderSvg = (bgColor, fgColor, isLight = false) => `<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 130" width="200" height="260">
  <rect width="100" height="130" fill="${bgColor}" />
  <!-- Verbindungen -->
  <line x1="50" y1="50" x2="50" y2="15" stroke="${fgColor}" stroke-width="2" />
  <line x1="50" y1="50" x2="19.69" y2="67.5" stroke="${fgColor}" stroke-width="2" />
  <line x1="50" y1="50" x2="80.31" y2="67.5" stroke="${fgColor}" stroke-width="2" />

  <!-- Zentrales Hexagon -->
  <polygon points="50,30 67.32,40 67.32,60 50,70 32.68,60 32.68,40" fill="${bgColor}" stroke="${fgColor}" stroke-width="3" />

  <!-- Nodes / Außenpunkte -->
  <circle cx="50" cy="15" r="5" fill="${fgColor}" />
  <circle cx="19.69" cy="67.5" r="5" fill="${fgColor}" />
  <circle cx="80.31" cy="67.5" r="5" fill="${fgColor}" />

  <!-- Text -->
  <text x="50" y="115" font-family="monospace" font-size="20" font-weight="bold" fill="${fgColor}" text-anchor="middle">lcluster</text>
</svg>`;

// Fix: ensure the output directory exists before writing — previously the
// script crashed with ENOENT when docs/public/ was missing.
fs.mkdirSync('docs/public', { recursive: true });

// Neon is the primary brand hue, so it doubles as the default logo.svg.
// Neon version: #00ff9f on #080b14
const neonSvg = renderSvg('#080b14', '#00ff9f');
const lightSvg = renderSvg('#ffffff', '#000000', true);
const darkSvg = renderSvg('#000000', '#ffffff');

fs.writeFileSync('docs/public/logo.svg', neonSvg);
fs.writeFileSync('docs/public/logo-dark.svg', darkSvg);
fs.writeFileSync('docs/public/logo-light.svg', lightSvg);

// Extra theme variants kept available alongside the three required outputs
// (logo.svg, logo-dark.svg, logo-light.svg).
const minimalSvg = renderSvg('#1e1e2e', '#cba6f7');
const amberSvg = renderSvg('#0d0800', '#ffb347');
fs.writeFileSync('docs/public/logo-minimal.svg', minimalSvg);
fs.writeFileSync('docs/public/logo-amber.svg', amberSvg);
// FIXME: this writes raw SVG bytes into a .ico file; browsers only honour
// SVG favicons served as image/svg+xml via <link>. Convert to a real ICO
// (or ship favicon.svg) before relying on this.
fs.writeFileSync('docs/public/favicon.ico', neonSvg);

console.log("Logos generated.");
package/package.json
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "lcluster",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "A powerful Lavalink cluster manager for your terminal",
|
|
5
|
+
"main": "src/cli.js",
|
|
6
|
+
"type": "module",
|
|
7
|
+
"bin": {
|
|
8
|
+
"lcluster": "./src/cli.js"
|
|
9
|
+
},
|
|
10
|
+
"scripts": {
|
|
11
|
+
"start": "node src/cli.js"
|
|
12
|
+
},
|
|
13
|
+
"engines": {
|
|
14
|
+
"node": ">=18.0.0"
|
|
15
|
+
},
|
|
16
|
+
"keywords": [
|
|
17
|
+
"lavalink",
|
|
18
|
+
"cluster",
|
|
19
|
+
"discord",
|
|
20
|
+
"music bot",
|
|
21
|
+
"tui",
|
|
22
|
+
"terminal"
|
|
23
|
+
],
|
|
24
|
+
"author": "Ram Krishna",
|
|
25
|
+
"license": "GPL-3.0",
|
|
26
|
+
"repository": {
|
|
27
|
+
"type": "git",
|
|
28
|
+
"url": "https://github.com/ramkrishna-js/lcluster.git"
|
|
29
|
+
},
|
|
30
|
+
"homepage": "https://lcluster.dev",
|
|
31
|
+
"dependencies": {
|
|
32
|
+
"chalk": "^5.3.0",
|
|
33
|
+
"commander": "^12.0.0",
|
|
34
|
+
"dockerode": "^4.0.2",
|
|
35
|
+
"http-proxy": "^1.18.1",
|
|
36
|
+
"ink": "^4.4.1",
|
|
37
|
+
"ink-text-input": "^5.0.1",
|
|
38
|
+
"js-yaml": "^4.1.0",
|
|
39
|
+
"node-fetch": "^3.3.2",
|
|
40
|
+
"node-notifier": "^10.0.1",
|
|
41
|
+
"ora": "^8.0.1",
|
|
42
|
+
"react": "^18.2.0",
|
|
43
|
+
"tsx": "^4.21.0",
|
|
44
|
+
"ws": "^8.16.0"
|
|
45
|
+
},
|
|
46
|
+
"devDependencies": {
|
|
47
|
+
"@babel/core": "^7.24.0",
|
|
48
|
+
"@babel/preset-env": "^7.24.0",
|
|
49
|
+
"@babel/preset-react": "^7.23.3",
|
|
50
|
+
"@babel/register": "^7.23.7"
|
|
51
|
+
}
|
|
52
|
+
}
|
|
import notifier from 'node-notifier';

/**
 * Show a native desktop notification for an alert.
 *
 * Per spec only the 'danger' and 'success' levels surface on the desktop;
 * every other level is silently ignored.
 *
 * @param {string} level - Alert severity ('danger', 'warning', 'success', 'info').
 * @param {string} message - Human-readable notification body.
 */
export function sendDesktop(level, message) {
  const desktopLevels = new Set(['danger', 'success']);
  if (!desktopLevels.has(level)) {
    return;
  }

  notifier.notify({
    title: 'lcluster',
    message,
    icon: '', // path to logo if available
    sound: false,
  });
}
import fetch from 'node-fetch';
import { events } from '../core/events.js';

/**
 * Post an alert embed to a Discord webhook.
 *
 * Failures never throw: a non-2xx response or a network error is reported
 * via the 'alert:failed' event instead.
 *
 * @param {string} level - 'danger' | 'warning' | 'success' | anything else is info.
 * @param {object|null} node - Node the alert concerns; adds embed fields when present.
 * @param {string} message - Embed description text.
 * @param {string} webhookUrl - Destination webhook; no-op when falsy.
 */
export async function sendDiscord(level, node, message, webhookUrl) {
  if (!webhookUrl) return;

  // Colour/title presets per level; unknown levels fall back to info blue.
  const styles = {
    danger: { color: 0xFF3366, title: '🔴 Node Offline' },
    warning: { color: 0xFFCC00, title: '🟡 Node Warning' },
    success: { color: 0x00FF9F, title: '🟢 Node Online' },
  };
  const { color, title } = styles[level] ?? { color: 0x89B4FA, title: 'ℹ️ lcluster Info' };

  const fields = node
    ? [
        { name: 'Node', value: node.name, inline: true },
        { name: 'Mode', value: node.mode, inline: true },
        { name: 'Uptime', value: node.uptime || '—', inline: true },
        { name: 'Players', value: String(node.players || 0), inline: true },
        { name: 'Ping', value: (node.ping || 0) + 'ms', inline: true },
        { name: 'CPU', value: (node.cpu || 0) + '%', inline: true },
      ]
    : [];

  const payload = {
    embeds: [{
      title,
      description: message,
      color,
      fields,
      footer: {
        text: 'lcluster · Built by Ram Krishna & Claude (Anthropic AI)',
      },
      timestamp: new Date().toISOString(),
    }],
  };

  try {
    const res = await fetch(webhookUrl, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    });
    if (!res.ok) {
      events.emit('alert:failed', `Discord webhook failed: ${res.status}`);
    }
  } catch (err) {
    events.emit('alert:failed', `Discord webhook error: ${err.message}`);
  }
}
import { sendDiscord } from './discord.js';
import { sendDesktop } from './desktop.js';
import { sendSound } from './sound.js';
import { events } from '../core/events.js';
import fs from 'node:fs';
import path from 'node:path';
import os from 'node:os';
import yaml from 'js-yaml';

/** Load ~/.lcluster/config.yml, returning {} when missing or unparsable. */
function getConfig() {
  const configPath = path.join(os.homedir(), '.lcluster', 'config.yml');
  if (!fs.existsSync(configPath)) {
    return {};
  }
  try {
    return yaml.load(fs.readFileSync(configPath, 'utf8')) || {};
  } catch {
    // best-effort: a corrupt config simply disables alert customisation
    return {};
  }
}

/** Resolve the alerts section of the config; every channel disabled by default. */
function getAlertConfig() {
  return getConfig().alerts || {
    discord: { enabled: false, webhook: '' },
    desktop: { enabled: false },
    sound: { enabled: false },
    thresholds: { cpu_warn: 90, idle_warn: false }
  };
}

/**
 * Fan an alert out to every channel that is enabled in the config
 * (Discord webhook, desktop notification, sound).
 *
 * @param {string} level - 'danger' | 'warning' | 'success' | 'info'.
 * @param {object|null} node - Node the alert concerns, if any.
 * @param {string} message - Human-readable alert text.
 */
export function alert(level, node, message) {
  const { discord, desktop, sound } = getAlertConfig();

  if (discord?.enabled && discord?.webhook) {
    // fire-and-forget: sendDiscord handles its own errors internally
    sendDiscord(level, node, message, discord.webhook);
  }
  if (desktop?.enabled) {
    sendDesktop(level, message);
  }
  if (sound?.enabled) {
    sendSound(level);
  }
}

/**
 * Wire cluster lifecycle events to alert levels. Call once at startup.
 *
 * NOTE(review): some emitters appear to pass the node *name* (a string)
 * rather than the node object these handlers forward to the channels —
 * confirm the payload shape against core/healthcheck.js.
 */
export function initAlerts() {
  events.on('node:offline', (node) => alert('danger', node, 'went offline'));
  events.on('node:online', (node) => alert('success', node, 'is back online'));
  events.on('node:degraded', (node) => alert('warning', node, 'is degraded'));
  events.on('gateway:ready', () => alert('info', null, 'gateway started'));
  events.on('cpu:critical', (node) => alert('warning', node, 'CPU above threshold'));
}
package/src/cli.js
ADDED
|
#!/usr/bin/env node

import { spawnSync } from 'node:child_process';
import { fileURLToPath } from 'node:url';

// Bootstrap trick: re-exec this script under the tsx loader so the rest of
// the codebase (ESM + JSX) can be imported directly. The env flag prevents
// an infinite re-spawn loop.
if (!process.env.LCLUSTER_TSX_BOOTSTRAPPED) {
  process.env.LCLUSTER_TSX_BOOTSTRAPPED = '1';
  const __filename = fileURLToPath(import.meta.url);

  const result = spawnSync(process.execPath, [
    '--no-warnings',
    '--import',
    'tsx',
    __filename,
    ...process.argv.slice(2)
  ], {
    stdio: 'inherit'
  });

  // Fix: if the child could not be spawned at all, `status` is null and the
  // old `result.status ?? 0` exited 0, silently masking the failure.
  if (result.error) {
    console.error(`lcluster: failed to start: ${result.error.message}`);
    process.exit(1);
  }
  process.exit(result.status ?? 0);
}

// Now we can safely import everything else because tsx loader is active
const { runCLI } = await import('./main.js');
await runCLI();
import fs from 'node:fs';
import path from 'node:path';
import os from 'node:os';
import yaml from 'js-yaml';

/**
 * Read the gateway section of ~/.lcluster/config.yml.
 *
 * Falls back to the stock Lavalink defaults (port 2333, password
 * 'youshallnotpass') when the file is missing, unparsable, or has no
 * `gateway` key. A fresh object is returned on every call.
 *
 * @returns {{port: number, password: string}} Gateway settings.
 */
export function getGatewayConfig() {
  const defaults = { port: 2333, password: 'youshallnotpass' };
  const configPath = path.join(os.homedir(), '.lcluster', 'config.yml');

  try {
    if (!fs.existsSync(configPath)) {
      return defaults;
    }
    const doc = yaml.load(fs.readFileSync(configPath, 'utf8'));
    return doc?.gateway || defaults;
  } catch {
    return defaults;
  }
}
import fetch from 'node-fetch';
import { events } from './events.js';
import { getAllNodes, updateNode } from './registry.js';

import fs from 'node:fs';
import path from 'node:path';
import os from 'node:os';
import yaml from 'js-yaml';

// Consecutive failed-probe count per node name; reset to 0 on any success.
const failCounters = new Map();
// Rolling ping samples (most recent 10) per node name.
const pingHistories = new Map();
const NODE_TIMEOUT_MS = 3000;

// Best-effort read of ~/.lcluster/config.yml; returns {} when the file is
// missing or unparsable (a broken config must never stop health checking).
function getConfig() {
  const configPath = path.join(os.homedir(), '.lcluster', 'config.yml');
  if (fs.existsSync(configPath)) {
    try { return yaml.load(fs.readFileSync(configPath, 'utf8')) || {}; } catch (e) { }
  }
  return {};
}
/**
 * Begin polling every registered node on a fixed 5-second cadence.
 *
 * Each probe is fire-and-forget; checkNode handles its own failures.
 * NOTE(review): the interval handle is discarded, so polling cannot be
 * stopped once started — confirm that is intentional.
 */
export function startHealthCheck() {
  const POLL_MS = 5000;
  setInterval(() => {
    for (const node of getAllNodes()) {
      checkNode(node);
    }
  }, POLL_MS);
}
// Probe one node's /v4/stats endpoint and update its registry entry.
//
// On success: resets the fail counter, records ping history, derives
// cpu/memory/players from the stats body, and emits 'node:online' when the
// node was previously anything other than 'online'. CPU above the configured
// threshold on 3 consecutive checks emits 'cpu:critical'.
//
// On failure: increments the fail counter and walks a status state machine —
// external nodes go reconnecting/waiting → unreachable (12 fails ≈ 60s);
// managed nodes go degraded (3 fails) → offline (6 fails, emits
// 'node:offline').
async function checkNode(node) {
  const start = Date.now();
  let failCount = failCounters.get(node.name) || 0;

  try {
    const res = await fetch(`http://localhost:${node.port}/v4/stats`, {
      method: 'GET',
      headers: {
        'Authorization': node.password || 'youshallnotpass',
        'Content-Type': 'application/json'
      },
      // NOTE(review): node-fetch v3 removed the `timeout` option; if this
      // project is on v3 this is silently ignored and a hung node blocks the
      // probe — confirm, and switch to AbortSignal.timeout() if so.
      timeout: NODE_TIMEOUT_MS
    });

    if (res.ok) {
      const ping = Date.now() - start;
      const stats = await res.json();

      const wasOffline = node.status !== 'online'; // anything not online should trigger recovery

      failCounters.set(node.name, 0);

      // Track ping history (rolling window of the last 10 samples)
      if (!pingHistories.has(node.name)) pingHistories.set(node.name, []);
      const hist = pingHistories.get(node.name);
      hist.push(ping);
      if (hist.length > 10) hist.shift();

      // lavalinkLoad is a 0..1 fraction; memory.used is bytes → MiB
      const cpu = stats.cpu ? Math.round(stats.cpu.lavalinkLoad * 100) : 0;
      const memory = stats.memory ? Math.round(stats.memory.used / 1024 / 1024) : 0;
      const players = stats.players || 0;

      updateNode(node.name, {
        status: 'online',
        ping,
        cpu,
        memory,
        players,
        // copy so registry consumers can't mutate the shared history array
        pingHistory: [...pingHistories.get(node.name)]
      });

      const config = getConfig();
      const cpuWarnThreshold = config.alerts?.thresholds?.cpu_warn || 90;

      // Require 3 consecutive over-threshold checks before alerting, then
      // reset so the alert can re-fire if the condition persists.
      if (cpu >= cpuWarnThreshold) {
        if (node.cpuWarnCount >= 2) {
          events.emit('cpu:critical', node);
          node.cpuWarnCount = 0;
        } else {
          node.cpuWarnCount = (node.cpuWarnCount || 0) + 1;
        }
      } else {
        node.cpuWarnCount = 0;
      }

      if (wasOffline) {
        // NOTE(review): emits the node *name* (a string); the alert handlers
        // in alerts/index.js forward the payload as if it were the node
        // object — confirm the intended payload contract.
        events.emit('node:online', node.name);
      }
    } else {
      // Treat non-2xx (e.g. wrong password → 401) the same as a network failure.
      throw new Error(`HTTP ${res.status}`);
    }
  } catch (error) {
    failCount++;
    failCounters.set(node.name, failCount);

    let newStatus = node.status || 'offline';
    let eventToEmit = null;

    if (node.mode === 'external') {
      // External nodes are not restartable by us: give them a much longer
      // grace period (12 failed probes) before declaring them unreachable.
      if (failCount >= 12 && newStatus !== 'unreachable') {
        newStatus = 'unreachable';
      } else if (failCount > 0 && failCount < 12) {
        if (newStatus === 'online' || newStatus === 'reconnecting') {
          newStatus = 'reconnecting';
        } else if (newStatus !== 'unreachable') {
          newStatus = 'waiting';
        }
      }
    } else {
      // Managed (docker/process) nodes: degraded after 3 fails, offline after 6.
      if (failCount >= 6 && newStatus !== 'offline') {
        newStatus = 'offline';
        eventToEmit = 'node:offline';
      } else if (failCount >= 3 && failCount < 6 && newStatus !== 'degraded') {
        newStatus = 'degraded';
      }
    }

    // Only write (and alert) on an actual transition.
    if (newStatus !== node.status) {
      updateNode(node.name, { status: newStatus });
      if (eventToEmit) events.emit(eventToEmit, node.name);
    }
  }

  // After each check emit updated so the TUI can refresh this node's card.
  events.emit('node:updated', node.name);
}
import { getAllNodes } from './registry.js';

// Cursor for the round-robin strategy; persists across calls.
let rrIndex = 0;

/**
 * Select an online node according to the requested balancing strategy.
 *
 * @param {'least-players'|'lowest-cpu'|'round-robin'} [strategy='least-players']
 *   Unknown strategies fall back to the first online node.
 * @returns {object} The chosen node record.
 * @throws {Error} When no node is currently online.
 */
export function pickNode(strategy = 'least-players') {
  const online = getAllNodes().filter((n) => n.status === 'online');

  if (online.length === 0) {
    throw new Error('No online nodes available');
  }

  switch (strategy) {
    case 'least-players': {
      let best = online[0];
      for (const candidate of online) {
        if (candidate.players < best.players) best = candidate;
      }
      return best;
    }
    case 'lowest-cpu': {
      let best = online[0];
      for (const candidate of online) {
        if (candidate.cpu < best.cpu) best = candidate;
      }
      return best;
    }
    case 'round-robin': {
      rrIndex = (rrIndex + 1) % online.length;
      return online[rrIndex];
    }
    default:
      return online[0];
  }
}
import { events } from './events.js';

// Ring buffers of recent log lines, keyed by node name.
const buffers = new Map();
const MAX_LINES = 200;

// Fix: guard against wrapping events.emit more than once — previously each
// call to initLogBuffer() layered another interceptor, duplicating every
// captured line.
let initialized = false;

/**
 * Start capturing per-node log lines.
 *
 * Lifecycle events get a synthetic line; any `log:<nodeName>` event is
 * intercepted by patching events.emit, since a plain EventEmitter offers no
 * catch-all listener. Safe to call multiple times (no-op after the first).
 */
export function initLogBuffer() {
  if (initialized) return;
  initialized = true;

  events.on('node:online', (name) => addLog(name, `[lcluster] Node ${name} is online`));
  events.on('node:offline', (name) => addLog(name, `[lcluster] Node ${name} is offline`));

  // We need a catch-all for logs, so intercept emit calls directly.
  const originalEmit = events.emit;
  events.emit = function (event, ...args) {
    // Fix: EventEmitter event names may be Symbols, which have no
    // startsWith — only inspect string events.
    if (typeof event === 'string' && event.startsWith('log:')) {
      const nodeName = event.slice(4);
      addLog(nodeName, args[0]);
    }
    return originalEmit.apply(this, [event, ...args]);
  };
}

// Append a line to a node's buffer, evicting the oldest past MAX_LINES.
function addLog(nodeName, line) {
  if (!buffers.has(nodeName)) {
    buffers.set(nodeName, []);
  }
  const b = buffers.get(nodeName);
  b.push(line);
  if (b.length > MAX_LINES) {
    b.shift();
  }
}

/**
 * @param {string} nodeName - Node whose buffer to read.
 * @returns {string[]} Captured lines, oldest first (empty array if none).
 */
export function getLogs(nodeName) {
  return buffers.get(nodeName) || [];
}