kova-node-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +138 -0
- package/bin/cli.js +2 -0
- package/dist/__tests__/auto-bidder.test.js +267 -0
- package/dist/__tests__/container-manager.test.js +189 -0
- package/dist/__tests__/deployment-executor.test.js +332 -0
- package/dist/__tests__/heartbeat.test.js +191 -0
- package/dist/__tests__/lease-handler.test.js +268 -0
- package/dist/__tests__/resource-limits.test.js +164 -0
- package/dist/api/server.js +607 -0
- package/dist/cli.js +47 -0
- package/dist/commands/deploy.js +568 -0
- package/dist/commands/earnings.js +70 -0
- package/dist/commands/start.js +358 -0
- package/dist/commands/status.js +50 -0
- package/dist/commands/stop.js +101 -0
- package/dist/lib/client.js +87 -0
- package/dist/lib/config.js +107 -0
- package/dist/lib/docker.js +415 -0
- package/dist/lib/logger.js +12 -0
- package/dist/lib/message-signer.js +93 -0
- package/dist/lib/monitor.js +105 -0
- package/dist/lib/p2p.js +186 -0
- package/dist/lib/resource-limits.js +84 -0
- package/dist/lib/state.js +113 -0
- package/dist/lib/types.js +2 -0
- package/dist/lib/usage-meter.js +63 -0
- package/dist/services/auto-bidder.js +332 -0
- package/dist/services/container-manager.js +282 -0
- package/dist/services/deployment-executor.js +1562 -0
- package/dist/services/heartbeat.js +110 -0
- package/dist/services/job-handler.js +241 -0
- package/dist/services/lease-handler.js +382 -0
- package/package.json +51 -0
package/README.md
ADDED
@@ -0,0 +1,138 @@

# Kova Node

Provider node software for the Kova decentralized compute network. Share your spare computing resources and earn rewards.

## Overview

Kova is a decentralized marketplace for compute resources. By running this node software, you contribute idle computing power to the network and receive compensation for workloads executed on your machine.

The platform enables anyone to become a compute provider, creating a distributed alternative to traditional cloud infrastructure.

## Requirements

- Docker (latest stable version)
- Linux or macOS
- Stable internet connection
- Ethereum wallet address

## Installation

Install the node software globally via npm:

```bash
npm install -g @kovanetwork/node-cli
```

Or using yarn:

```bash
yarn global add @kovanetwork/node-cli
```

## Getting Started

### 1. Obtain API Key

Visit https://app.kovanetwork.com and authenticate with your wallet. Navigate to the Provider section to retrieve your API key.

### 2. Start Your Node

Launch the node with your API key:

```bash
kova-node start --api-key sk_live_your_key_here
```

The node will register with the network and begin accepting workloads based on your available resources.

## Configuration

By default, the node allocates all available system resources. You can configure resource limits using command-line flags:

```bash
kova-node start --api-key sk_live_your_key_here \
  --max-cpu 4 \
  --max-memory 8 \
  --max-disk 100
```

### Available Options

- `--max-cpu` - Maximum CPU cores to allocate (default: all available)
- `--max-memory` - Maximum memory in GB (default: 80% of system total)
- `--max-disk` - Maximum disk space in GB (default: 100)
- `--port` - P2P network port (default: 4001)

## Commands

Check node status:

```bash
kova-node status
```

View earnings:

```bash
kova-node earnings
```

Stop the node:

```bash
kova-node stop
```

## Earnings

Providers are compensated based on actual resource consumption. Payment rates are determined by:

- CPU and memory utilization
- Job execution duration
- Network demand and pricing

Earnings are credited to your connected wallet address. View accumulated earnings through the dashboard at https://app.kovanetwork.com or via the CLI.
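For a rough sense of scale, here is an illustrative per-block bid calculation using the pricing-strategy values that appear in this package's auto-bidder test fixtures (shown later in this diff). These are fixture values, not published network rates; actual pricing is provider-configured and varies with demand.

```js
// Illustrative only: fixture values from the auto-bidder tests, not real rates.
const pricing = { cpuPricePerCore: 0.01, memoryPricePerGb: 0.005, margin: 1.2 };
const workload = { cpu: 2, memoryGb: 4 }; // a 2-core / 4 GiB job

const base = workload.cpu * pricing.cpuPricePerCore        // 0.02
           + workload.memoryGb * pricing.memoryPricePerGb; // 0.02
const pricePerBlock = base * pricing.margin;               // ~0.048 per block
```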
## Security

Workloads execute within isolated Docker containers, providing process and filesystem separation from the host system. However, providers should understand the following:

- Containers provide isolation but are not a complete security boundary
- Providers have visibility into container contents and execution
- Only run nodes on dedicated hardware or machines you control
- Review the security documentation before accepting production workloads

## Troubleshooting

### Docker Not Found

Ensure Docker is installed and the daemon is running. Verify with:

```bash
docker ps
```

### Node Fails to Start

Check if the default port (4001) is available. Specify an alternative if needed:

```bash
kova-node start --api-key sk_live_your_key_here --port 4002
```

### No Jobs Assigned

Network discovery typically takes 10-15 minutes for new nodes. Ensure your node maintains consistent uptime and competitive resource pricing.

### Invalid API Key

If authentication fails, log in to the dashboard and regenerate your API key from the Provider section.

## Support

- Documentation: https://docs.kovanetwork.com
- GitHub Issues: https://github.com/Kovanetwork/node_cli/issues

## License

MIT
package/dist/__tests__/auto-bidder.test.js
ADDED
@@ -0,0 +1,267 @@

// tests for AutoBidder - order evaluation, bid pricing, deduplication
jest.mock('../lib/logger.js', () => ({
  logger: { info: jest.fn(), error: jest.fn(), warn: jest.fn(), debug: jest.fn() }
}));

import { AutoBidder } from '../services/auto-bidder';

// mock fetch globally
const mockFetch = jest.fn();
global.fetch = mockFetch;

// mock resource monitor
const mockMonitor = {
  getAvailableResources: jest.fn().mockResolvedValue({
    cpu: { cores: 8, available: 6 },
    memory: { total: 32, available: 24 },
    disk: [{ path: '/', total: 500, available: 400 }],
    network: { bandwidth: 100 },
    gpu: []
  }),
  start: jest.fn(),
  stop: jest.fn(),
  getStats: jest.fn()
};

describe('AutoBidder', () => {
  let bidder;

  const config = {
    nodeId: 'node-123',
    providerId: 'provider-456',
    orchestratorUrl: 'http://localhost:3000',
    apiKey: 'sk_live_test123',
    pricingStrategy: {
      cpuPricePerCore: 0.01,
      memoryPricePerGb: 0.005,
      gpuPricePerUnit: 0.1,
      margin: 1.2
    }
  };

  // helper to make a fresh order id with recent timestamp
  const makeOrderId = (suffix = '1') => {
    return `user-abc-${Date.now()}-${suffix}`;
  };

  const makeOrder = (overrides = {}) => ({
    id: makeOrderId(),
    deploymentId: 'deploy-1',
    resources: {
      cpu: 2,
      memory: '4Gi',
      ...overrides.resources
    },
    placement: { pricing: {} },
    maxPricePerBlock: 1.0,
    ...overrides
  });

  // helper to wait for async operations to settle
  const settle = (ms = 100) => new Promise(r => setTimeout(r, ms));

  beforeEach(() => {
    jest.clearAllMocks();
    bidder = new AutoBidder(config, mockMonitor);
  });

  afterEach(() => {
    bidder.stop();
  });

  // -- start/stop --
  it('should start polling and run immediately', async () => {
    mockFetch.mockResolvedValue({
      ok: true,
      json: async () => ({ orders: [] })
    });
    bidder.start(60000);
    await settle();
    expect(mockFetch).toHaveBeenCalledWith('http://localhost:3000/api/v1/provider/orders', expect.objectContaining({
      headers: { 'Authorization': 'Bearer sk_live_test123' }
    }));
  });

  it('should not start twice', async () => {
    mockFetch.mockResolvedValue({ ok: true, json: async () => ({ orders: [] }) });
    bidder.start(60000);
    await settle();
    const callCount = mockFetch.mock.calls.length;
    bidder.start(60000);
    await settle();
    // shouldn't have made additional calls from second start
    expect(mockFetch.mock.calls.length).toBe(callCount);
  });

  it('should stop cleanly', () => {
    mockFetch.mockResolvedValue({ ok: true, json: async () => ({ orders: [] }) });
    bidder.start(60000);
    bidder.stop();
    // no error thrown
  });

  // -- order evaluation --
  it('should submit bid for affordable order with sufficient resources', async () => {
    const order = makeOrder();
    mockFetch
      .mockResolvedValueOnce({ ok: true, json: async () => ({ orders: [order] }) })
      .mockResolvedValueOnce({ ok: true, json: async () => ({ bid: { id: 'bid-1' } }) });
    bidder.start(60000);
    await settle(200);
    const bidCall = mockFetch.mock.calls.find((c) => c[1]?.method === 'POST');
    expect(bidCall).toBeDefined();
    const body = JSON.parse(bidCall[1].body);
    expect(body.orderId).toBe(order.id);
    expect(body.nodeId).toBe('node-123');
    expect(body.pricePerBlock).toBeGreaterThan(0);
  });

  it('should skip orders when resources are insufficient', async () => {
    mockMonitor.getAvailableResources.mockResolvedValueOnce({
      cpu: { cores: 8, available: 1 },
      memory: { total: 32, available: 24 },
      disk: [],
      network: { bandwidth: 100 },
      gpu: []
    });
    const order = makeOrder({ resources: { cpu: 4, memory: '8Gi' } });
    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: async () => ({ orders: [order] })
    });
    bidder.start(60000);
    await settle(200);
    const bidCalls = mockFetch.mock.calls.filter((c) => c[1]?.method === 'POST');
    expect(bidCalls).toHaveLength(0);
  });

  it('should skip orders when our price exceeds maxPricePerBlock', async () => {
    const order = makeOrder({ maxPricePerBlock: 0.0001 });
    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: async () => ({ orders: [order] })
    });
    bidder.start(60000);
    await settle(200);
    const bidCalls = mockFetch.mock.calls.filter((c) => c[1]?.method === 'POST');
    expect(bidCalls).toHaveLength(0);
  });

  // -- bid price calculation --
  it('should calculate price based on cpu and memory costs with margin', async () => {
    // cpu: 2 cores * 0.01 = 0.02
    // memory: 4gi * 0.005 = 0.02
    // base: 0.04, with 1.2 margin = 0.048
    const order = makeOrder({
      resources: { cpu: 2, memory: '4Gi' },
      maxPricePerBlock: 10
    });
    mockFetch
      .mockResolvedValueOnce({ ok: true, json: async () => ({ orders: [order] }) })
      .mockResolvedValueOnce({ ok: true, json: async () => ({ bid: { id: 'bid-1' } }) });
    bidder.start(60000);
    await settle(200);
    const bidCall = mockFetch.mock.calls.find((c) => c[1]?.method === 'POST');
    expect(bidCall).toBeDefined();
    const body = JSON.parse(bidCall[1].body);
    expect(body.pricePerBlock).toBe(0.048);
  });

  // -- deduplication --
  it('should not bid on the same order twice', async () => {
    const order = makeOrder();
    mockFetch
      .mockResolvedValueOnce({ ok: true, json: async () => ({ orders: [order] }) })
      .mockResolvedValueOnce({ ok: true, json: async () => ({ bid: { id: 'bid-1' } }) });
    bidder.start(500); // short interval for this test
    await settle(200);
    // second poll returns same order
    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: async () => ({ orders: [order] })
    });
    await settle(700);
    // should only have 1 POST (bid submission)
    const bidCalls = mockFetch.mock.calls.filter((c) => c[1]?.method === 'POST');
    expect(bidCalls).toHaveLength(1);
  });

  it('should track "already bid" errors without retrying', async () => {
    const order = makeOrder();
    mockFetch
      .mockResolvedValueOnce({ ok: true, json: async () => ({ orders: [order] }) })
      .mockResolvedValueOnce({
        ok: false,
        json: async () => ({ message: 'already bid on this order' })
      });
    bidder.start(500);
    await settle(200);
    // next poll, same order should be skipped
    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: async () => ({ orders: [order] })
    });
    await settle(700);
    // only 1 POST attempt total
    const bidCalls = mockFetch.mock.calls.filter((c) => c[1]?.method === 'POST');
    expect(bidCalls).toHaveLength(1);
  });

  // -- error handling --
  it('should handle fetch errors gracefully', async () => {
    mockFetch.mockRejectedValueOnce(new Error('network down'));
    bidder.start(60000);
    await settle(200);
    // no crash, bidder still alive
    expect(bidder).toBeDefined();
  });

  it('should handle non-ok response from orders endpoint', async () => {
    mockFetch.mockResolvedValueOnce({
      ok: false,
      status: 500,
      text: async () => 'internal error'
    });
    bidder.start(60000);
    await settle(200);
    // no crash
    expect(bidder).toBeDefined();
  });

  it('should skip orders older than 7 days', async () => {
    const oldTimestamp = Date.now() - 8 * 24 * 60 * 60 * 1000;
    const order = makeOrder({
      id: `user-abc-${oldTimestamp}-1`
    });
    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: async () => ({ orders: [order] })
    });
    bidder.start(60000);
    await settle(200);
    const bidCalls = mockFetch.mock.calls.filter((c) => c[1]?.method === 'POST');
    expect(bidCalls).toHaveLength(0);
  });

  it('should handle empty orders list', async () => {
    mockFetch.mockResolvedValueOnce({
      ok: true,
      json: async () => ({ orders: [] })
    });
    bidder.start(60000);
    await settle(200);
    // just the one GET, no POSTs
    const getCalls = mockFetch.mock.calls.filter((c) => !c[1]?.method || c[1]?.method === 'GET');
    expect(getCalls.length).toBeGreaterThanOrEqual(1);
    const bidCalls = mockFetch.mock.calls.filter((c) => c[1]?.method === 'POST');
    expect(bidCalls).toHaveLength(0);
  });

  it('should include gpu cost in price when order requires gpu', async () => {
    const order = makeOrder({
      resources: {
        cpu: 2,
        memory: '4Gi',
        gpu: { units: 2, attributes: {} }
      },
      maxPricePerBlock: 10
    });
    mockFetch
      .mockResolvedValueOnce({ ok: true, json: async () => ({ orders: [order] }) })
      .mockResolvedValueOnce({ ok: true, json: async () => ({ bid: { id: 'bid-2' } }) });
    // provide gpu in available resources
    mockMonitor.getAvailableResources.mockResolvedValueOnce({
      cpu: { cores: 8, available: 6 },
      memory: { total: 32, available: 24 },
      disk: [],
      network: { bandwidth: 100 },
      gpu: [
        { vendor: 'nvidia', model: 'a100', vram: 40 },
        { vendor: 'nvidia', model: 'a100', vram: 40 }
      ]
    });
    bidder.start(60000);
    await settle(200);
    const bidCall = mockFetch.mock.calls.find((c) => c[1]?.method === 'POST');
    expect(bidCall).toBeDefined();
    const body = JSON.parse(bidCall[1].body);
    // cpu: 2*0.01=0.02, mem: 4*0.005=0.02, gpu: 2*0.1=0.2 => base 0.24, *1.2 margin = 0.288
    expect(body.pricePerBlock).toBe(0.288);
  });
});
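The compiled `auto-bidder.js` itself is not shown in this section, so the following is a minimal sketch of the pricing and staleness rules the assertions above pin down, reconstructed purely from the test expectations. The helper names (`calculateBidPrice`, `isOrderStale`) and the final rounding step are assumptions, not the shipped implementation.

```js
// Sketch only: reconstructed from the test expectations above.
function calculateBidPrice(order, pricing) {
  // the fixtures only use memory strings like '4Gi', priced per GB
  const memoryGb = parseFloat(order.resources.memory);
  const gpuUnits = order.resources.gpu?.units ?? 0;

  const base =
    order.resources.cpu * pricing.cpuPricePerCore +  // 2 * 0.01  = 0.02
    memoryGb * pricing.memoryPricePerGb +            // 4 * 0.005 = 0.02
    gpuUnits * pricing.gpuPricePerUnit;              // 2 * 0.1   = 0.2 (gpu case)

  // rounding is assumed so floats match the exact 0.048 / 0.288 expectations
  return Number((base * pricing.margin).toFixed(3));
}

function isOrderStale(orderId, maxAgeMs = 7 * 24 * 60 * 60 * 1000) {
  // order ids look like `user-abc-<timestamp>-<n>`; the timestamp is the
  // second-to-last dash-separated segment
  const parts = orderId.split('-');
  const createdAt = Number(parts[parts.length - 2]);
  return !Number.isFinite(createdAt) || Date.now() - createdAt > maxAgeMs;
}
```

Embedding the creation timestamp in the order id, as the fixtures do with `user-abc-${Date.now()}-1`, is what lets the bidder age out stale orders without an extra field on the order payload.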
package/dist/__tests__/container-manager.test.js
ADDED
@@ -0,0 +1,189 @@

// tests for ContainerManager - container lifecycle, exec, file ops, health
jest.mock('../lib/logger.js', () => ({
  logger: { info: jest.fn(), error: jest.fn(), warn: jest.fn(), debug: jest.fn() }
}));

const mockDocker = {
  checkDocker: jest.fn().mockResolvedValue(true),
  pullImage: jest.fn().mockResolvedValue(undefined),
  createContainer: jest.fn().mockResolvedValue({
    id: 'container-123',
    // never resolve wait() so the container stays "running" in the map
    wait: jest.fn().mockReturnValue(new Promise(() => { }))
  }),
  getContainerStats: jest.fn().mockResolvedValue({ memory: 100, cpu: 5, network: { rx: 0, tx: 0 } }),
  cleanupContainer: jest.fn().mockResolvedValue(undefined),
  execCommand: jest.fn().mockResolvedValue({ stdout: 'ok', stderr: '', exitCode: 0 }),
  getContainerLogs: jest.fn().mockResolvedValue('log output'),
  streamContainerLogs: jest.fn().mockResolvedValue(() => { })
};

jest.mock('../lib/docker.js', () => ({
  DockerManager: jest.fn().mockImplementation(() => mockDocker)
}));

import { ContainerManager } from '../services/container-manager';

describe('ContainerManager', () => {
  let cm;

  const testJob = {
    id: 'job-001',
    userId: 'user-1',
    image: 'alpine:latest',
    resources: { cpu: 2, memory: 4, disk: 10 },
    env: { FOO: 'bar' },
    duration: 0
  };

  beforeEach(() => {
    jest.clearAllMocks();
    cm = new ContainerManager();
  });

  afterEach(async () => {
    // cleanup intervals
    await cm.stop();
  });

  // -- start --
  it('should check docker availability on start', async () => {
    await cm.start();
    expect(mockDocker.checkDocker).toHaveBeenCalled();
  });

  it('should throw if docker is not available', async () => {
    mockDocker.checkDocker.mockResolvedValueOnce(false);
    await expect(cm.start()).rejects.toThrow('docker not available');
  });

  // -- runJob --
  it('should pull image and create container', async () => {
    const containerId = await cm.runJob(testJob);
    expect(containerId).toBe('container-123');
    expect(mockDocker.pullImage).toHaveBeenCalledWith('alpine:latest');
    expect(mockDocker.createContainer).toHaveBeenCalledWith(expect.objectContaining({
      jobId: 'job-001',
      image: 'alpine:latest',
      cpus: 2,
      memory: 4096 // 4gb * 1024
    }));
  });

  it('should emit container-started event on successful run', async () => {
    const events = [];
    cm.on('container-started', (data) => events.push(data));
    await cm.runJob(testJob);
    expect(events).toHaveLength(1);
    expect(events[0].jobId).toBe('job-001');
    expect(events[0].containerId).toBe('container-123');
  });

  it('should track running jobs after run', async () => {
    await cm.runJob(testJob);
    expect(cm.getRunningJobs()).toContain('job-001');
  });

  it('should emit container-failed on error', async () => {
    mockDocker.pullImage.mockRejectedValueOnce(new Error('pull failed'));
    const events = [];
    cm.on('container-failed', (data) => events.push(data));
    await expect(cm.runJob(testJob)).rejects.toThrow('pull failed');
    expect(events).toHaveLength(1);
    expect(events[0].jobId).toBe('job-001');
  });

  // -- stopContainer --
  it('should stop and remove tracked container', async () => {
    await cm.runJob(testJob);
    const events = [];
    cm.on('container-stopped', (data) => events.push(data));
    await cm.stopContainer('job-001');
    expect(mockDocker.cleanupContainer).toHaveBeenCalledWith('container-123');
    expect(cm.getRunningJobs()).not.toContain('job-001');
    expect(events).toHaveLength(1);
  });

  it('should silently no-op when stopping unknown job', async () => {
    await cm.stopContainer('nonexistent');
    // no throw
  });

  // -- execInContainer --
  it('should exec command in running container', async () => {
    await cm.runJob(testJob);
    const result = await cm.execInContainer('job-001', 'whoami');
    expect(mockDocker.execCommand).toHaveBeenCalledWith('container-123', 'whoami');
    expect(result.stdout).toBe('ok');
    expect(result.exitCode).toBe(0);
  });

  it('should throw when exec on unknown container', async () => {
    await expect(cm.execInContainer('nope', 'ls')).rejects.toThrow('container not found');
  });

  it('should throw when exec on stopped container', async () => {
    await cm.runJob(testJob);
    await cm.stopContainer('job-001');
    await expect(cm.execInContainer('job-001', 'ls')).rejects.toThrow('container not found');
  });

  // -- getContainerLogs --
  it('should fetch logs from docker', async () => {
    await cm.runJob(testJob);
    const logs = await cm.getContainerLogs('job-001', 50);
    expect(mockDocker.getContainerLogs).toHaveBeenCalledWith('container-123', 50);
    expect(logs).toBe('log output');
  });

  it('should throw for logs on unknown container', async () => {
    await expect(cm.getContainerLogs('nope')).rejects.toThrow('container not found');
  });

  // -- writeFile --
  it('should base64 encode content and exec write command', async () => {
    await cm.runJob(testJob);
    await cm.writeFile('job-001', '/tmp/test.txt', 'hello world');
    expect(mockDocker.execCommand).toHaveBeenCalled();
    const call = mockDocker.execCommand.mock.calls[0];
    expect(call[0]).toBe('container-123');
    // command should contain base64 encoded content
    const encoded = Buffer.from('hello world', 'utf8').toString('base64');
    expect(call[1]).toContain(encoded);
    expect(call[1]).toContain('/tmp/test.txt');
  });

  it('should reject path traversal in writeFile', async () => {
    await cm.runJob(testJob);
    await expect(cm.writeFile('job-001', '/tmp/../etc/passwd', 'bad'))
      .rejects.toThrow('path traversal');
  });

  it('should reject relative paths in writeFile', async () => {
    await cm.runJob(testJob);
    await expect(cm.writeFile('job-001', 'relative/path.txt', 'bad'))
      .rejects.toThrow('filepath must be absolute');
  });

  it('should reject shell metacharacters in filepath', async () => {
    await cm.runJob(testJob);
    await expect(cm.writeFile('job-001', '/tmp/test;rm -rf /', 'bad'))
      .rejects.toThrow('invalid characters');
  });

  it('should reject system paths like /proc', async () => {
    await cm.runJob(testJob);
    await expect(cm.writeFile('job-001', '/proc/self/environ', 'bad'))
      .rejects.toThrow('system paths not allowed');
  });

  // -- readFile --
  it('should read file from container via exec', async () => {
    await cm.runJob(testJob);
    const content = await cm.readFile('job-001', '/tmp/data.txt');
    expect(content).toBe('ok');
    expect(mockDocker.execCommand).toHaveBeenCalledWith('container-123', expect.stringContaining('/tmp/data.txt'));
  });

  it('should reject path traversal in readFile', async () => {
    await cm.runJob(testJob);
    await expect(cm.readFile('job-001', '/dev/null/../etc/shadow'))
      .rejects.toThrow('path traversal');
  });

  // -- getHealthStatus --
  it('should return undefined for unknown job', () => {
    expect(cm.getHealthStatus('nope')).toBeUndefined();
  });

  it('should return initial health status after run', async () => {
    await cm.runJob(testJob);
    const health = cm.getHealthStatus('job-001');
    expect(health).toBeDefined();
    expect(health.status).toBe('unknown');
    expect(health.failures).toBe(0);
  });

  // -- getRunningJobs --
  it('should return empty array with no jobs', () => {
    expect(cm.getRunningJobs()).toEqual([]);
  });

  it('should list multiple running jobs', async () => {
    await cm.runJob(testJob);
    await cm.runJob({ ...testJob, id: 'job-002' });
    const jobs = cm.getRunningJobs();
    expect(jobs).toHaveLength(2);
    expect(jobs).toContain('job-001');
    expect(jobs).toContain('job-002');
  });
});
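The writeFile/readFile tests above encode a specific validation order (absolute path, then traversal, then shell metacharacters, then system-path prefixes) plus a base64 round trip through `execCommand`. Below is a rough sketch of logic that would satisfy those expectations; the names (`validateContainerPath`, `writeFileInContainer`), the blocklist, and the exact shell wrapping are assumptions, not the shipped `container-manager.js`.

```js
// Sketch only: reconstructed from the ContainerManager test expectations.
const BLOCKED_PREFIXES = ['/proc', '/sys', '/dev', '/etc']; // assumed blocklist

function validateContainerPath(filepath) {
  if (!filepath.startsWith('/')) throw new Error('filepath must be absolute');
  if (filepath.includes('..')) throw new Error('path traversal not allowed');
  // the path ends up inside a shell command, so reject metacharacters outright
  if (/[;&|`$<>\\'"\s]/.test(filepath)) throw new Error('invalid characters in filepath');
  if (BLOCKED_PREFIXES.some((p) => filepath === p || filepath.startsWith(p + '/'))) {
    throw new Error('system paths not allowed');
  }
}

async function writeFileInContainer(docker, containerId, filepath, content) {
  validateContainerPath(filepath);
  // base64-encode so arbitrary content survives the shell round trip
  const encoded = Buffer.from(content, 'utf8').toString('base64');
  return docker.execCommand(containerId, `sh -c "echo '${encoded}' | base64 -d > ${filepath}"`);
}
```

Base64-encoding the payload before it reaches the shell keeps arbitrary file content from being interpreted by `sh`, which is also why the path itself is rejected outright if it contains metacharacters.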