@gns-foundation/hive-worker 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +87 -0
- package/dist/cli.d.ts +2 -0
- package/dist/cli.js +355 -0
- package/dist/cli.js.map +1 -0
- package/dist/dashboard.d.ts +25 -0
- package/dist/dashboard.js +97 -0
- package/dist/dashboard.js.map +1 -0
- package/dist/executor.d.ts +16 -0
- package/dist/executor.js +201 -0
- package/dist/executor.js.map +1 -0
- package/dist/hardware.d.ts +19 -0
- package/dist/hardware.js +129 -0
- package/dist/hardware.js.map +1 -0
- package/dist/identity.d.ts +15 -0
- package/dist/identity.js +64 -0
- package/dist/identity.js.map +1 -0
- package/dist/jobs.d.ts +53 -0
- package/dist/jobs.js +133 -0
- package/dist/jobs.js.map +1 -0
- package/dist/llama.d.ts +12 -0
- package/dist/llama.js +65 -0
- package/dist/llama.js.map +1 -0
- package/dist/registry.d.ts +25 -0
- package/dist/registry.js +97 -0
- package/dist/registry.js.map +1 -0
- package/dist/settlement.d.ts +15 -0
- package/dist/settlement.js +112 -0
- package/dist/settlement.js.map +1 -0
- package/package.json +34 -0
- package/src/cli.ts +418 -0
- package/src/dashboard.ts +129 -0
- package/src/executor.ts +270 -0
- package/src/hardware.ts +137 -0
- package/src/identity.ts +82 -0
- package/src/jobs.ts +228 -0
- package/src/llama.ts +79 -0
- package/src/registry.ts +160 -0
- package/src/settlement.ts +145 -0
- package/tsconfig.json +16 -0
package/src/jobs.ts
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
// ============================================================
// HIVE WORKER — JOB QUEUE
// Poll Supabase hive_jobs, claim atomically via FOR UPDATE
// SKIP LOCKED (Postgres handles races — no app-level locking needed)
// ============================================================

// Supabase project endpoint and anon key. NOTE(review): both are
// hard-coded. The anon key is the public client-side key (designed to
// ship in clients), but reading these from env vars would allow
// separate staging/production projects — confirm intent.
const SUPABASE_URL = 'https://kaqwkxfaclyqjlfhxrmt.supabase.co';
const SUPABASE_ANON = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImthcXdreGZhY2x5cWpsZmh4cm10Iiwicm9sZSI6ImFub24iLCJpYXQiOjE3NzI4MzU4NTAsImV4cCI6MjA4ODQxMTg1MH0.ClyWNGRxQjpKYzIROPZBqTXDsWvJioGe9pQymDOYBTc';

// Shared headers for every Supabase REST call in this module.
const HEADERS = {
    'apikey': SUPABASE_ANON,
    'Authorization': `Bearer ${SUPABASE_ANON}`,
    'Content-Type': 'application/json',
};

// ─── Types ────────────────────────────────────────────────────
|
|
17
|
+
|
|
18
|
+
// One row of the hive_jobs table (field names mirror the DB columns).
export interface HiveJob {
    id: string;
    h3_cell: string;                  // H3 geo cell the job is pinned to
    jurisdiction: string | null;
    model_id: string;
    model_url: string | null;
    layer_start: number;              // model layer range assigned to this worker
    layer_end: number;
    prompt: string;
    max_tokens: number;
    temperature: number;
    status: 'pending' | 'assigned' | 'computing' | 'completed' | 'failed' | 'timed_out';
    worker_pk: string | null;         // public key of the worker that claimed the job
    assigned_at: string | null;       // ISO timestamps throughout
    result_text: string | null;
    tokens_generated: number | null;
    tokens_per_second: number | null;
    completed_at: string | null;
    error_message: string | null;
    gns_reward: number;               // gross GNS reward (worker receives a slice — see settlement)
    settled: boolean;
    stellar_tx_hash: string | null;   // written by the orchestrator after on-chain payout
    submitter_pk: string;
    created_at: string;
    timeout_at: string;
}

// Outcome of executing a job locally (produced by the executor).
export interface JobResult {
    resultText: string;
    tokensGenerated: number;
    tokensPerSecond: number;
    error?: string;                   // set when generation partially failed
}
|
|
51
|
+
|
|
52
|
+
// ─── Claim (atomic via Postgres RPC) ─────────────────────────
|
|
53
|
+
|
|
54
|
+
export async function claimJob(
|
|
55
|
+
workerPk: string,
|
|
56
|
+
h3Cell: string,
|
|
57
|
+
modelId?: string,
|
|
58
|
+
): Promise<HiveJob | null> {
|
|
59
|
+
const resp = await fetch(
|
|
60
|
+
`${SUPABASE_URL}/rest/v1/rpc/claim_hive_job`,
|
|
61
|
+
{
|
|
62
|
+
method: 'POST',
|
|
63
|
+
headers: HEADERS,
|
|
64
|
+
body: JSON.stringify({
|
|
65
|
+
p_worker_pk: workerPk,
|
|
66
|
+
p_h3_cell: h3Cell,
|
|
67
|
+
p_model_id: modelId ?? null,
|
|
68
|
+
}),
|
|
69
|
+
},
|
|
70
|
+
);
|
|
71
|
+
|
|
72
|
+
if (!resp.ok) {
|
|
73
|
+
const text = await resp.text();
|
|
74
|
+
throw new Error(`claimJob RPC failed: ${resp.status} ${text}`);
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
const rows = await resp.json() as HiveJob[];
|
|
78
|
+
return rows[0] ?? null;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// ─── Mark computing ───────────────────────────────────────────
|
|
82
|
+
|
|
83
|
+
export async function markComputing(jobId: string): Promise<void> {
|
|
84
|
+
await patchJob(jobId, { status: 'computing' });
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
// ─── Post result ──────────────────────────────────────────────
|
|
88
|
+
|
|
89
|
+
export async function postResult(jobId: string, result: JobResult): Promise<void> {
|
|
90
|
+
await patchJob(jobId, {
|
|
91
|
+
status: 'completed',
|
|
92
|
+
result_text: result.resultText,
|
|
93
|
+
tokens_generated: result.tokensGenerated,
|
|
94
|
+
tokens_per_second: result.tokensPerSecond,
|
|
95
|
+
completed_at: new Date().toISOString(),
|
|
96
|
+
error_message: null,
|
|
97
|
+
});
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
// ─── Post failure ─────────────────────────────────────────────
|
|
101
|
+
|
|
102
|
+
export async function postFailure(jobId: string, errorMsg: string): Promise<void> {
|
|
103
|
+
await patchJob(jobId, {
|
|
104
|
+
status: 'failed',
|
|
105
|
+
error_message: errorMsg,
|
|
106
|
+
completed_at: new Date().toISOString(),
|
|
107
|
+
});
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
// ─── Trigger timeout cleanup (call once per poll cycle) ───────
|
|
111
|
+
|
|
112
|
+
export async function cleanupTimedOutJobs(): Promise<void> {
|
|
113
|
+
await fetch(`${SUPABASE_URL}/rest/v1/rpc/timeout_stale_jobs`, {
|
|
114
|
+
method: 'POST',
|
|
115
|
+
headers: HEADERS,
|
|
116
|
+
body: JSON.stringify({}),
|
|
117
|
+
});
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
// ─── Fetch a specific job (for status watch) ──────────────────
|
|
121
|
+
|
|
122
|
+
export async function fetchJob(jobId: string): Promise<HiveJob | null> {
|
|
123
|
+
const resp = await fetch(
|
|
124
|
+
`${SUPABASE_URL}/rest/v1/hive_jobs?id=eq.${jobId}&select=*`,
|
|
125
|
+
{ headers: HEADERS },
|
|
126
|
+
);
|
|
127
|
+
if (!resp.ok) return null;
|
|
128
|
+
const rows = await resp.json() as HiveJob[];
|
|
129
|
+
return rows[0] ?? null;
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
// ─── Fetch recent completed jobs for this worker ──────────────
|
|
133
|
+
|
|
134
|
+
export async function fetchRecentCompletedJobs(
|
|
135
|
+
workerPk: string,
|
|
136
|
+
limit = 5,
|
|
137
|
+
): Promise<HiveJob[]> {
|
|
138
|
+
const resp = await fetch(
|
|
139
|
+
`${SUPABASE_URL}/rest/v1/hive_jobs?worker_pk=eq.${workerPk}&status=eq.completed&order=completed_at.desc&limit=${limit}&select=*`,
|
|
140
|
+
{ headers: HEADERS },
|
|
141
|
+
);
|
|
142
|
+
if (!resp.ok) return [];
|
|
143
|
+
return resp.json() as Promise<HiveJob[]>;
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
// ─── Patch helper ─────────────────────────────────────────────
|
|
147
|
+
|
|
148
|
+
async function patchJob(jobId: string, fields: Partial<HiveJob>): Promise<void> {
|
|
149
|
+
const resp = await fetch(
|
|
150
|
+
`${SUPABASE_URL}/rest/v1/hive_jobs?id=eq.${jobId}`,
|
|
151
|
+
{
|
|
152
|
+
method: 'PATCH',
|
|
153
|
+
headers: { ...HEADERS, 'Prefer': 'return=minimal' },
|
|
154
|
+
body: JSON.stringify(fields),
|
|
155
|
+
},
|
|
156
|
+
);
|
|
157
|
+
|
|
158
|
+
if (!resp.ok && resp.status !== 204) {
|
|
159
|
+
const text = await resp.text();
|
|
160
|
+
throw new Error(`patchJob failed: ${resp.status} ${text}`);
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
// ─── Poll loop factory ────────────────────────────────────────
|
|
165
|
+
// Returns a controller object so cli.ts can stop it cleanly
|
|
166
|
+
|
|
167
|
+
export interface PollController {
|
|
168
|
+
stop: () => void;
|
|
169
|
+
isRunning: () => boolean;
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
export function startJobPoller(opts: {
|
|
173
|
+
workerPk: string;
|
|
174
|
+
h3Cell: string;
|
|
175
|
+
intervalMs: number;
|
|
176
|
+
onJobClaimed: (job: HiveJob) => void;
|
|
177
|
+
onJobCompleted: (job: HiveJob, result: JobResult) => void;
|
|
178
|
+
onJobFailed: (job: HiveJob, error: string) => void;
|
|
179
|
+
onLog: (msg: string) => void;
|
|
180
|
+
executor: (job: HiveJob) => Promise<JobResult>;
|
|
181
|
+
}): PollController {
|
|
182
|
+
let running = true;
|
|
183
|
+
let busy = false;
|
|
184
|
+
|
|
185
|
+
const tick = async () => {
|
|
186
|
+
if (!running || busy) return;
|
|
187
|
+
|
|
188
|
+
try {
|
|
189
|
+
// Run cleanup once per cycle
|
|
190
|
+
await cleanupTimedOutJobs().catch(() => {});
|
|
191
|
+
|
|
192
|
+
const job = await claimJob(opts.workerPk, opts.h3Cell);
|
|
193
|
+
if (!job) return;
|
|
194
|
+
|
|
195
|
+
// Got one
|
|
196
|
+
busy = true;
|
|
197
|
+
opts.onJobClaimed(job);
|
|
198
|
+
|
|
199
|
+
try {
|
|
200
|
+
await markComputing(job.id);
|
|
201
|
+
|
|
202
|
+
const result = await opts.executor(job);
|
|
203
|
+
|
|
204
|
+
await postResult(job.id, result);
|
|
205
|
+
opts.onJobCompleted(job, result);
|
|
206
|
+
} catch (execErr) {
|
|
207
|
+
const errMsg = execErr instanceof Error ? execErr.message : String(execErr);
|
|
208
|
+
await postFailure(job.id, errMsg).catch(() => {});
|
|
209
|
+
opts.onJobFailed(job, errMsg);
|
|
210
|
+
opts.onLog(`Job ${job.id.slice(0, 8)} failed: ${errMsg}`);
|
|
211
|
+
} finally {
|
|
212
|
+
busy = false;
|
|
213
|
+
}
|
|
214
|
+
} catch (pollErr) {
|
|
215
|
+
opts.onLog(`Poll error: ${pollErr instanceof Error ? pollErr.message : String(pollErr)}`);
|
|
216
|
+
}
|
|
217
|
+
};
|
|
218
|
+
|
|
219
|
+
const timer = setInterval(tick, opts.intervalMs);
|
|
220
|
+
|
|
221
|
+
return {
|
|
222
|
+
stop: () => {
|
|
223
|
+
running = false;
|
|
224
|
+
clearInterval(timer);
|
|
225
|
+
},
|
|
226
|
+
isRunning: () => running,
|
|
227
|
+
};
|
|
228
|
+
}
|
package/src/llama.ts
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
// ============================================================
// HIVE WORKER — LLAMA.CPP RPC SERVER
// Start / stop the llama.cpp rpc-server process
// Detects binary location, manages lifecycle
// ============================================================

import { spawn, type ChildProcess } from 'child_process';
import { execSync } from 'child_process';
import os from 'os';

// Default TCP port the llama.cpp RPC backend listens on.
export const DEFAULT_RPC_PORT = 50052;

// Handle returned by startRpcServer so callers can stop/inspect it.
export interface LlamaRpcHandle {
  process: ChildProcess;
  port: number;
  pid: number;   // 0 when the pid was unavailable at spawn time
}
|
|
18
|
+
|
|
19
|
+
const RPC_BINARY_CANDIDATES = [
|
|
20
|
+
'rpc-server',
|
|
21
|
+
'llama-rpc-server',
|
|
22
|
+
'/usr/local/bin/rpc-server',
|
|
23
|
+
`${os.homedir()}/llama.cpp/rpc-server`,
|
|
24
|
+
`${os.homedir()}/llama.cpp/build/bin/rpc-server`,
|
|
25
|
+
`${os.homedir()}/llama.cpp/build/rpc-server`,
|
|
26
|
+
];
|
|
27
|
+
|
|
28
|
+
export function findRpcBinary(): string | null {
|
|
29
|
+
for (const candidate of RPC_BINARY_CANDIDATES) {
|
|
30
|
+
try {
|
|
31
|
+
execSync(`which "${candidate}" 2>/dev/null || test -f "${candidate}"`, { timeout: 2000 });
|
|
32
|
+
return candidate;
|
|
33
|
+
} catch { }
|
|
34
|
+
}
|
|
35
|
+
try {
|
|
36
|
+
execSync('rpc-server --help 2>&1 | head -1', { timeout: 2000 });
|
|
37
|
+
return 'rpc-server';
|
|
38
|
+
} catch { }
|
|
39
|
+
return null;
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
export function isRpcServerAvailable(): boolean {
|
|
43
|
+
return findRpcBinary() !== null;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
export function startRpcServer(
|
|
47
|
+
port: number = DEFAULT_RPC_PORT,
|
|
48
|
+
onLog?: (line: string) => void,
|
|
49
|
+
): LlamaRpcHandle | null {
|
|
50
|
+
const binary = findRpcBinary();
|
|
51
|
+
if (!binary) return null;
|
|
52
|
+
|
|
53
|
+
const isAppleSilicon = os.platform() === 'darwin' && os.arch() === 'arm64';
|
|
54
|
+
const args = ['--host', '0.0.0.0', '--port', String(port)];
|
|
55
|
+
if (isAppleSilicon) args.push('-d', 'CPU');
|
|
56
|
+
|
|
57
|
+
const proc = spawn(binary, args, {
|
|
58
|
+
detached: false,
|
|
59
|
+
stdio: ['ignore', 'ignore', 'ignore'],
|
|
60
|
+
});
|
|
61
|
+
|
|
62
|
+
return { process: proc, port, pid: proc.pid ?? 0 };
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
export function stopRpcServer(handle: LlamaRpcHandle): void {
|
|
66
|
+
try {
|
|
67
|
+
handle.process.kill('SIGTERM');
|
|
68
|
+
} catch { }
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
export async function isPortFree(port: number): Promise<boolean> {
|
|
72
|
+
const net = await import('net');
|
|
73
|
+
return new Promise((resolve) => {
|
|
74
|
+
const server = net.createServer();
|
|
75
|
+
server.once('error', () => resolve(false));
|
|
76
|
+
server.once('listening', () => { server.close(); resolve(true); });
|
|
77
|
+
server.listen(port, '127.0.0.1');
|
|
78
|
+
});
|
|
79
|
+
}
|
package/src/registry.ts
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
// ============================================================
// HIVE WORKER — REGISTRY
// Supabase swarm_nodes: register, heartbeat, deregister
// Matches the schema built on March 25, 2026
// ============================================================

import type { HiveIdentity } from './identity.js';
import type { HardwareProfile, GeoProfile } from './hardware.js';

// Supabase endpoint + public anon key. NOTE(review): hard-coded here
// (and duplicated in jobs.ts / settlement.ts) — consider a single
// shared config module read from env.
const SUPABASE_URL = 'https://kaqwkxfaclyqjlfhxrmt.supabase.co';
const SUPABASE_ANON = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImthcXdreGZhY2x5cWpsZmh4cm10Iiwicm9sZSI6ImFub24iLCJpYXQiOjE3NzI4MzU4NTAsImV4cCI6MjA4ODQxMTg1MH0.ClyWNGRxQjpKYzIROPZBqTXDsWvJioGe9pQymDOYBTc';

// Shared headers. 'resolution=merge-duplicates' turns POST (with an
// on_conflict target) into an upsert; 'return=minimal' skips echoing
// rows back in responses.
const HEADERS = {
    'apikey': SUPABASE_ANON,
    'Authorization': `Bearer ${SUPABASE_ANON}`,
    'Content-Type': 'application/json',
    'Prefer': 'resolution=merge-duplicates,return=minimal',
};

export type WorkerStatus = 'idle' | 'computing' | 'offline';

// One row of swarm_nodes (field names mirror the DB columns).
export interface SwarmNode {
    pk: string;                 // worker public key (primary key)
    h3_cell: string;            // H3 geo cell this node serves
    handle: string | null;      // optional human-readable name
    hardware: HardwareProfile;
    geo: GeoProfile;
    status: WorkerStatus;
    last_heartbeat: string;     // ISO timestamp
    tokens_earned: number;      // cumulative GNS credited to this node
    rpc_port: number | null;    // llama.cpp rpc-server port, if running
    worker_version: string;
}

// Aggregate numbers shown on the dashboard.
export interface RegistryStats {
    totalNodes: number;
    activeNodes: number;        // nodes whose status is not 'offline'
    totalTflops: number;
}
|
|
40
|
+
|
|
41
|
+
async function supabase(
|
|
42
|
+
method: string,
|
|
43
|
+
table: string,
|
|
44
|
+
body?: unknown,
|
|
45
|
+
query?: string,
|
|
46
|
+
): Promise<Response> {
|
|
47
|
+
const url = `${SUPABASE_URL}/rest/v1/${table}${query ? '?' + query : ''}`;
|
|
48
|
+
return fetch(url, {
|
|
49
|
+
method,
|
|
50
|
+
headers: HEADERS,
|
|
51
|
+
body: body ? JSON.stringify(body) : undefined,
|
|
52
|
+
});
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
// ─── Registration ─────────────────────────────────────────────
|
|
56
|
+
|
|
57
|
+
export async function registerNode(
|
|
58
|
+
identity: HiveIdentity,
|
|
59
|
+
hw: HardwareProfile,
|
|
60
|
+
geo: GeoProfile,
|
|
61
|
+
handle: string | null,
|
|
62
|
+
rpcPort: number | null,
|
|
63
|
+
): Promise<void> {
|
|
64
|
+
const node: SwarmNode = {
|
|
65
|
+
pk: identity.pk,
|
|
66
|
+
h3_cell: geo.h3Cell,
|
|
67
|
+
handle,
|
|
68
|
+
hardware: hw,
|
|
69
|
+
geo,
|
|
70
|
+
status: 'idle',
|
|
71
|
+
last_heartbeat: new Date().toISOString(),
|
|
72
|
+
tokens_earned: await getExistingTokens(identity.pk),
|
|
73
|
+
rpc_port: rpcPort,
|
|
74
|
+
worker_version: '0.1.0',
|
|
75
|
+
};
|
|
76
|
+
|
|
77
|
+
// Upsert — on conflict update everything except tokens_earned
|
|
78
|
+
const resp = await supabase(
|
|
79
|
+
'POST',
|
|
80
|
+
'swarm_nodes',
|
|
81
|
+
node,
|
|
82
|
+
'on_conflict=pk',
|
|
83
|
+
);
|
|
84
|
+
|
|
85
|
+
if (!resp.ok) {
|
|
86
|
+
const err = await resp.text();
|
|
87
|
+
throw new Error(`Registry registration failed: ${resp.status} ${err}`);
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
async function getExistingTokens(pk: string): Promise<number> {
|
|
92
|
+
try {
|
|
93
|
+
const resp = await supabase('GET', 'swarm_nodes', undefined, `pk=eq.${pk}&select=tokens_earned`);
|
|
94
|
+
if (!resp.ok) return 0;
|
|
95
|
+
const rows = await resp.json() as Array<{ tokens_earned: number }>;
|
|
96
|
+
return rows[0]?.tokens_earned ?? 0;
|
|
97
|
+
} catch {
|
|
98
|
+
return 0;
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
// ─── Heartbeat ─────────────────────────────────────────────
|
|
103
|
+
|
|
104
|
+
export async function heartbeat(
|
|
105
|
+
pk: string,
|
|
106
|
+
status: WorkerStatus,
|
|
107
|
+
): Promise<void> {
|
|
108
|
+
const resp = await supabase(
|
|
109
|
+
'PATCH',
|
|
110
|
+
'swarm_nodes',
|
|
111
|
+
{ status, last_heartbeat: new Date().toISOString() },
|
|
112
|
+
`pk=eq.${pk}`,
|
|
113
|
+
);
|
|
114
|
+
|
|
115
|
+
if (!resp.ok && resp.status !== 204) {
|
|
116
|
+
throw new Error(`Heartbeat failed: ${resp.status}`);
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
// ─── Deregister (graceful shutdown) ─────────────────────────
|
|
121
|
+
|
|
122
|
+
export async function deregisterNode(pk: string): Promise<void> {
|
|
123
|
+
await supabase(
|
|
124
|
+
'PATCH',
|
|
125
|
+
'swarm_nodes',
|
|
126
|
+
{ status: 'offline', last_heartbeat: new Date().toISOString() },
|
|
127
|
+
`pk=eq.${pk}`,
|
|
128
|
+
);
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
// ─── Stats (for dashboard) ──────────────────────────────────
|
|
132
|
+
|
|
133
|
+
export async function fetchSwarmStats(): Promise<RegistryStats> {
|
|
134
|
+
try {
|
|
135
|
+
const resp = await supabase('GET', 'swarm_nodes', undefined, 'select=pk,status,hardware');
|
|
136
|
+
if (!resp.ok) return { totalNodes: 0, activeNodes: 0, totalTflops: 0 };
|
|
137
|
+
const rows = await resp.json() as Array<{ status: string; hardware: HardwareProfile }>;
|
|
138
|
+
const totalNodes = rows.length;
|
|
139
|
+
const activeNodes = rows.filter(r => r.status !== 'offline').length;
|
|
140
|
+
const totalTflops = rows
|
|
141
|
+
.filter(r => r.status !== 'offline')
|
|
142
|
+
.reduce((sum, r) => sum + (r.hardware?.estimatedTflops ?? 0), 0);
|
|
143
|
+
return { totalNodes, activeNodes, totalTflops: Math.round(totalTflops * 10) / 10 };
|
|
144
|
+
} catch {
|
|
145
|
+
return { totalNodes: 0, activeNodes: 0, totalTflops: 0 };
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
// ─── Token balance ──────────────────────────────────────────
|
|
150
|
+
|
|
151
|
+
export async function fetchTokenBalance(pk: string): Promise<number> {
|
|
152
|
+
try {
|
|
153
|
+
const resp = await supabase('GET', 'swarm_nodes', undefined, `pk=eq.${pk}&select=tokens_earned`);
|
|
154
|
+
if (!resp.ok) return 0;
|
|
155
|
+
const rows = await resp.json() as Array<{ tokens_earned: number }>;
|
|
156
|
+
return rows[0]?.tokens_earned ?? 0;
|
|
157
|
+
} catch {
|
|
158
|
+
return 0;
|
|
159
|
+
}
|
|
160
|
+
}
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
// ============================================================
// HIVE WORKER — SETTLEMENT
// After a job completes, credit GNS to the worker's
// tokens_earned balance in Supabase.
//
// Real Stellar settlement (the 60/25/10/5 split) runs
// server-side in the orchestrator. The worker just reads
// its credited balance here.
//
// This module:
//   1. Increments tokens_earned in swarm_nodes (optimistic)
//   2. Marks job.settled = true
//   3. Polls for the actual Stellar TX hash from the orchestrator
// ============================================================

// Supabase endpoint + public anon key (hard-coded; same project as
// jobs.ts and registry.ts).
const SUPABASE_URL = 'https://kaqwkxfaclyqjlfhxrmt.supabase.co';
const SUPABASE_ANON = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6ImthcXdreGZhY2x5cWpsZmh4cm10Iiwicm9sZSI6ImFub24iLCJpYXQiOjE3NzI4MzU4NTAsImV4cCI6MjA4ODQxMTg1MH0.ClyWNGRxQjpKYzIROPZBqTXDsWvJioGe9pQymDOYBTc';

// Shared headers; 'return=minimal' skips echoing rows back.
const HEADERS = {
    'apikey': SUPABASE_ANON,
    'Authorization': `Bearer ${SUPABASE_ANON}`,
    'Content-Type': 'application/json',
    'Prefer': 'return=minimal',
};

// GNS split ratios (matches whitepaper §8.1 and March 25 settlement).
// Ratios sum to 1.0; only WORKER is applied client-side.
export const SPLIT = {
    WORKER: 0.60,     // community node operators
    PLATFORM: 0.25,   // GEIANT / ULISSY orchestration
    HYDRATION: 0.10,  // hydration & resilience fund
    SOVEREIGN: 0.05,  // BFT quorum coordinators
} as const;

// Summary returned to the caller after creditWorker() finishes.
export interface SettlementRecord {
    jobId: string;
    workerPk: string;
    grossReward: number;          // full gns_reward from hive_jobs
    workerSlice: number;          // 60% of gross
    settledAt: string;            // ISO timestamp of the local credit
    stellarTxHash: string | null; // null when the orchestrator TX wasn't seen in time
}
|
|
42
|
+
|
|
43
|
+
// ─── Credit worker (local optimistic update) ──────────────────
|
|
44
|
+
// Increments tokens_earned in swarm_nodes by the 60% worker slice.
|
|
45
|
+
// The orchestrator does the actual Stellar TX — we just reflect it here.
|
|
46
|
+
|
|
47
|
+
export async function creditWorker(
|
|
48
|
+
workerPk: string,
|
|
49
|
+
jobId: string,
|
|
50
|
+
grossReward: number,
|
|
51
|
+
): Promise<SettlementRecord> {
|
|
52
|
+
const workerSlice = Math.round(grossReward * SPLIT.WORKER * 1_000_000) / 1_000_000;
|
|
53
|
+
|
|
54
|
+
// Step 1: increment tokens_earned via Postgres RPC
|
|
55
|
+
await incrementTokens(workerPk, workerSlice);
|
|
56
|
+
|
|
57
|
+
// Step 2: mark job settled (tx hash comes from orchestrator async)
|
|
58
|
+
await markJobSettled(jobId, workerSlice);
|
|
59
|
+
|
|
60
|
+
// Step 3: poll for Stellar TX hash (up to 30s, non-blocking)
|
|
61
|
+
const stellarTxHash = await pollForTxHash(jobId);
|
|
62
|
+
|
|
63
|
+
return {
|
|
64
|
+
jobId,
|
|
65
|
+
workerPk,
|
|
66
|
+
grossReward,
|
|
67
|
+
workerSlice,
|
|
68
|
+
settledAt: new Date().toISOString(),
|
|
69
|
+
stellarTxHash,
|
|
70
|
+
};
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
// ─── Supabase RPC: atomic increment ──────────────────────────
|
|
74
|
+
|
|
75
|
+
async function incrementTokens(workerPk: string, amount: number): Promise<void> {
|
|
76
|
+
// Use a raw SQL RPC to do atomic increment (no race condition)
|
|
77
|
+
const resp = await fetch(`${SUPABASE_URL}/rest/v1/rpc/increment_worker_tokens`, {
|
|
78
|
+
method: 'POST',
|
|
79
|
+
headers: HEADERS,
|
|
80
|
+
body: JSON.stringify({ p_worker_pk: workerPk, p_amount: amount }),
|
|
81
|
+
});
|
|
82
|
+
|
|
83
|
+
if (!resp.ok) {
|
|
84
|
+
// Fallback: PATCH with current + amount (non-atomic, acceptable for v0.1)
|
|
85
|
+
await fallbackIncrementTokens(workerPk, amount);
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
async function fallbackIncrementTokens(workerPk: string, amount: number): Promise<void> {
|
|
90
|
+
// Read current balance
|
|
91
|
+
const getResp = await fetch(
|
|
92
|
+
`${SUPABASE_URL}/rest/v1/swarm_nodes?pk=eq.${workerPk}&select=tokens_earned`,
|
|
93
|
+
{ headers: HEADERS },
|
|
94
|
+
);
|
|
95
|
+
if (!getResp.ok) return;
|
|
96
|
+
const rows = await getResp.json() as Array<{ tokens_earned: number }>;
|
|
97
|
+
const current = rows[0]?.tokens_earned ?? 0;
|
|
98
|
+
|
|
99
|
+
// Write new total
|
|
100
|
+
await fetch(`${SUPABASE_URL}/rest/v1/swarm_nodes?pk=eq.${workerPk}`, {
|
|
101
|
+
method: 'PATCH',
|
|
102
|
+
headers: HEADERS,
|
|
103
|
+
body: JSON.stringify({ tokens_earned: current + amount }),
|
|
104
|
+
});
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
async function markJobSettled(jobId: string, workerSlice: number): Promise<void> {
|
|
108
|
+
await fetch(`${SUPABASE_URL}/rest/v1/hive_jobs?id=eq.${jobId}`, {
|
|
109
|
+
method: 'PATCH',
|
|
110
|
+
headers: HEADERS,
|
|
111
|
+
body: JSON.stringify({ settled: true }),
|
|
112
|
+
});
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
async function pollForTxHash(jobId: string, maxWaitMs = 30_000): Promise<string | null> {
|
|
116
|
+
const deadline = Date.now() + maxWaitMs;
|
|
117
|
+
while (Date.now() < deadline) {
|
|
118
|
+
await sleep(3000);
|
|
119
|
+
const resp = await fetch(
|
|
120
|
+
`${SUPABASE_URL}/rest/v1/hive_jobs?id=eq.${jobId}&select=stellar_tx_hash`,
|
|
121
|
+
{ headers: HEADERS },
|
|
122
|
+
);
|
|
123
|
+
if (!resp.ok) break;
|
|
124
|
+
const rows = await resp.json() as Array<{ stellar_tx_hash: string | null }>;
|
|
125
|
+
const hash = rows[0]?.stellar_tx_hash;
|
|
126
|
+
if (hash) return hash;
|
|
127
|
+
}
|
|
128
|
+
return null;
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
function sleep(ms: number): Promise<void> {
|
|
132
|
+
return new Promise(res => setTimeout(res, ms));
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
// ─── SQL to add to supabase_migration.sql ────────────────────
|
|
136
|
+
// (included here as a comment so it's easy to find)
|
|
137
|
+
//
|
|
138
|
+
// CREATE OR REPLACE FUNCTION increment_worker_tokens(
|
|
139
|
+
// p_worker_pk TEXT,
|
|
140
|
+
// p_amount NUMERIC
|
|
141
|
+
// ) RETURNS VOID AS $$
|
|
142
|
+
// UPDATE swarm_nodes
|
|
143
|
+
// SET tokens_earned = tokens_earned + p_amount
|
|
144
|
+
// WHERE pk = p_worker_pk;
|
|
145
|
+
// $$ LANGUAGE SQL;
|
package/tsconfig.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2022",
|
|
4
|
+
"module": "ESNext",
|
|
5
|
+
"moduleResolution": "bundler",
|
|
6
|
+
"outDir": "./dist",
|
|
7
|
+
"rootDir": "./src",
|
|
8
|
+
"strict": true,
|
|
9
|
+
"esModuleInterop": true,
|
|
10
|
+
"skipLibCheck": true,
|
|
11
|
+
"declaration": true,
|
|
12
|
+
"sourceMap": true
|
|
13
|
+
},
|
|
14
|
+
"include": ["src/**/*"],
|
|
15
|
+
"exclude": ["node_modules", "dist"]
|
|
16
|
+
}
|