@gns-foundation/hive-worker 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +87 -0
- package/dist/cli.d.ts +2 -0
- package/dist/cli.js +355 -0
- package/dist/cli.js.map +1 -0
- package/dist/dashboard.d.ts +25 -0
- package/dist/dashboard.js +97 -0
- package/dist/dashboard.js.map +1 -0
- package/dist/executor.d.ts +16 -0
- package/dist/executor.js +201 -0
- package/dist/executor.js.map +1 -0
- package/dist/hardware.d.ts +19 -0
- package/dist/hardware.js +129 -0
- package/dist/hardware.js.map +1 -0
- package/dist/identity.d.ts +15 -0
- package/dist/identity.js +64 -0
- package/dist/identity.js.map +1 -0
- package/dist/jobs.d.ts +53 -0
- package/dist/jobs.js +133 -0
- package/dist/jobs.js.map +1 -0
- package/dist/llama.d.ts +12 -0
- package/dist/llama.js +65 -0
- package/dist/llama.js.map +1 -0
- package/dist/registry.d.ts +25 -0
- package/dist/registry.js +97 -0
- package/dist/registry.js.map +1 -0
- package/dist/settlement.d.ts +15 -0
- package/dist/settlement.js +112 -0
- package/dist/settlement.js.map +1 -0
- package/package.json +34 -0
- package/src/cli.ts +418 -0
- package/src/dashboard.ts +129 -0
- package/src/executor.ts +270 -0
- package/src/hardware.ts +137 -0
- package/src/identity.ts +82 -0
- package/src/jobs.ts +228 -0
- package/src/llama.ts +79 -0
- package/src/registry.ts +160 -0
- package/src/settlement.ts +145 -0
- package/tsconfig.json +16 -0
package/src/executor.ts
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
1
|
+
// ============================================================
|
|
2
|
+
// HIVE WORKER — EXECUTOR
|
|
3
|
+
// Runs llama-cli for an assigned job.
|
|
4
|
+
//
|
|
5
|
+
// Two modes:
|
|
6
|
+
// pipeline — rpc-server already running, orchestrator calls us
|
|
7
|
+
// (worker just tracks status, no local llama-cli)
|
|
8
|
+
// solo — worker claims and runs the whole model locally
|
|
9
|
+
// (for small models: phi-3-mini, gemma-2-2b, etc.)
|
|
10
|
+
//
|
|
11
|
+
// Solo mode is the active path for hive-worker v0.1 / v0.2.
|
|
12
|
+
// Pipeline mode is the full swarm path (Phase 3 whitepaper).
|
|
13
|
+
// ============================================================
|
|
14
|
+
|
|
15
|
+
import { spawn } from 'child_process';
|
|
16
|
+
import { execSync } from 'child_process';
|
|
17
|
+
import os from 'os';
|
|
18
|
+
import path from 'path';
|
|
19
|
+
import type { HiveJob, JobResult } from './jobs.js';
|
|
20
|
+
|
|
21
|
+
// ─── Binary detection ─────────────────────────────────────────
|
|
22
|
+
|
|
23
|
+
const LLAMA_CLI_CANDIDATES = [
|
|
24
|
+
'llama-cli',
|
|
25
|
+
'llama.cpp/llama-cli',
|
|
26
|
+
`${os.homedir()}/llama.cpp/llama-cli`,
|
|
27
|
+
`${os.homedir()}/llama.cpp/build/bin/llama-cli`,
|
|
28
|
+
`${os.homedir()}/llama.cpp/build/llama-cli`,
|
|
29
|
+
// Older name
|
|
30
|
+
'main',
|
|
31
|
+
`${os.homedir()}/llama.cpp/main`,
|
|
32
|
+
];
|
|
33
|
+
|
|
34
|
+
export function findLlamaCli(): string | null {
|
|
35
|
+
for (const candidate of LLAMA_CLI_CANDIDATES) {
|
|
36
|
+
try {
|
|
37
|
+
execSync(`test -f "${candidate}" || which "${candidate}" 2>/dev/null`, { timeout: 2000 });
|
|
38
|
+
return candidate;
|
|
39
|
+
} catch { /* not found */ }
|
|
40
|
+
}
|
|
41
|
+
return null;
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
// ─── Model cache ──────────────────────────────────────────────
|
|
45
|
+
|
|
46
|
+
const MODEL_CACHE_DIR = path.join(os.homedir(), '.hive', 'models');
|
|
47
|
+
|
|
48
|
+
import fs from 'fs';
|
|
49
|
+
|
|
50
|
+
export function ensureModelCacheDir(): void {
|
|
51
|
+
fs.mkdirSync(MODEL_CACHE_DIR, { recursive: true });
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
export function modelCachePath(modelId: string): string {
|
|
55
|
+
// Normalize: "phi-3-mini" → "phi-3-mini.gguf"
|
|
56
|
+
const filename = modelId.endsWith('.gguf') ? modelId : `${modelId}.gguf`;
|
|
57
|
+
return path.join(MODEL_CACHE_DIR, filename);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
export function isModelCached(modelId: string): boolean {
|
|
61
|
+
return fs.existsSync(modelCachePath(modelId));
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
// Known small models with public GGUF URLs (Q4_K_M quantizations)
|
|
65
|
+
const KNOWN_MODELS: Record<string, string> = {
|
|
66
|
+
'phi-3-mini':
|
|
67
|
+
'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf/resolve/main/Phi-3-mini-4k-instruct-q4.gguf',
|
|
68
|
+
'gemma-2-2b':
|
|
69
|
+
'https://huggingface.co/bartowski/gemma-2-2b-it-GGUF/resolve/main/gemma-2-2b-it-Q4_K_M.gguf',
|
|
70
|
+
'tinyllama':
|
|
71
|
+
'https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf',
|
|
72
|
+
};
|
|
73
|
+
|
|
74
|
+
export function resolveModelUrl(job: HiveJob): string | null {
|
|
75
|
+
if (job.model_url) return job.model_url;
|
|
76
|
+
return KNOWN_MODELS[job.model_id] ?? null;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
// ─── Download model (streaming, with progress) ───────────────
|
|
80
|
+
|
|
81
|
+
export async function downloadModel(
|
|
82
|
+
modelId: string,
|
|
83
|
+
url: string,
|
|
84
|
+
onProgress: (pct: number, mbDone: number, mbTotal: number) => void,
|
|
85
|
+
): Promise<string> {
|
|
86
|
+
ensureModelCacheDir();
|
|
87
|
+
const dest = modelCachePath(modelId);
|
|
88
|
+
const tmpDest = dest + '.download';
|
|
89
|
+
|
|
90
|
+
const resp = await fetch(url, { signal: AbortSignal.timeout(300_000) });
|
|
91
|
+
if (!resp.ok) throw new Error(`Model download failed: ${resp.status}`);
|
|
92
|
+
|
|
93
|
+
const totalBytes = parseInt(resp.headers.get('content-length') ?? '0', 10);
|
|
94
|
+
const totalMb = totalBytes / 1024 / 1024;
|
|
95
|
+
|
|
96
|
+
const writer = fs.createWriteStream(tmpDest);
|
|
97
|
+
let downloaded = 0;
|
|
98
|
+
|
|
99
|
+
if (!resp.body) throw new Error('No response body');
|
|
100
|
+
|
|
101
|
+
const reader = resp.body.getReader();
|
|
102
|
+
while (true) {
|
|
103
|
+
const { done, value } = await reader.read();
|
|
104
|
+
if (done) break;
|
|
105
|
+
writer.write(value);
|
|
106
|
+
downloaded += value.length;
|
|
107
|
+
if (totalBytes > 0) {
|
|
108
|
+
onProgress(
|
|
109
|
+
Math.round((downloaded / totalBytes) * 100),
|
|
110
|
+
Math.round(downloaded / 1024 / 1024),
|
|
111
|
+
Math.round(totalMb),
|
|
112
|
+
);
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
await new Promise<void>((res, rej) => {
|
|
117
|
+
writer.end(() => res());
|
|
118
|
+
writer.on('error', rej);
|
|
119
|
+
});
|
|
120
|
+
|
|
121
|
+
fs.renameSync(tmpDest, dest);
|
|
122
|
+
return dest;
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
// ─── Execute inference ────────────────────────────────────────
|
|
126
|
+
|
|
127
|
+
export interface ExecutorOptions {
|
|
128
|
+
onToken?: (token: string) => void;
|
|
129
|
+
onLog?: (line: string) => void;
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
export async function executeJob(
|
|
133
|
+
job: HiveJob,
|
|
134
|
+
opts: ExecutorOptions = {},
|
|
135
|
+
): Promise<JobResult> {
|
|
136
|
+
const llamaCli = findLlamaCli();
|
|
137
|
+
if (!llamaCli) {
|
|
138
|
+
throw new Error(
|
|
139
|
+
'llama-cli not found. Install llama.cpp: https://github.com/ggerganov/llama.cpp',
|
|
140
|
+
);
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
const modelPath = modelCachePath(job.model_id);
|
|
144
|
+
if (!fs.existsSync(modelPath)) {
|
|
145
|
+
throw new Error(
|
|
146
|
+
`Model "${job.model_id}" not cached at ${modelPath}. ` +
|
|
147
|
+
`Run: hive-worker models fetch ${job.model_id}`,
|
|
148
|
+
);
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
return runLlamaCli(llamaCli, modelPath, job, opts);
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
function runLlamaCli(
|
|
155
|
+
binary: string,
|
|
156
|
+
modelPath: string,
|
|
157
|
+
job: HiveJob,
|
|
158
|
+
opts: ExecutorOptions,
|
|
159
|
+
): Promise<JobResult> {
|
|
160
|
+
return new Promise((resolve, reject) => {
|
|
161
|
+
const startMs = Date.now();
|
|
162
|
+
|
|
163
|
+
// Build args
|
|
164
|
+
const args = [
|
|
165
|
+
'--model', modelPath,
|
|
166
|
+
'--prompt', job.prompt,
|
|
167
|
+
'--n-predict', String(job.max_tokens),
|
|
168
|
+
'--temp', String(job.temperature),
|
|
169
|
+
'--ctx-size', '4096',
|
|
170
|
+
'--threads', String(Math.max(1, Math.floor(os.cpus().length / 2))),
|
|
171
|
+
'--no-display-prompt', // suppress prompt echo
|
|
172
|
+
'--log-disable',
|
|
173
|
+
'--single-turn',
|
|
174
|
+
'-ngl', '0',
|
|
175
|
+
|
|
176
|
+
];
|
|
177
|
+
|
|
178
|
+
// Layer range hint (pipeline mode — rpc-server handles routing,
|
|
179
|
+
// but we pass the range so the model knows which layers to host)
|
|
180
|
+
if (job.layer_start !== 0 || job.layer_end !== 31) {
|
|
181
|
+
args.push('--n-gpu-layers', '0'); // CPU-only for partial layer ranges
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
// Apple Silicon: use Metal when running full model
|
|
185
|
+
const isApple = os.platform() === 'darwin' && os.arch() === 'arm64';
|
|
186
|
+
if (isApple && job.layer_start === 0) {
|
|
187
|
+
args.push('--n-gpu-layers', '99'); // offload all layers to Metal
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
opts.onLog?.(`Starting llama-cli for "${job.model_id}" [layers ${job.layer_start}–${job.layer_end}]`);
|
|
191
|
+
opts.onLog?.(`Command: ${binary} ${args.slice(0, 6).join(' ')} ...`);
|
|
192
|
+
|
|
193
|
+
const proc = spawn(binary, args, {
|
|
194
|
+
stdio: ['ignore', 'pipe', 'pipe'],
|
|
195
|
+
});
|
|
196
|
+
|
|
197
|
+
let output = '';
|
|
198
|
+
let stderrBuf = '';
|
|
199
|
+
|
|
200
|
+
// llama-cli writes generated tokens to stdout
|
|
201
|
+
proc.stdout.on('data', (chunk: Buffer) => {
|
|
202
|
+
const text = chunk.toString();
|
|
203
|
+
output += text;
|
|
204
|
+
opts.onToken?.(text);
|
|
205
|
+
});
|
|
206
|
+
|
|
207
|
+
// Stats appear on stderr: "llama_print_timings: eval time = 12345 ms / 150 runs"
|
|
208
|
+
proc.stderr.on('data', (chunk: Buffer) => {
|
|
209
|
+
stderrBuf += chunk.toString();
|
|
210
|
+
opts.onLog?.(chunk.toString().trim());
|
|
211
|
+
});
|
|
212
|
+
|
|
213
|
+
proc.on('error', (err) => {
|
|
214
|
+
reject(new Error(`llama-cli spawn error: ${err.message}`));
|
|
215
|
+
});
|
|
216
|
+
|
|
217
|
+
proc.on('close', (code) => {
|
|
218
|
+
const wallMs = Date.now() - startMs;
|
|
219
|
+
|
|
220
|
+
if (code !== 0) {
|
|
221
|
+
reject(new Error(
|
|
222
|
+
`llama-cli exited with code ${code}. stderr: ${stderrBuf.slice(-500)}`,
|
|
223
|
+
));
|
|
224
|
+
return;
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
// Parse tokens/sec from llama.cpp timing output
|
|
228
|
+
// Example: "llama_print_timings: eval time = 4523.11 ms / 150 runs"
|
|
229
|
+
const tpsMatch = stderrBuf.match(/eval time\s*=\s*([\d.]+)\s*ms\s*\/\s*(\d+)\s*runs/);
|
|
230
|
+
let tokensPerSecond = 0;
|
|
231
|
+
let tokensGenerated = output.split(/\s+/).length; // rough estimate
|
|
232
|
+
|
|
233
|
+
if (tpsMatch) {
|
|
234
|
+
const evalMs = parseFloat(tpsMatch[1]);
|
|
235
|
+
tokensGenerated = parseInt(tpsMatch[2], 10);
|
|
236
|
+
tokensPerSecond = Math.round((tokensGenerated / evalMs) * 1000 * 10) / 10;
|
|
237
|
+
} else if (wallMs > 0) {
|
|
238
|
+
// Fallback: tokens from output word count / wall time
|
|
239
|
+
tokensPerSecond = Math.round((tokensGenerated / wallMs) * 1000 * 10) / 10;
|
|
240
|
+
}
|
|
241
|
+
|
|
242
|
+
resolve({
|
|
243
|
+
resultText: output.trim(),
|
|
244
|
+
tokensGenerated,
|
|
245
|
+
tokensPerSecond,
|
|
246
|
+
});
|
|
247
|
+
});
|
|
248
|
+
|
|
249
|
+
// Job-level timeout (safety net on top of Supabase timeout_at)
|
|
250
|
+
const deadline = new Date(job.timeout_at).getTime();
|
|
251
|
+
const remaining = deadline - Date.now() - 10_000; // 10s buffer
|
|
252
|
+
if (remaining > 0) {
|
|
253
|
+
setTimeout(() => {
|
|
254
|
+
proc.kill('SIGTERM');
|
|
255
|
+
reject(new Error('Job timed out (executor deadline)'));
|
|
256
|
+
}, remaining);
|
|
257
|
+
}
|
|
258
|
+
});
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
// ─── List cached models ───────────────────────────────────────
|
|
262
|
+
|
|
263
|
+
export function listCachedModels(): Array<{ modelId: string; sizeMb: number }> {
|
|
264
|
+
ensureModelCacheDir();
|
|
265
|
+
const files = fs.readdirSync(MODEL_CACHE_DIR).filter(f => f.endsWith('.gguf'));
|
|
266
|
+
return files.map(f => ({
|
|
267
|
+
modelId: f.replace('.gguf', ''),
|
|
268
|
+
sizeMb: Math.round(fs.statSync(path.join(MODEL_CACHE_DIR, f)).size / 1024 / 1024),
|
|
269
|
+
}));
|
|
270
|
+
}
|
package/src/hardware.ts
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
// ============================================================
|
|
2
|
+
// HIVE WORKER — HARDWARE
|
|
3
|
+
// Detect CPU, RAM, GPU, OS platform
|
|
4
|
+
// Derive H3 cell from IP geolocation (desktop has no GPS)
|
|
5
|
+
// ============================================================
|
|
6
|
+
|
|
7
|
+
import os from 'os';
|
|
8
|
+
import { execSync } from 'child_process';
|
|
9
|
+
import { latLngToCell } from 'h3-js';
|
|
10
|
+
|
|
11
|
+
/** Snapshot of this machine's compute capability — see detectHardware(). */
export interface HardwareProfile {
  cpuModel: string;   // from os.cpus()[0].model, e.g. "Apple M2 Pro"
  cpuCores: number;   // logical core count
  ramGb: number;      // total RAM rounded to whole GB
  platform: string; // darwin | linux | win32
  arch: string; // arm64 | x64
  gpuModel: string | null;  // null when no GPU was detected
  estimatedTflops: number;  // rough heuristic, used for scheduling tiers
  hiveClass: 'phone' | 'laptop' | 'desktop' | 'server';
}
|
|
21
|
+
|
|
22
|
+
/** Approximate worker location from IP geolocation — see detectGeo(). */
export interface GeoProfile {
  h3Cell: string; // Res-6 H3 cell
  city: string;    // 'Unknown' when the lookup omitted it or failed
  country: string;
  lat: number;     // 0 when the geo API returned no latitude
  lng: number;     // 0 when the geo API returned no longitude
}
|
|
29
|
+
|
|
30
|
+
// ─── CPU / RAM ───────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
function detectGpu(): string | null {
|
|
33
|
+
try {
|
|
34
|
+
const plat = os.platform();
|
|
35
|
+
if (plat === 'darwin') {
|
|
36
|
+
const out = execSync('system_profiler SPDisplaysDataType 2>/dev/null | grep "Chipset Model"', {
|
|
37
|
+
encoding: 'utf-8', timeout: 3000,
|
|
38
|
+
});
|
|
39
|
+
const match = out.match(/Chipset Model:\s*(.+)/);
|
|
40
|
+
return match ? match[1].trim() : null;
|
|
41
|
+
}
|
|
42
|
+
if (plat === 'linux') {
|
|
43
|
+
try {
|
|
44
|
+
const out = execSync('nvidia-smi --query-gpu=name --format=csv,noheader 2>/dev/null', {
|
|
45
|
+
encoding: 'utf-8', timeout: 3000,
|
|
46
|
+
});
|
|
47
|
+
return out.trim().split('\n')[0] || null;
|
|
48
|
+
} catch { /* no nvidia */ }
|
|
49
|
+
try {
|
|
50
|
+
const out = execSync('rocm-smi --showproductname 2>/dev/null | grep "Card"', {
|
|
51
|
+
encoding: 'utf-8', timeout: 3000,
|
|
52
|
+
});
|
|
53
|
+
return out.trim() || null;
|
|
54
|
+
} catch { return null; }
|
|
55
|
+
}
|
|
56
|
+
return null;
|
|
57
|
+
} catch {
|
|
58
|
+
return null;
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
function estimateTflops(cpuCores: number, gpu: string | null, arch: string): number {
|
|
63
|
+
// Very rough heuristic — enough for scheduling tier decisions
|
|
64
|
+
if (gpu) {
|
|
65
|
+
if (gpu.includes('A100')) return 312;
|
|
66
|
+
if (gpu.includes('H100')) return 756;
|
|
67
|
+
if (gpu.includes('4090')) return 82;
|
|
68
|
+
if (gpu.includes('3090')) return 35;
|
|
69
|
+
if (gpu.includes('RTX 5090')) return 170;
|
|
70
|
+
if (gpu.includes('M4 Pro') || gpu.includes('M4 Max')) return 14;
|
|
71
|
+
if (gpu.includes('M3 Pro') || gpu.includes('M3 Max')) return 10;
|
|
72
|
+
if (gpu.includes('M2 Pro') || gpu.includes('M2 Max')) return 6.5;
|
|
73
|
+
if (gpu.includes('M1 Pro') || gpu.includes('M1 Max')) return 5.2;
|
|
74
|
+
if (gpu.includes('M4')) return 4.6;
|
|
75
|
+
if (gpu.includes('M3')) return 3.6;
|
|
76
|
+
if (gpu.includes('M2')) return 2.6;
|
|
77
|
+
if (gpu.includes('M1')) return 2.0;
|
|
78
|
+
}
|
|
79
|
+
// CPU-only estimate: ~0.05 TFLOPS per core for modern CPUs
|
|
80
|
+
return Math.round(cpuCores * 0.05 * 10) / 10;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
function classifyDevice(ramGb: number, cpuCores: number, gpu: string | null): HardwareProfile['hiveClass'] {
|
|
84
|
+
if (ramGb >= 64 && cpuCores >= 16) return 'server';
|
|
85
|
+
if (ramGb >= 16 || (gpu && !gpu.toLowerCase().includes('apple'))) return 'desktop';
|
|
86
|
+
if (ramGb >= 8) return 'laptop';
|
|
87
|
+
return 'phone';
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
export function detectHardware(): HardwareProfile {
|
|
91
|
+
const cpus = os.cpus();
|
|
92
|
+
const cpuModel = cpus[0]?.model?.trim() ?? 'Unknown CPU';
|
|
93
|
+
const cpuCores = cpus.length;
|
|
94
|
+
const ramGb = Math.round(os.totalmem() / (1024 ** 3));
|
|
95
|
+
const platform = os.platform();
|
|
96
|
+
const arch = os.arch();
|
|
97
|
+
const gpuModel = detectGpu();
|
|
98
|
+
const estimatedTflops = estimateTflops(cpuCores, gpuModel, arch);
|
|
99
|
+
const hiveClass = classifyDevice(ramGb, cpuCores, gpuModel);
|
|
100
|
+
|
|
101
|
+
return { cpuModel, cpuCores, ramGb, platform, arch, gpuModel, estimatedTflops, hiveClass };
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
// ─── GEO (IP-based, best effort) ─────────────────────────────
|
|
105
|
+
|
|
106
|
+
export async function detectGeo(): Promise<GeoProfile> {
|
|
107
|
+
try {
|
|
108
|
+
const resp = await fetch('https://ipapi.co/json/', { signal: AbortSignal.timeout(5000) });
|
|
109
|
+
if (!resp.ok) throw new Error(`HTTP ${resp.status}`);
|
|
110
|
+
const data = await resp.json() as {
|
|
111
|
+
latitude?: number; longitude?: number;
|
|
112
|
+
city?: string; country_name?: string;
|
|
113
|
+
};
|
|
114
|
+
const lat = data.latitude ?? 0;
|
|
115
|
+
const lng = data.longitude ?? 0;
|
|
116
|
+
const h3Cell = latLngToCell(lat, lng, 6); // Res-6: ~36 km²
|
|
117
|
+
|
|
118
|
+
return {
|
|
119
|
+
h3Cell,
|
|
120
|
+
city: data.city ?? 'Unknown',
|
|
121
|
+
country: data.country_name ?? 'Unknown',
|
|
122
|
+
lat,
|
|
123
|
+
lng,
|
|
124
|
+
};
|
|
125
|
+
} catch {
|
|
126
|
+
// Fallback: Rome (GNS genesis location)
|
|
127
|
+
const lat = 41.8919;
|
|
128
|
+
const lng = 12.5113;
|
|
129
|
+
return {
|
|
130
|
+
h3Cell: latLngToCell(lat, lng, 6),
|
|
131
|
+
city: 'Unknown (geo offline)',
|
|
132
|
+
country: 'Unknown',
|
|
133
|
+
lat,
|
|
134
|
+
lng,
|
|
135
|
+
};
|
|
136
|
+
}
|
|
137
|
+
}
|
package/src/identity.ts
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
// ============================================================
|
|
2
|
+
// HIVE WORKER — IDENTITY
|
|
3
|
+
// Ed25519 keypair generation + local persistence
|
|
4
|
+
// ~/.hive/identity.json
|
|
5
|
+
// ============================================================
|
|
6
|
+
|
|
7
|
+
import nacl from 'tweetnacl';
|
|
8
|
+
import fs from 'fs';
|
|
9
|
+
import path from 'path';
|
|
10
|
+
import os from 'os';
|
|
11
|
+
|
|
12
|
+
/**
 * Worker keypair persisted at ~/.hive/identity.json.
 * The secret key signs worker messages (see sign()) and never leaves disk.
 */
export interface HiveIdentity {
  pk: string; // 64-char hex (32 bytes Ed25519 public key)
  sk: string; // 128-char hex (64 bytes Ed25519 secret key) — never leaves disk
  createdAt: string; // ISO-8601 timestamp of key generation
}
|
|
17
|
+
|
|
18
|
+
// On-disk identity location: ~/.hive/identity.json
const HIVE_DIR = path.join(os.homedir(), '.hive');
const IDENTITY_FILE = path.join(HIVE_DIR, 'identity.json');
|
|
20
|
+
|
|
21
|
+
function bytesToHex(bytes: Uint8Array): string {
|
|
22
|
+
return Array.from(bytes)
|
|
23
|
+
.map(b => b.toString(16).padStart(2, '0'))
|
|
24
|
+
.join('');
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
function hexToBytes(hex: string): Uint8Array {
|
|
28
|
+
const bytes = new Uint8Array(hex.length / 2);
|
|
29
|
+
for (let i = 0; i < hex.length; i += 2) {
|
|
30
|
+
bytes[i / 2] = parseInt(hex.substring(i, i + 2), 16);
|
|
31
|
+
}
|
|
32
|
+
return bytes;
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
export function ensureHiveDir(): void {
|
|
36
|
+
if (!fs.existsSync(HIVE_DIR)) {
|
|
37
|
+
fs.mkdirSync(HIVE_DIR, { recursive: true, mode: 0o700 });
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
export function generateIdentity(): HiveIdentity {
|
|
42
|
+
const keypair = nacl.sign.keyPair();
|
|
43
|
+
return {
|
|
44
|
+
pk: bytesToHex(keypair.publicKey),
|
|
45
|
+
sk: bytesToHex(keypair.secretKey),
|
|
46
|
+
createdAt: new Date().toISOString(),
|
|
47
|
+
};
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
export function loadOrCreateIdentity(): { identity: HiveIdentity; isNew: boolean } {
|
|
51
|
+
ensureHiveDir();
|
|
52
|
+
|
|
53
|
+
if (fs.existsSync(IDENTITY_FILE)) {
|
|
54
|
+
const raw = fs.readFileSync(IDENTITY_FILE, 'utf-8');
|
|
55
|
+
const identity = JSON.parse(raw) as HiveIdentity;
|
|
56
|
+
return { identity, isNew: false };
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
const identity = generateIdentity();
|
|
60
|
+
fs.writeFileSync(IDENTITY_FILE, JSON.stringify(identity, null, 2), { mode: 0o600 });
|
|
61
|
+
return { identity, isNew: true };
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
export function saveIdentity(identity: HiveIdentity): void {
|
|
65
|
+
ensureHiveDir();
|
|
66
|
+
fs.writeFileSync(IDENTITY_FILE, JSON.stringify(identity, null, 2), { mode: 0o600 });
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
export function sign(identity: HiveIdentity, message: string): string {
|
|
70
|
+
const sk = hexToBytes(identity.sk);
|
|
71
|
+
const msgBytes = new TextEncoder().encode(message);
|
|
72
|
+
const sig = nacl.sign.detached(msgBytes, sk);
|
|
73
|
+
return bytesToHex(sig);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
export function shortPk(pk: string): string {
|
|
77
|
+
return pk.slice(0, 8);
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
/** Absolute path of the on-disk identity file (~/.hive/identity.json). */
export function identityPath(): string {
  return IDENTITY_FILE;
}
|