neuronix-node 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +60 -0
- package/dist/api.d.ts +31 -0
- package/dist/api.js +68 -0
- package/dist/config.d.ts +15 -0
- package/dist/config.js +48 -0
- package/dist/handlers/chart.d.ts +2 -0
- package/dist/handlers/chart.js +121 -0
- package/dist/handlers/expense.d.ts +2 -0
- package/dist/handlers/expense.js +102 -0
- package/dist/handlers/file-processor.d.ts +7 -0
- package/dist/handlers/file-processor.js +168 -0
- package/dist/handlers/index.d.ts +20 -0
- package/dist/handlers/index.js +36 -0
- package/dist/handlers/invoice.d.ts +2 -0
- package/dist/handlers/invoice.js +113 -0
- package/dist/handlers/pnl.d.ts +2 -0
- package/dist/handlers/pnl.js +116 -0
- package/dist/handlers/smart-route.d.ts +2 -0
- package/dist/handlers/smart-route.js +116 -0
- package/dist/hardware.d.ts +10 -0
- package/dist/hardware.js +27 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +279 -0
- package/dist/inference.d.ts +25 -0
- package/dist/inference.js +73 -0
- package/dist/models.d.ts +29 -0
- package/dist/models.js +141 -0
- package/dist/parsers/csv.d.ts +24 -0
- package/dist/parsers/csv.js +94 -0
- package/dist/parsers/index.d.ts +17 -0
- package/dist/parsers/index.js +101 -0
- package/dist/updater.d.ts +8 -0
- package/dist/updater.js +48 -0
- package/package.json +51 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
"use strict";
|
|
3
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
4
|
+
const config_js_1 = require("./config.js");
|
|
5
|
+
const hardware_js_1 = require("./hardware.js");
|
|
6
|
+
const api_js_1 = require("./api.js");
|
|
7
|
+
const models_js_1 = require("./models.js");
|
|
8
|
+
const inference_js_1 = require("./inference.js");
|
|
9
|
+
const index_js_1 = require("./handlers/index.js");
|
|
10
|
+
const updater_js_1 = require("./updater.js");
|
|
11
|
+
// ── Helpers ──────────────────────────────────────────────────
|
|
12
|
+
// Print one timestamped console line in the node's standard CLI format.
function log(msg) {
    const stamp = new Date().toLocaleTimeString();
    console.log(" [" + stamp + "] " + msg);
}
|
|
16
|
+
// Render the startup banner box, one console.log per line (blank line
// above and below, as in the rest of the CLI output).
function banner() {
    const boxLines = [
        "",
        " ╔══════════════════════════════════════════════════╗",
        " ║ ║",
        " ║ Neuronix GPU Provider Node v0.1.0 ║",
        " ║ Earn by contributing compute ║",
        " ║ ║",
        " ╚══════════════════════════════════════════════════╝",
        "",
    ];
    for (const line of boxLines) {
        console.log(line);
    }
}
|
|
26
|
+
// Print the detected-hardware summary as a fixed-width box.
// Each value cell is padded to 37 chars; long OS/CPU/GPU strings are
// clipped first (hostname is padded but intentionally not clipped,
// matching the original output).
function hardwareTable(hw) {
    const cell = (text) => text.slice(0, 37).padEnd(37);
    const ramLabel = Math.round(hw.ramMb / 1024) + " GB";
    const vramLabel = hw.gpuVramMb > 0 ? hw.gpuVramMb + " MB" : "N/A (CPU mode)";
    console.log(" ┌─────────────────────────────────────────────────┐");
    console.log(` │ Hostname: ${hw.hostname.padEnd(37)}│`);
    console.log(` │ OS: ${cell(hw.os)}│`);
    console.log(` │ CPU: ${cell(hw.cpuModel)}│`);
    console.log(` │ RAM: ${ramLabel.padEnd(37)}│`);
    console.log(` │ GPU: ${cell(hw.gpuModel)}│`);
    console.log(` │ VRAM: ${vramLabel.padEnd(37)}│`);
    console.log(" └─────────────────────────────────────────────────┘");
    console.log("");
}
|
|
37
|
+
// ── Authentication ───────────────────────────────────────────
|
|
38
|
+
/**
 * Ensure `config` carries an auth token, persisting it on success.
 *
 * Resolution order:
 *   1. An existing `config.authToken` is trusted as-is (no server check).
 *   2. NEURONIX_EMAIL / NEURONIX_PASSWORD env vars → non-interactive login.
 *   3. Interactive prompt on stdin/stdout.
 *
 * Both login paths share the same API-call + persist tail; the original
 * duplicated that sequence, so it is extracted into a local helper here.
 */
async function authenticate(config) {
    // Shared tail of both login paths: call the API, stash the returned
    // credentials on the config object, and persist them to disk.
    const finishLogin = async (email, password) => {
        log(`Authenticating as ${email}...`);
        const result = await (0, api_js_1.loginProvider)(config, email, password);
        config.authToken = result.access_token;
        config.userId = result.user_id;
        (0, config_js_1.saveConfig)(config);
        log("Authenticated successfully.");
    };
    if (config.authToken) {
        log("Already authenticated.");
        return;
    }
    // Check for email/password in env first (non-interactive deployments).
    const email = process.env.NEURONIX_EMAIL;
    const password = process.env.NEURONIX_PASSWORD;
    if (email && password) {
        await finishLogin(email, password);
        return;
    }
    // Interactive login via readline.
    const readline = await import("readline");
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    const ask = (q) => new Promise((r) => rl.question(q, r));
    console.log("");
    console.log(" ── Login to your Neuronix account ──");
    console.log(" (Create one at https://neuronix-nu.vercel.app/signup)");
    console.log("");
    const inputEmail = await ask(" Email: ");
    // NOTE(review): rl.question echoes typed input, so the password is
    // visible in the terminal and scrollback. Consider a muted prompt.
    const inputPassword = await ask(" Password: ");
    rl.close();
    console.log("");
    await finishLogin(inputEmail, inputPassword);
}
|
|
74
|
+
// ── Main ─────────────────────────────────────────────────────
|
|
75
|
+
/**
 * Node entry point: shows the banner, checks for updates, authenticates,
 * probes hardware, pre-loads a default model, registers with the network,
 * then runs a heartbeat interval plus a task polling loop until SIGINT /
 * SIGTERM triggers a graceful shutdown.
 */
async function main() {
    banner();
    const config = (0, config_js_1.loadConfig)();
    // ── Update check (best-effort, never fatal) ──────────────
    log("Checking for updates...");
    try {
        const updateInfo = await (0, updater_js_1.checkForUpdates)("0.1.0");
        if (updateInfo.updateAvailable) {
            console.log("");
            console.log(` ⚠ Update available: v${updateInfo.latestVersion}`);
            console.log(` ${updateInfo.downloadUrl}`);
            console.log(` ${updateInfo.releaseNotes}`);
            console.log("");
        }
        else {
            log("Up to date.");
        }
    }
    catch {
        log("Could not check for updates (continuing).");
    }
    // ── Login, hardware probe, model discovery ───────────────
    await authenticate(config);
    log("Detecting hardware...");
    const hw = await (0, hardware_js_1.detectHardware)();
    hardwareTable(hw);
    const compatible = (0, models_js_1.listCompatibleModels)(hw.gpuVramMb);
    log(`Compatible models: ${compatible.map((m) => m.name).join(", ")}`);
    // Download and load the best default model (non-fatal on failure:
    // models are loaded on demand inside the task loop too).
    const defaultModel = (0, models_js_1.selectModel)("inference", hw.gpuVramMb);
    log(`Selected model: ${defaultModel.name} (${defaultModel.sizeMb} MB)`);
    try {
        // FIX: the original printed the post-progress newline only when
        // modelPath.includes("%") — a filesystem path essentially never
        // contains "%", so the next log line was appended to the progress
        // line. Track whether progress output was actually written instead.
        let progressShown = false;
        const modelPath = await (0, models_js_1.ensureModelDownloaded)(config.modelsDir, defaultModel, (pct) => {
            progressShown = true;
            process.stdout.write(`\r Downloading ${defaultModel.name}... ${pct}%`);
        });
        if (progressShown)
            console.log(""); // newline after progress
        log(`Model ready: ${defaultModel.file}`);
        log("Loading model into memory...");
        await (0, inference_js_1.loadModel)(modelPath, defaultModel.id);
        log("Model loaded. Ready to accept tasks.");
    }
    catch (err) {
        log(`Model setup failed: ${err}`);
        log("Continuing — will attempt to load models on demand.");
    }
    // ── Network registration (fatal on failure) ──────────────
    log("Registering with Neuronix network...");
    try {
        const reg = await (0, api_js_1.registerNode)(config, hw);
        config.nodeId = reg.node.id;
        (0, config_js_1.saveConfig)(config);
        log(`Registered! Node ID: ${config.nodeId}`);
    }
    catch (err) {
        log(`Registration failed: ${err}`);
        process.exit(1);
    }
    console.log("");
    console.log(" ════════════════════════════════════════════════════");
    console.log(" Node is ONLINE and listening for tasks.");
    console.log(" Press Ctrl+C to stop.");
    console.log(" ════════════════════════════════════════════════════");
    console.log("");
    // ── Heartbeat loop ───────────────────────────────────────
    // After 5 consecutive failures, attempt a full re-registration.
    // NOTE(review): this interval keeps reporting "online" while a task is
    // in flight, racing with the "busy" status set in the poll loop —
    // confirm that is intended.
    let consecutiveHeartbeatFailures = 0;
    const heartbeatLoop = setInterval(async () => {
        try {
            await (0, api_js_1.sendHeartbeat)(config, "online");
            consecutiveHeartbeatFailures = 0;
        }
        catch {
            consecutiveHeartbeatFailures++;
            if (consecutiveHeartbeatFailures >= 5) {
                log("Lost connection to network (5 consecutive heartbeat failures). Reconnecting...");
                try {
                    await (0, api_js_1.registerNode)(config, hw);
                    consecutiveHeartbeatFailures = 0;
                    log("Reconnected.");
                }
                catch {
                    log("Reconnection failed. Retrying next heartbeat.");
                }
            }
        }
    }, config.heartbeatIntervalMs);
    // ── Session counters and graceful shutdown ───────────────
    let running = true;
    let tasksCompleted = 0;
    let tasksFailed = 0;
    let totalEarned = 0;
    let consecutivePollErrors = 0;
    const shutdown = async () => {
        running = false;
        clearInterval(heartbeatLoop);
        log("Shutting down...");
        (0, inference_js_1.unloadAll)();
        try {
            // Best-effort: tell the network we are going away.
            await (0, api_js_1.sendHeartbeat)(config, "offline");
        }
        catch { }
        console.log("");
        console.log(` Session: ${tasksCompleted} completed, ${tasksFailed} failed, $${totalEarned.toFixed(4)} earned`);
        console.log("");
        process.exit(0);
    };
    process.on("SIGINT", shutdown);
    process.on("SIGTERM", shutdown);
    // Race `promise` against the task timeout.
    // FIX: the original left the pending setTimeout running after the work
    // finished (lingering timer for up to the full timeout); clear it once
    // the race settles either way.
    const withTimeout = (promise, ms) => {
        let timer;
        const timeout = new Promise((_, reject) => {
            timer = setTimeout(() => reject(new Error("Task timed out")), ms);
        });
        return Promise.race([promise, timeout]).finally(() => clearTimeout(timer));
    };
    // ── Task polling loop with error recovery ────────────────
    while (running) {
        try {
            const poll = await (0, api_js_1.pollTask)(config);
            consecutivePollErrors = 0;
            if (poll.task) {
                const task = poll.task;
                log(`Task received: ${task.id.slice(0, 8)}... [${task.type}] model=${task.model || "auto"}`);
                // Mark the node busy while the task runs.
                await (0, api_js_1.sendHeartbeat)(config, "busy");
                const taskStart = Date.now();
                const taskTimeout = (task.timeout_seconds || 120) * 1000;
                try {
                    let outputPayload;
                    let durationMs;
                    if ((0, index_js_1.hasHandler)(task.type)) {
                        // Specialized (non-LLM) handler path.
                        log(`Running ${task.type} handler...`);
                        const handlerResult = await withTimeout((0, index_js_1.runHandler)({
                            type: task.type,
                            input_payload: task.input_payload,
                            model: task.model,
                        }), taskTimeout);
                        durationMs = handlerResult.duration_ms;
                        outputPayload = handlerResult;
                    }
                    else {
                        // Default path: LLM inference.
                        const requestedModel = task.model || "auto";
                        let modelId;
                        if (requestedModel === "auto") {
                            modelId = (0, models_js_1.selectModel)(task.type, hw.gpuVramMb).id;
                        }
                        else {
                            modelId = requestedModel;
                        }
                        if (!(0, inference_js_1.isModelLoaded)(modelId)) {
                            // FIX: when a specific model was requested but not yet
                            // loaded, the original loaded the auto-selected model and
                            // silently overwrote modelId with it. Honor the requested
                            // ID when it exists in the registry; fall back to
                            // auto-selection only for unknown IDs.
                            const modelSpec = (0, models_js_1.getModelById)(modelId) || (0, models_js_1.selectModel)(task.type, hw.gpuVramMb);
                            log(`Loading model ${modelSpec.name}...`);
                            const modelPath = await (0, models_js_1.ensureModelDownloaded)(config.modelsDir, modelSpec, (pct) => {
                                process.stdout.write(`\r Downloading... ${pct}%`);
                            });
                            console.log("");
                            await (0, inference_js_1.loadModel)(modelPath, modelSpec.id);
                            modelId = modelSpec.id;
                        }
                        const prompt = task.input_payload.prompt || "Hello";
                        const maxTokens = task.input_payload.max_tokens || 256;
                        const inference = await withTimeout((0, inference_js_1.runInference)(modelId, prompt, maxTokens), taskTimeout);
                        durationMs = inference.durationMs;
                        outputPayload = {
                            text: inference.text,
                            tokens: inference.text.split(/\s+/).length,
                            model: modelId,
                            duration_ms: durationMs,
                        };
                    }
                    await (0, api_js_1.completeTask)(config, task.id, "completed", outputPayload, durationMs);
                    tasksCompleted++;
                    totalEarned += task.cost_usd || 0;
                    log(`Task ${task.id.slice(0, 8)}... [${task.type}] completed in ${durationMs}ms (+$${(task.cost_usd || 0).toFixed(4)})`);
                }
                catch (inferErr) {
                    // Report the failure to the network with elapsed time.
                    const elapsed = Date.now() - taskStart;
                    tasksFailed++;
                    log(`Task ${task.id.slice(0, 8)}... failed after ${elapsed}ms: ${inferErr}`);
                    await (0, api_js_1.completeTask)(config, task.id, "failed", {
                        error: String(inferErr),
                        duration_ms: elapsed,
                    }, elapsed);
                }
                // Back to online after the task, success or failure.
                await (0, api_js_1.sendHeartbeat)(config, "online");
            }
        }
        catch (pollErr) {
            // Poll/report errors: back off after 10 in a row.
            consecutivePollErrors++;
            if (consecutivePollErrors >= 10) {
                log(`Too many poll errors (${consecutivePollErrors}). Waiting 30s before retry.`);
                await new Promise((r) => setTimeout(r, 30000));
                consecutivePollErrors = 0;
            }
        }
        // Wait before polling again.
        await new Promise((r) => setTimeout(r, config.pollIntervalMs));
    }
}
|
|
276
|
+
// Surface any unhandled startup/runtime failure and exit non-zero so a
// supervisor (pm2, systemd, etc.) can restart the node.
main().catch(function (err) {
    console.error("Fatal error:", err);
    process.exit(1);
});
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Load a model into memory. Caches by model ID to avoid reloading.
 * Evicts the oldest model if we already have 2 loaded (memory management).
 *
 * @param modelPath Path to the GGUF file on disk.
 * @param modelId   Registry ID used as the cache key.
 */
export declare function loadModel(modelPath: string, modelId: string): Promise<void>;
/**
 * Check if a model is loaded (present in the in-memory cache).
 */
export declare function isModelLoaded(modelId: string): boolean;
/**
 * Run inference with a specific loaded model.
 *
 * @param modelId   Cache key previously passed to loadModel().
 * @param prompt    Text prompt forwarded to the chat session.
 * @param maxTokens Generation cap (defaults to 256).
 * @returns Generated text plus wall-clock duration in milliseconds.
 * @throws Error if `modelId` has not been loaded via loadModel().
 */
export declare function runInference(modelId: string, prompt: string, maxTokens?: number): Promise<{
    text: string;
    durationMs: number;
    modelId: string;
}>;
/**
 * Get list of currently loaded models (IDs, in load order).
 */
export declare function getLoadedModels(): string[];
/**
 * Unload all models (for shutdown).
 */
export declare function unloadAll(): void;
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.loadModel = loadModel;
|
|
4
|
+
exports.isModelLoaded = isModelLoaded;
|
|
5
|
+
exports.runInference = runInference;
|
|
6
|
+
exports.getLoadedModels = getLoadedModels;
|
|
7
|
+
exports.unloadAll = unloadAll;
|
|
8
|
+
// Cache loaded models so we don't reload on every task
|
|
9
|
+
const modelCache = new Map();
|
|
10
|
+
let llamaInstance = null;
|
|
11
|
+
/**
 * Lazily initialize the shared node-llama-cpp runtime.
 *
 * FIX: the original cached the resolved instance, so two concurrent first
 * calls both observed `llamaInstance === null` and each imported and
 * initialized the runtime. Caching the in-flight promise instead makes
 * concurrent callers share one initialization. On failure the cache is
 * reset so a later call can retry (matching the original's retry behavior).
 */
async function getLlama() {
    if (!llamaInstance) {
        llamaInstance = import("node-llama-cpp")
            .then(({ getLlama }) => getLlama())
            .catch((err) => {
                llamaInstance = null; // allow retry after a failed init
                throw err;
            });
    }
    // Awaiting the cached promise yields the same instance for all callers.
    return llamaInstance;
}
|
|
18
|
+
/**
 * Load a model into memory. Caches by model ID to avoid reloading.
 * Evicts the oldest (first-inserted) model if 2 are already loaded.
 *
 * FIX: eviction previously only removed the Map entry, leaking the native
 * llama.cpp model/context memory. Evicted entries are now disposed
 * best-effort before being dropped.
 * NOTE(review): dispose() calls follow the node-llama-cpp API — confirm
 * against the installed version; the optional-call guards keep this safe
 * if the methods are absent.
 *
 * @param modelPath Path to the GGUF file on disk.
 * @param modelId   Registry ID used as the cache key.
 */
async function loadModel(modelPath, modelId) {
    if (modelCache.has(modelId)) {
        return; // Already loaded
    }
    // Evict oldest if cache is full (keep max 2 models loaded).
    if (modelCache.size >= 2) {
        const oldest = modelCache.keys().next().value;
        if (oldest) {
            const evicted = modelCache.get(oldest);
            modelCache.delete(oldest);
            try {
                evicted?.context?.dispose?.();
                evicted?.model?.dispose?.();
            }
            catch {
                // Best-effort cleanup; never let disposal block a new load.
            }
        }
    }
    const llama = await getLlama();
    const model = await llama.loadModel({ modelPath });
    const context = await model.createContext();
    modelCache.set(modelId, { model, context });
}
|
|
38
|
+
/**
 * Check if a model is loaded, i.e. present in the in-memory cache.
 *
 * @param modelId Registry ID used as the cache key.
 * @returns true when the model has been loaded and not evicted.
 */
function isModelLoaded(modelId) {
    const loaded = modelCache.has(modelId);
    return loaded;
}
|
|
44
|
+
/**
 * Run inference with a specific loaded model.
 *
 * Creates a fresh LlamaChatSession on a new sequence from the model's
 * cached context, prompts it once, and returns the generated text with
 * wall-clock timing.
 *
 * @param modelId   Cache key previously passed to loadModel().
 * @param prompt    Text prompt forwarded verbatim to the session.
 * @param maxTokens Generation cap (default 256).
 * @throws Error if the model has not been loaded.
 */
async function runInference(modelId, prompt, maxTokens = 256) {
    const cached = modelCache.get(modelId);
    if (!cached) {
        throw new Error(`Model ${modelId} not loaded. Call loadModel() first.`);
    }
    const { LlamaChatSession } = await import("node-llama-cpp");
    // NOTE(review): a new session/sequence is allocated on every call and
    // never explicitly disposed — confirm against node-llama-cpp docs that
    // sequences are reclaimed, or repeated calls may exhaust the context.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const session = new LlamaChatSession({
        contextSequence: cached.context.getSequence(),
    });
    const start = Date.now();
    const response = await session.prompt(prompt, { maxTokens });
    const durationMs = Date.now() - start;
    return { text: response, durationMs, modelId };
}
|
|
62
|
+
/**
 * Get list of currently loaded models.
 *
 * @returns model IDs in insertion (load) order.
 */
function getLoadedModels() {
    const ids = [];
    for (const id of modelCache.keys()) {
        ids.push(id);
    }
    return ids;
}
|
|
68
|
+
/**
 * Unload all models (for shutdown).
 *
 * FIX: clearing the Map alone dropped JS references but never released the
 * native llama.cpp model/context memory. Dispose each cached entry
 * best-effort before clearing.
 * NOTE(review): dispose() follows the node-llama-cpp API — confirm against
 * the installed version; optional-call guards keep this safe if absent.
 */
function unloadAll() {
    for (const entry of modelCache.values()) {
        try {
            entry?.context?.dispose?.();
            entry?.model?.dispose?.();
        }
        catch {
            // Best-effort during shutdown; never block on cleanup errors.
        }
    }
    modelCache.clear();
}
|
package/dist/models.d.ts
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/** One entry in the static model catalog. */
export interface ModelSpec {
    /** Stable registry identifier (e.g. "tinyllama-1.1b"). */
    id: string;
    /** Human-readable display name for logs/CLI. */
    name: string;
    /** GGUF filename used inside the local models directory. */
    file: string;
    /** Direct download URL for the GGUF file. */
    url: string;
    /** Approximate download size in MB (shown to the user before download). */
    sizeMb: number;
    /** Minimum GPU VRAM in MB required to select this model; 0 = CPU-capable. */
    minVramMb: number;
    /** Task kinds this model may be chosen for by selectModel(). */
    taskTypes: string[];
    /** Token budget for the model — presumably a context limit; usage not shown here. */
    maxTokens: number;
    /** Selection ranking — higher wins when the hardware allows it. */
    priority: number;
}
export declare const MODEL_REGISTRY: ModelSpec[];
/**
 * Pick the best model for a given task type and available VRAM.
 * Returns the highest-priority model the hardware can run; falls back to
 * the first (CPU-capable) registry entry when nothing qualifies.
 */
export declare function selectModel(taskType: string, vramMb: number): ModelSpec;
/**
 * Pick a specific model by ID, or the best available.
 *
 * @returns the matching ModelSpec, or undefined for an unknown ID.
 */
export declare function getModelById(modelId: string): ModelSpec | undefined;
/**
 * List which models this hardware can run, highest priority first.
 */
export declare function listCompatibleModels(vramMb: number): ModelSpec[];
/**
 * Download a model if not already cached; resolves to the local file path.
 *
 * @param onProgress Optional percentage callback (0–100) during download.
 */
export declare function ensureModelDownloaded(modelsDir: string, model: ModelSpec, onProgress?: (pct: number) => void): Promise<string>;
|
package/dist/models.js
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.MODEL_REGISTRY = void 0;
|
|
7
|
+
exports.selectModel = selectModel;
|
|
8
|
+
exports.getModelById = getModelById;
|
|
9
|
+
exports.listCompatibleModels = listCompatibleModels;
|
|
10
|
+
exports.ensureModelDownloaded = ensureModelDownloaded;
|
|
11
|
+
const fs_1 = require("fs");
|
|
12
|
+
const path_1 = require("path");
|
|
13
|
+
const https_1 = __importDefault(require("https"));
|
|
14
|
+
const fs_2 = require("fs");
|
|
15
|
+
// Static catalog of GGUF chat models the node can download and serve,
// ordered smallest → largest. `priority` ranks quality (higher wins in
// selectModel) and `minVramMb` gates what the hardware may run (0 = CPU-ok).
exports.MODEL_REGISTRY = [
    {
        id: "tinyllama-1.1b",
        name: "TinyLlama 1.1B Chat",
        file: "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
        url: "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
        sizeMb: 669,
        minVramMb: 0, // CPU-capable fallback model
        taskTypes: ["inference", "embedding"],
        maxTokens: 2048,
        priority: 1,
    },
    {
        id: "phi-2",
        name: "Phi-2 2.7B",
        file: "phi-2.Q4_K_M.gguf",
        url: "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q4_K_M.gguf",
        sizeMb: 1740,
        minVramMb: 2048,
        taskTypes: ["inference", "embedding"],
        maxTokens: 2048,
        priority: 2,
    },
    {
        id: "mistral-7b",
        name: "Mistral 7B Instruct",
        file: "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
        url: "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf",
        sizeMb: 4370,
        minVramMb: 6144,
        taskTypes: ["inference", "embedding"],
        maxTokens: 8192,
        priority: 3,
    },
    {
        id: "llama3-8b",
        name: "Llama 3 8B Instruct",
        // NOTE(review): local filename uses "-Q4_K_M" while the URL basename
        // uses ".Q4_K_M" — confirm this mismatch is intentional.
        file: "Meta-Llama-3-8B-Instruct-Q4_K_M.gguf",
        url: "https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q4_K_M.gguf",
        sizeMb: 4920,
        minVramMb: 8192,
        taskTypes: ["inference", "embedding"],
        maxTokens: 8192,
        priority: 4,
    },
];
|
|
61
|
+
/**
 * Choose the strongest model this hardware can run for `taskType`.
 *
 * Scans the registry for entries that both support the task type and fit
 * within `vramMb`, returning the one with the highest priority (first
 * match wins on ties, as before). If nothing qualifies, the first registry
 * entry — the CPU-capable fallback — is returned.
 */
function selectModel(taskType, vramMb) {
    let best = null;
    for (const spec of exports.MODEL_REGISTRY) {
        if (!spec.taskTypes.includes(taskType) || spec.minVramMb > vramMb) {
            continue;
        }
        if (best === null || spec.priority > best.priority) {
            best = spec;
        }
    }
    return best !== null ? best : exports.MODEL_REGISTRY[0];
}
|
|
75
|
+
/**
 * Look up a registry entry by its ID.
 *
 * @returns the matching ModelSpec, or undefined when the ID is unknown.
 */
function getModelById(modelId) {
    for (const spec of exports.MODEL_REGISTRY) {
        if (spec.id === modelId) {
            return spec;
        }
    }
    return undefined;
}
|
|
81
|
+
/**
 * List every registry model that fits within `vramMb`, strongest first.
 */
function listCompatibleModels(vramMb) {
    const fits = exports.MODEL_REGISTRY.filter((spec) => spec.minVramMb <= vramMb);
    fits.sort((a, b) => b.priority - a.priority);
    return fits;
}
|
|
89
|
+
/**
 * Download a model if not already cached; resolves to the local file path.
 *
 * Fixes over the original:
 *  - Follows 303/307/308 redirects in addition to 301/302 (Hugging Face
 *    commonly issues 307s), still capped at 5 hops.
 *  - Streams to a "<file>.part" temp path and renames on completion, so an
 *    interrupted download never leaves a truncated file at the final path
 *    (which the existsSync fast-path above would otherwise trust forever).
 *  - Handles mid-transfer response errors, which previously left the
 *    promise pending forever with a partial file on disk.
 *  - Uses the already-imported fs module instead of an inline require().
 *
 * @param modelsDir  Directory the GGUF files are stored in.
 * @param model      Registry entry describing the file and URL.
 * @param onProgress Optional percentage callback (0–100); only invoked when
 *                   the server supplies a Content-Length.
 */
async function ensureModelDownloaded(modelsDir, model, onProgress) {
    const modelPath = (0, path_1.join)(modelsDir, model.file);
    if ((0, fs_1.existsSync)(modelPath)) {
        return modelPath;
    }
    const partPath = modelPath + ".part";
    return new Promise((resolve, reject) => {
        const download = (url, redirects = 0) => {
            if (redirects > 5) {
                reject(new Error("Too many redirects"));
                return;
            }
            https_1.default.get(url, { headers: { "User-Agent": "neuronix-node/0.1.0" } }, (res) => {
                if ([301, 302, 303, 307, 308].includes(res.statusCode)) {
                    const location = res.headers.location;
                    if (location) {
                        res.resume(); // drain so the socket is released
                        download(location, redirects + 1);
                        return;
                    }
                }
                if (res.statusCode !== 200) {
                    res.resume();
                    reject(new Error(`Download failed: HTTP ${res.statusCode}`));
                    return;
                }
                const total = parseInt(res.headers["content-length"] || "0", 10);
                let downloaded = 0;
                const file = (0, fs_2.createWriteStream)(partPath);
                // Remove the partial file and fail the promise.
                const abort = (err) => {
                    try {
                        (0, fs_1.unlinkSync)(partPath);
                    }
                    catch { }
                    reject(err);
                };
                res.on("data", (chunk) => {
                    downloaded += chunk.length;
                    if (total > 0 && onProgress) {
                        onProgress(Math.round((downloaded / total) * 100));
                    }
                });
                res.on("error", abort);
                res.pipe(file);
                file.on("finish", () => {
                    file.close(() => {
                        try {
                            // Atomically publish the completed download.
                            (0, fs_1.renameSync)(partPath, modelPath);
                            resolve(modelPath);
                        }
                        catch (err) {
                            abort(err);
                        }
                    });
                });
                file.on("error", abort);
            }).on("error", reject);
        };
        download(model.url);
    });
}
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
 * CSV Parser — extracts structured data from CSV content.
 * Detects column types, calculates summaries, identifies patterns.
 */
export interface CSVData {
    /** Column names taken from the first row. */
    headers: string[];
    /** Data rows as raw string cells (quotes stripped, whitespace trimmed). */
    rows: string[][];
    /** Number of data rows (header row excluded). */
    rowCount: number;
    /** Detected type per header name. */
    columnTypes: Record<string, "number" | "date" | "text" | "currency">;
    summary: {
        /** Per numeric/currency column; values rounded to 2 decimals. */
        numericColumns: {
            name: string;
            sum: number;
            avg: number;
            min: number;
            max: number;
        }[];
        /** Min/max of the first date-typed column, or null when none found. */
        dateRange: {
            min: string;
            max: string;
        } | null;
    };
}
export declare function parseCSV(content: string): CSVData;
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* CSV Parser — extracts structured data from CSV content.
|
|
4
|
+
* Detects column types, calculates summaries, identifies patterns.
|
|
5
|
+
*/
|
|
6
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
7
|
+
exports.parseCSV = parseCSV;
|
|
8
|
+
/**
 * Parse raw CSV text into headers/rows plus detected column types and
 * numeric/date summaries.
 *
 * @param content Raw CSV text; handles \n and \r\n line endings.
 * @returns CSVData; empty input yields empty headers/rows and no summaries.
 */
function parseCSV(content) {
    const lines = content.trim().split(/\r?\n/);
    // FIX: the original treated any input with fewer than 2 lines as empty,
    // so a header-only CSV silently lost its header row. Only genuinely
    // empty input short-circuits now; a lone header row is still parsed.
    if (lines.length === 0 || lines[0].length === 0) {
        return { headers: [], rows: [], rowCount: 0, columnTypes: {}, summary: { numericColumns: [], dateRange: null } };
    }
    const headers = parseLine(lines[0]);
    const rows = lines.slice(1).map(parseLine);
    // Detect a type for every column from its non-empty values.
    const columnTypes = {};
    for (let col = 0; col < headers.length; col++) {
        const values = rows.map((r) => r[col] || "").filter((v) => v.length > 0);
        columnTypes[headers[col]] = detectColumnType(values);
    }
    // Sum/avg/min/max for each numeric or currency column ($ and , stripped).
    const numericColumns = [];
    for (let col = 0; col < headers.length; col++) {
        const type = columnTypes[headers[col]];
        if (type === "number" || type === "currency") {
            const nums = rows
                .map((r) => parseFloat((r[col] || "").replace(/[$,]/g, "")))
                .filter((n) => !isNaN(n));
            if (nums.length > 0) {
                numericColumns.push({
                    name: headers[col],
                    sum: round(nums.reduce((a, b) => a + b, 0)),
                    avg: round(nums.reduce((a, b) => a + b, 0) / nums.length),
                    min: round(Math.min(...nums)),
                    max: round(Math.max(...nums)),
                });
            }
        }
    }
    // Date range from the FIRST date-typed column only.
    // NOTE(review): values are compared lexicographically, which is only
    // correct for ISO-style YYYY-MM-DD dates; MM/DD/YYYY ranges may be wrong.
    let dateRange = null;
    for (let col = 0; col < headers.length; col++) {
        if (columnTypes[headers[col]] === "date") {
            const dates = rows
                .map((r) => r[col] || "")
                .filter((v) => v.length > 0)
                .sort();
            if (dates.length > 0) {
                dateRange = { min: dates[0], max: dates[dates.length - 1] };
            }
            break;
        }
    }
    return { headers, rows, rowCount: rows.length, columnTypes, summary: { numericColumns, dateRange } };
}
|
|
56
|
+
/**
 * Split one CSV line into trimmed fields.
 * Double quotes toggle quoting (commas inside quotes are literal); the
 * quote characters themselves are dropped. Escaped quotes ("") are not
 * specially handled — each quote simply flips the state.
 */
function parseLine(line) {
    const fields = [];
    let buffer = "";
    let quoted = false;
    for (const ch of line) {
        if (ch === '"') {
            quoted = !quoted;
        }
        else if (ch === "," && !quoted) {
            fields.push(buffer.trim());
            buffer = "";
        }
        else {
            buffer += ch;
        }
    }
    // Final field has no trailing comma; flush it explicitly.
    fields.push(buffer.trim());
    return fields;
}
|
|
76
|
+
/**
 * Classify a column from up to 20 sample values.
 * A type wins when more than 70% of the sample matches its pattern;
 * checks run in order currency → number → date, defaulting to "text".
 */
function detectColumnType(values) {
    const sample = values.slice(0, 20);
    const threshold = sample.length * 0.7;
    const mostly = (test) => sample.filter(test).length > threshold;
    // Currency: dollar-prefixed amounts, optionally negative.
    if (mostly((v) => /^\$[\d,.]+$/.test(v) || /^-?\$[\d,.]+$/.test(v))) {
        return "currency";
    }
    // Number: numeric after stripping $ and , separators.
    if (mostly((v) => /^-?[\d,.]+$/.test(v.replace(/[$,]/g, "")))) {
        return "number";
    }
    // Date: YYYY-MM-DD / YYYY/M/D or D-M-YY style patterns anywhere in the value.
    if (mostly((v) => /\d{4}[-/]\d{1,2}[-/]\d{1,2}/.test(v) || /\d{1,2}[-/]\d{1,2}[-/]\d{2,4}/.test(v))) {
        return "date";
    }
    return "text";
}
|
|
92
|
+
/**
 * Round to two decimal places for display in summaries.
 */
function round(n) {
    const scaled = n * 100;
    return Math.round(scaled) / 100;
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
 * File parser registry.
 * Detects file type and extracts structured data.
 */
import { parseCSV, type CSVData } from "./csv.js";
/** Result of parsing one input file. */
export interface ParsedFile {
    /** Detected/declared file type. */
    type: string;
    fileName: string;
    /**
     * Structured payload: CSVData for CSV input; other shapes depend on the
     * parser implementation (not shown in this file).
     */
    data: CSVData | Record<string, unknown>;
    /** Follow-up operations the bot can offer for this file. */
    suggestedActions: string[];
}
/**
 * Parse file content based on its type.
 * Returns structured data + suggested actions the bot can take.
 */
export declare function parseFile(fileName: string, fileType: string, content: string | Buffer): ParsedFile;
export { parseCSV, type CSVData };
|