@synapseia-network/node 0.8.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +105 -0
- package/README.md +232 -0
- package/dist/bid-responder-Q725ZIUC.js +86 -0
- package/dist/bootstrap.js +22 -0
- package/dist/chain-info-lightweight-2UWAQZBF.js +303 -0
- package/dist/chat-stream-handler-BSHSGMFF.js +127 -0
- package/dist/chunk-2X7MSWD4.js +270 -0
- package/dist/chunk-3BHRQWSM.js +531 -0
- package/dist/chunk-5QFTU52A.js +442 -0
- package/dist/chunk-5ZAJBIAV.js +25 -0
- package/dist/chunk-7FLDR5NT.js +186 -0
- package/dist/chunk-C5XRYLYP.js +137 -0
- package/dist/chunk-D7ADMHK2.js +36 -0
- package/dist/chunk-DXUYWRO7.js +23 -0
- package/dist/chunk-F5UDK56Z.js +289 -0
- package/dist/chunk-NEHR6XY7.js +111 -0
- package/dist/chunk-NMJVODKH.js +453 -0
- package/dist/chunk-PRVT22SM.js +324 -0
- package/dist/chunk-T2ZRG5CX.js +1380 -0
- package/dist/chunk-V2L5SXTL.js +88 -0
- package/dist/chunk-XL2NJWFY.js +702 -0
- package/dist/embedding-C6GE3WVM.js +16 -0
- package/dist/hardware-ITQQJ5YI.js +37 -0
- package/dist/index.js +16836 -0
- package/dist/inference-server-CIGRJ36H.js +25 -0
- package/dist/local-cors-J6RWNMMD.js +44 -0
- package/dist/model-catalog-C53SDFMG.js +15 -0
- package/dist/model-discovery-LA6YMT3I.js +10 -0
- package/dist/ollama-XVXA3A37.js +9 -0
- package/dist/rewards-vault-cli-HW7H4EMD.js +147 -0
- package/dist/scripts/create_nodes.sh +6 -0
- package/dist/scripts/diloco_train.py +319 -0
- package/dist/scripts/train_lora.py +237 -0
- package/dist/scripts/train_micro.py +586 -0
- package/dist/trainer-HQMV2ZAR.js +21 -0
- package/package.json +128 -0
- package/scripts/create_nodes.sh +6 -0
- package/scripts/diloco_train.py +319 -0
- package/scripts/train_lora.py +237 -0
- package/scripts/train_micro.py +586 -0
|
@@ -0,0 +1,453 @@
|
|
|
1
|
+
import { fileURLToPath as __synFup } from "url";import { dirname as __synDn } from "path";const __filename = __synFup(import.meta.url);const __dirname = __synDn(__filename);
|
|
2
|
+
import {
|
|
3
|
+
init_logger,
|
|
4
|
+
logger_default
|
|
5
|
+
} from "./chunk-V2L5SXTL.js";
|
|
6
|
+
import {
|
|
7
|
+
__name
|
|
8
|
+
} from "./chunk-D7ADMHK2.js";
|
|
9
|
+
|
|
10
|
+
// src/modules/hardware/hardware.ts
// Initialise the shared logger before any hardware probing can emit log lines.
init_logger();
import * as os from "os";
import { execSync, spawnSync } from "child_process";
import { URL } from "url";
import { Injectable } from "@nestjs/common";
|
|
16
|
+
/**
 * TypeScript decorator runtime helper (equivalent to tsc's `__decorate`).
 * Applies `decorators` right-to-left to a class (`arguments.length < 3`),
 * a property (`=== 3`), or a method/accessor descriptor (`> 3`).
 * Delegates to `Reflect.decorate` when a metadata polyfill provides it.
 */
function _ts_decorate(decorators, target, key, desc) {
  const argCount = arguments.length;
  // For descriptor-style decoration, a null desc means "look it up".
  if (argCount >= 3 && desc === null) {
    desc = Object.getOwnPropertyDescriptor(target, key);
  }
  let result = argCount < 3 ? target : desc;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") {
    result = Reflect.decorate(decorators, target, key, desc);
  } else {
    // Apply in reverse declaration order; a decorator may replace the
    // target/descriptor by returning a truthy value.
    for (let i = decorators.length - 1; i >= 0; i--) {
      const decorator = decorators[i];
      if (!decorator) continue;
      const applied =
        argCount < 3
          ? decorator(result)
          : argCount > 3
            ? decorator(target, key, result)
            : decorator(target, key);
      result = applied || result;
    }
  }
  // Method/accessor decoration writes the (possibly replaced) descriptor back.
  if (argCount > 3 && result) Object.defineProperty(target, key, result);
  return result;
}
|
|
22
|
+
// Register the helper's display name (esbuild keep-names support).
__name(_ts_decorate, "_ts_decorate");
// Module-level latch so the hardware summary is logged at most once per process.
var hardwareLoggedOnce = false;
|
|
24
|
+
var HardwareHelper = class {
  static {
    __name(this, "HardwareHelper");
  }
  /**
   * Detect hardware capabilities — Apple Silicon branch.
   * Classifies the machine from the CPU brand string, mutating
   * `hardware.hardwareClass` (tier 0-5) and `hardware.gpuVramGb`
   * (rough usable unified-memory estimate in GB).
   */
  /** @internal exported for testing */
  detectAppleSilicon(hardware, model) {
    // Tier by chip generation and die size; bigger/newer ranks higher.
    if (model.includes("M3 Ultra")) hardware.hardwareClass = 5;
    else if (model.includes("M3 Max") || model.includes("M3 Pro")) hardware.hardwareClass = 4;
    else if (model.includes("M2 Ultra") || model.includes("M2 Max")) hardware.hardwareClass = 3;
    else if (model.includes("M2 Pro") || model.includes("M1 Ultra") || model.includes("M1 Max")) hardware.hardwareClass = 2;
    else if (model.includes("M3") || model.includes("M2") || model.includes("M1")) hardware.hardwareClass = 1;
    // VRAM estimate keyed off the variant suffix (Ultra/Max/Pro/base).
    if (model.includes("Ultra")) hardware.gpuVramGb = hardware.hardwareClass === 5 ? 192 : 128;
    else if (model.includes("Max")) hardware.gpuVramGb = 96;
    else if (model.includes("Pro")) hardware.gpuVramGb = hardware.hardwareClass >= 3 ? 48 : 18;
    else hardware.gpuVramGb = hardware.hardwareClass === 1 ? 10 : 7;
  }
  /**
   * Query free NVIDIA VRAM via nvidia-smi and fold it into `hardware`.
   *
   * Bug fix: the previous default query passed `nounits`, so the output
   * never contained the "GiB"/"MiB" suffix this parser looks for and
   * gpuVramGb always stayed 0. The query now keeps units; a bare numeric
   * output (nounits-style input supplied by a caller) is treated as MiB,
   * which is nvidia-smi's default memory unit.
   *
   * @internal exported for testing
   */
  detectNvidiaGPU(hardware, smiOutput) {
    if (!smiOutput) {
      // Keep units in the CSV so the parser below can tell GiB from MiB.
      smiOutput = execSync("nvidia-smi --query-gpu=memory.free --format=csv,noheader", {
        encoding: "utf-8"
      });
    }
    let match;
    if ((match = smiOutput.match(/(\d+)\s*GiB/))) {
      hardware.gpuVramGb = Number.parseInt(match[1], 10);
    } else if ((match = smiOutput.match(/(\d+)\s*MiB/))) {
      hardware.gpuVramGb = Math.round(Number.parseInt(match[1], 10) / 1024);
    } else if ((match = smiOutput.trim().match(/^(\d+)$/))) {
      // Unit-less output: assume MiB (nvidia-smi's default).
      hardware.gpuVramGb = Math.round(Number.parseInt(match[1], 10) / 1024);
    }
    // Promote (never demote) the hardware class based on available VRAM.
    if (hardware.gpuVramGb >= 64) hardware.hardwareClass = 5;
    else if (hardware.hardwareClass < 5 && hardware.gpuVramGb >= 24) hardware.hardwareClass = 4;
    else if (hardware.hardwareClass < 4 && hardware.gpuVramGb >= 14) hardware.hardwareClass = 3;
    else if (hardware.hardwareClass < 3 && hardware.gpuVramGb >= 10) hardware.hardwareClass = 2;
    else if (hardware.hardwareClass < 2 && hardware.gpuVramGb >= 6) hardware.hardwareClass = 1;
  }
  /**
   * Detect the node's hardware profile: CPU cores, RAM, GPU VRAM,
   * tier class, local Ollama availability and cloud-LLM configuration.
   *
   * @param {boolean} cpuOnly when true, skip GPU probing AND the
   *   Ollama/cloud checks (hasOllama stays false, hasCloudLlm unset).
   * @param {string} [archOverride] override for os.arch() (testing hook).
   * @returns the populated hardware summary object.
   */
  detectHardware(cpuOnly = false, archOverride) {
    const hardware = {
      cpuCores: os.cpus().length || 2,
      ramGb: Math.round(os.totalmem() / 1024 ** 3),
      gpuVramGb: 0,
      hardwareClass: 0,
      hasOllama: false
    };
    if (!cpuOnly) {
      try {
        const arch2 = archOverride || os.arch();
        if (arch2 === "arm64") {
          // Apple Silicon: the CPU brand string identifies the chip variant.
          const model = execSync("sysctl -n machdep.cpu.brand_string").toString().trim();
          this.detectAppleSilicon(hardware, model);
          hardware.gpuModel = model;
        } else if (arch2 === "x64" || arch2 === "x86") {
          this.detectNvidiaGPU(hardware);
          try {
            const name = execSync("nvidia-smi --query-gpu=name --format=csv,noheader", {
              encoding: "utf-8",
              stdio: ["ignore", "pipe", "ignore"]
            }).split("\n")[0]?.trim();
            if (name) hardware.gpuModel = name;
          } catch {
            // Best-effort: no nvidia-smi means no GPU name to record.
          }
        }
      } catch {
        // Probing failed (no sysctl / no nvidia-smi); keep CPU-only defaults.
      }
      // Probe a local Ollama server via its /api/tags endpoint (2s budget).
      const ollamaUrl = process.env.OLLAMA_URL?.trim() || "http://localhost:11434";
      let probeUrl = null;
      try {
        const parsed = new URL(ollamaUrl);
        probeUrl = `${parsed.origin}/api/tags`;
      } catch {
        probeUrl = null; // malformed OLLAMA_URL: skip the probe entirely
      }
      if (probeUrl !== null) {
        // NOTE(review): relies on `curl` being on PATH — not portable to
        // stock Windows; confirm before changing the probe mechanism.
        const result = spawnSync("curl", [
          "-s",
          "--max-time",
          "2",
          probeUrl
        ], {
          stdio: "pipe",
          timeout: 2e3
        });
        hardware.hasOllama = result.status === 0 && !result.error;
      } else {
        hardware.hasOllama = false;
      }
      hardware.hasCloudLlm = !!(process.env.LLM_CLOUD_MODEL?.trim() || process.env.LLM_PROVIDER?.trim().toLowerCase() === "cloud");
      if (!hardwareLoggedOnce) {
        logger_default.info(`[hardware] hasOllama=${hardware.hasOllama} (url=${ollamaUrl}) hasCloudLlm=${hardware.hasCloudLlm} gpuVramGb=${hardware.gpuVramGb} hardwareClass=${hardware.hardwareClass}`);
        hardwareLoggedOnce = true;
      }
    }
    return hardware;
  }
  /**
   * Get the human-readable name for a numeric tier (0-5).
   * @returns "CPU-Only", "Tier 1".."Tier 5", or "Unknown" for out-of-range.
   */
  getTierName(tier) {
    const names = [
      "CPU-Only",
      "Tier 1",
      "Tier 2",
      "Tier 3",
      "Tier 4",
      "Tier 5"
    ];
    return names[tier] || "Unknown";
  }
  /** @internal Build OS string — exported for testing */
  buildOsString(platform2, release2, arch2, osType) {
    if (platform2 === "darwin") return `macOS ${release2} (${arch2})`;
    if (platform2 === "linux") return `Linux ${release2} (${arch2})`;
    if (platform2 === "win32") return `Windows ${release2} (${arch2})`;
    // Unrecognised platform: fall back to os.type()'s own label.
    return `${osType} ${release2} (${arch2})`;
  }
  /** @internal Estimate Apple Silicon VRAM (GB) from the brand string — exported for testing.
   *  NOTE(review): "M1 Pro" has no explicit entry and falls through to the
   *  plain "M1" → 7 GB case — confirm whether that is intentional. */
  estimateAppleSiliconVram(model) {
    if (model.includes("M3 Ultra")) return 192;
    if (model.includes("M3 Max")) return 128;
    if (model.includes("M2 Ultra")) return 128;
    if (model.includes("M2 Max")) return 96;
    if (model.includes("M3 Pro")) return 48;
    if (model.includes("M2 Pro")) return 18;
    if (model.includes("M1 Ultra")) return 128;
    if (model.includes("M1 Max")) return 96;
    if (model.includes("M3") || model.includes("M2")) return 10;
    if (model.includes("M1")) return 7;
    return 0;
  }
  /** @internal Parse nvidia-smi CSV output ("name, NNN MiB") — exported for testing.
   *  Returns { name, vramGb }; vramGb is 0 when no unit-suffixed number is found. */
  parseNvidiaSmiOutput(smiOutput) {
    const lines = smiOutput.trim().split("\n");
    const parts = lines[0]?.split(",")?.map((s) => s.trim()) || [];
    const name = parts[0] || "NVIDIA GPU";
    const vramStr = parts[1] || "";
    const match = vramStr.match(/(\d+)\s*(GiB|MiB)/);
    if (!match) return {
      name,
      vramGb: 0
    };
    const value = Number.parseInt(match[1], 10);
    const unit = match[2];
    const vramGb = unit === "GiB" ? value : Math.round(value / 1024);
    return {
      name,
      vramGb
    };
  }
  /**
   * Get system information: OS string, CPU model/cores, total memory and
   * a best-effort GPU description (Apple Silicon brand string or nvidia-smi).
   * @param {string} [archOverride] override for os.arch() (testing hook).
   */
  getSystemInfo(archOverride) {
    const osPlatform = os.platform();
    const osRelease = os.release();
    const arch2 = archOverride || os.arch();
    const osString = this.buildOsString(osPlatform, osRelease, arch2, os.type());
    const cpuModel = os.cpus()[0]?.model || "Unknown CPU";
    const cpuCores = os.cpus().length || 0;
    const memoryTotal = os.totalmem();
    let gpuType = null;
    let gpuVram = 0;
    try {
      if (arch2 === "arm64" && osPlatform === "darwin") {
        const model = execSync("sysctl -n machdep.cpu.brand_string", {
          encoding: "utf-8"
        }).trim();
        gpuType = model;
        gpuVram = this.estimateAppleSiliconVram(model);
      } else if (arch2 === "x86_64" || arch2 === "x64") {
        try {
          const smiOutput = execSync("nvidia-smi --query-gpu=name,memory.free --format=csv,noheader", {
            encoding: "utf-8"
          });
          const parsed = this.parseNvidiaSmiOutput(smiOutput);
          gpuType = parsed.name;
          gpuVram = parsed.vramGb;
        } catch {
          // No nvidia-smi: report no GPU rather than failing.
        }
      }
    } catch (error) {
      // Best-effort probing; GPU fields stay null/0 on any failure.
    }
    return {
      os: osString,
      cpu: {
        model: cpuModel,
        cores: cpuCores
      },
      memory: {
        totalGb: Math.round(memoryTotal / 1024 ** 3)
      },
      gpu: {
        type: gpuType,
        vramGb: gpuVram
      }
    };
  }
  /**
   * Get models compatible with the available VRAM.
   * @param {number} vramGb available VRAM in GB.
   * @param {Array} [allModels] catalog to filter; when empty/missing, a
   *   built-in default catalog is used.
   * @returns models whose `minVram` fits within `vramGb`.
   */
  getCompatibleModels(vramGb, allModels = []) {
    // Built-in fallback catalog, used when no catalog is supplied.
    const defaultModels = [
      {
        name: "qwen2.5-3b",
        minVram: 4,
        recommendedTier: 2
      },
      {
        name: "qwen2.5-0.5b",
        minVram: 1,
        recommendedTier: 1
      },
      {
        name: "gemma-3-1b-web",
        minVram: 2,
        recommendedTier: 1
      },
      {
        name: "phi-2",
        minVram: 2,
        recommendedTier: 1
      },
      {
        name: "gemma-3-4b",
        minVram: 4,
        recommendedTier: 2
      },
      {
        name: "qwen2.5-coder-7b",
        minVram: 6,
        recommendedTier: 2
      },
      {
        name: "llama-3.1-8b-instruct",
        minVram: 10,
        recommendedTier: 3
      },
      {
        name: "gemma-3-12b",
        minVram: 10,
        recommendedTier: 3
      },
      {
        name: "gpt-oss-20b",
        minVram: 16,
        recommendedTier: 4
      },
      {
        name: "qwen2.5-coder-32b",
        minVram: 24,
        recommendedTier: 4
      },
      {
        name: "glm-4.7-flash",
        minVram: 24,
        recommendedTier: 5
      },
      {
        name: "qwen3-coder-30b-a3b",
        minVram: 24,
        recommendedTier: 5
      }
    ];
    const catalog = allModels && allModels.length > 0 ? allModels : defaultModels;
    return catalog.filter((model) => model.minVram <= vramGb);
  }
  /**
   * Get the recommended tier (0-5) for a given amount of VRAM.
   * (Redundant duplicate threshold branches from the original collapsed;
   * the mapping is unchanged.)
   */
  getRecommendedTier(vramGb) {
    if (vramGb >= 48) return 5;
    if (vramGb >= 16) return 4;
    if (vramGb >= 10) return 3;
    if (vramGb >= 6) return 2;
    if (vramGb >= 1) return 1;
    return 0;
  }
  /** @internal True when `python3 --version` runs successfully. */
  hasPython3() {
    const python = spawnSync("python3", [
      "--version"
    ], {
      stdio: "pipe"
    });
    return python.status === 0 && !python.error;
  }
  /** @internal True when `python3 -c "import <moduleName>"` exits 0. */
  canImportPyModule(moduleName) {
    const probe = spawnSync("python3", [
      "-c",
      `import ${moduleName}`
    ], {
      stdio: "pipe"
    });
    return probe.status === 0 && !probe.error;
  }
  /**
   * Detect if this node can participate in CPU inference tasks.
   *
   * Requirements:
   * - At least 2 CPU cores
   * - At least 4 GB RAM
   *
   * Returns true when both conditions are met.
   */
  canInference() {
    const hw = this.detectHardware(true);
    return hw.cpuCores >= 2 && hw.ramGb >= 4;
  }
  /**
   * Detect if this node can run micro-transformer training.
   *
   * Requirements:
   * - python3 is available in PATH
   * - torch is importable (PyTorch installed)
   *
   * Returns true when both are satisfied.
   */
  canTrain() {
    return this.hasPython3() && this.canImportPyModule("torch");
  }
  /**
   * Detect if this node can participate in DiLoCo distributed training.
   *
   * Requirements:
   * - GPU or MPS available (gpuVramGb > 0)
   * - python3 available
   * - torch importable
   * - peft importable (LoRA adapters)
   *
   * Returns true when all conditions are met.
   * @param {object} [hardware] pre-computed hardware profile; detected fresh when omitted.
   */
  canDiLoCo(hardware) {
    const hw = hardware ?? this.detectHardware(false);
    if (hw.gpuVramGb <= 0) {
      return false;
    }
    return this.hasPython3() && this.canImportPyModule("torch") && this.canImportPyModule("peft");
  }
  /**
   * Build capabilities list from hardware.
   * Includes 'training' if Python + torch are available.
   * Includes 'diloco' and 'gpu' if canDiLoCo() returns true.
   */
  buildCapabilities(hardware) {
    const caps = [];
    if (hardware.cpuCores > 0) caps.push("cpu");
    if (hardware.gpuVramGb > 0) caps.push("gpu");
    if (this.canTrain()) {
      caps.push("training");
      if (hardware.gpuVramGb > 0) caps.push("gpu_training");
    }
    if (this.canDiLoCo(hardware)) {
      if (!caps.includes("diloco")) caps.push("diloco");
      if (!caps.includes("gpu")) caps.push("gpu");
    }
    if (this.canInference()) {
      caps.push("cpu_inference");
      if (hardware.gpuVramGb > 0) caps.push("gpu_inference");
    }
    return caps;
  }
};
|
|
419
|
+
// Apply the NestJS @Injectable() decorator so the DI container can
// construct and inject HardwareHelper.
HardwareHelper = _ts_decorate([
  Injectable()
], HardwareHelper);
|
|
422
|
+
var detectAppleSilicon = /* @__PURE__ */ __name((hardware, model) => new HardwareHelper().detectAppleSilicon(hardware, model), "detectAppleSilicon");
|
|
423
|
+
var detectNvidiaGPU = /* @__PURE__ */ __name((hardware, smiOutput) => new HardwareHelper().detectNvidiaGPU(hardware, smiOutput), "detectNvidiaGPU");
|
|
424
|
+
var detectHardware = /* @__PURE__ */ __name((cpuOnly, archOverride) => new HardwareHelper().detectHardware(cpuOnly ?? false, archOverride), "detectHardware");
|
|
425
|
+
var getTierName = /* @__PURE__ */ __name((tier) => new HardwareHelper().getTierName(tier), "getTierName");
|
|
426
|
+
var buildOsString = /* @__PURE__ */ __name((platform2, release2, arch2, osType) => new HardwareHelper().buildOsString(platform2, release2, arch2, osType), "buildOsString");
|
|
427
|
+
var estimateAppleSiliconVram = /* @__PURE__ */ __name((model) => new HardwareHelper().estimateAppleSiliconVram(model), "estimateAppleSiliconVram");
|
|
428
|
+
var parseNvidiaSmiOutput = /* @__PURE__ */ __name((smiOutput) => new HardwareHelper().parseNvidiaSmiOutput(smiOutput), "parseNvidiaSmiOutput");
|
|
429
|
+
var getSystemInfo = /* @__PURE__ */ __name((archOverride) => new HardwareHelper().getSystemInfo(archOverride), "getSystemInfo");
|
|
430
|
+
var getCompatibleModels = /* @__PURE__ */ __name((vramGb, allModels) => new HardwareHelper().getCompatibleModels(vramGb, allModels ?? []), "getCompatibleModels");
|
|
431
|
+
var getRecommendedTier = /* @__PURE__ */ __name((vramGb) => new HardwareHelper().getRecommendedTier(vramGb), "getRecommendedTier");
|
|
432
|
+
var canTrain = /* @__PURE__ */ __name(() => new HardwareHelper().canTrain(), "canTrain");
|
|
433
|
+
var canDiLoCo = /* @__PURE__ */ __name((hardware) => new HardwareHelper().canDiLoCo(hardware), "canDiLoCo");
|
|
434
|
+
var canInference = /* @__PURE__ */ __name(() => new HardwareHelper().canInference(), "canInference");
|
|
435
|
+
var buildCapabilities = /* @__PURE__ */ __name((hardware) => new HardwareHelper().buildCapabilities(hardware), "buildCapabilities");
|
|
436
|
+
|
|
437
|
+
export {
|
|
438
|
+
HardwareHelper,
|
|
439
|
+
detectAppleSilicon,
|
|
440
|
+
detectNvidiaGPU,
|
|
441
|
+
detectHardware,
|
|
442
|
+
getTierName,
|
|
443
|
+
buildOsString,
|
|
444
|
+
estimateAppleSiliconVram,
|
|
445
|
+
parseNvidiaSmiOutput,
|
|
446
|
+
getSystemInfo,
|
|
447
|
+
getCompatibleModels,
|
|
448
|
+
getRecommendedTier,
|
|
449
|
+
canTrain,
|
|
450
|
+
canDiLoCo,
|
|
451
|
+
canInference,
|
|
452
|
+
buildCapabilities
|
|
453
|
+
};
|