@synapseia-network/node 0.8.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/LICENSE +105 -0
  2. package/README.md +232 -0
  3. package/dist/bid-responder-Q725ZIUC.js +86 -0
  4. package/dist/bootstrap.js +22 -0
  5. package/dist/chain-info-lightweight-2UWAQZBF.js +303 -0
  6. package/dist/chat-stream-handler-BSHSGMFF.js +127 -0
  7. package/dist/chunk-2X7MSWD4.js +270 -0
  8. package/dist/chunk-3BHRQWSM.js +531 -0
  9. package/dist/chunk-5QFTU52A.js +442 -0
  10. package/dist/chunk-5ZAJBIAV.js +25 -0
  11. package/dist/chunk-7FLDR5NT.js +186 -0
  12. package/dist/chunk-C5XRYLYP.js +137 -0
  13. package/dist/chunk-D7ADMHK2.js +36 -0
  14. package/dist/chunk-DXUYWRO7.js +23 -0
  15. package/dist/chunk-F5UDK56Z.js +289 -0
  16. package/dist/chunk-NEHR6XY7.js +111 -0
  17. package/dist/chunk-NMJVODKH.js +453 -0
  18. package/dist/chunk-PRVT22SM.js +324 -0
  19. package/dist/chunk-T2ZRG5CX.js +1380 -0
  20. package/dist/chunk-V2L5SXTL.js +88 -0
  21. package/dist/chunk-XL2NJWFY.js +702 -0
  22. package/dist/embedding-C6GE3WVM.js +16 -0
  23. package/dist/hardware-ITQQJ5YI.js +37 -0
  24. package/dist/index.js +16836 -0
  25. package/dist/inference-server-CIGRJ36H.js +25 -0
  26. package/dist/local-cors-J6RWNMMD.js +44 -0
  27. package/dist/model-catalog-C53SDFMG.js +15 -0
  28. package/dist/model-discovery-LA6YMT3I.js +10 -0
  29. package/dist/ollama-XVXA3A37.js +9 -0
  30. package/dist/rewards-vault-cli-HW7H4EMD.js +147 -0
  31. package/dist/scripts/create_nodes.sh +6 -0
  32. package/dist/scripts/diloco_train.py +319 -0
  33. package/dist/scripts/train_lora.py +237 -0
  34. package/dist/scripts/train_micro.py +586 -0
  35. package/dist/trainer-HQMV2ZAR.js +21 -0
  36. package/package.json +128 -0
  37. package/scripts/create_nodes.sh +6 -0
  38. package/scripts/diloco_train.py +319 -0
  39. package/scripts/train_lora.py +237 -0
  40. package/scripts/train_micro.py +586 -0
@@ -0,0 +1,324 @@
1
+ import { fileURLToPath as __synFup } from "url";import { dirname as __synDn } from "path";const __filename = __synFup(import.meta.url);const __dirname = __synDn(__filename);
2
+ import {
3
+ init_logger,
4
+ logger_default
5
+ } from "./chunk-V2L5SXTL.js";
6
+ import {
7
+ __name
8
+ } from "./chunk-D7ADMHK2.js";
9
+
10
// src/modules/model/model-catalog.ts
// Eagerly initialize the shared logger chunk so logger_default is usable below.
init_logger();
import { Injectable } from "@nestjs/common";
import { execSync } from "child_process";
14
// Compiler-emitted TypeScript decorator helper (equivalent to tslib's
// __decorate): applies `decorators` to a class (3 args) or to a class member
// (4 args, with property descriptor). Prefers Reflect.decorate when the
// metadata polyfill is present; otherwise applies decorators right-to-left.
function _ts_decorate(decorators, target, key, desc) {
// c < 3: class decoration, r is the class itself; c >= 3: member decoration,
// r is the (possibly looked-up) property descriptor.
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
// For member decoration, write the (possibly replaced) descriptor back.
return c > 3 && r && Object.defineProperty(target, key, r), r;
}
__name(_ts_decorate, "_ts_decorate");
21
// Catalog of locally runnable models. Entry fields:
//   name            - normalized model id (lowercase, ":" tags flattened to "-";
//                     see ModelCatalogHelper.normalizeModelName)
//   minVram         - minimum VRAM in GB needed to run the model
//   recommendedTier - hardware tier (0-4) the model is recommended for
//   category        - "embedding" | "general" | "code"
//   description     - human-readable summary
var MODEL_CATALOG = [
// Embedding models
{
name: "locusai/all-minilm-l6-v2",
minVram: 0.1,
recommendedTier: 0,
category: "embedding",
description: "Lightweight embedding model for vector search"
},
// General models (small)
{
name: "qwen2.5-0.5b",
minVram: 1,
recommendedTier: 1,
category: "general",
description: "Tiny general-purpose model"
},
{
name: "gemma-3-1b-web",
minVram: 2,
recommendedTier: 1,
category: "general",
description: "Small web-optimized general model"
},
{
name: "phi-2",
minVram: 2,
recommendedTier: 1,
category: "general",
description: "Microsoft Phi-2 small model"
},
{
name: "tiny-vicuna-1b",
minVram: 1,
recommendedTier: 1,
category: "general",
description: "Tiny general-purpose model"
},
{
name: "home-3b-v3",
minVram: 2,
recommendedTier: 1,
category: "general",
description: "Home-3B v3 small model"
},
{
name: "qwen2-0.5b",
minVram: 1,
recommendedTier: 1,
category: "general",
description: "Qwen2 0.5B tiny model"
},
{
name: "qwen2-0.5b-instruct",
minVram: 1,
recommendedTier: 1,
category: "general",
description: "Qwen2 0.5B instruct-tuned"
},
// Code models (small)
{
name: "qwen2.5-coder-0.5b",
minVram: 1,
recommendedTier: 1,
category: "code",
description: "Tiny code model"
},
{
name: "qwen2.5-coder-1.5b",
minVram: 2,
recommendedTier: 1,
category: "code",
description: "Small code model"
},
// Code models (medium) -- this entry is category "code", not "general"
{
name: "qwen2.5-coder-3b",
minVram: 3,
recommendedTier: 2,
category: "code",
description: "Medium code model"
},
// General models (small-medium)
{
name: "gemma-3-1b",
minVram: 2,
recommendedTier: 1,
category: "general",
description: "Google Gemma 3 1B"
},
{
name: "gemma-3-4b",
minVram: 4,
recommendedTier: 2,
category: "general",
description: "Google Gemma 3 4B"
},
// Code models (medium-large)
{
name: "qwen2.5-coder-7b",
minVram: 6,
recommendedTier: 2,
category: "code",
description: "7B code model"
},
// General models (medium-large)
{
name: "glm-4-9b",
minVram: 8,
recommendedTier: 2,
category: "general",
description: "GLM-4 9B general model"
},
{
name: "mistral-7b-instruct",
minVram: 6,
recommendedTier: 2,
category: "general",
description: "Mistral 7B instruct model"
},
// General models (large)
{
name: "gemma-3-12b",
minVram: 10,
recommendedTier: 3,
category: "general",
description: "Google Gemma 3 12B"
},
{
name: "llama-3.1-8b-instruct",
minVram: 10,
recommendedTier: 3,
category: "general",
description: "Meta Llama 3.1 8B instruct"
},
// NOTE(review): tier-1 entry placed among the large models -- ordering is
// cosmetic (lookups filter/sort; they do not rely on array order).
{
name: "llama-3.2-1b-instruct",
minVram: 1,
recommendedTier: 1,
category: "general",
description: "Meta Llama 3.2 1B instruct"
},
// Code models (large)
{
name: "qwen2.5-coder-14b",
minVram: 12,
recommendedTier: 3,
category: "code",
description: "14B code model"
},
// General models (very large)
{
name: "gpt-oss-20b",
minVram: 16,
recommendedTier: 4,
category: "general",
description: "20B general model"
},
{
name: "gemma-3-27b",
minVram: 20,
recommendedTier: 4,
category: "general",
description: "Google Gemma 3 27B"
},
// Code models (very large)
{
name: "qwen2.5-coder-32b",
minVram: 24,
recommendedTier: 4,
category: "code",
description: "32B code model (recommended)"
},
// General models (ultra-large)
{
name: "glm-4.7-flash",
minVram: 24,
recommendedTier: 4,
category: "general",
description: "GLM-4.7 Flash ultra model"
},
// Code models (ultra-large) -- this entry is category "code", not "general"
{
name: "qwen3-coder-30b-a3b",
minVram: 24,
recommendedTier: 4,
category: "code",
description: "Qwen3 Coder 30B A3B"
}
];
208
// Cloud-only models. Same shape as MODEL_CATALOG entries plus `isCloud: true`,
// which VRAM-based selection (getModelsForVram) uses to exclude them; minVram 0
// and tier 0 because nothing runs on local hardware.
var CLOUD_MODELS = [
{
name: "gemini-2.0-flash",
minVram: 0,
recommendedTier: 0,
category: "general",
description: "Google Gemini 2.0 Flash (cloud-only)",
isCloud: true
},
{
name: "Minimax2.7",
minVram: 0,
recommendedTier: 0,
category: "general",
description: "Minimax2.7 (cloud-only)",
isCloud: true
},
{
name: "Kimi2.5",
minVram: 0,
recommendedTier: 0,
category: "general",
description: "Kimi2.5 (cloud-only)",
isCloud: true
}
];
234
// Combined catalog: every locally runnable model followed by the cloud-only
// entries, as a fresh array (neither source list is aliased or mutated).
var FULL_CATALOG = MODEL_CATALOG.concat(CLOUD_MODELS);
238
// NestJS-injectable helper for querying the model catalog and for talking to a
// local Ollama daemon (pull models, list what is already downloaded).
var ModelCatalogHelper = class {
  static {
    __name(this, "ModelCatalogHelper");
  }
  /**
   * List catalog models (local and cloud).
   * @param category - optional filter: "embedding" | "general" | "code".
   */
  listModels(category) {
    if (category) return FULL_CATALOG.filter((m) => m.category === category);
    return FULL_CATALOG;
  }
  /** Local (non-cloud) models whose VRAM requirement fits within `vramGb` GB. */
  getModelsForVram(vramGb) {
    return FULL_CATALOG.filter((m) => m.minVram <= vramGb && !m.isCloud);
  }
  /** Exact-name lookup across the full catalog; undefined when not found. */
  getModel(name) {
    return FULL_CATALOG.find((m) => m.name === name);
  }
  /**
   * Pull a model through the local Ollama CLI.
   * @param name - model name/tag to pull.
   * @returns true on success.
   * @throws Error when the name is unsafe, Ollama is unreachable, or the pull fails.
   */
  async pullModel(name) {
    // SECURITY: `name` is interpolated into shell commands below; reject any
    // value containing shell metacharacters to prevent command injection.
    if (typeof name !== "string" || !/^[A-Za-z0-9][A-Za-z0-9._:/-]*$/.test(name)) {
      throw new Error(`Invalid model name: ${name}`);
    }
    // Resolve the daemon URL the same way getLocalModels() does, so the health
    // check also works when Ollama runs on another host (e.g. Docker Compose)
    // instead of being hardcoded to localhost.
    const baseUrl = process.env.OLLAMA_URL ?? "http://localhost:11434";
    try {
      // Cheap reachability probe before starting a long-running pull.
      execSync(`curl -s ${baseUrl}/api/tags`, {
        stdio: "pipe",
        timeout: 1e3
      });
    } catch {
      throw new Error("Ollama is not running. Start it with: ollama serve");
    }
    try {
      logger_default.log(`[ModelCatalog] pulling model ${name}...`);
      // stdio "inherit" streams Ollama's progress output to the console.
      execSync(`ollama pull ${name}`, {
        stdio: "inherit"
      });
      return true;
    } catch (error) {
      throw new Error(`Failed to pull model ${name}: ${error}`);
    }
  }
  /**
   * List locally available Ollama models.
   * @param ollamaUrl - Optional base URL (e.g. `http://ollama:11434`). Falls back to
   * `OLLAMA_URL` env var, then `http://localhost:11434`. Needed when Ollama runs on
   * a different host (e.g. Docker Compose, where `localhost` inside the node
   * container is not the ollama service).
   * @returns model tag names, or [] when the daemon is unreachable or the
   * response cannot be parsed (best-effort by design).
   */
  getLocalModels(ollamaUrl) {
    const baseUrl = ollamaUrl ?? process.env.OLLAMA_URL ?? "http://localhost:11434";
    try {
      const response = execSync(`curl -s ${baseUrl}/api/tags`, {
        encoding: "utf-8"
      });
      const data = JSON.parse(response);
      // Guard the payload shape explicitly rather than relying on the
      // catch-all below to swallow a TypeError.
      return Array.isArray(data.models) ? data.models.map((m) => m.name) : [];
    } catch {
      return [];
    }
  }
  /** True when `name` exactly matches a locally downloaded model tag. */
  isModelAvailable(name) {
    return this.getLocalModels().includes(name);
  }
  /**
   * Pick the best local model for a hardware tier.
   * @param tier - hardware tier; translated to a VRAM budget of 16 GB per tier.
   * @param category - optional category filter.
   * @returns the best match, or undefined when nothing fits.
   */
  getRecommendedModel(tier, category) {
    let models = this.getModelsForVram(tier * 16);
    if (category) models = models.filter((m) => m.category === category);
    // Lowest recommendedTier first; within a tier prefer the largest model
    // that still fits the budget. (Sorting in place is safe: `models` is
    // always a fresh filter() result.)
    models.sort((a, b) => {
      if (a.recommendedTier !== b.recommendedTier) return a.recommendedTier - b.recommendedTier;
      return b.minVram - a.minVram;
    });
    return models[0];
  }
  /** Defensive shallow copy of the local-model catalog (cloud models excluded). */
  getModelCatalog() {
    return [
      ...MODEL_CATALOG
    ];
  }
  /** Normalize a raw name to catalog form, e.g. "ollama/qwen2:0.5b" -> "qwen2-0.5b". */
  normalizeModelName(name) {
    return name.replace(/^ollama\//, "").replace(/:/g, "-").toLowerCase();
  }
  /** Local-catalog lookup by raw or normalized name; null when not found. */
  getModelByName(name) {
    const normalized = this.normalizeModelName(name);
    return MODEL_CATALOG.find((m) => m.name === normalized || m.name === name) || null;
  }
};
315
// Apply @Injectable() via the compiler-emitted helper so the class can be
// provided through NestJS dependency injection.
ModelCatalogHelper = _ts_decorate([
Injectable()
], ModelCatalogHelper);
318
+
319
// Named exports of this bundle chunk.
export {
MODEL_CATALOG,
CLOUD_MODELS,
FULL_CATALOG,
ModelCatalogHelper
};