chainlesschain 0.47.8 → 0.49.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/bin/chainlesschain.js +0 -0
  2. package/package.json +10 -8
  3. package/src/assets/web-panel/.build-hash +1 -1
  4. package/src/assets/web-panel/assets/{AppLayout-6SPt_8Y_.js → AppLayout-Rvi759IS.js} +1 -1
  5. package/src/assets/web-panel/assets/Dashboard-BS-tzGNj.css +1 -0
  6. package/src/assets/web-panel/assets/{Dashboard-Br7kCwKJ.js → Dashboard-DBhFxXYQ.js} +2 -2
  7. package/src/assets/web-panel/assets/{index-tN-8TosE.js → index-uL0cZ8N_.js} +2 -2
  8. package/src/assets/web-panel/index.html +2 -2
  9. package/src/commands/activitypub.js +533 -0
  10. package/src/commands/codegen.js +303 -0
  11. package/src/commands/collab.js +482 -0
  12. package/src/commands/compliance.js +597 -6
  13. package/src/commands/crosschain.js +382 -0
  14. package/src/commands/dbevo.js +388 -0
  15. package/src/commands/dev.js +411 -0
  16. package/src/commands/federation.js +427 -0
  17. package/src/commands/fusion.js +332 -0
  18. package/src/commands/governance.js +505 -0
  19. package/src/commands/hardening.js +110 -0
  20. package/src/commands/incentive.js +373 -0
  21. package/src/commands/inference.js +304 -0
  22. package/src/commands/infra.js +361 -0
  23. package/src/commands/kg.js +371 -0
  24. package/src/commands/marketplace.js +326 -0
  25. package/src/commands/matrix.js +283 -0
  26. package/src/commands/mcp.js +441 -18
  27. package/src/commands/nlprog.js +329 -0
  28. package/src/commands/nostr.js +196 -7
  29. package/src/commands/ops.js +408 -0
  30. package/src/commands/perception.js +385 -0
  31. package/src/commands/pqc.js +34 -0
  32. package/src/commands/privacy.js +345 -0
  33. package/src/commands/quantization.js +280 -0
  34. package/src/commands/recommend.js +336 -0
  35. package/src/commands/reputation.js +349 -0
  36. package/src/commands/runtime.js +500 -0
  37. package/src/commands/sla.js +352 -0
  38. package/src/commands/social.js +265 -0
  39. package/src/commands/stress.js +252 -0
  40. package/src/commands/tech.js +268 -0
  41. package/src/commands/tenant.js +576 -0
  42. package/src/commands/trust.js +366 -0
  43. package/src/harness/mcp-client.js +330 -54
  44. package/src/index.js +114 -0
  45. package/src/lib/activitypub-bridge.js +623 -0
  46. package/src/lib/aiops.js +523 -0
  47. package/src/lib/autonomous-developer.js +524 -0
  48. package/src/lib/code-agent.js +442 -0
  49. package/src/lib/collaboration-governance.js +556 -0
  50. package/src/lib/community-governance.js +649 -0
  51. package/src/lib/compliance-framework-reporter.js +600 -0
  52. package/src/lib/content-recommendation.js +600 -0
  53. package/src/lib/cross-chain.js +669 -0
  54. package/src/lib/dbevo.js +669 -0
  55. package/src/lib/decentral-infra.js +445 -0
  56. package/src/lib/federation-hardening.js +587 -0
  57. package/src/lib/hardening-manager.js +409 -0
  58. package/src/lib/inference-network.js +407 -0
  59. package/src/lib/knowledge-graph.js +530 -0
  60. package/src/lib/matrix-bridge.js +252 -0
  61. package/src/lib/mcp-client.js +3 -0
  62. package/src/lib/mcp-registry.js +347 -0
  63. package/src/lib/mcp-scaffold.js +385 -0
  64. package/src/lib/multimodal.js +698 -0
  65. package/src/lib/nl-programming.js +595 -0
  66. package/src/lib/nostr-bridge.js +214 -38
  67. package/src/lib/perception.js +500 -0
  68. package/src/lib/pqc-manager.js +141 -9
  69. package/src/lib/privacy-computing.js +575 -0
  70. package/src/lib/protocol-fusion.js +535 -0
  71. package/src/lib/quantization.js +362 -0
  72. package/src/lib/reputation-optimizer.js +509 -0
  73. package/src/lib/skill-marketplace.js +397 -0
  74. package/src/lib/sla-manager.js +484 -0
  75. package/src/lib/social-graph.js +408 -0
  76. package/src/lib/stix-parser.js +167 -0
  77. package/src/lib/stress-tester.js +383 -0
  78. package/src/lib/tech-learning-engine.js +651 -0
  79. package/src/lib/tenant-saas.js +831 -0
  80. package/src/lib/threat-intel.js +268 -0
  81. package/src/lib/token-incentive.js +513 -0
  82. package/src/lib/topic-classifier.js +400 -0
  83. package/src/lib/trust-security.js +473 -0
  84. package/src/lib/ueba.js +403 -0
  85. package/src/lib/universal-runtime.js +771 -0
  86. package/src/assets/web-panel/assets/Dashboard-CKeMmCoT.css +0 -1
@@ -0,0 +1,400 @@
1
+ /**
2
+ * Topic Classifier — language-aware, multilingual.
3
+ *
4
+ * Pragmatic alternative to a neural classifier. Improves on boolean keyword
5
+ * matching by:
6
+ * 1. Detecting language via Unicode ranges (zh / ja / en / other) so
7
+ * CJK text is tokenized per-character instead of being stringified
8
+ * as one whitespace-less blob.
9
+ * 2. Scoring with term frequency (TF) against per-language lexicons,
10
+ * then normalizing so scores are comparable across topics.
11
+ *
12
+ * Deep-learning-enhanced classification is deferred to the LLM manager
13
+ * (Desktop main-process); this CLI classifier is offline and fully
14
+ * deterministic, which is what `cc social analyze` needs.
15
+ *
16
+ * Languages: zh (Chinese), ja (Japanese), en (English), other.
17
+ */
18
+
19
/* ── Unicode ranges ────────────────────────────────────────── */

// Han ideographs: CJK Unified Ideographs plus Extension A.
// These code points are shared between Chinese and Japanese text.
const RE_HAN = /[\u3400-\u4dbf\u4e00-\u9fff]/;
// Hiragana (U+3040–U+309F) and Katakana (U+30A0–U+30FF) are contiguous,
// so a single merged range covers both. Kana only occurs in Japanese.
const RE_KANA = /[\u3040-\u30ff]/;
// Basic Latin letters.
// NOTE(review): currently unreferenced within this module — detectLanguage
// builds its own /[A-Za-z]/ matches inline. Kept for API stability.
const RE_LATIN = /[a-zA-Z]/;
27
+
28
/**
 * Detect the dominant language of `text`.
 *
 * Heuristic precedence:
 *   1. Any kana (Hiragana/Katakana) → "ja" — kana is unique to Japanese.
 *   2. Any Han ideograph → "zh" (Han alone defaults to Chinese).
 *   3. Latin letters make up at least half of all letters → "en".
 *   4. Anything else (non-strings, blanks, other scripts) → "other".
 *
 * @param {string} text
 * @returns {"ja"|"zh"|"en"|"other"}
 */
export function detectLanguage(text) {
  if (typeof text !== "string") return "other";
  const sample = text.trim();
  if (!sample) return "other";
  // Kana is exclusive to Japanese; check it before the shared Han range.
  if (/[\u3040-\u309f\u30a0-\u30ff]/.test(sample)) return "ja";
  if (/[\u3400-\u4dbf\u4e00-\u9fff]/.test(sample)) return "zh";
  // Count Latin letters against all "letter-like" characters (everything
  // that is not whitespace, an ASCII digit, punctuation, or a symbol).
  const latin = sample.match(/[A-Za-z]/g)?.length ?? 0;
  const letters = sample.match(/[^\s\d\p{P}\p{S}]/gu)?.length ?? 0;
  return letters > 0 && latin / letters >= 0.5 ? "en" : "other";
}
46
+
47
/* ── Tokenization ──────────────────────────────────────────── */

/**
 * Tokenize `text` with a language-appropriate strategy:
 * - "en" / "other": lowercase, split on anything outside [a-z0-9_].
 * - "zh" / "ja": every kana/Han character becomes its own token (no real
 *   segmenter is available, and single ideographs still carry meaning),
 *   and any embedded Latin words are additionally split out as above.
 *
 * @param {string} text
 * @param {string} [lang] Language code; detected from `text` when omitted.
 * @returns {string[]} Tokens (empty array for empty/falsy input).
 */
export function tokenize(text, lang) {
  if (!text) return [];
  const language = lang || detectLanguage(text);
  // Lowercased word splitter shared by the Latin paths.
  const latinWords = (s) =>
    s.toLowerCase().split(/[^a-z0-9_]+/u).filter(Boolean);
  if (language === "en" || language === "other") {
    return latinWords(text);
  }
  // zh/ja: one token per CJK character, then any Latin words in the text.
  const cjk = /[\u3040-\u309f\u30a0-\u30ff\u3400-\u4dbf\u4e00-\u9fff]/;
  const tokens = [];
  for (const ch of text) {
    if (cjk.test(ch)) tokens.push(ch);
  }
  return tokens.concat(latinWords(text));
}
78
+
79
/* ── Lexicons ──────────────────────────────────────────────── */

/**
 * Default lexicons. Shape:
 *   DEFAULT_TOPIC_LEXICONS[topic][lang] = Array<string | [string, number]>
 *
 * Keywords are matched language-aware: English keywords are compared
 * against tokenized English words by exact equality; zh/ja keywords can be
 * either single CJK characters or multi-char phrases, which are matched by
 * substring inclusion against the raw text (see _matchCount).
 *
 * NOTE: Object.freeze is shallow — the nested per-language arrays remain
 * mutable; treat them as read-only by convention.
 */
export const DEFAULT_TOPIC_LEXICONS = Object.freeze({
  tech: {
    en: ["tech", "technology", "software", "ai", "algorithm", "code", "developer", "programming", "cloud", "startup"],
    zh: ["科技", "技术", "软件", "算法", "编程", "程序", "开发", "云计算", "人工智能", "互联网"],
    ja: ["テクノロジー", "技術", "ソフトウェア", "プログラム", "開発", "人工知能", "クラウド", "システム"],
  },
  sports: {
    en: ["sport", "sports", "game", "match", "football", "basketball", "soccer", "olympic", "tennis", "team"],
    zh: ["体育", "运动", "比赛", "足球", "篮球", "奥运", "网球", "联赛", "冠军"],
    ja: ["スポーツ", "試合", "サッカー", "野球", "バスケ", "オリンピック", "優勝"],
  },
  health: {
    en: ["health", "medical", "doctor", "hospital", "disease", "vaccine", "patient", "therapy", "clinic", "surgery"],
    zh: ["健康", "医疗", "医生", "医院", "疾病", "疫苗", "治疗", "诊所", "手术"],
    ja: ["健康", "医療", "医師", "病院", "病気", "ワクチン", "治療"],
  },
  food: {
    en: ["food", "restaurant", "cuisine", "cooking", "chef", "recipe", "dish", "meal", "dessert"],
    zh: ["美食", "餐厅", "料理", "烹饪", "厨师", "菜谱", "甜点", "小吃"],
    ja: ["料理", "レストラン", "グルメ", "料理人", "デザート", "食事"],
  },
  travel: {
    en: ["travel", "tourism", "hotel", "trip", "flight", "vacation", "tourist", "destination", "cruise"],
    zh: ["旅行", "旅游", "酒店", "景点", "机票", "度假", "邮轮"],
    ja: ["旅行", "ホテル", "観光", "フライト", "休暇", "旅"],
  },
  politics: {
    en: ["politics", "government", "election", "policy", "president", "congress", "senate", "vote", "campaign"],
    zh: ["政治", "政府", "选举", "政策", "总统", "议会", "投票", "竞选"],
    ja: ["政治", "政府", "選挙", "政策", "大統領", "国会", "投票"],
  },
  finance: {
    en: ["finance", "economy", "stock", "investment", "market", "bank", "trade", "currency", "crypto", "inflation"],
    zh: ["金融", "经济", "股票", "投资", "市场", "银行", "贸易", "货币", "加密"],
    ja: ["金融", "経済", "株式", "投資", "市場", "銀行", "取引", "通貨"],
  },
  entertainment: {
    en: ["entertainment", "movie", "film", "music", "celebrity", "concert", "series", "actor", "singer"],
    zh: ["娱乐", "电影", "音乐", "明星", "演唱会", "电视剧", "演员", "歌手"],
    ja: ["映画", "音楽", "芸能", "コンサート", "ドラマ", "俳優", "歌手"],
  },
});
273
+
274
// User-registered lexicon overrides. Indexed by topic → lang → keywords[].
const _customLexicons = new Map();

/**
 * Register or override a topic lexicon.
 *
 * @param {string} topic Topic identifier (must be non-empty).
 * @param {Object} lexicon Per-language keywords, e.g.
 *   `{ en: [...], zh: [...], ja: [...] }`.
 * @throws {Error} When `topic` is falsy or `lexicon` is not an object.
 */
export function registerTopicLexicon(topic, lexicon) {
  if (!topic) throw new Error("topic is required");
  const isObject = lexicon !== null && typeof lexicon === "object";
  if (!isObject) {
    throw new Error("lexicon object is required");
  }
  // Stored by reference: later mutations by the caller are visible here.
  _customLexicons.set(topic, lexicon);
}

/**
 * Remove a previously registered lexicon override.
 * @returns {boolean} true when an override existed and was removed.
 */
export function unregisterTopicLexicon(topic) {
  return _customLexicons.delete(topic);
}

/**
 * Merge the default lexicons with user-registered overrides.
 * Custom entries win on topic-name collisions.
 * @returns {Object} topic → per-language lexicon map.
 */
export function listTopicLexicons() {
  return Object.fromEntries([
    ...Object.entries(DEFAULT_TOPIC_LEXICONS),
    ..._customLexicons.entries(),
  ]);
}
300
+
301
/* ── Scoring ───────────────────────────────────────────────── */

/**
 * Normalize a lexicon entry to `{ keyword, weight }`.
 * Entries are either a bare keyword (weight 1) or a `[keyword, weight]`
 * pair; a missing (nullish) pair weight defaults to 1.
 */
function _keywordWeight(entry) {
  const [keyword, weight] = Array.isArray(entry) ? entry : [entry];
  return { keyword, weight: weight ?? 1 };
}
307
+
308
/**
 * Count occurrences of `keyword` in the input.
 *
 * - ASCII keywords (`[A-Za-z0-9_]+`) are matched by exact token equality
 *   (tokens are already lowercased by tokenize()).
 * - Anything else (CJK single chars or multi-char phrases such as
 *   "加密货币") is matched by counting non-overlapping substring hits in
 *   the raw text, since tokenize() splits CJK phrases into single chars.
 *
 * @param {string} text    Raw input text (used for the substring path).
 * @param {string[]} tokens Lowercased tokens (used for the ASCII path).
 * @param {string} keyword Keyword to count.
 * @param {string} lang    Currently unused; kept for signature stability.
 * @returns {number} Match count (0 for empty text or empty keyword).
 */
function _matchCount(text, tokens, keyword, lang) {
  // BUGFIX: an empty keyword previously fell into the indexOf() scan,
  // where `idx = hit + keyword.length` never advances (length 0) and the
  // loop spins forever. Guard it out up front.
  if (!keyword) return 0;
  if (/^[A-Za-z0-9_]+$/.test(keyword)) {
    const lower = keyword.toLowerCase();
    let count = 0;
    for (const t of tokens) if (t === lower) count += 1;
    return count;
  }
  // CJK multi-char or single-char phrase: non-overlapping substring count.
  if (!text) return 0;
  let count = 0;
  let idx = 0;
  while (true) {
    const hit = text.indexOf(keyword, idx);
    if (hit === -1) break;
    count += 1;
    idx = hit + keyword.length;
  }
  return count;
}
330
+
331
/**
 * Classify `text` into one or more topics via weighted keyword scoring.
 *
 * @param {string} text
 * @param {Object} [opts]
 * @param {Object} [opts.lexicons] Override per-topic lexicons for this call.
 * @param {number} [opts.topK=3] Return at most this many topics.
 * @param {string} [opts.lang] Override the detected language.
 * @param {number} [opts.minScore=0] Topics whose raw (pre-normalization)
 *   score is not strictly greater than this are dropped.
 * @returns {{
 *   language: string,
 *   tokens: string[],
 *   topics: Array<{ topic: string, score: number, rawScore: number, hits: number }>
 * }}
 */
export function classifyTopic(text, opts = {}) {
  const {
    lexicons: overrideLexicons,
    topK = 3,
    lang: forceLang,
    minScore = 0,
  } = opts;

  const language = forceLang || detectLanguage(text);
  const tokens = tokenize(text, language);
  if (!text || tokens.length === 0) {
    return { language, tokens: [], topics: [] };
  }

  const lexiconSet = overrideLexicons || listTopicLexicons();
  const scored = [];
  for (const [topic, perLang] of Object.entries(lexiconSet)) {
    if (!perLang || typeof perLang !== "object") continue;
    let entries = perLang[language] || [];
    // Fall back to English keywords when the target language has none.
    if (entries.length === 0) entries = perLang.en || [];
    let rawScore = 0;
    let hits = 0;
    for (const entry of entries) {
      const { keyword, weight } = _keywordWeight(entry);
      const count = _matchCount(text, tokens, keyword, language);
      if (count > 0) {
        rawScore += count * weight;
        hits += count;
      }
    }
    if (rawScore > minScore) {
      scored.push({ topic, rawScore, hits });
    }
  }

  // Normalize so per-topic scores sum to 1 across the surviving topics.
  const total = scored.reduce((sum, s) => sum + s.rawScore, 0) || 1;
  const topics = scored
    .sort((a, b) => b.rawScore - a.rawScore)
    .slice(0, topK)
    .map(({ topic, rawScore, hits }) => ({
      topic,
      score: rawScore / total,
      rawScore,
      hits,
    }));

  return { language, tokens, topics };
}
395
+
396
/* ── Reset (for testing) ───────────────────────────────────── */

/**
 * Clear all user-registered lexicon overrides, restoring the defaults.
 * Test helper only; not part of the public CLI surface.
 */
export function _resetState() {
  _customLexicons.clear();
}