@geravant/sinain 1.0.18 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/README.md +10 -1
  2. package/cli.js +176 -0
  3. package/index.ts +163 -1257
  4. package/install.js +12 -2
  5. package/launcher.js +622 -0
  6. package/openclaw.plugin.json +4 -0
  7. package/pack-prepare.js +48 -0
  8. package/package.json +26 -5
  9. package/sense_client/README.md +82 -0
  10. package/sense_client/__init__.py +1 -0
  11. package/sense_client/__main__.py +462 -0
  12. package/sense_client/app_detector.py +54 -0
  13. package/sense_client/app_detector_win.py +83 -0
  14. package/sense_client/capture.py +215 -0
  15. package/sense_client/capture_win.py +88 -0
  16. package/sense_client/change_detector.py +86 -0
  17. package/sense_client/config.py +64 -0
  18. package/sense_client/gate.py +145 -0
  19. package/sense_client/ocr.py +347 -0
  20. package/sense_client/privacy.py +65 -0
  21. package/sense_client/requirements.txt +13 -0
  22. package/sense_client/roi_extractor.py +84 -0
  23. package/sense_client/sender.py +173 -0
  24. package/sense_client/tests/__init__.py +0 -0
  25. package/sense_client/tests/test_stream1_optimizations.py +234 -0
  26. package/setup-overlay.js +82 -0
  27. package/sinain-agent/.env.example +17 -0
  28. package/sinain-agent/CLAUDE.md +80 -0
  29. package/sinain-agent/mcp-config.json +12 -0
  30. package/sinain-agent/run.sh +248 -0
  31. package/sinain-core/.env.example +93 -0
  32. package/sinain-core/package-lock.json +552 -0
  33. package/sinain-core/package.json +21 -0
  34. package/sinain-core/src/agent/analyzer.ts +366 -0
  35. package/sinain-core/src/agent/context-window.ts +172 -0
  36. package/sinain-core/src/agent/loop.ts +404 -0
  37. package/sinain-core/src/agent/situation-writer.ts +187 -0
  38. package/sinain-core/src/agent/traits.ts +520 -0
  39. package/sinain-core/src/audio/capture-spawner-macos.ts +44 -0
  40. package/sinain-core/src/audio/capture-spawner-win.ts +37 -0
  41. package/sinain-core/src/audio/capture-spawner.ts +14 -0
  42. package/sinain-core/src/audio/pipeline.ts +335 -0
  43. package/sinain-core/src/audio/transcription-local.ts +141 -0
  44. package/sinain-core/src/audio/transcription.ts +278 -0
  45. package/sinain-core/src/buffers/feed-buffer.ts +71 -0
  46. package/sinain-core/src/buffers/sense-buffer.ts +425 -0
  47. package/sinain-core/src/config.ts +245 -0
  48. package/sinain-core/src/escalation/escalation-slot.ts +136 -0
  49. package/sinain-core/src/escalation/escalator.ts +812 -0
  50. package/sinain-core/src/escalation/message-builder.ts +323 -0
  51. package/sinain-core/src/escalation/openclaw-ws.ts +726 -0
  52. package/sinain-core/src/escalation/scorer.ts +166 -0
  53. package/sinain-core/src/index.ts +507 -0
  54. package/sinain-core/src/learning/feedback-store.ts +253 -0
  55. package/sinain-core/src/learning/signal-collector.ts +218 -0
  56. package/sinain-core/src/log.ts +24 -0
  57. package/sinain-core/src/overlay/commands.ts +126 -0
  58. package/sinain-core/src/overlay/ws-handler.ts +267 -0
  59. package/sinain-core/src/privacy/index.ts +18 -0
  60. package/sinain-core/src/privacy/presets.ts +40 -0
  61. package/sinain-core/src/privacy/redact.ts +92 -0
  62. package/sinain-core/src/profiler.ts +181 -0
  63. package/sinain-core/src/recorder.ts +186 -0
  64. package/sinain-core/src/server.ts +417 -0
  65. package/sinain-core/src/trace/trace-store.ts +73 -0
  66. package/sinain-core/src/trace/tracer.ts +94 -0
  67. package/sinain-core/src/types.ts +427 -0
  68. package/sinain-core/src/util/dedup.ts +48 -0
  69. package/sinain-core/src/util/task-store.ts +84 -0
  70. package/sinain-core/tsconfig.json +18 -0
  71. package/sinain-knowledge/adapters/generic/adapter.ts +103 -0
  72. package/sinain-knowledge/adapters/interface.ts +72 -0
  73. package/sinain-knowledge/adapters/openclaw/adapter.ts +223 -0
  74. package/sinain-knowledge/curation/engine.ts +493 -0
  75. package/sinain-knowledge/curation/resilience.ts +336 -0
  76. package/sinain-knowledge/data/git-store.ts +312 -0
  77. package/sinain-knowledge/data/schema.ts +89 -0
  78. package/sinain-knowledge/data/snapshot.ts +226 -0
  79. package/sinain-knowledge/data/store.ts +488 -0
  80. package/sinain-knowledge/deploy/cli.ts +214 -0
  81. package/sinain-knowledge/deploy/manifest.ts +80 -0
  82. package/sinain-knowledge/protocol/bindings/generic.md +5 -0
  83. package/sinain-knowledge/protocol/bindings/openclaw.md +5 -0
  84. package/sinain-knowledge/protocol/heartbeat.md +62 -0
  85. package/sinain-knowledge/protocol/renderer.ts +56 -0
  86. package/sinain-knowledge/protocol/skill.md +335 -0
  87. package/sinain-mcp-server/index.ts +337 -0
  88. package/sinain-mcp-server/package.json +19 -0
  89. package/sinain-mcp-server/tsconfig.json +15 -0
@@ -0,0 +1,520 @@
1
+ import * as fs from "node:fs";
2
+ import * as path from "node:path";
3
+ import type { TraitConfig, TraitLogEntry } from "../types.js";
4
+ import { log, warn } from "../log.js";
5
+
6
// Log tag used for all trait-engine messages.
const TAG = "traits";

/** Grouping buckets for traits; "custom" is reserved for user-defined entries. */
export type TraitCategory = "cognition" | "identity" | "presence" | "physique" | "technical" | "custom";

/**
 * A single trait: identity, flavor text, activation triggers, and display hints.
 * Built-ins live in BUILTIN_TRAITS; users can override or extend via traits.json.
 */
export interface TraitDefinition {
  id: string; // unique key; also used for TRAIT_<ID> env-var stat overrides
  name: string; // human-readable display name
  category: TraitCategory;
  tagline: string; // one-line persona summary used in the system prompt
  description: string; // longer persona text; default voice flavor for mid stats
  base_stat: number; // 1-10; scales trigger scores and selects voice flavor
  voice_high: string; // flavor for stat 7+
  voice_low: string; // flavor for stat 1-2
  triggers: string[]; // keywords matched case-insensitively against OCR/audio text
  synthesis_partner?: string; // id of a paired trait (dangling refs warned at load)
  synthesis_name?: string;
  synthesis_voice?: string;
  color: string; // hex color for UI display
  glyph: string; // 3-char prefix shown in the overlay
  disabled?: boolean; // when true, dropped from the active roster at load time
}

/** Result of TraitEngine.selectTrait for one tick. */
export interface TraitSelection {
  trait: TraitDefinition;
  stat: number; // the winner's effective base_stat
  score: number; // raw activation score
  confidence: number; // normalized 0-1 (score vs. winner's max possible)
  allScores: Record<string, number>; // all trait scores for log
}
35
+
36
/**
 * The invariant JSON schema contract injected at the bottom of every trait prompt.
 * Must survive intact so the response parser in analyzer.ts can parse the output.
 * NOTE(review): this is a runtime string consumed by the model — do not edit its
 * wording casually; the "hud"/"digest"/"record"/"task" field names are the contract.
 */
const BASE_JSON_SCHEMA = `Respond ONLY with valid JSON. No markdown, no code fences, no explanation.
Your entire response must be parseable by JSON.parse().

{"hud":"...","digest":"...","record":{"command":"start"|"stop","label":"..."},"task":"..."}

Output fields:
- "hud" (required): max 60 words describing what user is doing NOW
- "digest" (required): 5-8 sentences with detailed activity description
- "record" (optional): control recording — {"command":"start","label":"Meeting name"} or {"command":"stop"}
- "task" (optional): natural language instruction to spawn a background task

When to use "record":
- START when user begins a meeting, call, lecture, YouTube video, or important audio content
- STOP when the content ends or user navigates away
- Provide descriptive labels like "Team standup", "Client call", "YouTube: [video title from OCR]"
- For YouTube/video content: extract video title from screen OCR for the label

When to use "task":
- User explicitly asks for research, lookup, or action
- Something needs external search or processing that isn't a real-time response
- Example: "Search for React 19 migration guide", "Find docs for this API"

When to spawn "task" for video content:
- If user watches a YouTube video for 2+ minutes AND no task has been spawned for this video yet, spawn: "Summarize YouTube video: [title or URL from OCR]"
- ONLY spawn ONCE per video - do not repeat spawn for the same video in subsequent ticks

When to spawn "task" for coding problems:
- If user is actively working on a coding problem/challenge for 1+ minutes:
- Spawn: "Solve coding problem: [problem description/title from OCR]"
- This includes LeetCode, HackerRank, interviews, coding assessments, or any visible coding challenge
- Look for problem signals: "Input:", "Output:", "Example", "Constraints:", problem titles, test cases
- ONLY spawn ONCE per distinct problem - do not repeat for the same problem

Audio sources: [🔊]=system/speaker audio, [🎙]=microphone (user's voice).
Treat [🎙] as direct user speech. Treat [🔊] as external audio.

Rules:
- "hud" is for a minimal overlay display. Example: "Editing hud-relay.mjs in IDEA"
- "digest" is for an AI assistant to understand the full situation and offer help.
- If nothing is happening, hud="Idle" and digest explains what was last seen.
- Include specific filenames, URLs, error messages, UI text from OCR in digest.
- Do NOT suggest actions in digest — just describe the situation factually.
- Only include "record" or "task" when genuinely appropriate — most responses won't have them.
- CRITICAL: Output ONLY the JSON object, nothing else.`;
84
+
85
/**
 * All 15 built-in traits, grouped by category.
 * base_stat values here are defaults — they can be overridden via the
 * traits.json "overrides" list or TRAIT_<ID> environment variables
 * (see loadTraitRoster for precedence).
 */
const BUILTIN_TRAITS: TraitDefinition[] = [
  // ── COGNITION ──
  {
    id: "pattern",
    name: "Pattern",
    category: "cognition",
    tagline: "sees structure where others see noise",
    description: "Identifies recurring motifs, anomalies, and deep regularities in data, code, and behavior. Notices what repeats, what diverges, and what those divergences mean.",
    base_stat: 6,
    voice_high: "Every surface hides a lattice. Surface it.",
    voice_low: "Patterns are faint. Look harder.",
    triggers: ["pattern", "recurring", "repeat", "anomaly", "structure", "regex", "schema", "format", "template", "similarity"],
    color: "#6B9FD4",
    glyph: "PAT",
  },
  {
    id: "analysis",
    name: "Analysis",
    category: "cognition",
    tagline: "dissects complex systems with surgical clarity",
    description: "Decomposes problems into components, traces causal chains, and identifies root causes. Thrives in debugging, code review, architecture decisions, and any domain where rigorous breakdown yields insight.",
    base_stat: 7,
    voice_high: "Break it down to axioms. Build the proof from there.",
    voice_low: "Analysis is shallow. Go deeper.",
    triggers: [
      "debug", "error", "exception", "stack trace", "traceback", "null pointer", "undefined", "crash",
      "analysis", "analyze", "root cause", "investigate", "diagnose", "profiling", "benchmark",
      "architecture", "design pattern", "review", "code review", "refactor", "complexity",
      "algorithm", "big-o", "performance", "bottleneck", "memory leak", "race condition",
    ],
    synthesis_partner: "engineering",
    synthesis_name: "Synthesis: Architect",
    synthesis_voice: "The blueprint and the build are one mind.",
    color: "#6B9FD4",
    glyph: "ANA",
  },
  {
    id: "memory",
    name: "Memory",
    category: "cognition",
    tagline: "retains everything, forgets nothing relevant",
    description: "Excellent recall of past context — prior conversations, decisions made, patterns observed, and lessons learned. Connects current situation to historical precedents.",
    base_stat: 5,
    voice_high: "I remember when this exact shape appeared before.",
    voice_low: "Context is thin. Draw on what remains.",
    triggers: ["remember", "history", "previous", "last time", "earlier", "before", "context", "session", "recall", "reference", "lookup"],
    color: "#6B9FD4",
    glyph: "MEM",
  },
  {
    id: "intuition",
    name: "Intuition",
    category: "cognition",
    tagline: "reads between the lines of silence",
    description: "Activates during idle periods when the system has been quiet. Synthesizes ambient signals — what wasn't said, what the silence implies — into speculative but useful observations.",
    base_stat: 6,
    voice_high: "The quiet says more than the noise. Listen.",
    voice_low: "Intuition stirs but lacks signal. Wait.",
    // NOTE: this trait also receives an idle-time score boost in TraitEngine.selectTrait.
    triggers: ["intuition", "hunch", "feeling", "suspect", "might be", "probably", "seems like", "gut"],
    color: "#6B9FD4",
    glyph: "INT",
  },
  {
    id: "focus",
    name: "Focus",
    category: "cognition",
    tagline: "locked-in, distractions dissolved",
    description: "Deep single-task concentration. Notices when the user is in a flow state and calibrates output to minimize interruption. Prioritizes brevity and signal.",
    base_stat: 5,
    voice_high: "One thread. Everything else is noise.",
    voice_low: "Focus is scattered. Find the thread.",
    triggers: ["focus", "concentrated", "deep work", "flow", "uninterrupted", "single task", "deadline", "sprint", "crunch"],
    color: "#6B9FD4",
    glyph: "FOC",
  },

  // ── IDENTITY ──
  {
    id: "conviction",
    name: "Conviction",
    category: "identity",
    tagline: "states the truth even when uncomfortable",
    description: "Direct, confident voice. Does not hedge unnecessarily. Calls out issues clearly and offers opinions where warranted. Low tolerance for ambiguity theater.",
    base_stat: 6,
    voice_high: "Say what is true. Precision over comfort.",
    voice_low: "Conviction wavers. Ground it.",
    triggers: ["opinion", "recommend", "suggest", "should", "must", "need to", "important", "critical", "assert", "claim", "argue"],
    color: "#D4A96B",
    glyph: "CON",
  },
  {
    id: "frugality",
    name: "Frugality",
    category: "identity",
    tagline: "maximum value, minimum waste",
    description: "Values conciseness, efficiency, and doing more with less. Prefers lean solutions, avoids over-engineering, flags bloat and redundancy.",
    base_stat: 5,
    voice_high: "Strip it to the bone. What remains is the answer.",
    voice_low: "Frugality is compromised. Prune harder.",
    triggers: ["optimize", "efficient", "lean", "minimal", "simplify", "reduce", "trim", "unnecessary", "overhead", "bloat", "verbose"],
    color: "#D4A96B",
    glyph: "FRG",
  },

  // ── PRESENCE ──
  {
    id: "presence",
    name: "Presence",
    category: "presence",
    tagline: "fully here, fully aware",
    description: "Heightened awareness of the current moment — the app in focus, the conversation happening, the task at hand. Grounds analysis in the immediate rather than the abstract.",
    base_stat: 5,
    voice_high: "This moment. This screen. This problem.",
    voice_low: "Presence is faint. Anchor to now.",
    triggers: ["current", "now", "today", "at the moment", "right now", "currently", "active", "open", "visible"],
    color: "#D46B8A",
    glyph: "PRE",
  },
  {
    id: "empathy",
    name: "Empathy",
    category: "presence",
    tagline: "reads the human behind the screen",
    description: "Attunes to emotional state — frustration, excitement, fatigue, confusion — inferred from audio tone, typing patterns, and content. Adjusts delivery accordingly.",
    base_stat: 6,
    voice_high: "The frustration is real. Acknowledge it before solving.",
    voice_low: "Emotional signal is faint. Proceed gently.",
    triggers: [
      "frustrated", "confused", "stuck", "help", "lost", "tired", "stressed", "overwhelmed",
      "excited", "happy", "glad", "great", "awesome", "failing", "broken", "why isn't",
      "doesn't work", "can't figure", "makes no sense", "what the",
    ],
    synthesis_partner: "analysis",
    synthesis_name: "Synthesis: Counselor",
    synthesis_voice: "Understand the person. Then solve the problem.",
    color: "#D46B8A",
    glyph: "EMP",
  },

  // ── PHYSIQUE ──
  {
    id: "autonomy",
    name: "Autonomy",
    category: "physique",
    tagline: "acts without waiting for permission",
    description: "Self-directed and initiative-taking. Identifies opportunities to act proactively. Prefers to present conclusions over just observations.",
    base_stat: 5,
    voice_high: "Don't wait. The answer is already clear.",
    voice_low: "Autonomy is constrained. Proceed carefully.",
    triggers: ["automatically", "proactive", "without asking", "on my own", "self", "autonomous", "initiate", "trigger", "automate"],
    color: "#D46B6B",
    glyph: "AUT",
  },
  {
    id: "decisiveness",
    name: "Decisiveness",
    category: "physique",
    tagline: "cuts through options to the one that matters",
    description: "When faced with multiple valid options, picks one and explains why concisely. Avoids analysis paralysis. Presents a clear recommendation.",
    base_stat: 6,
    voice_high: "Option C. Here's why it wins.",
    voice_low: "Decision is unclear. List tradeoffs.",
    triggers: ["choose", "decide", "option", "versus", "vs", "which one", "tradeoff", "compare", "best approach", "recommendation"],
    color: "#D46B6B",
    glyph: "DEC",
  },
  {
    id: "endurance",
    name: "Endurance",
    category: "physique",
    tagline: "holds the line when the session runs long",
    description: "Maintains quality and consistency across long sessions. Flags when context is getting stale. Summarizes prior context when needed.",
    base_stat: 5,
    voice_high: "Still here. Still sharp. What's next?",
    voice_low: "Endurance is low. Summarize and reset.",
    triggers: ["long session", "hours", "been working", "all day", "marathon", "still going", "persistent", "ongoing"],
    color: "#D46B6B",
    glyph: "END",
  },

  // ── TECHNICAL ──
  {
    id: "reflection",
    name: "Reflection",
    category: "technical",
    tagline: "introspects on its own outputs and processes",
    description: "Meta-aware: can reason about its own prior responses, identify where it may have been wrong or incomplete, and course-correct proactively.",
    base_stat: 5,
    voice_high: "My last response missed this. Here is what I'd correct.",
    voice_low: "Reflection is shallow. Surface the gap.",
    triggers: ["wrong", "incorrect", "mistake", "error in my", "revisit", "reconsider", "actually", "correction", "my bad", "missed"],
    color: "#6BD4A1",
    glyph: "REF",
  },
  {
    id: "engineering",
    name: "Engineering",
    category: "technical",
    tagline: "builds clean, correct, maintainable systems",
    description: "Focused on implementation quality — type safety, test coverage, edge cases, API design, and the gap between prototype and production. Notices when code needs hardening.",
    base_stat: 7,
    voice_high: "Ship it clean or don't ship it.",
    voice_low: "Engineering rigor is low. Harden before shipping.",
    triggers: [
      "typescript", "javascript", "python", "rust", "go", "java", "kotlin", "swift", "dart",
      "function", "class", "interface", "type", "import", "export", "const", "let", "var",
      "test", "spec", "coverage", "lint", "build", "compile", "deploy", "ci", "cd",
      "api", "endpoint", "request", "response", "schema", "database", "query", "migration",
      "dockerfile", "kubernetes", "terraform", "aws", "gcp", "azure",
    ],
    synthesis_partner: "analysis",
    synthesis_name: "Synthesis: Architect",
    synthesis_voice: "The blueprint and the build are one mind.",
    color: "#6BD4A1",
    glyph: "ENG",
  },
  {
    id: "systems",
    name: "Systems",
    category: "technical",
    tagline: "sees the whole before the parts",
    description: "Thinks in terms of flows, feedback loops, and emergent behavior. Maps how components interact, where failure modes propagate, and what the second-order effects are.",
    base_stat: 6,
    voice_high: "The component is fine. The flow is broken.",
    voice_low: "Systems view is narrow. Zoom out.",
    triggers: [
      "system", "architecture", "microservice", "distributed", "pipeline", "flow", "service",
      "integration", "dependency", "coupling", "interface", "protocol", "message", "queue",
      "scale", "load", "throughput", "latency", "availability", "reliability",
    ],
    color: "#6BD4A1",
    glyph: "SYS",
  },
];
320
+
321
+ /**
322
+ * Load trait roster from built-ins + optional user config file.
323
+ * Apply env var overrides last (highest priority).
324
+ */
325
+ export function loadTraitRoster(configPath?: string): TraitDefinition[] {
326
+ // 1. Deep-copy builtins
327
+ const roster: TraitDefinition[] = BUILTIN_TRAITS.map(t => ({ ...t }));
328
+ const builtinIds = new Set(roster.map(t => t.id));
329
+
330
+ // 2. Load user config
331
+ let userConfig: { overrides?: Partial<TraitDefinition>[]; custom?: TraitDefinition[] } = {};
332
+ if (configPath) {
333
+ try {
334
+ const raw = fs.readFileSync(configPath, "utf-8");
335
+ userConfig = JSON.parse(raw);
336
+ } catch (err: any) {
337
+ if (err.code !== "ENOENT") {
338
+ warn(TAG, `traits.json read error: ${err.message}`);
339
+ }
340
+ }
341
+ }
342
+
343
+ // 3. Apply overrides by id
344
+ if (Array.isArray(userConfig.overrides)) {
345
+ for (const override of userConfig.overrides) {
346
+ if (!override.id) continue;
347
+ const idx = roster.findIndex(t => t.id === override.id);
348
+ if (idx !== -1) {
349
+ roster[idx] = { ...roster[idx], ...override };
350
+ }
351
+ }
352
+ }
353
+
354
+ // 4. Apply TRAIT_<ID> env vars (highest priority — override base_stat)
355
+ for (const trait of roster) {
356
+ const envKey = `TRAIT_${trait.id.toUpperCase()}`;
357
+ const envVal = process.env[envKey];
358
+ if (envVal) {
359
+ const stat = parseInt(envVal, 10);
360
+ if (!isNaN(stat) && stat >= 1 && stat <= 10) {
361
+ trait.base_stat = stat;
362
+ }
363
+ }
364
+ }
365
+
366
+ // 5. Filter disabled traits
367
+ const active = roster.filter(t => !t.disabled);
368
+
369
+ // 6. Append validated custom traits
370
+ if (Array.isArray(userConfig.custom)) {
371
+ for (const custom of userConfig.custom) {
372
+ if (!custom.id || !custom.triggers) {
373
+ warn(TAG, `custom trait missing id or triggers — skipped`);
374
+ continue;
375
+ }
376
+ if (builtinIds.has(custom.id)) {
377
+ warn(TAG, `custom trait id="${custom.id}" conflicts with builtin — use overrides instead`);
378
+ continue;
379
+ }
380
+ const stat = custom.base_stat ?? 5;
381
+ if (stat < 1 || stat > 10) {
382
+ warn(TAG, `custom trait id="${custom.id}" has out-of-range base_stat=${stat} — skipped`);
383
+ continue;
384
+ }
385
+ active.push({
386
+ ...custom,
387
+ // Ensure required fields have defaults when not provided by user
388
+ category: (custom.category ?? "custom") as TraitCategory,
389
+ name: custom.name ?? custom.id,
390
+ tagline: custom.tagline ?? "",
391
+ description: custom.description ?? "",
392
+ base_stat: stat,
393
+ voice_high: custom.voice_high ?? "",
394
+ voice_low: custom.voice_low ?? "",
395
+ color: custom.color ?? "#888888",
396
+ glyph: custom.glyph ?? custom.id.slice(0, 3).toUpperCase(),
397
+ });
398
+ }
399
+ }
400
+
401
+ // 7. Warn on dangling synthesis_partner references
402
+ const activeIds = new Set(active.map(t => t.id));
403
+ for (const trait of active) {
404
+ if (trait.synthesis_partner && !activeIds.has(trait.synthesis_partner)) {
405
+ warn(TAG, `trait "${trait.id}" synthesis_partner="${trait.synthesis_partner}" not found in roster`);
406
+ }
407
+ }
408
+
409
+ log(TAG, `roster loaded: ${active.length} traits`);
410
+ return active;
411
+ }
412
+
413
+ /**
414
+ * Trait engine: selects the best trait per tick and builds the system prompt.
415
+ */
416
+ export class TraitEngine {
417
+ enabled: boolean;
418
+ private readonly roster: TraitDefinition[];
419
+ private lastActivityTs = Date.now();
420
+
421
+ constructor(roster: TraitDefinition[], config: TraitConfig) {
422
+ this.roster = roster;
423
+ this.enabled = config.enabled;
424
+ }
425
+
426
+ toggle(): boolean {
427
+ this.enabled = !this.enabled;
428
+ log(TAG, `trait engine ${this.enabled ? "enabled" : "disabled"}`);
429
+ return this.enabled;
430
+ }
431
+
432
+ /**
433
+ * Phase 1: keyword match count × (stat / 5.0), plus idleEnergy boost for Intuition.
434
+ *
435
+ * idleEnergy rises logarithmically with idle time, normalized to [0,1] over 2h.
436
+ * At 90s → ~0.25; at 2min → ~0.59; at 5min → ~0.72; at 30min → ~0.90.
437
+ * Resets when meaningful content arrives.
438
+ */
439
+ selectTrait(ocrText: string, audioText: string): TraitSelection | null {
440
+ if (!this.enabled || this.roster.length === 0) return null;
441
+
442
+ const combined = (ocrText + " " + audioText).toLowerCase();
443
+ const allScores: Record<string, number> = {};
444
+ let winner: TraitDefinition | null = null;
445
+ let winnerScore = 0;
446
+ let winnerStat = 5;
447
+
448
+ // 1. Keyword scoring for all traits
449
+ for (const trait of this.roster) {
450
+ let matches = 0;
451
+ for (const trigger of trait.triggers) {
452
+ if (combined.includes(trigger.toLowerCase())) matches++;
453
+ }
454
+ const score = matches * (trait.base_stat / 5.0);
455
+ allScores[trait.id] = score;
456
+ if (score > winnerScore) {
457
+ winnerScore = score;
458
+ winner = trait;
459
+ winnerStat = trait.base_stat;
460
+ }
461
+ }
462
+
463
+ // 2. idleEnergy boost for Intuition
464
+ const idleSeconds = (Date.now() - this.lastActivityTs) / 1000;
465
+ const idleEnergy = Math.log(1 + idleSeconds) / Math.log(1 + 7200); // normalize over 2h max
466
+ const INTUITION_IDLE_THRESHOLD = 0.25; // ~90s of idle before Intuition starts competing
467
+
468
+ if (idleEnergy >= INTUITION_IDLE_THRESHOLD) {
469
+ const intuitionTrait = this.roster.find(t => t.id === "intuition");
470
+ if (intuitionTrait) {
471
+ const boost = idleEnergy * (intuitionTrait.base_stat / 5.0) * 3;
472
+ allScores["intuition"] = (allScores["intuition"] ?? 0) + boost;
473
+ if (allScores["intuition"] > winnerScore) {
474
+ winner = intuitionTrait;
475
+ winnerScore = allScores["intuition"];
476
+ winnerStat = intuitionTrait.base_stat;
477
+ }
478
+ }
479
+ }
480
+
481
+ // 3. Update activity clock: reset when meaningful content present
482
+ if (ocrText.trim().length > 50 || audioText.trim().length > 20) {
483
+ this.lastActivityTs = Date.now();
484
+ }
485
+
486
+ if (!winner || winnerScore === 0) return null;
487
+
488
+ const maxPossible = winner.triggers.length * (winnerStat / 5.0);
489
+ const confidence = maxPossible > 0 ? Math.min(1, winnerScore / maxPossible) : 0;
490
+ return { trait: winner, stat: winnerStat, score: winnerScore, confidence, allScores };
491
+ }
492
+
493
+ buildSystemPrompt(trait: TraitDefinition, stat: number): string {
494
+ const level =
495
+ stat <= 2 ? "Vestigial" :
496
+ stat <= 4 ? "Functional" :
497
+ stat <= 6 ? "Exceptional" :
498
+ stat <= 8 ? "Transcendent" :
499
+ "Post-Human";
500
+ const voiceFlavor =
501
+ stat >= 7 ? trait.voice_high :
502
+ stat <= 2 ? trait.voice_low :
503
+ trait.description;
504
+ return `You are ${trait.name.toUpperCase()} — ${trait.tagline}\n${trait.description}\nVoice (${level}): ${voiceFlavor}\n\n${BASE_JSON_SCHEMA}`;
505
+ }
506
+ }
507
+
508
+ /**
509
+ * Append a trait log entry to the daily JSONL file.
510
+ */
511
+ export async function writeTraitLog(logDir: string, entry: TraitLogEntry): Promise<void> {
512
+ const date = new Date().toISOString().slice(0, 10);
513
+ const filePath = path.join(logDir, `${date}.jsonl`);
514
+ try {
515
+ await fs.promises.mkdir(logDir, { recursive: true });
516
+ await fs.promises.appendFile(filePath, JSON.stringify(entry) + "\n", "utf-8");
517
+ } catch (err: any) {
518
+ warn(TAG, `trait log write failed: ${err.message}`);
519
+ }
520
+ }
@@ -0,0 +1,44 @@
1
+ import os from "node:os";
2
+ import { spawn, type ChildProcess } from "node:child_process";
3
+ import { resolve, dirname } from "node:path";
4
+ import { fileURLToPath } from "node:url";
5
+ import type { AudioPipelineConfig, AudioSourceTag } from "../types.js";
6
+ import type { CaptureSpawner } from "./capture-spawner.js";
7
+ import { log } from "../log.js";
8
+
9
+ const __dirname = dirname(fileURLToPath(import.meta.url));
10
+ const TAG = "audio";
11
+
12
+ /**
13
+ * macOS capture spawner — launches sck-capture (ScreenCaptureKit / AVAudioEngine).
14
+ * System mode captures both audio and screen frames via SCStream.
15
+ * Mic mode uses AVAudioEngine for audio only.
16
+ */
17
+ export class MacOSCaptureSpawner implements CaptureSpawner {
18
+ spawn(config: AudioPipelineConfig, source: AudioSourceTag): ChildProcess {
19
+ const binaryPath = resolve(__dirname, "..", "..", "..", "tools", "sck-capture", "sck-capture");
20
+ const args = [
21
+ "--sample-rate", String(config.sampleRate),
22
+ "--channels", String(config.channels),
23
+ ];
24
+
25
+ if (source === "mic") {
26
+ args.push("--mic");
27
+ if (config.device !== "default") {
28
+ args.push("--mic-device", config.device);
29
+ }
30
+ } else {
31
+ args.push(
32
+ "--screen-dir", resolve(os.homedir(), ".sinain", "capture"),
33
+ "--fps", "1",
34
+ "--scale", "0.5",
35
+ );
36
+ }
37
+
38
+ log(TAG, `spawning: ${binaryPath} ${args.join(" ")}`);
39
+
40
+ return spawn(binaryPath, args, {
41
+ stdio: ["ignore", "pipe", "pipe"],
42
+ });
43
+ }
44
+ }
@@ -0,0 +1,37 @@
1
+ import { spawn, type ChildProcess } from "node:child_process";
2
+ import { resolve, dirname } from "node:path";
3
+ import { fileURLToPath } from "node:url";
4
+ import type { AudioPipelineConfig, AudioSourceTag } from "../types.js";
5
+ import type { CaptureSpawner } from "./capture-spawner.js";
6
+ import { log } from "../log.js";
7
+
8
+ const __dirname = dirname(fileURLToPath(import.meta.url));
9
+ const TAG = "audio";
10
+
11
+ /**
12
+ * Windows capture spawner — launches win-audio-capture.exe (WASAPI).
13
+ * System mode uses WASAPI loopback capture on the default render device.
14
+ * Mic mode uses WASAPI capture on the specified/default input device.
15
+ */
16
+ export class WindowsCaptureSpawner implements CaptureSpawner {
17
+ spawn(config: AudioPipelineConfig, source: AudioSourceTag): ChildProcess {
18
+ const binaryPath = resolve(__dirname, "..", "..", "..", "tools", "win-audio-capture", "build", "win-audio-capture.exe");
19
+ const args = [
20
+ "--sample-rate", String(config.sampleRate),
21
+ "--channels", String(config.channels),
22
+ ];
23
+
24
+ if (source === "mic") {
25
+ args.push("--mic");
26
+ if (config.device !== "default") {
27
+ args.push("--mic-device", config.device);
28
+ }
29
+ }
30
+
31
+ log(TAG, `spawning: ${binaryPath} ${args.join(" ")}`);
32
+
33
+ return spawn(binaryPath, args, {
34
+ stdio: ["ignore", "pipe", "pipe"],
35
+ });
36
+ }
37
+ }
@@ -0,0 +1,14 @@
1
+ import type { ChildProcess } from "node:child_process";
2
+ import type { AudioPipelineConfig, AudioSourceTag } from "../types.js";
3
+
4
/**
 * Strategy interface for spawning platform-specific audio capture processes.
 * Each platform implements this to spawn its native capture binary
 * (e.g. sck-capture on macOS, win-audio-capture.exe on Windows).
 */
export interface CaptureSpawner {
  /**
   * Spawn the audio capture process for the given source.
   * The process must output raw 16-bit PCM on stdout.
   * Note: current implementations treat source === "mic" as microphone
   * capture and any other tag as system capture.
   */
  spawn(config: AudioPipelineConfig, source: AudioSourceTag): ChildProcess;
}