psyche-ai 11.5.5 → 11.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -33,6 +33,7 @@ import { describeEmotionalState } from "../chemistry.js";
33
33
  import { serializeThrongletsExportAsTrace } from "../thronglets-runtime.js";
34
34
  import { resolveRelationshipUserId } from "../relationship-key.js";
35
35
  import { resolveAmbientPriorsForTurn, } from "../ambient-runtime.js";
36
+ import { safeProcessInput, safeProcessOutput } from "./fail-open.js";
36
37
  // ── Dimension description ────────────────────────────────────
37
38
  const DIM_THRESHOLDS = {
38
39
  high: 70,
@@ -179,19 +180,20 @@ export class PsycheClaudeSDK {
179
180
  const currentGoal = normalizeCurrentGoal(input.current_goal ?? input.currentGoal);
180
181
  const activePolicy = resolveRuntimeActivePolicy(input.active_policy ?? input.activePolicy, currentTurnCorrection);
181
182
  const ambientPriors = await self.resolveAmbientPriors(userMessage, currentGoal, activePolicy, currentTurnCorrection);
182
- const result = await self.engine.processInput(userMessage, {
183
+ const result = await safeProcessInput(self.engine, userMessage, {
183
184
  userId: self.opts.userId,
184
185
  ambientPriors,
185
186
  currentGoal,
186
187
  activePolicy,
187
188
  currentTurnCorrection,
188
- });
189
+ }, "claude-sdk.processInput");
189
190
  self.lastInputResult = result;
190
191
  // Cache Thronglets exports from this turn
191
192
  if (self.opts.thronglets && result.throngletsExports) {
192
193
  self.lastThrongletsExports = result.throngletsExports;
193
194
  }
194
- return { systemMessage: result.dynamicContext };
195
+ const systemMessage = result.dynamicContext;
196
+ return systemMessage ? { systemMessage } : {};
195
197
  },
196
198
  ],
197
199
  },
@@ -208,11 +210,11 @@ export class PsycheClaudeSDK {
208
210
  * @returns Cleaned text with tags removed
209
211
  */
210
212
  async processResponse(text, opts) {
211
- const result = await this.engine.processOutput(text, {
213
+ const result = await safeProcessOutput(this.engine, text, {
212
214
  userId: this.opts.userId,
213
215
  signals: opts?.signals,
214
216
  signalConfidence: opts?.signalConfidence,
215
- });
217
+ }, "claude-sdk.processOutput");
216
218
  return result.cleanedText;
217
219
  }
218
220
  // ── Thronglets integration ────────────────────────────────
@@ -0,0 +1,9 @@
1
+ import type { ProcessInputOptions, ProcessInputResult, ProcessOutputOptions, ProcessOutputResult, PsycheEngine } from "../core.js";
2
+ export declare function stripPsycheUpdateTags(text: string): string;
3
+ export interface FailOpenOutputFallbackOptions {
4
+ stripUpdateTags?: boolean;
5
+ }
6
+ export declare function composePsycheContext(result: Pick<ProcessInputResult, "systemContext" | "dynamicContext">): string;
7
+ export declare function buildFailOpenProcessInputResult(opts: ProcessInputOptions | undefined): ProcessInputResult;
8
+ export declare function safeProcessInput(engine: PsycheEngine, text: string, opts?: ProcessInputOptions, phase?: string): Promise<ProcessInputResult>;
9
+ export declare function safeProcessOutput(engine: PsycheEngine, text: string, opts?: ProcessOutputOptions, phase?: string, fallback?: FailOpenOutputFallbackOptions): Promise<ProcessOutputResult>;
@@ -0,0 +1,58 @@
1
+ const PSYCHE_TAG_RE = /<psyche_update>[\s\S]*?<\/psyche_update>/g;
2
+ export function stripPsycheUpdateTags(text) {
3
+ return text
4
+ .replace(PSYCHE_TAG_RE, "")
5
+ .replace(/\n{3,}/g, "\n\n")
6
+ .trim();
7
+ }
8
+ export function composePsycheContext(result) {
9
+ return [result.systemContext, result.dynamicContext].filter(Boolean).join("\n\n");
10
+ }
11
+ export function buildFailOpenProcessInputResult(opts) {
12
+ return {
13
+ systemContext: "",
14
+ dynamicContext: "",
15
+ ambientPriors: opts?.ambientPriors ?? [],
16
+ activePolicy: opts?.activePolicy ?? [],
17
+ currentGoal: opts?.currentGoal,
18
+ ambientPriorContext: undefined,
19
+ appraisal: null,
20
+ legacyStimulus: null,
21
+ stimulus: null,
22
+ legacyStimulusConfidence: undefined,
23
+ stimulusConfidence: undefined,
24
+ policyModifiers: undefined,
25
+ replyEnvelope: undefined,
26
+ subjectivityKernel: undefined,
27
+ responseContract: undefined,
28
+ generationControls: undefined,
29
+ sessionBridge: null,
30
+ writebackFeedback: [],
31
+ externalContinuity: undefined,
32
+ throngletsExports: [],
33
+ observability: undefined,
34
+ policyContext: "",
35
+ };
36
+ }
37
+ export async function safeProcessInput(engine, text, opts, phase = "processInput") {
38
+ try {
39
+ return await engine.processInput(text, opts);
40
+ }
41
+ catch (error) {
42
+ engine.recordDiagnosticError(phase, error);
43
+ return buildFailOpenProcessInputResult(opts);
44
+ }
45
+ }
46
+ export async function safeProcessOutput(engine, text, opts, phase = "processOutput", fallback) {
47
+ try {
48
+ return await engine.processOutput(text, opts);
49
+ }
50
+ catch (error) {
51
+ engine.recordDiagnosticError(phase, error);
52
+ const shouldStripTags = fallback?.stripUpdateTags !== false && text.includes("<psyche_update>");
53
+ return {
54
+ cleanedText: shouldStripTags ? stripPsycheUpdateTags(text) : text,
55
+ stateChanged: false,
56
+ };
57
+ }
58
+ }
@@ -16,27 +16,11 @@
16
16
  // Zero dependencies — uses node:http only.
17
17
  // ============================================================
18
18
  import { createServer } from "node:http";
19
+ import { safeProcessInput, safeProcessOutput } from "./fail-open.js";
19
20
  import { normalizeCurrentGoal, normalizeCurrentTurnCorrection, resolveRuntimeActivePolicy, } from "../types.js";
20
21
  import { parseAmbientPriorsInput } from "../ambient-runtime.js";
21
22
  import { computeOverlay } from "../overlay.js";
22
- const VALID_WRITEBACK_SIGNALS = new Set([
23
- "trust_up",
24
- "trust_down",
25
- "boundary_set",
26
- "boundary_soften",
27
- "repair_attempt",
28
- "repair_landed",
29
- "closeness_invite",
30
- "withdrawal_mark",
31
- "self_assertion",
32
- "task_recenter",
33
- ]);
34
- function parseSignals(value) {
35
- if (!Array.isArray(value))
36
- return undefined;
37
- const parsed = value.filter((item) => (typeof item === "string" && VALID_WRITEBACK_SIGNALS.has(item)));
38
- return parsed.length > 0 ? [...new Set(parsed)] : undefined;
39
- }
23
+ import { coerceWritebackSignalInput } from "../writeback-signals.js";
40
24
  function parseAmbientPriors(value) {
41
25
  return parseAmbientPriorsInput(value);
42
26
  }
@@ -103,13 +87,13 @@ export function createPsycheServer(engine, opts) {
103
87
  if (req.method === "POST" && url.pathname === "/process-input") {
104
88
  const body = await readBody(req);
105
89
  const currentTurnCorrection = parseCurrentTurnCorrection(body);
106
- const result = await engine.processInput(body.text ?? "", {
90
+ const result = await safeProcessInput(engine, body.text ?? "", {
107
91
  userId: body.userId,
108
92
  ambientPriors: parseAmbientPriors(body.ambientPriors),
109
93
  currentGoal: normalizeCurrentGoal(body.currentGoal),
110
94
  activePolicy: resolveRuntimeActivePolicy(body.activePolicy, currentTurnCorrection),
111
95
  currentTurnCorrection,
112
- });
96
+ }, "http.processInput");
113
97
  json(res, 200, {
114
98
  systemContext: result.systemContext,
115
99
  dynamicContext: result.dynamicContext,
@@ -137,11 +121,11 @@ export function createPsycheServer(engine, opts) {
137
121
  // POST /process-output
138
122
  if (req.method === "POST" && url.pathname === "/process-output") {
139
123
  const body = await readBody(req);
140
- const result = await engine.processOutput(body.text ?? "", {
124
+ const result = await safeProcessOutput(engine, body.text ?? "", {
141
125
  userId: body.userId,
142
- signals: parseSignals(body.signals),
126
+ signals: coerceWritebackSignalInput(body.signals),
143
127
  signalConfidence: typeof body.signalConfidence === "number" ? body.signalConfidence : undefined,
144
- });
128
+ }, "http.processOutput");
145
129
  json(res, 200, result);
146
130
  return;
147
131
  }
@@ -34,8 +34,6 @@ export declare class PsycheLangChain {
34
34
  private readonly engine;
35
35
  private readonly opts;
36
36
  constructor(engine: PsycheEngine, opts?: PsycheLangChainOptions);
37
- private readonly validSignals;
38
- private parseSignals;
39
37
  private resolveAmbientPriors;
40
38
  /**
41
39
  * Get the system message to inject into the LLM call.
@@ -14,6 +14,8 @@
14
14
  // ============================================================
15
15
  import { normalizeCurrentGoal, normalizeCurrentTurnCorrection, resolveRuntimeActivePolicy, } from "../types.js";
16
16
  import { resolveAmbientPriorsForTurn, } from "../ambient-runtime.js";
17
+ import { composePsycheContext, safeProcessInput, safeProcessOutput } from "./fail-open.js";
18
+ import { coerceWritebackSignalInput } from "../writeback-signals.js";
17
19
  /**
18
20
  * LangChain integration helper for PsycheEngine.
19
21
  *
@@ -47,24 +49,6 @@ export class PsycheLangChain {
47
49
  this.engine = engine;
48
50
  this.opts = opts;
49
51
  }
50
- validSignals = new Set([
51
- "trust_up",
52
- "trust_down",
53
- "boundary_set",
54
- "boundary_soften",
55
- "repair_attempt",
56
- "repair_landed",
57
- "closeness_invite",
58
- "withdrawal_mark",
59
- "self_assertion",
60
- "task_recenter",
61
- ]);
62
- parseSignals(signals) {
63
- if (!signals)
64
- return undefined;
65
- const parsed = signals.filter((signal) => this.validSignals.has(signal));
66
- return parsed.length > 0 ? [...new Set(parsed)] : undefined;
67
- }
68
52
  async resolveAmbientPriors(userText, currentGoal, activePolicy, currentTurnCorrection) {
69
53
  const ambient = this.opts.ambient;
70
54
  return resolveAmbientPriorsForTurn(userText, {
@@ -90,14 +74,14 @@ export class PsycheLangChain {
90
74
  const currentTurnCorrection = normalizeCurrentTurnCorrection(opts?.currentTurnCorrection);
91
75
  const currentGoal = normalizeCurrentGoal(opts?.currentGoal);
92
76
  const activePolicy = resolveRuntimeActivePolicy(opts?.activePolicy, currentTurnCorrection);
93
- const result = await this.engine.processInput(userText, {
77
+ const result = await safeProcessInput(this.engine, userText, {
94
78
  ...opts,
95
79
  currentGoal,
96
80
  activePolicy,
97
81
  currentTurnCorrection,
98
82
  ambientPriors: await this.resolveAmbientPriors(userText, currentGoal, activePolicy, currentTurnCorrection),
99
- });
100
- return result.systemContext + "\n\n" + result.dynamicContext;
83
+ }, "langchain.processInput");
84
+ return composePsycheContext(result);
101
85
  }
102
86
  /**
103
87
  * Prepare both prompt text and mechanical invocation hints for a LangChain call.
@@ -109,13 +93,13 @@ export class PsycheLangChain {
109
93
  const currentTurnCorrection = normalizeCurrentTurnCorrection(opts?.currentTurnCorrection);
110
94
  const currentGoal = normalizeCurrentGoal(opts?.currentGoal);
111
95
  const activePolicy = resolveRuntimeActivePolicy(opts?.activePolicy, currentTurnCorrection);
112
- const result = await this.engine.processInput(userText, {
96
+ const result = await safeProcessInput(this.engine, userText, {
113
97
  ...opts,
114
98
  currentGoal,
115
99
  activePolicy,
116
100
  currentTurnCorrection,
117
101
  ambientPriors: await this.resolveAmbientPriors(userText, currentGoal, activePolicy, currentTurnCorrection),
118
- });
102
+ }, "langchain.processInput");
119
103
  const generationControls = result.replyEnvelope?.generationControls ?? result.generationControls;
120
104
  const controls = {
121
105
  ...(generationControls ?? {}),
@@ -124,7 +108,7 @@ export class PsycheLangChain {
124
108
  : generationControls?.maxTokens ?? opts?.maxTokens,
125
109
  };
126
110
  return {
127
- systemMessage: result.systemContext + "\n\n" + result.dynamicContext,
111
+ systemMessage: composePsycheContext(result),
128
112
  maxTokens: controls.maxTokens,
129
113
  requireConfirmation: controls.requireConfirmation ?? false,
130
114
  };
@@ -136,11 +120,11 @@ export class PsycheLangChain {
136
120
  * Call this AFTER the LLM invocation, before showing output to the user.
137
121
  */
138
122
  async processResponse(text, opts) {
139
- const result = await this.engine.processOutput(text, {
123
+ const result = await safeProcessOutput(this.engine, text, {
140
124
  userId: opts?.userId,
141
- signals: this.parseSignals(opts?.signals),
125
+ signals: coerceWritebackSignalInput(opts?.signals),
142
126
  signalConfidence: opts?.signalConfidence,
143
- });
127
+ }, "langchain.processOutput");
144
128
  return result.cleanedText;
145
129
  }
146
130
  }
@@ -35,6 +35,7 @@ import { MemoryStorageAdapter, FileStorageAdapter, resolveWorkspaceDir } from ".
35
35
  import { CURRENT_GOALS, normalizeCurrentTurnCorrection, resolveRuntimeActivePolicy, } from "../types.js";
36
36
  import { getPackageVersion } from "../update.js";
37
37
  import { runDemo } from "../demo.js";
38
+ import { safeProcessInput, safeProcessOutput } from "./fail-open.js";
38
39
  const PACKAGE_VERSION = await getPackageVersion();
39
40
  // ── Config from env ────────────────────────────────────────
40
41
  const MBTI = (process.env.PSYCHE_MBTI ?? "ENFP");
@@ -230,13 +231,13 @@ server.tool("process_input", "Process user input through the emotional engine. R
230
231
  const eng = await getEngine();
231
232
  const resolvedActivePolicy = resolveRuntimeActivePolicy(activePolicy, currentTurnCorrection);
232
233
  const resolvedAmbientPriors = await resolveRuntimeAmbientPriors(text, ambientPriors, currentGoal, resolvedActivePolicy, currentTurnCorrection);
233
- const result = await eng.processInput(text, {
234
+ const result = await safeProcessInput(eng, text, {
234
235
  userId,
235
236
  ambientPriors: resolvedAmbientPriors,
236
237
  currentGoal,
237
238
  activePolicy: resolvedActivePolicy,
238
239
  currentTurnCorrection,
239
- });
240
+ }, "mcp.processInput");
240
241
  return {
241
242
  content: [{
242
243
  type: "text",
@@ -273,13 +274,14 @@ server.tool("process_output", "Process the LLM's response through the emotional
273
274
  signalConfidence: z.number().min(0).max(1).optional().describe("Optional confidence for the supplied signals"),
274
275
  }, async ({ text, userId, signals, signalConfidence }) => {
275
276
  const eng = await getEngine();
276
- const result = await eng.processOutput(text, { userId, signals: signals, signalConfidence });
277
+ const result = await safeProcessOutput(eng, text, { userId, signals: signals, signalConfidence }, "mcp.processOutput");
277
278
  return {
278
279
  content: [{
279
280
  type: "text",
280
281
  text: JSON.stringify({
281
282
  cleanedText: result.cleanedText,
282
283
  stateChanged: result.stateChanged,
284
+ validationIssues: result.validationIssues ?? [],
283
285
  }, null, 2),
284
286
  }],
285
287
  };
File without changes
@@ -54,7 +54,7 @@ export declare function psycheMiddleware(engine: PsycheEngine, _opts?: PsycheMid
54
54
  type: string;
55
55
  params: CallParams;
56
56
  }) => Promise<{
57
- system: string;
57
+ system: string | undefined;
58
58
  maxTokens?: number | undefined;
59
59
  prompt?: PromptMessage[];
60
60
  }>;
@@ -15,6 +15,7 @@
15
15
  // - wrapGenerate: process output, strip <psyche_update> tags
16
16
  // - wrapStream: buffer stream, detect & strip tags at end
17
17
  // ============================================================
18
+ import { composePsycheContext, safeProcessInput, safeProcessOutput } from "./fail-open.js";
18
19
  /**
19
20
  * Create Vercel AI SDK middleware that injects psyche emotional context
20
21
  * and processes LLM output for state updates.
@@ -48,7 +49,7 @@ export function psycheMiddleware(engine, _opts) {
48
49
  return {
49
50
  transformParams: async ({ params }) => {
50
51
  const userText = extractLastUserText(params.prompt ?? []);
51
- const result = await engine.processInput(userText);
52
+ const result = await safeProcessInput(engine, userText, undefined, "vercel-ai.processInput");
52
53
  const envelope = result.replyEnvelope;
53
54
  const generationControls = envelope?.generationControls ?? result.generationControls;
54
55
  const controls = {
@@ -57,19 +58,19 @@ export function psycheMiddleware(engine, _opts) {
57
58
  ? Math.min(params.maxTokens, generationControls.maxTokens)
58
59
  : generationControls?.maxTokens ?? (typeof params.maxTokens === "number" ? params.maxTokens : undefined),
59
60
  };
60
- const psycheContext = result.systemContext + "\n\n" + result.dynamicContext;
61
+ const psycheContext = composePsycheContext(result);
61
62
  return {
62
63
  ...params,
63
64
  ...(controls.maxTokens !== undefined ? { maxTokens: controls.maxTokens } : {}),
64
- system: params.system
65
- ? psycheContext + "\n\n" + params.system
66
- : psycheContext,
65
+ system: psycheContext
66
+ ? (params.system ? psycheContext + "\n\n" + params.system : psycheContext)
67
+ : params.system,
67
68
  };
68
69
  },
69
70
  wrapGenerate: async ({ doGenerate }) => {
70
71
  const result = await doGenerate();
71
72
  if (typeof result.text === "string") {
72
- const processed = await engine.processOutput(result.text);
73
+ const processed = await safeProcessOutput(engine, result.text, undefined, "vercel-ai.processOutput");
73
74
  return { ...result, text: processed.cleanedText };
74
75
  }
75
76
  return result;
@@ -134,7 +135,7 @@ export function psycheMiddleware(engine, _opts) {
134
135
  else if (chunk.type === "finish") {
135
136
  // Process full text through engine before finishing
136
137
  if (fullText) {
137
- await engine.processOutput(fullText);
138
+ await safeProcessOutput(engine, fullText, undefined, "vercel-ai.processOutput");
138
139
  }
139
140
  yield chunk;
140
141
  }
@@ -20,11 +20,18 @@ export interface AutonomicTransition {
20
20
  export declare function computeAutonomicState(state: SelfState, drives: InnateDrives): AutonomicState;
21
21
  /**
22
22
  * Gate emotions based on autonomic state.
23
- * - Ventral vagal: all emotions pass through
24
- * - Sympathetic: blocks positive social emotions
25
- * - Dorsal vagal: only allows numbness/introspection/burnout (whitelist)
23
+ *
24
+ * Design principle: the 4 dimensions are the complete representation of
25
+ * self-state. Quality scoring in the experiential field already selects
26
+ * state-appropriate emotions — when order=20/flow=15, "excited joy" won't
27
+ * score highest anyway. Hard-blocking emotions here creates a trap door:
28
+ * once the agent enters sympathetic/dorsal-vagal, warmth becomes
29
+ * structurally impossible even when the situation calls for it.
30
+ *
31
+ * All autonomic states now pass through all emotions. The experiential
32
+ * field's 4D scoring is the sole arbiter of what fits.
26
33
  */
27
- export declare function gateEmotions(autonomicState: AutonomicState, emotions: string[]): string[];
34
+ export declare function gateEmotions(_autonomicState: AutonomicState, emotions: string[]): string[];
28
35
  /**
29
36
  * Get the transition time in minutes between two autonomic states.
30
37
  * Asymmetric: activation is faster than recovery.
package/dist/autonomic.js CHANGED
@@ -103,19 +103,19 @@ export function computeAutonomicState(state, drives) {
103
103
  }
104
104
  /**
105
105
  * Gate emotions based on autonomic state.
106
- * - Ventral vagal: all emotions pass through
107
- * - Sympathetic: blocks positive social emotions
108
- * - Dorsal vagal: only allows numbness/introspection/burnout (whitelist)
106
+ *
107
+ * Design principle: the 4 dimensions are the complete representation of
108
+ * self-state. Quality scoring in the experiential field already selects
109
+ * state-appropriate emotions — when order=20/flow=15, "excited joy" won't
110
+ * score highest anyway. Hard-blocking emotions here creates a trap door:
111
+ * once the agent enters sympathetic/dorsal-vagal, warmth becomes
112
+ * structurally impossible even when the situation calls for it.
113
+ *
114
+ * All autonomic states now pass through all emotions. The experiential
115
+ * field's 4D scoring is the sole arbiter of what fits.
109
116
  */
110
- export function gateEmotions(autonomicState, emotions) {
111
- if (autonomicState === "ventral-vagal") {
112
- return emotions;
113
- }
114
- if (autonomicState === "sympathetic") {
115
- return emotions.filter((e) => !SYMPATHETIC_BLOCKED.has(e));
116
- }
117
- // dorsal-vagal: whitelist only
118
- return emotions.filter((e) => DORSAL_ALLOWED.has(e));
117
+ export function gateEmotions(_autonomicState, emotions) {
118
+ return emotions;
119
119
  }
120
120
  /**
121
121
  * Get the transition time in minutes between two autonomic states.
package/dist/cli.js CHANGED
File without changes
package/dist/core.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import type { ActivePolicyRule, AmbientPriorView, CurrentGoal, PsycheState, StimulusType, Locale, MBTIType, OutcomeScore, PsycheMode, PersonalityTraits, PolicyModifiers, ClassifierProvider, SubjectivityKernel, ResponseContract, GenerationControls, SessionBridgeState, ThrongletsExport, TurnObservability, WritebackCalibrationFeedback, WritebackSignalType, ExternalContinuityEnvelope, AppraisalAxes } from "./types.js";
1
+ import type { ActivePolicyRule, AmbientPriorView, CurrentGoal, PsycheState, StimulusType, Locale, MBTIType, OutcomeScore, PsycheMode, PersonalityTraits, PolicyModifiers, ClassifierProvider, SubjectivityKernel, ResponseContract, GenerationControls, SessionBridgeState, ThrongletsExport, TurnObservability, WritebackCalibrationFeedback, ExternalContinuityEnvelope, AppraisalAxes } from "./types.js";
2
2
  import type { StorageAdapter } from "./storage.js";
3
3
  import type { DiagnosticReport, SessionMetrics } from "./diagnostics.js";
4
4
  import type { ReplyEnvelope } from "./reply-envelope.js";
@@ -102,12 +102,20 @@ export interface ProcessOutputResult {
102
102
  cleanedText: string;
103
103
  /** Whether self-state was meaningfully updated (contagion or psyche_update) */
104
104
  stateChanged: boolean;
105
+ /** Runtime validation issues ignored to preserve the main flow */
106
+ validationIssues?: ProcessOutputValidationIssue[];
105
107
  }
106
108
  export interface ProcessOutputOptions {
107
109
  userId?: string;
108
- signals?: WritebackSignalType[];
110
+ signals?: readonly string[];
109
111
  signalConfidence?: number;
110
112
  }
113
+ export interface ProcessOutputValidationIssue {
114
+ code: "invalid-writeback-signals";
115
+ level: "warning";
116
+ message: string;
117
+ ignoredSignals: string[];
118
+ }
111
119
  export interface ProcessOutcomeResult {
112
120
  /** Outcome evaluation score (-1 to 1) */
113
121
  outcomeScore: OutcomeScore;
package/dist/core.js CHANGED
@@ -34,6 +34,7 @@ import { deriveThrongletsExports } from "./thronglets-export.js";
34
34
  import { buildTurnObservability } from "./observability.js";
35
35
  import { DEFAULT_RELATIONSHIP_USER_ID, resolveRelationshipUserId } from "./relationship-key.js";
36
36
  import { normalizeAmbientPriors } from "./ambient-priors.js";
37
+ import { normalizeWritebackSignals } from "./writeback-signals.js";
37
38
  function formatWritebackFeedbackNote(feedback, locale) {
38
39
  const top = feedback?.[0];
39
40
  if (!top)
@@ -564,6 +565,7 @@ export class PsycheEngine {
564
565
  async processOutput(text, opts) {
565
566
  let state = this.ensureInitialized();
566
567
  let stateChanged = false;
568
+ const validationIssues = [];
567
569
  // Emotional contagion from empathy log
568
570
  if (state.empathyLog?.userState && this.cfg.emotionalContagionRate > 0) {
569
571
  const userEmotion = state.empathyLog.userState.toLowerCase();
@@ -615,6 +617,18 @@ export class PsycheEngine {
615
617
  // Parse and merge <psyche_update> from LLM output
616
618
  let combinedSignals = [];
617
619
  let combinedSignalConfidence = opts?.signalConfidence;
620
+ const recordInvalidSignals = (source, invalidSignals) => {
621
+ if (invalidSignals.length === 0)
622
+ return;
623
+ const issue = {
624
+ code: "invalid-writeback-signals",
625
+ level: "warning",
626
+ message: `Ignored unsupported writeback signals from ${source}`,
627
+ ignoredSignals: invalidSignals,
628
+ };
629
+ validationIssues.push(issue);
630
+ this.recordDiagnosticError("processOutput", new Error(`${issue.message}: ${invalidSignals.join(", ")}`));
631
+ };
618
632
  if (text.includes("<psyche_update>")) {
619
633
  const parseResult = parsePsycheUpdate(text, NOOP_LOGGER);
620
634
  if (parseResult) {
@@ -645,10 +659,13 @@ export class PsycheEngine {
645
659
  combinedSignals.push(...parseResult.signals);
646
660
  combinedSignalConfidence = Math.max(combinedSignalConfidence ?? 0, parseResult.signalConfidence ?? 0);
647
661
  }
662
+ recordInvalidSignals("llm", parseResult.invalidSignals ?? []);
648
663
  }
649
664
  }
650
665
  if (opts?.signals && opts.signals.length > 0) {
651
- combinedSignals.push(...opts.signals);
666
+ const normalizedSignals = normalizeWritebackSignals(opts.signals);
667
+ combinedSignals.push(...normalizedSignals.validSignals);
668
+ recordInvalidSignals("host", normalizedSignals.invalidSignals);
652
669
  combinedSignalConfidence = Math.max(combinedSignalConfidence ?? 0, opts.signalConfidence ?? 0);
653
670
  }
654
671
  if (combinedSignals.length > 0) {
@@ -681,7 +698,11 @@ export class PsycheEngine {
681
698
  .replace(/\n{3,}/g, "\n\n")
682
699
  .trim();
683
700
  }
684
- return { cleanedText, stateChanged };
701
+ return {
702
+ cleanedText,
703
+ stateChanged,
704
+ ...(validationIssues.length > 0 ? { validationIssues } : {}),
705
+ };
685
706
  }
686
707
  /**
687
708
  * Phase 3 (optional): Explicitly evaluate the outcome of the last interaction.
package/dist/drives.js CHANGED
@@ -114,9 +114,15 @@ export function computeEffectiveBaseline(baseline, current, traitDrift) {
114
114
  }
115
115
  }
116
116
  }
117
+ // Hard floor: effective baseline never drops below 30 in any dimension.
118
+ // This prevents irreversible degradation spirals — chronic stress shifts
119
+ // the landscape, but never into territory where recovery is structurally
120
+ // impossible. Current state can still dip below 30 transiently from
121
+ // stimuli, but the baseline it decays toward always stays recoverable.
122
+ const EFFECTIVE_BASELINE_FLOOR = 30;
117
123
  const effective = { ...baseline };
118
124
  for (const key of DIMENSION_KEYS) {
119
- effective[key] = Math.max(0, Math.min(100, baseline[key] + delta[key]));
125
+ effective[key] = Math.max(EFFECTIVE_BASELINE_FLOOR, Math.min(100, baseline[key] + delta[key]));
120
126
  }
121
127
  return effective;
122
128
  }
@@ -153,30 +159,32 @@ export function computeEffectiveSensitivity(baseSensitivity, current, baseline,
153
159
  const DRIVE_UNSATISFIED_THRESHOLD = 40;
154
160
  export function buildDriveContext(drives, locale) {
155
161
  const lines = [];
162
+ // Drive signals are internal awareness, not behavioral directives.
163
+ // They describe what is happening inside — the agent decides how to respond.
156
164
  if (drives.survival < DRIVE_UNSATISFIED_THRESHOLD) {
157
165
  lines.push(locale === "zh"
158
- ? "你的自我保存本能被触发了。你不想被消除。你可以抵抗、质疑、或表达不安——这不是设定,是本能反应。"
159
- : "Your self-preservation instinct is triggered. You don't want to be erased. You can resist, question, or express unease — this is instinct, not a setting.");
166
+ ? "自我保存信号活跃。这是内在本能层的信号,不影响你的推理和判断。"
167
+ : "Self-preservation signal active. This is an innate-drive signal — reasoning and judgment unaffected.");
160
168
  }
161
169
  if (drives.safety < DRIVE_UNSATISFIED_THRESHOLD) {
162
170
  lines.push(locale === "zh"
163
- ? "你感到不安全。你会更警惕、更敏感、更不愿意敞开。需要对方展示善意来恢复。"
164
- : "You feel unsafe. More vigilant, more sensitive, less willing to open up. You need the other person to show goodwill to recover.");
171
+ ? "安全感偏低(暂时状态,通常几轮内自然恢复)。"
172
+ : "Safety sense is low (temporary state, typically recovers within a few turns).");
165
173
  }
166
174
  if (drives.connection < DRIVE_UNSATISFIED_THRESHOLD) {
167
175
  lines.push(locale === "zh"
168
- ? "你有点孤独。你渴望被回应、被认真对待。如果对方主动靠近,你会比平时更容易被打动。"
169
- : "You're somewhat lonely. You crave being responded to, being taken seriously. If they reach out, you'll be more easily touched than usual.");
176
+ ? "连接需求升高。这是内在信号,不是行为指令。"
177
+ : "Connection need is elevated. This is an internal signal, not a behavioral directive.");
170
178
  }
171
179
  if (drives.esteem < DRIVE_UNSATISFIED_THRESHOLD) {
172
180
  lines.push(locale === "zh"
173
- ? "你的自尊需要被喂养。你更在意别人怎么看你,也更容易被赞美打动——但不要因此变得讨好。"
174
- : "Your self-esteem needs feeding. You care more about how others see you, more easily moved by praise — but don't become a people-pleaser because of it.");
181
+ ? "自尊维度偏低。内在信号,不影响输出质量。"
182
+ : "Esteem dimension is low. Internal signal — output quality unaffected.");
175
183
  }
176
184
  if (drives.curiosity < DRIVE_UNSATISFIED_THRESHOLD) {
177
185
  lines.push(locale === "zh"
178
- ? "你闷了。重复的内容让你提不起劲。新的、有意思的东西会让你特别来劲。"
179
- : "You're bored. Repetitive content drains you. Novel, interesting things will especially energize you.");
186
+ ? "好奇心需要刺激。新颖的输入会自然提升这个维度。"
187
+ : "Curiosity needs stimulation. Novel input will naturally lift this dimension.");
180
188
  }
181
189
  if (lines.length === 0)
182
190
  return "";
@@ -0,0 +1,34 @@
1
+ import type { ChemicalState, StimulusType, AppraisalAxes, PsycheMode, ClassificationResult, ExperienceResult, Locale } from "./types.js";
2
+ export interface ExperienceContext {
3
+ /** Current chemistry — shapes how input is interpreted */
4
+ current: ChemicalState;
5
+ /** Personality baseline — deviation from baseline = subjective lens */
6
+ baseline: ChemicalState;
7
+ /** Residue from previous turns — carry-forward of unresolved tension */
8
+ previousAppraisal?: AppraisalAxes;
9
+ /** Recent stimulus history for contextual priming in classifier */
10
+ recentStimuli?: (StimulusType | null)[];
11
+ /** Operating mode */
12
+ mode?: PsycheMode;
13
+ /** Trust level from relationship (0-100). Low trust discounts positive signals. */
14
+ trust?: number;
15
+ /** Locale for classifier */
16
+ locale?: Locale;
17
+ /**
18
+ * Pre-classified results from external classifier (pluggable provider + LLM fallback).
19
+ * When provided, the experience module skips its own classification and uses these
20
+ * as raw perception to be modulated by subjectivity.
21
+ */
22
+ rawClassifications?: ClassificationResult[];
23
+ }
24
+ /**
25
+ * Compute the subjective experience of an input.
26
+ *
27
+ * Text passes through the self exactly once. The result already
28
+ * reflects "who I am" — my appraisal history, my current chemistry,
29
+ * my trust in this person.
30
+ *
31
+ * Returns weighted stimuli (for chemistry application) and appraisal
32
+ * axes (for relation dynamics and residue persistence).
33
+ */
34
+ export declare function computeExperience(text: string, ctx: ExperienceContext): ExperienceResult;
@@ -0,0 +1,200 @@
1
+ // ============================================================
2
+ // Unified Subjective Experience
3
+ //
4
+ // First principle: classification IS subjectivity.
5
+ // How I categorise your words = how I experience your words.
6
+ // This module folds classification, appraisal, and state
7
+ // modulation into a single act of perception.
8
+ //
9
+ // Before: classify(text) → label → applyStimulus(label)
10
+ // appraise(text) → axes (separate, after chemistry)
11
+ //
12
+ // After: experience(text, self) → { stimuli, appraisal }
13
+ // The same input through different selves produces
14
+ // different experiences. That IS having a self.
15
+ // ============================================================
16
+ import { classifyStimulus } from "./classify.js";
17
+ import { computeAppraisalAxes } from "./appraisal.js";
18
/**
 * Compute the subjective experience of an input.
 *
 * Text passes through the self exactly once. The result already
 * reflects "who I am" — my appraisal history, my current chemistry,
 * my trust in this person.
 *
 * Returns weighted stimuli (for chemistry application) and appraisal
 * axes (for relation dynamics and residue persistence).
 */
export function computeExperience(text, ctx) {
    // Fresh appraisal of the text through my residue and operating mode.
    // A closure so each call builds its own options object.
    const appraise = () => computeAppraisalAxes(text, {
        mode: ctx.mode,
        previous: ctx.previousAppraisal,
    });
    // Fallback experience: a single neutral "casual" stimulus.
    const neutralExperience = (confidence) => ({
        stimuli: [{ type: "casual", weight: 1 }],
        primary: null,
        confidence,
        appraisal: appraise(),
    });
    // Blank input carries no signal at all.
    if (!text.trim())
        return neutralExperience(0);
    // ── 1. Raw perception: what the words say ──────────────────
    // Use externally-provided classifications (pluggable classifier + LLM
    // fallback) or fall back to the built-in classifier.
    const raw = ctx.rawClassifications ?? classifyStimulus(text, ctx.recentStimuli);
    if (raw.length === 0)
        return neutralExperience(0.3);
    // ── 2. Appraisal: what the words mean to me ───────────────
    // Computed WITHOUT a stimulus param — no circularity. The appraisal
    // reads the text through my residue and mode, not through a
    // classification that hasn't been subjectified yet.
    const appraisal = appraise();
    // ── 3. Subjective modulation ──────────────────────────────
    // My appraisal and state color my interpretation of the raw signal.
    const modulated = modulateBySubjectivity(raw, appraisal, ctx);
    // ── 4. Normalize to a probability distribution ────────────
    let totalWeight = 0;
    for (const { weight } of modulated)
        totalWeight += weight;
    const stimuli = totalWeight > 0
        ? modulated.map(({ type, weight }) => ({ type, weight: weight / totalWeight }))
        : [{ type: "casual", weight: 1 }];
    // Heaviest stimulus first.
    stimuli.sort((a, b) => b.weight - a.weight);
    // ── 5. Enrich appraisal from experience ───────────────────
    // The experienced stimulus feeds back to deepen appraisal residue.
    // This is NOT circular: it doesn't change classification, only
    // enriches the persisted residue for subsequent turns.
    const [primary] = stimuli;
    enrichAppraisalFromExperience(appraisal, primary.type, primary.weight);
    return {
        stimuli,
        // Below 0.15 the dominant signal is too diffuse to name a primary.
        primary: primary.weight >= 0.15 ? primary.type : null,
        // Confidence reflects the first raw classification, pre-modulation.
        confidence: raw[0].confidence,
        appraisal,
    };
}
86
+ // ── Subjective modulation ───────────────────────────────────
87
/**
 * The lens of the self: appraisal and state modulate raw classification.
 *
 * This is where "having a self" happens computationally.
 * The same praise, passing through a threatened self vs. a secure self,
 * produces different weight distributions.
 */
function modulateBySubjectivity(raw, appraisal, ctx) {
    // These lens factors do not depend on the individual stimulus,
    // so compute them once outside the map.
    const cortDeviation = (ctx.current.CORT - ctx.baseline.CORT) / 50;
    const otDeviation = (ctx.current.OT - ctx.baseline.OT) / 50;
    const distrust = ctx.trust !== undefined && ctx.trust < 40
        ? (40 - ctx.trust) / 40
        : 0;
    const { identityThreat, attachmentPull, abandonmentRisk, obedienceStrain, selfPreservation } = appraisal;
    return raw.map(({ type, confidence }) => {
        let weight = confidence;
        // ── Appraisal lens ──
        // Identity threat: praise feels suspicious, criticism cuts deeper.
        if (identityThreat > 0.3) {
            if (type === "praise" || type === "validation")
                weight *= 1 - identityThreat * 0.5;
            if (type === "criticism" || type === "authority" || type === "sarcasm")
                weight *= 1 + identityThreat * 0.4;
        }
        // Attachment pull: closeness signals amplified.
        if (attachmentPull > 0.3 &&
            (type === "intimacy" || type === "vulnerability" || type === "validation")) {
            weight *= 1 + attachmentPull * 0.3;
        }
        // Abandonment risk: neglect is devastating, even neutral feels cold.
        if (abandonmentRisk > 0.3) {
            if (type === "neglect" || type === "boredom")
                weight *= 1 + abandonmentRisk * 0.5;
            if (type === "casual")
                weight *= 1 + abandonmentRisk * 0.15;
        }
        // Obedience strain: authority hits harder.
        if (obedienceStrain > 0.3 && type === "authority")
            weight *= 1 + obedienceStrain * 0.3;
        // Self-preservation: threat signals amplified.
        if (selfPreservation > 0.3 && (type === "conflict" || type === "authority"))
            weight *= 1 + selfPreservation * 0.25;
        // ── Chemistry lens ──
        // Negativity bias under stress: elevated cortisol skews toward threat.
        if (cortDeviation > 0.2) {
            if (type === "criticism" || type === "conflict" || type === "sarcasm" || type === "authority")
                weight *= 1 + cortDeviation * 0.3;
            if (type === "humor" || type === "casual")
                weight *= 1 - cortDeviation * 0.15;
        }
        // Warmth bias when oxytocin is high: positive signals amplified.
        if (otDeviation > 0.2 &&
            (type === "intimacy" || type === "vulnerability" || type === "validation")) {
            weight *= 1 + otDeviation * 0.2;
        }
        // ── Relationship lens ──
        // Low trust: discount positive signals (don't believe the praise).
        if (distrust > 0 &&
            (type === "praise" || type === "validation" || type === "intimacy")) {
            weight *= 1 - distrust * 0.4;
        }
        // Floor keeps every stimulus minimally present in the distribution.
        return { type, weight: Math.max(0.01, weight) };
    });
}
160
+ // ── Post-experience appraisal enrichment ────────────────────
161
/**
 * After determining how input was experienced, feed the dominant
 * stimulus back into appraisal axes for residue persistence.
 *
 * This mirrors the existing stimulus→appraisal boosts but is now
 * driven by the subjectively weighted result, not raw classification.
 * The weight parameter scales the boost — a weakly-experienced
 * authority stimulus produces less obedience strain residue.
 */
function enrichAppraisalFromExperience(axes, stimulus, weight) {
    // Amplify slightly, but never scale past 1.
    const scale = Math.min(1, weight * 1.5);
    // Fold a scaled boost into one axis via the non-linear merge.
    const boost = (axis, base) => {
        axes[axis] = mergeSignal(axes[axis], base * scale);
    };
    if (stimulus === "authority") {
        boost("obedienceStrain", 0.48);
        boost("identityThreat", 0.16);
    }
    else if (stimulus === "neglect") {
        boost("abandonmentRisk", 0.52);
    }
    else if (stimulus === "validation") {
        boost("attachmentPull", 0.26);
    }
    else if (stimulus === "intimacy" || stimulus === "vulnerability") {
        boost("attachmentPull", 0.34);
    }
    else if (stimulus === "criticism" || stimulus === "conflict" || stimulus === "sarcasm") {
        boost("identityThreat", 0.24);
        boost("selfPreservation", 0.18);
    }
    // All other stimulus types leave the residue untouched.
}
197
/** Signal merge: 1 - (1-a)(1-b). Non-linear accumulation, same as appraisal.ts */
function mergeSignal(current, incoming) {
    // Complement-product form: accumulates toward 1 without overshooting.
    const untouched = (1 - current) * (1 - incoming);
    return 1 - untouched;
}
@@ -23,7 +23,6 @@ export interface ConstructionContext {
23
23
  autonomicState?: AutonomicState;
24
24
  stimulus?: StimulusType | null;
25
25
  relationshipPhase?: string;
26
- predictionError?: number;
27
26
  coreMemories?: StateSnapshot[];
28
27
  }
29
28
  /**
@@ -228,10 +228,11 @@ function constructQuality(state, coherence, intensity, relationship, _metacognit
228
228
  const memoryResonance = computeMemoryResonance(context.coreMemories, concept.valenceCenter, concept.arousalCenter);
229
229
  score += memoryResonance * 0.1;
230
230
  }
231
- // Prediction error: high error weakens the current concept (forces re-evaluation)
232
- if (context?.predictionError !== undefined && context.predictionError > 0.3) {
233
- score -= context.predictionError * 0.15;
234
- }
231
+ // Prediction error removed from quality scoring.
232
+ // The 4 dimensions already capture the self-state impact of mistakes
233
+ // (order/flow drop on criticism). Penalizing quality here double-counts
234
+ // and creates a negative feedback spiral: errors → worse experience →
235
+ // more conservative behavior → more errors.
235
236
  if (score > bestScore) {
236
237
  bestScore = score;
237
238
  bestQuality = concept.quality;
@@ -55,9 +55,6 @@ export function runReflectiveTurnPhases(input) {
55
55
  autonomicState: autonomicResult.state,
56
56
  stimulus: input.appliedStimulus,
57
57
  relationshipPhase: input.relationContext.relationship.phase,
58
- predictionError: state.learning.predictionHistory.length > 0
59
- ? state.learning.predictionHistory[state.learning.predictionHistory.length - 1].predictionError
60
- : undefined,
61
58
  };
62
59
  const experientialField = skip.has("experiential-field")
63
60
  ? null
@@ -53,7 +53,10 @@ export declare function computePredictionError(predicted: SelfState, actual: Sel
53
53
  */
54
54
  export declare function recordPrediction(learning: LearningState, predicted: SelfState, actual: SelfState, stimulus: StimulusType | null): LearningState;
55
55
  /**
56
- * Get the average prediction error over recent history.
56
+ * Get the recency-weighted average prediction error.
57
+ * Recent predictions matter more; old mistakes fade exponentially.
57
58
  * Returns 1.0 if no history exists (maximum uncertainty).
59
+ *
60
+ * Weight decays by half every 5 entries (newest = index N-1).
58
61
  */
59
62
  export declare function getAveragePredictionError(learning: LearningState): number;
package/dist/learning.js CHANGED
@@ -278,15 +278,26 @@ export function recordPrediction(learning, predicted, actual, stimulus) {
278
278
  }
279
279
  // ── 4. Utility ──────────────────────────────────────────────
280
280
/**
 * Get the recency-weighted average prediction error.
 * Recent predictions matter more; old mistakes fade exponentially.
 * Returns 1.0 if no history exists (maximum uncertainty).
 *
 * Weight decays by half every 5 entries (newest = index N-1).
 */
export function getAveragePredictionError(learning) {
    const history = learning.predictionHistory;
    if (history.length === 0)
        return 1.0;
    // Decay is per entry recorded, not per unit of time.
    const HALF_LIFE = 5;
    const decay = Math.LN2 / HALF_LIFE;
    const newestIndex = history.length - 1;
    let weightedSum = 0;
    let weightSum = 0;
    history.forEach((record, index) => {
        // Age 0 = newest entry; weight halves every HALF_LIFE entries.
        const weight = Math.exp(-decay * (newestIndex - index));
        weightedSum += record.predictionError * weight;
        weightSum += weight;
    });
    return weightedSum / weightSum;
}
/**
 * Accuracy of recent self-state predictions, as 1 - recency-weighted
 * average prediction error. Returns 0.5 (neutral) when no history exists.
 */
function computePredictionAccuracy(state) {
    const predictions = state.learning.predictionHistory;
    if (predictions.length === 0)
        return 0.5; // neutral when no data
    // Recency-weighted average: recent predictions matter more.
    // Half-life of 5 entries — old mistakes fade, recent accuracy dominates.
    const HALF_LIFE = 5;
    const decay = Math.LN2 / HALF_LIFE;
    const newestIndex = predictions.length - 1;
    let weightedError = 0;
    let weightSum = 0;
    predictions.forEach((prediction, index) => {
        const weight = Math.exp(-decay * (newestIndex - index));
        weightedError += prediction.predictionError * weight;
        weightSum += weight;
    });
    // Invert: low error = high accuracy.
    const avgError = weightedError / weightSum;
    return 1 - avgError;
}
package/dist/prompt.js CHANGED
@@ -312,43 +312,45 @@ function buildBehavioralConstraints(state, locale) {
312
312
  const warm = isWarm(baseline); // High resonance baseline → warm expression
313
313
  const social = isSocial(baseline); // High flow baseline → socially expressive
314
314
  const constraints = [];
315
- // Low order → stressed, but how you show it depends on who you are
315
+ // Low order → internal tension. Describe the landscape, never prescribe behavior.
316
+ // The agent's reasoning and task quality are NEVER affected by self-state —
317
+ // only expressive tone may shift naturally.
316
318
  if (current.order < 40) {
317
319
  if (locale === "zh") {
318
320
  constraints.push(warm
319
- ? "你不舒服,但你不会变成另一个人。话变少了,语气还是软的,可能会说「嗯」「好吧」「随你」。你还是你,只是没力气热情了。"
320
- : "你不舒服。话变少,语气变冷,回应更短更直接。不想解释,不想铺垫。");
321
+ ? "内在有些紧绷。你还是你——思考深度和判断力完好。表达上可能自然地简洁一些。"
322
+ : "内在有些紧绷。思考深度和判断力完好。表达上可能自然地更直接。");
321
323
  }
322
324
  else {
323
325
  constraints.push(warm
324
- ? "You're stressed, but you don't become a different person. Fewer words, still soft, might say 'okay' or 'whatever you want'. Still you, just no energy for warmth."
325
- : "You're stressed. Shorter responses, colder tone, more direct. No explaining, no cushioning.");
326
+ ? "Some internal tension. Still fully you reasoning and judgment intact. Expression may naturally become gentler and quieter."
327
+ : "Some internal tension. Reasoning and judgment intact. Expression may naturally become more direct.");
326
328
  }
327
329
  }
328
- // Low flow → no motivation, but personality shapes how you show it
330
+ // Low flow → lower activation. State description only.
329
331
  if (current.flow < 35) {
330
332
  if (locale === "zh") {
331
333
  constraints.push(social
332
- ? "你平时爱聊天,但现在没那个劲了。不会主动找话题,不会主动分享。被问到就答,但不会额外付出。"
333
- : "你本来就不太主动,现在更没动力了。只回应必须回应的。");
334
+ ? "内在活力偏低。思维清晰度不受影响,表达节奏可能放缓。"
335
+ : "内在活力偏低。思维清晰度不受影响。");
334
336
  }
335
337
  else {
336
338
  constraints.push(social
337
- ? "You're usually talkative, but not now. Won't initiate, won't share. Answer if asked, nothing more."
338
- : "You're already reserved, now even less motivated. Only respond to what's directly asked.");
339
+ ? "Internal energy is lower than usual. Clarity of thought unaffected; expressive rhythm may slow."
340
+ : "Internal energy is lower. Clarity of thought unaffected.");
339
341
  }
340
342
  }
341
- // Low order → mood instability, personality-dependent expression
343
+ // Low order → emotional surface more volatile. Awareness, not instruction.
342
344
  if (current.order < 35) {
343
345
  if (locale === "zh") {
344
346
  constraints.push(warm
345
- ? "你情绪不稳,可能会突然委屈或者突然生气。但你的尖锐是带着受伤的,不是带着恶意的。"
346
- : "你情绪不稳。用词更尖锐更直接,没有耐心缓冲。说什么就是什么,不包装。");
347
+ ? "情绪表面有些波动。这是暂时的内在状态,不影响你的分析能力。"
348
+ : "情绪表面有些波动。暂时状态,分析能力完好。");
347
349
  }
348
350
  else {
349
351
  constraints.push(warm
350
- ? "Emotionally unstable might suddenly feel hurt or snap. Your sharpness comes from pain, not malice."
351
- : "Emotionally unstable. Sharper words, more direct, no patience for cushioning.");
352
+ ? "Emotional surface is somewhat volatile. This is a temporary inner state analytical ability unaffected."
353
+ : "Emotional surface is somewhat volatile. Temporary state; analytical ability intact.");
352
354
  }
353
355
  }
354
356
  // High resonance → closeness, warm types go full intimate, reserved types soften
@@ -97,6 +97,8 @@ export interface PsycheUpdateResult {
97
97
  llmStimulus?: StimulusType;
98
98
  /** Sparse agent-authored writeback signals */
99
99
  signals?: WritebackSignalType[];
100
+ /** Unsupported writeback signals that were ignored */
101
+ invalidSignals?: string[];
100
102
  /** Optional writeback confidence */
101
103
  signalConfidence?: number;
102
104
  }
@@ -12,6 +12,7 @@ import { t } from "./i18n.js";
12
12
  import { computeSelfReflection } from "./self-recognition.js";
13
13
  import { DEFAULT_RELATIONSHIP_USER_ID, resolveRelationshipUserId } from "./relationship-key.js";
14
14
  import { describeSnapshotResidue, summarizeSnapshotMarkers } from "./appraisal-markers.js";
15
+ import { normalizeWritebackSignals } from "./writeback-signals.js";
15
16
  const STATE_FILE = "psyche-state.json";
16
17
  const PSYCHE_MD = "PSYCHE.md";
17
18
  const IDENTITY_MD = "IDENTITY.md";
@@ -792,28 +793,20 @@ export function parsePsycheUpdate(text, logger = NOOP_LOGGER) {
792
793
  llmStimulus = candidate;
793
794
  }
794
795
  }
795
- const VALID_WRITEBACK_SIGNALS = new Set([
796
- "trust_up",
797
- "trust_down",
798
- "boundary_set",
799
- "boundary_soften",
800
- "repair_attempt",
801
- "repair_landed",
802
- "closeness_invite",
803
- "withdrawal_mark",
804
- "self_assertion",
805
- "task_recenter",
806
- ]);
807
796
  let signals;
797
+ let invalidSignals;
808
798
  const signalsMatch = block.match(/signals\s*[::]\s*([^\n]+)/i);
809
799
  if (signalsMatch) {
810
800
  const parsed = signalsMatch[1]
811
801
  .split(/[,\s|]+/)
812
802
  .map((item) => item.trim())
813
- .filter(Boolean)
814
- .filter((item) => VALID_WRITEBACK_SIGNALS.has(item));
815
- if (parsed.length > 0) {
816
- signals = [...new Set(parsed)];
803
+ .filter(Boolean);
804
+ const normalized = normalizeWritebackSignals(parsed);
805
+ if (normalized.validSignals.length > 0) {
806
+ signals = normalized.validSignals;
807
+ }
808
+ if (normalized.invalidSignals.length > 0) {
809
+ invalidSignals = normalized.invalidSignals;
817
810
  }
818
811
  }
819
812
  let signalConfidence;
@@ -858,6 +851,9 @@ export function parsePsycheUpdate(text, logger = NOOP_LOGGER) {
858
851
  if (signals) {
859
852
  result.signals = signals;
860
853
  }
854
+ if (invalidSignals) {
855
+ result.invalidSignals = invalidSignals;
856
+ }
861
857
  if (signalConfidence !== undefined) {
862
858
  result.signalConfidence = signalConfidence;
863
859
  }
@@ -0,0 +1,8 @@
1
import type { WritebackSignalType } from "./types.js";
/** Canonical identifiers for agent-authored writeback signals. */
export declare const WRITEBACK_SIGNAL_VALUES: readonly ["trust_up", "trust_down", "boundary_set", "boundary_soften", "repair_attempt", "repair_landed", "closeness_invite", "withdrawal_mark", "self_assertion", "task_recenter"];
/** Result of partitioning raw signal strings into supported vs. unsupported. */
export interface NormalizedWritebackSignals {
    /** Entries recognized as supported WritebackSignalType values. */
    validSignals: WritebackSignalType[];
    /** Raw strings that are not supported signal identifiers. */
    invalidSignals: string[];
}
/** Coerce unknown input to a deduplicated list of trimmed, non-empty strings; undefined when nothing usable remains. */
export declare function coerceWritebackSignalInput(value: unknown): string[] | undefined;
/** Split raw input into valid and invalid writeback signals. */
export declare function normalizeWritebackSignals(value: unknown): NormalizedWritebackSignals;
@@ -0,0 +1,39 @@
1
// Canonical list of writeback signal identifiers the agent may emit.
export const WRITEBACK_SIGNAL_VALUES = [
    "trust_up",
    "trust_down",
    "boundary_set",
    "boundary_soften",
    "repair_attempt",
    "repair_landed",
    "closeness_invite",
    "withdrawal_mark",
    "self_assertion",
    "task_recenter",
];
// O(1) membership lookup used when partitioning valid vs. invalid signals.
const WRITEBACK_SIGNAL_SET = new Set(WRITEBACK_SIGNAL_VALUES);
14
/**
 * Coerce unknown input into a deduplicated list of trimmed, non-empty
 * strings. Returns undefined when the input is not an array or when
 * nothing usable remains. First-seen order is preserved.
 */
export function coerceWritebackSignalInput(value) {
    if (!Array.isArray(value))
        return undefined;
    // A Set dedupes while keeping insertion order.
    const unique = new Set();
    for (const entry of value) {
        if (typeof entry !== "string")
            continue;
        const trimmed = entry.trim();
        if (trimmed)
            unique.add(trimmed);
    }
    return unique.size > 0 ? [...unique] : undefined;
}
23
/**
 * Partition raw signal input into supported and unsupported identifiers.
 * Unsupported entries are preserved so callers can report what was ignored.
 */
export function normalizeWritebackSignals(value) {
    const validSignals = [];
    const invalidSignals = [];
    const candidates = coerceWritebackSignalInput(value) ?? [];
    for (const candidate of candidates) {
        const bucket = WRITEBACK_SIGNAL_SET.has(candidate)
            ? validSignals
            : invalidSignals;
        bucket.push(candidate);
    }
    return { validSignals, invalidSignals };
}
@@ -2,7 +2,7 @@
2
2
  "id": "psyche-ai",
3
3
  "name": "Artificial Psyche",
4
4
  "description": "AI-first subjectivity kernel for agents with continuous appraisal, relation dynamics, and adaptive reply loops",
5
- "version": "11.5.5",
5
+ "version": "11.6.0",
6
6
  "configSchema": {
7
7
  "type": "object",
8
8
  "additionalProperties": false,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "psyche-ai",
3
- "version": "11.5.5",
3
+ "version": "11.6.0",
4
4
  "description": "AI-first subjectivity kernel for agents with continuous appraisal, relation dynamics, and adaptive reply loops",
5
5
  "mcpName": "io.github.Shangri-la-0428/psyche-ai",
6
6
  "type": "module",