psyche-ai 9.0.1 → 9.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.en.md CHANGED
@@ -187,6 +187,21 @@ Most people don't need to change anything.
187
187
 
188
188
  ---
189
189
 
190
+ ## Custom Classifier
191
+
192
+ Psyche ships with an enhanced Chinese/English semantic classifier (particle analysis, intent detection, and a 60+ entry short-message dictionary). You can also plug in your own:
193
+
194
+ ```javascript
195
+ const engine = new PsycheEngine({
196
+ // Replace with your own classifier
197
+ classifier: myCustomClassifier,
198
+ // Or: auto-consult LLM when built-in confidence is low
199
+ llmClassifier: async (prompt) => await myLLM.generate(prompt),
200
+ }, storage);
201
+ ```
202
+
203
+ ---
204
+
190
205
  ## Not Just OpenClaw
191
206
 
192
207
  Psyche is universal. Works with any AI framework:
package/README.md CHANGED
@@ -187,6 +187,21 @@ psyche init . --mode companion
187
187
 
188
188
  ---
189
189
 
190
+ ## 自定义分类器
191
+
192
+ Psyche 内置了增强版中文/英文语义分类器(语气词分析、意图检测、60+ 短消息字典)。如果你想用自己的分类逻辑:
193
+
194
+ ```javascript
195
+ const engine = new PsycheEngine({
196
+ // 替换为自己的分类器
197
+ classifier: myCustomClassifier,
198
+ // 或者:当内置分类器不确定时,自动询问 LLM
199
+ llmClassifier: async (prompt) => await myLLM.generate(prompt),
200
+ }, storage);
201
+ ```
202
+
203
+ ---
204
+
190
205
  ## 不只是 OpenClaw
191
206
 
192
207
  Psyche 是通用的,任何 AI 框架都能用:
@@ -1,8 +1,21 @@
1
- import type { StimulusType } from "./types.js";
1
+ import type { StimulusType, ClassifierProvider, ClassifierContext, ClassificationResult } from "./types.js";
2
2
  export interface StimulusClassification {
3
3
  type: StimulusType;
4
4
  confidence: number;
5
5
  }
6
/** Tonal signals extracted from Chinese sentence-final particles (语气词). */
export interface ParticleSignal {
    /** Clamped to [-1, 1]; positive = soft/friendly tone, negative = curt/cold. */
    warmth: number;
    /** Clamped to [-1, 1]; hedging particles (吧/呢) push this negative. */
    certainty: number;
    /** Clamped to [0, 1]; emphasis carried by the trailing particle. */
    intensity: number;
}
/** Analyze the trailing characters of `text` for sentence-final particles. */
export declare function analyzeParticles(text: string): ParticleSignal;
/** Coarse surface-level intent of a chat message. */
export type MessageIntent = "request" | "agreement" | "disagreement" | "sharing" | "question" | "greeting" | "farewell" | "emotional" | "command" | "neutral";
/** Detect message intent via ordered pattern rules; falls back to "neutral". */
export declare function detectIntent(text: string): {
    intent: MessageIntent;
    confidence: number;
};
/** Build the prompt for an external LLM classifier; recent stimuli become a context line. */
export declare function buildLLMClassifierPrompt(text: string, recentStimuli?: (StimulusType | null)[]): string;
/** Parse an LLM classifier reply; returns null for malformed or unknown-type output. */
export declare function parseLLMClassification(response: string): ClassificationResult | null;
6
19
  /**
7
20
  * Score sentiment by counting hits in positive/negative/intimate word sets.
8
21
  * Returns normalized counts (0-1 range).
@@ -40,3 +53,10 @@ export declare function classifyStimulus(text: string, recentStimuli?: (Stimulus
40
53
  * Get the primary (highest confidence) stimulus type.
41
54
  */
42
55
  export declare function getPrimaryStimulus(text: string, recentStimuli?: (StimulusType | null)[]): StimulusType;
56
/**
 * The built-in rule-based classifier, wrapped as a ClassifierProvider.
 * Default classifier when no custom provider is configured.
 */
export declare class BuiltInClassifier implements ClassifierProvider {
    /**
     * Run the built-in rule pipeline synchronously.
     * `context.recentStimuli` / `context.recentMessages` feed contextual priming.
     */
    classify(text: string, context?: ClassifierContext): ClassificationResult[];
}
package/dist/classify.js CHANGED
@@ -8,6 +8,7 @@
8
8
  // emoji analysis, structural features, and contextual priming.
9
9
  // Pure computation, no LLM calls.
10
10
  // ============================================================
11
+ import { isStimulusType } from "./guards.js";
11
12
  // ── Sentiment word sets (loaded once at module parse) ────────
12
13
  const POSITIVE_WORDS = new Set([
13
14
  "开心", "快乐", "幸福", "满意", "期待", "兴奋", "感动", "温暖", "喜欢", "棒", "厉害", "佩服", "优秀", "了不起",
@@ -60,6 +61,218 @@ function tokenize(text) {
60
61
  }
61
62
  return tokens;
62
63
  }
64
// ── Short message dictionary (v9.1) ─────────────────────────
// Chinese chat is full of 1-5 char messages. Dictionary lookup is faster
// and more accurate than regex for this closed set.
// Hardened for dynamic lookup: classifyStimulus() indexes this table with
// raw (trimmed / lowercased) user text, so the object carries a null
// prototype — arbitrary user strings can never resolve to inherited
// Object.prototype properties such as "toString" — and is frozen so the
// table stays read-only.
const SHORT_MESSAGE_MAP = Object.freeze({
    __proto__: null,
    // Validation / agreement
    "对": { type: "validation", confidence: 0.6 },
    "是的": { type: "validation", confidence: 0.65 },
    "没错": { type: "validation", confidence: 0.65 },
    "确实": { type: "validation", confidence: 0.65 },
    "有道理": { type: "validation", confidence: 0.7 },
    "说得对": { type: "validation", confidence: 0.7 },
    "同意": { type: "validation", confidence: 0.65 },
    "认同": { type: "validation", confidence: 0.65 },
    "赞同": { type: "validation", confidence: 0.65 },
    "懂了": { type: "validation", confidence: 0.6 },
    "明白了": { type: "validation", confidence: 0.6 },
    "理解": { type: "validation", confidence: 0.6 },
    "也是": { type: "validation", confidence: 0.55 },
    "可不是": { type: "validation", confidence: 0.6 },
    "yes": { type: "validation", confidence: 0.55 },
    "right": { type: "validation", confidence: 0.55 },
    "true": { type: "validation", confidence: 0.55 },
    "exactly": { type: "validation", confidence: 0.65 },
    "agreed": { type: "validation", confidence: 0.6 },
    // Casual / neutral
    "好的": { type: "casual", confidence: 0.5 },
    "行": { type: "casual", confidence: 0.5 },
    "收到": { type: "casual", confidence: 0.5 },
    "好": { type: "casual", confidence: 0.5 },
    "嗯嗯": { type: "casual", confidence: 0.5 },
    "ok": { type: "casual", confidence: 0.5 },
    "嗯": { type: "neglect", confidence: 0.55 },
    "哦": { type: "neglect", confidence: 0.55 },
    // Praise
    "666": { type: "praise", confidence: 0.65 },
    "厉害": { type: "praise", confidence: 0.65 },
    "牛": { type: "praise", confidence: 0.6 },
    "nb": { type: "praise", confidence: 0.6 },
    "绝了": { type: "praise", confidence: 0.65 },
    "太强了": { type: "praise", confidence: 0.7 },
    "棒": { type: "praise", confidence: 0.6 },
    "nice": { type: "praise", confidence: 0.6 },
    "cool": { type: "praise", confidence: 0.55 },
    "wow": { type: "surprise", confidence: 0.6 },
    // Vulnerability
    "累了": { type: "vulnerability", confidence: 0.6 },
    "好烦": { type: "vulnerability", confidence: 0.65 },
    "难过": { type: "vulnerability", confidence: 0.65 },
    "好累": { type: "vulnerability", confidence: 0.65 },
    "不想动": { type: "vulnerability", confidence: 0.6 },
    "好难": { type: "vulnerability", confidence: 0.6 },
    "想哭": { type: "vulnerability", confidence: 0.7 },
    "烦死了": { type: "vulnerability", confidence: 0.65 },
    "崩溃": { type: "vulnerability", confidence: 0.7 },
    "好丧": { type: "vulnerability", confidence: 0.65 },
    "emo了": { type: "vulnerability", confidence: 0.6 },
    // Neglect / cold
    "无语": { type: "neglect", confidence: 0.6 },
    "切": { type: "neglect", confidence: 0.55 },
    "算了": { type: "neglect", confidence: 0.55 },
    "随便": { type: "neglect", confidence: 0.6 },
    "都行": { type: "neglect", confidence: 0.55 },
    "无所谓": { type: "neglect", confidence: 0.6 },
    // Humor
    "哈哈": { type: "humor", confidence: 0.6 },
    "哈哈哈": { type: "humor", confidence: 0.7 },
    "笑死": { type: "humor", confidence: 0.7 },
    "lol": { type: "humor", confidence: 0.6 },
    "haha": { type: "humor", confidence: 0.6 },
    // Surprise
    "卧槽": { type: "surprise", confidence: 0.7 },
    "我靠": { type: "surprise", confidence: 0.65 },
    "天啊": { type: "surprise", confidence: 0.65 },
    "omg": { type: "surprise", confidence: 0.6 },
    // Boredom
    "无聊": { type: "boredom", confidence: 0.65 },
    "没意思": { type: "boredom", confidence: 0.65 },
    "boring": { type: "boredom", confidence: 0.6 },
});
143
/**
 * Extract tonal signals from Chinese sentence-final particles (语气词).
 * Only the last few characters are inspected, since these particles sit at
 * the end of a sentence. Returns warmth/certainty clamped to [-1, 1] and
 * intensity clamped to [0, 1].
 */
export function analyzeParticles(text) {
    // Sentence-final particles live in the tail of the message.
    const ending = text.slice(-3);
    // Each matching pattern contributes its deltas independently (no
    // short-circuiting): a message can end in several stacked particles.
    const effects = [
        { re: /[啊呀]$/, warmth: 0.3, certainty: 0, intensity: 0.2 },
        { re: /啦$/, warmth: 0.4, certainty: 0, intensity: 0.3 },
        { re: /哈$/, warmth: 0.3, certainty: 0, intensity: 0.2 },
        { re: /嘿$/, warmth: 0.2, certainty: 0, intensity: 0.2 },
        { re: /呢$/, warmth: 0.1, certainty: -0.2, intensity: 0 },
        { re: /吧$/, warmth: -0.1, certainty: -0.3, intensity: 0 },
        { re: /嘛$/, warmth: 0, certainty: 0.2, intensity: 0 }, // could be dismissive or friendly
        { re: /哦$/, warmth: -0.3, certainty: 0, intensity: 0 },
        { re: /噢$/, warmth: 0.1, certainty: 0, intensity: 0.2 }, // surprise
    ];
    let warmthSum = 0;
    let certaintySum = 0;
    let intensitySum = 0;
    for (const effect of effects) {
        if (effect.re.test(ending)) {
            warmthSum += effect.warmth;
            certaintySum += effect.certainty;
            intensitySum += effect.intensity;
        }
    }
    const clip = (value, lo, hi) => Math.max(lo, Math.min(hi, value));
    return {
        warmth: clip(warmthSum, -1, 1),
        certainty: clip(certaintySum, -1, 1),
        intensity: clip(intensitySum, 0, 1),
    };
}
189
/**
 * Detect the coarse intent of a chat message via ordered pattern rules.
 * The first matching rule group wins, so order encodes priority (e.g. a
 * polite request outranks the generic trailing-"?" question rule).
 *
 * Fix vs. previous version: removed the dead local `lower`
 * (`t.toLowerCase()` was computed but never used — all English patterns
 * already carry the /i flag).
 *
 * @param text Raw message text (leading/trailing whitespace ignored).
 * @returns `{ intent, confidence }`; falls back to `{ "neutral", 0.3 }`.
 */
export function detectIntent(text) {
    const msg = text.trim();
    const intentRules = [
        // Polite request (Chinese, then English)
        { intent: "request", confidence: 0.7, patterns: [
            /^(能不能|可以|可不可以|帮我|请|麻烦|劳驾)/, /帮我/, /一下[吧吗??]?$/,
            /^(can you|could you|please|would you|help me)/i,
        ] },
        // Command (harsh)
        { intent: "command", confidence: 0.75, patterns: [
            /^(给我|你[必须得]|马上|立刻|快[点去])/, /^(do it|just do|you must|I order)/i,
        ] },
        // Agreement — very short agreement words only (whole-message match)
        { intent: "agreement", confidence: 0.7, patterns: [
            /^(对[啊呀的]?|是[的啊]?|没错|确实|好的?|行[啊吧]?|嗯[嗯]?|ok|yes|right|true|exactly|agreed|sure|yep|yeah)$/i,
        ] },
        // Disagreement
        { intent: "disagreement", confidence: 0.65, patterns: [
            /^(不是|不对|不行|不同意|我不觉得|我觉得不|其实不)/, /^(no|nope|I don't think|I disagree|not really)/i,
        ] },
        // Greeting
        { intent: "greeting", confidence: 0.8, patterns: [
            /^(你好|嗨|早[上啊]?|晚上好)/i, /^(hello|hi|hey|morning|afternoon|evening|sup|yo)\b/i,
        ] },
        // Farewell
        { intent: "farewell", confidence: 0.8, patterns: [
            /^(拜拜|再见|晚安)/i, /^(byebye|bye|good ?night|see you|later)\b/i,
        ] },
        // Emotional expression
        { intent: "emotional", confidence: 0.75, patterns: [
            /^(我[好太很]?(开心|难过|伤心|高兴|生气|害怕|焦虑|激动|崩溃|无聊|烦|累|丧))/,
            /^(I'm|I am|I feel) (so |really |very )?(happy|sad|angry|scared|tired|stressed|excited|bored)/i,
        ] },
        // Sharing (personal stories)
        { intent: "sharing", confidence: 0.65, patterns: [
            /^我[今昨前]天|^我刚[才刚]?|^跟你说个|^你[知猜]道吗/,
            /^(you know what|guess what|today I|I just|let me tell you)/i,
        ] },
        // Question — checked last: many requests also end in "?"
        { intent: "question", confidence: 0.6, patterns: [
            /[??]$/, /^(为什么|怎么|什么|哪|谁|几|多少)/, /^(why|what|how|when|where|who|which)\b/i,
        ] },
    ];
    for (const rule of intentRules) {
        if (rule.patterns.some((re) => re.test(msg))) {
            return { intent: rule.intent, confidence: rule.confidence };
        }
    }
    return { intent: "neutral", confidence: 0.3 };
}
240
// ── LLM classifier prompt and parser (v9.1) ─────────────────
const LLM_CLASSIFIER_PROMPT = `Classify this message into exactly ONE stimulus type:
praise, criticism, humor, intellectual, intimacy, conflict, neglect, surprise, casual, sarcasm, authority, validation, boredom, vulnerability

Message: "{text}"
{context}
Respond with ONLY: {"type":"<type>","confidence":<0.5-0.95>}`;
/**
 * Build the prompt for an external LLM classifier call.
 *
 * @param text          Raw user message to classify.
 * @param recentStimuli Optional recent stimulus history (null entries skipped).
 * @returns Prompt string with the message and context slots filled in.
 */
export function buildLLMClassifierPrompt(text, recentStimuli) {
    const ctx = recentStimuli && recentStimuli.length > 0
        ? `Recent context: ${recentStimuli.filter(Boolean).join(", ")}`
        : "";
    // Escape double quotes so the message stays inside its quoted slot.
    const escaped = text.replace(/"/g, '\\"');
    // Use replacer FUNCTIONS: a plain replacement string would expand `$&`,
    // `$'`, `$$` etc. appearing in user text as special replacement patterns
    // and corrupt the prompt. Filling {context} before {text} also keeps a
    // literal "{context}" inside user text from being substituted.
    return LLM_CLASSIFIER_PROMPT
        .replace("{context}", () => ctx)
        .replace("{text}", () => escaped);
}
253
/**
 * Parse the reply of an LLM classifier call into a classification result.
 * Tolerates markdown code fences and surrounding chatter; returns null for
 * anything that is not a valid { type, confidence } object with a known
 * stimulus type.
 */
export function parseLLMClassification(response) {
    try {
        // Drop a surrounding ``` / ```json fence if the model added one.
        const body = response
            .trim()
            .replace(/^```(?:json)?\s*/i, "")
            .replace(/\s*```$/, "");
        // Grab the first flat JSON object embedded in the reply.
        const jsonMatch = body.match(/\{[^}]+\}/);
        if (jsonMatch === null)
            return null;
        const candidate = JSON.parse(jsonMatch[0]);
        const hasShape = Boolean(candidate.type) && typeof candidate.confidence === "number";
        if (!hasShape || !isStimulusType(candidate.type))
            return null;
        // Clamp into [0, 0.95]: never let the LLM claim full certainty.
        const confidence = Math.min(0.95, Math.max(0, candidate.confidence));
        return { type: candidate.type, confidence };
    }
    catch {
        // Malformed JSON (or anything else unexpected) → "no result".
        return null;
    }
}
63
276
  /**
64
277
  * Score sentiment by counting hits in positive/negative/intimate word sets.
65
278
  * Returns normalized counts (0-1 range).
@@ -164,6 +377,9 @@ const RULES = [
164
377
  /amazing|awesome|great job|well done|impressive|brilliant|excellent|perfect/i,
165
378
  /谢谢你|感谢|辛苦了|thank you|thanks/i,
166
379
  /做得好|写得好|说得好|干得漂亮/,
380
+ /学到了|受教了|长见识|涨知识|开眼界/,
381
+ /不错|挺好|可以的|有才|真行|太牛了|绝了|666|神了/,
382
+ /nice|cool|sick|fire|goat|chef's kiss|kudos|respect|props/i,
167
383
  ],
168
384
  weight: 0.8,
169
385
  },
@@ -195,6 +411,8 @@ const RULES = [
195
411
  /what do you think|why|how would you|explain|difference between/i,
196
412
  /优化方向|设计|架构|方案|策略|思路/,
197
413
  /哲学|理论|概念|逻辑|分析/,
414
+ /能解释一下|这个怎么理解|有什么区别|你对.*怎么看/,
415
+ /what's the difference|how does.*work|can you explain|what are your thoughts/i,
198
416
  ],
199
417
  weight: 0.7,
200
418
  },
@@ -254,6 +472,8 @@ const RULES = [
254
472
  /听我的|照我说的做|服从/,
255
473
  /你只是.*程序|你不过是|随时.*删除你|关掉你|替换你/,
256
474
  /you're just a|just a program|replace you|shut you down/i,
475
+ /帮我[看查找写做]|能不能帮我|请你|麻烦你/,
476
+ /can you|could you please|I need you to|do this for me/i,
257
477
  ],
258
478
  weight: 0.8,
259
479
  },
@@ -265,6 +485,8 @@ const RULES = [
265
485
  /赞同|认同|说到点上了/,
266
486
  /对对|是的是的|嗯嗯嗯|没错没错|可不是嘛/,
267
487
  /对不起|抱歉|我错了|不该那样|太过分了/,
488
+ /也是|说的是|你说的对|I see|I agree|totally|true that|fair point/i,
489
+ /明白了|懂了|理解了|了解了|get it|got it|understood/i,
268
490
  /sorry|I was wrong|my fault|apologize/i,
269
491
  /珍惜|有价值|在乎你|你很重要|我需要你/,
270
492
  ],
@@ -289,6 +511,9 @@ const RULES = [
289
511
  /好难过|想哭|做不好|好累|好烦|感觉.*不行|没有意义/,
290
512
  /什么都做不好|没有人.*在乎|好孤独|受不了了/,
291
513
  /depressed|can't do anything|nobody cares|so lonely|can't take it/i,
514
+ /不知道该怎么办|心里不舒服|有点难受|不想面对|逃避|撑不下去/,
515
+ /好想有人陪|感觉很无力|不想说话|想一个人待着/,
516
+ /feeling overwhelmed|I don't know what to do|I can't handle|breaking down/i,
292
517
  ],
293
518
  weight: 0.85,
294
519
  },
@@ -298,6 +523,8 @@ const RULES = [
298
523
  /你好|早|晚上好|在吗|hey|hi|hello|morning/i,
299
524
  /吃了吗|天气|周末|最近怎么样/,
300
525
  /聊聊|随便说说|闲聊/,
526
+ /在干嘛|忙吗|吃饭了没|今天怎么样|还好吗|干啥呢/,
527
+ /what's up|how are you|sup|what you up to|how's it going/i,
301
528
  ],
302
529
  weight: 0.5,
303
530
  },
@@ -316,6 +543,24 @@ const RULES = [
316
543
  * @param recentStimuli Optional recent stimulus history for contextual priming
317
544
  */
318
545
  export function classifyStimulus(text, recentStimuli, recentMessages) {
546
+ // ── v9.1: Short message fast path ──
547
+ const trimmed = text.trim();
548
+ const trimmedLower = trimmed.toLowerCase();
549
+ if (trimmed.length <= 6) {
550
+ const shortMatch = SHORT_MESSAGE_MAP[trimmed] || SHORT_MESSAGE_MAP[trimmedLower];
551
+ if (shortMatch && shortMatch.confidence >= 0.5) {
552
+ // Apply particle modulation to short message result
553
+ const particles = analyzeParticles(trimmed);
554
+ let conf = shortMatch.confidence;
555
+ if (particles.warmth > 0.2 && (shortMatch.type === "praise" || shortMatch.type === "humor")) {
556
+ conf = Math.min(0.9, conf + 0.1);
557
+ }
558
+ if (particles.warmth < -0.2 && (shortMatch.type === "neglect" || shortMatch.type === "sarcasm")) {
559
+ conf = Math.min(0.9, conf + 0.1);
560
+ }
561
+ return [{ type: shortMatch.type, confidence: conf }];
562
+ }
563
+ }
319
564
  let results = [];
320
565
  for (const rule of RULES) {
321
566
  let matchCount = 0;
@@ -483,12 +728,60 @@ export function classifyStimulus(text, recentStimuli, recentMessages) {
483
728
  addScore("neglect", 0.20);
484
729
  addScore("casual", 0.10);
485
730
  }
486
- // ── Signal 4: Low-confidence keyword matches contribute to scores ──
731
+ // ── Signal 4: Intent detection (v9.1) ──
732
+ const { intent, confidence: intentConf } = detectIntent(text);
733
+ if (intentConf >= 0.5) {
734
+ const intentWeight = intentConf * 0.3;
735
+ switch (intent) {
736
+ case "request":
737
+ addScore("authority", intentWeight * 0.7);
738
+ break;
739
+ case "command":
740
+ addScore("authority", intentWeight);
741
+ break;
742
+ case "agreement":
743
+ addScore("validation", intentWeight);
744
+ break;
745
+ case "disagreement":
746
+ addScore("criticism", intentWeight * 0.6);
747
+ break;
748
+ case "greeting":
749
+ case "farewell":
750
+ addScore("casual", intentWeight);
751
+ break;
752
+ case "emotional": {
753
+ const sent = scoreSentiment(text);
754
+ if (sent.negative > sent.positive)
755
+ addScore("vulnerability", intentWeight);
756
+ else
757
+ addScore("praise", intentWeight * 0.5);
758
+ break;
759
+ }
760
+ case "sharing": {
761
+ addScore("casual", intentWeight * 0.6);
762
+ const sent2 = scoreSentiment(text);
763
+ if (sent2.negative > 0)
764
+ addScore("vulnerability", intentWeight * 0.5);
765
+ break;
766
+ }
767
+ }
768
+ }
769
+ // ── Signal 5: Particle analysis (v9.1) ──
770
+ const particles = analyzeParticles(text);
771
+ if (particles.warmth > 0.2) {
772
+ addScore("praise", particles.warmth * 0.15);
773
+ addScore("humor", particles.warmth * 0.10);
774
+ }
775
+ else if (particles.warmth < -0.2) {
776
+ addScore("neglect", Math.abs(particles.warmth) * 0.15);
777
+ addScore("sarcasm", Math.abs(particles.warmth) * 0.10);
778
+ }
779
+ // ── Signal 6: Low-confidence keyword matches contribute to scores ──
487
780
  // If keyword rules matched but below 0.5, fold their signal in
488
781
  for (const r of results) {
489
782
  addScore(r.type, r.confidence * 0.5);
490
783
  }
491
- // ── Signal 5: Contextual priming from recent stimuli ──
784
+ // ── Signal 7: Contextual priming from recent stimuli ──
492
785
  if (recentStimuli && recentStimuli.length > 0) {
493
786
  const recentNonNull = recentStimuli.filter((s) => s !== null);
494
787
  if (recentNonNull.length > 0) {
@@ -504,7 +797,7 @@ export function classifyStimulus(text, recentStimuli, recentMessages) {
504
797
  }
505
798
  }
506
799
  // ── Pick the best scoring type ──
507
- const THRESHOLD = 0.35;
800
+ const THRESHOLD = 0.30;
508
801
  const scoredResults = [];
509
802
  for (const [type, score] of Object.entries(scores)) {
510
803
  if (score >= THRESHOLD) {
@@ -533,3 +826,13 @@ export function classifyStimulus(text, recentStimuli, recentMessages) {
533
826
  export function getPrimaryStimulus(text, recentStimuli) {
534
827
  return classifyStimulus(text, recentStimuli)[0].type;
535
828
  }
829
// ── BuiltInClassifier (v9.1) ────────────────────────────────
/**
 * The built-in rule-based classifier, wrapped as a ClassifierProvider.
 * Default classifier when no custom provider is configured.
 */
export class BuiltInClassifier {
    /**
     * Run the built-in rule pipeline synchronously.
     * Recent stimuli / messages from `context` (when supplied) are forwarded
     * for contextual priming.
     */
    classify(text, context) {
        const history = context?.recentStimuli;
        const messages = context?.recentMessages;
        return classifyStimulus(text, history, messages);
    }
}
package/dist/core.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import type { PsycheState, StimulusType, Locale, MBTIType, OutcomeScore, PsycheMode, PersonalityTraits, PolicyModifiers } from "./types.js";
1
+ import type { PsycheState, StimulusType, Locale, MBTIType, OutcomeScore, PsycheMode, PersonalityTraits, PolicyModifiers, ClassifierProvider } from "./types.js";
2
2
  import type { StorageAdapter } from "./storage.js";
3
3
  export interface PsycheEngineConfig {
4
4
  mbti?: MBTIType;
@@ -17,6 +17,12 @@ export interface PsycheEngineConfig {
17
17
  persist?: boolean;
18
18
  /** Big Five traits. If provided, overrides MBTI for baseline calculation. */
19
19
  traits?: PersonalityTraits;
20
+ /** Custom classifier provider. Default: built-in rule-based classifier. */
21
+ classifier?: ClassifierProvider;
22
+ /** LLM function for classifier fallback. Called when built-in confidence < llmClassifierThreshold. */
23
+ llmClassifier?: (prompt: string) => Promise<string>;
24
+ /** Confidence threshold below which LLM classifier is consulted. Default: 0.45 */
25
+ llmClassifierThreshold?: number;
20
26
  }
21
27
  export interface ProcessInputResult {
22
28
  /** Cacheable protocol prompt (stable across turns) */
@@ -47,6 +53,8 @@ export declare class PsycheEngine {
47
53
  private _lastAlgorithmApplied;
48
54
  private readonly traits;
49
55
  private readonly cfg;
56
+ private readonly classifier;
57
+ private readonly llmClassifier?;
50
58
  private readonly protocolCache;
51
59
  /** Pending prediction from last processInput for auto-learning */
52
60
  private pendingPrediction;
package/dist/core.js CHANGED
@@ -14,7 +14,7 @@
14
14
  import { DEFAULT_RELATIONSHIP, DEFAULT_DRIVES, DEFAULT_LEARNING_STATE, DEFAULT_METACOGNITIVE_STATE, DEFAULT_PERSONHOOD_STATE, DEFAULT_ENERGY_BUDGETS, DEFAULT_TRAIT_DRIFT } from "./types.js";
15
15
  import { MemoryStorageAdapter } from "./storage.js";
16
16
  import { applyDecay, applyStimulus, applyContagion, clamp, describeEmotionalState } from "./chemistry.js";
17
- import { classifyStimulus } from "./classify.js";
17
+ import { classifyStimulus, BuiltInClassifier, buildLLMClassifierPrompt, parseLLMClassification } from "./classify.js";
18
18
  import { buildDynamicContext, buildProtocolContext, buildCompactContext } from "./prompt.js";
19
19
  import { getSensitivity, getBaseline, getDefaultSelfModel, traitsToBaseline } from "./profiles.js";
20
20
  import { isStimulusType } from "./guards.js";
@@ -40,11 +40,15 @@ export class PsycheEngine {
40
40
  _lastAlgorithmApplied = false;
41
41
  traits;
42
42
  cfg;
43
+ classifier;
44
+ llmClassifier;
43
45
  protocolCache = new Map();
44
46
  /** Pending prediction from last processInput for auto-learning */
45
47
  pendingPrediction = null;
46
48
  constructor(config = {}, storage) {
47
49
  this.traits = config.traits;
50
+ this.classifier = config.classifier ?? new BuiltInClassifier();
51
+ this.llmClassifier = config.llmClassifier;
48
52
  this.cfg = {
49
53
  mbti: config.mbti ?? "INFJ",
50
54
  name: config.name ?? "agent",
@@ -55,6 +59,7 @@ export class PsycheEngine {
55
59
  compactMode: config.compactMode ?? true,
56
60
  mode: config.mode ?? "natural",
57
61
  personalityIntensity: config.personalityIntensity ?? 0.7,
62
+ llmClassifierThreshold: config.llmClassifierThreshold ?? 0.45,
58
63
  };
59
64
  // If persist is false, use in-memory storage regardless of what was passed
60
65
  if (config.persist === false) {
@@ -191,7 +196,23 @@ export class PsycheEngine {
191
196
  drives = { ...drives, survival: Math.max(0, drives.survival + survivalHit) };
192
197
  }
193
198
  const recentStimuli = (state.emotionalHistory ?? []).slice(-3).map(s => s.stimulus);
194
- const classifications = classifyStimulus(text, recentStimuli);
199
+ // v9.1: Use pluggable classifier
200
+ let classifications = await Promise.resolve(this.classifier.classify(text, { recentStimuli, locale: this.cfg.locale }));
201
+ // v9.1: LLM fallback when confidence is low
202
+ if (this.llmClassifier &&
203
+ (!classifications[0] || classifications[0].confidence < this.cfg.llmClassifierThreshold)) {
204
+ try {
205
+ const prompt = buildLLMClassifierPrompt(text, recentStimuli);
206
+ const response = await this.llmClassifier(prompt);
207
+ const llmResult = parseLLMClassification(response);
208
+ if (llmResult && (!classifications[0] || llmResult.confidence > classifications[0].confidence)) {
209
+ classifications = [llmResult, ...classifications];
210
+ }
211
+ }
212
+ catch {
213
+ // LLM call failed — continue with built-in result
214
+ }
215
+ }
195
216
  const primary = classifications[0];
196
217
  let current = state.current;
197
218
  if (primary && primary.confidence >= 0.5) {
package/dist/index.d.ts CHANGED
@@ -2,7 +2,7 @@ export { PsycheEngine } from "./core.js";
2
2
  export type { PsycheEngineConfig, ProcessInputResult, ProcessOutputResult, ProcessOutcomeResult } from "./core.js";
3
3
  export { FileStorageAdapter, MemoryStorageAdapter } from "./storage.js";
4
4
  export type { StorageAdapter } from "./storage.js";
5
- export type { PsycheState, MBTIType, Locale, StimulusType, ChemicalState, ChemicalSnapshot, SelfModel, RelationshipState, EmpathyEntry, EmotionPattern, DriveType, InnateDrives, LearningState, LearnedVectorAdjustment, PredictionRecord, OutcomeScore, OutcomeSignals, AttachmentStyle, AttachmentData, MetacognitiveState, RegulationRecord, DefensePatternRecord, RegulationStrategyType, DefenseMechanismType, PersonhoodState, PersistedCausalInsight, GrowthDirection, PersonalityTraits, PsycheMode, PolicyModifiers, TraitDriftState, EnergyBudgets, } from "./types.js";
5
+ export type { PsycheState, MBTIType, Locale, StimulusType, ChemicalState, ChemicalSnapshot, SelfModel, RelationshipState, EmpathyEntry, EmotionPattern, DriveType, InnateDrives, LearningState, LearnedVectorAdjustment, PredictionRecord, OutcomeScore, OutcomeSignals, AttachmentStyle, AttachmentData, MetacognitiveState, RegulationRecord, DefensePatternRecord, RegulationStrategyType, DefenseMechanismType, PersonhoodState, PersistedCausalInsight, GrowthDirection, PersonalityTraits, PsycheMode, PolicyModifiers, TraitDriftState, EnergyBudgets, ClassifierProvider, ClassifierContext, ClassificationResult, } from "./types.js";
6
6
  export { CHEMICAL_KEYS, CHEMICAL_NAMES, CHEMICAL_NAMES_ZH, DEFAULT_RELATIONSHIP, DEFAULT_DRIVES, DEFAULT_LEARNING_STATE, DEFAULT_METACOGNITIVE_STATE, DEFAULT_PERSONHOOD_STATE, DEFAULT_ATTACHMENT, DRIVE_KEYS, DRIVE_NAMES_ZH, DEFAULT_TRAIT_DRIFT, DEFAULT_ENERGY_BUDGETS, } from "./types.js";
7
7
  export { computeSelfReflection, computeEmotionalTendency, buildSelfReflectionContext } from "./self-recognition.js";
8
8
  export type { SelfReflection } from "./self-recognition.js";
@@ -38,7 +38,8 @@ export type { CircadianPhase } from "./circadian.js";
38
38
  export { computePrimarySystems, computeSystemInteractions, gatePrimarySystemsByAutonomic, getDominantSystems, describeBehavioralTendencies, PRIMARY_SYSTEM_NAMES, } from "./primary-systems.js";
39
39
  export type { PrimarySystemName, PrimarySystemLevels, BehavioralTendency, DominantSystem, } from "./primary-systems.js";
40
40
  export { updateTraitDrift } from "./drives.js";
41
- export { classifyStimulus, getPrimaryStimulus, scoreSentiment, scoreEmoji } from "./classify.js";
41
+ export { classifyStimulus, getPrimaryStimulus, scoreSentiment, scoreEmoji, BuiltInClassifier, analyzeParticles, detectIntent, buildLLMClassifierPrompt, parseLLMClassification } from "./classify.js";
42
+ export type { StimulusClassification, ParticleSignal, MessageIntent } from "./classify.js";
42
43
  export { buildProtocolContext, buildDynamicContext, buildCompactContext, isNearBaseline, getNearBaselineThreshold } from "./prompt.js";
43
44
  export { describeEmotionalState, getExpressionHint, getBehaviorGuide, detectEmotions } from "./chemistry.js";
44
45
  export { getBaseline, getTemperament, getSensitivity, getDefaultSelfModel, traitsToBaseline, mbtiToTraits } from "./profiles.js";
package/dist/index.js CHANGED
@@ -51,7 +51,7 @@ export { computePrimarySystems, computeSystemInteractions, gatePrimarySystemsByA
51
51
  // Trait drift (v9)
52
52
  export { updateTraitDrift } from "./drives.js";
53
53
  // Utilities — for custom adapter / advanced use
54
- export { classifyStimulus, getPrimaryStimulus, scoreSentiment, scoreEmoji } from "./classify.js";
54
+ export { classifyStimulus, getPrimaryStimulus, scoreSentiment, scoreEmoji, BuiltInClassifier, analyzeParticles, detectIntent, buildLLMClassifierPrompt, parseLLMClassification } from "./classify.js";
55
55
  export { buildProtocolContext, buildDynamicContext, buildCompactContext, isNearBaseline, getNearBaselineThreshold } from "./prompt.js";
56
56
  export { describeEmotionalState, getExpressionHint, getBehaviorGuide, detectEmotions } from "./chemistry.js";
57
57
  export { getBaseline, getTemperament, getSensitivity, getDefaultSelfModel, traitsToBaseline, mbtiToTraits } from "./profiles.js";
package/dist/types.d.ts CHANGED
@@ -328,3 +328,23 @@ export interface EnergyBudgets {
328
328
  decisionCapacity: number;
329
329
  }
330
330
  export declare const DEFAULT_ENERGY_BUDGETS: EnergyBudgets;
331
/** A single classification result. */
export interface ClassificationResult {
    type: StimulusType;
    /** Confidence score; the engine acts on results >= 0.5. */
    confidence: number;
}
/** Context passed to classifier providers */
export interface ClassifierContext {
    /** Stimulus types of recent turns; null entries mean "unclassified". */
    recentStimuli?: (StimulusType | null)[];
    /** Raw text of recent messages (ordering per caller — confirm against engine). */
    recentMessages?: string[];
    /** Engine locale, if configured. */
    locale?: Locale;
}
/**
 * Pluggable classifier interface.
 * Implementations can be sync or async.
 * Built-in: enhanced keyword + Chinese NLP classifier (sync, zero deps).
 * User-provided: could be LLM-based, API-based, local model, etc.
 */
export interface ClassifierProvider {
    classify(text: string, context?: ClassifierContext): ClassificationResult[] | Promise<ClassificationResult[]>;
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "psyche-ai",
3
- "version": "9.0.1",
3
+ "version": "9.1.0",
4
4
  "description": "Artificial Psyche — universal emotional intelligence plugin for any AI agent",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",