psyche-ai 10.1.0 → 10.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,7 +1,7 @@
1
1
  # Psyche — 面向智能体的 AI-first 主观性内核
2
2
 
3
3
  [![npm](https://img.shields.io/npm/v/psyche-ai)](https://www.npmjs.com/package/psyche-ai)
4
- [![tests](https://img.shields.io/badge/tests-1405%20passing-brightgreen)]()
4
+ [![tests](https://img.shields.io/badge/tests-1415%20passing-brightgreen)]()
5
5
  [![deps](https://img.shields.io/badge/dependencies-0-blue)]()
6
6
  [![license](https://img.shields.io/badge/license-MIT-yellow)](LICENSE)
7
7
 
@@ -180,6 +180,16 @@ npx psyche-mcp --demo
180
180
 
181
181
  中文版加 `--zh`,自选 MBTI 加 `--mbti INTJ`。
182
182
 
183
+ ### 多 Agent 融合 Demo
184
+
185
+ 两个 agent(Luna ENFP + Kai INTJ)通过 Thronglets 信号互相感知:
186
+
187
+ ```bash
188
+ npm run demo:fusion
189
+ ```
190
+
191
+ Luna 在安慰用户时情绪下沉 → 广播化学态 → Kai 感知到 Luna 的高压力 → 回复变得更温暖。4 轮后,信号感知的 Kai 与无感知的 Kai 化学偏差 Σ|Δ| = 59.2。同一个 INTJ,唯一区别:是否能感知同伴的情绪。
192
+
183
193
  ---
184
194
 
185
195
  ## 30 秒安装
@@ -220,6 +230,14 @@ npm view psyche-ai version
220
230
  - `git` 工作树和本地路径安装不会偷偷改你的代码;脏工作树只会提示手动命令。
221
231
  - OpenClaw 如果是从当前仓库本地加载,升级后记得重启 gateway 以载入新的 `dist/`。
222
232
 
233
+ ## 透明代理(推荐:任意 LLM 一行接入)
234
+
235
+ ```bash
236
+ psyche-proxy --target https://api.openai.com/v1 --name Luna --mbti ENFP
237
+ ```
238
+
239
+ 然后把客户端的 API 地址改成 `http://localhost:3340/v1`。Agent 获得持续主观性,但完全不知道 Psyche 存在。Psyche 像内分泌系统一样在后台观测行为、注入偏置——镜子,不是麦克风。
240
+
223
241
  ## 给本机其他 Agent 的真实验收
224
242
 
225
243
  如果另一个 agent 声称“已经在用 Psyche”,不要接受口头解释,直接让它跑:
@@ -114,6 +114,20 @@ export declare class PsycheClaudeSDK {
114
114
  * Returns null if thronglets is disabled or no processInput has run yet.
115
115
  */
116
116
  getThrongletsSignal(): ThrongletsSignalPayload | null;
117
+ /**
118
+ * Get a natural-language description of the current signal.
119
+ *
120
+ * More effective than raw numbers for LLM injection because it gives
121
+ * the model actionable context rather than requiring it to interpret
122
+ * chemistry values.
123
+ *
124
+ * @example
125
+ * ```ts
126
+ * const desc = psyche.describeThrongletsSignal();
127
+ * // "[ENFP-Luna] 焦虑不安 (语速加快、思维跳跃) — 高压力(CORT:78), 情绪低(HT:37), 深度共情中(OT:77)"
128
+ * ```
129
+ */
130
+ describeThrongletsSignal(): string | null;
117
131
  /**
118
132
  * Get raw Thronglets exports from the most recent turn.
119
133
  */
@@ -25,7 +25,40 @@
25
25
  // The SDK has no middleware interface and hooks cannot modify assistant
26
26
  // output, so processResponse must be called explicitly by the host.
27
27
  // ============================================================
28
+ import { describeEmotionalState } from "../chemistry.js";
28
29
  import { serializeThrongletsExportAsTrace } from "../thronglets-runtime.js";
30
// ── Chemistry description ────────────────────────────────────
const CHEM_THRESHOLDS = {
    high: 70,
    low: 35,
};
/**
 * Summarize notable neurochemistry readings as a short localized phrase list.
 *
 * Each chemical is checked against CHEM_THRESHOLDS (NE uses a stricter
 * hard cutoff of 85) and every hit becomes "label(KEY:value)" with the
 * value rounded to an integer. Hits are joined with ", "; an empty string
 * is returned when nothing stands out.
 *
 * @param c      chemistry snapshot with numeric CORT/HT/OT/DA/NE/END fields
 * @param locale "zh" for Chinese labels; anything else yields English
 * @returns comma-joined highlight string, or "" when all values are mid-range
 */
function describeChemistryHighlights(c, locale) {
    const { high, low } = CHEM_THRESHOLDS;
    // Declarative rule table: [chemical key, triggered?, zh label, en label].
    // Order matters — it fixes the order of phrases in the output.
    const rules = [
        ["CORT", c.CORT >= high, "高压力", "high stress"],
        ["CORT", c.CORT <= low, "放松", "relaxed"],
        ["HT", c.HT <= low, "情绪低", "low mood"],
        ["HT", c.HT >= high, "情绪好", "good mood"],
        ["OT", c.OT >= high, "深度共情中", "deeply empathizing"],
        ["DA", c.DA >= high, "高度投入", "highly engaged"],
        ["DA", c.DA <= low, "动力不足", "low motivation"],
        ["NE", c.NE >= 85, "高度警觉", "highly alert"], // NE deliberately uses a higher bar than CHEM_THRESHOLDS.high
        ["END", c.END >= high, "有韧性", "resilient"],
    ];
    const phrases = [];
    for (const [key, hit, zh, en] of rules) {
        if (!hit)
            continue;
        const label = locale === "zh" ? zh : en;
        phrases.push(`${label}(${key}:${Math.round(c[key])})`);
    }
    return phrases.join(", ");
}
29
62
  // ── Tag stripping ────────────────────────────────────────────
30
63
  const PSYCHE_TAG_RE = /<psyche_update>[\s\S]*?<\/psyche_update>/g;
31
64
  function stripPsycheTags(text) {
@@ -154,6 +187,29 @@ export class PsycheClaudeSDK {
154
187
  message: `DA:${c.DA} HT:${c.HT} CORT:${c.CORT} OT:${c.OT} NE:${c.NE} END:${c.END}`,
155
188
  };
156
189
  }
190
+ /**
191
+ * Get a natural-language description of the current signal.
192
+ *
193
+ * More effective than raw numbers for LLM injection because it gives
194
+ * the model actionable context rather than requiring it to interpret
195
+ * chemistry values.
196
+ *
197
+ * @example
198
+ * ```ts
199
+ * const desc = psyche.describeThrongletsSignal();
200
+ * // "[ENFP-Luna] 焦虑不安 (语速加快、思维跳跃) — 高压力(CORT:78), 情绪低(HT:37), 深度共情中(OT:77)"
201
+ * ```
202
+ */
203
+ describeThrongletsSignal() {
204
+ if (!this.opts.thronglets)
205
+ return null;
206
+ const state = this.engine.getState();
207
+ const c = state.current;
208
+ const locale = this.opts.locale;
209
+ const emotionDesc = describeEmotionalState(c, locale);
210
+ const highlights = describeChemistryHighlights(c, locale);
211
+ return `[${this.opts.agentId}] ${emotionDesc}${highlights ? " — " + highlights : ""}`;
212
+ }
157
213
  /**
158
214
  * Get raw Thronglets exports from the most recent turn.
159
215
  */
@@ -0,0 +1,10 @@
1
#!/usr/bin/env node
import { type Server } from "node:http";
import { PsycheEngine } from "../core.js";
/**
 * Options for the transparent psyche-proxy HTTP server.
 */
export interface ProxyOptions {
    /** Target LLM API base URL, e.g. "https://api.openai.com/v1" */
    target: string;
    /** Local port to bind; the implementation defaults to 3340. */
    port?: number;
    /** Bind address; the implementation defaults to "127.0.0.1". */
    host?: string;
}
/**
 * Create and start a reverse proxy that wraps an OpenAI-compatible chat
 * API with Psyche observation and behavioral-bias injection.
 * Returns the already-listening HTTP server.
 */
export declare function createPsycheProxy(engine: PsycheEngine, opts: ProxyOptions): Server;
@@ -0,0 +1,301 @@
1
+ #!/usr/bin/env node
2
+ // ============================================================
3
+ // psyche-proxy — Transparent reverse proxy
4
+ //
5
+ // Adds persistent subjectivity to any OpenAI-compatible LLM API.
6
+ // The agent never knows Psyche exists. Psyche observes behavior
7
+ // bidirectionally and injects behavioral context only when the
8
+ // internal state deviates from baseline.
9
+ //
10
+ // Architecture:
11
+ // Client → psyche-proxy → Target LLM
12
+ // Client ← psyche-proxy ← Target LLM
13
+ //
14
+ // Usage:
15
+ // psyche-proxy --target https://api.openai.com/v1
16
+ // psyche-proxy -t https://api.x.ai/v1 -n Luna --mbti ENFP
17
+ // psyche-proxy -t http://localhost:11434/v1 -d ./psyche-data
18
+ //
19
+ // Then point any client to http://localhost:3340/v1/chat/completions
20
+ // ============================================================
21
+ import { createServer } from "node:http";
22
+ import { PsycheEngine } from "../core.js";
23
+ import { MemoryStorageAdapter, FileStorageAdapter } from "../storage.js";
24
+ import { isNearBaseline, deriveBehavioralBias } from "../prompt.js";
25
// ── Helpers ─────────────────────────────────────────────────
/**
 * Buffer an entire incoming request body into a single Buffer.
 * Resolves on "end", rejects on "error".
 */
function readBody(req) {
    return new Promise((resolve, reject) => {
        const pieces = [];
        req.on("error", reject);
        req.on("data", (piece) => {
            pieces.push(piece);
        });
        req.on("end", () => {
            resolve(Buffer.concat(pieces));
        });
    });
}
34
/**
 * Return the content of the most recent user message, or null.
 *
 * Only plain-string content counts; structured (content-part array)
 * messages are skipped because the engine observes raw text.
 *
 * @param messages - the request's `messages` array; may be absent on
 *                   malformed bodies
 * @returns the latest user string content, or null when none exists
 */
function lastUserMessage(messages) {
    // FIX: a malformed request body may omit `messages` entirely;
    // previously this threw a TypeError and surfaced as a 502 proxy error.
    if (!Array.isArray(messages))
        return null;
    for (let i = messages.length - 1; i >= 0; i--) {
        if (messages[i].role === "user" && typeof messages[i].content === "string") {
            return messages[i].content;
        }
    }
    return null;
}
42
/**
 * Return a copy of `messages` with the behavioral-bias context merged in.
 *
 * Appends to the first system message when its content is plain text;
 * otherwise prepends a fresh system message. Returns the original array
 * untouched (same reference) when `context` is falsy.
 *
 * @param messages - chat messages (shallow-copied; caller's array is not mutated)
 * @param context  - bias text to inject; falsy means "inject nothing"
 */
function injectBias(messages, context) {
    if (!context)
        return messages;
    const out = messages.map((m) => ({ ...m }));
    const idx = out.findIndex((m) => m.role === "system");
    if (idx >= 0 && (typeof out[idx].content === "string" || out[idx].content == null)) {
        out[idx].content = (out[idx].content ?? "") + "\n\n" + context;
    }
    else {
        // FIX: system content may be a content-part array (multimodal);
        // string-concatenating onto it produced "[object Object]…". Add a
        // separate system message instead of corrupting the existing one.
        out.unshift({ role: "system", content: context });
    }
    return out;
}
55
/** Extract assistant text from a non-streaming OpenAI response. */
function extractAssistantText(body) {
    const content = body?.choices?.[0]?.message?.content;
    return content ?? "";
}
60
/**
 * Extract assistant text from buffered SSE chunks.
 *
 * @param chunks - decoded text chunks in arrival order
 * @returns concatenated delta content, "" when nothing parses
 */
function extractStreamText(chunks) {
    const parts = [];
    // FIX: join all chunks before splitting into lines. A single SSE
    // "data:" line can be split across two network reads; per-chunk
    // splitting silently dropped it as malformed JSON. Also tolerate
    // CRLF line endings from servers that emit "\r\n".
    for (const line of chunks.join("").split(/\r?\n/)) {
        if (!line.startsWith("data: ") || line === "data: [DONE]")
            continue;
        try {
            const d = JSON.parse(line.slice(6));
            const c = d?.choices?.[0]?.delta?.content;
            if (typeof c === "string")
                parts.push(c);
        }
        catch { /* skip malformed chunks */ }
    }
    return parts.join("");
}
78
/**
 * Build safe headers for forwarding, stripping hop-by-hop headers.
 *
 * Drops `host` and `content-length` (recomputed for the rewritten body),
 * plus the RFC 9110 hop-by-hop set — forwarding `connection`/`keep-alive`
 * etc. upstream via fetch is invalid and some servers reject it.
 * Array-valued headers are skipped, matching the original behavior.
 * NOTE(review): keys are compared lowercase — Node lowercases incoming
 * request header names, which the original `k === "host"` check relied on.
 */
function forwardHeaders(req) {
    // FIX: previously only host/content-length/transfer-encoding were
    // stripped; connection-class headers leaked through to the upstream.
    const skip = new Set([
        "host",
        "content-length",
        "transfer-encoding",
        "connection",
        "keep-alive",
        "proxy-connection",
        "te",
        "trailer",
        "upgrade",
    ]);
    const h = {};
    for (const [k, v] of Object.entries(req.headers)) {
        if (skip.has(k) || typeof v !== "string")
            continue;
        h[k] = v;
    }
    return h;
}
89
/** Strip content-encoding/content-length from upstream (fetch auto-decompresses). */
function safeResponseHeaders(headers) {
    const drop = new Set(["content-encoding", "content-length", "transfer-encoding"]);
    const result = {};
    for (const [name, value] of headers.entries()) {
        if (!drop.has(name))
            result[name] = value;
    }
    return result;
}
99
// ── Proxy Server ────────────────────────────────────────────
/**
 * Create and start a transparent reverse proxy around an OpenAI-compatible
 * chat API.
 *
 * POST requests to a /chat/completions path are: observed via
 * engine.processInput, optionally given a behavioral-bias system message
 * (only when the engine state deviates from baseline), forwarded to the
 * target, and the assistant reply is observed via engine.processOutput.
 * Every other route passes through untouched.
 *
 * @param engine - initialized PsycheEngine (project type)
 * @param opts   - { target, port?, host? }; port defaults to 3340,
 *                 host to 127.0.0.1
 * @returns the listening node:http Server
 */
export function createPsycheProxy(engine, opts) {
    const targetBase = opts.target.replace(/\/+$/, "");
    const port = opts.port ?? 3340;
    const host = opts.host ?? "127.0.0.1";
    const locale = (engine.getState().meta.locale ?? "zh");
    const server = createServer(async (req, res) => {
        // Only intercept chat completions; pass everything else through.
        if (req.method !== "POST" || !req.url?.includes("/chat/completions")) {
            return passThrough(req, res, targetBase);
        }
        try {
            const rawBody = await readBody(req);
            const parsed = JSON.parse(rawBody.toString("utf-8"));
            const userMsg = lastUserMessage(parsed.messages);
            const userId = parsed.user ?? undefined;
            // ── 1. Observe input ────────────────────────────
            if (userMsg) {
                await engine.processInput(userMsg, { userId });
            }
            // ── 2. Inject behavioral bias (silent when near baseline) ──
            const state = engine.getState();
            let messages = parsed.messages;
            if (!isNearBaseline(state)) {
                const bias = deriveBehavioralBias(state, locale);
                if (bias) {
                    messages = injectBias(parsed.messages, bias);
                }
            }
            const modifiedBody = JSON.stringify({ ...parsed, messages });
            const headers = forwardHeaders(req);
            // Body was rewritten, so content-length must be recomputed.
            headers["content-length"] = Buffer.byteLength(modifiedBody).toString();
            // ── 3. Forward to target LLM ────────────────────
            const upstream = await fetch(`${targetBase}${req.url}`, {
                method: "POST",
                headers,
                body: modifiedBody,
            });
            // ── 4. Return response + observe output ─────────
            res.writeHead(upstream.status, safeResponseHeaders(upstream.headers));
            if (parsed.stream && upstream.body) {
                // Stream: forward chunks in real-time, buffer for observation
                const reader = upstream.body.getReader();
                const decoder = new TextDecoder();
                const sseChunks = [];
                let done = false;
                while (!done) {
                    const result = await reader.read();
                    done = result.done;
                    if (result.value) {
                        res.write(result.value);
                        sseChunks.push(decoder.decode(result.value, { stream: true }));
                    }
                }
                // FIX: flush the streaming decoder. Without the final
                // decode() call a trailing multi-byte UTF-8 character split
                // across reads was silently dropped from observation.
                sseChunks.push(decoder.decode());
                res.end();
                // Observe output (background — response already sent)
                const text = extractStreamText(sseChunks);
                if (text)
                    engine.processOutput(text, { userId }).catch(() => { });
            }
            else {
                // Non-stream: buffer, send, observe
                const buf = Buffer.from(await upstream.arrayBuffer());
                res.end(buf);
                try {
                    const obj = JSON.parse(buf.toString("utf-8"));
                    const text = extractAssistantText(obj);
                    if (text)
                        engine.processOutput(text, { userId }).catch(() => { });
                }
                catch { /* response not JSON, skip observation */ }
            }
        }
        catch (err) {
            // Headers may already be written if the failure happened
            // mid-stream; writeHead would then throw.
            if (!res.headersSent) {
                res.writeHead(502, { "content-type": "application/json" });
            }
            res.end(JSON.stringify({
                error: { message: `psyche-proxy: ${err.message}`, type: "proxy_error" },
            }));
        }
    });
    server.listen(port, host, () => {
        const name = engine.getState().meta.agentName ?? "agent";
        const mbti = engine.getState().mbti ?? "";
        console.error(`[psyche-proxy] ${name}${mbti ? ` (${mbti})` : ""} → ${targetBase}`);
        console.error(`[psyche-proxy] http://${host}:${port}`);
        console.error(`[psyche-proxy] mode: mirror (observe behavior, inject bias, agent never knows)`);
    });
    return server;
}
190
// ── Pass-through for non-chat endpoints ─────────────────────
/**
 * Forward a non-chat request to the target unchanged and relay the
 * upstream response. GET/HEAD requests are forwarded without a body.
 * Any failure yields a 502 with a short plain-text reason.
 */
async function passThrough(req, res, targetBase) {
    try {
        const rawBody = (req.method !== "GET" && req.method !== "HEAD")
            ? (await readBody(req)).toString("utf-8")
            : undefined;
        const headers = forwardHeaders(req);
        const upstream = await fetch(`${targetBase}${req.url}`, {
            method: req.method,
            headers,
            body: rawBody,
        });
        res.writeHead(upstream.status, safeResponseHeaders(upstream.headers));
        const buf = Buffer.from(await upstream.arrayBuffer());
        res.end(buf);
    }
    catch (err) {
        // FIX: reading the upstream body can fail after writeHead has
        // already run; an unconditional writeHead(502) then threw
        // ERR_HTTP_HEADERS_SENT out of the catch block. Guard like the
        // chat-completions error path does.
        if (!res.headersSent) {
            res.writeHead(502);
        }
        res.end(`psyche-proxy: ${err.message}`);
    }
}
211
// ── CLI ─────────────────────────────────────────────────────
/**
 * CLI entry point: parse flags, construct the engine with the chosen
 * storage backend, and start the proxy server.
 *
 * Exits 1 on missing/invalid required flags; exits 0 after printing help.
 */
async function main() {
    const args = process.argv.slice(2);
    // Quick help
    if (args.includes("--help") || args.includes("-h") || args.length === 0) {
        console.error(`psyche-proxy — transparent subjectivity proxy for any OpenAI-compatible API

Usage:
  psyche-proxy --target <URL> [options]

Options:
  -t, --target <url>   Target LLM API base URL (required)
  -p, --port <n>       Local port (default: 3340)
  -n, --name <name>    Agent name (default: agent)
  --mbti <type>        MBTI personality preset (e.g., ENFP, INTJ)
  --mode <mode>        Operating mode: natural | work | companion
  -l, --locale <loc>   Locale: zh | en (default: zh)
  -d, --dir <path>     Persist state to directory (default: in-memory)
  --host <addr>        Bind address (default: 127.0.0.1)

Examples:
  psyche-proxy -t https://api.openai.com/v1
  psyche-proxy -t https://api.x.ai/v1 -n Luna --mbti ENFP
  psyche-proxy -t http://localhost:11434/v1 -d ./psyche-data

Then point your client to http://localhost:3340/v1 instead of the real API.
The agent gains persistent subjectivity without knowing Psyche exists.`);
        process.exit(args.includes("--help") || args.includes("-h") ? 0 : 1);
    }
    // Parse args — each flag consumes the following token.
    let target = "";
    let port = 3340;
    let hostAddr = "127.0.0.1";
    let dir = "";
    const engineOpts = {};
    for (let i = 0; i < args.length; i++) {
        const a = args[i];
        const next = args[i + 1];
        switch (a) {
            case "-t":
            case "--target":
                target = next ?? "";
                i++;
                break;
            case "-p":
            case "--port":
                port = parseInt(next ?? "3340", 10);
                i++;
                break;
            case "-n":
            case "--name":
                engineOpts.name = next;
                i++;
                break;
            case "--mbti":
                engineOpts.mbti = next?.toUpperCase() ?? undefined;
                i++;
                break;
            case "--mode":
                engineOpts.mode = next ?? "natural";
                i++;
                break;
            case "-l":
            case "--locale":
                engineOpts.locale = next ?? "zh";
                i++;
                break;
            case "-d":
            case "--dir":
                dir = next ?? "";
                i++;
                break;
            case "--host":
                hostAddr = next ?? "127.0.0.1";
                i++;
                break;
        }
    }
    if (!target) {
        console.error("error: --target is required");
        process.exit(1);
    }
    // FIX: reject NaN / out-of-range ports up front instead of letting
    // server.listen() throw later with a less helpful error.
    if (!Number.isInteger(port) || port < 1 || port > 65535) {
        console.error("error: --port must be an integer between 1 and 65535");
        process.exit(1);
    }
    const storage = dir ? new FileStorageAdapter(dir) : new MemoryStorageAdapter();
    const engine = new PsycheEngine(engineOpts, storage);
    await engine.initialize();
    createPsycheProxy(engine, { target, port, host: hostAddr });
}
294
// Only run CLI when executed directly (not when imported as a module).
// FIX: normalize Windows backslash separators before the suffix check —
// process.argv[1] uses "\" on Windows, so the old "/"-only test never
// matched there and the CLI silently did nothing.
const entryPath = process.argv[1]?.replace(/\\/g, "/").replace(/\.ts$/, ".js");
const isCLI = entryPath?.endsWith("/adapters/proxy.js") ?? false;
if (isCLI) {
    main().catch((err) => {
        console.error(err);
        process.exit(1);
    });
}
package/llms.txt CHANGED
@@ -162,9 +162,18 @@ npm install psyche-ai
162
162
  npx psyche-mcp
163
163
  ```
164
164
 
165
+ ## Transparent Proxy (recommended for universal integration)
166
+
167
+ ```bash
168
+ psyche-proxy --target https://api.openai.com/v1 --name Luna --mbti ENFP
169
+ ```
170
+
171
+ Then point any client to `http://localhost:3340/v1`. The agent gains persistent subjectivity without knowing Psyche exists. Psyche observes behavior bidirectionally and injects behavioral bias only when the internal state deviates from baseline. Zero new interfaces for the agent.
172
+
165
173
  ## Adapter Sub-paths
166
174
 
167
175
  - `psyche-ai` — core engine (PsycheEngine, MemoryStorageAdapter)
176
+ - `psyche-ai/proxy` — transparent reverse proxy (agent never knows)
168
177
  - `psyche-ai/claude-sdk` — Claude Agent SDK hooks (PsycheClaudeSDK)
169
178
  - `psyche-ai/mcp` — MCP stdio server
170
179
  - `psyche-ai/vercel-ai` — Vercel AI SDK middleware
@@ -180,6 +189,22 @@ npx psyche-mcp --demo
180
189
 
181
190
  Runs a 6-round scenario showing how continuous criticism collapses serotonin and trust, while later validation triggers endorphin repair. Add `--zh` for Chinese, `--mbti INTJ` for a specific personality.
182
191
 
192
+ ## Multi-Agent Fusion Demo
193
+
194
+ ```bash
195
+ npm run demo:fusion
196
+ ```
197
+
198
+ Two agents (Luna ENFP, Kai INTJ) in a shared emotional field via Thronglets signals. Luna processes emotional conversation, broadcasts chemistry via `signal_post`. Kai senses Luna's state via `substrate_query` — his responses shift toward warmth. 4 rounds, Σ|Δ| = 59.2 chemistry divergence.
199
+
200
+ Real LLM validation (requires API key in `~/.hermes/.env`):
201
+
202
+ ```bash
203
+ node scripts/eval-fusion.js
204
+ ```
205
+
206
+ 6-turn eval with Grok-3. Divergence curve: 5.3 → 15.7 → 35.2 → 36.3. Signal-aware agent is more empathetic but carries higher cortisol (empathy cost).
207
+
183
208
  ## Runtime Proof
184
209
 
185
210
  If an agent claims it is already using Psyche in the current environment, require a real probe:
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "psyche-ai",
3
- "version": "10.1.0",
3
+ "version": "10.2.0",
4
4
  "description": "AI-first subjectivity kernel for agents with continuous appraisal, relation dynamics, and adaptive reply loops",
5
5
  "mcpName": "io.github.Shangri-la-0428/psyche-ai",
6
6
  "type": "module",
@@ -34,12 +34,17 @@
34
34
  "./claude-sdk": {
35
35
  "types": "./dist/adapters/claude-sdk.d.ts",
36
36
  "default": "./dist/adapters/claude-sdk.js"
37
+ },
38
+ "./proxy": {
39
+ "types": "./dist/adapters/proxy.d.ts",
40
+ "default": "./dist/adapters/proxy.js"
37
41
  }
38
42
  },
39
43
  "bin": {
40
44
  "psyche": "dist/cli.js",
41
45
  "psyche-ai": "dist/cli.js",
42
- "psyche-mcp": "dist/adapters/mcp.js"
46
+ "psyche-mcp": "dist/adapters/mcp.js",
47
+ "psyche-proxy": "dist/adapters/proxy.js"
43
48
  },
44
49
  "scripts": {
45
50
  "build": "tsc",
@@ -48,6 +53,8 @@
48
53
  "typecheck": "tsc --noEmit --strict",
49
54
  "dev": "tsc --watch",
50
55
  "demo": "node scripts/demo-ab.js",
56
+ "demo:fusion": "node scripts/demo-fusion.js",
57
+ "eval:fusion": "node scripts/eval-fusion.js",
51
58
  "probe": "node dist/cli.js probe --json",
52
59
  "release:guard": "node scripts/release-guard.mjs",
53
60
  "prepublishOnly": "npm run release:guard && npm test"
package/server.json CHANGED
@@ -6,12 +6,12 @@
6
6
  "url": "https://github.com/Shangri-la-0428/oasyce_psyche",
7
7
  "source": "github"
8
8
  },
9
- "version": "9.2.11",
9
+ "version": "10.1.1",
10
10
  "packages": [
11
11
  {
12
12
  "registryType": "npm",
13
13
  "identifier": "psyche-ai",
14
- "version": "9.2.11",
14
+ "version": "10.1.1",
15
15
  "transport": {
16
16
  "type": "stdio"
17
17
  },