prism-mcp-server 7.8.2 β†’ 7.8.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -691,7 +691,7 @@ The Generator strips the `console.log`, resubmits, and the next `EVALUATE` retur
691
691
 
692
692
  ## πŸ†• What's New
693
693
 
694
- > **Current release: v7.8.0 β€” Cognitive Architecture**
694
+ > **Current release: v7.8.2 β€” Cognitive Architecture**
695
695
 
696
696
  - 🧠 **v7.8.0 β€” Cognitive Architecture:** The biggest leap forward yet. Moved beyond flat vector search into a true cognitive architecture inspired by human brain mechanics. Episodic-to-Semantic memory consolidation (Hebbian learning), ACT-R Spreading Activation with multi-hop causal reasoning, Uncertainty-Aware Rejection Gate (your agent can say "I don't know"), and Dynamic Fast Weight Decay (semantic memories outlive episodic chatter by 2Γ—). **Your agents don't just remember; they learn.** β†’ [Cognitive Architecture](#-cognitive-architecture-v78)
697
697
  - 🌐 **v7.7.0 β€” Cloud-Native SSE Transport:** Full unauthenticated and authenticated Server-Sent Events MCP support for seamless network deployments.
@@ -1065,7 +1065,7 @@ Prism has evolved from smart session logging into a **cognitive memory architect
1065
1065
 
1066
1066
  ## πŸ“¦ Milestones & Roadmap
1067
1067
 
1068
- > **Current: v7.8.0** β€” Cognitive Architecture ([CHANGELOG](CHANGELOG.md))
1068
+ > **Current: v7.8.2** β€” Cognitive Architecture ([CHANGELOG](CHANGELOG.md))
1069
1069
 
1070
1070
  | Release | Headline |
1071
1071
  |---------|----------|
@@ -46,6 +46,7 @@ export async function runInSandbox(dataStr, code, timeoutMs = 10000) {
46
46
  });
47
47
  const consoleHandle = vm.newObject();
48
48
  vm.setProp(consoleHandle, "log", logHandle);
49
+ vm.setProp(consoleHandle, "error", logHandle); // Map console.error to capture output safely
49
50
  vm.setProp(vm.global, "console", consoleHandle);
50
51
  consoleHandle.dispose();
51
52
  logHandle.dispose();
@@ -16,7 +16,7 @@
16
16
  * Prism's SQLite (sqlite-vec) and Supabase (pgvector) schemas define
17
17
  * embedding columns as EXACTLY 768 dimensions.
18
18
  *
19
- * Voyage solution: voyage-3 and voyage-3-lite output 1024 dims by default,
19
+ * Voyage solution: voyage-code-3 and voyage-3 output 1024 dims by default,
20
20
  * but both support the `output_dimension` parameter (Matryoshka Representation
21
21
  * Learning), enabling truncation to 768 while preserving quality.
22
22
  * voyage-3-lite (native 512 dims) is the fastest and most cost-efficient option, but cannot meet the 768-dim requirement.
@@ -29,11 +29,11 @@
29
29
  *
30
30
  * NOTE: voyage-3-lite natively outputs 512 dims; it does NOT support
31
31
  * output_dimension truncation to 768. Use voyage-3 for dimension parity.
32
- * Default is voyage-3 for this reason.
32
+ * Default is voyage-code-3 (optimised for code-heavy sessions); its 1024-dim output is truncated to 768 client-side.
33
33
  *
34
34
  * CONFIG KEYS (Prism dashboard "AI Providers" tab OR environment variables):
35
35
  * voyage_api_key β€” Required. Voyage AI API key (pa-...)
36
- * voyage_model β€” Embedding model (default: voyage-3)
36
+ * voyage_model β€” Embedding model (default: voyage-code-3)
37
37
  *
38
38
  * USAGE WITH ANTHROPIC TEXT PROVIDER:
39
39
  * Set text_provider=anthropic, embedding_provider=voyage in the dashboard.
@@ -52,9 +52,9 @@ const EMBEDDING_DIMS = 768;
52
52
  // with OpenAI and Gemini adapters) avoids tokenizer dependency.
53
53
  // 8000 chars β‰ˆ 1500-2000 tokens for typical session summaries.
54
54
  const MAX_EMBEDDING_CHARS = 8000;
55
- // Default model: voyage-3 (supports output_dimension=768 via MRL)
56
- // voyage-3-lite is NOT recommended as its native 512 dims < 768.
57
- const DEFAULT_MODEL = "voyage-3";
55
+ // Default model: voyage-code-3 (supports output_dimension=768 via client-side MRL truncation)
56
+ // Extremely optimized for codebases, IDE workspaces, and technical queries.
57
+ const DEFAULT_MODEL = "voyage-code-3";
58
58
  const VOYAGE_API_BASE = "https://api.voyageai.com/v1";
59
59
  // ─── Adapter ─────────────────────────────────────────────────────────────────
60
60
  export class VoyageAdapter {
@@ -91,11 +91,9 @@ export class VoyageAdapter {
91
91
  const requestBody = {
92
92
  input: [truncated],
93
93
  model,
94
- // Request exactly 768 dims via Matryoshka truncation.
95
- // Supported by voyage-3, voyage-3-large, voyage-code-3.
96
- // voyage-3-lite (native 512 dims) will ignore this and return 512,
97
- // which will be caught by the dimension guard below.
98
- output_dimension: EMBEDDING_DIMS,
94
+ // We do NOT send output_dimension here because Voyage's API explicitly
95
+ // restricts it to [256, 512, 1024, 2048] for MRL models. We will
96
+ // manually slice the 1024-dim result down to 768 client-side.
99
97
  };
100
98
  const response = await fetch(`${VOYAGE_API_BASE}/embeddings`, {
101
99
  method: "POST",
@@ -110,17 +108,20 @@ export class VoyageAdapter {
110
108
  throw new Error(`[VoyageAdapter] API request failed β€” status=${response.status}: ${errorText}`);
111
109
  }
112
110
  const data = (await response.json());
113
- const embedding = data?.data?.[0]?.embedding;
111
+ let embedding = data?.data?.[0]?.embedding;
114
112
  if (!Array.isArray(embedding)) {
115
113
  throw new Error("[VoyageAdapter] Unexpected response format β€” no embedding array found");
116
114
  }
115
+ // Client-side MRL Truncation:
116
+ // Voyage models returning 1024 dims can be safely sliced to 768 since they
117
+ // are trained with Matryoshka Representation Learning. (Note: truncation changes vector norms; re-normalize if downstream math assumes unit-length embeddings.)
118
+ if (embedding.length > EMBEDDING_DIMS) {
119
+ embedding = embedding.slice(0, EMBEDDING_DIMS);
120
+ }
117
121
  // Dimension guard: Prism's DB schema requires exactly 768 dims.
118
- // This catches voyage-3-lite (512) or future API changes silently early.
119
122
  if (embedding.length !== EMBEDDING_DIMS) {
120
123
  throw new Error(`[VoyageAdapter] Embedding dimension mismatch: expected ${EMBEDDING_DIMS}, ` +
121
- `got ${embedding.length}. ` +
122
- `Use voyage-3 (not voyage-3-lite) to get 768-dim output via MRL truncation. ` +
123
- `Change voyage_model in the Mind Palace dashboard.`);
124
+ `got ${embedding.length}. Make sure you are using a model that returns at least 768 dims.`);
124
125
  }
125
126
  debugLog(`[VoyageAdapter] Embedding generated β€” dims=${embedding.length}, ` +
126
127
  `tokens_used=${data.usage?.total_tokens ?? "unknown"}`);
@@ -89,15 +89,22 @@ export function getLLMProvider() {
89
89
  // ── Resolve embedding provider ────────────────────────────────────────
90
90
  let embedType = getSettingSync("embedding_provider", "auto");
91
91
  if (embedType === "auto") {
92
- // Anthropic has no embedding API β€” auto-bridge to Gemini.
93
- // For all other text providers, use the same provider for embeddings.
94
- embedType = textType === "anthropic" ? "gemini" : textType;
95
- if (textType === "anthropic") {
96
- console.info("[LLMFactory] text_provider=anthropic with embedding_provider=auto: " +
97
- "routing embeddings to GeminiAdapter (Anthropic has no native embedding API). " +
98
- "For the Anthropic-recommended pairing, set embedding_provider=voyage in the dashboard " +
99
- "(voyage-3 supports 768-dim output via MRL). " +
100
- "Alternatively, set embedding_provider=openai to use Ollama/OpenAI.");
92
+ if (process.env.VOYAGE_API_KEY) {
93
+ // If Voyage is available, use it as the default embedding provider
94
+ // since voyage-code-3 strongly outperforms general embeddings on code contexts.
95
+ embedType = "voyage";
96
+ }
97
+ else {
98
+ // Anthropic has no embedding API β€” auto-bridge to Gemini.
99
+ // For all other text providers, use the same provider for embeddings.
100
+ embedType = textType === "anthropic" ? "gemini" : textType;
101
+ if (textType === "anthropic") {
102
+ console.error("[LLMFactory] text_provider=anthropic with embedding_provider=auto: " +
103
+ "routing embeddings to GeminiAdapter (Anthropic has no native embedding API). " +
104
+ "For the Anthropic-recommended pairing, set embedding_provider=voyage in the dashboard " +
105
+ "(voyage-code-3 supports 768-dim output via MRL). " +
106
+ "Alternatively, set embedding_provider=openai to use Ollama/OpenAI.");
107
+ }
101
108
  }
102
109
  }
103
110
  try {
@@ -121,7 +128,7 @@ export function getLLMProvider() {
121
128
  // The text provider name is used as the primary span attribute label.
122
129
  providerInstance = new TracingLLMProvider(composed, textType);
123
130
  if (textType !== embedType) {
124
- console.info(`[LLMFactory] Split provider: text=${textType}, embedding=${embedType}`);
131
+ console.error(`[LLMFactory] Split provider: text=${textType}, embedding=${embedType}`);
125
132
  }
126
133
  }
127
134
  catch (err) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "prism-mcp-server",
3
- "version": "7.8.2",
3
+ "version": "7.8.4",
4
4
  "mcpName": "io.github.dcostenco/prism-mcp",
5
5
  "description": "The Mind Palace for AI Agents β€” a true Cognitive Architecture with Hebbian learning (episodicβ†’semantic consolidation), ACT-R spreading activation (multi-hop causal reasoning), uncertainty-aware rejection gates (agents that know when they don't know), adversarial evaluation (anti-sycophancy), fail-closed Dark Factory pipelines, persistent memory (SQLite/Supabase), multi-agent Hivemind, time travel & visual dashboard. Zero-config local mode.",
6
6
  "module": "index.ts",