prism-mcp-server 7.8.1 → 7.8.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/dist/utils/llm/adapters/voyage.js +14 -13
- package/dist/utils/llm/factory.js +10 -3
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -691,7 +691,7 @@ The Generator strips the `console.log`, resubmits, and the next `EVALUATE` retur
|
|
|
691
691
|
|
|
692
692
|
## π What's New
|
|
693
693
|
|
|
694
|
-
> **Current release: v7.8.
|
|
694
|
+
> **Current release: v7.8.2 — Cognitive Architecture**
|
|
695
695
|
|
|
696
696
|
- 🧠 **v7.8.0 — Cognitive Architecture:** The biggest leap forward yet. Moved beyond flat vector search into a true cognitive architecture inspired by human brain mechanics. Episodic-to-Semantic memory consolidation (Hebbian learning), ACT-R Spreading Activation with multi-hop causal reasoning, Uncertainty-Aware Rejection Gate (your agent can say "I don't know"), and Dynamic Fast Weight Decay (semantic memories outlive episodic chatter by 2×). **Your agents don't just remember; they learn.** → [Cognitive Architecture](#-cognitive-architecture-v78)
|
|
697
697
|
- π **v7.7.0 — Cloud-Native SSE Transport:** Full unauthenticated and authenticated Server-Sent Events MCP support for seamless network deployments.
|
|
@@ -1065,7 +1065,7 @@ Prism has evolved from smart session logging into a **cognitive memory architect
|
|
|
1065
1065
|
|
|
1066
1066
|
## π¦ Milestones & Roadmap
|
|
1067
1067
|
|
|
1068
|
-
> **Current: v7.8.
|
|
1068
|
+
> **Current: v7.8.2** — Cognitive Architecture ([CHANGELOG](CHANGELOG.md))
|
|
1069
1069
|
|
|
1070
1070
|
| Release | Headline |
|
|
1071
1071
|
|---------|----------|
|
|
@@ -52,9 +52,9 @@ const EMBEDDING_DIMS = 768;
|
|
|
52
52
|
// with OpenAI and Gemini adapters) avoids tokenizer dependency.
|
|
53
53
|
// 8000 chars β 1500-2000 tokens for typical session summaries.
|
|
54
54
|
const MAX_EMBEDDING_CHARS = 8000;
|
|
55
|
-
// Default model: voyage-3 (supports output_dimension=768 via MRL)
|
|
56
|
-
//
|
|
57
|
-
const DEFAULT_MODEL = "voyage-3";
|
|
55
|
+
// Default model: voyage-code-3 (supports output_dimension=768 via client-side MRL truncation)
|
|
56
|
+
// Extremely optimized for code bases, ide workspaces, and technical queries.
|
|
57
|
+
const DEFAULT_MODEL = "voyage-code-3";
|
|
58
58
|
const VOYAGE_API_BASE = "https://api.voyageai.com/v1";
|
|
59
59
|
// βββ Adapter βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
60
60
|
export class VoyageAdapter {
|
|
@@ -91,11 +91,9 @@ export class VoyageAdapter {
|
|
|
91
91
|
const requestBody = {
|
|
92
92
|
input: [truncated],
|
|
93
93
|
model,
|
|
94
|
-
//
|
|
95
|
-
//
|
|
96
|
-
//
|
|
97
|
-
// which will be caught by the dimension guard below.
|
|
98
|
-
output_dimension: EMBEDDING_DIMS,
|
|
94
|
+
// We do NOT send output_dimension here because Voyage's API explicitly
|
|
95
|
+
// restricts it to [256, 512, 1024, 2048] for MRL models. We will
|
|
96
|
+
// manually slice the 1024-dim result down to 768 client-side.
|
|
99
97
|
};
|
|
100
98
|
const response = await fetch(`${VOYAGE_API_BASE}/embeddings`, {
|
|
101
99
|
method: "POST",
|
|
@@ -110,17 +108,20 @@ export class VoyageAdapter {
|
|
|
110
108
|
throw new Error(`[VoyageAdapter] API request failed β status=${response.status}: ${errorText}`);
|
|
111
109
|
}
|
|
112
110
|
const data = (await response.json());
|
|
113
|
-
|
|
111
|
+
let embedding = data?.data?.[0]?.embedding;
|
|
114
112
|
if (!Array.isArray(embedding)) {
|
|
115
113
|
throw new Error("[VoyageAdapter] Unexpected response format β no embedding array found");
|
|
116
114
|
}
|
|
115
|
+
// Client-side MRL Truncation:
|
|
116
|
+
// Voyage models returning 1024 dims can be safely sliced to 768 since they
|
|
117
|
+
// are trained with Matryoshka Representation Learning.
|
|
118
|
+
if (embedding.length > EMBEDDING_DIMS) {
|
|
119
|
+
embedding = embedding.slice(0, EMBEDDING_DIMS);
|
|
120
|
+
}
|
|
117
121
|
// Dimension guard: Prism's DB schema requires exactly 768 dims.
|
|
118
|
-
// This catches voyage-3-lite (512) or future API changes silently early.
|
|
119
122
|
if (embedding.length !== EMBEDDING_DIMS) {
|
|
120
123
|
throw new Error(`[VoyageAdapter] Embedding dimension mismatch: expected ${EMBEDDING_DIMS}, ` +
|
|
121
|
-
`got ${embedding.length}.
|
|
122
|
-
`Use voyage-3 (not voyage-3-lite) to get 768-dim output via MRL truncation. ` +
|
|
123
|
-
`Change voyage_model in the Mind Palace dashboard.`);
|
|
124
|
+
`got ${embedding.length}. Make sure you are using a model that returns at least 768 dims.`);
|
|
124
125
|
}
|
|
125
126
|
debugLog(`[VoyageAdapter] Embedding generated β dims=${embedding.length}, ` +
|
|
126
127
|
`tokens_used=${data.usage?.total_tokens ?? "unknown"}`);
|
|
@@ -89,9 +89,16 @@ export function getLLMProvider() {
|
|
|
89
89
|
// ββ Resolve embedding provider ββββββββββββββββββββββββββββββββββββββββ
|
|
90
90
|
let embedType = getSettingSync("embedding_provider", "auto");
|
|
91
91
|
if (embedType === "auto") {
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
92
|
+
if (process.env.VOYAGE_API_KEY) {
|
|
93
|
+
// If Voyage is available, use it as the default embedding provider
|
|
94
|
+
// since voyage-code-3 strongly outperforms general embeddings on code contexts.
|
|
95
|
+
embedType = "voyage";
|
|
96
|
+
}
|
|
97
|
+
else {
|
|
98
|
+
// Anthropic has no embedding API β auto-bridge to Gemini.
|
|
99
|
+
// For all other text providers, use the same provider for embeddings.
|
|
100
|
+
embedType = textType === "anthropic" ? "gemini" : textType;
|
|
101
|
+
}
|
|
95
102
|
if (textType === "anthropic") {
|
|
96
103
|
console.info("[LLMFactory] text_provider=anthropic with embedding_provider=auto: " +
|
|
97
104
|
"routing embeddings to GeminiAdapter (Anthropic has no native embedding API). " +
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "prism-mcp-server",
|
|
3
|
-
"version": "7.8.
|
|
3
|
+
"version": "7.8.3",
|
|
4
4
|
"mcpName": "io.github.dcostenco/prism-mcp",
|
|
5
5
|
"description": "The Mind Palace for AI Agents β a true Cognitive Architecture with Hebbian learning (episodicβsemantic consolidation), ACT-R spreading activation (multi-hop causal reasoning), uncertainty-aware rejection gates (agents that know when they don't know), adversarial evaluation (anti-sycophancy), fail-closed Dark Factory pipelines, persistent memory (SQLite/Supabase), multi-agent Hivemind, time travel & visual dashboard. Zero-config local mode.",
|
|
6
6
|
"module": "index.ts",
|