@rigstate/mcp 0.4.6 → 0.5.0

This diff reflects the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@rigstate/mcp",
-  "version": "0.4.6",
+  "version": "0.5.0",
   "description": "Rigstate MCP Server - Model Context Protocol for AI Editors",
   "type": "module",
   "main": "./dist/index.js",
@@ -15,9 +15,12 @@
     "test": "node dist/index.js --help"
   },
   "dependencies": {
+    "@ai-sdk/google": "^3.0.13",
+    "@google/generative-ai": "^0.24.1",
     "@modelcontextprotocol/sdk": "^1.0.0",
-    "@supabase/supabase-js": "^2.39.0",
     "@rigstate/rules-engine": "0.1.0",
+    "@supabase/supabase-js": "^2.39.0",
+    "ai": "^6.0.5",
     "dotenv": "^17.2.3",
     "uuid": "^9.0.0",
     "zod": "^3.22.4"
@@ -34,15 +34,27 @@ architecture rules, decisions, and constraints.`,
   }
 });
 
-// Simple embedding generation using text normalization
-async function generateQueryEmbedding(query: string): Promise<number[]> {
-  // For MCP server, we'll use the match_memories_text RPC if available,
-  // or fall back to keyword-based search.
-  // This is a placeholder - the actual embedding should be done via the Supabase edge function
-  // or passed through to the main Rigstate API.
-
-  // For now, return null to trigger keyword fallback
-  return [];
+// Simple embedding generation using Google Gemini
+async function generateQueryEmbedding(query: string): Promise<number[] | null> {
+  const apiKey = process.env.GOOGLE_GENERATIVE_AI_API_KEY;
+  if (!apiKey) {
+    console.warn('GOOGLE_GENERATIVE_AI_API_KEY not found, skipping vector search.');
+    return null;
+  }
+
+  try {
+    const { google } = await import('@ai-sdk/google');
+    const { embed } = await import('ai');
+
+    const { embedding } = await embed({
+      model: google.embedding('text-embedding-004'),
+      value: query.replace(/\n/g, ' '),
+    });
+    return embedding;
+  } catch (error) {
+    console.error('Failed to generate embedding for search:', error);
+    return null;
+  }
 }
 
 export async function queryBrain(
@@ -67,45 +79,24 @@ export async function queryBrain(
 
   // Try semantic search first using the match_memories RPC
   // This requires the embedding to be generated, so we'll try
-  // a text-based search as fallback
+  // Generate embedding if possible for semantic search
+  const embedding = await generateQueryEmbedding(query);
   let memories: MemoryRecord[] = [];
 
-  // Attempt keyword-based search using ilike on content
-  // This is a simpler approach that works without embeddings
-  const searchTerms = query.toLowerCase().split(/\s+/).filter(t => t.length > 2);
-
-  if (searchTerms.length > 0) {
-    // Build OR condition for fuzzy matching
-    const orConditions = searchTerms.map(term => `content.ilike.%${term}%`).join(',');
-
-    const { data: keywordMatches, error: searchError } = await supabase
-      .from('project_memories')
-      .select('id, content, category, tags, importance, created_at')
-      .eq('project_id', projectId)
-      .eq('is_active', true)
-      .or(orConditions)
-      .order('importance', { ascending: false, nullsFirst: false })
-      .limit(limit);
-
-    if (searchError) {
-      console.error('Search error:', searchError);
-    }
-
-    if (keywordMatches) {
-      memories = keywordMatches.map(m => ({
-        id: m.id,
-        content: m.content,
-        category: m.category || 'general',
-        tags: m.tags || [],
-        netVotes: m.importance || 0,
-        createdAt: m.created_at
-      }));
-    }
-  }
-
-  // If no keyword matches, fetch recent memories
-  if (memories.length === 0) {
-    const { data: recentMemories, error: recentError } = await supabase
+  // Use the hybrid search RPC
+  const { data: searchResults, error: searchError } = await supabase
+    .rpc('hybrid_search_memories', {
+      p_project_id: projectId,
+      p_query: query,
+      p_embedding: embedding,
+      p_limit: limit,
+      p_similarity_threshold: threshold || 0.2
+    });
+
+  if (searchError) {
+    console.error('Hybrid search error:', searchError);
+    // Fallback to basic recent fetch if RPC fails
+    const { data: recentMemories } = await supabase
       .from('project_memories')
      .select('id, content, category, tags, importance, created_at')
      .eq('project_id', projectId)
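
Only the client side of the hybrid_search_memories RPC appears in this diff; the Postgres function itself is not part of the package. A parameter shape inferred purely from the .rpc() call above (field names from the diff, types are assumptions) might look like:

// Inferred from the call site above; the actual SQL signature is not in this diff.
interface HybridSearchMemoriesParams {
  p_project_id: string;            // projectId forwarded by queryBrain
  p_query: string;                 // raw query text for full-text / fuzzy matching
  p_embedding: number[] | null;    // null when no Gemini API key is available
  p_limit: number;
  p_similarity_threshold: number;  // the caller defaults this to 0.2
}

When p_embedding is null the RPC presumably falls back to text-only matching, which is consistent with the 'HYBRID (FTS + Fuzzy)' label set later in this diff.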
@@ -113,10 +104,6 @@ export async function queryBrain(
       .order('created_at', { ascending: false })
       .limit(limit);
 
-    if (recentError) {
-      console.error('Recent error:', recentError);
-    }
-
     if (recentMemories) {
       memories = recentMemories.map(m => ({
         id: m.id,
@@ -127,17 +114,30 @@ export async function queryBrain(
         createdAt: m.created_at
       }));
     }
+  } else if (searchResults) {
+    memories = searchResults.map((m: any) => ({
+      id: m.id,
+      content: m.content,
+      category: m.category,
+      tags: m.tags,
+      netVotes: m.importance,
+      createdAt: m.created_at
+    }));
   }
 
   // Format memories into a readable context block
   const contextLines = memories.map((m) => {
     const voteIndicator = m.netVotes && m.netVotes < 0 ? ` [⚠️ POORLY RATED: ${m.netVotes}]` : '';
-    const tagStr = m.tags.length > 0 ? ` [${m.tags.join(', ')}]` : '';
-    return `- [${m.category.toUpperCase()}]${tagStr}${voteIndicator}: ${m.content}`;
+    const tagStr = m.tags && m.tags.length > 0 ? ` [${m.tags.join(', ')}]` : '';
+    const category = m.category ? m.category.toUpperCase() : 'GENERAL';
+    return `- [${category}]${tagStr}${voteIndicator}: ${m.content}`;
   });
 
+  const searchType = embedding ? 'TRIPLE-HYBRID (Vector + FTS + Fuzzy)' : 'HYBRID (FTS + Fuzzy)';
+
   const formatted = memories.length > 0
     ? `=== PROJECT BRAIN: RELEVANT MEMORIES ===
+Search Mode: ${searchType}
 Query: "${query}"
 Found ${memories.length} relevant memories:
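
The fields mapped in both result branches above imply a memory shape roughly like the following (a sketch inferred from this diff; the actual MemoryRecord declaration lives elsewhere in the package and is not shown here):

// Hypothetical shape inferred from the .map() calls and formatting code above.
interface MemoryRecordSketch {
  id: string;
  content: string;
  category: string | null;  // rendered as GENERAL when missing
  tags: string[] | null;    // guarded with m.tags && m.tags.length > 0
  netVotes: number | null;  // importance; negative values get a POORLY RATED flag
  createdAt: string;        // created_at timestamp from Supabase
}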