@rigstate/mcp 0.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +8 -0
- package/README.md +352 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +3445 -0
- package/dist/index.js.map +1 -0
- package/package.json +43 -0
- package/roadmap.json +531 -0
- package/src/agents/the-scribe.ts +122 -0
- package/src/index.ts +1792 -0
- package/src/lib/supabase.ts +120 -0
- package/src/lib/tool-registry.ts +134 -0
- package/src/lib/types.ts +415 -0
- package/src/lib/utils.ts +10 -0
- package/src/resources/project-morals.ts +92 -0
- package/src/tools/arch-tools.ts +166 -0
- package/src/tools/archaeological-scan.ts +335 -0
- package/src/tools/check-agent-bridge.ts +169 -0
- package/src/tools/check-rules-sync.ts +85 -0
- package/src/tools/complete-roadmap-task.ts +96 -0
- package/src/tools/generate-professional-pdf.ts +232 -0
- package/src/tools/get-latest-decisions.ts +130 -0
- package/src/tools/get-next-roadmap-step.ts +76 -0
- package/src/tools/get-project-context.ts +163 -0
- package/src/tools/index.ts +17 -0
- package/src/tools/list-features.ts +67 -0
- package/src/tools/list-roadmap-tasks.ts +61 -0
- package/src/tools/pending-tasks.ts +228 -0
- package/src/tools/planning-tools.ts +123 -0
- package/src/tools/query-brain.ts +125 -0
- package/src/tools/research-tools.ts +149 -0
- package/src/tools/run-architecture-audit.ts +203 -0
- package/src/tools/save-decision.ts +77 -0
- package/src/tools/security-tools.ts +82 -0
- package/src/tools/submit-idea.ts +66 -0
- package/src/tools/sync-ide-rules.ts +76 -0
- package/src/tools/teacher-mode.ts +171 -0
- package/src/tools/ui-tools.ts +191 -0
- package/src/tools/update-roadmap.ts +105 -0
- package/tsconfig.json +29 -0
- package/tsup.config.ts +16 -0
@@ -0,0 +1,228 @@ package/src/tools/pending-tasks.ts
/**
 * Tool: get_pending_tasks
 *
 * Gets tasks from the agent_bridge table that have been APPROVED by the user
 * and are ready for Frank to execute. Also includes update_task_status for
 * marking tasks as EXECUTING or COMPLETED.
 */

import { SupabaseClient } from '@supabase/supabase-js';

// =============================================================================
// TYPES
// =============================================================================

export interface PendingTask {
  id: string;
  project_id: string;
  task_id: string | null;      // Reference to roadmap_chunks
  status: string;
  objective: string;           // Mapped from proposal or roadmap_chunks.title
  technical_context: string;   // Mapped from roadmap_chunks.prompt_content
  constraints: string[];       // Extracted from verification_criteria
  definition_of_done: string;  // Mapped from verification_criteria
  created_at: string;
}

export interface GetPendingTasksResponse {
  success: boolean;
  tasks: PendingTask[];
  message: string;
}

export interface UpdateTaskStatusResponse {
  success: boolean;
  task_id: string;
  previous_status: string;
  new_status: string;
  message: string;
}

// =============================================================================
// GET PENDING TASKS
// =============================================================================

/**
 * Fetches tasks that have been APPROVED by the user through the dashboard
 * and are ready for the AI agent (Frank) to execute.
 */
export async function getPendingTasks(
  supabase: SupabaseClient,
  projectId: string
): Promise<GetPendingTasksResponse> {

  // Fetch APPROVED tasks that are ready for execution
  const { data: tasks, error } = await supabase
    .from('agent_bridge')
    .select(`
      id,
      project_id,
      task_id,
      status,
      proposal,
      summary,
      created_at,
      roadmap_chunks (
        title,
        prompt_content,
        verification_criteria,
        summary
      )
    `)
    .eq('project_id', projectId)
    .eq('status', 'APPROVED')
    .order('created_at', { ascending: true });

  if (error) {
    throw new Error(`Failed to fetch pending tasks: ${error.message}`);
  }

  if (!tasks || tasks.length === 0) {
    return {
      success: true,
      tasks: [],
      message: 'No approved tasks waiting for execution. 🎉'
    };
  }

  // Transform to the expected format
  const pendingTasks: PendingTask[] = tasks.map((task: any) => {
    const roadmap = task.roadmap_chunks;

    // Extract constraints from verification_criteria if available
    let constraints: string[] = [];
    let definitionOfDone = 'Complete the task as described.';

    if (roadmap?.verification_criteria) {
      const criteria = roadmap.verification_criteria;
      if (typeof criteria === 'string') {
        definitionOfDone = criteria;
        // Try to extract bullet-point constraints
        constraints = criteria
          .split('\n')
          .filter((line: string) => line.trim().startsWith('-') || line.trim().startsWith('•'))
          .map((line: string) => line.replace(/^[-•]\s*/, '').trim());
      } else if (Array.isArray(criteria)) {
        constraints = criteria;
        definitionOfDone = criteria.join('\n');
      }
    }

    return {
      id: task.id,
      project_id: task.project_id,
      task_id: task.task_id,
      status: task.status,
      objective: task.proposal || roadmap?.title || task.summary || 'Unnamed Task',
      technical_context: roadmap?.prompt_content || roadmap?.summary || 'No additional context provided.',
      constraints: constraints,
      definition_of_done: definitionOfDone,
      created_at: task.created_at
    };
  });

  return {
    success: true,
    tasks: pendingTasks,
    message: `Found ${pendingTasks.length} approved task(s) ready for execution.`
  };
}

// =============================================================================
// UPDATE TASK STATUS
// =============================================================================

/**
 * Updates the status of an agent_bridge task.
 * Allowed transitions:
 * - APPROVED → EXECUTING (when Frank starts working)
 * - EXECUTING → COMPLETED (when Frank finishes successfully)
 * - EXECUTING → FAILED (if something goes wrong)
 *
 * Also updates the linked roadmap_chunk if the task is COMPLETED.
 */
export async function updateTaskStatus(
  supabase: SupabaseClient,
  projectId: string,
  taskId: string,
  newStatus: 'EXECUTING' | 'COMPLETED' | 'FAILED',
  executionSummary?: string
): Promise<UpdateTaskStatusResponse> {

  // 1. Fetch the current task state
  const { data: currentTask, error: fetchError } = await supabase
    .from('agent_bridge')
    .select('status, task_id')
    .eq('id', taskId)
    .eq('project_id', projectId)
    .single();

  if (fetchError || !currentTask) {
    throw new Error(`Task not found: ${taskId}`);
  }

  const previousStatus = currentTask.status;

  // 2. Validate state transition
  const validTransitions: Record<string, string[]> = {
    'APPROVED': ['EXECUTING'],
    'EXECUTING': ['COMPLETED', 'FAILED']
  };

  if (!validTransitions[previousStatus]?.includes(newStatus)) {
    throw new Error(
      `Invalid status transition: ${previousStatus} → ${newStatus}. ` +
      `Allowed from ${previousStatus}: ${validTransitions[previousStatus]?.join(', ') || 'none'}`
    );
  }

  // 3. Build update payload
  const updateData: Record<string, any> = {
    status: newStatus,
    updated_at: new Date().toISOString()
  };

  if (newStatus === 'COMPLETED') {
    if (!executionSummary) {
      throw new Error('execution_summary is REQUIRED when setting status to COMPLETED.');
    }
    updateData.execution_summary = executionSummary;
    updateData.completed_at = new Date().toISOString();
  } else if (executionSummary) {
    updateData.summary = executionSummary;
  }

  // 4. Update the agent_bridge task
  const { error: updateError } = await supabase
    .from('agent_bridge')
    .update(updateData)
    .eq('id', taskId)
    .eq('project_id', projectId);

  if (updateError) {
    throw new Error(`Failed to update task status: ${updateError.message}`);
  }

  // 5. If COMPLETED, also mark the roadmap_chunk as COMPLETED
  if (newStatus === 'COMPLETED' && currentTask.task_id) {
    const { error: roadmapError } = await supabase
      .from('roadmap_chunks')
      .update({
        status: 'COMPLETED',
        completed_at: new Date().toISOString()
      })
      .eq('id', currentTask.task_id);

    if (roadmapError) {
      console.error(`Warning: Failed to update roadmap_chunk status: ${roadmapError.message}`);
    }
  }

  return {
    success: true,
    task_id: taskId,
    previous_status: previousStatus,
    new_status: newStatus,
    message: `Task ${taskId} status updated from ${previousStatus} → ${newStatus}.`
  };
}
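A minimal usage sketch for the two exports above. The environment variable names, client setup, and project id are placeholder assumptions, not part of this package.

import { createClient } from '@supabase/supabase-js';
import { getPendingTasks, updateTaskStatus } from './pending-tasks.js';

// Placeholder credentials and project id (assumed names, not from this package).
const supabase = createClient(process.env.SUPABASE_URL!, process.env.SUPABASE_SERVICE_ROLE_KEY!);
const projectId = '00000000-0000-0000-0000-000000000000';

const { tasks } = await getPendingTasks(supabase, projectId);
for (const task of tasks) {
  // APPROVED → EXECUTING, then EXECUTING → COMPLETED (an execution summary is required on completion).
  await updateTaskStatus(supabase, projectId, task.id, 'EXECUTING');
  await updateTaskStatus(supabase, projectId, task.id, 'COMPLETED', 'Implemented and verified.');
}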
@@ -0,0 +1,123 @@ package/src/tools/planning-tools.ts
import { SupabaseClient } from '@supabase/supabase-js';
import { SaveToProjectBrainInput, UpdateRoadmapStatusInput, AddRoadmapChunkInput } from '../lib/types.js';

/**
 * Maja's Tool: Save to Project Brain
 * Persists knowledge, decisions, and notes.
 */
export async function saveToProjectBrain(
  supabase: SupabaseClient,
  userId: string,
  input: SaveToProjectBrainInput
) {
  const { projectId, title, content, category, tags } = input;

  // Confirm project access
  const { data: p, error: pErr } = await supabase.from('projects').select('id').eq('id', projectId).eq('owner_id', userId).single();
  if (pErr || !p) throw new Error('Access denied');

  const fullContent = `# ${title}\n\n${content}`;

  const { data, error } = await supabase
    .from('project_memories')
    .insert({
      project_id: projectId,
      content: fullContent,
      category: category.toLowerCase(),
      tags: tags,
      importance: (category === 'DECISION' || category === 'ARCHITECTURE') ? 9 : 5,
      is_active: true,
      source_type: 'chat_manual'
    })
    .select('id')
    .single();

  if (error) throw new Error(`Failed to save memory: ${error.message}`);

  return {
    success: true,
    memoryId: data.id,
    message: `✅ Saved [${category}] "${title}" to Project Brain.`
  };
}

/**
 * Kine's Tool: Update Roadmap Status
 * Updates task status and notifies the Scribe system.
 */
export async function updateRoadmapStatus(
  supabase: SupabaseClient,
  userId: string,
  input: UpdateRoadmapStatusInput
) {
  const { projectId, chunkId, status } = input;

  // Map status to the DB enum: 'TODO' -> 'LOCKED' (the standard convention in Rigstate is LOCKED/ACTIVE/COMPLETED).
  // If 'TODO' were meant to be 'PENDING' we would check the schema; here 'LOCKED' is assumed to be the backlog state.
  const dbStatus = status === 'TODO' ? 'LOCKED' : status === 'IN_PROGRESS' ? 'ACTIVE' : 'COMPLETED';

  const { error } = await supabase
    .from('roadmap_chunks')
    .update({ status: dbStatus })
    .eq('id', chunkId)
    .eq('project_id', projectId);

  if (error) throw new Error(`Update failed: ${error.message}`);

  let message = `Roadmap status updated to ${status} (${dbStatus}).`;

  if (status === 'COMPLETED') {
    // "Notification" to Gunhild
    message += " 📢 Gunhild (Scribe) has been signaled. This completion will be reflected in the next report.";
  } else if (status === 'IN_PROGRESS') {
    message += " 🚀 Work started.";
  }

  return { success: true, status: dbStatus, message };
}

/**
 * Kine's Tool: Add Roadmap Chunk
 * Decomposes features into actionable chunks.
 */
export async function addRoadmapChunk(
  supabase: SupabaseClient,
  userId: string,
  input: AddRoadmapChunkInput
) {
  const { projectId, title, description, priority } = input;

  // Get max step number
  const { data: maxStep } = await supabase
    .from('roadmap_chunks')
    .select('step_number')
    .eq('project_id', projectId)
    .order('step_number', { ascending: false })
    .limit(1)
    .single();

  const nextStepNum = (maxStep?.step_number || 0) + 1;

  const { data, error } = await supabase
    .from('roadmap_chunks')
    .insert({
      project_id: projectId,
      title: title,
      description: description || '',
      status: 'LOCKED', // Default state
      priority: priority,
      step_number: nextStepNum,
      sprint_focus: input.featureId ? `Feature: ${input.featureId}` : null
    })
    .select('id')
    .single();

  if (error) throw new Error(`Failed to add chunk: ${error.message}`);

  return {
    success: true,
    chunkId: data.id,
    stepNumber: nextStepNum,
    message: `✅ Added new roadmap chunk: "${title}" (Step ${nextStepNum}).`
  };
}
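A rough sketch of invoking the three planning tools in sequence. The client, ids, and the exact value shapes of fields like priority are assumptions inferred from the destructuring above, not confirmed against ../lib/types.ts.

import type { SupabaseClient } from '@supabase/supabase-js';
import { saveToProjectBrain, addRoadmapChunk, updateRoadmapStatus } from './planning-tools.js';

declare const supabase: SupabaseClient; // provided by the MCP server at runtime
const userId = 'user-uuid';             // placeholder
const projectId = 'project-uuid';       // placeholder

await saveToProjectBrain(supabase, userId, {
  projectId,
  title: 'Adopt row-level security',
  content: 'All project tables are protected by RLS keyed on owner_id.',
  category: 'DECISION',                 // stored with importance 9 per the logic above
  tags: ['security', 'database']
});

const { chunkId } = await addRoadmapChunk(supabase, userId, {
  projectId,
  title: 'Write RLS policies',
  description: 'Cover project_memories and roadmap_chunks.',
  priority: 'HIGH'                      // value shape assumed
});

await updateRoadmapStatus(supabase, userId, { projectId, chunkId, status: 'IN_PROGRESS' });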
@@ -0,0 +1,125 @@ package/src/tools/query-brain.ts
/**
 * Tool: query_brain
 *
 * Takes a natural language query and performs semantic search
 * against the project's memories (RAG), returning relevant
 * architecture rules, decisions, and constraints.
 */

import { SupabaseClient } from '@supabase/supabase-js';
import type { BrainQueryResponse, MemoryRecord } from '../lib/types.js';

// Simple embedding generation using text normalization
// In production, this would call an embedding API like OpenAI
async function generateQueryEmbedding(query: string): Promise<number[]> {
  // For the MCP server, we'll use the match_memories_text RPC if available,
  // or fall back to keyword-based search.
  // This is a placeholder - the actual embedding should be done via the Supabase edge function
  // or passed through to the main Rigstate API.

  // For now, return an empty array so callers fall back to keyword search
  return [];
}

export async function queryBrain(
  supabase: SupabaseClient,
  userId: string,
  projectId: string,
  query: string,
  limit: number = 8,
  threshold: number = 0.5
): Promise<BrainQueryResponse> {
  // First, verify project ownership
  const { data: project, error: projectError } = await supabase
    .from('projects')
    .select('id')
    .eq('id', projectId)
    .eq('owner_id', userId)
    .single();

  if (projectError || !project) {
    throw new Error('Project not found or access denied');
  }

  // Try semantic search first using the match_memories RPC.
  // This requires the embedding to be generated, so we'll try
  // a text-based search as fallback.
  let memories: MemoryRecord[] = [];

  // Attempt keyword-based search using ilike on content.
  // This is a simpler approach that works without embeddings.
  const searchTerms = query.toLowerCase().split(/\s+/).filter(t => t.length > 2);

  if (searchTerms.length > 0) {
    // Build OR condition for fuzzy matching
    const orConditions = searchTerms.map(term => `content.ilike.%${term}%`).join(',');

    const { data: keywordMatches, error: searchError } = await supabase
      .from('project_memories')
      .select('id, content, category, tags, net_votes, created_at')
      .eq('project_id', projectId)
      .eq('is_active', true)
      .or(orConditions)
      .order('net_votes', { ascending: false, nullsFirst: false })
      .limit(limit);

    if (!searchError && keywordMatches) {
      memories = keywordMatches.map(m => ({
        id: m.id,
        content: m.content,
        category: m.category || 'general',
        tags: m.tags || [],
        netVotes: m.net_votes,
        createdAt: m.created_at
      }));
    }
  }

  // If no keyword matches, fetch recent memories
  if (memories.length === 0) {
    const { data: recentMemories, error: recentError } = await supabase
      .from('project_memories')
      .select('id, content, category, tags, net_votes, created_at')
      .eq('project_id', projectId)
      .eq('is_active', true)
      .order('created_at', { ascending: false })
      .limit(limit);

    if (!recentError && recentMemories) {
      memories = recentMemories.map(m => ({
        id: m.id,
        content: m.content,
        category: m.category || 'general',
        tags: m.tags || [],
        netVotes: m.net_votes,
        createdAt: m.created_at
      }));
    }
  }

  // Format memories into a readable context block
  const contextLines = memories.map((m) => {
    const voteIndicator = m.netVotes && m.netVotes < 0 ? ` [⚠️ POORLY RATED: ${m.netVotes}]` : '';
    const tagStr = m.tags.length > 0 ? ` [${m.tags.join(', ')}]` : '';
    return `- [${m.category.toUpperCase()}]${tagStr}${voteIndicator}: ${m.content}`;
  });

  const formatted = memories.length > 0
    ? `=== PROJECT BRAIN: RELEVANT MEMORIES ===
Query: "${query}"
Found ${memories.length} relevant memories:

${contextLines.join('\n')}

==========================================`
    : `=== PROJECT BRAIN ===
Query: "${query}"
No relevant memories found for this query.
=======================`;

  return {
    query,
    memories,
    formatted
  };
}
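A short sketch of calling queryBrain and feeding its formatted block into an agent prompt. The ids are placeholders; since the embedding path above is a stub, this call exercises the keyword/recent-memory fallback.

import type { SupabaseClient } from '@supabase/supabase-js';
import { queryBrain } from './query-brain.js';

declare const supabase: SupabaseClient; // provided by the MCP server at runtime

// Keyword search over project_memories, falling back to the most recent memories.
const result = await queryBrain(supabase, 'user-uuid', 'project-uuid', 'auth rate limiting decisions', 5);
console.log(result.formatted); // ready-made context block for the agent prompt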
@@ -0,0 +1,149 @@ package/src/tools/research-tools.ts
import { SupabaseClient } from '@supabase/supabase-js';
import { QueryProjectBrainInput, FetchPackageHealthInput } from '../lib/types.js';

/**
 * Maja's Tool: Project Brain Query
 * Searches for ADRs, architecture decisions, and memories with a focus on "Why".
 */
export async function queryProjectBrain(
  supabase: SupabaseClient,
  userId: string,
  input: QueryProjectBrainInput
) {
  const { projectId, query, limit = 5 } = input;

  // Verify access
  const { data: project, error: pError } = await supabase
    .from('projects')
    .select('id')
    .eq('id', projectId)
    .eq('owner_id', userId)
    .single();

  if (pError || !project) throw new Error('Access denied or project not found');

  // Simple keyword search
  const terms = query.toLowerCase().split(/\s+/).filter(t => t.length > 2);

  let queryBuilder = supabase
    .from('project_memories')
    .select('id, content, category, tags, importance, created_at')
    .eq('project_id', projectId)
    .eq('is_active', true);

  if (terms.length > 0) {
    // Search content.
    // Ideally we would search tags too, but the PostgREST filter for array-contains-text-ILIKE is complex.
    // We rely on content search, which includes the title (markdown header) and body.
    const orParams = terms.map(t => `content.ilike.%${t}%`).join(',');
    queryBuilder = queryBuilder.or(orParams);
  }

  const { data, error } = await queryBuilder
    .order('importance', { ascending: false }) // Prioritize high importance (ADRs)
    .order('created_at', { ascending: false })
    .limit(limit);

  if (error) throw new Error(`Brain query failed: ${error.message}`);

  const results = (data || []).map(m => ({
    id: m.id,
    category: m.category,
    tags: m.tags,
    importance: m.importance,
    preview: m.content.substring(0, 200) + '...',
    fullContent: m.content,
    date: new Date(m.created_at).toLocaleDateString()
  }));

  return {
    query,
    matchCount: results.length,
    results
  };
}

/**
 * Astrid's Tool: NPM Package Health Check
 * Evaluates a library's maturity and safety.
 */
export async function fetchPackageHealth(input: FetchPackageHealthInput) {
  const { packageName } = input;

  try {
    // 1. Registry Metadata
    const res = await fetch(`https://registry.npmjs.org/${packageName}`);
    if (!res.ok) {
      if (res.status === 404) return { status: 'NOT_FOUND', message: 'Package does not exist.' };
      throw new Error(`Registry error: ${res.statusText}`);
    }
    const data = (await res.json()) as any;

    const latestVersion = data['dist-tags']?.latest;
    if (!latestVersion) throw new Error('No latest version found');

    const versionData = data.versions[latestVersion];
    const lastPublish = data.time[latestVersion];

    // 2. Download Stats
    // Use exact range 'last-week'
    const dlRes = await fetch(`https://api.npmjs.org/downloads/point/last-week/${packageName}`);
    let downloads = 0;
    if (dlRes.ok) {
      const dlData = (await dlRes.json()) as any;
      downloads = dlData.downloads || 0;
    }

    // 3. Analysis
    const lastDate = new Date(lastPublish);
    const now = new Date();
    const ageCodeDays = (now.getTime() - lastDate.getTime()) / (1000 * 3600 * 24);

    let healthScore = 'MODERATE';
    let recommendation = 'Usable, but verify.';
    let riskFactors: string[] = [];

    // npm records deprecation on the version object, not the packument root
    if (versionData.deprecated) {
      healthScore = 'CRITICAL';
      recommendation = 'DO NOT USE. Package is deprecated.';
      riskFactors.push(`Deprecated: ${versionData.deprecated}`);
    }

    if (ageCodeDays > 730) { // 2 years
      healthScore = 'STAGNANT';
      recommendation = 'Consider alternatives. No updates in 2+ years.';
      riskFactors.push('Maintenance mode? (old)');
    }

    if (downloads < 50) {
      healthScore = 'LOW_ADOPTION';
      riskFactors.push('Very low downloads (<50/week)');
      if (healthScore !== 'CRITICAL') recommendation = 'High risk. Barely used.';
    } else if (downloads > 100000 && ageCodeDays < 180) {
      healthScore = 'EXCELLENT';
      recommendation = 'Strongly recommended. Industry standard.';
    } else if (downloads > 5000 && ageCodeDays < 365) {
      healthScore = 'HEALTHY';
      recommendation = 'Safe to use.';
    }

    return {
      packageName,
      latestVersion,
      lastPublished: lastPublish,
      downloadsLastWeek: downloads,
      license: versionData.license || 'Unknown',
      description: data.description,
      healthScore,
      recommendation,
      riskFactors,
      maintainers: data.maintainers?.length || 0
    };

  } catch (e: any) {
    return {
      status: 'ERROR',
      message: `Analysis failed: ${e.message}`
    };
  }
}
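fetchPackageHealth needs no Supabase access, only the public npm registry and download endpoints, so it can be exercised standalone; the package name here is just an example.

import { fetchPackageHealth } from './research-tools.js';

const report = await fetchPackageHealth({ packageName: 'zod' });
// On success: latestVersion, lastPublished, downloadsLastWeek, license, healthScore,
// recommendation, riskFactors, maintainers. On failure: { status: 'NOT_FOUND' | 'ERROR', message }.
console.log(report);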