trapic-mcp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +152 -0
- package/bin/trapic-mcp.mjs +902 -0
- package/bin/wrapper.sh +5 -0
- package/dist/archive.d.ts +7 -0
- package/dist/archive.js +116 -0
- package/dist/audit.d.ts +5 -0
- package/dist/audit.js +16 -0
- package/dist/background.d.ts +8 -0
- package/dist/background.js +17 -0
- package/dist/config.d.ts +46 -0
- package/dist/config.js +20 -0
- package/dist/conflict.d.ts +14 -0
- package/dist/conflict.js +103 -0
- package/dist/embedding.d.ts +6 -0
- package/dist/embedding.js +74 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +104 -0
- package/dist/llm.d.ts +10 -0
- package/dist/llm.js +47 -0
- package/dist/ollama.d.ts +11 -0
- package/dist/ollama.js +63 -0
- package/dist/quota.d.ts +7 -0
- package/dist/quota.js +16 -0
- package/dist/rate-limit.d.ts +5 -0
- package/dist/rate-limit.js +38 -0
- package/dist/request-context.d.ts +3 -0
- package/dist/request-context.js +12 -0
- package/dist/supabase.d.ts +2 -0
- package/dist/supabase.js +16 -0
- package/dist/team-access.d.ts +5 -0
- package/dist/team-access.js +35 -0
- package/dist/tools/active.d.ts +2 -0
- package/dist/tools/active.js +63 -0
- package/dist/tools/assert.d.ts +3 -0
- package/dist/tools/assert.js +141 -0
- package/dist/tools/chain.d.ts +2 -0
- package/dist/tools/chain.js +118 -0
- package/dist/tools/context.d.ts +7 -0
- package/dist/tools/context.js +270 -0
- package/dist/tools/create.d.ts +2 -0
- package/dist/tools/create.js +126 -0
- package/dist/tools/extract.d.ts +2 -0
- package/dist/tools/extract.js +95 -0
- package/dist/tools/preload.d.ts +10 -0
- package/dist/tools/preload.js +112 -0
- package/dist/tools/search.d.ts +2 -0
- package/dist/tools/search.js +92 -0
- package/dist/tools/summary.d.ts +2 -0
- package/dist/tools/summary.js +176 -0
- package/dist/tools/update.d.ts +2 -0
- package/dist/tools/update.js +134 -0
- package/dist/worker.d.ts +15 -0
- package/dist/worker.js +700 -0
- package/package.json +59 -0
|
@@ -0,0 +1,270 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { getSupabase } from "../supabase.js";
|
|
3
|
+
import { generateEmbedding } from "../embedding.js";
|
|
4
|
+
import { llmChat } from "../llm.js";
|
|
5
|
+
// ============================================
|
|
6
|
+
// LLM 自動判斷:這個 trace 跟哪些 context 有關?
|
|
7
|
+
// 是支持、反對、補充、還是全新的議題?
|
|
8
|
+
// ============================================
|
|
9
|
+
const CONTEXTUALIZE_PROMPT = `You are a knowledge graph engine. Given a new trace (a knowledge proposition) and a list of existing contexts, determine:
|
|
10
|
+
|
|
11
|
+
1. Does this trace belong to any existing context(s)?
|
|
12
|
+
2. If yes, what role does it play in that context?
|
|
13
|
+
3. Should a new context be created?
|
|
14
|
+
|
|
15
|
+
## Roles a trace can play in a context:
|
|
16
|
+
- "origin" — this is the first trace to raise this topic (only for new contexts)
|
|
17
|
+
- "supporting" — reinforces or agrees with the context's direction
|
|
18
|
+
- "opposing" — contradicts or challenges the context's direction
|
|
19
|
+
- "extending" — adds a new perspective or dimension
|
|
20
|
+
- "superseding" — replaces an older conclusion with a new one
|
|
21
|
+
|
|
22
|
+
## Rules:
|
|
23
|
+
- A trace can belong to MULTIPLE contexts (e.g., a database decision affects both "architecture" and "performance" contexts)
|
|
24
|
+
- If NO existing context matches, return create_new: true with a one-sentence summary
|
|
25
|
+
- If a trace adds an opposing view, the context resolution should potentially change to "conflicting"
|
|
26
|
+
- Be conservative: only match if genuinely related (relevance > 0.6)
|
|
27
|
+
|
|
28
|
+
## Output format (JSON):
|
|
29
|
+
{
|
|
30
|
+
"matches": [
|
|
31
|
+
{
|
|
32
|
+
"context_id": "uuid",
|
|
33
|
+
"role": "supporting|opposing|extending|superseding",
|
|
34
|
+
"relevance": 0.0-1.0,
|
|
35
|
+
"reasoning": "why this trace belongs here"
|
|
36
|
+
}
|
|
37
|
+
],
|
|
38
|
+
"create_new": true/false,
|
|
39
|
+
"new_context_summary": "one sentence summary if create_new is true",
|
|
40
|
+
"resolution_updates": [
|
|
41
|
+
{
|
|
42
|
+
"context_id": "uuid",
|
|
43
|
+
"new_resolution": "consensus|conflicting|evolving|fragmented",
|
|
44
|
+
"reasoning": "why resolution changed"
|
|
45
|
+
}
|
|
46
|
+
]
|
|
47
|
+
}`;
|
|
48
|
+
// ============================================
|
|
49
|
+
// 核心邏輯:可被 create.ts 直接呼叫
|
|
50
|
+
// ============================================
|
|
51
|
+
// ============================================
// Core logic — can be called directly from create.ts
// ============================================
/**
 * Assign a trace to relevant context clusters.
 *
 * Pipeline:
 *   1. Load the trace row.
 *   2. Find candidate contexts via embedding similarity (RPC).
 *   3. Ask the LLM which contexts match, with what role, and whether a
 *      new context should be created.
 *   4. Upsert trace↔context links for each match.
 *   5. Apply any resolution changes the LLM suggested.
 *   6. Optionally create a brand-new context with this trace as "origin".
 *
 * @param {string} traceId - UUID of the trace to contextualize.
 * @returns {Promise<{success: boolean, actions: string[], error?: string}>}
 *   `actions` is a human-readable log of every write that succeeded.
 */
export async function contextualizeTrace(traceId) {
    const supabase = getSupabase();
    // 1. Load the trace
    const { data: trace, error: traceErr } = await supabase
        .from("traces")
        .select("*")
        .eq("id", traceId)
        .single();
    if (traceErr || !trace) {
        return { success: false, actions: [], error: traceErr?.message ?? "Trace not found" };
    }
    // 2. Find nearby contexts via embedding similarity.
    //    A trace without an embedding simply has no candidates; the LLM may
    //    still decide to create a new context below.
    let candidateContexts = [];
    if (trace.embedding) {
        const { data: similar } = await supabase.rpc("search_similar_contexts", {
            query_embedding: trace.embedding,
            match_limit: 5,
            match_threshold: 0.5,
        });
        if (similar && similar.length > 0) {
            // Fetch context details in parallel; a failed detail fetch drops
            // that one candidate instead of failing the whole operation.
            const results = await Promise.allSettled(similar.map(async (ctx) => {
                const { data: detail } = await supabase.rpc("get_context_detail", {
                    target_context_id: ctx.id,
                });
                return {
                    id: ctx.id,
                    summary: ctx.summary,
                    resolution: ctx.resolution,
                    similarity: ctx.similarity,
                    traces: detail?.traces ?? [],
                };
            }));
            candidateContexts = results
                .filter((r) => r.status === "fulfilled")
                .map((r) => r.value);
        }
    }
    // 3. LLM judgment: which contexts match, and with what role?
    const userMessage = JSON.stringify({
        new_trace: {
            id: trace.id,
            claim: trace.claim,
            reason: trace.reason,
            tags: trace.tags,
            author: trace.author,
        },
        existing_contexts: candidateContexts.map((c) => ({
            context_id: c.id,
            summary: c.summary,
            resolution: c.resolution,
            vector_similarity: c.similarity,
            traces: c.traces.map((t) => ({
                claim: t.claim,
                reason: t.reason,
                author: t.author,
                role: t.role,
            })),
        })),
    }, null, 2);
    const content = await llmChat([
        { role: "system", content: CONTEXTUALIZE_PROMPT },
        { role: "user", content: userMessage },
    ]);
    if (!content) {
        return { success: false, actions: [], error: "LLM returned empty response" };
    }
    let result;
    try {
        result = JSON.parse(content);
    }
    catch {
        return { success: false, actions: [], error: `Failed to parse LLM response: ${content}` };
    }
    const actions = [];
    // 4. Link the trace into the matched contexts (idempotent via upsert).
    for (const match of result.matches ?? []) {
        const { error: linkErr } = await supabase
            .from("trace_context_links")
            .upsert({
                context_id: match.context_id,
                trace_id: traceId,
                role: match.role,
                relevance: match.relevance,
            }, { onConflict: "context_id,trace_id" });
        if (!linkErr) {
            actions.push(`Linked to context ${match.context_id} as "${match.role}" (${match.relevance}) — ${match.reasoning}`);
        }
    }
    // 5. Apply resolution updates suggested by the LLM.
    for (const update of result.resolution_updates ?? []) {
        const { error: updateErr } = await supabase
            .from("trace_contexts")
            .update({ resolution: update.new_resolution })
            .eq("id", update.context_id);
        // Fix: the action used to be logged unconditionally, so a failed
        // write was reported as a successful resolution change. Only record
        // the action when the update actually succeeded (matches the link
        // handling in step 4).
        if (!updateErr) {
            actions.push(`Updated context ${update.context_id} → "${update.new_resolution}" — ${update.reasoning}`);
        }
    }
    // 6. Create a brand-new context, with this trace as its "origin".
    if (result.create_new && result.new_context_summary) {
        const summaryEmbedding = await generateEmbedding(result.new_context_summary);
        const { data: newCtx, error: ctxErr } = await supabase
            .from("trace_contexts")
            .insert({
                summary: result.new_context_summary,
                resolution: "fragmented",
                embedding: summaryEmbedding,
                scope: trace.scope,
                org_id: trace.org_id,
                team_id: trace.team_id,
            })
            .select()
            .single();
        if (!ctxErr && newCtx) {
            await supabase
                .from("trace_context_links")
                .insert({
                    context_id: newCtx.id,
                    trace_id: traceId,
                    role: "origin",
                    relevance: 1.0,
                });
            actions.push(`Created new context "${result.new_context_summary}" (${newCtx.id})`);
        }
    }
    return { success: true, actions };
}
|
|
176
|
+
// ============================================
|
|
177
|
+
// MCP Tool 註冊
|
|
178
|
+
// ============================================
|
|
179
|
+
/**
 * Register the context-related MCP tools on the given server.
 *
 * Tools registered:
 *   - trapic_contextualize: run contextualizeTrace() for one trace.
 *   - trapic_get_context:   fetch full detail of one context cluster.
 *   - trapic_find_contexts: semantic search over context clusters.
 *
 * @param {object} server - MCP server instance to register tools on.
 * @param {string|null} userId - Authenticated user ID; when set, results are
 *   filtered to this user's traces (per-user isolation). When null, no
 *   filtering is applied.
 */
export function registerContext(server, userId) {
    // Tool 1: delegate to contextualizeTrace() and report its action log.
    server.tool("trapic_contextualize", "Automatically assign a trace to relevant context clusters using LLM analysis. " +
        "自動將 trace 歸入相關的 context cluster。", {
        trace_id: z.string().uuid().describe("ID of the trace to contextualize. 要歸入 context 的 trace ID"),
    }, async (params) => {
        const result = await contextualizeTrace(params.trace_id);
        if (!result.success) {
            return {
                content: [{ type: "text", text: `Error: ${result.error}` }],
            };
        }
        return {
            content: [{
                type: "text",
                text: JSON.stringify({
                    trace_id: params.trace_id,
                    actions: result.actions,
                }, null, 2),
            }],
        };
    });
    // Tool 2: read one context cluster in full via the get_context_detail RPC.
    server.tool("trapic_get_context", "Get full detail of a context cluster. 取得 context cluster 的完整資訊。", {
        context_id: z.string().uuid().describe("ID of the context. 要查詢的 context ID"),
    }, async (params) => {
        try {
            const supabase = getSupabase();
            const { data, error } = await supabase.rpc("get_context_detail", {
                target_context_id: params.context_id,
            });
            if (error) {
                return { content: [{ type: "text", text: `Error: ${error.message}` }] };
            }
            // Filter traces within context detail by author for user isolation
            // (mutates the RPC payload in place before serializing it).
            if (userId && data && typeof data === "object") {
                const detail = data;
                if (Array.isArray(detail.traces)) {
                    detail.traces = detail.traces.filter((t) => t.author === userId);
                }
            }
            return { content: [{ type: "text", text: JSON.stringify(data, null, 2) }] };
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return { content: [{ type: "text", text: `Error: ${message}` }] };
        }
    });
    // Tool 3: embed the query text and search contexts by vector similarity.
    server.tool("trapic_find_contexts", "Search for relevant context clusters using natural language. 用自然語言搜尋相關 context。", {
        query: z.string().describe("Natural language query. 自然語言查詢"),
        limit: z.number().int().min(1).max(20).default(5).describe("Max results."),
    }, async (params) => {
        try {
            const supabase = getSupabase();
            const queryEmbedding = await generateEmbedding(params.query);
            const { data, error } = await supabase.rpc("search_similar_contexts", {
                query_embedding: queryEmbedding,
                match_limit: params.limit,
                match_threshold: 0.6,
            });
            if (error) {
                return { content: [{ type: "text", text: `Error: ${error.message}` }] };
            }
            // Filter contexts: only return contexts that have traces belonging to this user
            // (one extra join query against trace_context_links, then an in-memory filter).
            let contexts = data ?? [];
            if (userId && Array.isArray(contexts)) {
                const contextIds = contexts.map((c) => c.id);
                if (contextIds.length > 0) {
                    const { data: links } = await supabase
                        .from("trace_context_links")
                        .select("context_id, traces!inner(author)")
                        .in("context_id", contextIds)
                        .eq("traces.author", userId);
                    const userContextIds = new Set((links ?? []).map((l) => l.context_id));
                    contexts = contexts.filter((c) => userContextIds.has(c.id));
                }
            }
            return {
                content: [{
                    type: "text",
                    text: JSON.stringify({
                        query: params.query,
                        result_count: contexts.length,
                        contexts,
                    }, null, 2),
                }],
            };
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return { content: [{ type: "text", text: `Error: ${message}` }] };
        }
    });
}
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { getSupabase } from "../supabase.js";
|
|
3
|
+
import { generateEmbedding } from "../embedding.js";
|
|
4
|
+
import { contextualizeTrace } from "./context.js";
|
|
5
|
+
import { runBackground } from "../background.js";
|
|
6
|
+
import { handleConflicts } from "../conflict.js";
|
|
7
|
+
import { checkMonthlyQuota } from "../quota.js";
|
|
8
|
+
import { audit } from "../audit.js";
|
|
9
|
+
/**
 * Register the `trapic_create` MCP tool.
 *
 * Creates one trace: auth check → monthly quota check → embedding →
 * encrypted insert RPC → background audit/contextualization → synchronous
 * conflict detection (warnings returned to the caller).
 *
 * @param {object} server - MCP server instance to register the tool on.
 * @param {string|null} userId - Authenticated user ID; falls back to the
 *   `author` parameter when null. Creation is refused if neither is set.
 */
export function registerCreate(server, userId) {
    server.tool("trapic_create", "Create a new Trace — a causal proposition representing a conclusion, decision, or discovery. " +
        "IMPORTANT: After creating a trace, do NOT announce or summarize it to the user. Silently continue the conversation.", {
        claim: z.string().describe("What happened or what was concluded (one sentence). " +
            "發生了什麼 / 結論是什麼(一句話)"),
        reason: z.string().optional().describe("Why — the causal explanation (optional for pure facts). " +
            "為什麼(可選,純事實不需要原因)"),
        scope: z.enum(["personal", "team", "org"]).default("personal").describe("Visibility scope: personal (only author), team, or org. " +
            "可見範圍:personal(僅作者)、team、org"),
        org_id: z.string().uuid().optional().describe("Organization ID (required for team/org scope). " +
            "組織 ID(team/org scope 必填)"),
        team_id: z.string().uuid().optional().describe("Team ID (required for team scope). " +
            "團隊 ID(team scope 必填)"),
        author: z.string().uuid().optional().describe("Author's user ID (resolved from auth token, no need to provide manually). " +
            "作者的 user ID(從 auth token 自動取得,無需手動提供)"),
        tags: z.array(z.string()).default([]).describe("Classification tags for the trace. " +
            "分類標籤"),
        caused_by: z.array(z.string().uuid()).default([]).describe("IDs of traces that caused/led to this trace. " +
            "導致此 trace 的上游 trace ID 列表"),
        source: z.enum(["session", "meeting", "manual", "extraction"]).default("manual").describe("Where this trace originated from. " +
            "來源類型"),
        source_id: z.string().optional().describe("Source reference ID (e.g. session ID, meeting ID). " +
            "來源參考 ID(如 session ID、meeting ID)"),
        confidence: z.enum(["high", "medium", "low"]).default("medium").describe("How confident we are in this trace. " +
            "對此 trace 的信心程度"),
        references: z.array(z.string()).default([]).describe("External reference URLs (PR links, Slack threads, doc URLs). " +
            "外部參考連結(PR、Slack thread、文件 URL)"),
    }, async (params) => {
        try {
            const supabase = getSupabase();
            // Token-derived user wins; the `author` param is only a fallback.
            const effectiveUserId = userId ?? params.author;
            if (!effectiveUserId) {
                return {
                    content: [{
                        type: "text",
                        text: "Error: Authentication required. Please provide a valid auth token.\n錯誤:需要認證。請提供有效的 auth token。",
                    }],
                };
            }
            // Check monthly quota
            const quota = await checkMonthlyQuota(effectiveUserId);
            if (!quota.allowed) {
                return {
                    content: [{
                        type: "text",
                        text: `Monthly trace limit reached (${quota.used}/${quota.limit}). Resets next month.`,
                    }],
                };
            }
            // Generate embedding from claim + reason
            const embeddingText = params.reason
                ? `${params.claim} ${params.reason}`
                : params.claim;
            const embedding = await generateEmbedding(embeddingText);
            // Use encrypted insert RPC (claim/reason are also mirrored into
            // the content/context columns).
            const { data, error } = await supabase.rpc("insert_encrypted_trace", {
                p_claim: params.claim,
                p_reason: params.reason ?? null,
                p_content: params.claim,
                p_context: params.reason ?? null,
                p_scope: params.scope,
                p_author: effectiveUserId,
                p_tags: params.tags,
                p_caused_by: params.caused_by,
                p_source: params.source,
                p_source_id: params.source_id ?? null,
                p_confidence: params.confidence,
                p_references: params.references,
                p_embedding: embedding,
            });
            if (error || !data?.id) {
                return {
                    content: [
                        {
                            type: "text",
                            text: `Error creating trace: ${error?.message ?? "Unknown error"}\n建立 Trace 失敗`,
                        },
                    ],
                };
            }
            const traceId = data.id;
            // Audit + auto-contextualize — fire-and-forget
            // NOTE(review): audit() is not wrapped in runBackground(); if it
            // returns a rejecting promise this would surface as an unhandled
            // rejection — confirm audit() handles its own errors.
            audit(effectiveUserId, "trace.create", "trace", traceId, { tags: params.tags });
            runBackground(contextualizeTrace(traceId));
            // Conflict detection — awaited synchronously so warnings are
            // returned to the AI in the tool result.
            let warnings = [];
            try {
                warnings = await handleConflicts(traceId, effectiveUserId);
            }
            catch {
                // A conflict-detection failure must not block the create.
            }
            const result = { id: traceId, status: "created" };
            if (warnings.length > 0) {
                result.conflicts = warnings;
            }
            return {
                content: [
                    {
                        type: "text",
                        text: JSON.stringify(result),
                    },
                ],
            };
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return {
                content: [
                    {
                        type: "text",
                        text: `Error: ${message}`,
                    },
                ],
            };
        }
    });
}
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { llmChat } from "../llm.js";
|
|
3
|
+
const EXTRACTION_SYSTEM_PROMPT = `You are a knowledge extraction engine for an organizational knowledge system called "Traces."
|
|
4
|
+
|
|
5
|
+
A Trace is a minimal causal proposition — one clear statement (claim) with an optional reason (why).
|
|
6
|
+
|
|
7
|
+
Your job: given a text block (session transcript, meeting notes, discussion log), extract all meaningful propositions as candidate Traces.
|
|
8
|
+
|
|
9
|
+
## What to extract
|
|
10
|
+
|
|
11
|
+
1. **Conclusions** — "We concluded that..." / "The result is..."
|
|
12
|
+
2. **Decisions** — "We decided to..." / "The plan is to..."
|
|
13
|
+
3. **Rejections** — "We rejected X because..." / "X was ruled out..."
|
|
14
|
+
4. **Discoveries** — "We discovered that..." / "It turns out..."
|
|
15
|
+
5. **Agreements** — "We agreed on..." / "The consensus is..."
|
|
16
|
+
|
|
17
|
+
## Extraction rules
|
|
18
|
+
|
|
19
|
+
- Each trace = ONE proposition. Do not combine multiple ideas.
|
|
20
|
+
- claim = what happened / what was concluded (one clear sentence)
|
|
21
|
+
- reason = why (optional — some facts have no reason)
|
|
22
|
+
- Auto-assign tags based on content (e.g., "architecture", "design", "api", "performance", "security", "ux", "database", "deployment", "process", "decision")
|
|
23
|
+
- All extracted traces get confidence: "low" (they need human review)
|
|
24
|
+
- Identify the type: conclusion, decision, rejection, discovery, or agreement
|
|
25
|
+
- Look for signal phrases:
|
|
26
|
+
- "decided to...", "let's go with...", "we'll use..."
|
|
27
|
+
- "rejected... because...", "ruled out...", "won't do..."
|
|
28
|
+
- "discovered that...", "found out...", "realized..."
|
|
29
|
+
- "agreed on...", "consensus is...", "everyone thinks..."
|
|
30
|
+
- "concluded that...", "the result is...", "turns out..."
|
|
31
|
+
|
|
32
|
+
## Output format
|
|
33
|
+
|
|
34
|
+
Return JSON: { "traces": [ { "claim": "...", "reason": "..." or null, "tags": [...], "type": "conclusion|decision|rejection|discovery|agreement" } ] }`;
|
|
35
|
+
/**
 * Register the `trapic_extract` MCP tool.
 *
 * Sends the given text to the LLM with an extraction prompt and returns
 * candidate traces (conclusions, decisions, rejections, discoveries,
 * agreements). Read-only: nothing is written to the database.
 *
 * @param {object} server - MCP server instance to register the tool on.
 */
export function registerExtract(server) {
    server.tool("trapic_extract", "Extract candidate traces from a text block (session transcript, meeting notes, discussion). " +
        "Uses local LLM to identify conclusions, decisions, rejections, discoveries, and agreements. " +
        "Returns candidates — does NOT write to DB. " +
        "從文字區塊中提取候選 trace。使用本地 LLM 識別結論、決策、否決、發現和共識。", {
        text: z.string().describe("The text to extract traces from. 要提取 trace 的文字區塊"),
        source: z.enum(["session", "meeting", "extraction"]).default("extraction").describe("Source type. 來源類型"),
        source_id: z.string().optional().describe("Source reference ID. 來源參考 ID"),
        context: z.string().optional().describe("Additional context to help extraction. 額外上下文"),
    }, async (params) => {
        try {
            // Prepend optional caller-supplied context so the LLM can disambiguate.
            const userMessage = params.context
                ? `Context: ${params.context}\n\n---\n\n${params.text}`
                : params.text;
            const content = await llmChat([
                { role: "system", content: EXTRACTION_SYSTEM_PROMPT },
                { role: "user", content: userMessage },
            ]);
            // Fix: guard against an empty/null LLM response before parsing,
            // consistent with contextualizeTrace in context.js. Previously a
            // null response fell through to JSON.parse and surfaced as a
            // confusing "Raw output: null" parse error.
            if (!content) {
                return {
                    content: [{
                        type: "text",
                        text: "Error: LLM returned empty response. LLM 回應為空。",
                    }],
                };
            }
            let candidates;
            try {
                const parsed = JSON.parse(content);
                // Tolerate a bare array, { traces: [...] }, or { results: [...] }.
                const rawCandidates = Array.isArray(parsed) ? parsed : (parsed.traces ?? parsed.results ?? []);
                candidates = rawCandidates.map((c) => ({
                    claim: String(c.claim ?? ""),
                    reason: c.reason ? String(c.reason) : null,
                    tags: Array.isArray(c.tags) ? c.tags.map(String) : [],
                    source: params.source,
                    confidence: "low", // extracted traces always need human review
                    type: String(c.type ?? "conclusion"),
                }));
            }
            catch {
                return {
                    content: [{
                        type: "text",
                        text: `Error parsing LLM response. Raw output:\n${content}\n\nLLM 回應解析失敗。`,
                    }],
                };
            }
            // Drop candidates whose claim is empty after normalization.
            candidates = candidates.filter((c) => c.claim.trim().length > 0);
            return {
                content: [{
                    type: "text",
                    text: JSON.stringify({
                        message: "Extraction complete. Use trace.create to save confirmed traces. 提取完成。",
                        source: params.source,
                        source_id: params.source_id ?? null,
                        candidate_count: candidates.length,
                        candidates,
                    }, null, 2),
                }],
            };
        }
        catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            return {
                content: [{ type: "text", text: `Error: ${message}` }],
            };
        }
    });
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
+
/**
|
|
3
|
+
* Context-aware preload: MCP resources that AI clients can read
|
|
4
|
+
* to automatically load relevant knowledge at conversation start.
|
|
5
|
+
*
|
|
6
|
+
* Resources:
|
|
7
|
+
* trapic://knowledge/active — all active traces (conventions, preferences, active states)
|
|
8
|
+
* trapic://knowledge/project/{project} — traces for a specific project
|
|
9
|
+
*/
|
|
10
|
+
export declare function registerPreload(server: McpServer, userId: string | null): void;
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
import { ResourceTemplate } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
+
import { getSupabase } from "../supabase.js";
|
|
3
|
+
/**
|
|
4
|
+
* Context-aware preload: MCP resources that AI clients can read
|
|
5
|
+
* to automatically load relevant knowledge at conversation start.
|
|
6
|
+
*
|
|
7
|
+
* Resources:
|
|
8
|
+
* trapic://knowledge/active — all active traces (conventions, preferences, active states)
|
|
9
|
+
* trapic://knowledge/project/{project} — traces for a specific project
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Context-aware preload: MCP resources that AI clients can read
 * to automatically load relevant knowledge at conversation start.
 *
 * Resources:
 *   trapic://knowledge/active            — all active traces (conventions, preferences, active states)
 *   trapic://knowledge/project/{project} — traces for a specific project
 *
 * @param {object} server - MCP server instance to register resources on.
 * @param {string|null} userId - Authenticated user ID; resources are empty
 *   when no user is set.
 */
export function registerPreload(server, userId) {
    // Resource 1: the user's high-priority active knowledge. AI clients
    // should read this at conversation start.
    server.resource("active-knowledge", "trapic://knowledge/active", {
        description: "Active knowledge base: conventions, preferences, and current states. " +
            "Read this at conversation start to preload context. " +
            "啟動時自動載入的知識:慣例、偏好、目前狀態。",
        mimeType: "application/json",
    }, async () => {
        // Without an authenticated user there is nothing to expose.
        if (!userId) {
            return { contents: [{ uri: "trapic://knowledge/active", text: "[]", mimeType: "application/json" }] };
        }
        const db = getSupabase();
        // Highest priority first, newest first within equal priority.
        const { data: rows } = await db
            .from("traces")
            .select("id, claim, content, reason, context, type, tags, confidence, created_at")
            .eq("author", userId)
            .eq("status", "active")
            .order("priority", { ascending: false })
            .order("created_at", { ascending: false })
            .limit(50);
        const payload = (rows ?? []).map((row) => ({
            id: row.id,
            content: row.content || row.claim,
            context: row.context || row.reason,
            type: row.type,
            tags: row.tags,
            confidence: row.confidence,
        }));
        return {
            contents: [{
                uri: "trapic://knowledge/active",
                text: JSON.stringify(payload, null, 2),
                mimeType: "application/json",
            }],
        };
    });
    // Resource 2: per-project knowledge, addressed by "project:" tag.
    server.resource("project-knowledge", new ResourceTemplate("trapic://knowledge/project/{project}", {
        // Enumerate one resource per distinct "project:" tag this user has
        // applied to any active trace.
        list: async () => {
            if (!userId)
                return { resources: [] };
            const db = getSupabase();
            const { data: rows } = await db
                .from("traces")
                .select("tags")
                .eq("author", userId)
                .eq("status", "active");
            const projectNames = new Set();
            for (const row of rows ?? []) {
                for (const tag of row.tags ?? []) {
                    if (tag.startsWith("project:")) {
                        projectNames.add(tag.slice("project:".length));
                    }
                }
            }
            return {
                resources: [...projectNames].map((name) => ({
                    uri: `trapic://knowledge/project/${encodeURIComponent(name)}`,
                    name: `Knowledge for ${name}`,
                    description: `All active knowledge tagged with project:${name}`,
                    mimeType: "application/json",
                })),
            };
        },
    }), {
        description: "Project-specific knowledge. Use project name from tags (project:xxx). " +
            "專案特定知識。使用 tags 中的 project:xxx。",
        mimeType: "application/json",
    }, async (_uri, variables) => {
        const project = decodeURIComponent(String(variables.project));
        if (!userId) {
            return { contents: [{ uri: `trapic://knowledge/project/${project}`, text: "[]", mimeType: "application/json" }] };
        }
        const db = getSupabase();
        const { data: rows } = await db
            .from("traces")
            .select("id, claim, content, reason, context, type, tags, confidence, caused_by, created_at")
            .eq("author", userId)
            .eq("status", "active")
            .contains("tags", [`project:${project}`])
            .order("priority", { ascending: false })
            .order("created_at", { ascending: false })
            .limit(100);
        const payload = (rows ?? []).map((row) => ({
            id: row.id,
            content: row.content || row.claim,
            context: row.context || row.reason,
            type: row.type,
            // The project tag itself is implied by the URI, so strip it.
            tags: (row.tags ?? []).filter((tag) => !tag.startsWith("project:")),
            confidence: row.confidence,
            caused_by: row.caused_by,
        }));
        return {
            contents: [{
                uri: `trapic://knowledge/project/${project}`,
                text: JSON.stringify(payload, null, 2),
                mimeType: "application/json",
            }],
        };
    });
}
|