trapic-mcp 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +152 -0
- package/bin/trapic-mcp.mjs +902 -0
- package/bin/wrapper.sh +5 -0
- package/dist/archive.d.ts +7 -0
- package/dist/archive.js +116 -0
- package/dist/audit.d.ts +5 -0
- package/dist/audit.js +16 -0
- package/dist/background.d.ts +8 -0
- package/dist/background.js +17 -0
- package/dist/config.d.ts +46 -0
- package/dist/config.js +20 -0
- package/dist/conflict.d.ts +14 -0
- package/dist/conflict.js +103 -0
- package/dist/embedding.d.ts +6 -0
- package/dist/embedding.js +74 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +104 -0
- package/dist/llm.d.ts +10 -0
- package/dist/llm.js +47 -0
- package/dist/ollama.d.ts +11 -0
- package/dist/ollama.js +63 -0
- package/dist/quota.d.ts +7 -0
- package/dist/quota.js +16 -0
- package/dist/rate-limit.d.ts +5 -0
- package/dist/rate-limit.js +38 -0
- package/dist/request-context.d.ts +3 -0
- package/dist/request-context.js +12 -0
- package/dist/supabase.d.ts +2 -0
- package/dist/supabase.js +16 -0
- package/dist/team-access.d.ts +5 -0
- package/dist/team-access.js +35 -0
- package/dist/tools/active.d.ts +2 -0
- package/dist/tools/active.js +63 -0
- package/dist/tools/assert.d.ts +3 -0
- package/dist/tools/assert.js +141 -0
- package/dist/tools/chain.d.ts +2 -0
- package/dist/tools/chain.js +118 -0
- package/dist/tools/context.d.ts +7 -0
- package/dist/tools/context.js +270 -0
- package/dist/tools/create.d.ts +2 -0
- package/dist/tools/create.js +126 -0
- package/dist/tools/extract.d.ts +2 -0
- package/dist/tools/extract.js +95 -0
- package/dist/tools/preload.d.ts +10 -0
- package/dist/tools/preload.js +112 -0
- package/dist/tools/search.d.ts +2 -0
- package/dist/tools/search.js +92 -0
- package/dist/tools/summary.d.ts +2 -0
- package/dist/tools/summary.js +176 -0
- package/dist/tools/update.d.ts +2 -0
- package/dist/tools/update.js +134 -0
- package/dist/worker.d.ts +15 -0
- package/dist/worker.js +700 -0
- package/package.json +59 -0
package/bin/wrapper.sh
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
#!/bin/bash
# Debug wrapper for trapic-mcp: records each invocation, then execs the real server.
LOG_DIR="$HOME/.trapic-mcp"
# Create the log directory BEFORE writing to it (the old script logged first,
# which fails with a redirect error on a fresh machine).
mkdir -p "$LOG_DIR"
echo "$(date) WRAPPER CALLED pid=$$ args=$*" >> "$LOG_DIR/wrapper.log"
# SECURITY: do not log TRAPIC_TOKEN or any prefix of it — even partial
# credential material does not belong in a plaintext log file.
# Resolve the server script relative to this wrapper instead of the previous
# hard-coded absolute path ("/Users/wongkimhong/..."), which only existed on
# the package author's machine.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exec node "$SCRIPT_DIR/trapic-mcp.mjs" "$@"
|
package/dist/archive.js
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
/**
 * Daily archive: move traces older than 30 days from Supabase to R2
 * Then delete archived traces after 7-day safety buffer
 */
import { createClient } from "@supabase/supabase-js";
const BATCH_SIZE = 500;
const FREE_ARCHIVE_DAYS = 30; // matches config/plans.ts free.archiveDays
const DELETE_AFTER_DAYS = 7; // days after archiving before deletion
/**
 * Run one archive sweep.
 *
 * Steps:
 *   1. Copy free-plan traces older than FREE_ARCHIVE_DAYS into per-user,
 *      per-day NDJSON objects in R2, then stamp archived_at.
 *   2. Hard-delete rows archived more than DELETE_AFTER_DAYS ago.
 *   3. Ask the DB to clean up orphaned context links.
 *
 * @param env - worker env; needs SUPABASE_URL, SUPABASE_SERVICE_ROLE_KEY and
 *              an ARCHIVE_BUCKET R2 binding.
 * @returns newline-joined human-readable log of what happened.
 */
export async function runArchive(env) {
    const supabase = createClient(env.SUPABASE_URL, env.SUPABASE_SERVICE_ROLE_KEY);
    const now = new Date();
    const logs = [];
    // Get team member user IDs — they have longer archive retention (pro/team plans)
    // TODO: When Stripe is integrated, check user_subscriptions for plan-specific archiveDays
    const { data: teamUsers } = await supabase
        .from("team_members")
        .select("user_id");
    const teamUserIds = new Set((teamUsers ?? []).map((r) => r.user_id));
    // ── Step 1: Archive free-plan traces older than 30 days ──
    const archiveCutoff = new Date(now);
    archiveCutoff.setDate(archiveCutoff.getDate() - FREE_ARCHIVE_DAYS);
    const cutoffISO = archiveCutoff.toISOString();
    let totalArchived = 0;
    let hasMore = true;
    // BUG FIX: traces skipped because they belong to team members keep
    // archived_at = null, so a query filtered only on archived_at returns the
    // SAME rows again on the next iteration. Once the oldest BATCH_SIZE rows
    // all belong to team members, the old loop never terminated. Keyset
    // pagination on created_at guarantees forward progress. (Rows sharing the
    // exact cursor timestamp are picked up on the next daily run.)
    let cursor = null;
    while (hasMore) {
        let query = supabase
            .from("traces")
            .select("*")
            .lt("created_at", cutoffISO)
            .is("archived_at", null)
            .order("created_at", { ascending: true })
            .limit(BATCH_SIZE);
        if (cursor) {
            query = query.gt("created_at", cursor);
        }
        const { data: traces, error } = await query;
        if (error) {
            logs.push(`Error fetching traces: ${error.message}`);
            break;
        }
        if (!traces || traces.length === 0) {
            hasMore = false;
            break;
        }
        // Advance the cursor past everything we have seen this iteration.
        cursor = traces[traces.length - 1].created_at;
        // Skip traces belonging to team members (they have longer retention)
        const archivable = traces.filter((t) => !teamUserIds.has(t.author));
        if (archivable.length === 0) {
            hasMore = traces.length >= BATCH_SIZE;
            continue;
        }
        // Group by user_id and date so each R2 object holds one user-day of traces
        const groups = new Map();
        for (const trace of archivable) {
            const date = trace.created_at.slice(0, 10); // YYYY-MM-DD
            const key = `${trace.author}/${date}`;
            if (!groups.has(key))
                groups.set(key, []);
            groups.get(key).push(trace);
        }
        // Write each group to R2, appending to any existing object for that day
        for (const [key, groupTraces] of groups) {
            const [userId, date] = key.split("/");
            const [year, month, day] = date.split("-");
            const r2Key = `archives/${userId}/${year}/${month}/${day}.ndjson`;
            const existing = await env.ARCHIVE_BUCKET.get(r2Key);
            let content = existing ? await existing.text() : "";
            for (const trace of groupTraces) {
                content += JSON.stringify(trace) + "\n";
            }
            await env.ARCHIVE_BUCKET.put(r2Key, content, {
                httpMetadata: { contentType: "application/x-ndjson" },
                customMetadata: {
                    userId,
                    date,
                    // total lines in the object, including previously archived traces
                    traceCount: String(content.split("\n").filter(Boolean).length),
                },
            });
        }
        // Mark as archived in Supabase only after the R2 writes succeeded
        const ids = archivable.map((t) => t.id);
        const { error: updateErr } = await supabase
            .from("traces")
            .update({ archived_at: now.toISOString() })
            .in("id", ids);
        if (updateErr) {
            logs.push(`Error marking archived: ${updateErr.message}`);
            break;
        }
        totalArchived += archivable.length;
        if (traces.length < BATCH_SIZE) {
            hasMore = false;
        }
    }
    if (totalArchived > 0) {
        logs.push(`Archived ${totalArchived} traces to R2`);
    }
    // ── Step 2: Delete traces archived more than 7 days ago ──
    const deleteCutoff = new Date(now);
    deleteCutoff.setDate(deleteCutoff.getDate() - DELETE_AFTER_DAYS);
    const deleteCutoffISO = deleteCutoff.toISOString();
    const { count, error: deleteErr } = await supabase
        .from("traces")
        .delete({ count: "exact" })
        .not("archived_at", "is", null)
        .lt("archived_at", deleteCutoffISO);
    if (deleteErr) {
        logs.push(`Error deleting old archived traces: ${deleteErr.message}`);
    }
    else if (count && count > 0) {
        logs.push(`Deleted ${count} archived traces from Supabase (archived >${DELETE_AFTER_DAYS}d ago)`);
    }
    // ── Step 3: Clean up orphaned context links ──
    // Tolerate the RPC not existing yet (migrations may lag behind deploys).
    const { error: cleanupErr } = await supabase.rpc("cleanup_orphaned_context_links");
    if (cleanupErr && !cleanupErr.message.includes("does not exist")) {
        logs.push(`Context link cleanup error: ${cleanupErr.message}`);
    }
    return logs.length > 0 ? logs.join("\n") : "No traces to archive or delete";
}
|
package/dist/audit.d.ts
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
/**
 * Audit logging — fire-and-forget, never blocks main flow.
 */
/** Auditable action names, namespaced as "<resource>.<verb>". */
export type AuditAction = "trace.create" | "trace.read" | "trace.update" | "trace.delete" | "trace.search" | "trace.summary" | "context.read" | "context.find" | "context.link" | "team.create" | "team.invite" | "team.join" | "team.leave" | "team.remove";
/**
 * Queue an audit-log entry in the background. Returns immediately;
 * failures are logged and swallowed (see audit.js), never thrown.
 */
export declare function audit(userId: string, action: AuditAction, resourceType: string, resourceId?: string, metadata?: Record<string, unknown>): void;
|
package/dist/audit.js
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
 * Audit logging — fire-and-forget, never blocks main flow.
 */
import { getSupabase } from "./supabase.js";
import { runBackground } from "./background.js";
/**
 * Insert an audit-log row via the insert_audit_log RPC, in the background.
 * Errors are logged to the console and swallowed: auditing must never break
 * or slow the caller's main flow.
 */
export function audit(userId, action, resourceType, resourceId, metadata) {
    const supabase = getSupabase();
    const p = supabase.rpc("insert_audit_log", {
        p_user_id: userId,
        p_action: action,
        p_resource_type: resourceType,
        p_resource_id: resourceId ?? null,
        p_metadata: metadata ?? {},
    }).then(() => { }, (err) => console.error("[audit]", err.message));
    // Fix: `.then()` already yields a native Promise, so the previous
    // Promise.resolve(p) wrapper was redundant — pass it straight through.
    runBackground(p);
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
 * Global background task holder for Cloudflare Workers.
 * Worker sets ctx.waitUntil via setWaitUntil() at request start,
 * then any module can call runBackground() to keep promises alive
 * after the Response is sent.
 */
/** Register the current request's waitUntil hook (overwrites any previous one). */
export declare function setWaitUntil(fn: (p: Promise<unknown>) => void): void;
/** Keep `p` alive past the response; rejections are logged, never rethrown. */
export declare function runBackground(p: Promise<unknown>): void;
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Global background task holder for Cloudflare Workers.
|
|
3
|
+
* Worker sets ctx.waitUntil via setWaitUntil() at request start,
|
|
4
|
+
* then any module can call runBackground() to keep promises alive
|
|
5
|
+
* after the Response is sent.
|
|
6
|
+
*/
|
|
7
|
+
let _waitUntil = null;
|
|
8
|
+
export function setWaitUntil(fn) {
|
|
9
|
+
_waitUntil = fn;
|
|
10
|
+
}
|
|
11
|
+
export function runBackground(p) {
|
|
12
|
+
if (_waitUntil) {
|
|
13
|
+
_waitUntil(p);
|
|
14
|
+
}
|
|
15
|
+
// Always attach error handler to prevent unhandled rejection
|
|
16
|
+
p.then(() => { }, (err) => console.error("[bg]", err.message));
|
|
17
|
+
}
|
package/dist/config.d.ts
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
/**
 * Plan limits config for MCP server.
 * Mirrors trace-web/src/config/plans.ts — keep in sync.
 *
 * TODO: When Stripe is integrated, fetch plan from DB per user.
 * For now, all users get free plan limits.
 */
/** Quota and behavior knobs for a single plan tier. */
export interface PlanLimits {
    traces: number;
    tokens: number;
    rateLimit: number;
    maxExtractPerSummary: number;
    auditLogDays: number;
    conflictAutoSupersede: boolean;
}
/** Literal per-tier limits; values must match trace-web/src/config/plans.ts. */
export declare const PLAN_LIMITS: {
    readonly free: {
        readonly traces: 100;
        readonly tokens: 5;
        readonly rateLimit: 20;
        readonly maxExtractPerSummary: 20;
        readonly auditLogDays: 7;
        readonly conflictAutoSupersede: false;
    };
    readonly pro: {
        readonly traces: 1000;
        readonly tokens: 10;
        readonly rateLimit: 60;
        readonly maxExtractPerSummary: 20;
        readonly auditLogDays: 7;
        readonly conflictAutoSupersede: false;
    };
    readonly team: {
        readonly traces: 5000;
        readonly tokens: 20;
        readonly rateLimit: 120;
        readonly maxExtractPerSummary: 20;
        readonly auditLogDays: 365;
        readonly conflictAutoSupersede: true;
    };
};
/**
 * Get limits for a user. Currently returns free plan for all users.
 * When Stripe is integrated, this will look up the user's subscription.
 */
export declare function getUserLimits(_userId: string): PlanLimits;
|
package/dist/config.js
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
/**
 * Plan limits config for MCP server.
 * Mirrors trace-web/src/config/plans.ts — keep in sync.
 *
 * TODO: When Stripe is integrated, fetch plan from DB per user.
 * For now, all users get free plan limits.
 */
// Knob shared by every tier today; spread in place so key order is unchanged.
const SHARED = { maxExtractPerSummary: 20 };
export const PLAN_LIMITS = {
    free: {
        traces: 100,
        tokens: 5,
        rateLimit: 20,
        ...SHARED,
        auditLogDays: 7,
        conflictAutoSupersede: false,
    },
    pro: {
        traces: 1000,
        tokens: 10,
        rateLimit: 60,
        ...SHARED,
        auditLogDays: 7,
        conflictAutoSupersede: false,
    },
    team: {
        traces: 5000,
        tokens: 20,
        rateLimit: 120,
        ...SHARED,
        auditLogDays: 365,
        conflictAutoSupersede: true,
    },
};
/**
 * Get limits for a user. Currently returns free plan for all users.
 * When Stripe is integrated, this will look up the user's subscription.
 */
export function getUserLimits(_userId) {
    // TODO: look up user plan from DB
    return PLAN_LIMITS.free;
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
/**
 * Conflict detection: find traces that contradict a newly created trace.
 * Uses embedding similarity + LLM judgment.
 */
/** One detected contradiction between the new trace and an existing trace. */
export interface ConflictResult {
    existing_trace_id: string;
    reason: string;
    should_supersede: boolean;
}
/**
 * Find same-author traces that contradict `traceId`.
 * Resolves to [] on any soft failure (missing embedding, unparseable LLM output).
 */
export declare function detectConflicts(traceId: string, userId: string): Promise<ConflictResult[]>;
/**
 * Auto-supersede conflicting traces and return warnings.
 */
export declare function handleConflicts(traceId: string, userId: string): Promise<string[]>;
|
package/dist/conflict.js
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
/**
 * Conflict detection: find traces that contradict a newly created trace.
 * Uses embedding similarity + LLM judgment.
 */
import { getSupabase } from "./supabase.js";
import { llmChat } from "./llm.js";
import { getUserLimits } from "./config.js";
// System prompt for the LLM judge. Runtime string — do not reword casually:
// the JSON shape it requests is parsed verbatim below.
const CONFLICT_PROMPT = `You are a knowledge conflict detector. Given a NEW trace and a list of EXISTING traces, determine if the new trace contradicts any existing ones.

A contradiction means the new trace says the OPPOSITE of an existing trace about the SAME topic. Examples:
- New: "Use Vite for bundling" vs Existing: "Use Webpack for bundling" → CONFLICT
- New: "API uses REST" vs Existing: "API uses GraphQL" → CONFLICT
- New: "Added pagination" vs Existing: "Added search" → NOT a conflict (different topics)
- New: "Password min 8 chars" vs Existing: "Password min 6 chars" → CONFLICT (supersedes)

Return JSON:
{
  "conflicts": [
    {
      "existing_trace_id": "uuid",
      "reason": "Why this is a contradiction (one sentence)",
      "should_supersede": true
    }
  ]
}

If no conflicts, return: { "conflicts": [] }
Be conservative — only flag clear contradictions, not mere differences.`;
/**
 * Detect existing traces by the same author that contradict trace `traceId`.
 * Returns [] on every soft-failure path (trace not found, no embedding yet,
 * no similar candidates, LLM output not valid JSON).
 */
export async function detectConflicts(traceId, userId) {
    const supabase = getSupabase();
    // 1. Get the new trace
    const { data: trace } = await supabase
        .from("traces")
        .select("id, claim, reason, content, context, tags, embedding")
        .eq("id", traceId)
        .single();
    // No embedding yet (or no such trace) — nothing to compare against.
    if (!trace?.embedding)
        return [];
    // 2. Find similar traces (potential conflicts)
    // NOTE(review): assumes the search_traces RPC returns id, author, claim,
    // reason and tags columns — confirm against the SQL function definition.
    const { data: similar } = await supabase.rpc("search_traces", {
        query_embedding: trace.embedding,
        match_limit: 10,
        match_threshold: 0.5, // deliberately loose so near-topic traces become candidates
        filter_status: "active",
        filter_tags: [],
    });
    // Filter to same author, exclude self
    const candidates = (similar ?? [])
        .filter((r) => r.author === userId && r.id !== traceId);
    if (candidates.length === 0)
        return [];
    // 3. LLM judgment
    const userMessage = JSON.stringify({
        new_trace: {
            id: trace.id,
            // content/context preferred, claim/reason as fallback — presumably
            // newer vs legacy column names; confirm against the traces schema.
            claim: trace.content || trace.claim,
            reason: trace.context || trace.reason,
        },
        // Cap at 5 candidates to keep the prompt small.
        existing_traces: candidates.slice(0, 5).map((t) => ({
            id: t.id,
            claim: t.claim,
            reason: t.reason,
            tags: t.tags,
        })),
    }, null, 2);
    const content = await llmChat([
        { role: "system", content: CONFLICT_PROMPT },
        { role: "user", content: userMessage },
    ]);
    try {
        const result = JSON.parse(content);
        return result.conflicts ?? [];
    }
    catch {
        // Model returned non-JSON — treat as "no conflicts" rather than
        // failing the trace-creation flow that called us.
        return [];
    }
}
|
|
78
|
+
/**
 * Auto-supersede conflicting traces and return warnings.
 *
 * Supersession only happens when the LLM recommended it AND the user's plan
 * enables conflictAutoSupersede (Team plan); otherwise the conflict is only
 * reported as a warning.
 */
export async function handleConflicts(traceId, userId) {
    const conflicts = await detectConflicts(traceId, userId);
    if (conflicts.length === 0)
        return [];
    const limits = getUserLimits(userId);
    const supabase = getSupabase();
    const warnings = [];
    for (const c of conflicts) {
        const autoSupersede = c.should_supersede && limits.conflictAutoSupersede;
        if (!autoSupersede) {
            warnings.push(`⚠ Potential conflict with ${c.existing_trace_id}: ${c.reason}`);
            continue;
        }
        // Team plan only: retire the older, contradicted trace.
        // The author filter guards against superseding someone else's trace.
        await supabase
            .from("traces")
            .update({ status: "superseded" })
            .eq("id", c.existing_trace_id)
            .eq("author", userId);
        warnings.push(`⚠ Superseded trace ${c.existing_trace_id}: ${c.reason}`);
    }
    return warnings;
}
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
/**
 * Embedding generation — supports Ollama (local) and OpenAI (cloud)
 * Set OPENAI_API_KEY to use OpenAI, otherwise falls back to Ollama
 */
/** Embed a single text; rejects if the selected backend is unreachable. */
export declare function generateEmbedding(text: string): Promise<number[]>;
/** Embed many texts, preserving input order; resolves to [] for empty input. */
export declare function generateEmbeddings(texts: string[]): Promise<number[][]>;
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
/**
 * Embedding generation — supports Ollama (local) and OpenAI (cloud)
 * Set OPENAI_API_KEY to use OpenAI, otherwise falls back to Ollama
 */
const OLLAMA_EMBED_MODEL = "nomic-embed-text";
// Configuration is read lazily at call time so env changes take effect per call.
const getOllamaUrl = () => process.env.OLLAMA_URL ?? "http://localhost:11434";
const getOpenAIKey = () => process.env.OPENAI_API_KEY;
const getOpenAIEmbedModel = () => process.env.OPENAI_EMBED_MODEL ?? "text-embedding-3-small";
/** Embed one text via a local Ollama server. */
async function ollamaEmbedding(text) {
    const res = await fetch(`${getOllamaUrl()}/api/embed`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model: OLLAMA_EMBED_MODEL, input: text }),
    });
    if (!res.ok) {
        const detail = await res.text();
        throw new Error(`Ollama embedding failed (${res.status}): ${detail}. ` +
            `請確認 Ollama 正在運行且已安裝 ${OLLAMA_EMBED_MODEL} 模型。`);
    }
    const payload = await res.json();
    return payload.embeddings[0];
}
/** Embed one text via the OpenAI embeddings API. */
async function openaiEmbedding(text) {
    const res = await fetch("https://api.openai.com/v1/embeddings", {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${getOpenAIKey()}`,
        },
        body: JSON.stringify({ model: getOpenAIEmbedModel(), input: text }),
    });
    if (!res.ok) {
        throw new Error(`OpenAI embedding failed (${res.status}): ${await res.text()}`);
    }
    const payload = await res.json();
    return payload.data[0].embedding;
}
/** Embed a single text, preferring OpenAI whenever a key is configured. */
export async function generateEmbedding(text) {
    return getOpenAIKey() ? openaiEmbedding(text) : ollamaEmbedding(text);
}
/** Batch-embed via one OpenAI call; results are re-ordered by input index. */
async function openaiEmbeddingBatch(texts) {
    const res = await fetch("https://api.openai.com/v1/embeddings", {
        method: "POST",
        headers: {
            "Content-Type": "application/json",
            "Authorization": `Bearer ${getOpenAIKey()}`,
        },
        body: JSON.stringify({ model: getOpenAIEmbedModel(), input: texts }),
    });
    if (!res.ok) {
        throw new Error(`OpenAI embedding batch failed (${res.status}): ${await res.text()}`);
    }
    const payload = await res.json();
    return [...payload.data].sort((a, b) => a.index - b.index).map((d) => d.embedding);
}
/**
 * Embed many texts in input order.
 * OpenAI handles the whole batch in one request; Ollama is called per text.
 */
export async function generateEmbeddings(texts) {
    if (texts.length === 0)
        return [];
    return getOpenAIKey()
        ? openaiEmbeddingBatch(texts)
        : Promise.all(texts.map((t) => ollamaEmbedding(t)));
}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
3
|
+
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
|
|
4
|
+
import { createMcpExpressApp } from "@modelcontextprotocol/sdk/server/express.js";
|
|
5
|
+
import { registerCreate } from "./tools/create.js";
|
|
6
|
+
import { registerSearch } from "./tools/search.js";
|
|
7
|
+
import { registerChain } from "./tools/chain.js";
|
|
8
|
+
import { registerActive } from "./tools/active.js";
|
|
9
|
+
import { registerUpdate } from "./tools/update.js";
|
|
10
|
+
import { registerExtract } from "./tools/extract.js";
|
|
11
|
+
import { registerContext } from "./tools/context.js";
|
|
12
|
+
import { registerSummary } from "./tools/summary.js";
|
|
13
|
+
import { randomUUID, createHash } from "node:crypto";
|
|
14
|
+
import { getSupabase } from "./supabase.js";
|
|
15
|
+
/**
 * Resolve the authenticated user ID from the TRAPIC_TOKEN env variable.
 * Only a SHA-256 digest of the token is ever compared against the database
 * (trace_api_tokens.token_hash); revoked tokens are rejected.
 * Returns null when no valid, unrevoked token is present.
 */
async function resolveUserIdFromToken() {
    const token = process.env.TRAPIC_TOKEN;
    // All valid API tokens carry the "tr_" prefix.
    if (!token?.startsWith("tr_"))
        return null;
    const digest = createHash("sha256").update(token).digest("hex");
    const { data } = await getSupabase()
        .from("trace_api_tokens")
        .select("user_id")
        .eq("token_hash", digest)
        .is("revoked_at", null)
        .single();
    return data?.user_id ?? null;
}
|
|
29
|
+
// Resolved once at startup: every tool is bound to this user ID for the
// lifetime of the process (stdio) or server (http).
const userId = await resolveUserIdFromToken();
const server = new McpServer({
    name: "trapic-mcp",
    version: "0.1.0",
});
// Register all tools
registerCreate(server, userId);
registerSearch(server, userId);
registerChain(server, userId); // trace.get_chain + trace.get_effects
registerActive(server, userId);
registerUpdate(server, userId);
registerExtract(server);
registerContext(server, userId); // trace.contextualize + trace.get_context + trace.find_contexts
registerSummary(server, userId);
const isHttpMode = process.env.TRANSPORT === "http" || process.argv.includes("--http");
if (isHttpMode) {
    // NOTE(review): createMcpExpressApp is imported from
    // "@modelcontextprotocol/sdk/server/express.js" — confirm that subpath
    // exists in the pinned SDK version.
    const app = createMcpExpressApp({ host: "0.0.0.0" });
    // One StreamableHTTP transport per MCP session, keyed by session ID.
    const transports = new Map();
    // POST /mcp: JSON-RPC messages. A request without a known session ID is
    // treated as the start of a new session.
    app.post("/mcp", async (req, res) => {
        const sessionId = req.headers["mcp-session-id"];
        let transport;
        if (sessionId && transports.has(sessionId)) {
            transport = transports.get(sessionId);
        }
        else {
            transport = new StreamableHTTPServerTransport({
                sessionIdGenerator: () => randomUUID(),
            });
            // Drop the map entry when the transport shuts down.
            transport.onclose = () => {
                const sid = transport.sessionId;
                if (sid)
                    transports.delete(sid);
            };
            await server.connect(transport);
            // Capture session ID after response is sent
            // (the transport assigns it while handling the initialize request,
            // so it is only observable from the response header on "finish").
            res.on("finish", () => {
                const sid = res.getHeader("mcp-session-id");
                if (sid && !transports.has(sid)) {
                    transports.set(sid, transport);
                }
            });
        }
        await transport.handleRequest(req, res, req.body);
    });
    // GET /mcp: server-to-client stream for an already-established session.
    app.get("/mcp", async (req, res) => {
        const sessionId = req.headers["mcp-session-id"];
        if (!sessionId || !transports.has(sessionId)) {
            res.status(400).json({ error: "Missing or invalid session ID" });
            return;
        }
        const transport = transports.get(sessionId);
        await transport.handleRequest(req, res);
    });
    // DELETE /mcp: explicit session teardown; idempotent for unknown sessions.
    app.delete("/mcp", async (req, res) => {
        const sessionId = req.headers["mcp-session-id"];
        if (sessionId && transports.has(sessionId)) {
            const transport = transports.get(sessionId);
            await transport.close();
            transports.delete(sessionId);
        }
        res.status(200).end();
    });
    // Health check
    app.get("/health", (_req, res) => {
        res.json({ status: "ok", server: "trapic-mcp", version: "0.1.0" });
    });
    const port = parseInt(process.env.PORT || "3100", 10);
    app.listen(port, "0.0.0.0", () => {
        console.log(`trapic-mcp HTTP server running on http://0.0.0.0:${port}/mcp`);
    });
}
else {
    // Default: stdio transport (local usage)
    const transport = new StdioServerTransport();
    await server.connect(transport);
}
|
package/dist/llm.d.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
/**
 * LLM chat completion — Groq (primary) → OpenAI (fallback)
 * Both use OpenAI-compatible API format
 */
/** A single message in an OpenAI-compatible chat transcript. */
interface ChatMessage {
    role: "system" | "user" | "assistant";
    content: string;
}
/**
 * Send a transcript to the configured provider and return the raw assistant
 * reply text. Rejects when no provider API key is configured.
 */
export declare function llmChat(messages: ChatMessage[]): Promise<string>;
export {};
|
package/dist/llm.js
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LLM chat completion — Groq (primary) → OpenAI (fallback)
|
|
3
|
+
* Both use OpenAI-compatible API format
|
|
4
|
+
*/
|
|
5
|
+
function getGroqKey() {
|
|
6
|
+
return process.env.GROQ_API_KEY;
|
|
7
|
+
}
|
|
8
|
+
function getGroqModel() {
|
|
9
|
+
return process.env.GROQ_CHAT_MODEL ?? "llama-3.1-8b-instant";
|
|
10
|
+
}
|
|
11
|
+
function getOpenAIKey() {
|
|
12
|
+
return process.env.OPENAI_API_KEY;
|
|
13
|
+
}
|
|
14
|
+
function getOpenAIModel() {
|
|
15
|
+
return process.env.OPENAI_CHAT_MODEL ?? "gpt-4o-mini";
|
|
16
|
+
}
|
|
17
|
+
async function chatCompletion(baseUrl, apiKey, model, messages) {
|
|
18
|
+
const response = await fetch(`${baseUrl}/chat/completions`, {
|
|
19
|
+
method: "POST",
|
|
20
|
+
headers: {
|
|
21
|
+
"Content-Type": "application/json",
|
|
22
|
+
Authorization: `Bearer ${apiKey}`,
|
|
23
|
+
},
|
|
24
|
+
body: JSON.stringify({
|
|
25
|
+
model,
|
|
26
|
+
messages,
|
|
27
|
+
temperature: 0.2,
|
|
28
|
+
response_format: { type: "json_object" },
|
|
29
|
+
}),
|
|
30
|
+
});
|
|
31
|
+
if (!response.ok) {
|
|
32
|
+
throw new Error(`LLM chat failed (${response.status}): ${await response.text()}`);
|
|
33
|
+
}
|
|
34
|
+
const data = (await response.json());
|
|
35
|
+
return data.choices[0].message.content;
|
|
36
|
+
}
|
|
37
|
+
export async function llmChat(messages) {
|
|
38
|
+
const groqKey = getGroqKey();
|
|
39
|
+
if (groqKey) {
|
|
40
|
+
return chatCompletion("https://api.groq.com/openai/v1", groqKey, getGroqModel(), messages);
|
|
41
|
+
}
|
|
42
|
+
const openaiKey = getOpenAIKey();
|
|
43
|
+
if (openaiKey) {
|
|
44
|
+
return chatCompletion("https://api.openai.com/v1", openaiKey, getOpenAIModel(), messages);
|
|
45
|
+
}
|
|
46
|
+
throw new Error("No LLM API key configured. Set GROQ_API_KEY or OPENAI_API_KEY.");
|
|
47
|
+
}
|
package/dist/ollama.d.ts
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
/**
 * LLM chat completion — supports Ollama (local) and OpenAI (cloud)
 * Set OPENAI_API_KEY to use OpenAI, otherwise falls back to Ollama
 */
/** A single message in an OpenAI-compatible chat transcript. */
interface ChatMessage {
    role: "system" | "user" | "assistant";
    content: string;
}
/**
 * Send a transcript to the configured backend and return the assistant reply
 * text. NOTE(review): the implementation (ollama.js) is not visible in this
 * chunk — confirm the Ollama/OpenAI selection behavior against it.
 */
export declare function ollamaChat(messages: ChatMessage[]): Promise<string>;
export {};
//# sourceMappingURL=ollama.d.ts.map
|