@duckmind/deepquark-darwin-arm64 0.9.83 → 0.9.90
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.deepquark/skills/bundled/knowledge-graph/SKILL.md +385 -0
- package/.deepquark/skills/bundled/knowledge-graph/STANDARDS.md +461 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/cli.ts +588 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/config.ts +630 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/connection-profile.ts +629 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/container.ts +756 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/mcp-client.ts +1310 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/output-formatter.ts +997 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/token-metrics.ts +335 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/transformation-log.ts +137 -0
- package/.deepquark/skills/bundled/knowledge-graph/lib/wrapper-config.ts +113 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/.env.example +129 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/compare-embeddings.ts +175 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/config-falkordb.yaml +108 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/config-neo4j.yaml +111 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/diagnose.ts +483 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-falkordb-dev.yml +146 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-falkordb.yml +151 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-neo4j-dev-local.yml +161 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-neo4j-dev.yml +161 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-neo4j.yml +169 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-production.yml +128 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose-test.yml +10 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/docker-compose.yml +84 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/entrypoint.sh +40 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/install.ts +2054 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/podman-compose-falkordb.yml +78 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/podman-compose-neo4j.yml +88 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/podman-compose.yml +83 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-all-llms-mcp.ts +387 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-embedding-models.ts +201 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-embedding-providers.ts +641 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-graphiti-model.ts +217 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-grok-correct.ts +141 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-grok-llms-mcp.ts +386 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-grok-models.ts +173 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-llama-extraction.ts +188 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-mcp-final.ts +240 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-mcp-live.ts +187 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-mcp-session.ts +127 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-model-combinations.ts +316 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-ollama-models.ts +228 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-openrouter-models.ts +460 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-real-life-mcp.ts +311 -0
- package/.deepquark/skills/bundled/knowledge-graph/server/test-search-debug.ts +199 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/Install.md +104 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/README.md +120 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/knowledge-cli.ts +996 -0
- package/.deepquark/skills/bundled/knowledge-graph/tools/server-cli.ts +531 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/BulkImport.md +514 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/CaptureEpisode.md +242 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/ClearGraph.md +392 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/GetRecent.md +352 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/GetStatus.md +373 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/HealthReport.md +212 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/InvestigateEntity.md +142 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/OntologyManagement.md +201 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/RunMaintenance.md +302 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/SearchByDate.md +255 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/SearchFacts.md +382 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/SearchKnowledge.md +374 -0
- package/.deepquark/skills/bundled/knowledge-graph/workflows/StixImport.md +212 -0
- package/bin/deepquark +0 -0
- package/package.json +1 -1
- package/.deepquark/skills/bundled/ge-payroll/SKILL.md +0 -153
- package/.deepquark/skills/bundled/ge-payroll/evals/evals.json +0 -23
- package/.deepquark/skills/bundled/ge-payroll/references/pain-points-improvements.md +0 -106
- package/.deepquark/skills/bundled/ge-payroll/references/process-detail.md +0 -217
- package/.deepquark/skills/bundled/ge-payroll/references/raci-stakeholders.md +0 -85
- package/.deepquark/skills/bundled/ge-payroll/references/timeline-mandays.md +0 -64
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Token Metrics for response size measurement
|
|
3
|
+
* @module token-metrics
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { appendFile, mkdir, readFile } from 'node:fs/promises';
|
|
7
|
+
import { dirname } from 'node:path';
|
|
8
|
+
|
|
9
|
+
// ============================================================================
|
|
10
|
+
// Types
|
|
11
|
+
// ============================================================================
|
|
12
|
+
|
|
13
|
+
/**
 * A single before/after size measurement for one MCP operation's response.
 * Produced by measureTokens() and persisted as JSONL via appendMetrics().
 */
export interface TokenMetrics {
  /** MCP operation name the measurement belongs to */
  operation: string;
  /** When the measurement was taken */
  timestamp: Date;
  /** UTF-8 byte size of the raw response (pretty-printed JSON) */
  rawBytes: number;
  /** UTF-8 byte size of the compact output */
  compactBytes: number;
  /** Size reduction from raw to compact, as a percentage of rawBytes (0 when rawBytes is 0) */
  savingsPercent: number;
  /** Estimated token count of the raw response (chars / 4 heuristic) */
  estimatedTokensBefore: number;
  /** Estimated token count of the compact output (chars / 4 heuristic) */
  estimatedTokensAfter: number;
  /** Time spent producing the compact output, in milliseconds */
  processingTimeMs: number;
}
|
|
23
|
+
|
|
24
|
+
/**
 * Summary statistics computed over a group of TokenMetrics measurements.
 * Returned by aggregateMetrics(); all fields are 0 for an empty group.
 */
export interface AggregateStats {
  /** Number of measurements in the group */
  count: number;
  /** Mean savingsPercent across the group */
  avgSavingsPercent: number;
  /** Median savingsPercent (mean of the two middle values for even counts) */
  medianSavingsPercent: number;
  /** Smallest savingsPercent in the group */
  minSavingsPercent: number;
  /** Largest savingsPercent in the group */
  maxSavingsPercent: number;
  /** Sum of rawBytes across the group */
  totalBytesBeforeTransform: number;
  /** Sum of compactBytes across the group */
  totalBytesAfterTransform: number;
  /** Mean processingTimeMs across the group */
  avgProcessingTimeMs: number;
}
|
|
34
|
+
|
|
35
|
+
/**
 * Result of validating measured savings against per-operation targets.
 * Built by generateBenchmarkReport().
 */
export interface BenchmarkReport {
  /** When the report was generated */
  generatedAt: Date;
  /** Total number of measurements the report covers */
  totalMeasurements: number;
  /** Statistics over all measurements combined */
  overall: AggregateStats;
  /** Statistics keyed by operation name */
  byOperation: Map<string, AggregateStats>;
  /** Operations whose average savings fell below their target */
  underperformingOperations: Array<{
    operation: string;
    avgSavingsPercent: number;
    target: number;
  }>;
  /** PASS when no operation is below its target, otherwise FAIL */
  verdict: 'PASS' | 'FAIL';
  /** Human-readable multi-line summary of the report */
  summary: string;
}
|
|
48
|
+
|
|
49
|
+
// ============================================================================
|
|
50
|
+
// Constants (T045)
|
|
51
|
+
// ============================================================================
|
|
52
|
+
|
|
53
|
+
/**
 * Minimum average savings (percent) expected for each known operation.
 * Operations averaging below their target are flagged as underperforming
 * by generateBenchmarkReport(); unknown operations fall back to DEFAULT_TARGET.
 */
export const TOKEN_SAVINGS_TARGETS: Record<string, number> = {
  add_memory: 25,
  search_nodes: 30,
  search_memory_nodes: 30,
  search_facts: 30,
  search_memory_facts: 30,
  get_episodes: 25,
  get_status: 25,
  clear_graph: 25,
  health: 25,
  delete_episode: 25,
  delete_entity_edge: 25,
};

// Default target for operations not listed in TOKEN_SAVINGS_TARGETS
const DEFAULT_TARGET = 25;
|
|
69
|
+
|
|
70
|
+
// ============================================================================
|
|
71
|
+
// Core Functions (T036-T037)
|
|
72
|
+
// ============================================================================
|
|
73
|
+
|
|
74
|
+
/**
|
|
75
|
+
* Estimate token count from text using chars/4 formula.
|
|
76
|
+
* This is an approximation; actual tokenization varies by model.
|
|
77
|
+
* T037
|
|
78
|
+
*/
|
|
79
|
+
export function estimateTokens(text: string): number {
|
|
80
|
+
if (!text) return 0;
|
|
81
|
+
return Math.ceil(text.length / 4);
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
/**
|
|
85
|
+
* Calculate token metrics for a transformation.
|
|
86
|
+
* T036
|
|
87
|
+
*/
|
|
88
|
+
export function measureTokens(
|
|
89
|
+
rawData: unknown,
|
|
90
|
+
compactOutput: string,
|
|
91
|
+
operation: string,
|
|
92
|
+
processingTimeMs: number
|
|
93
|
+
): TokenMetrics {
|
|
94
|
+
const rawJson = JSON.stringify(rawData, null, 2);
|
|
95
|
+
const rawBytes = new TextEncoder().encode(rawJson).length;
|
|
96
|
+
const compactBytes = new TextEncoder().encode(compactOutput).length;
|
|
97
|
+
|
|
98
|
+
const savingsPercent = rawBytes > 0 ? ((rawBytes - compactBytes) / rawBytes) * 100 : 0;
|
|
99
|
+
|
|
100
|
+
return {
|
|
101
|
+
operation,
|
|
102
|
+
timestamp: new Date(),
|
|
103
|
+
rawBytes,
|
|
104
|
+
compactBytes,
|
|
105
|
+
savingsPercent,
|
|
106
|
+
estimatedTokensBefore: estimateTokens(rawJson),
|
|
107
|
+
estimatedTokensAfter: estimateTokens(compactOutput),
|
|
108
|
+
processingTimeMs,
|
|
109
|
+
};
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
// ============================================================================
|
|
113
|
+
// Formatting Functions (T038)
|
|
114
|
+
// ============================================================================
|
|
115
|
+
|
|
116
|
+
/**
|
|
117
|
+
* Generate human-readable metrics summary.
|
|
118
|
+
* T038
|
|
119
|
+
*/
|
|
120
|
+
export function formatMetricsReport(metrics: TokenMetrics): string {
|
|
121
|
+
const tokensSaved = metrics.estimatedTokensBefore - metrics.estimatedTokensAfter;
|
|
122
|
+
|
|
123
|
+
const lines = [
|
|
124
|
+
'--- Token Metrics ---',
|
|
125
|
+
`Operation: ${metrics.operation}`,
|
|
126
|
+
`Raw size: ${metrics.rawBytes.toLocaleString()} bytes (${metrics.estimatedTokensBefore} est. tokens)`,
|
|
127
|
+
`Compact size: ${metrics.compactBytes.toLocaleString()} bytes (${metrics.estimatedTokensAfter} est. tokens)`,
|
|
128
|
+
`Savings: ${metrics.savingsPercent.toFixed(1)}% (${tokensSaved} tokens saved)`,
|
|
129
|
+
`Processing time: ${metrics.processingTimeMs}ms`,
|
|
130
|
+
];
|
|
131
|
+
|
|
132
|
+
return lines.join('\n');
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
// ============================================================================
|
|
136
|
+
// Persistence Functions (T039)
|
|
137
|
+
// ============================================================================
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Append metrics to JSONL file for later analysis.
|
|
141
|
+
* T039
|
|
142
|
+
*/
|
|
143
|
+
export async function appendMetrics(metrics: TokenMetrics, filePath: string): Promise<void> {
|
|
144
|
+
// Ensure directory exists
|
|
145
|
+
await mkdir(dirname(filePath), { recursive: true });
|
|
146
|
+
|
|
147
|
+
// Serialize metrics to JSON line
|
|
148
|
+
const line = `${JSON.stringify({
|
|
149
|
+
...metrics,
|
|
150
|
+
timestamp: metrics.timestamp.toISOString(),
|
|
151
|
+
})}\n`;
|
|
152
|
+
|
|
153
|
+
await appendFile(filePath, line, 'utf-8');
|
|
154
|
+
}
|
|
155
|
+
|
|
156
|
+
/**
|
|
157
|
+
* Load metrics from JSONL file for analysis.
|
|
158
|
+
* T039
|
|
159
|
+
*/
|
|
160
|
+
export async function loadMetrics(filePath: string): Promise<TokenMetrics[]> {
|
|
161
|
+
try {
|
|
162
|
+
const content = await readFile(filePath, 'utf-8');
|
|
163
|
+
const lines = content.trim().split('\n').filter(Boolean);
|
|
164
|
+
|
|
165
|
+
return lines
|
|
166
|
+
.map((line) => {
|
|
167
|
+
try {
|
|
168
|
+
const parsed = JSON.parse(line);
|
|
169
|
+
return {
|
|
170
|
+
...parsed,
|
|
171
|
+
timestamp: new Date(parsed.timestamp),
|
|
172
|
+
};
|
|
173
|
+
} catch {
|
|
174
|
+
return null;
|
|
175
|
+
}
|
|
176
|
+
})
|
|
177
|
+
.filter((m): m is TokenMetrics => m !== null);
|
|
178
|
+
} catch (_error) {
|
|
179
|
+
// File doesn't exist or is unreadable
|
|
180
|
+
return [];
|
|
181
|
+
}
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
// ============================================================================
|
|
185
|
+
// Analysis Functions (T040-T041)
|
|
186
|
+
// ============================================================================
|
|
187
|
+
|
|
188
|
+
/**
|
|
189
|
+
* Calculate aggregate statistics for a single group of metrics.
|
|
190
|
+
*/
|
|
191
|
+
function calculateStats(metrics: TokenMetrics[]): AggregateStats {
|
|
192
|
+
if (metrics.length === 0) {
|
|
193
|
+
return {
|
|
194
|
+
count: 0,
|
|
195
|
+
avgSavingsPercent: 0,
|
|
196
|
+
medianSavingsPercent: 0,
|
|
197
|
+
minSavingsPercent: 0,
|
|
198
|
+
maxSavingsPercent: 0,
|
|
199
|
+
totalBytesBeforeTransform: 0,
|
|
200
|
+
totalBytesAfterTransform: 0,
|
|
201
|
+
avgProcessingTimeMs: 0,
|
|
202
|
+
};
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
const savingsValues = metrics.map((m) => m.savingsPercent).sort((a, b) => a - b);
|
|
206
|
+
const processingTimes = metrics.map((m) => m.processingTimeMs);
|
|
207
|
+
|
|
208
|
+
const count = metrics.length;
|
|
209
|
+
const avgSavingsPercent = savingsValues.reduce((a, b) => a + b, 0) / count;
|
|
210
|
+
const minSavingsPercent = savingsValues[0];
|
|
211
|
+
const maxSavingsPercent = savingsValues[count - 1];
|
|
212
|
+
|
|
213
|
+
// Calculate median
|
|
214
|
+
const mid = Math.floor(count / 2);
|
|
215
|
+
const medianSavingsPercent =
|
|
216
|
+
count % 2 === 0 ? (savingsValues[mid - 1] + savingsValues[mid]) / 2 : savingsValues[mid];
|
|
217
|
+
|
|
218
|
+
const totalBytesBeforeTransform = metrics.reduce((sum, m) => sum + m.rawBytes, 0);
|
|
219
|
+
const totalBytesAfterTransform = metrics.reduce((sum, m) => sum + m.compactBytes, 0);
|
|
220
|
+
const avgProcessingTimeMs = processingTimes.reduce((a, b) => a + b, 0) / count;
|
|
221
|
+
|
|
222
|
+
return {
|
|
223
|
+
count,
|
|
224
|
+
avgSavingsPercent,
|
|
225
|
+
medianSavingsPercent,
|
|
226
|
+
minSavingsPercent,
|
|
227
|
+
maxSavingsPercent,
|
|
228
|
+
totalBytesBeforeTransform,
|
|
229
|
+
totalBytesAfterTransform,
|
|
230
|
+
avgProcessingTimeMs,
|
|
231
|
+
};
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
/**
|
|
235
|
+
* Calculate aggregate statistics from multiple measurements.
|
|
236
|
+
* T040
|
|
237
|
+
*/
|
|
238
|
+
export function aggregateMetrics(
|
|
239
|
+
metrics: TokenMetrics[],
|
|
240
|
+
groupBy?: 'operation' | 'day'
|
|
241
|
+
): AggregateStats | Map<string, AggregateStats> {
|
|
242
|
+
if (!groupBy) {
|
|
243
|
+
return calculateStats(metrics);
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
const groups = new Map<string, TokenMetrics[]>();
|
|
247
|
+
|
|
248
|
+
for (const metric of metrics) {
|
|
249
|
+
let key: string;
|
|
250
|
+
if (groupBy === 'operation') {
|
|
251
|
+
key = metric.operation;
|
|
252
|
+
} else {
|
|
253
|
+
// Group by day
|
|
254
|
+
key = metric.timestamp.toISOString().split('T')[0];
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
const existing = groups.get(key) || [];
|
|
258
|
+
existing.push(metric);
|
|
259
|
+
groups.set(key, existing);
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
const result = new Map<string, AggregateStats>();
|
|
263
|
+
for (const [key, groupMetrics] of groups) {
|
|
264
|
+
result.set(key, calculateStats(groupMetrics));
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
return result;
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
/**
|
|
271
|
+
* Generate comprehensive benchmark report for validation.
|
|
272
|
+
* T041
|
|
273
|
+
*/
|
|
274
|
+
export function generateBenchmarkReport(metrics: TokenMetrics[]): BenchmarkReport {
|
|
275
|
+
const generatedAt = new Date();
|
|
276
|
+
const totalMeasurements = metrics.length;
|
|
277
|
+
const overall = calculateStats(metrics);
|
|
278
|
+
const byOperation = aggregateMetrics(metrics, 'operation') as Map<string, AggregateStats>;
|
|
279
|
+
|
|
280
|
+
// Find underperforming operations
|
|
281
|
+
const underperformingOperations: BenchmarkReport['underperformingOperations'] = [];
|
|
282
|
+
|
|
283
|
+
for (const [operation, stats] of byOperation) {
|
|
284
|
+
const target = TOKEN_SAVINGS_TARGETS[operation] ?? DEFAULT_TARGET;
|
|
285
|
+
if (stats.avgSavingsPercent < target) {
|
|
286
|
+
underperformingOperations.push({
|
|
287
|
+
operation,
|
|
288
|
+
avgSavingsPercent: stats.avgSavingsPercent,
|
|
289
|
+
target,
|
|
290
|
+
});
|
|
291
|
+
}
|
|
292
|
+
}
|
|
293
|
+
|
|
294
|
+
const verdict: 'PASS' | 'FAIL' = underperformingOperations.length === 0 ? 'PASS' : 'FAIL';
|
|
295
|
+
|
|
296
|
+
// Generate summary
|
|
297
|
+
const summaryLines = [
|
|
298
|
+
'Token Savings Benchmark Report',
|
|
299
|
+
'==============================',
|
|
300
|
+
`Total measurements: ${totalMeasurements}`,
|
|
301
|
+
`Overall average savings: ${overall.avgSavingsPercent.toFixed(1)}%`,
|
|
302
|
+
'',
|
|
303
|
+
'By Operation:',
|
|
304
|
+
];
|
|
305
|
+
|
|
306
|
+
for (const [operation, stats] of byOperation) {
|
|
307
|
+
const target = TOKEN_SAVINGS_TARGETS[operation] ?? DEFAULT_TARGET;
|
|
308
|
+
const status = stats.avgSavingsPercent >= target ? '✅' : '❌';
|
|
309
|
+
summaryLines.push(
|
|
310
|
+
` ${operation}: ${stats.avgSavingsPercent.toFixed(1)}% avg (target: ${target}%) ${status}`
|
|
311
|
+
);
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
summaryLines.push('');
|
|
315
|
+
if (verdict === 'PASS') {
|
|
316
|
+
summaryLines.push('Verdict: PASS - All operations exceed target savings');
|
|
317
|
+
} else {
|
|
318
|
+
summaryLines.push(
|
|
319
|
+
`Verdict: FAIL - ${underperformingOperations.length} operation(s) below target`
|
|
320
|
+
);
|
|
321
|
+
for (const { operation, avgSavingsPercent, target } of underperformingOperations) {
|
|
322
|
+
summaryLines.push(` - ${operation}: ${avgSavingsPercent.toFixed(1)}% (target: ${target}%)`);
|
|
323
|
+
}
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
return {
|
|
327
|
+
generatedAt,
|
|
328
|
+
totalMeasurements,
|
|
329
|
+
overall,
|
|
330
|
+
byOperation,
|
|
331
|
+
underperformingOperations,
|
|
332
|
+
verdict,
|
|
333
|
+
summary: summaryLines.join('\n'),
|
|
334
|
+
};
|
|
335
|
+
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Transformation logging for MCP wrapper
|
|
3
|
+
* Logs transformation failures and slow operations
|
|
4
|
+
* @module transformation-log
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { appendFile, mkdir } from 'node:fs/promises';
|
|
8
|
+
import { dirname } from 'node:path';
|
|
9
|
+
import { homedir } from 'node:os';
|
|
10
|
+
|
|
11
|
+
/**
 * One entry in the wrapper's transformation log file.
 * Created by logTransformation(), which fills in id and timestamp.
 */
export interface TransformationLog {
  /** Log entry ID (random UUID-v4-style string) */
  id: string;
  /** When the event was logged */
  timestamp: Date;
  /** Severity level */
  level: 'info' | 'warn' | 'error';
  /** MCP operation name */
  operation: string;
  /** Input data size in bytes */
  inputSize: number;
  /** Error message, present only for failures */
  error?: string;
  /** Processing time in ms (recorded for slow-operation warnings) */
  processingTimeMs?: number;
  /** Whether the raw (fallback) output was used instead of the transform */
  usedFallback: boolean;
}
|
|
29
|
+
|
|
30
|
+
/** Default log file path (under the user's home directory); overridable via MADEINOZ_WRAPPER_LOG_FILE */
export const DEFAULT_LOG_PATH = `${homedir()}/.madeinoz-knowledge/wrapper.log`;

/** Performance thresholds (milliseconds) */
// Transformations taking at least this long are logged as slow (warn level)
export const SLOW_THRESHOLD_MS = 50;
// Processing timeout threshold; matches the wrapper config's default timeoutMs
export const TIMEOUT_THRESHOLD_MS = 100;
|
|
36
|
+
|
|
37
|
+
/**
|
|
38
|
+
* Generate a simple UUID v4
|
|
39
|
+
*/
|
|
40
|
+
function generateId(): string {
|
|
41
|
+
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
|
|
42
|
+
const r = (Math.random() * 16) | 0;
|
|
43
|
+
const v = c === 'x' ? r : (r & 0x3) | 0x8;
|
|
44
|
+
return v.toString(16);
|
|
45
|
+
});
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* Format a log entry for file output
|
|
50
|
+
*/
|
|
51
|
+
function formatLogEntry(log: TransformationLog): string {
|
|
52
|
+
const timestamp = log.timestamp.toISOString();
|
|
53
|
+
const level = log.level.toUpperCase().padEnd(5);
|
|
54
|
+
const operation = log.operation.padEnd(15);
|
|
55
|
+
const fallback = log.usedFallback ? '[FALLBACK]' : '';
|
|
56
|
+
const time = log.processingTimeMs ? `${log.processingTimeMs}ms` : '';
|
|
57
|
+
const error = log.error ? `: ${log.error}` : '';
|
|
58
|
+
|
|
59
|
+
return `${timestamp} ${level} ${operation} ${fallback} ${time}${error}`.trim();
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
/**
|
|
63
|
+
* Log a transformation event to file
|
|
64
|
+
*/
|
|
65
|
+
export async function logTransformation(
|
|
66
|
+
log: Omit<TransformationLog, 'id' | 'timestamp'>
|
|
67
|
+
): Promise<void> {
|
|
68
|
+
const entry: TransformationLog = {
|
|
69
|
+
...log,
|
|
70
|
+
id: generateId(),
|
|
71
|
+
timestamp: new Date(),
|
|
72
|
+
};
|
|
73
|
+
|
|
74
|
+
const logPath = process.env.MADEINOZ_WRAPPER_LOG_FILE || DEFAULT_LOG_PATH;
|
|
75
|
+
|
|
76
|
+
try {
|
|
77
|
+
// Ensure directory exists
|
|
78
|
+
await mkdir(dirname(logPath), { recursive: true });
|
|
79
|
+
|
|
80
|
+
// Append log entry
|
|
81
|
+
const line = `${formatLogEntry(entry)}\n`;
|
|
82
|
+
await appendFile(logPath, line, 'utf-8');
|
|
83
|
+
} catch (error) {
|
|
84
|
+
// Silently fail - logging should not break the wrapper
|
|
85
|
+
console.error('[transformation-log] Failed to write log:', error);
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
/**
 * Log a transformation failure (convenience function).
 * Records an error-level entry with usedFallback set to true.
 *
 * @param operation - MCP operation name
 * @param inputSize - Input data size in bytes
 * @param error - Error message describing the failure
 * @param processingTimeMs - Optional elapsed processing time in ms
 */
export async function logTransformationFailure(
  operation: string,
  inputSize: number,
  error: string,
  processingTimeMs?: number
): Promise<void> {
  await logTransformation({
    level: 'error',
    operation,
    inputSize,
    error,
    processingTimeMs,
    usedFallback: true,
  });
}
|
|
107
|
+
|
|
108
|
+
/**
 * Log a slow transformation warning (convenience function).
 * Records a warn-level entry with usedFallback set to false.
 *
 * @param operation - MCP operation name
 * @param inputSize - Input data size in bytes
 * @param processingTimeMs - Elapsed processing time in ms
 */
export async function logSlowTransformation(
  operation: string,
  inputSize: number,
  processingTimeMs: number
): Promise<void> {
  await logTransformation({
    level: 'warn',
    operation,
    inputSize,
    processingTimeMs,
    usedFallback: false,
  });
}
|
|
124
|
+
|
|
125
|
+
/**
|
|
126
|
+
* Log successful transformation (info level, optional)
|
|
127
|
+
*/
|
|
128
|
+
export async function logTransformationSuccess(
|
|
129
|
+
operation: string,
|
|
130
|
+
inputSize: number,
|
|
131
|
+
processingTimeMs: number
|
|
132
|
+
): Promise<void> {
|
|
133
|
+
// Only log if processing was slow but within threshold
|
|
134
|
+
if (processingTimeMs >= SLOW_THRESHOLD_MS) {
|
|
135
|
+
await logSlowTransformation(operation, inputSize, processingTimeMs);
|
|
136
|
+
}
|
|
137
|
+
}
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MCP Wrapper Configuration
|
|
3
|
+
* @module wrapper-config
|
|
4
|
+
*
|
|
5
|
+
* Environment Variables:
|
|
6
|
+
* - MADEINOZ_WRAPPER_COMPACT: Set to "false" to disable compact output (default: true)
|
|
7
|
+
* - MADEINOZ_WRAPPER_METRICS: Set to "true" to enable metrics collection (default: false)
|
|
8
|
+
* - MADEINOZ_WRAPPER_METRICS_FILE: Path to write metrics JSONL file
|
|
9
|
+
* - MADEINOZ_WRAPPER_LOG_FILE: Path to write transformation error logs
|
|
10
|
+
* - MADEINOZ_WRAPPER_SLOW_THRESHOLD: Slow processing threshold in ms (default: 50)
|
|
11
|
+
* - MADEINOZ_WRAPPER_TIMEOUT: Processing timeout in ms (default: 100)
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
/**
 * Per-operation output formatting description.
 * NOTE(review): field semantics inferred from names — the consumer appears
 * to live in output-formatter.ts; confirm against that module.
 */
export interface OutputFormat {
  /** Identifier for this format */
  formatId: string;
  /** Output template string */
  template: string;
  /** Names of fields to extract from the response */
  extractFields: string[];
  /** Optional per-field value-to-string transform functions */
  transforms?: Record<string, (value: unknown) => string>;
  /** Optional cap on output length */
  maxLength?: number;
}
|
|
21
|
+
|
|
22
|
+
/**
 * Resolved runtime configuration for the MCP wrapper.
 * Built by loadConfig() from defaults, environment variables, and overrides.
 */
export interface WrapperConfig {
  /** Emit compact output instead of raw JSON (default: true) */
  compactOutput: boolean;
  /** Collect token metrics (default: false) */
  collectMetrics: boolean;
  /** Path for the metrics JSONL file; setting it via env also enables metrics */
  metricsFile?: string;
  /** Path for the transformation error log */
  logFile?: string;
  /** Slow-processing warning threshold in ms (default: 50) */
  slowThresholdMs: number;
  /** Processing timeout in ms (default: 100) */
  timeoutMs: number;
  /** Optional per-operation output format overrides */
  formatOverrides?: Record<string, OutputFormat>;
}
|
|
31
|
+
|
|
32
|
+
/**
 * Environment variable names for wrapper configuration.
 * Read by loadFromEnv(); see the module header for each variable's semantics.
 */
export const ENV_VARS = {
  // "false" disables compact output (default: enabled)
  COMPACT: 'MADEINOZ_WRAPPER_COMPACT',
  // "true" enables metrics collection (default: disabled)
  METRICS: 'MADEINOZ_WRAPPER_METRICS',
  // Path for the metrics JSONL file (implies metrics collection)
  METRICS_FILE: 'MADEINOZ_WRAPPER_METRICS_FILE',
  // Path for the transformation error log
  LOG_FILE: 'MADEINOZ_WRAPPER_LOG_FILE',
  // Slow-processing threshold in ms (positive integer)
  SLOW_THRESHOLD: 'MADEINOZ_WRAPPER_SLOW_THRESHOLD',
  // Processing timeout in ms (positive integer)
  TIMEOUT: 'MADEINOZ_WRAPPER_TIMEOUT',
} as const;
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Load configuration from environment variables
|
|
46
|
+
*/
|
|
47
|
+
function loadFromEnv(): Partial<WrapperConfig> {
|
|
48
|
+
const config: Partial<WrapperConfig> = {};
|
|
49
|
+
|
|
50
|
+
// MADEINOZ_WRAPPER_COMPACT (boolean, default: true)
|
|
51
|
+
const compactEnv = process.env[ENV_VARS.COMPACT];
|
|
52
|
+
if (compactEnv !== undefined) {
|
|
53
|
+
config.compactOutput = compactEnv.toLowerCase() !== 'false';
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
// MADEINOZ_WRAPPER_METRICS (boolean, default: false)
|
|
57
|
+
const metricsEnv = process.env[ENV_VARS.METRICS];
|
|
58
|
+
if (metricsEnv !== undefined) {
|
|
59
|
+
config.collectMetrics = metricsEnv.toLowerCase() === 'true';
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// MADEINOZ_WRAPPER_METRICS_FILE (string path)
|
|
63
|
+
const metricsFileEnv = process.env[ENV_VARS.METRICS_FILE];
|
|
64
|
+
if (metricsFileEnv) {
|
|
65
|
+
config.metricsFile = metricsFileEnv;
|
|
66
|
+
// If a metrics file is specified, also enable metrics collection
|
|
67
|
+
config.collectMetrics = true;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// MADEINOZ_WRAPPER_LOG_FILE (string path)
|
|
71
|
+
const logFileEnv = process.env[ENV_VARS.LOG_FILE];
|
|
72
|
+
if (logFileEnv) {
|
|
73
|
+
config.logFile = logFileEnv;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// MADEINOZ_WRAPPER_SLOW_THRESHOLD (number in ms)
|
|
77
|
+
const slowThresholdEnv = process.env[ENV_VARS.SLOW_THRESHOLD];
|
|
78
|
+
if (slowThresholdEnv) {
|
|
79
|
+
const parsed = Number.parseInt(slowThresholdEnv, 10);
|
|
80
|
+
if (!Number.isNaN(parsed) && parsed > 0) {
|
|
81
|
+
config.slowThresholdMs = parsed;
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
// MADEINOZ_WRAPPER_TIMEOUT (number in ms)
|
|
86
|
+
const timeoutEnv = process.env[ENV_VARS.TIMEOUT];
|
|
87
|
+
if (timeoutEnv) {
|
|
88
|
+
const parsed = Number.parseInt(timeoutEnv, 10);
|
|
89
|
+
if (!Number.isNaN(parsed) && parsed > 0) {
|
|
90
|
+
config.timeoutMs = parsed;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
return config;
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
/**
 * Built-in defaults: compact output on, metrics off, 50 ms slow threshold,
 * 100 ms timeout. The optional file-path fields are intentionally unset.
 */
export const DEFAULT_CONFIG: WrapperConfig = {
  compactOutput: true,
  collectMetrics: false,
  slowThresholdMs: 50,
  timeoutMs: 100,
};
|
|
103
|
+
|
|
104
|
+
/**
|
|
105
|
+
* Load configuration with the following priority (highest to lowest):
|
|
106
|
+
* 1. Explicit overrides passed as parameter
|
|
107
|
+
* 2. Environment variables
|
|
108
|
+
* 3. Default values
|
|
109
|
+
*/
|
|
110
|
+
export function loadConfig(overrides?: Partial<WrapperConfig>): WrapperConfig {
|
|
111
|
+
const envConfig = loadFromEnv();
|
|
112
|
+
return { ...DEFAULT_CONFIG, ...envConfig, ...overrides };
|
|
113
|
+
}
|
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
# Madeinoz Knowledge System - Environment Configuration
|
|
2
|
+
# Copy this file to your .env location (see INSTALL.md for location)
|
|
3
|
+
#
|
|
4
|
+
# This file is for direct container usage (unprefixed variables)
|
|
5
|
+
# The start.sh script generates this from MADEINOZ_KNOWLEDGE_* prefixed variables
|
|
6
|
+
|
|
7
|
+
# ============================================================================
|
|
8
|
+
# REQUIRED: API Keys
|
|
9
|
+
# ============================================================================
|
|
10
|
+
|
|
11
|
+
# Primary API Key (for OpenRouter, OpenAI, or compatible provider)
|
|
12
|
+
# Get your key from: https://openrouter.ai/keys
|
|
13
|
+
OPENAI_API_KEY=sk-your-api-key-here
|
|
14
|
+
|
|
15
|
+
# Optional: Alternative provider API keys
|
|
16
|
+
# ANTHROPIC_API_KEY=your-anthropic-key
|
|
17
|
+
# GOOGLE_API_KEY=your-google-key
|
|
18
|
+
# GROQ_API_KEY=your-groq-key
|
|
19
|
+
# VOYAGE_API_KEY=your-voyage-key
|
|
20
|
+
|
|
21
|
+
# ============================================================================
|
|
22
|
+
# LLM Configuration
|
|
23
|
+
# ============================================================================
|
|
24
|
+
|
|
25
|
+
# LLM Provider: openai, anthropic, gemini, groq
|
|
26
|
+
LLM_PROVIDER=openai
|
|
27
|
+
|
|
28
|
+
# Model to use (examples)
|
|
29
|
+
# - OpenRouter: openai/gpt-4o-mini, google/gemini-2.0-flash-001, anthropic/claude-3.5-haiku
|
|
30
|
+
# - OpenAI: gpt-4o-mini, gpt-4o
|
|
31
|
+
# - Direct provider: gemini-2.0-flash-001, claude-3.5-sonnet, llama-3.3-70b
|
|
32
|
+
MODEL_NAME=openai/gpt-4o-mini
|
|
33
|
+
|
|
34
|
+
# API Endpoint (for OpenRouter, Together AI, or custom OpenAI-compatible API)
|
|
35
|
+
# Default for OpenRouter:
|
|
36
|
+
OPENAI_BASE_URL=https://openrouter.ai/api/v1
|
|
37
|
+
# For OpenAI directly:
|
|
38
|
+
# OPENAI_BASE_URL=https://api.openai.com/v1
|
|
39
|
+
# For Ollama (local LLM):
|
|
40
|
+
# OPENAI_BASE_URL=http://host.docker.internal:11434/v1
|
|
41
|
+
|
|
42
|
+
# ============================================================================
|
|
43
|
+
# Embedder Configuration
|
|
44
|
+
# ============================================================================
|
|
45
|
+
|
|
46
|
+
# Embedder Provider: openai, ollama, gemini, voyage
|
|
47
|
+
EMBEDDER_PROVIDER=ollama
|
|
48
|
+
|
|
49
|
+
# Embedding Model
|
|
50
|
+
# - Ollama: mxbai-embed-large (1024 dims), nomic-embed-text (768 dims)
|
|
51
|
+
# - OpenAI: text-embedding-3-small (1536 dims), text-embedding-ada-002 (1536 dims)
|
|
52
|
+
EMBEDDER_MODEL=mxbai-embed-large
|
|
53
|
+
|
|
54
|
+
# Embedding Dimensions (MUST match your model!)
|
|
55
|
+
# mxbai-embed-large: 1024
|
|
56
|
+
# nomic-embed-text: 768
|
|
57
|
+
# text-embedding-3-small: 1536
|
|
58
|
+
EMBEDDER_DIMENSIONS=1024
|
|
59
|
+
|
|
60
|
+
# Ollama Endpoint (for local embedder)
|
|
61
|
+
# Docker/Podman: http://host.containers.internal:11434/v1
|
|
62
|
+
# Same machine: http://localhost:11434/v1
|
|
63
|
+
# Remote: http://YOUR_IP:11434/v1
|
|
64
|
+
EMBEDDER_PROVIDER_URL=http://host.containers.internal:11434/v1
|
|
65
|
+
|
|
66
|
+
# ============================================================================
|
|
67
|
+
# Database Configuration
|
|
68
|
+
# ============================================================================
|
|
69
|
+
|
|
70
|
+
# Database Type: neo4j or falkordb
|
|
71
|
+
DATABASE_TYPE=neo4j
|
|
72
|
+
|
|
73
|
+
# Neo4j Configuration (when DATABASE_TYPE=neo4j)
|
|
74
|
+
NEO4J_URI=bolt://madeinoz-knowledge-neo4j:7687
|
|
75
|
+
NEO4J_USER=neo4j
|
|
76
|
+
NEO4J_PASSWORD=madeinozknowledge
|
|
77
|
+
NEO4J_DATABASE=neo4j
|
|
78
|
+
|
|
79
|
+
# FalkorDB Configuration (when DATABASE_TYPE=falkordb)
|
|
80
|
+
FALKORDB_HOST=madeinoz-knowledge-falkordb
|
|
81
|
+
FALKORDB_PORT=6379
|
|
82
|
+
FALKORDB_PASSWORD=
|
|
83
|
+
|
|
84
|
+
# ============================================================================
|
|
85
|
+
# Knowledge Graph Configuration
|
|
86
|
+
# ============================================================================
|
|
87
|
+
|
|
88
|
+
# Group ID for isolating knowledge graphs
|
|
89
|
+
GROUP_ID=main
|
|
90
|
+
|
|
91
|
+
# Performance
|
|
92
|
+
SEMAPHORE_LIMIT=10
|
|
93
|
+
|
|
94
|
+
# Telemetry
|
|
95
|
+
GRAPHITI_TELEMETRY_ENABLED=false
|
|
96
|
+
NEO4J_TELEMETRY_ENABLED=false
|
|
97
|
+
|
|
98
|
+
# Search All Groups (Neo4j only)
|
|
99
|
+
# When true, searches all groups when no group_ids specified
|
|
100
|
+
# SECURITY: Default is false for least privilege (data isolation between groups)
|
|
101
|
+
# NOTE: plain .env parsers do not expand ${VAR:-default} shell syntax — use a literal value
GRAPHITI_SEARCH_ALL_GROUPS=false
|
|
102
|
+
|
|
103
|
+
# ============================================================================
|
|
104
|
+
# REFERENCE: Working Model Combinations
|
|
105
|
+
# ============================================================================
|
|
106
|
+
#
|
|
107
|
+
# OpenRouter + Local Ollama Embeddings (RECOMMENDED):
|
|
108
|
+
# LLM_PROVIDER=openai
|
|
109
|
+
# MODEL_NAME=openai/gpt-4o-mini
|
|
110
|
+
# OPENAI_BASE_URL=https://openrouter.ai/api/v1
|
|
111
|
+
# EMBEDDER_PROVIDER=ollama
|
|
112
|
+
# EMBEDDER_MODEL=mxbai-embed-large
|
|
113
|
+
# EMBEDDER_PROVIDER_URL=http://host.containers.internal:11434/v1
|
|
114
|
+
#
|
|
115
|
+
# All OpenRouter (Simple, No Local Setup):
|
|
116
|
+
# LLM_PROVIDER=openai
|
|
117
|
+
# MODEL_NAME=openai/gpt-4o-mini
|
|
118
|
+
# OPENAI_BASE_URL=https://openrouter.ai/api/v1
|
|
119
|
+
# EMBEDDER_PROVIDER=openai
|
|
120
|
+
# EMBEDDER_MODEL=text-embedding-3-small
|
|
121
|
+
#
|
|
122
|
+
# All Local Ollama (FREE, No API Keys):
|
|
123
|
+
# LLM_PROVIDER=openai
|
|
124
|
+
# MODEL_NAME=llama3.2
|
|
125
|
+
# OPENAI_BASE_URL=http://host.docker.internal:11434/v1
|
|
126
|
+
# EMBEDDER_PROVIDER=ollama
|
|
127
|
+
# EMBEDDER_MODEL=mxbai-embed-large
|
|
128
|
+
# EMBEDDER_PROVIDER_URL=http://host.docker.internal:11434/v1
|
|
129
|
+
# OPENAI_API_KEY=ollama # Dummy key for Ollama
|