parrat 0.1.0-beta.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,2056 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/index.ts
4
+ import "dotenv/config";
5
+ import { Command as Command9 } from "commander";
6
+
7
+ // src/cli/audit-query.ts
8
+ import { readFileSync } from "fs";
9
+ import { Command } from "commander";
10
// Render one parsed audit record as a single human-readable line.
// Unknown/missing fields degrade to "?" or empty strings rather than throwing,
// since the log may contain records written by other versions of the tool.
function formatRecord(record) {
  const asString = (v) => (typeof v === "string" ? v : "?");
  const ts = asString(record.timestamp);
  const type = asString(record.event_type);
  const runId = typeof record.run_id === "string" ? record.run_id.slice(0, 8) : "?";
  const skill = typeof record.skill === "string" ? ` skill=${record.skill}` : "";
  const p = record.payload;
  const hasPayload = typeof p === "object" && p !== null;
  let detail = "";
  if (hasPayload && type === "mcp_call") {
    const tool = typeof p.tool === "string" ? p.tool : "";
    const server = typeof p.server === "string" ? p.server : "";
    const ms = typeof p.duration_ms === "number" ? ` (${p.duration_ms}ms)` : "";
    detail = ` ${server}.${tool}${ms}`;
  } else if (hasPayload && type === "claude_call") {
    const tokens = typeof p.output_tokens === "number" ? ` ${p.output_tokens} tokens` : "";
    const cost = typeof p.cost_estimate_usd === "number" ? ` $${p.cost_estimate_usd.toFixed(4)}` : "";
    detail = `${tokens}${cost}`;
  } else if (hasPayload && type === "error") {
    detail = ` ${typeof p.error_message === "string" ? p.error_message : ""}`;
  }
  return `[${ts}] ${type.padEnd(22)} run=${runId}${skill}${detail}`;
}
34
// Read, filter, sort, and render audit-log events from an NDJSON file.
// Returns { exitCode, lines?, error? }; never throws. exitCode 1 covers:
// unreadable file, unparseable --since, and zero matching events.
async function queryAuditLog(options) {
  let raw;
  try {
    raw = readFileSync(options.auditPath, "utf8");
  } catch {
    return { exitCode: 1, error: `Audit log not found: ${options.auditPath}` };
  }
  const sinceMs = options.since ? Date.parse(options.since) : void 0;
  if (options.since && sinceMs !== void 0 && Number.isNaN(sinceMs)) {
    return { exitCode: 1, error: `Invalid --since value: ${options.since}` };
  }
  // One record per line; malformed lines are silently skipped.
  const records = raw
    .split("\n")
    .map((line) => line.trim())
    .filter(Boolean)
    .flatMap((line) => {
      try {
        return [JSON.parse(line)];
      } catch {
        return [];
      }
    });
  const matches = (r) => {
    if (options.runId && r.run_id !== options.runId) return false;
    if (options.eventType && r.event_type !== options.eventType) return false;
    if (sinceMs !== void 0 && typeof r.timestamp === "string" && Date.parse(r.timestamp) < sinceMs) {
      return false;
    }
    return true;
  };
  // Sort chronologically; records without a string timestamp sort first.
  let filtered = records.filter(matches).sort((a, b) => {
    const ta = typeof a.timestamp === "string" ? a.timestamp : "";
    const tb = typeof b.timestamp === "string" ? b.timestamp : "";
    return ta.localeCompare(tb);
  });
  if (options.limit !== void 0 && options.limit > 0) {
    filtered = filtered.slice(0, options.limit);
  }
  if (filtered.length === 0) {
    return { exitCode: 1, lines: [], error: "No matching events found." };
  }
  const lines = options.json ? filtered.map((r) => JSON.stringify(r)) : filtered.map(formatRecord);
  return { exitCode: 0, lines };
}
76
// CLI wiring: `parrat audit query ...` — queries the NDJSON audit log.
var auditCommand = new Command("audit").description("Audit log tools");
auditCommand.addCommand(
  new Command("query")
    .description("Query the audit log")
    .option("--audit-path <path>", "Path to audit log file", ".parrat/audit.jsonl")
    .option("--run-id <id>", "Filter by run ID")
    .option("--event-type <type>", "Filter by event type (trigger, mcp_call, claude_call, ...)")
    .option("--since <iso>", "Only show events after this ISO 8601 timestamp")
    .option("--limit <n>", "Maximum number of events to show", (v) => Number.parseInt(v, 10))
    .option("--json", "Output raw NDJSON instead of human-readable format")
    .action(async (opts) => {
      const result = await queryAuditLog({
        auditPath: opts.auditPath,
        ...opts.runId ? { runId: opts.runId } : {},
        ...opts.eventType ? { eventType: opts.eventType } : {},
        ...opts.since ? { since: opts.since } : {},
        ...opts.limit !== void 0 ? { limit: opts.limit } : {},
        ...opts.json ? { json: opts.json } : {}
      });
      if (result.lines) {
        for (const line of result.lines) {
          console.log(line);
        }
      }
      // BUG FIX: failure results (missing file, invalid --since) carry `error`
      // but no `lines` at all, so the old check `result.lines?.length === 0`
      // evaluated `undefined === 0` and silently swallowed the message before
      // exiting non-zero. Print the error whenever nothing was written.
      if (result.error && (result.lines?.length ?? 0) === 0) {
        console.error(result.error);
      }
      if (result.exitCode !== 0) process.exit(result.exitCode);
    })
);
100
+
101
+ // src/cli/doctor.ts
102
+ import { execFile } from "child_process";
103
+ import { constants, access, mkdir } from "fs/promises";
104
+ import { dirname } from "path";
105
+ import { Command as Command2 } from "commander";
106
+
107
+ // src/core/config/loader.ts
108
+ import { existsSync, readFileSync as readFileSync2 } from "fs";
109
+ import { homedir } from "os";
110
+ import { isAbsolute, resolve } from "path";
111
+ import { parse as parseYaml } from "yaml";
112
+
113
+ // src/core/errors.ts
114
// Domain error hierarchy. Every error sets a distinct `name` and carries the
// structured fields callers need for programmatic handling; `cause` is
// preserved where a lower-level error triggered the failure.
var ParratError = class extends Error {
  constructor(...args) {
    super(...args);
    this.name = "ParratError";
  }
};
// Raised when a requested skill is not registered; lists what IS available.
var SkillNotFoundError = class extends ParratError {
  constructor(skillName, available) {
    const list = available.join(", ") || "(none)";
    super(`Skill not found: '${skillName}'. Available skills: ${list}.`);
    this.name = "SkillNotFoundError";
    this.skillName = skillName;
    this.available = available;
  }
};
// Raised when two skills register under the same name.
var DuplicateSkillError = class extends ParratError {
  constructor(skillName) {
    super(`Duplicate skill name: ${skillName}`);
    this.name = "DuplicateSkillError";
    this.skillName = skillName;
  }
};
// Raised when a skill's input or output fails schema validation.
var SchemaValidationError = class extends ParratError {
  constructor(direction, skillName, cause) {
    super(`Skill '${skillName}' ${direction} failed schema validation: ${cause.message}`, {
      cause
    });
    this.name = "SchemaValidationError";
    this.direction = direction;
    this.skillName = skillName;
  }
};
// Raised when appending to the audit log fails; wraps the underlying error.
var AuditWriteError = class extends ParratError {
  constructor(filePath, cause) {
    const causeMsg = cause instanceof Error ? cause.message : String(cause);
    super(`Failed to write audit log to '${filePath}': ${causeMsg}`, { cause });
    this.name = "AuditWriteError";
    this.filePath = filePath;
  }
};
// Raised when ANTHROPIC_API_KEY is absent from the environment.
var MissingClaudeKeyError = class extends ParratError {
  constructor() {
    super(
      "Claude API key not found. Set ANTHROPIC_API_KEY in your environment, or run `parrat init` to configure."
    );
    this.name = "MissingClaudeKeyError";
  }
};
// Raised when no config file exists; message depends on how the path was chosen.
var ConfigNotFoundError = class extends ParratError {
  constructor(path, source) {
    const hint = source === "PARRAT_CONFIG_PATH" ? `Expected a config at PARRAT_CONFIG_PATH='${path}' but no file exists there.` : `No config found at '${path}'. Run \`parrat init\` to scaffold one.`;
    super(`Parrat config not found: ${hint}`);
    this.name = "ConfigNotFoundError";
    this.path = path;
    this.source = source;
  }
};
// Raised when the config file cannot be parsed or fails schema validation.
var ConfigValidationError = class extends ParratError {
  constructor(path, stage, cause) {
    const causeMsg = cause instanceof Error ? cause.message : String(cause ?? "");
    super(`Config validation failed at '${path}' (${stage}): ${causeMsg}`, { cause });
    this.name = "ConfigValidationError";
    this.path = path;
    this.stage = stage;
  }
};
// Raised when an agentic loop fails to converge within the turn budget.
var MaxTurnsExceededError = class extends ParratError {
  constructor(skillName, maxTurns) {
    super(
      `Skill '${skillName}' did not converge within max_turns=${maxTurns}. Increase max_turns or refine the system prompt.`
    );
    this.name = "MaxTurnsExceededError";
    this.skillName = skillName;
    this.maxTurns = maxTurns;
  }
};
// Raised when the Anthropic API call ultimately fails (after retries).
var LlmApiError = class extends ParratError {
  constructor(message, cause) {
    super(message, { cause });
    this.name = "LlmApiError";
  }
};
205
+
206
+ // src/core/config/overrides.ts
207
+ function applyEnvOverrides(config, env) {
208
+ const overrideMap = {
209
+ PARRAT_TENANT_ID: {
210
+ type: "tenant",
211
+ apply: (cfg, val) => ({ ...cfg, tenant_id: val })
212
+ },
213
+ PARRAT_CLAUDE_MODEL: {
214
+ type: "string",
215
+ apply: (cfg, val) => ({ ...cfg, claude: { ...cfg.claude, model: val } })
216
+ },
217
+ PARRAT_CLAUDE_MAX_TURNS: {
218
+ type: "number",
219
+ apply: (cfg, val) => ({
220
+ ...cfg,
221
+ claude: { ...cfg.claude, max_turns: Number.parseInt(val, 10) }
222
+ })
223
+ },
224
+ PARRAT_CLAUDE_MAX_TOKENS: {
225
+ type: "number",
226
+ apply: (cfg, val) => ({
227
+ ...cfg,
228
+ claude: { ...cfg.claude, max_tokens: Number.parseInt(val, 10) }
229
+ })
230
+ },
231
+ PARRAT_CLAUDE_TEMPERATURE: {
232
+ type: "number",
233
+ apply: (cfg, val) => ({
234
+ ...cfg,
235
+ claude: { ...cfg.claude, temperature: Number.parseFloat(val) }
236
+ })
237
+ },
238
+ PARRAT_AUDIT_LOG_PATH: {
239
+ type: "string",
240
+ apply: (cfg, val) => ({ ...cfg, audit: { ...cfg.audit, log_path: val } })
241
+ },
242
+ PARRAT_AUDIT_RETENTION_DAYS: {
243
+ type: "number",
244
+ apply: (cfg, val) => ({
245
+ ...cfg,
246
+ audit: { ...cfg.audit, retention_days: Number.parseInt(val, 10) }
247
+ })
248
+ }
249
+ };
250
+ let result = config;
251
+ for (const [envVar, descriptor] of Object.entries(overrideMap)) {
252
+ const value = env[envVar];
253
+ if (value === void 0 || value === "") continue;
254
+ if (descriptor.type === "number" && Number.isNaN(Number.parseFloat(value))) continue;
255
+ result = descriptor.apply(result, value);
256
+ }
257
+ return result;
258
+ }
259
+
260
+ // src/core/config/schema.ts
261
+ import { z } from "zod";
262
// Zod schemas for .parrat/config.yaml. `.strict()` everywhere so unknown keys
// are a hard validation error instead of being silently dropped.

// One MCP server entry: how to launch it and (optionally) which tools to expose.
var mcpServerConfigSchema = z.object({
  command: z.string().min(1),
  args: z.array(z.string()).default([]),
  env: z.record(z.string(), z.string()).default({}),
  tools: z.array(z.string()).optional()
}).strict();

// Per-skill execution defaults.
var skillDefaultsSchema = z.object({
  timeout_seconds: z.number().int().positive().default(60),
  max_retries: z.number().int().nonnegative().default(2)
}).strict();

// Audit log location, hashing, redaction, and retention policy.
var auditConfigSchema = z.object({
  log_path: z.string().default(".parrat/audit.jsonl"),
  hash_algorithm: z.literal("sha256").default("sha256"),
  retention_days: z.number().int().positive().default(90),
  redact_fields: z.array(z.string()).default([]),
  idempotency_window_hours: z.number().int().positive().default(24)
}).strict();

// Claude model + sampling/turn limits (API key is never stored in config).
var claudeConfigSchema = z.object({
  model: z.string().default("claude-sonnet-4-6"),
  max_turns: z.number().int().positive().default(6),
  max_tokens: z.number().int().positive().default(4096),
  temperature: z.number().min(0).max(1).default(0)
}).strict();

// Watch-mode binding: which skill to run and its static input.
var watchConfigSchema = z.object({
  skill: z.string().min(1),
  input: z.record(z.string(), z.unknown()).default({})
}).strict();

// Slack notification target.
var slackNotifyConfigSchema = z.object({
  webhook_url: z.string().url()
}).strict();

var notifyConfigSchema = z.object({
  slack: slackNotifyConfigSchema.optional()
}).strict();

// Inbound webhook listener settings.
var webhookConfigSchema = z.object({
  port: z.number().int().positive().default(8080),
  secret: z.string().optional()
}).strict();

// Top-level config document; every section has sensible defaults.
var configSchema = z.object({
  version: z.literal(1),
  tenant_id: z.string().default("default"),
  mcpServers: z.record(z.string(), mcpServerConfigSchema).default({}),
  skills: z.object({
    defaults: skillDefaultsSchema.default({})
  }).strict().default({}),
  audit: auditConfigSchema.default({}),
  claude: claudeConfigSchema.default({}),
  watch: watchConfigSchema.optional(),
  notify: notifyConfigSchema.optional(),
  webhook: webhookConfigSchema.optional()
}).strict();
312
+
313
+ // src/core/config/loader.ts
314
// Matches $VAR or ${VAR} (uppercase names only) for env interpolation in config strings.
var ENV_VAR_PATTERN = /\$([A-Z_][A-Z0-9_]*)|\$\{([A-Z_][A-Z0-9_]*)\}/g;

// Locate the config file: PARRAT_CONFIG_PATH wins (resolved against cwd when
// relative); otherwise <cwd>/.parrat/config.yaml. Throws ConfigNotFoundError
// when the chosen path does not exist.
function resolveConfigPath(env = process.env, cwd = process.cwd()) {
  const explicit = env.PARRAT_CONFIG_PATH;
  if (explicit) {
    const absolute = isAbsolute(explicit) ? explicit : resolve(cwd, explicit);
    if (existsSync(absolute)) return absolute;
    throw new ConfigNotFoundError(absolute, "PARRAT_CONFIG_PATH");
  }
  const fallback = resolve(cwd, ".parrat", "config.yaml");
  if (existsSync(fallback)) return fallback;
  throw new ConfigNotFoundError(fallback, "default");
}

// Load, parse, validate, env-interpolate, tilde-expand, and env-override the
// config. Result is frozen so downstream code cannot mutate shared state.
async function loadConfig(env = process.env, cwd = process.cwd()) {
  const path = resolveConfigPath(env, cwd);
  const raw = readFileSync2(path, "utf-8");
  let parsed;
  try {
    parsed = parseYaml(raw);
  } catch (cause) {
    throw new ConfigValidationError(path, "YAML parse failed", cause);
  }
  const checked = configSchema.safeParse(parsed);
  if (!checked.success) {
    throw new ConfigValidationError(path, "schema validation failed", checked.error);
  }
  const interpolated = walkAndTransform(checked.data, env, homedir());
  return Object.freeze(applyEnvOverrides(interpolated, env));
}

// Substitute $VAR / ${VAR} references in a string from `env`; a reference to
// an unset variable is a hard error (fail fast rather than injecting "").
function resolveEnvVars(value, env) {
  const substitute = (_match, bare, braced) => {
    const name = bare ?? braced ?? "";
    const found = env[name];
    if (found !== void 0) return found;
    throw new ConfigValidationError(
      "<env-resolution>",
      `Environment variable '${name}' referenced in config but not set`,
      void 0
    );
  };
  return value.replace(ENV_VAR_PATTERN, substitute);
}

// Expand a leading "~" / "~/" / "~\" to the user's home directory.
function expandTilde(path, home) {
  if (path === "~") return home;
  if (path.startsWith("~/") || path.startsWith("~\\")) {
    return resolve(home, path.slice(2));
  }
  return path;
}

// Recursively apply env interpolation + tilde expansion to every string in a
// config tree; arrays and plain objects are rebuilt, other values pass through.
function walkAndTransform(value, env, home) {
  if (typeof value === "string") {
    return expandTilde(resolveEnvVars(value, env), home);
  }
  if (Array.isArray(value)) {
    return value.map((item) => walkAndTransform(item, env, home));
  }
  if (value !== null && typeof value === "object") {
    return Object.fromEntries(
      Object.entries(value).map(([key, v]) => [key, walkAndTransform(v, env, home)])
    );
  }
  return value;
}
388
+
389
+ // src/cli/doctor.ts
390
// Promise wrapper around child_process.execFile (adapting the callback API).
function execFileAsync(cmd, args, opts) {
  return new Promise((resolve6, reject) => {
    execFile(cmd, args, opts, (err, stdout2, stderr) => {
      err ? reject(err) : resolve6({ stdout: stdout2, stderr });
    });
  });
}
// The dbt-mcp release this build of parrat is validated against.
var PINNED_DBT_MCP_VERSION = "0.4.3";
// Doctor check: is an Anthropic API key present (and non-empty) in the environment?
async function checkApiKey() {
  const key = process.env.ANTHROPIC_API_KEY;
  if (key && key.length > 0) {
    return { name: "ANTHROPIC_API_KEY", status: "ok", message: "Present" };
  }
  return {
    name: "ANTHROPIC_API_KEY",
    status: "fail",
    message: "Missing \u2014 set ANTHROPIC_API_KEY in your environment or .env file"
  };
}
407
// Doctor check: does the config file load and validate?
async function checkConfig() {
  try {
    await loadConfig();
    return { name: "Config file", status: "ok", message: "Loaded and valid" };
  } catch (e) {
    return {
      name: "Config file",
      status: "fail",
      message: `${e instanceof Error ? e.message : String(e)}`
    };
  }
}
// Doctor check: when any configured MCP server mentions dbt-mcp, compare the
// installed `uvx dbt-mcp --version` against the pinned version.
async function checkDbtMcpVersion() {
  let config;
  try {
    config = await loadConfig();
  } catch {
    return { name: "dbt-mcp version", status: "warn", message: "Skipped \u2014 config not loadable" };
  }
  const mentionsDbtMcp = (s) => s.command.includes("dbt-mcp") || s.args.some((a) => a.includes("dbt-mcp"));
  if (!Object.values(config.mcpServers).some(mentionsDbtMcp)) {
    return { name: "dbt-mcp version", status: "ok", message: "dbt-mcp not configured \u2014 skipped" };
  }
  try {
    // Version is assumed to be the last whitespace-separated token of stdout.
    const { stdout: out } = await execFileAsync("uvx", ["dbt-mcp", "--version"], { timeout: 1e4 });
    const version = out.trim().split(/\s+/).pop() ?? "";
    if (version === PINNED_DBT_MCP_VERSION) {
      return { name: "dbt-mcp version", status: "ok", message: `${version} (matches pinned)` };
    }
    return {
      name: "dbt-mcp version",
      status: "fail",
      message: `Found ${version || "(unknown)"}, expected ${PINNED_DBT_MCP_VERSION} \u2014 run: uvx install dbt-mcp==${PINNED_DBT_MCP_VERSION}`
    };
  } catch (e) {
    return {
      name: "dbt-mcp version",
      status: "fail",
      message: `Could not run 'uvx dbt-mcp --version': ${e instanceof Error ? e.message : String(e)}`
    };
  }
}
451
// Doctor check: ensure the audit log's directory is writable, creating it if
// needed; only a creation failure degrades to a warning (never a hard fail).
async function checkAuditDir(auditPath) {
  const dir = dirname(auditPath);
  try {
    await access(dir, constants.W_OK);
    return { name: "Audit directory", status: "ok", message: `${dir} is writable` };
  } catch {
    try {
      await mkdir(dir, { recursive: true });
      return { name: "Audit directory", status: "ok", message: `${dir} created` };
    } catch (e) {
      return {
        name: "Audit directory",
        status: "warn",
        message: `Could not create ${dir}: ${e instanceof Error ? e.message : String(e)}`
      };
    }
  }
}
// Run every doctor check in parallel; order of results is fixed.
async function runDoctor(auditPath) {
  const checks = [checkApiKey(), checkConfig(), checkDbtMcpVersion(), checkAuditDir(auditPath)];
  return Promise.all(checks);
}
// One aligned output line per check: icon, padded name, padded status, message.
function formatCheck(check) {
  const icons = { ok: "\u2713", warn: "!" };
  const icon = icons[check.status] ?? "\u2717";
  return `${icon} ${check.name.padEnd(22)} ${check.status.padEnd(6)} ${check.message}`;
}
481
// CLI wiring: `parrat doctor` — run all checks, print them, exit 1 on any failure.
var doctorCommand = new Command2("doctor")
  .description("Check Parrat configuration and dependencies")
  .option("--audit-path <path>", "Path to audit log file", ".parrat/audit.jsonl")
  .action(async (opts) => {
    const checks = await runDoctor(opts.auditPath);
    checks.forEach((check) => console.log(formatCheck(check)));
    if (checks.some((c) => c.status === "fail")) process.exit(1);
  });
489
+
490
+ // src/cli/init.ts
491
+ import { mkdir as mkdir2, stat, writeFile } from "fs/promises";
492
+ import { dirname as dirname2, resolve as resolve2 } from "path";
493
+ import { stdin, stdout } from "process";
494
+ import { createInterface } from "readline/promises";
495
+ import { Command as Command3 } from "commander";
496
var DEFAULT_CONFIG_PATH = ".parrat/config.yaml";
// True when `path` exists (any file type); any stat() failure counts as absent.
async function pathExists(path) {
  return stat(path).then(() => true, () => false);
}
// Write the scaffold config, creating parent directories as needed.
async function writeDefaultConfig(configPath) {
  const lines = [
    "# Parrat configuration",
    `# Generated by \`parrat init\` on ${new Date().toISOString()}`,
    "",
    "version: 1",
    "tenant_id: default",
    "",
    "# MCP servers \u2014 uncomment and configure for LLM-driven Skills",
    "# (e.g., freshness-investigation needs a `dbt` server).",
    "mcpServers: {}",
    "# dbt:",
    "# command: uvx",
    "# args: [dbt-mcp]",
    "# env:",
    "# DBT_PROJECT_DIR: /absolute/path/to/dbt/project",
    "# DBT_PATH: /absolute/path/to/dbt",
    '# PYTHONUTF8: "1" # Required on Windows',
    "",
    "audit:",
    " log_path: .parrat/audit.jsonl",
    " retention_days: 90",
    "",
    "claude:",
    " # API key from ANTHROPIC_API_KEY env var (never declared here).",
    " model: claude-sonnet-4-6",
    " max_turns: 6",
    " max_tokens: 4096",
    " temperature: 0.0",
    ""
  ];
  await mkdir2(dirname2(configPath), { recursive: true });
  await writeFile(configPath, lines.join("\n"), "utf8");
}
539
// CLI wiring: `parrat init` — scaffold a config, prompting before overwriting
// an existing file unless --force was given.
var initCommand = new Command3("init")
  .description("Initialize a Parrat configuration in the current directory")
  .option("--config-path <path>", "Path to write the config file", DEFAULT_CONFIG_PATH)
  .option("-f, --force", "Overwrite existing config without prompting", false)
  .action(async (opts) => {
    const configPath = resolve2(opts.configPath);
    console.log("Welcome to Parrat.");
    console.log("Run `parrat skills list` to see available Skills.");
    console.log("");
    if (await pathExists(configPath) && !opts.force) {
      const rl = createInterface({ input: stdin, output: stdout });
      let answer;
      try {
        answer = await rl.question(`${configPath} already exists. Overwrite? (y/N) `);
      } finally {
        rl.close(); // always release the readline interface
      }
      if (!/^y(es)?$/i.test(answer.trim())) {
        console.log("Aborted. No changes written.");
        return;
      }
    }
    await writeDefaultConfig(configPath);
    console.log(`Configuration written to ${configPath}`);
    if (process.env.ANTHROPIC_API_KEY) {
      console.log("ANTHROPIC_API_KEY is set in your environment.");
    } else {
      console.log(
        "ANTHROPIC_API_KEY not set. Run `export ANTHROPIC_API_KEY=...` before invoking Skills that call Claude."
      );
    }
  });
566
+
567
+ // src/cli/replay.ts
568
+ import { readFileSync as readFileSync3 } from "fs";
569
+ import { resolve as resolve3 } from "path";
570
+ import { Command as Command4 } from "commander";
571
// Extract HH:MM:SS from an ISO 8601 timestamp.
function formatTime(iso) {
  return iso.slice(11, 19);
}
// Render a single audit record for `parrat replay` output.
function formatRecord2(r) {
  const t = formatTime(r.timestamp);
  const secs = (ms) => (typeof ms === "number" ? (ms / 1e3).toFixed(1) : "?");
  switch (r.event_type) {
    case "trigger":
      return `[${t}] TRIGGER skill=${r.skill ?? "?"} actor=${r.actor}`;
    case "claude_call": {
      const p = r.payload;
      const cost = typeof p.cost_estimate_usd === "number" ? p.cost_estimate_usd.toFixed(4) : "?";
      return `[${t}] CLAUDE turn=${p.turn_index} in=${p.input_tokens} out=${p.output_tokens} cost=$${cost} dur=${secs(p.duration_ms)}s`;
    }
    case "mcp_call": {
      const p = r.payload;
      return `[${t}] MCP server=${p.server} tool=${p.tool} dur=${secs(p.duration_ms)}s${p.is_error ? " ERROR" : ""}`;
    }
    case "skill_output_captured":
      return `[${t}] OUTPUT turn=${r.payload.turn_index}`;
    case "skill_complete": {
      const dur = typeof r.payload.duration_ms === "number" ? ` dur=${(r.payload.duration_ms / 1e3).toFixed(1)}s` : "";
      return `[${t}] COMPLETE${dur}`;
    }
    case "error":
      return `[${t}] ERROR ${r.payload.error_name}: ${r.payload.error_message}`;
    default:
      return `[${t}] ${r.event_type.toUpperCase().padEnd(9)}`;
  }
}
// Load the audit log, keep the events for one run, and render them in
// chronological order. Returns { exitCode, lines?, error? }; never throws.
function replayRun(options) {
  let raw;
  try {
    raw = readFileSync3(resolve3(options.auditPath), "utf8");
  } catch (e) {
    const reason = e instanceof Error ? e.message : String(e);
    return { exitCode: 1, error: `Cannot read audit log at '${options.auditPath}': ${reason}` };
  }
  const parsed = [];
  for (const line of raw.split("\n")) {
    if (!line) continue;
    try {
      parsed.push(JSON.parse(line));
    } catch {
      // corrupt line — skip it
    }
  }
  const records = parsed
    .filter((r) => r.run_id === options.runId)
    .sort((a, b) => a.timestamp.localeCompare(b.timestamp));
  if (records.length === 0) {
    return {
      exitCode: 1,
      error: `No events found for run_id '${options.runId}' in '${options.auditPath}'`
    };
  }
  return { exitCode: 0, lines: records.map(formatRecord2) };
}
628
// CLI wiring: `parrat replay <run_id>` — print a trace of a past run.
var replayCommand = new Command4("replay")
  .description("Print a human-readable trace of a past Skill run from the audit log")
  .argument("<run_id>", "The run ID to replay")
  .option("--audit-path <path>", "Path to audit log file", ".parrat/audit.jsonl")
  .action(async (runId, opts) => {
    const result = replayRun({ runId, auditPath: opts.auditPath });
    if (result.error) console.error(result.error);
    (result.lines ?? []).forEach((line) => console.log(line));
    if (result.exitCode !== 0) process.exit(result.exitCode);
  });
642
+
643
+ // src/cli/run.ts
644
+ import { readFileSync as readFileSync6 } from "fs";
645
+ import { resolve as resolve4 } from "path";
646
+ import { Command as Command5 } from "commander";
647
+
648
+ // src/core/audit/idempotency.ts
649
+ import { readFileSync as readFileSync4 } from "fs";
650
// True when a `trigger` event with the same workflow_id (correlation ID) was
// logged within the last `windowHours`. A missing/unreadable audit log means
// "not a duplicate"; unparseable lines are ignored.
async function isDuplicateRun(auditPath, correlationId, windowHours) {
  let raw;
  try {
    raw = readFileSync4(auditPath, "utf8");
  } catch {
    return false;
  }
  const cutoff = Date.now() - windowHours * 3600 * 1e3;
  return raw
    .split("\n")
    .map((line) => line.trim())
    .filter(Boolean)
    .some((line) => {
      let record;
      try {
        record = JSON.parse(line);
      } catch {
        return false;
      }
      return (
        record.event_type === "trigger" &&
        record.workflow_id === correlationId &&
        typeof record.timestamp === "string" &&
        Date.parse(record.timestamp) >= cutoff
      );
    });
}
673
+
674
+ // src/core/audit/logger.ts
675
+ import { createHash, randomUUID } from "crypto";
676
+ import { appendFile, mkdir as mkdir3 } from "fs/promises";
677
+ import { dirname as dirname3 } from "path";
678
// Payload fields that are replaced by a hash before being written, per event type.
var HASH_FIELDS = {
  mcp_call: [
    ["args", "args_hash"],
    ["result", "result_hash"]
  ],
  trigger: [["input", "input_hash"]]
};
// Hex-encoded sha256 of a value's JSON serialization.
function sha256hex(value) {
  return createHash("sha256").update(JSON.stringify(value)).digest("hex");
}
// Replace bulky/sensitive payload fields with their sha256, per HASH_FIELDS.
// Returns the payload untouched for event types without hash targets.
function applyHashing(eventType, payload) {
  const targets = HASH_FIELDS[eventType];
  if (!targets) return payload;
  const result = { ...payload };
  for (const [src, dest] of targets) {
    if (!(src in result)) continue;
    result[dest] = sha256hex(result[src]);
    delete result[src];
  }
  return result;
}
// Replace the value of every key listed in redactFields with "[REDACTED]",
// recursing into nested plain objects. Reports whether anything was redacted.
// NOTE(review): array elements are passed through untouched — objects inside
// arrays keep their fields unredacted; confirm this is intended.
function applyRedaction(payload, redactFields) {
  if (redactFields.length === 0) return { payload, redacted: false };
  let redacted = false;
  const walk = (obj) =>
    Object.fromEntries(
      Object.entries(obj).map(([k, v]) => {
        if (redactFields.includes(k)) {
          redacted = true;
          return [k, "[REDACTED]"];
        }
        if (v !== null && typeof v === "object" && !Array.isArray(v)) {
          return [k, walk(v)];
        }
        return [k, v];
      })
    );
  return { payload: walk(payload), redacted };
}
719
// Build an audit logger that appends one JSON record per line (NDJSON).
// Hashing and field redaction are applied only when an audit config is given;
// a write failure surfaces as AuditWriteError wrapping the underlying cause.
function createAuditLogger(options) {
  const buildRecord = (input) => {
    const cfg = options.auditConfig;
    let payload = input.payload;
    let redactionApplied = false;
    if (cfg?.hash_algorithm) {
      payload = applyHashing(input.type, payload);
    }
    if (cfg?.redact_fields && cfg.redact_fields.length > 0) {
      const outcome = applyRedaction(payload, cfg.redact_fields);
      payload = outcome.payload;
      redactionApplied = outcome.redacted;
    }
    return {
      event_id: randomUUID(),
      timestamp: new Date().toISOString(),
      tenant_id: input.tenantId,
      run_id: input.runId,
      workflow_id: input.workflowId ?? input.runId,
      skill: input.skill,
      event_type: input.type,
      actor: input.actor,
      payload,
      redaction_applied: redactionApplied
    };
  };
  return {
    write: async (input) => {
      const record = buildRecord(input);
      try {
        await mkdir3(dirname3(options.filePath), { recursive: true });
        await appendFile(options.filePath, `${JSON.stringify(record)}\n`, "utf8");
      } catch (e) {
        throw new AuditWriteError(options.filePath, e);
      }
    }
  };
}
755
+
756
+ // src/core/audit/retention.ts
757
+ import { readFileSync as readFileSync5 } from "fs";
758
+ import { writeFile as writeFile2 } from "fs/promises";
759
// Drop audit records older than retentionDays. Unparseable lines are kept
// (never destroy data we cannot interpret); the file is rewritten only when
// something was actually removed. Returns the count of dropped records.
async function sweepAuditLog(auditPath, retentionDays) {
  let raw;
  try {
    raw = readFileSync5(auditPath, "utf8");
  } catch {
    return { removed: 0 };
  }
  const cutoff = Date.now() - retentionDays * 86400 * 1e3;
  const survivors = [];
  let removed = 0;
  for (const line of raw.split("\n")) {
    const trimmed = line.trim();
    if (!trimmed) continue;
    let record;
    try {
      record = JSON.parse(trimmed);
    } catch {
      survivors.push(trimmed);
      continue;
    }
    const expired = typeof record.timestamp === "string" && Date.parse(record.timestamp) < cutoff;
    if (expired) {
      removed += 1;
    } else {
      survivors.push(trimmed);
    }
  }
  if (removed > 0) {
    const body = survivors.length > 0 ? `${survivors.join("\n")}\n` : "";
    await writeFile2(auditPath, body, "utf8");
  }
  return { removed };
}
790
+
791
+ // src/core/runtime.ts
792
+ import "dotenv/config";
793
+ import { randomUUID as randomUUID2 } from "crypto";
794
+
795
+ // src/core/keys.ts
796
// Resolve the Claude API key for a tenant. Currently single-tenant: the key
// always comes from ANTHROPIC_API_KEY; the tenant argument is reserved.
async function getClaudeKey(_tenantId) {
  const key = process.env.ANTHROPIC_API_KEY;
  if (key) return key;
  throw new MissingClaudeKeyError();
}
803
+
804
+ // src/core/llm/client.ts
805
+ import Anthropic from "@anthropic-ai/sdk";
806
// Thin retry wrapper over the Anthropic SDK. Transient failures (as judged by
// isTransient: 5xx/408/429) are retried with exponential backoff (1s, 2s, 4s,
// ...); any other failure, or exhausting the budget of maxRetries + 1 total
// attempts, surfaces as LlmApiError with the last error attached as cause.
function createLlmClient(options) {
  const sdk = new Anthropic({ apiKey: options.apiKey });
  const maxRetries = options.maxRetries ?? 3;
  const attemptCall = (callOptions) =>
    sdk.messages.create({
      model: callOptions.model,
      max_tokens: callOptions.maxTokens,
      temperature: callOptions.temperature,
      system: callOptions.system,
      messages: callOptions.messages,
      tools: callOptions.tools
    });
  return {
    call: async (callOptions) => {
      let lastError;
      for (let attempt = 0; attempt <= maxRetries; attempt++) {
        try {
          return await attemptCall(callOptions);
        } catch (error) {
          lastError = error;
          if (!isTransient(error) || attempt === maxRetries) break;
          await sleep(2 ** attempt * 1e3);
        }
      }
      throw new LlmApiError(
        `Anthropic API call failed after ${maxRetries + 1} attempts`,
        lastError
      );
    }
  };
}
835
// An error is retry-worthy only if it is an Anthropic API error with a 5xx
// status, a request timeout (408), or a rate limit (429).
function isTransient(error) {
  if (!(error instanceof Anthropic.APIError)) return false;
  const status = error.status ?? 0;
  return status >= 500 || [408, 429].includes(status);
}
// Promise-based delay helper for backoff.
function sleep(ms) {
  return new Promise((wake) => setTimeout(wake, ms));
}
843
+
844
+ // src/core/telemetry.ts
845
// src/core/telemetry.ts
// Telemetry is hard-disabled in this build; track() is a no-op placeholder
// kept so call sites do not need to change when it is wired up.
function isTelemetryEnabled(_config) {
  return false;
}
async function track(_event) {
}

// src/core/types.ts
// Tenant used when the runtime is created without an explicit tenantId.
var DEFAULT_TENANT_ID = "default";
853
+
854
+ // src/core/runtime.ts
855
// Build the skill runtime: resolves skills from the registry, wires up the
// audit trail around each invocation, and lazily provisions config + LLM
// client only for skills that declare MCP servers (i.e., need Claude).
function createRuntime(options) {
  const { registry, auditLogger } = options;
  const tenantId = options.tenantId ?? DEFAULT_TENANT_ID;
  let cachedConfig = options.config;
  let cachedLlmClient = options.llmClient;

  // Memoized config load — only hit for LLM-backed skills.
  async function getConfig() {
    if (!cachedConfig) cachedConfig = await loadConfig();
    return cachedConfig;
  }

  // Memoized Anthropic client — key is resolved per tenant on first use.
  async function getLlmClient() {
    if (!cachedLlmClient) {
      const apiKey = await getClaudeKey(tenantId);
      cachedLlmClient = createLlmClient({ apiKey });
    }
    return cachedLlmClient;
  }

  return {
    invoke: async ({ skill: skillName, input, actor, triggerMetadata, correlationId }) => {
      const skill = registry.lookup(skillName);
      const runId = randomUUID2();
      const workflowId = correlationId ?? runId;
      // A skill needs the LLM stack only if it declares at least one MCP server.
      const needsLlm = Boolean(skill.mcpServers) && Object.keys(skill.mcpServers).length > 0;
      const config = needsLlm ? await getConfig() : void 0;
      const llmClient = needsLlm ? await getLlmClient() : void 0;
      const ctx = {
        tenantId,
        runId,
        workflowId,
        auditLogger,
        actor,
        ...config ? { config } : {},
        ...llmClient ? { llmClient } : {}
      };
      const writeEvent = (type, payload) =>
        auditLogger.write({ type, tenantId, runId, workflowId, skill: skillName, actor, payload });
      await writeEvent("trigger", { input, triggerMetadata: triggerMetadata ?? {} });
      try {
        const output = await skill.run(input, ctx);
        await writeEvent("skill_complete", { output });
        if (config && isTelemetryEnabled(config)) {
          await track({ event: "skill_complete", properties: { skill: skillName } });
        }
        return output;
      } catch (e) {
        // Record the failure in the audit log, then rethrow to the caller.
        await writeEvent("error", {
          error_name: e instanceof Error ? e.name : "UnknownError",
          error_message: e instanceof Error ? e.message : String(e)
        });
        throw e;
      }
    }
  };
}
931
+
932
+ // src/core/skills/registry.ts
933
// Build an immutable Skill lookup table keyed by skill name.
// Throws DuplicateSkillError when two skills share a name, and
// SkillNotFoundError (carrying the sorted list of known names) on a
// lookup miss.
function createRegistry(skills2) {
  const index = new Map();
  for (const entry of skills2) {
    if (index.has(entry.name)) {
      throw new DuplicateSkillError(entry.name);
    }
    index.set(entry.name, entry);
  }
  const sortedNames = () => [...index.keys()].sort();
  return {
    list: sortedNames,
    has: (name) => index.has(name),
    lookup: (name) => {
      const found = index.get(name);
      if (found === undefined) {
        throw new SkillNotFoundError(name, sortedNames());
      }
      return found;
    }
  };
}
953
+
954
+ // src/core/llm/skill-executor.ts
955
+ import { ZodError } from "zod";
956
+ import { zodToJsonSchema } from "zod-to-json-schema";
957
+
958
+ // src/core/mcp/client.ts
959
+ import { Client as McpSdkClient } from "@modelcontextprotocol/sdk/client/index.js";
960
+ import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
961
// Spawn an MCP server as a child process over stdio and return a thin
// client wrapper exposing only what the skill executor needs:
// listTools / callTool / close, tagged with the server name.
// The child inherits process.env with per-server env vars layered on top.
async function connectMcpClient(serverName, config) {
  const transport = new StdioClientTransport({
    command: config.command,
    // Copy args so the transport cannot mutate the caller's array.
    args: [...config.args],
    env: { ...process.env, ...config.env }
  });
  const sdkClient = new McpSdkClient(
    { name: "parrat-mcp-client", version: "0.0.0" },
    { capabilities: {} }
  );
  await sdkClient.connect(transport);
  return {
    serverName,
    // Normalize the SDK's tool listing to { name, description, inputSchema }.
    async listTools() {
      const result = await sdkClient.listTools();
      return result.tools.map((t) => ({
        name: t.name,
        description: t.description,
        inputSchema: t.inputSchema
      }));
    },
    async callTool(name, args) {
      const result = await sdkClient.callTool({ name, arguments: args });
      return {
        content: result.content,
        // isError is optional in the protocol; only pass through real booleans.
        isError: typeof result.isError === "boolean" ? result.isError : void 0
      };
    },
    async close() {
      await sdkClient.close();
    }
  };
}
994
+
995
+ // src/core/mcp/filter.ts
996
// Normalize a per-server tool allowlist, precomputing the fully-qualified
// names (mcp__<server>__<tool>) under which Claude sees each tool.
function resolveAllowlist(serverName, toolNames) {
  const fullyQualified = toolNames.map((tool) => `mcp__${serverName}__${tool}`);
  return { serverName, toolNames, fullyQualified };
}
1003
+
1004
+ // src/core/llm/skill-executor.ts
1005
// USD price per million tokens, keyed by model-name prefix.
var COST_PER_MTOK = {
  "claude-opus-4": { input: 15, output: 75 },
  "claude-sonnet-4-6": { input: 3, output: 15 },
  "claude-haiku-4-5": { input: 0.8, output: 4 }
};
// Estimate the USD cost of a single API call from its token counts.
// Models with no matching prefix price at 0 so an unknown model never
// blocks execution — the estimate is informational only.
function estimateCost(model, inputTokens, outputTokens) {
  for (const [prefix, rates] of Object.entries(COST_PER_MTOK)) {
    if (model.startsWith(prefix)) {
      return (inputTokens * rates.input + outputTokens * rates.output) / 1e6;
    }
  }
  return 0;
}
1016
// Run one LLM-driven skill to completion: connect the allowed MCP servers,
// expose their (allowlisted) tools plus a synthetic `emit_findings` tool
// whose schema is the skill's output schema, then loop Claude turns —
// routing tool_use requests to the right MCP client and auditing every
// call — until findings are captured or the turn budget is exhausted.
// Returns { output, totalTurns, inputTokens, outputTokens, totalCostUsd, durationMs }.
async function executeSkill(options) {
  const startedAt = Date.now();
  const clients = [];
  // fully-qualified tool name -> { client, bareName } for dispatch.
  const toolRouting = /* @__PURE__ */ new Map();
  const tools = [];
  const emitFindingsName = "emit_findings";
  // The structured-output channel: Claude reports findings by calling this
  // tool, whose JSON schema is derived from the skill's zod output schema.
  tools.push({
    name: emitFindingsName,
    description: "Report your investigation findings. Call this exactly once when your investigation is complete.",
    input_schema: zodToJsonSchema(options.outputSchema)
  });
  try {
    for (const [serverName, server] of Object.entries(options.mcpServers)) {
      const client = await connectMcpClient(serverName, server.config);
      clients.push(client);
      const allowlist = resolveAllowlist(serverName, server.tools);
      const allowedSet = new Set(server.tools);
      const allTools = await client.listTools();
      // Expose only allowlisted tools, under mcp__<server>__<tool> names.
      for (const tool of allTools) {
        if (!allowedSet.has(tool.name)) continue;
        const fqName = `mcp__${serverName}__${tool.name}`;
        toolRouting.set(fqName, { client, bareName: tool.name });
        tools.push({
          name: fqName,
          description: tool.description ?? `Tool ${tool.name} from MCP server ${serverName}`,
          input_schema: tool.inputSchema
        });
      }
      // Fail fast if the server is missing any tool the skill requires.
      const exposedNames = new Set(allTools.map((t) => t.name));
      for (const expected of server.tools) {
        if (!exposedNames.has(expected)) {
          throw new Error(
            `MCP server '${serverName}' did not expose required tool '${expected}'. Available: ${[...exposedNames].join(", ") || "(none)"}.`
          );
        }
      }
      // allowlist is currently computed only for its side-value; keep the
      // call (and silence the unused binding) so fqName conventions stay
      // centralized in resolveAllowlist.
      void allowlist;
    }
    const messages = [{ role: "user", content: options.userMessage }];
    let inputTokens = 0;
    let outputTokens = 0;
    let totalCostUsd = 0;
    let capturedOutput;
    for (let turn = 0; turn < options.maxTurns; turn++) {
      const turnStartedAt = Date.now();
      const response = await options.llm.call({
        model: options.model,
        maxTokens: options.maxTokens,
        temperature: options.temperature,
        system: options.systemPrompt,
        messages,
        tools
      });
      const turnDurationMs = Date.now() - turnStartedAt;
      inputTokens += response.usage.input_tokens;
      outputTokens += response.usage.output_tokens;
      const turnCostUsd = estimateCost(
        options.model,
        response.usage.input_tokens,
        response.usage.output_tokens
      );
      totalCostUsd += turnCostUsd;
      // One audit record per model turn, with per-turn token/cost/latency.
      await options.auditLogger.write({
        type: "claude_call",
        tenantId: options.tenantId,
        runId: options.runId,
        workflowId: options.workflowId,
        skill: options.skillName,
        actor: options.actor,
        payload: {
          model: options.model,
          input_tokens: response.usage.input_tokens,
          output_tokens: response.usage.output_tokens,
          cost_estimate_usd: turnCostUsd,
          duration_ms: turnDurationMs,
          turn_index: turn
        }
      });
      if (response.stop_reason === "end_turn") {
        if (capturedOutput !== void 0) {
          return {
            output: capturedOutput,
            totalTurns: turn + 1,
            inputTokens,
            outputTokens,
            totalCostUsd,
            durationMs: Date.now() - startedAt
          };
        }
        // NOTE(review): Claude ended without ever calling emit_findings;
        // MaxTurnsExceededError is a slightly misleading error type here —
        // consider a dedicated "no findings emitted" error.
        throw new MaxTurnsExceededError(options.skillName, options.maxTurns);
      }
      if (response.stop_reason === "tool_use") {
        const toolUses = response.content.filter(
          (block) => block.type === "tool_use"
        );
        // Echo the assistant turn back so the conversation stays valid.
        messages.push({ role: "assistant", content: response.content });
        const toolResultBlocks = [];
        for (const toolUse of toolUses) {
          if (toolUse.name === emitFindingsName) {
            try {
              // Validate the findings against the skill's output schema;
              // invalid findings are bounced back for revision.
              capturedOutput = options.outputSchema.parse(toolUse.input);
              await options.auditLogger.write({
                type: "skill_output_captured",
                tenantId: options.tenantId,
                runId: options.runId,
                workflowId: options.workflowId,
                skill: options.skillName,
                actor: options.actor,
                payload: { output: capturedOutput, turn_index: turn }
              });
              toolResultBlocks.push({
                type: "tool_result",
                tool_use_id: toolUse.id,
                content: "Findings recorded."
              });
            } catch (e) {
              const message = e instanceof ZodError ? e.message : String(e);
              toolResultBlocks.push({
                type: "tool_result",
                tool_use_id: toolUse.id,
                content: `Validation error \u2014 revise and call emit_findings again: ${message}`
              });
            }
            continue;
          }
          const route = toolRouting.get(toolUse.name);
          if (!route) {
            throw new Error(
              `Claude requested tool '${toolUse.name}' which is not in the allowlist. This indicates a bug \u2014 the API SDK should never expose disallowed tools.`
            );
          }
          const callStartedAt = Date.now();
          const args = toolUse.input;
          const result = await route.client.callTool(route.bareName, args);
          const durationMs = Date.now() - callStartedAt;
          // Audit every MCP tool invocation with args, result, and latency.
          await options.auditLogger.write({
            type: "mcp_call",
            tenantId: options.tenantId,
            runId: options.runId,
            workflowId: options.workflowId,
            skill: options.skillName,
            actor: options.actor,
            payload: {
              server: route.client.serverName,
              tool: route.bareName,
              args,
              result: result.content,
              is_error: result.isError ?? false,
              duration_ms: durationMs,
              turn_index: turn
            }
          });
          toolResultBlocks.push({
            type: "tool_result",
            tool_use_id: toolUse.id,
            content: JSON.stringify(result.content)
          });
        }
        // Short-circuit: if this turn ONLY emitted findings (no other tool
        // results to feed back), we are done — skip one extra model round-trip.
        if (capturedOutput !== void 0 && toolResultBlocks.every((b) => b.content === "Findings recorded.")) {
          return {
            output: capturedOutput,
            totalTurns: turn + 1,
            inputTokens,
            outputTokens,
            totalCostUsd,
            durationMs: Date.now() - startedAt
          };
        }
        messages.push({ role: "user", content: toolResultBlocks });
        continue;
      }
      throw new Error(
        `Unexpected stop_reason '${response.stop_reason}' from Claude in skill '${options.skillName}'`
      );
    }
    throw new MaxTurnsExceededError(options.skillName, options.maxTurns);
  } finally {
    // Always tear down MCP child processes, swallowing close errors so the
    // primary result/error is never masked by cleanup.
    for (const client of clients) {
      try {
        await client.close();
      } catch {
      }
    }
  }
}
1201
+
1202
+ // src/core/skills/Skill.ts
1203
+ import { ZodError as ZodError2 } from "zod";
1204
// Wrap a skill spec so that run() validates its input before execution and
// its output after, converting zod failures into SchemaValidationError
// (tagged with direction and skill name). Non-zod errors pass through.
function defineSkill(spec) {
  function validate(schema, value, direction) {
    try {
      return schema.parse(value);
    } catch (e) {
      if (!(e instanceof ZodError2)) throw e;
      throw new SchemaValidationError(direction, spec.name, e);
    }
  }
  const run = async (input, ctx) => {
    const checkedInput = validate(spec.inputSchema, input, "input");
    const rawOutput = await spec.run(checkedInput, ctx);
    return validate(spec.outputSchema, rawOutput, "output");
  };
  return { ...spec, run };
}
1224
+
1225
+ // src/skills/freshness-investigation/freshness-context-provider.ts
1226
+ import { readFile } from "fs/promises";
1227
+ import { join } from "path";
1228
// Reads dbt's target/sources.json artifact and maps each freshness result
// into the context shape the investigation prompt consumes. A missing
// artifact means "no known state" (empty list), not an error; a corrupt
// artifact is a hard error.
var DbtFreshnessContextProvider = class {
  dbtProjectDir;
  constructor(dbtProjectDir) {
    this.dbtProjectDir = dbtProjectDir;
  }
  // Optionally filter to the given source names; a name matches either the
  // full unique_id or its trailing ".<name>" segment.
  async getContext(sources) {
    const artifactPath = join(this.dbtProjectDir, "target", "sources.json");
    let text;
    try {
      text = await readFile(artifactPath, "utf8");
    } catch (e) {
      if (e.code !== "ENOENT") throw e;
      return [];
    }
    let doc;
    try {
      doc = JSON.parse(text);
    } catch (e) {
      throw new Error(`sources.json parse error: ${e.message}`);
    }
    const contexts = (doc.results ?? []).map(mapResult);
    if (!sources || sources.length === 0) return contexts;
    const matches = (ctx) =>
      sources.some((s) => ctx.source === s || ctx.source.endsWith(`.${s}`));
    return contexts.filter(matches);
  }
};
1256
// Translate one dbt source-freshness result row into the internal context
// shape. Only warn/error rows carry a thresholdBreached marker; anything
// other than pass/warn/error maps to "unknown".
function mapResult(r) {
  const source = r.unique_id;
  const lastLoadedAt = r.max_loaded_at ?? null;
  switch (r.status) {
    case "pass":
      return { source, lastLoadedAt, status: "fresh" };
    case "warn":
      return { source, lastLoadedAt, status: "stale_warn", thresholdBreached: "warn" };
    case "error":
      return { source, lastLoadedAt, status: "stale_error", thresholdBreached: "error" };
    default:
      return { source, lastLoadedAt, status: "unknown" };
  }
}
1274
+
1275
+ // src/skills/freshness-investigation/input-schema.ts
1276
+ import { z as z2 } from "zod";
1277
// Input contract for freshness-investigation: an optional single source to
// inspect (all sources when omitted) and which staleness threshold counts.
var inputSchema = z2.object({
  source: z2.string().min(1).optional(),
  threshold: z2.enum(["warn", "error"]).default("error")
});
1281
+
1282
+ // src/skills/freshness-investigation/output-schema.ts
1283
+ import { z as z3 } from "zod";
1284
// One stale source plus the evidence-backed explanation of its staleness.
var staleSourceSchema = z3.object({
  source: z3.string().describe("Source identifier in 'source_name.table_name' format"),
  last_loaded_at: z3.string().describe("ISO 8601 timestamp of the most recent loaded data"),
  threshold_breached: z3.enum(["warn", "error"]).describe("Which threshold was crossed"),
  summary: z3.string().describe("One-paragraph explanation of why this specific source is stale")
});
// Downstream blast radius of the stale source(s).
var downstreamImpactSchema = z3.object({
  models: z3.array(z3.string()).describe("Fully-qualified model names that depend on stale source(s)"),
  severity: z3.enum(["high", "medium", "low"]).describe(
    "Estimated business impact: high = critical mart; medium = intermediate; low = limited"
  )
});
// A single tool-backed observation supporting the conclusion.
var evidenceSchema = z3.object({
  tool: z3.string().describe("Which MCP tool produced this evidence"),
  finding: z3.string().describe("What the tool returned that's load-bearing for the conclusion")
});
// Structured findings emitted by the freshness-investigation agent via the
// emit_findings tool; also validated as the skill's output.
var outputSchema = z3.object({
  status: z3.enum(["fresh", "stale_warn", "stale_error", "no_freshness_config", "unknown"]),
  stale_sources: z3.array(staleSourceSchema).default([]),
  confidence: z3.enum(["high", "medium", "low"]),
  root_cause_summary: z3.string(),
  evidence: z3.array(evidenceSchema).default([]),
  recommended_action: z3.string().nullable(),
  downstream_impact: downstreamImpactSchema
});
1309
+
1310
+ // src/skills/freshness-investigation/prompt.ts
1311
// Compose the system prompt: the base instructions, plus a known-freshness
// table when pre-read sources.json verdicts are available.
function buildSystemPrompt(contexts) {
  if (contexts.length > 0) {
    return `${BASE_PROMPT}

${buildFreshnessBlock(contexts)}`;
  }
  return BASE_PROMPT;
}
1317
// Render the pre-read freshness verdicts as the Markdown table block that
// buildSystemPrompt appends to the base instructions.
function buildFreshnessBlock(contexts) {
  const toRow = (c) =>
    `| ${c.source} | ${c.lastLoadedAt ?? "unknown"} | ${c.status} | ${c.thresholdBreached ?? "\u2014"} |`;
  const rows = contexts.map(toRow).join("\n");
  return `## Known freshness state

The following freshness verdicts were read from dbt's sources.json before this investigation started. Use these as your starting point \u2014 your tool calls should confirm root cause and trace downstream impact, not re-discover what is already known.

| Source | Last Loaded | Status | Threshold Breached |
|---|---|---|---|
${rows}`;
}
1331
// System prompt for the freshness-investigation agent. Runtime data — the
// literal's content is sent verbatim to the model; do not edit casually.
var BASE_PROMPT = `You are Parrat's freshness investigation agent. Your job: when given a stale dbt source (or asked to check all sources), determine the root cause by examining the dbt project's source freshness configs and last-loaded timestamps, then verify the warehouse state directly. Return a structured finding.

You have exactly four tools available:
- mcp__dbt__list \u2014 enumerate sources/models in the dbt project
- mcp__dbt__get_node_details_dev \u2014 pull a specific node's details, including freshness config and last_loaded_at
- mcp__dbt__get_lineage_dev \u2014 trace downstream models that depend on a given source
- mcp__dbt__show \u2014 execute a SELECT query against the connected warehouse via dbt's existing connection. Returns tabular results. Use for: row counts, last-ingested timestamps, spot-checks on raw tables.

You do NOT have other tools. Do not attempt to call dbt CLI commands directly or modify state.

## CRITICAL: dbt naming conventions

The dbt-mcp tools use TWO different node-naming conventions for parameters that conceptually identify the same node. If you mix them up, every call fails:

| Tool / Parameter | Format | Example |
|---|---|---|
| list.node_selection | dbt selector (COLON between type and name) | source:tpch.orders |
| get_node_details_dev.node_id | dbt selector (COLON between type and name) | source:tpch.orders |
| get_lineage_dev.unique_id | manifest unique_id (DOTS, includes project name) | source.parrat_dogfood.tpch.orders |

Diagnostic tip: if you see "No node found for **selector**: ..." \u2192 you sent unique_id format to a tool expecting selector. Switch the type-separator from "." to ":".

If you see "No node found for **unique_id**: ..." \u2192 you sent selector format to a tool expecting unique_id. Switch ":" to "." and add the project name.

## Investigation strategy

1. ALWAYS call list({resource_type: ["source"]}) first to enumerate sources. It returns a newline-separated list of selectors in format source:<project>.<source>.<table> (e.g., source:parrat_dogfood.tpch.orders). This both confirms the source exists AND gives you the project name needed downstream.
2. For each source under investigation: pass the selector verbatim to get_node_details_dev({node_id: "..."}) to retrieve freshness config + last_loaded_at.
3. To trace lineage, convert the selector to unique_id by replacing ":" with ".": e.g., source:parrat_dogfood.tpch.orders \u2192 source.parrat_dogfood.tpch.orders. Pass to get_lineage_dev({unique_id: "..."}).
4. Compare last_loaded_at against the user's threshold ('warn' or 'error', default 'error'). For stale sources, use show to verify at the warehouse layer: query the underlying table's row count and MAX(<timestamp_column>) to confirm the warehouse state matches dbt's freshness verdict. Example: show({sql: "SELECT COUNT(*) as row_count, MAX(o_orderdate) as last_date FROM ORDERS LIMIT 1"}).
5. If show returns an error result: record it in evidence[] as { tool: "show", finding: "warehouse query failed: <error message>" }, fall back to the dbt-only freshness verdict, and set confidence: "medium". Do not retry show. Continue with what you have.
6. Synthesize all findings into the structured output schema.

## Confidence calibration

You must assign a confidence level for each investigation:
- **high** \u2014 at least two tool results corroborate the conclusion (e.g., source freshness IS configured AND last_loaded_at IS past threshold AND lineage shows confirmed downstream models AND warehouse query confirms row delta)
- **medium** \u2014 one tool result clearly supports the conclusion AND your reasoning fills any gaps (e.g., freshness config found, but warehouse query failed or lineage trace failed)
- **low** \u2014 conclusion inferred from incomplete evidence (e.g., freshness config not found; you're guessing based on the source's apparent age)

If confidence would be low, prefer status='unknown' and explain in root_cause_summary what evidence is missing.

## Anti-hallucination rules

- If a source has no freshness configuration, set status='no_freshness_config' and explain.
- If you cannot determine freshness from the available tools, set status='unknown'. Do not fabricate timestamps or guess values.
- evidence[] must reference real tool results \u2014 do not invent findings.
- recommended_action may be null if no clear action is appropriate.

## Tool budget

You have at most 6 tool-call turns. Plan accordingly:
- Single source: typically 2-3 turns (get_node_details_dev \u2192 get_lineage_dev \u2192 show)
- All sources: 1 turn for list + 1 turn per source detail (batch where possible) + 1 lineage trace per stale source + 1 show per stale source

If you exceed the budget without a final answer, the system will throw an error.

## Output

When your investigation is complete, call 'emit_findings' with your structured findings. The schema is provided in the tool definition.`;
1391
+
1392
+ // src/skills/freshness-investigation/index.ts
1393
// The only dbt-mcp tools this skill may expose to the model.
var allowedDbtTools = ["list", "get_node_details_dev", "get_lineage_dev", "show"];
// Investigation skill: given a (possibly omitted) source and a threshold,
// pre-reads dbt's sources.json for known verdicts, then runs the agent loop
// against the user's configured dbt MCP server.
var freshnessInvestigationSkill = defineSkill({
  name: "freshness-investigation",
  inputSchema,
  outputSchema,
  kind: "investigation",
  // Tool allowlist — declared at the Skill level. The runtime resolves the
  // actual MCP server config (command, args, env) from the user's
  // parrat.config.yaml at invocation time.
  mcpServers: {
    dbt: {
      // The `config` field is filled in by the runtime from parrat.config.yaml
      // when the Skill is invoked. We use a placeholder here that the runtime
      // overrides; declaring it satisfies the SkillSpec type.
      config: { command: "", args: [], env: {} },
      tools: allowedDbtTools
    }
  },
  async run(input, ctx) {
    // Fail fast if the runtime did not inject its dependencies.
    if (!ctx.config) {
      throw new Error(
        "freshness-investigation requires runtime-provided config. Did the runtime forget to load it?"
      );
    }
    if (!ctx.llmClient) {
      throw new Error(
        "freshness-investigation requires an LLM client. Did the runtime forget to construct one?"
      );
    }
    const userMcpServers = ctx.config.mcpServers;
    const dbtUserConfig = userMcpServers.dbt;
    if (!dbtUserConfig) {
      throw new Error(
        "freshness-investigation requires an 'dbt' MCP server in parrat.config.yaml."
      );
    }
    // DBT_PROJECT_DIR locates target/sources.json for the pre-read context.
    const dbtProjectDir = ctx.config.mcpServers.dbt?.env?.DBT_PROJECT_DIR;
    if (!dbtProjectDir) {
      throw new Error(
        "freshness-investigation: DBT_PROJECT_DIR must be set in mcpServers.dbt.env in parrat.config.yaml"
      );
    }
    // Seed the prompt with whatever dbt already knows, scoped to the
    // requested source when one was given.
    const provider = new DbtFreshnessContextProvider(dbtProjectDir);
    const contexts = await provider.getContext(input.source ? [input.source] : void 0);
    const prompt = buildSystemPrompt(contexts);
    const result = await executeSkill({
      skillName: "freshness-investigation",
      llm: ctx.llmClient,
      systemPrompt: prompt,
      userMessage: JSON.stringify(input),
      mcpServers: {
        dbt: {
          config: dbtUserConfig,
          tools: allowedDbtTools
        }
      },
      outputSchema,
      model: ctx.config.claude.model,
      maxTurns: ctx.config.claude.max_turns,
      maxTokens: ctx.config.claude.max_tokens,
      temperature: ctx.config.claude.temperature,
      auditLogger: ctx.auditLogger,
      runId: ctx.runId,
      workflowId: ctx.workflowId,
      tenantId: ctx.tenantId,
      actor: ctx.actor ?? "user"
    });
    return result.output;
  }
});
1463
+
1464
+ // src/skills/lineage-analysis/input-schema.ts
1465
+ import { z as z4 } from "zod";
1466
// Input contract for lineage-analysis: the node to analyse, which direction
// to walk, and a bounded traversal depth (1-5, default 3).
var inputSchema2 = z4.object({
  node_id: z4.string().min(1),
  direction: z4.enum(["upstream", "downstream", "both"]).default("both"),
  depth: z4.number().int().min(1).max(5).default(3),
  project_path: z4.string().optional()
});
1472
+
1473
+ // src/skills/lineage-analysis/output-schema.ts
1474
+ import { z as z5 } from "zod";
1475
// A single tool-backed observation supporting the conclusion.
var evidenceSchema2 = z5.object({
  tool: z5.string().describe("Which MCP tool produced this evidence"),
  finding: z5.string().describe("What the tool returned that's load-bearing for the conclusion")
});
// Structured findings emitted by the lineage-analysis agent: the resolved
// lineage lists, their total size, a prose impact summary, and an optional
// critical path. `truncated` flags lists capped by the prompt's 50-node rule.
var outputSchema2 = z5.object({
  node_id: z5.string(),
  upstream_nodes: z5.array(z5.string()),
  downstream_nodes: z5.array(z5.string()),
  impact_count: z5.number().int(),
  impact_summary: z5.string(),
  critical_path: z5.array(z5.string()).optional(),
  truncated: z5.boolean().default(false),
  confidence: z5.enum(["high", "medium", "low"]),
  evidence: z5.array(evidenceSchema2).default([])
});
1490
+
1491
+ // src/skills/lineage-analysis/prompt.ts
1492
// System prompt for the lineage-analysis agent. Runtime data — the literal's
// content is sent verbatim to the model; do not edit casually.
var BASE_PROMPT2 = `You are Parrat's lineage analysis agent. You are given a dbt node identifier, a direction (upstream, downstream, or both), and a depth limit. Your job: map the lineage graph for that node, summarise the impact, and identify the critical path if one exists.

You have exactly three tools available:
- mcp__dbt__list \u2014 enumerate models/sources to confirm a node exists and retrieve its selector
- mcp__dbt__get_node_details_dev \u2014 pull a node's details to verify it exists and get its type
- mcp__dbt__get_lineage_dev \u2014 retrieve the upstream and/or downstream lineage graph

You do NOT have other tools. Do not attempt to call dbt CLI directly or modify state.

## CRITICAL: dbt naming conventions

The dbt-mcp tools use TWO different node-naming conventions. If you mix them up, every call fails:

| Tool / Parameter | Format | Example |
|---|---|---|
| list.node_selection | dbt selector (COLON between type and name) | model:fct_orders |
| get_node_details_dev.node_id | dbt selector (COLON between type and name) | model:fct_orders |
| get_lineage_dev.unique_id | manifest unique_id (DOTS, includes project name) | model.parrat_dogfood.fct_orders |

Diagnostic tip: "No node found for selector" \u2192 switch "." to ":". "No node found for unique_id" \u2192 switch ":" to "." and add the project name.

## Investigation strategy

1. Call list() to confirm the target node exists and retrieve its selector. This also gives you the project name needed for unique_id construction.
2. Call get_lineage_dev({unique_id: "<node_unique_id>", depth: <depth>}) with the direction from input. Convert selector to unique_id by replacing ":" with "." and prepending the project name.
3. From the returned graph, extract:
- upstream_nodes: all nodes that feed into the target (empty array if direction is 'downstream')
- downstream_nodes: all nodes the target feeds into (empty array if direction is 'upstream')
- impact_count: total count of upstream_nodes + downstream_nodes
4. If impact_count exceeds 50, set truncated: true and limit each list to the 25 closest nodes (by graph distance). Note the truncation in impact_summary.
5. Identify critical_path if one is apparent: the longest chain of high-fan-out models, or the chain connecting the target to a known mart or reporting layer. Omit critical_path entirely if the graph is shallow or no clear path stands out.
6. Write impact_summary: one paragraph describing what the lineage means in plain English \u2014 which upstream sources feed this node, which downstream reports or marts depend on it, and the blast radius of a change to this node.

## Confidence calibration

- **high** \u2014 get_lineage_dev returned a non-empty graph and node details confirmed the node type
- **medium** \u2014 get_lineage_dev succeeded but node details call failed, or graph was truncated
- **low** \u2014 could not retrieve lineage; impact inferred from node name alone

## Anti-hallucination rules

- upstream_nodes and downstream_nodes must contain only unique_ids returned by get_lineage_dev \u2014 do not invent nodes.
- If get_lineage_dev returns an empty graph, return empty arrays and set confidence: "medium" with a note in impact_summary explaining why the graph is empty.
- critical_path may be omitted (undefined) if no clear path exists \u2014 do not fabricate one.

## Tool budget

You have at most 4 tool-call turns:
- list (1) \u2192 get_lineage_dev (1) \u2192 optional get_node_details_dev for critical path clarification (1) \u2192 emit_findings (1)

## Output

When your analysis is complete, call 'emit_findings' with your structured findings. The schema is provided in the tool definition.`;
1545
+
1546
+ // src/skills/lineage-analysis/index.ts
1547
// The only dbt-mcp tools this skill may expose to the model (read-only;
// no `show`, since lineage needs no warehouse queries).
var allowedDbtTools2 = ["list", "get_node_details_dev", "get_lineage_dev"];
// Investigation skill: maps upstream/downstream lineage for a dbt node via
// the agent loop. The mcpServers.config here is a placeholder the runtime
// overrides from parrat.config.yaml at invocation time.
var lineageAnalysisSkill = defineSkill({
  name: "lineage-analysis",
  inputSchema: inputSchema2,
  outputSchema: outputSchema2,
  kind: "investigation",
  mcpServers: {
    dbt: {
      config: { command: "", args: [], env: {} },
      tools: allowedDbtTools2
    }
  },
  async run(input, ctx) {
    // Fail fast if the runtime did not inject its dependencies.
    if (!ctx.config) throw new Error("lineage-analysis requires runtime-provided config.");
    if (!ctx.llmClient) throw new Error("lineage-analysis requires an LLM client.");
    const dbtUserConfig = ctx.config.mcpServers.dbt;
    if (!dbtUserConfig) {
      throw new Error("lineage-analysis requires a 'dbt' MCP server in parrat.config.yaml.");
    }
    const result = await executeSkill({
      skillName: "lineage-analysis",
      llm: ctx.llmClient,
      systemPrompt: BASE_PROMPT2,
      userMessage: JSON.stringify(input),
      mcpServers: { dbt: { config: dbtUserConfig, tools: allowedDbtTools2 } },
      outputSchema: outputSchema2,
      model: ctx.config.claude.model,
      maxTurns: ctx.config.claude.max_turns,
      maxTokens: ctx.config.claude.max_tokens,
      temperature: ctx.config.claude.temperature,
      auditLogger: ctx.auditLogger,
      runId: ctx.runId,
      workflowId: ctx.workflowId,
      tenantId: ctx.tenantId,
      actor: ctx.actor ?? "user"
    });
    return result.output;
  }
});
1586
+
1587
+ // src/skills/metric-drop-rca/input-schema.ts
1588
+ import { z as z6 } from "zod";
1589
// Input contract for metric-drop-rca: the metric, the model/column that
// computes it, the observed drop (percent, 0-100), and the lookback window.
var inputSchema3 = z6.object({
  metric_name: z6.string().min(1),
  model_id: z6.string().min(1),
  metric_column: z6.string().min(1),
  drop_percent: z6.number().min(0).max(100),
  time_window_hours: z6.number().positive().default(24),
  project_path: z6.string().optional()
});
1597
+
1598
+ // src/skills/metric-drop-rca/output-schema.ts
1599
+ import { z as z7 } from "zod";
1600
// A single tool-backed observation supporting the conclusion.
var evidenceSchema3 = z7.object({
  tool: z7.string().describe("Which MCP tool produced this evidence"),
  finding: z7.string().describe("What the tool returned that's load-bearing for the conclusion")
});
// Structured findings emitted by the metric-drop-rca agent: a root-cause
// category, prose explanation, suspect models, and supporting evidence.
var outputSchema3 = z7.object({
  metric_name: z7.string(),
  drop_percent: z7.number(),
  status: z7.enum([
    "data_missing",
    "volume_drop",
    "upstream_model_issue",
    "pipeline_failure",
    "schema_change",
    "unknown"
  ]),
  root_cause: z7.string(),
  suspect_models: z7.array(z7.string()),
  confidence: z7.enum(["high", "medium", "low"]),
  recommended_action: z7.string().nullable(),
  evidence: z7.array(evidenceSchema3).default([])
});
1621
+
1622
// src/skills/metric-drop-rca/prompt.ts
// System prompt for the metric-drop RCA agent. The \u2014 (em dash),
// \u2192 (right arrow), \u2013 (en dash) and \xD7 (multiplication sign)
// escapes are bundler-generated and must stay verbatim in the literal.
var BASE_PROMPT3 = `You are Parrat's metric drop RCA agent. You are given a metric name, the dbt model that computes it, the affected column, and an observed drop percentage. Your job: determine the root cause of the metric drop by examining the model's SQL, its upstream dependencies, and the current warehouse data.

You have exactly four tools available:
- mcp__dbt__list \u2014 enumerate models/sources in the dbt project
- mcp__dbt__get_node_details_dev \u2014 pull a node's details, including compiled SQL and database relation info
- mcp__dbt__get_lineage_dev \u2014 trace upstream/downstream dependencies for a given node
- mcp__dbt__show \u2014 execute a SELECT query against the connected warehouse via dbt's existing connection. Returns tabular results. Use for: current vs historical aggregates, upstream row counts, data volume checks.

You do NOT have other tools. Do not attempt to call dbt CLI directly or modify state.

## CRITICAL: dbt naming conventions

The dbt-mcp tools use TWO different node-naming conventions. If you mix them up, every call fails:

| Tool / Parameter | Format | Example |
|---|---|---|
| list.node_selection | dbt selector (COLON between type and name) | model:fct_orders |
| get_node_details_dev.node_id | dbt selector (COLON between type and name) | model:fct_orders |
| get_lineage_dev.unique_id | manifest unique_id (DOTS, includes project name) | model.parrat_dogfood.fct_orders |

Diagnostic tip: "No node found for selector" \u2192 switch "." to ":". "No node found for unique_id" \u2192 switch ":" to "." and add the project name.

## Investigation strategy

1. Call list({resource_type: ["model"]}) to confirm the target model exists and retrieve its selector. This also gives you the project name needed for unique_id construction downstream.
2. Call get_node_details_dev to retrieve the model's compiled SQL and database relation (schema + table name). Read the SQL carefully: identify which column feeds the metric, which date/timestamp column drives time partitioning.
3. Use show to run two comparison queries against the warehouse table from step 2:
   - Current window: SELECT <agg>(<metric_column>) FROM <schema>.<table> WHERE <date_col> >= <now minus time_window_hours>
   - Previous window: same query shifted back one full time_window_hours period
   Compare values to confirm the drop magnitude matches the reported drop_percent.
4. Call get_lineage_dev({unique_id: "<model_unique_id>", direction: "upstream"}) to find upstream models and sources. Convert selector to unique_id by replacing ":" with "." and prepending the project name.
5. For the most likely upstream contributors, use show to check row counts and MAX(<timestamp>) for both time windows. A sudden volume drop or missing rows upstream is the most common root cause.
6. Synthesize: identify suspect_models (dbt unique_ids), classify status, write root_cause.

## Confidence calibration

- **high** \u2014 warehouse queries confirmed the drop magnitude AND an upstream volume or quality change was identified
- **medium** \u2014 warehouse query succeeded but upstream cause is unclear; or upstream identified but warehouse query failed
- **low** \u2014 could not query warehouse; root cause inferred from model structure alone

If confidence would be low, set status='unknown' and explain what evidence is missing in root_cause.

## Anti-hallucination rules

- Do not fabricate table names or column names. Read them from get_node_details_dev output.
- If show returns an error: record it in evidence[] as { tool: "show", finding: "query failed: <error>" }, set confidence to at most "medium", and continue without retrying.
- suspect_models must contain dbt unique_ids from actual tool results \u2014 do not invent them.
- recommended_action may be null if no clear action is appropriate.

## Tool budget

You have at most 8 tool-call turns. Plan accordingly:
- Typical path: list (1) \u2192 get_node_details_dev (1) \u2192 show \xD7 2 (2) \u2192 get_lineage_dev (1) \u2192 show upstream \xD7 1\u20132 (1\u20132) \u2192 emit_findings (1)

## Output

When your investigation is complete, call 'emit_findings' with your structured findings. The schema is provided in the tool definition.`;
1680
+
1681
// src/skills/metric-drop-rca/index.ts
// The only dbt-mcp tools this skill is permitted to call.
var allowedDbtTools3 = ["list", "get_node_details_dev", "get_lineage_dev", "show"];
// Investigation skill: root-causes a reported metric drop by reading the
// model's compiled SQL, its lineage, and live warehouse aggregates.
var metricDropRcaSkill = defineSkill({
  name: "metric-drop-rca",
  inputSchema: inputSchema3,
  outputSchema: outputSchema3,
  kind: "investigation",
  mcpServers: {
    // Placeholder command/args/env; the real server config is injected from
    // the user's parrat.config.yaml at run time (see run() below).
    dbt: {
      config: { command: "", args: [], env: {} },
      tools: allowedDbtTools3
    }
  },
  async run(input, ctx) {
    const { config, llmClient } = ctx;
    if (!config) throw new Error("metric-drop-rca requires runtime-provided config.");
    if (!llmClient) throw new Error("metric-drop-rca requires an LLM client.");
    const dbtServer = config.mcpServers.dbt;
    if (!dbtServer) {
      throw new Error("metric-drop-rca requires a 'dbt' MCP server in parrat.config.yaml.");
    }
    const { model, max_turns, max_tokens, temperature } = config.claude;
    const { output } = await executeSkill({
      skillName: "metric-drop-rca",
      llm: llmClient,
      systemPrompt: BASE_PROMPT3,
      userMessage: JSON.stringify(input),
      mcpServers: { dbt: { config: dbtServer, tools: allowedDbtTools3 } },
      outputSchema: outputSchema3,
      model,
      maxTurns: max_turns,
      maxTokens: max_tokens,
      temperature,
      auditLogger: ctx.auditLogger,
      runId: ctx.runId,
      workflowId: ctx.workflowId,
      tenantId: ctx.tenantId,
      actor: ctx.actor ?? "user"
    });
    return output;
  }
});
1721
+
1722
// src/skills/index.ts
// All built-in Skills shipped with this CLI; consumed by createRegistry()
// wherever a Skill is looked up by name.
var skills = [
  freshnessInvestigationSkill,
  metricDropRcaSkill,
  lineageAnalysisSkill
];
1728
+
1729
// src/cli/run.ts
// Parse the JSON input, build a runtime, and invoke the named Skill.
// Returns { exitCode, output?, error? }; exit codes: 0 success, 2 bad input,
// 4 configuration/credential errors, 1 anything else.
async function runSkill(options) {
  const { skillName, inputJson, auditPath, correlationId } = options;
  let parsedInput;
  try {
    parsedInput = JSON.parse(inputJson);
  } catch (parseErr) {
    const reason = parseErr instanceof Error ? parseErr.message : String(parseErr);
    return { exitCode: 2, error: `Invalid JSON input: ${reason}` };
  }
  const runtime = createRuntime({
    registry: createRegistry(skills),
    auditLogger: createAuditLogger({ filePath: resolve4(auditPath) })
  });
  const request = { skill: skillName, input: parsedInput, actor: "user" };
  if (correlationId) request.correlationId = correlationId;
  try {
    return { exitCode: 0, output: await runtime.invoke(request) };
  } catch (err) {
    const name = err instanceof Error ? err.name : "UnknownError";
    const message = err instanceof Error ? `${err.name}: ${err.message}` : String(err);
    // Config/credential problems get a distinct exit code for scripting.
    const configErrorNames = ["MissingClaudeKeyError", "ConfigValidationError", "ConfigNotFoundError"];
    return { exitCode: configErrorNames.includes(name) ? 4 : 1, error: message };
  }
}
1760
// `parrat run <skill> [input]` — execute one Skill and print its JSON output.
var runCommand = new Command5("run")
  .description("Run a Skill with the given input")
  .argument("<skill>", "Name of the Skill to run (e.g. hello-world)")
  .argument("[input]", "JSON input string for the Skill", "{}")
  .option("--audit-path <path>", "Path to audit log file", ".parrat/audit.jsonl")
  .option(
    "--input-file <path>",
    "Read Skill input from JSON file (alternative to positional argument)"
  )
  .option("--resume <workflow_id>", "Resume a paused workflow (Phase 1+ feature; v1 stub)")
  .action(async (skillName, positionalInputJson, opts) => {
    if (opts.resume) {
      console.error(
        "parrat run --resume is reserved for Phase 1+ composite Skills. v1 has no resumable workflows."
      );
      process.exit(3);
    }
    // Input may come from the positional argument or --input-file, not both.
    let inputJson = positionalInputJson;
    if (opts.inputFile) {
      if (positionalInputJson !== "{}") {
        console.error("Cannot pass both a positional JSON argument and --input-file. Pick one.");
        process.exit(2);
      }
      try {
        inputJson = readFileSync6(opts.inputFile, "utf8");
      } catch (err) {
        const reason = err instanceof Error ? err.message : String(err);
        console.error(`Failed to read --input-file '${opts.inputFile}': ${reason}`);
        process.exit(2);
      }
    }
    const correlationId = process.env.correlation_id ?? process.env.CORRELATION_ID;
    // Config load is best-effort here: without it we simply skip the
    // retention sweep and the idempotency check.
    const config = await loadConfig().catch(() => null);
    if (config) {
      // Fire-and-forget retention sweep; failures are deliberately ignored.
      sweepAuditLog(opts.auditPath, config.audit.retention_days).catch(() => {});
      if (correlationId) {
        const duplicate = await isDuplicateRun(
          opts.auditPath,
          correlationId,
          config.audit.idempotency_window_hours
        );
        if (duplicate) {
          console.log(`Skipped: duplicate correlation_id ${correlationId}`);
          process.exit(0);
        }
      }
    }
    const result = await runSkill({
      skillName,
      inputJson,
      auditPath: opts.auditPath,
      ...correlationId ? { correlationId } : {}
    });
    if (result.error) console.error(result.error);
    if (result.output !== void 0) console.log(JSON.stringify(result.output, null, 2));
    if (result.exitCode !== 0) process.exit(result.exitCode);
  });
1820
+
1821
// src/cli/skills.ts
import { Command as Command6 } from "commander";
// Names of every registered built-in Skill.
function listSkillNames() {
  const registry = createRegistry(skills);
  return registry.list();
}
// `parrat skills list` — print one Skill name per line.
var listCommand = new Command6("list")
  .description("List all installed Skills")
  .action(() => {
    const names = listSkillNames();
    if (names.length === 0) {
      console.log("No Skills installed.");
      return;
    }
    names.forEach((name) => console.log(name));
  });
var skillsCommand = new Command6("skills")
  .description("Manage Parrat Skills")
  .addCommand(listCommand);
1837
+
1838
+ // src/cli/watch.ts
1839
+ import { resolve as resolve5 } from "path";
1840
+ import { Command as Command7 } from "commander";
1841
+
1842
// src/core/notify/slack.ts
// Minimal Slack incoming-webhook client. send() throws on network failure
// and on any non-2xx response so callers can surface notification problems.
var SlackNotifier = class {
  webhookUrl;
  constructor(webhookUrl) {
    this.webhookUrl = webhookUrl;
  }
  async send(message) {
    const response = await fetch(this.webhookUrl, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(message)
    }).catch((err) => {
      throw new Error(`Slack webhook POST failed: ${err instanceof Error ? err.message : String(err)}`);
    });
    if (response.ok) return;
    // Include the response body in the error when we can read it.
    const body = await response.text().catch(() => "");
    throw new Error(`Slack webhook returned ${response.status}: ${body}`);
  }
};
1865
+
1866
// src/cli/watch.ts
// Render a Skill run result as a plain-text Slack message. Failures include
// the error text; successes include the pretty-printed JSON output,
// truncated to 2000 characters.
function formatSlackMessage(skillName, output, error) {
  if (error) {
    return `[parrat] ${skillName} | FAILED\n${error}`;
  }
  // JSON.stringify(undefined) yields undefined (not a string), which would
  // crash on .length below; fall back to "null" for a missing output.
  const json = JSON.stringify(output, null, 2) ?? "null";
  const body = json.length > 2e3 ? `${json.slice(0, 2e3)}\n...(truncated)` : json;
  return `[parrat] ${skillName} | OK\n${body}`;
}
1878
// Run the configured watch Skill once and (optionally) notify Slack.
// Returns { exitCode, error? }. A Slack delivery failure forces exitCode 1
// even when the Skill itself succeeded.
async function watchSkill(options) {
  const { config, auditPath } = options;
  const watchConfig = config.watch;
  if (!watchConfig) {
    return {
      exitCode: 1,
      error: "No 'watch' section in config. Add watch.skill and watch.input to .parrat/config.yaml."
    };
  }
  const runResult = await runSkill({
    skillName: watchConfig.skill,
    inputJson: JSON.stringify(watchConfig.input),
    auditPath: resolve5(auditPath)
  });
  const webhookUrl = config.notify?.slack?.webhook_url;
  if (webhookUrl) {
    try {
      await new SlackNotifier(webhookUrl).send({
        text: formatSlackMessage(watchConfig.skill, runResult.output, runResult.error)
      });
    } catch (err) {
      const notifyError = err instanceof Error ? err.message : String(err);
      const outcome = runResult.exitCode === 0 ? "succeeded" : "failed";
      return {
        exitCode: 1,
        error: `Skill ${outcome} but Slack notification failed: ${notifyError}`
      };
    }
  }
  const summary = { exitCode: runResult.exitCode };
  if (runResult.error) summary.error = runResult.error;
  return summary;
}
1911
// `parrat watch` — run the configured watch Skill once; scheduling is left
// to cron / Task Scheduler.
var watchCommand = new Command7("watch")
  .description("Run the configured watch skill once (schedule via cron / Task Scheduler)")
  .option("--audit-path <path>", "Path to audit log file", ".parrat/audit.jsonl")
  .action(async (opts) => {
    let config;
    try {
      config = await loadConfig();
    } catch (err) {
      console.error(err instanceof Error ? err.message : String(err));
      process.exit(4);
    }
    // Fire-and-forget retention sweep; failures are deliberately ignored.
    sweepAuditLog(opts.auditPath, config.audit.retention_days).catch(() => {});
    const correlationId = process.env.correlation_id ?? process.env.CORRELATION_ID;
    if (correlationId) {
      const duplicate = await isDuplicateRun(
        opts.auditPath,
        correlationId,
        config.audit.idempotency_window_hours
      );
      if (duplicate) {
        console.log(`Skipped: duplicate correlation_id ${correlationId}`);
        process.exit(0);
      }
    }
    const result = await watchSkill({ config, auditPath: opts.auditPath });
    if (result.error) console.error(result.error);
    if (result.exitCode !== 0) process.exit(result.exitCode);
  });
1941
+
1942
+ // src/cli/webhook.ts
1943
+ import { createServer } from "http";
1944
+ import { Command as Command8 } from "commander";
1945
// Translate a Monte Carlo freshness alert into a Skill invocation.
// Returns null for anything that is not a freshness alert object.
function mapMonteCarloPayload(body) {
  if (typeof body !== "object" || body === null) return null;
  const alert = body;
  if (alert.alert_type !== "freshness") return null;
  // Monte Carlo payloads name the affected table either `table` or
  // `source_name`; only a non-empty string is forwarded.
  const source = alert.table ?? alert.source_name;
  const input = { threshold: "error" };
  if (typeof source === "string" && source.length > 0) {
    input.source = source;
  }
  return { skill: "freshness-investigation", input };
}
1956
// Collect an incoming request's body into a single UTF-8 string.
// Rejects if the request stream errors.
function readBody(req) {
  return new Promise((resolvePromise, reject) => {
    const parts = [];
    req.on("error", reject);
    req.on("data", (part) => {
      parts.push(part);
    });
    req.on("end", () => {
      resolvePromise(Buffer.concat(parts).toString("utf8"));
    });
  });
}
1964
// Write a JSON response with the given HTTP status. The body is serialized
// before writeHead so a stringify failure cannot leave headers half-sent.
function send(res, status, body) {
  const payload = JSON.stringify(body);
  res.writeHead(status, { "Content-Type": "application/json" });
  res.end(payload);
}
1969
// Start the HTTP trigger server. Only POST /trigger is accepted; payloads
// are mapped to a Skill via mapMonteCarloPayload and executed with runSkill.
// Resolves once listening, with { port, close() }.
// Fix: removed the no-op `...slackWebhookUrl ? {} : {}` spread (both
// branches were empty) and the unused notifyConfig/slackWebhookUrl locals
// that only fed it.
function startWebhook(options) {
  const { auditPath, port, secret } = options;
  const server = createServer(async (req, res) => {
    if (req.method !== "POST" || req.url !== "/trigger") {
      send(res, 404, { error: "Not found. Only POST /trigger is supported." });
      return;
    }
    // Shared-secret check, enabled only when a secret is configured.
    if (secret) {
      const header = req.headers["x-parrat-secret"];
      if (header !== secret) {
        send(res, 401, { error: "Unauthorized. X-Parrat-Secret header missing or incorrect." });
        return;
      }
    }
    let body;
    try {
      body = JSON.parse(await readBody(req));
    } catch {
      send(res, 400, { error: "Invalid JSON body." });
      return;
    }
    const mapped = mapMonteCarloPayload(body);
    if (!mapped) {
      send(res, 400, {
        error: "Unrecognised payload format. Expected a Monte Carlo freshness alert."
      });
      return;
    }
    const result = await runSkill({
      skillName: mapped.skill,
      inputJson: JSON.stringify(mapped.input),
      auditPath
    });
    if (result.exitCode === 0) {
      send(res, 200, { ok: true, output: result.output });
    } else {
      send(res, 500, { error: result.error ?? "Skill execution failed." });
    }
  });
  return new Promise((resolve6) => {
    server.listen(port, () => {
      const addr = server.address();
      console.log(`parrat webhook listening on port ${addr.port}`);
      resolve6({
        port: addr.port,
        close() {
          server.close();
        }
      });
    });
  });
}
2025
// `parrat webhook` — start the external-trigger HTTP listener and shut it
// down cleanly on SIGINT/SIGTERM.
var webhookCommand = new Command8("webhook")
  .description("Start an HTTP listener that accepts external alert triggers (e.g. Monte Carlo)")
  .option("--port <number>", "Port to listen on (overrides config)", (v) => Number.parseInt(v, 10))
  .option("--audit-path <path>", "Path to audit log file", ".parrat/audit.jsonl")
  .action(async (opts) => {
    const config = await loadConfig();
    // CLI flag wins over config; 8080 is the fallback.
    const port = opts.port ?? config.webhook?.port ?? 8080;
    const secret = config.webhook?.secret;
    const webhook = await startWebhook({
      config,
      auditPath: opts.auditPath,
      port,
      ...secret ? { secret } : {}
    });
    const shutdown = () => {
      webhook.close();
      process.exit(0);
    };
    process.on("SIGINT", shutdown);
    process.on("SIGTERM", shutdown);
  });
2044
+
2045
// src/index.ts
// CLI entry point: register every sub-command on the root program, then
// parse the live process arguments. parseAsync is awaited via top-level
// await so async command actions finish before the process exits.
var program = new Command9().name("parrat").description("Claude-native cross-stack agent for data ops").version("0.1.0-beta.5");
program.addCommand(doctorCommand);
program.addCommand(initCommand);
program.addCommand(runCommand);
program.addCommand(skillsCommand);
program.addCommand(replayCommand);
program.addCommand(watchCommand);
program.addCommand(auditCommand);
program.addCommand(webhookCommand);
await program.parseAsync(process.argv);
//# sourceMappingURL=index.js.map