@pauly4010/evalai-sdk 1.8.0 → 1.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/CHANGELOG.md +54 -0
  2. package/README.md +136 -23
  3. package/dist/assertions.js +51 -18
  4. package/dist/batch.js +8 -2
  5. package/dist/cli/api.js +3 -1
  6. package/dist/cli/check.js +19 -6
  7. package/dist/cli/ci-context.js +3 -1
  8. package/dist/cli/ci.d.ts +45 -0
  9. package/dist/cli/ci.js +192 -0
  10. package/dist/cli/config.js +28 -8
  11. package/dist/cli/diff.d.ts +173 -0
  12. package/dist/cli/diff.js +685 -0
  13. package/dist/cli/discover.d.ts +84 -0
  14. package/dist/cli/discover.js +419 -0
  15. package/dist/cli/doctor.js +62 -19
  16. package/dist/cli/env.d.ts +21 -0
  17. package/dist/cli/env.js +42 -0
  18. package/dist/cli/explain.js +168 -36
  19. package/dist/cli/formatters/human.js +4 -1
  20. package/dist/cli/formatters/pr-comment.js +3 -1
  21. package/dist/cli/gate.js +6 -2
  22. package/dist/cli/impact-analysis.d.ts +63 -0
  23. package/dist/cli/impact-analysis.js +252 -0
  24. package/dist/cli/index.js +185 -0
  25. package/dist/cli/manifest.d.ts +103 -0
  26. package/dist/cli/manifest.js +282 -0
  27. package/dist/cli/migrate.d.ts +41 -0
  28. package/dist/cli/migrate.js +349 -0
  29. package/dist/cli/policy-packs.js +8 -2
  30. package/dist/cli/print-config.js +33 -14
  31. package/dist/cli/regression-gate.js +8 -2
  32. package/dist/cli/report/build-check-report.js +8 -2
  33. package/dist/cli/run.d.ts +101 -0
  34. package/dist/cli/run.js +395 -0
  35. package/dist/cli/share.js +3 -1
  36. package/dist/cli/upgrade.js +2 -1
  37. package/dist/cli/workspace.d.ts +28 -0
  38. package/dist/cli/workspace.js +58 -0
  39. package/dist/client.d.ts +16 -19
  40. package/dist/client.js +60 -43
  41. package/dist/client.request.test.d.ts +1 -1
  42. package/dist/client.request.test.js +222 -147
  43. package/dist/context.js +3 -1
  44. package/dist/errors.js +11 -4
  45. package/dist/export.js +3 -1
  46. package/dist/index.d.ts +8 -2
  47. package/dist/index.js +30 -5
  48. package/dist/integrations/anthropic.d.ts +20 -1
  49. package/dist/integrations/openai-eval.js +4 -2
  50. package/dist/integrations/openai.d.ts +24 -1
  51. package/dist/local.js +3 -1
  52. package/dist/logger.js +6 -2
  53. package/dist/pagination.js +6 -2
  54. package/dist/runtime/adapters/config-to-dsl.d.ts +33 -0
  55. package/dist/runtime/adapters/config-to-dsl.js +394 -0
  56. package/dist/runtime/adapters/testsuite-to-dsl.d.ts +63 -0
  57. package/dist/runtime/adapters/testsuite-to-dsl.js +276 -0
  58. package/dist/runtime/context.d.ts +26 -0
  59. package/dist/runtime/context.js +74 -0
  60. package/dist/runtime/eval.d.ts +46 -0
  61. package/dist/runtime/eval.js +244 -0
  62. package/dist/runtime/execution-mode.d.ts +80 -0
  63. package/dist/runtime/execution-mode.js +357 -0
  64. package/dist/runtime/executor.d.ts +16 -0
  65. package/dist/runtime/executor.js +152 -0
  66. package/dist/runtime/registry.d.ts +78 -0
  67. package/dist/runtime/registry.js +403 -0
  68. package/dist/runtime/run-report.d.ts +200 -0
  69. package/dist/runtime/run-report.js +222 -0
  70. package/dist/runtime/types.d.ts +356 -0
  71. package/dist/runtime/types.js +76 -0
  72. package/dist/testing.d.ts +65 -0
  73. package/dist/testing.js +49 -2
  74. package/dist/types.d.ts +100 -69
  75. package/dist/utils/input-hash.js +4 -1
  76. package/dist/version.d.ts +1 -1
  77. package/dist/version.js +1 -1
  78. package/dist/workflows.js +62 -14
  79. package/package.json +115 -110
@@ -0,0 +1,42 @@
1
+ "use strict";
2
+ /**
3
+ * CORE-401: Centralized environment detection
4
+ *
5
+ * Provides unified environment detection for all EvalAI CLI commands
6
+ */
7
+ Object.defineProperty(exports, "__esModule", { value: true });
8
+ exports.isCI = isCI;
9
+ exports.isGitHubActions = isGitHubActions;
10
+ exports.getGitHubStepSummaryPath = getGitHubStepSummaryPath;
11
+ exports.isGitRef = isGitRef;
12
/**
 * Check if running in a CI environment.
 *
 * Returns true when any well-known CI indicator variable is set to a
 * non-empty value (GitHub Actions, generic CI, Buildkite, CircleCI,
 * Travis, Jenkins).
 */
function isCI() {
    // Each supported provider advertises itself via one of these variables.
    const indicators = [
        "GITHUB_ACTIONS",
        "CI",
        "CONTINUOUS_INTEGRATION",
        "BUILDKITE",
        "CIRCLECI",
        "TRAVIS",
        "JENKINS_URL",
    ];
    return indicators.some((name) => Boolean(process.env[name]));
}
24
/**
 * Check if running in GitHub Actions.
 *
 * True when GITHUB_ACTIONS is set to a non-empty value.
 */
function isGitHubActions() {
    const value = process.env.GITHUB_ACTIONS;
    return value !== undefined && value !== "";
}
30
/**
 * Get the GitHub Step Summary file path, if available.
 *
 * Returns the value of GITHUB_STEP_SUMMARY (the file GitHub Actions
 * provides for writing job summaries), or undefined when not running
 * under GitHub Actions.
 */
function getGitHubStepSummaryPath() {
    const { GITHUB_STEP_SUMMARY } = process.env;
    return GITHUB_STEP_SUMMARY;
}
36
/**
 * Check if a string looks like a git reference.
 *
 * Heuristic prefix match covering: common branch names (main, master,
 * develop, dev), remote-qualified refs (origin/, remotes/), common
 * branch-naming prefixes (feature/, hotfix/, release/), semver tags
 * (vX.Y.Z), and triple-dot ranges (e.g. "main...HEAD").
 *
 * NOTE(review): this is a loose heuristic, not validation — any string
 * starting with "dev" matches, for example. Test-only fixture names
 * ("nonexistent-branch", "test-branch", "ci-branch") that had leaked
 * into the shipped pattern were removed; tests should provide their own
 * refs that match real patterns (or stub this check) instead.
 *
 * @param {string} ref - candidate reference string
 * @returns {boolean} true when ref matches a known git-ref pattern
 */
function isGitRef(ref) {
    const GIT_REF_PATTERN = /^(main|master|develop|dev|origin\/|remotes\/|feature\/|hotfix\/|release\/|v\d+\.\d+\.\d+|.*\.\.\..*)/;
    return GIT_REF_PATTERN.test(ref);
}
@@ -87,7 +87,9 @@ const REPORT_SEARCH_PATHS = [
87
87
  ];
88
88
  function findReport(cwd, explicitPath) {
89
89
  if (explicitPath) {
90
- const abs = path.isAbsolute(explicitPath) ? explicitPath : path.join(cwd, explicitPath);
90
+ const abs = path.isAbsolute(explicitPath)
91
+ ? explicitPath
92
+ : path.join(cwd, explicitPath);
91
93
  return fs.existsSync(abs) ? abs : null;
92
94
  }
93
95
  for (const rel of REPORT_SEARCH_PATHS) {
@@ -115,11 +117,13 @@ function classifyRootCauses(report) {
115
117
  causes.push("cost_regression");
116
118
  }
117
119
  // Latency regression
118
- if (reasonCode === "LATENCY_BUDGET_EXCEEDED" || reasonCode === "LATENCY_RISK") {
120
+ if (reasonCode === "LATENCY_BUDGET_EXCEEDED" ||
121
+ reasonCode === "LATENCY_RISK") {
119
122
  causes.push("latency_regression");
120
123
  }
121
124
  // Coverage drop (test count decreased)
122
- if (reasonCode === "LOW_SAMPLE_SIZE" || reasonCode === "INSUFFICIENT_EVIDENCE") {
125
+ if (reasonCode === "LOW_SAMPLE_SIZE" ||
126
+ reasonCode === "INSUFFICIENT_EVIDENCE") {
123
127
  causes.push("coverage_drop");
124
128
  }
125
129
  // Analyze failed cases for drift patterns
@@ -173,52 +177,164 @@ function classifyRootCauses(report) {
173
177
  // ── Suggested fixes ──
174
178
  const ROOT_CAUSE_FIXES = {
175
179
  prompt_drift: [
176
- { action: "Review prompt changes", detail: "Compare current prompt with the version used in baseline run. Diff system/user messages.", priority: "high" },
177
- { action: "Pin model version", detail: "Use a specific model snapshot (e.g. gpt-4-0613) instead of a rolling alias.", priority: "medium" },
178
- { action: "Update baseline", detail: "If changes are intentional, run: npx evalai baseline update", priority: "low" },
180
+ {
181
+ action: "Review prompt changes",
182
+ detail: "Compare current prompt with the version used in baseline run. Diff system/user messages.",
183
+ priority: "high",
184
+ },
185
+ {
186
+ action: "Pin model version",
187
+ detail: "Use a specific model snapshot (e.g. gpt-4-0613) instead of a rolling alias.",
188
+ priority: "medium",
189
+ },
190
+ {
191
+ action: "Update baseline",
192
+ detail: "If changes are intentional, run: npx evalai baseline update",
193
+ priority: "low",
194
+ },
179
195
  ],
180
196
  retrieval_drift: [
181
- { action: "Check retrieval pipeline", detail: "Verify embeddings, index, and chunk strategy haven't changed.", priority: "high" },
182
- { action: "Update test case context", detail: "If knowledge base changed, update expected outputs in test cases.", priority: "medium" },
183
- { action: "Add retrieval-specific tests", detail: "Add test cases that verify document retrieval before generation.", priority: "low" },
197
+ {
198
+ action: "Check retrieval pipeline",
199
+ detail: "Verify embeddings, index, and chunk strategy haven't changed.",
200
+ priority: "high",
201
+ },
202
+ {
203
+ action: "Update test case context",
204
+ detail: "If knowledge base changed, update expected outputs in test cases.",
205
+ priority: "medium",
206
+ },
207
+ {
208
+ action: "Add retrieval-specific tests",
209
+ detail: "Add test cases that verify document retrieval before generation.",
210
+ priority: "low",
211
+ },
184
212
  ],
185
213
  formatting_drift: [
186
- { action: "Update output format instructions", detail: "Check if system prompt format instructions match expected output structure.", priority: "high" },
187
- { action: "Add format validators", detail: "Use schema assertions to validate output structure (JSON schema, regex).", priority: "medium" },
188
- { action: "Refresh baseline", detail: "If new format is intentional, run: npx evalai baseline update", priority: "low" },
214
+ {
215
+ action: "Update output format instructions",
216
+ detail: "Check if system prompt format instructions match expected output structure.",
217
+ priority: "high",
218
+ },
219
+ {
220
+ action: "Add format validators",
221
+ detail: "Use schema assertions to validate output structure (JSON schema, regex).",
222
+ priority: "medium",
223
+ },
224
+ {
225
+ action: "Refresh baseline",
226
+ detail: "If new format is intentional, run: npx evalai baseline update",
227
+ priority: "low",
228
+ },
189
229
  ],
190
230
  tool_use_drift: [
191
- { action: "Verify tool definitions", detail: "Check that tool/function schemas match what the model expects.", priority: "high" },
192
- { action: "Review tool call patterns", detail: "Compare tool call sequences in failing vs passing cases.", priority: "medium" },
193
- { action: "Add tool-use assertions", detail: "Assert specific tool calls are made (or not made) per test case.", priority: "low" },
231
+ {
232
+ action: "Verify tool definitions",
233
+ detail: "Check that tool/function schemas match what the model expects.",
234
+ priority: "high",
235
+ },
236
+ {
237
+ action: "Review tool call patterns",
238
+ detail: "Compare tool call sequences in failing vs passing cases.",
239
+ priority: "medium",
240
+ },
241
+ {
242
+ action: "Add tool-use assertions",
243
+ detail: "Assert specific tool calls are made (or not made) per test case.",
244
+ priority: "low",
245
+ },
194
246
  ],
195
247
  safety_regression: [
196
- { action: "Review safety assertions", detail: "Check which safety test cases are failing and why.", priority: "high" },
197
- { action: "Strengthen guardrails", detail: "Add or update content filters, system prompt safety instructions.", priority: "high" },
198
- { action: "Update rubric", detail: "If safety criteria changed, update the LLM judge rubric.", priority: "medium" },
248
+ {
249
+ action: "Review safety assertions",
250
+ detail: "Check which safety test cases are failing and why.",
251
+ priority: "high",
252
+ },
253
+ {
254
+ action: "Strengthen guardrails",
255
+ detail: "Add or update content filters, system prompt safety instructions.",
256
+ priority: "high",
257
+ },
258
+ {
259
+ action: "Update rubric",
260
+ detail: "If safety criteria changed, update the LLM judge rubric.",
261
+ priority: "medium",
262
+ },
199
263
  ],
200
264
  cost_regression: [
201
- { action: "Check token usage", detail: "Compare input/output token counts between baseline and current run.", priority: "high" },
202
- { action: "Optimize prompts", detail: "Reduce prompt length or use a smaller model for non-critical paths.", priority: "medium" },
203
- { action: "Update cost budget", detail: "If higher cost is expected, adjust --max-cost-usd threshold.", priority: "low" },
265
+ {
266
+ action: "Check token usage",
267
+ detail: "Compare input/output token counts between baseline and current run.",
268
+ priority: "high",
269
+ },
270
+ {
271
+ action: "Optimize prompts",
272
+ detail: "Reduce prompt length or use a smaller model for non-critical paths.",
273
+ priority: "medium",
274
+ },
275
+ {
276
+ action: "Update cost budget",
277
+ detail: "If higher cost is expected, adjust --max-cost-usd threshold.",
278
+ priority: "low",
279
+ },
204
280
  ],
205
281
  latency_regression: [
206
- { action: "Check response times", detail: "Compare per-test-case latency between baseline and current run.", priority: "high" },
207
- { action: "Reduce prompt complexity", detail: "Simplify prompts or use streaming to reduce perceived latency.", priority: "medium" },
208
- { action: "Update latency budget", detail: "If higher latency is expected, adjust --max-latency-ms threshold.", priority: "low" },
282
+ {
283
+ action: "Check response times",
284
+ detail: "Compare per-test-case latency between baseline and current run.",
285
+ priority: "high",
286
+ },
287
+ {
288
+ action: "Reduce prompt complexity",
289
+ detail: "Simplify prompts or use streaming to reduce perceived latency.",
290
+ priority: "medium",
291
+ },
292
+ {
293
+ action: "Update latency budget",
294
+ detail: "If higher latency is expected, adjust --max-latency-ms threshold.",
295
+ priority: "low",
296
+ },
209
297
  ],
210
298
  coverage_drop: [
211
- { action: "Add test cases", detail: "Current test count is below minimum. Add more test cases to the evaluation.", priority: "high" },
212
- { action: "Check test case filtering", detail: "Verify no test cases were accidentally deleted or filtered out.", priority: "medium" },
299
+ {
300
+ action: "Add test cases",
301
+ detail: "Current test count is below minimum. Add more test cases to the evaluation.",
302
+ priority: "high",
303
+ },
304
+ {
305
+ action: "Check test case filtering",
306
+ detail: "Verify no test cases were accidentally deleted or filtered out.",
307
+ priority: "medium",
308
+ },
213
309
  ],
214
310
  baseline_stale: [
215
- { action: "Create baseline", detail: "Run: npx evalai baseline init (or publish a run from the dashboard)", priority: "high" },
216
- { action: "Use --baseline previous", detail: "Compare against the previous run instead of a published baseline.", priority: "medium" },
311
+ {
312
+ action: "Create baseline",
313
+ detail: "Run: npx evalai baseline init (or publish a run from the dashboard)",
314
+ priority: "high",
315
+ },
316
+ {
317
+ action: "Use --baseline previous",
318
+ detail: "Compare against the previous run instead of a published baseline.",
319
+ priority: "medium",
320
+ },
217
321
  ],
218
322
  unknown: [
219
- { action: "Run evalai doctor", detail: "Run: npx evalai doctor to check your full CI/CD setup.", priority: "high" },
220
- { action: "Check logs", detail: "Review CI logs for errors or unexpected behavior.", priority: "medium" },
221
- { action: "Update baseline", detail: "If changes are intentional, run: npx evalai baseline update", priority: "low" },
323
+ {
324
+ action: "Run evalai doctor",
325
+ detail: "Run: npx evalai doctor to check your full CI/CD setup.",
326
+ priority: "high",
327
+ },
328
+ {
329
+ action: "Check logs",
330
+ detail: "Review CI logs for errors or unexpected behavior.",
331
+ priority: "medium",
332
+ },
333
+ {
334
+ action: "Update baseline",
335
+ detail: "If changes are intentional, run: npx evalai baseline update",
336
+ priority: "low",
337
+ },
222
338
  ],
223
339
  };
224
340
  function suggestFixes(causes) {
@@ -248,7 +364,9 @@ function buildExplainOutput(report, reportPath) {
248
364
  function buildFromCheckReport(report, reportPath) {
249
365
  const failedCases = report.failedCases ?? [];
250
366
  // Top failures (up to 3)
251
- const topFailures = failedCases.slice(0, 3).map((fc, i) => ({
367
+ const topFailures = failedCases
368
+ .slice(0, 3)
369
+ .map((fc, i) => ({
252
370
  rank: i + 1,
253
371
  name: fc.name,
254
372
  input: fc.inputSnippet || fc.input,
@@ -336,7 +454,11 @@ function buildFromBuiltinReport(report, reportPath) {
336
454
  }
337
455
  // ── Output formatting ──
338
456
  function printHuman(output) {
339
- const verdictIcon = output.verdict === "pass" ? "\u2705" : output.verdict === "warn" ? "\u26A0\uFE0F" : "\u274C";
457
+ const verdictIcon = output.verdict === "pass"
458
+ ? "\u2705"
459
+ : output.verdict === "warn"
460
+ ? "\u26A0\uFE0F"
461
+ : "\u274C";
340
462
  console.log(`\n evalai explain\n`);
341
463
  console.log(` ${verdictIcon} Verdict: ${output.verdict.toUpperCase()}`);
342
464
  if (output.score != null) {
@@ -352,7 +474,11 @@ function printHuman(output) {
352
474
  if (output.changes.length > 0) {
353
475
  console.log("\n What changed:");
354
476
  for (const c of output.changes) {
355
- const arrow = c.direction === "worse" ? "\u2193" : c.direction === "better" ? "\u2191" : "\u2192";
477
+ const arrow = c.direction === "worse"
478
+ ? "\u2193"
479
+ : c.direction === "better"
480
+ ? "\u2191"
481
+ : "\u2192";
356
482
  console.log(` ${arrow} ${c.metric}: ${c.baseline} \u2192 ${c.current}`);
357
483
  }
358
484
  }
@@ -382,7 +508,11 @@ function printHuman(output) {
382
508
  if (output.suggestedFixes.length > 0) {
383
509
  console.log("\n Suggested fixes:");
384
510
  for (const fix of output.suggestedFixes) {
385
- const pIcon = fix.priority === "high" ? "\u203C\uFE0F" : fix.priority === "medium" ? "\u2757" : "\u2022";
511
+ const pIcon = fix.priority === "high"
512
+ ? "\u203C\uFE0F"
513
+ : fix.priority === "medium"
514
+ ? "\u2757"
515
+ : "\u2022";
386
516
  console.log(` ${pIcon} ${fix.action}`);
387
517
  console.log(` ${fix.detail}`);
388
518
  }
@@ -413,7 +543,9 @@ async function runExplain(argv) {
413
543
  return 1;
414
544
  }
415
545
  // Schema version compatibility check
416
- const reportSchema = typeof reportData.schemaVersion === "number" ? reportData.schemaVersion : undefined;
546
+ const reportSchema = typeof reportData.schemaVersion === "number"
547
+ ? reportData.schemaVersion
548
+ : undefined;
417
549
  if (reportSchema != null && reportSchema > types_1.CHECK_REPORT_SCHEMA_VERSION) {
418
550
  console.error(`\n \u26A0\uFE0F Report schema version ${reportSchema} is newer than this CLI supports (v${types_1.CHECK_REPORT_SCHEMA_VERSION}).`);
419
551
  console.error(" Update your SDK: npm install @pauly4010/evalai-sdk@latest\n");
@@ -43,7 +43,10 @@ function formatHuman(report) {
43
43
  lines.push("Next: View full report above, fix failing cases, or adjust gate with --minScore / --maxDrop / --warnDrop");
44
44
  }
45
45
  if (report.explain &&
46
- (report.breakdown01 || report.contribPts || report.flags?.length || report.policyEvidence)) {
46
+ (report.breakdown01 ||
47
+ report.contribPts ||
48
+ report.flags?.length ||
49
+ report.policyEvidence)) {
47
50
  lines.push("");
48
51
  lines.push("--- Explain ---");
49
52
  if (report.contribPts) {
@@ -34,7 +34,9 @@ function buildPrComment(report) {
34
34
  }
35
35
  }
36
36
  else {
37
- lines.push(passed ? "## ✅ EvalAI Regression Gate — PASSED" : "## 🚨 EvalAI Regression Gate — FAILED");
37
+ lines.push(passed
38
+ ? "## ✅ EvalAI Regression Gate — PASSED"
39
+ : "## 🚨 EvalAI Regression Gate — FAILED");
38
40
  }
39
41
  lines.push("");
40
42
  // Score + Delta (skip when gate not applied)
package/dist/cli/gate.js CHANGED
@@ -55,7 +55,9 @@ function evaluateGate(args, quality) {
55
55
  reasonMessage: `cost $${costUsd.toFixed(4)} exceeds maxCostUsd $${args.maxCostUsd.toFixed(4)}`,
56
56
  };
57
57
  }
58
- if (args.maxLatencyMs != null && avgLatencyMs != null && avgLatencyMs > args.maxLatencyMs) {
58
+ if (args.maxLatencyMs != null &&
59
+ avgLatencyMs != null &&
60
+ avgLatencyMs > args.maxLatencyMs) {
59
61
  return {
60
62
  exitCode: constants_1.EXIT.SCORE_BELOW,
61
63
  passed: false,
@@ -102,7 +104,9 @@ function evaluateGate(args, quality) {
102
104
  };
103
105
  }
104
106
  // warnDrop: soft warning band; maxDrop: hard fail
105
- if (args.maxDrop !== undefined && regressionDelta !== null && regressionDelta < -args.maxDrop) {
107
+ if (args.maxDrop !== undefined &&
108
+ regressionDelta !== null &&
109
+ regressionDelta < -args.maxDrop) {
106
110
  return {
107
111
  exitCode: constants_1.EXIT.REGRESSION,
108
112
  passed: false,
@@ -0,0 +1,63 @@
1
+ /**
2
+ * TICKET 3 — Impact Analysis CLI Command (v0)
3
+ *
4
+ * Goal: Modal-like perceived speed via incremental intelligence
5
+ *
6
+ * Algorithm v0 (practical, shippable):
7
+ * - Inputs: manifest.json + git diff --name-only base...HEAD
8
+ * - Rules: Direct file mapping, dependency tracking, safe fallback
9
+ * - Output: Human-readable counts + JSON for automation
10
+ */
11
+ import type { EvaluationManifest } from "./manifest";
12
+ /**
13
+ * Impact analysis result
14
+ */
15
+ export interface ImpactAnalysisResult {
16
+ /** Impacted specification IDs */
17
+ impactedSpecIds: string[];
18
+ /** Reason for each impacted spec */
19
+ reasonBySpecId: Record<string, string>;
20
+ /** Changed files that triggered the analysis */
21
+ changedFiles: string[];
22
+ /** Analysis metadata */
23
+ metadata: {
24
+ baseBranch: string;
25
+ totalSpecs: number;
26
+ impactedCount: number;
27
+ analysisTime: number;
28
+ };
29
+ }
30
+ /**
31
+ * Impact analysis options
32
+ */
33
+ export interface ImpactAnalysisOptions {
34
+ /** Base branch to compare against */
35
+ baseBranch: string;
36
+ /** Optional explicit list of changed files (for CI) */
37
+ changedFiles?: string[];
38
+ /** Output format */
39
+ format?: "human" | "json";
40
+ }
41
+ /**
42
+ * Run impact analysis
43
+ */
44
+ export declare function runImpactAnalysis(options: ImpactAnalysisOptions, projectRoot?: string): Promise<ImpactAnalysisResult>;
45
+ /**
46
+ * Analyze impact of changed files
47
+ */
48
+ export declare function analyzeImpact(changedFiles: string[], manifest: EvaluationManifest): {
49
+ impactedSpecIds: string[];
50
+ reasonBySpecId: Record<string, string>;
51
+ };
52
+ /**
53
+ * Print human-readable results
54
+ */
55
+ export declare function printHumanResults(result: ImpactAnalysisResult): void;
56
+ /**
57
+ * Print JSON results
58
+ */
59
+ export declare function printJsonResults(result: ImpactAnalysisResult): void;
60
+ /**
61
+ * CLI entry point
62
+ */
63
+ export declare function runImpactAnalysisCLI(options: ImpactAnalysisOptions): Promise<void>;