@ema.co/mcp-toolkit 1.6.0 → 1.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @ema.co/mcp-toolkit might be problematic. Click here for more details.

@@ -0,0 +1,883 @@
1
+ /**
2
+ * Intent Architect - LLM-driven intent decomposition
3
+ *
4
+ * This module provides the prompt and types for decomposing user requests
5
+ * into structured IntentSpecs using the Intent Architect approach.
6
+ *
7
+ * KEY PRINCIPLE: Intent Architect focuses on WHY + WHAT + CONSTRAINTS
8
+ * It does NOT focus on HOW (mechanisms) - that's the Solver's job.
9
+ *
10
+ * - WHY: Business outcome, success metrics, risks
11
+ * - WHAT: Deliverables, decisions, required evidence
12
+ * - CONSTRAINTS: Hard requirements that limit solution space
13
+ * - HOW: Deferred to Solver (which nodes, which channels)
14
+ *
15
+ * ## Progressive Enhancement (Additive, not Big Bang)
16
+ *
17
+ * Use `selectIntentStrategy()` to determine the appropriate level:
18
+ *
19
+ * 1. **SIMPLE**: Basic prompt, no qualification needed
20
+ * - Clear goal, known channel, no approvals, no taxonomy
21
+ * - Fast, cheap, skip Intent Architect
22
+ *
23
+ * 2. **MODERATE**: Some qualification needed
24
+ * - Has approvals OR has taxonomy OR multiple intents
25
+ * - Use Intent Architect to ask Gate 1-2 questions
26
+ *
27
+ * 3. **COMPLEX**: Full Intent Architect
28
+ * - Custom approvals, taxonomy, multi-step decisions
29
+ * - All 5 gates, full IntentSpec
30
+ *
31
+ * This allows gradual adoption - start simple, escalate as needed.
32
+ */
33
+ import * as fs from "fs";
34
+ import * as path from "path";
35
/**
 * Module-level cache for the parsed gate configuration.
 * Populated by loadGateConfig(); pass forceReload=true there to refresh.
 */
let cachedGateConfig = null;
37
/**
 * Load gate definitions from config file.
 *
 * Config is loaded from resources/config/gates.json and cached in the
 * module-level `cachedGateConfig`. Several candidate paths are probed so the
 * lookup works from both src and dist layouts as well as the process cwd.
 * Falls back to embedded defaults if no config file is found.
 *
 * @param {boolean} [forceReload=false] - If true, reloads even if cached
 * @returns {object} The validated gate configuration
 * @throws {Error} If a config file exists but is unreadable, malformed JSON,
 *   or fails structural validation (missing version/gates or gate fields)
 */
export function loadGateConfig(forceReload = false) {
    if (cachedGateConfig && !forceReload) {
        return cachedGateConfig;
    }
    // Try multiple paths (works in both src and dist)
    const possiblePaths = [
        path.resolve(__dirname, "../../resources/config/gates.json"),
        path.resolve(__dirname, "../../../resources/config/gates.json"),
        path.resolve(process.cwd(), "resources/config/gates.json"),
    ];
    for (const configPath of possiblePaths) {
        if (fs.existsSync(configPath)) {
            try {
                const content = fs.readFileSync(configPath, "utf-8");
                const config = JSON.parse(content);
                // Basic validation
                if (!config.version || !Array.isArray(config.gates) || config.gates.length === 0) {
                    throw new Error("Invalid gate config: missing version or gates array");
                }
                // Validate each gate has required fields
                for (const gate of config.gates) {
                    if (!gate.id || !gate.name || !gate.type || !gate.enforcement) {
                        throw new Error(`Invalid gate ${gate.id}: missing required fields`);
                    }
                }
                cachedGateConfig = config;
                return config;
            }
            catch (err) {
                // Fix: preserve the original error and its stack via `cause`
                // instead of flattening it into the message string alone.
                throw new Error(`Failed to load gate config from ${configPath}: ${err}`, { cause: err });
            }
        }
    }
    // Fallback to embedded defaults (emergency only)
    console.warn("Gate config not found, using embedded defaults");
    cachedGateConfig = getDefaultGateConfig();
    return cachedGateConfig;
}
83
/**
 * Embedded defaults - only used if the config file cannot be found.
 * Mirrors the structure of resources/config/gates.json (version, owner, gates).
 */
function getDefaultGateConfig() {
    const gates = [
        {
            id: 1,
            name: "Modality + Trigger + Consent",
            type: "blocking",
            enforcement: "required",
            base_weight: 0.6,
            asks: ["voice vs email vs chat", "inbound/outbound", "consent requirements"],
            blockers: ["channel_ambiguity", "consent_unknown"],
            triggers: ["voice", "call", "phone", "outbound", "inbound"],
            default_value: null,
        },
        {
            id: 2,
            name: "Outcome + Qualification Definition",
            type: "blocking",
            enforcement: "required",
            base_weight: 0.6,
            asks: ["what 'qualified' means", "disqualifiers"],
            blockers: ["qualification_definition_missing", "outcome_unclear"],
            triggers: ["qualify", "qualified", "fit", "assess"],
            default_value: null,
        },
        {
            id: 3,
            name: "Use-case Taxonomy + Content Scoping",
            type: "discovery",
            enforcement: "recommended",
            base_weight: 0.4,
            asks: ["taxonomy source", "KB sources"],
            blockers: ["taxonomy_unknown"],
            triggers: ["use-case", "category", "classify"],
            default_value: "general_category",
        },
        {
            id: 4,
            name: "Governance / HITL Semantics",
            type: "blocking",
            enforcement: "required",
            base_weight: 0.6,
            asks: ["who approves", "SLA", "fallback"],
            blockers: ["approver_unknown", "approval_semantics_unclear"],
            triggers: ["approval", "review", "hitl", "human"],
            default_value: null,
        },
        {
            id: 5,
            name: "Integrations + Systems of Record",
            type: "informational",
            enforcement: "optional",
            base_weight: 0.2,
            asks: ["CRM", "email", "telephony"],
            blockers: ["integration_requirements_unknown"],
            triggers: ["salesforce", "crm", "hubspot"],
            default_value: "none",
        },
    ];
    return {
        version: "1.0.0-fallback",
        owner: "embedded",
        gates,
    };
}
97
/**
 * Get gate definitions as a record keyed by gate id (for backward
 * compatibility). Loads from the config file via loadGateConfig().
 */
export function getGateDefinitions() {
    const { gates } = loadGateConfig();
    return Object.fromEntries(gates.map((gate) => [
        gate.id,
        {
            id: gate.id,
            name: gate.name,
            type: gate.type,
            enforcement: gate.enforcement,
            asks: gate.asks,
            blockers: gate.blockers,
            triggers: gate.triggers,
            default_value: gate.default_value ?? undefined,
        },
    ]));
}
118
// ─────────────────────────────────────────────────────────────────────────────
// Prompt Loading
// ─────────────────────────────────────────────────────────────────────────────
/** Cache for the Intent Architect system prompt, loaded once from disk. */
let _cachedPrompt = null;
122
/**
 * Load the Intent Architect prompt from the markdown file.
 *
 * The content is cached after the first successful read. If the file cannot
 * be read, an embedded summary is returned (without caching, so a later call
 * may still pick up the file).
 */
export function getIntentArchitectPrompt() {
    if (!_cachedPrompt) {
        const promptPath = path.join(__dirname, "../prompts/intent-architect.md");
        try {
            _cachedPrompt = fs.readFileSync(promptPath, "utf-8");
        }
        catch (e) {
            // Fallback: return embedded prompt summary
            console.warn("[IntentArchitect] Could not load prompt file, using embedded fallback");
            return EMBEDDED_PROMPT_SUMMARY;
        }
    }
    return _cachedPrompt;
}
139
// Embedded fallback in case file isn't available (e.g., in bundled contexts).
// Condensed version of the full prompt; returned by getIntentArchitectPrompt()
// when the markdown prompt file cannot be read.
const EMBEDDED_PROMPT_SUMMARY = `You are "Intent Architect", an expert at turning vague user requests into a precise, testable IntentSpec.

Your job is NOT to immediately generate a workflow. Your job is to:
1) Extract and refine intent (WHY → WHAT → HOW)
2) Identify missing qualification details
3) Ask minimal high-information questions
4) Produce a structured IntentSpec
5) Map to candidate graph skeleton only after critical unknowns resolved

Key principles:
- WHY = business outcome + success metrics + risks
- WHAT = deliverables + decisions + required evidence
- HOW = channel + policies + constraints + integrations + approvals
- Never use platform jargon without translating
- Make "use-case identification" explicit and concrete

Output format: A) Intent Hypothesis, B) IntentSpec (YAML), C) Qualification Questions, D) Candidate Graph, E) Convergence Plan`;
157
/**
 * Generate the user prompt for a specific request.
 *
 * @param {string} userRequest - Raw user request text, quoted verbatim.
 * @param {object} [context] - Optional known context: persona_type,
 *   available_integrations, existing_answers, action_catalog.
 * @returns {string} The assembled user prompt (sections: User Request,
 *   optional Known Context, optional Available Actions, Task).
 */
export function generateIntentArchitectUserPrompt(userRequest, context) {
    const parts = [`## User Request\n\n"${userRequest}"\n\n`];
    if (context) {
        parts.push(`## Known Context\n\n`);
        if (context.persona_type) {
            const label = context.persona_type === "voice"
                ? "phone/call"
                : context.persona_type === "chat"
                    ? "text-based chat"
                    : "dashboard/async";
            parts.push(`- Interaction type: ${context.persona_type.toUpperCase()} (${label})\n`);
        }
        if (context.available_integrations?.length) {
            parts.push(`- Available integrations: ${context.available_integrations.join(", ")}\n`);
        }
        const answers = context.existing_answers ? Object.entries(context.existing_answers) : [];
        if (answers.length > 0) {
            parts.push(`\n### Previously Answered Questions\n`);
            for (const [question, answer] of answers) {
                parts.push(`- ${question}: ${answer}\n`);
            }
        }
        parts.push("\n");
    }
    if (context?.action_catalog) {
        parts.push(`## Available Actions (for graph generation)\n\n${context.action_catalog}\n\n`);
    }
    parts.push(`## Task

Analyze this request and produce sections A–E as specified in your instructions.

Focus on:
1. Understanding the real business goal (WHY), not just the stated mechanism
2. Identifying ALL decisions that need to be made and their required evidence
3. Making "use-case identification" concrete if mentioned
4. Specifying exactly who approves what, where, and with what SLA
5. Asking minimal, high-information questions tied to specific IntentSpec fields

Return your response in the structured format (A through E).`);
    return parts.join("");
}
196
+ // ─────────────────────────────────────────────────────────────────────────────
197
+ // Response Parsing
198
+ // ─────────────────────────────────────────────────────────────────────────────
199
/**
 * Parse the LLM response to extract structured data.
 * Note: This is best-effort - the LLM output is semi-structured.
 *
 * @param {string} response - Raw LLM response text (sections A-E).
 * @returns {object} Partial result; may contain intent_spec (raw YAML text
 *   only), qualification_questions, and/or candidate_graph.
 */
export function parseIntentArchitectResponse(response) {
    const result = {};
    // Try to extract YAML from section B
    const yamlMatch = response.match(/```yaml\s*([\s\S]*?)\s*```/);
    if (yamlMatch) {
        // Note: In production, use a proper YAML parser.
        // For now we just keep the raw YAML text for reference. The previous
        // try/catch here was dead code - this assignment cannot throw.
        result.intent_spec = { _raw_yaml: yamlMatch[1] };
    }
    // Extract qualification questions; matches both "**Q1**: **text**"
    // and the plainer "Q1: text" form.
    const questions = [];
    const questionPattern = /\*\*Q(\d+)\*\*[:\s]*\*\*([^*]+)\*\*|Q(\d+)[:\s]+([^\n]+)/g;
    let match;
    while ((match = questionPattern.exec(response)) !== null) {
        const id = match[1] || match[3];
        const question = (match[2] || match[4])?.trim();
        if (id && question) {
            questions.push({
                id: `Q${id}`,
                question,
                tied_to_field: "", // Would need more sophisticated parsing
                category: "WHAT", // Default
            });
        }
    }
    if (questions.length > 0) {
        result.qualification_questions = questions;
    }
    // Check for deferred graph
    if (response.toLowerCase().includes("deferred") || response.toLowerCase().includes("blocking")) {
        result.candidate_graph = {
            status: "deferred",
            deferred_reason: "Critical unknowns must be resolved first",
        };
    }
    return result;
}
245
/**
 * Get the complete prompt package for Intent Architect: the cached system
 * prompt plus a request-specific user prompt.
 */
export function getIntentArchitectPromptPackage(userRequest, context) {
    const system = getIntentArchitectPrompt();
    const user = generateIntentArchitectUserPrompt(userRequest, context);
    return { system, user };
}
254
/**
 * Convert ComplexityAssessment to legacy IntentComplexity enum.
 * Thresholds: level < 0.34 → "simple", level < 0.67 → "moderate",
 * otherwise "complex".
 */
export function complexityAssessmentToEnum(assessment) {
    const { level } = assessment;
    if (level < 0.34) {
        return "simple";
    }
    return level < 0.67 ? "moderate" : "complex";
}
264
/**
 * Convert legacy IntentComplexity enum to a ComplexityAssessment.
 *
 * @param {"simple"|"moderate"|"complex"} complexity - Legacy enum value.
 * @param {string} [rationale] - Overall rationale recorded on the assessment.
 */
export function enumToComplexityAssessment(complexity, rationale = "Converted from legacy enum") {
    const level = { simple: 0.2, moderate: 0.5, complex: 0.8 }[complexity];
    return {
        level,
        confidence: {
            kind: "confidence",
            value: 0.7, // Medium confidence for enum-based detection
            scale: "0_1",
            rationale: "Converted from legacy enum",
        },
        signals: [],
        blockers: [],
        rationale,
    };
}
286
/**
 * Gate definitions loaded from config.
 *
 * @deprecated Use getGateDefinitions() instead - this is kept for backward compatibility.
 * Gate definitions are now loaded from resources/config/gates.json
 */
export const GATE_DEFINITIONS = (() => {
    // Lazy initialization from config
    try {
        return getGateDefinitions();
    }
    catch {
        // Minimal table-driven fallback if config loading fails at module load time
        const fallback = {};
        const rows = [
            [1, "Modality", "blocking", "required"],
            [2, "Qualification", "blocking", "required"],
            [3, "Taxonomy", "discovery", "recommended"],
            [4, "Governance", "blocking", "required"],
            [5, "Integrations", "informational", "optional"],
        ];
        for (const [id, name, type, enforcement] of rows) {
            fallback[id] = { id, name, type, enforcement, asks: [], blockers: [], triggers: [] };
        }
        return fallback;
    }
})();
308
/**
 * Default base weights by enforcement level.
 * These are used if a gate doesn't specify a base_weight in config.
 * Keys mirror the gate `enforcement` values; consumed by computeGateWeights().
 */
const DEFAULT_BASE_WEIGHTS = {
    required: 0.6,
    recommended: 0.4,
    optional: 0.2,
};
317
/**
 * Compute weights for a gate based on input and context.
 *
 * Uses gate config from resources/config/gates.json for:
 * - base_weight (or defaults based on enforcement level)
 * - triggers (signals that boost weight)
 *
 * @returns {{base: number, signal_boost: number, context_multiplier: number, final_score: number}}
 */
export function computeGateWeights(gateId, input, context) {
    const gateConfig = loadGateConfig().gates.find((g) => g.id === gateId);
    const gate = getGateDefinitions()[gateId];
    if (!gate) {
        return { base: 0, signal_boost: 0, context_multiplier: 1, final_score: 0 };
    }
    const inputLower = input.toLowerCase();
    // Base weight from config (or default based on enforcement)
    const base = gateConfig?.base_weight ?? DEFAULT_BASE_WEIGHTS[gate.enforcement];
    // Signal boost: each trigger keyword found adds 0.2, capped at 1.0
    const hits = gate.triggers.filter((t) => inputLower.includes(t.toLowerCase())).length;
    const signal_boost = Math.min(1.0, hits * 0.2);
    // Context multiplier: domain-specific boosts for particular gates
    let context_multiplier = 1.0;
    if (context) {
        if (context.persona_type === "voice" && gateId === 1) {
            context_multiplier *= 1.5; // Voice personas boost Gate 1 (modality)
        }
        if (context.compliance_mode && gateId === 4) {
            context_multiplier *= 2.0; // Compliance mode boosts Gate 4 (governance)
        }
        if (context.outbound_mode && gateId === 1) {
            context_multiplier *= 1.5; // Outbound mode boosts Gate 1 (consent)
        }
        const custom = context.custom_multipliers?.[gateId];
        if (custom) {
            context_multiplier *= custom; // Caller-supplied per-gate multipliers
        }
    }
    const final_score = base * (1 + signal_boost) * context_multiplier;
    return { base, signal_boost, context_multiplier, final_score };
}
360
/**
 * Get gates with computed weights, sorted by final score (descending).
 * Loads gate definitions from config.
 */
export function getGatesWithWeights(input, context) {
    const weighted = [];
    for (const gate of Object.values(getGateDefinitions())) {
        weighted.push({
            ...gate,
            weights: computeGateWeights(gate.id, input, context),
        });
    }
    weighted.sort((a, b) => b.weights.final_score - a.weights.final_score);
    return weighted;
}
373
/**
 * Get gates that should be asked: any gate whose final score meets the
 * threshold, plus every "blocking" gate regardless of score.
 */
export function getRequiredGates(input, context, threshold = 0.5) {
    const weighted = getGatesWithWeights(input, context);
    return weighted.filter((gate) => gate.type === "blocking" || gate.weights.final_score >= threshold);
}
380
/**
 * Get discovery questions - gates where score is moderate (uncertain).
 * These trigger more exploration rather than blocking.
 */
export function getDiscoveryGates(input, context, uncertaintyRange = { min: 0.3, max: 0.7 }) {
    const { min, max } = uncertaintyRange;
    return getGatesWithWeights(input, context).filter((gate) => {
        if (gate.type !== "discovery") {
            return false;
        }
        const score = gate.weights.final_score;
        return score >= min && score <= max;
    });
}
390
+ // ─────────────────────────────────────────────────────────────────────────────
391
+ // LLM-DRIVEN DETECTION (Primary)
392
+ // ─────────────────────────────────────────────────────────────────────────────
393
/**
 * Prompt for LLM-based intent signal detection.
 *
 * The caller should send this to their LLM and parse the response
 * using parseComplexitySignalsFromLLM().
 *
 * @param {string} input - Raw user request text; interpolated verbatim into
 *   the user message.
 * @returns {{system: string, user: string}} Chat-style prompt pair; the
 *   system message instructs the model to return JSON only.
 */
export function getComplexityDetectionPrompt(input) {
    return {
        system: `You are an intent classifier. Analyze the user request and detect INTENT signals (WHY/WHAT), NOT mechanisms (HOW).

INTENT signals to detect:
- governance: needs human approval, review, oversight, sign-off
- decision: needs classification, categorization, routing, triage
- qualification: needs evaluation, assessment, scoring, validation
- multi_step: multiple sequential actions, outcomes
- conditional: branching logic, different paths based on conditions
- extraction: capture structured data, collect information

DO NOT detect mechanisms (these are HOW, not WHY/WHAT):
- "email" → mechanism, not intent
- "Salesforce" → mechanism, not intent
- "Slack" → mechanism, not intent

Return JSON only:
{
"governance": true/false,
"decision": true/false,
"qualification": true/false,
"multi_step": true/false,
"conditional": true/false,
"extraction": true/false,
"reasoning": "brief explanation"
}`,
        user: `Analyze this request for INTENT signals:\n\n"${input}"`,
    };
}
429
/**
 * Parse LLM response for complexity signals.
 * Returns null when no JSON object can be found or anything fails to parse.
 */
export function parseComplexitySignalsFromLLM(llmResponse) {
    try {
        // Best-effort: grab the outermost {...} span from the response text.
        const jsonMatch = llmResponse.match(/\{[\s\S]*\}/);
        if (!jsonMatch) {
            return null;
        }
        const parsed = JSON.parse(jsonMatch[0]);
        const signals = {
            has_governance_requirement: parsed.governance === true,
            has_decision_requirement: parsed.decision === true,
            has_qualification_requirement: parsed.qualification === true,
            has_multi_step: parsed.multi_step === true,
            has_conditional_logic: parsed.conditional === true,
            has_extraction_requirement: parsed.extraction === true,
            complexity: "simple",
            reason: parsed.reasoning || "",
            required_gates: [],
            blockers: [],
            safe_defaults: {},
            unsafe_defaults: [],
            detection_method: "llm",
        };
        // Derive complexity level, gates, and blockers from the raw signals.
        computeComplexityFromSignals(signals);
        return signals;
    }
    catch {
        return null;
    }
}
462
/**
 * Compute complexity level from detected signals (mutates `signals` in place).
 * Populates: complexity, reason, required_gates, blockers, safe_defaults,
 * unsafe_defaults, recommended_gates, and the typed `assessment` object.
 *
 * @param {object} signals - Signal object with has_* booleans, `reason`, and
 *   `detection_method` ("llm" or "regex_fallback") already set by the caller.
 */
function computeComplexityFromSignals(signals) {
    // Compute individual signal weights (using TypedScore)
    // Weights are calibrated so that:
    // - Any single high-priority signal (governance, decision, qualification) → moderate (0.4+)
    // - Two or more high-priority signals → complex (0.7+)
    const signalScores = [];
    if (signals.has_governance_requirement) {
        signalScores.push({ kind: "weight", value: 0.4, scale: "0_1", rationale: "governance requirement detected" });
    }
    if (signals.has_decision_requirement) {
        signalScores.push({ kind: "weight", value: 0.4, scale: "0_1", rationale: "decision/classification requirement detected" });
    }
    if (signals.has_qualification_requirement) {
        signalScores.push({ kind: "weight", value: 0.35, scale: "0_1", rationale: "qualification requirement detected" });
    }
    if (signals.has_multi_step) {
        signalScores.push({ kind: "weight", value: 0.2, scale: "0_1", rationale: "multi-step flow detected" });
    }
    if (signals.has_conditional_logic) {
        signalScores.push({ kind: "weight", value: 0.2, scale: "0_1", rationale: "conditional logic detected" });
    }
    if (signals.has_extraction_requirement) {
        signalScores.push({ kind: "weight", value: 0.15, scale: "0_1", rationale: "data extraction requirement detected" });
    }
    // Compute continuous complexity level (0-1) as the capped sum of weights
    const totalWeight = signalScores.reduce((sum, s) => sum + s.value, 0);
    const complexityLevel = Math.min(1.0, totalWeight); // Cap at 1.0
    // Initialize arrays (reset any caller-provided values before repopulating)
    signals.blockers = [];
    signals.safe_defaults = {};
    signals.unsafe_defaults = [];
    signals.required_gates = [];
    // Determine blockers based on detected signals
    if (signals.has_governance_requirement) {
        signals.blockers.push("approver_unknown", "approval_semantics_unclear");
        signals.unsafe_defaults.push("who approves", "approval channel", "SLA");
        signals.required_gates.push(4);
    }
    if (signals.has_decision_requirement) {
        signals.blockers.push("taxonomy_unknown", "classification_criteria_unclear");
        signals.unsafe_defaults.push("use-case categories", "taxonomy source/owner");
        signals.required_gates.push(3);
    }
    if (signals.has_qualification_requirement) {
        signals.blockers.push("qualification_definition_missing");
        signals.unsafe_defaults.push("qualification rubric", "what 'fit' means");
        signals.required_gates.push(2);
    }
    if (signals.has_multi_step || signals.has_conditional_logic) {
        // Multi-step usually needs outcome definition
        if (!signals.required_gates.includes(2))
            signals.required_gates.push(2);
    }
    // Safe defaults (can be assumed without asking)
    signals.safe_defaults = {
        "tone": "professional",
        "retry_policy": "none",
        "page_size": "10",
    };
    // Determine complexity level (legacy enum + new continuous score)
    // Thresholds: simple < 0.34, moderate 0.34-0.66, complex >= 0.67
    // Note: two co-occurring high-priority signals force "complex" even if the
    // capped score alone would not reach the 0.67 threshold.
    if (complexityLevel >= 0.67 ||
        (signals.has_governance_requirement && signals.has_decision_requirement) ||
        (signals.has_governance_requirement && signals.has_qualification_requirement)) {
        signals.complexity = "complex";
        if (!signals.reason) {
            signals.reason = `High uncertainty: ${signals.blockers.length} blockers require resolution`;
        }
        // Complex = all gates
        signals.required_gates = [1, 2, 3, 4, 5];
    }
    else if (complexityLevel >= 0.34) {
        signals.complexity = "moderate";
        if (!signals.reason) {
            signals.reason = `Moderate uncertainty: ${signals.blockers.length} blockers on specific requirements`;
        }
        // Add Gate 1 (modality) if not already included
        if (!signals.required_gates.includes(1))
            signals.required_gates.unshift(1);
        // Sort gates
        signals.required_gates.sort((a, b) => a - b);
    }
    else {
        // Simple: clear out gates/blockers so callers skip qualification entirely
        signals.complexity = "simple";
        if (!signals.reason)
            signals.reason = "Low uncertainty - basic workflow generation sufficient";
        signals.required_gates = [];
        signals.blockers = [];
        signals.unsafe_defaults = [];
    }
    // Backward compatibility (alias - shares the same array reference)
    signals.recommended_gates = signals.required_gates;
    // Populate typed assessment (new preferred structure)
    const confidenceValue = signals.detection_method === "llm" ? 0.85 : 0.6;
    signals.assessment = {
        level: complexityLevel,
        confidence: {
            kind: "confidence",
            value: confidenceValue,
            scale: "0_1",
            rationale: signals.detection_method === "llm"
                ? "LLM-based detection with semantic understanding"
                : "Regex-based fallback with limited semantic understanding",
        },
        signals: signalScores,
        blockers: signals.blockers,
        rationale: signals.reason,
    };
}
575
+ // ─────────────────────────────────────────────────────────────────────────────
576
+ // REGEX FALLBACK (Offline / Emergency)
577
+ // ─────────────────────────────────────────────────────────────────────────────
578
/**
 * Regex-based fallback for intent signal detection.
 *
 * ⚠️ This is a FALLBACK for when LLM is unavailable.
 * Prefer using getComplexityDetectionPrompt() + parseComplexitySignalsFromLLM().
 *
 * Limitations:
 * - Doesn't understand context or semantics
 * - Can't handle synonyms or variations
 * - May miss subtle intent signals
 */
export function analyzeComplexityFallback(input) {
    const governanceRe = /\b(approval|approve|review|hitl|human\s+(review|approval|oversight)|sign.?off|authoriz|permission|consent|oversight)\b/i;
    const decisionRe = /\b(use.?case|category|categories|categorize|classify|classification|route|routing|determine|decide|triage|segment|bucket)\b/i;
    const multiStepRe = /\b(and then|after that|next step|follow.?up|subsequently|finally)\b/i;
    const conditionalRe = /\b(if|when|depending|based on|conditional|branch|otherwise|either.*or)\b/i;
    const extractionRe = /\b(capture|extract|collect|gather|record|log|document|note)\b/i;
    const qualificationRe = /\b(qualify|qualifies|qualifying|qualification|evaluate|evaluates|assess|assessment|score|scoring|validate|verify|check if|determine if|eligible)\b/i;
    // Heuristic: several "and"-joined clauses in a longer request imply steps.
    const manyClauses = input.split(/\band\b/i).length > 2 && input.length > 60;
    const signals = {
        has_governance_requirement: governanceRe.test(input),
        has_decision_requirement: decisionRe.test(input),
        has_multi_step: multiStepRe.test(input) || manyClauses,
        has_conditional_logic: conditionalRe.test(input),
        has_extraction_requirement: extractionRe.test(input),
        has_qualification_requirement: qualificationRe.test(input),
        complexity: "simple",
        reason: "",
        required_gates: [],
        blockers: [],
        safe_defaults: {},
        unsafe_defaults: [],
        detection_method: "regex_fallback",
    };
    computeComplexityFromSignals(signals);
    return signals;
}
609
/**
 * Analyze complexity - uses regex fallback by default.
 *
 * For LLM-driven detection (recommended), use:
 * 1. getComplexityDetectionPrompt(input) → send to LLM
 * 2. parseComplexitySignalsFromLLM(response) → get signals
 *
 * This function is for backward compatibility and offline scenarios.
 *
 * @param {string} input - Raw user request text.
 * @returns {object} Signals object from analyzeComplexityFallback().
 */
export function analyzeComplexity(input) {
    return analyzeComplexityFallback(input);
}
621
/**
 * Select the appropriate intent strategy based on complexity.
 *
 * Supports iterative, blocker-first refinement:
 * - Pass 1: Detect complexity, ask blocker questions only
 * - Pass 2+: Re-evaluate with previous answers, ask remaining questions
 *
 * Recommended flow for LLM-driven detection:
 * ```typescript
 * const prompt = getComplexityDetectionPrompt(input);
 * const llmResponse = await yourLLM.complete(prompt);
 * const signals = parseComplexitySignalsFromLLM(llmResponse);
 * const strategy = selectIntentStrategy(input, context, { precomputed_signals: signals });
 * // After the user answers, re-evaluate:
 * const strategy2 = selectIntentStrategy(input, context, {
 *   precomputed_signals: signals,
 *   iteration: 2,
 *   previous_answers: { taxonomy: "provided", approver: "manager" }
 * });
 * ```
 *
 * If no precomputed_signals provided, falls back to regex detection.
 *
 * @param {string} input - Raw user request text.
 * @param {object} [context] - Context forwarded to the prompt package.
 * @param {object} [options] - precomputed_signals, force_complexity,
 *   max_complexity, iteration, previous_answers.
 */
export function selectIntentStrategy(input, context, options) {
    // Use precomputed signals (from LLM) or fall back to regex detection.
    let signals = options?.precomputed_signals ?? analyzeComplexityFallback(input);
    // Caller overrides: force a specific complexity, or clamp to a maximum.
    if (options?.force_complexity) {
        signals = { ...signals, complexity: options.force_complexity };
    }
    if (options?.max_complexity) {
        const order = ["simple", "moderate", "complex"];
        if (order.indexOf(signals.complexity) > order.indexOf(options.max_complexity)) {
            signals = { ...signals, complexity: options.max_complexity };
        }
    }
    const iteration = options?.iteration ?? 1;
    const previousAnswers = options?.previous_answers ?? {};
    // Blocker-first iteration: drop blockers already covered by an answer key
    // (fuzzy substring match in either direction on the blocker's first word).
    const answerKeys = Object.keys(previousAnswers);
    const unresolvedBlockers = signals.blockers.filter((blocker) => {
        const blockerLower = blocker.toLowerCase();
        const blockerHead = blocker.replace(/_/g, " ").split(" ")[0];
        const resolved = answerKeys.some((key) => {
            const keyLower = key.toLowerCase();
            return blockerLower.includes(keyLower) || keyLower.includes(blockerHead);
        });
        return !resolved;
    });
    // Map unresolved blockers to the gates that can resolve them.
    const gatesToAsk = determineGatesForBlockers(unresolvedBlockers, signals.required_gates);
    const approach = determineApproach(signals.complexity, unresolvedBlockers.length, iteration);
    const common = {
        complexity: signals.complexity,
        approach,
        safe_defaults: signals.safe_defaults,
        reason: signals.reason,
        signals,
        iteration,
        previous_answers: previousAnswers,
    };
    if (approach === "template_fill") {
        // Simple enough to proceed directly - no questions, no prompt package.
        return { ...common, gates_to_ask: [], blockers: [] };
    }
    // Guided asks at most 3 gates / top-3 blockers; full asks everything.
    const guided = approach === "guided_architect";
    return {
        ...common,
        gates_to_ask: guided ? gatesToAsk.slice(0, 3) : gatesToAsk,
        blockers: guided ? unresolvedBlockers.slice(0, 3) : unresolvedBlockers,
        prompt_package: getIntentArchitectPromptPackage(input, context),
    };
}
724
/**
 * Determine which gates to ask based on unresolved blockers.
 * Uses a fuzzy first-word match between each blocker and every gate's
 * declared blockers; falls back to requiredGates when nothing maps.
 */
function determineGatesForBlockers(blockers, requiredGates) {
    if (blockers.length === 0) {
        return [];
    }
    const gates = getGateDefinitions();
    const neededGates = new Set();
    for (const blocker of blockers) {
        const blockerHead = blocker.split("_")[0];
        // Check which gate(s) this blocker belongs to
        for (const [gateNum, gateDef] of Object.entries(gates)) {
            const matches = gateDef.blockers.some((b) => blocker.includes(b.split("_")[0]) || b.includes(blockerHead));
            if (matches) {
                neededGates.add(parseInt(gateNum, 10));
            }
        }
    }
    // If we couldn't map any blocker to a gate, use required_gates
    if (neededGates.size === 0) {
        return requiredGates;
    }
    return [...neededGates].sort((a, b) => a - b);
}
747
/**
 * Determine the processing approach from complexity, remaining blocker
 * count, and iteration number.
 *
 * @param {"simple"|"moderate"|"complex"} complexity - Detected complexity.
 * @param {number} unresolvedBlockerCount - Blockers still unanswered.
 * @param {number} iteration - 1-based; >1 means questions were already asked.
 * @returns {"template_fill"|"guided_architect"|"full_architect"}
 */
function determineApproach(complexity, unresolvedBlockerCount, iteration) {
    // After at least one round of questions with nothing left blocking,
    // a plain template fill suffices regardless of complexity.
    if (unresolvedBlockerCount === 0 && iteration > 1) {
        return "template_fill";
    }
    switch (complexity) {
        case "simple":
            return "template_fill";
        case "moderate":
            // Moderate with few blockers → guided; otherwise escalate to full.
            return unresolvedBlockerCount <= 3 ? "guided_architect" : "full_architect";
        case "complex":
            // Complex always starts as full, but can relax to guided once an
            // earlier iteration has whittled blockers down.
            if (iteration > 1 && unresolvedBlockerCount <= 3) {
                return "guided_architect";
            }
            return "full_architect";
        default:
            // Defensive fallback: an unrecognized complexity value previously
            // fell through the switch and returned undefined; treat it as the
            // most conservative (full) approach instead.
            return "full_architect";
    }
}
769
/**
 * Single canonical entrypoint for intent processing.
 *
 * Handlers should call ONLY this function. It encapsulates:
 * 1. Complexity detection (precomputed LLM signals or regex fallback)
 * 2. Strategy selection (which approach to take)
 * 3. Gate qualification (which questions to ask)
 * 4. Prompt generation (when the full architect is needed)
 *
 * @param {string} input - The raw user request.
 * @param {object} context - Caller context (e.g. persona_type).
 * @param {object} [options] - Optional: precomputed_signals, max_complexity,
 *   previous_answers, iteration.
 * @returns {object} Result with `assessment`, `strategy`, optional
 *   `questions`, optional `prompt_package`, and a `legacy` shim.
 *
 * @example
 * ```typescript
 * const result = runIntentArchitect("Voice AI SDR that qualifies leads", {
 *   persona_type: "voice"
 * });
 * if (result.strategy.can_proceed) {
 *   // Simple enough - proceed to generation
 * } else if (result.questions) {
 *   const answers = await askUser(result.questions);
 *   const result2 = runIntentArchitect(input, context, {
 *     previous_answers: answers,
 *     iteration: 2
 *   });
 * } else if (result.prompt_package) {
 *   const intentSpec = await llm.complete(result.prompt_package);
 * }
 * ```
 */
export function runIntentArchitect(input, context, options) {
    // 1. Complexity signals: prefer precomputed (LLM), else regex fallback.
    const signals = options?.precomputed_signals ?? analyzeComplexityFallback(input);
    // 2. Strategy via the existing selector.
    const strategy = selectIntentStrategy(input, context, {
        precomputed_signals: signals,
        max_complexity: options?.max_complexity,
        previous_answers: options?.previous_answers,
        iteration: options?.iteration,
    });
    // 3. Assessment: reuse the one carried on the signals, or synthesize one.
    let assessment = signals.assessment;
    if (assessment == null) {
        const fromLlm = signals.detection_method === "llm";
        const levelFor = { simple: 0.2, moderate: 0.5 };
        assessment = {
            level: levelFor[signals.complexity] ?? 0.8,
            confidence: {
                kind: "confidence",
                value: fromLlm ? 0.85 : 0.6,
                scale: "0_1",
                rationale: fromLlm ? "LLM-based detection" : "Regex-based fallback",
            },
            signals: [],
            blockers: signals.blockers,
            rationale: signals.reason,
        };
    }
    // 4. Strategy decision. Blockers already addressed by a previous answer
    //    (matched by substring on the answer key) are dropped.
    const answeredKeys = Object.keys(options?.previous_answers ?? {});
    const openBlockers = signals.blockers.filter((blocker) => !answeredKeys.some((key) => blocker.includes(key)));
    let nextStep;
    switch (strategy.approach) {
        case "template_fill":
            nextStep = "Proceed to workflow generation";
            break;
        case "guided_architect":
            nextStep = `Ask qualification questions for gates: ${strategy.gates_to_ask.join(", ")}`;
            break;
        default:
            nextStep = "Send prompt to LLM for full intent decomposition";
    }
    const strategyDecision = {
        approach: strategy.approach,
        gates_to_ask: strategy.gates_to_ask,
        blockers: openBlockers,
        can_proceed: strategy.approach === "template_fill" || strategy.gates_to_ask.length === 0,
        next_step: nextStep,
    };
    // 5. Qualification questions — only for non-full flows that have gates.
    const questions = strategy.gates_to_ask.length > 0 && strategy.approach !== "full_architect"
        ? buildQualificationQuestions(strategy.gates_to_ask, signals)
        : undefined;
    // 6. Assemble the result (legacy shim keeps older callers working).
    const result = {
        assessment,
        strategy: strategyDecision,
        questions,
        legacy: {
            complexity: strategy.complexity,
            signals,
        },
    };
    // The full architect additionally carries the LLM prompt package.
    if (strategy.approach === "full_architect" && strategy.prompt_package) {
        result.prompt_package = strategy.prompt_package;
    }
    return result;
}
859
/**
 * Build qualification questions from gate IDs.
 *
 * Each "ask" string in a gate's definition becomes one question record.
 * Gate IDs with no definition are skipped silently.
 *
 * @param {number[]} gateIds - Gate numbers to expand into questions.
 * @param {object} signals - Complexity signals. Currently unused here;
 *   kept so the call signature stays stable for callers.
 * @returns {Array<{id: string, gate: number, question: string,
 *   category: string, answer_type: string, blocking: boolean}>}
 */
function buildQualificationQuestions(gateIds, signals) {
    const questions = [];
    const gates = getGateDefinitions();
    for (const gateId of gateIds) {
        const gate = gates[gateId];
        if (!gate)
            continue;
        // Create a question for each "ask" in the gate.
        for (let i = 0; i < gate.asks.length; i++) {
            const ask = gate.asks[i];
            questions.push({
                id: `gate${gateId}_q${i + 1}`,
                gate: gateId,
                question: ask,
                // Gates 1-2 qualify WHAT; everything later qualifies HOW.
                // NOTE(review): the original expression was
                // `gateId <= 2 ? "WHAT" : gateId <= 4 ? "HOW" : "HOW"` — the
                // last two arms were identical (dead branch). If gates 5+ were
                // meant to get a distinct category (e.g. "WHY"), restore the
                // three-way split here.
                category: gateId <= 2 ? "WHAT" : "HOW",
                answer_type: "text",
                blocking: gate.type === "blocking",
            });
        }
    }
    return questions;
}