@ema.co/mcp-toolkit 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/LICENSE +21 -0
  2. package/README.md +321 -0
  3. package/config.example.yaml +32 -0
  4. package/dist/cli/index.js +333 -0
  5. package/dist/config.js +136 -0
  6. package/dist/emaClient.js +398 -0
  7. package/dist/index.js +109 -0
  8. package/dist/mcp/handlers-consolidated.js +851 -0
  9. package/dist/mcp/index.js +15 -0
  10. package/dist/mcp/prompts.js +1753 -0
  11. package/dist/mcp/resources.js +624 -0
  12. package/dist/mcp/server.js +4723 -0
  13. package/dist/mcp/tools-consolidated.js +590 -0
  14. package/dist/mcp/tools-legacy.js +736 -0
  15. package/dist/models.js +8 -0
  16. package/dist/scheduler.js +21 -0
  17. package/dist/sdk/client.js +788 -0
  18. package/dist/sdk/config.js +136 -0
  19. package/dist/sdk/contracts.js +429 -0
  20. package/dist/sdk/generation-schema.js +189 -0
  21. package/dist/sdk/index.js +39 -0
  22. package/dist/sdk/knowledge.js +2780 -0
  23. package/dist/sdk/models.js +8 -0
  24. package/dist/sdk/state.js +88 -0
  25. package/dist/sdk/sync-options.js +216 -0
  26. package/dist/sdk/sync.js +220 -0
  27. package/dist/sdk/validation-rules.js +355 -0
  28. package/dist/sdk/workflow-generator.js +291 -0
  29. package/dist/sdk/workflow-intent.js +1585 -0
  30. package/dist/state.js +88 -0
  31. package/dist/sync.js +416 -0
  32. package/dist/syncOptions.js +216 -0
  33. package/dist/ui.js +334 -0
  34. package/docs/advisor-comms-assistant-fixes.md +175 -0
  35. package/docs/api-contracts.md +216 -0
  36. package/docs/auto-builder-analysis.md +271 -0
  37. package/docs/data-architecture.md +166 -0
  38. package/docs/ema-auto-builder-guide.html +394 -0
  39. package/docs/ema-user-guide.md +1121 -0
  40. package/docs/mcp-tools-guide.md +149 -0
  41. package/docs/naming-conventions.md +218 -0
  42. package/docs/tool-consolidation-proposal.md +427 -0
  43. package/package.json +98 -0
  44. package/resources/templates/chat-ai/README.md +119 -0
  45. package/resources/templates/chat-ai/persona-config.json +111 -0
  46. package/resources/templates/dashboard-ai/README.md +156 -0
  47. package/resources/templates/dashboard-ai/persona-config.json +180 -0
  48. package/resources/templates/voice-ai/README.md +123 -0
  49. package/resources/templates/voice-ai/persona-config.json +74 -0
  50. package/resources/templates/voice-ai/workflow-prompt.md +120 -0
@@ -0,0 +1,2780 @@
1
+ /**
2
+ * Auto Builder Knowledge Base
3
+ *
4
+ * Structured knowledge about Ema Auto Builder and AI Employee platform for MCP exposure.
5
+ * Sources:
6
+ * - .cursor/rules/platforms/ema-auto-builder/
7
+ * - Ema User Guide documentation
8
+ * - Platform best practices
9
+ */
10
+ // ─────────────────────────────────────────────────────────────────────────────
11
+ // Platform Concepts (from Ema User Guide)
12
+ // ─────────────────────────────────────────────────────────────────────────────
13
+ export const PLATFORM_CONCEPTS = [
14
+ {
15
+ term: "AI Employee",
16
+ definition: "A fully configured, deployable autonomous agent with identity, role, capabilities, knowledge, integrations, and access controls. Think of it as a digital team member.",
17
+ aliases: ["Persona", "AI Agent"],
18
+ relatedTerms: ["Workflow", "Agent", "Trigger"],
19
+ examples: ["HR Support AI", "Customer Service AI", "Document Processing AI"],
20
+ commonConfusions: "An AI Employee CONTAINS agents. Agents are individual capabilities; AI Employees are the complete packages.",
21
+ },
22
+ {
23
+ term: "Workflow",
24
+ definition: "A sequence of agents/actions orchestrated to complete a task. Defines the processing logic of an AI Employee.",
25
+ aliases: ["Process", "Flow"],
26
+ relatedTerms: ["Agent", "Trigger", "Edge"],
27
+ commonConfusions: "Workflow describes backend logic (what agents, how they connect). Persona configuration describes frontend behavior (how the AI talks).",
28
+ },
29
+ {
30
+ term: "Agent",
31
+ definition: "A specialized capability within a workflow (search, respond, classify, etc.). Individual skills that an AI Employee uses.",
32
+ aliases: ["Action", "Node", "Skill"],
33
+ relatedTerms: ["Workflow", "AI Employee"],
34
+ examples: ["Knowledge Search", "Intent Classifier", "Response Generator", "Entity Extractor"],
35
+ },
36
+ {
37
+ term: "Action",
38
+ definition: "A discrete operation an agent can perform. Platform's term for individual operations.",
39
+ aliases: ["Tool", "Step", "Operation"],
40
+ relatedTerms: ["Agent", "External Action", "Connector"],
41
+ commonConfusions: "Action, Tool, and Step can refer to the same thing depending on context!",
42
+ },
43
+ {
44
+ term: "Trigger",
45
+ definition: "What initiates a workflow execution. The entry point for AI Employee interactions.",
46
+ aliases: ["Entry Point", "Initiator"],
47
+ examples: ["chat_trigger", "voice_trigger", "document_trigger", "email_trigger", "scheduled_trigger"],
48
+ },
49
+ {
50
+ term: "HITL",
51
+ definition: "Human-in-the-Loop. An approval/verification step where a human reviews before the workflow continues.",
52
+ aliases: ["Human Collaboration", "Approval Step", "Review Step"],
53
+ relatedTerms: ["general_hitl"],
54
+ commonConfusions: "HITL nodes MUST have both success AND failure paths defined.",
55
+ },
56
+ {
57
+ term: "Connector",
58
+ definition: "A pre-built integration that enables agents to interact with external enterprise systems.",
59
+ aliases: ["Integration", "Tool", "External Action"],
60
+ examples: ["ServiceNow connector", "Salesforce connector", "Workday connector", "Email connector"],
61
+ },
62
+ {
63
+ term: "EmaFusion",
64
+ definition: "Routes queries to optimal models from 100+ LLMs for best results.",
65
+ relatedTerms: ["fusionModel", "Model Selection"],
66
+ },
67
+ {
68
+ term: "GWE",
69
+ definition: "Generative Workflow Engine - Builds workflows from natural language prompts via the Auto Builder.",
70
+ aliases: ["Generative Workflow Engine", "Auto Builder Engine"],
71
+ },
72
+ {
73
+ term: "Agent Mesh",
74
+ definition: "Network of specialized agents working together as a team collaboration pattern.",
75
+ relatedTerms: ["AI Employee", "Workflow"],
76
+ },
77
+ ];
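// Illustrative sketch only: a lookup over PLATFORM_CONCEPTS by term or alias.
// findConcept is not defined in this package; it just shows how the aliases
// field above is meant to be matched (case-insensitively).
function findConcept(term) {
  const needle = term.toLowerCase();
  return PLATFORM_CONCEPTS.find(
    (c) =>
      c.term.toLowerCase() === needle ||
      (c.aliases ?? []).some((a) => a.toLowerCase() === needle)
  );
}
// e.g. findConcept("Persona") resolves to the "AI Employee" entry via its alias.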
78
+ // ─────────────────────────────────────────────────────────────────────────────
79
+ // Workflow Execution Model (Critical Knowledge)
80
+ // ─────────────────────────────────────────────────────────────────────────────
81
+ export const WORKFLOW_EXECUTION_MODEL = {
82
+ criticalRule: "Each user_query (user message) triggers a NEW workflow execution. The workflow runs ONCE per user message, not once per conversation.",
83
+ implications: [
84
+ "Each subsequent question is its own user_query that invokes the workflow again",
85
+ "chat_conversation accumulates across executions, but each execution starts fresh",
86
+ "Avoid duplicate actions - check context before performing actions",
87
+ "Use conversation history to understand what actions were already taken",
88
+ "Use runIf conditions to skip redundant operations based on conversation state",
89
+ ],
90
+ antiPattern: {
91
+ description: "Creating duplicate records on follow-up questions",
92
+ bad: `User: "Create a ticket for my laptop issue" → Workflow creates ticket ✓
93
+ User: "Add my phone number to it" → Workflow runs again - might create ANOTHER ticket ✗`,
94
+ good: `Use chat_conversation to detect if a ticket was already created in this conversation,
95
+ then route to "update ticket" instead of "create ticket"`,
96
+ },
97
+ triggerOutputs: {
98
+ chat_conversation: {
99
+ scope: "All history - complete conversation from start",
100
+ useCase: "When you need full context and don't want to lose any information",
101
+ },
102
+ user_query: {
103
+ scope: "Last message only - just the current question",
104
+ useCase: "Simple queries where history doesn't matter",
105
+ },
106
+ conversation_summarizer: {
107
+ scope: "Configurable - specify how many turns/how far back",
108
+ useCase: "When you need to limit context window or summarize long conversations",
109
+ },
110
+ },
111
+ };
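// Illustrative sketch only: one way to act on the critical rule above. The
// message shape { role, content } is assumed, not defined in this file; the
// point is to inspect chat_conversation before repeating an action, because
// every user message re-runs the workflow.
function ticketAction(chatConversation) {
  const alreadyCreated = chatConversation.some(
    (msg) => msg.role === "assistant" && /ticket\s+#?\w+/i.test(msg.content)
  );
  return alreadyCreated ? "update_ticket" : "create_ticket";
}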
112
+ // ─────────────────────────────────────────────────────────────────────────────
113
+ // Agent Catalog
114
+ // ─────────────────────────────────────────────────────────────────────────────
115
+ export const AGENT_CATALOG = [
116
+ // Triggers
117
+ {
118
+ actionName: "chat_trigger",
119
+ displayName: "Chat Trigger",
120
+ category: "trigger",
121
+ description: "Entry point for chat-based interactions. Outputs both conversation history and current query.",
122
+ inputs: [],
123
+ outputs: [
124
+ { name: "chat_conversation", type: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", description: "Full conversation history from start" },
125
+ { name: "user_query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Current user message only" },
126
+ ],
127
+ whenToUse: "Starting point for any chat or voice AI workflow",
128
+ criticalRules: [
129
+ "Each user message triggers a NEW workflow execution",
130
+ "chat_conversation accumulates; user_query is current message only",
131
+ ],
132
+ },
133
+ {
134
+ actionName: "document_trigger",
135
+ displayName: "Document Trigger",
136
+ category: "trigger",
137
+ description: "Entry point for document processing workflows triggered by file upload.",
138
+ inputs: [],
139
+ outputs: [
140
+ { name: "user_query", type: "WELL_KNOWN_TYPE_DOCUMENT", description: "Uploaded document(s)" },
141
+ ],
142
+ whenToUse: "When workflow is triggered by document upload for processing/extraction",
143
+ },
144
+ {
145
+ actionName: "voice_trigger",
146
+ displayName: "Voice Trigger",
147
+ category: "trigger",
148
+ description: "Entry point for voice/phone call interactions.",
149
+ inputs: [],
150
+ outputs: [
151
+ { name: "chat_conversation", type: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", description: "Voice conversation history" },
152
+ { name: "user_query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Current utterance" },
153
+ ],
154
+ whenToUse: "Voice AI employees handling phone calls",
155
+ },
156
+ // Routing / Classification
157
+ {
158
+ actionName: "chat_categorizer",
159
+ displayName: "Intent Classifier",
160
+ category: "routing",
161
+ description: "Classifies user intent into predefined categories for routing. First routing node after trigger.",
162
+ inputs: [
163
+ { name: "conversation", type: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", required: true, description: "Chat conversation to classify" },
164
+ ],
165
+ outputs: [
166
+ { name: "category", type: "WELL_KNOWN_TYPE_ENUM", description: "Outputs the matched category enum value (e.g., 'Client_Update', 'Fallback')" },
167
+ ],
168
+ criticalRules: [
169
+ "MUST have at least one outgoing edge",
170
+ "runIf condition: compare 'output: category' to 'enumValue: <CategoryName>' (NOT category_<name> format)",
171
+ "Target input MUST be: trigger_when",
172
+ "ALWAYS include a Fallback category",
173
+ "Create a runIf condition for EACH category or routing fails",
174
+ "You MUST create a handler for EACH category",
175
+ ],
176
+ whenToUse: "When you need to route chat conversations to different processing paths based on intent",
177
+ whenNotToUse: "Simple single-path workflows that don't need routing",
178
+ example: "Categories: [Market_Impact, Client_Lookup, Compliance_Check, Fallback]",
179
+ },
180
+ {
181
+ actionName: "text_categorizer",
182
+ displayName: "Text Categorizer",
183
+ category: "routing",
184
+ description: "Classifies text content (not conversation) into categories.",
185
+ inputs: [
186
+ { name: "text", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Text to classify" },
187
+ ],
188
+ outputs: [
189
+ { name: "category", type: "WELL_KNOWN_TYPE_ENUM", description: "Classification result" },
190
+ ],
191
+ criticalRules: [
192
+ "Same rules as chat_categorizer: must have Fallback, edges for each category",
193
+ ],
194
+ whenToUse: "When routing based on text content rather than full conversation history",
195
+ },
196
+ {
197
+ actionName: "document_categorizer",
198
+ displayName: "Document Categorizer",
199
+ category: "routing",
200
+ description: "Classifies documents into categories for routing.",
201
+ inputs: [
202
+ { name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Documents to classify" },
203
+ ],
204
+ outputs: [
205
+ { name: "category", type: "WELL_KNOWN_TYPE_ENUM", description: "Document classification" },
206
+ ],
207
+ whenToUse: "When routing based on document type or content",
208
+ },
209
+ {
210
+ actionName: "conversation_to_search_query",
211
+ displayName: "Conversation Summarizer",
212
+ category: "routing",
213
+ description: "Converts conversation history to a search query. Configurable turn count.",
214
+ inputs: [
215
+ { name: "conversation", type: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", required: true, description: "Conversation to summarize" },
216
+ ],
217
+ outputs: [
218
+ { name: "summarized_conversation", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Distilled search query" },
219
+ ],
220
+ whenToUse: "Before search nodes when you have chat_conversation but need TEXT_WITH_SOURCES. Also for managing long conversation context windows.",
221
+ whenNotToUse: "When trigger.user_query is sufficient for simple queries",
222
+ criticalRules: [
223
+ "May still be needed WITH chat_conversation for downstream agents requiring specific format",
224
+ "Use to reduce conversation size due to LLM context window limits",
225
+ ],
226
+ },
227
+ // Search & Retrieval
228
+ {
229
+ actionName: "search",
230
+ displayName: "File Search / Knowledge Search",
231
+ category: "search",
232
+ description: "Searches uploaded documents/knowledge base using hybrid search (keyword + vector).",
233
+ inputs: [
234
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Search query" },
235
+ ],
236
+ outputs: [
237
+ { name: "search_results", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", description: "Matching passages with sources and citations" },
238
+ ],
239
+ whenToUse: "When searching static uploaded documents or internal knowledge base",
240
+ example: "FAQ lookup, policy search, documentation assistant",
241
+ },
242
+ {
243
+ actionName: "live_web_search",
244
+ displayName: "Live Web Search / Deep Web Search",
245
+ category: "search",
246
+ description: "Real-time web search for external information not in knowledge base.",
247
+ inputs: [
248
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Search query" },
249
+ ],
250
+ outputs: [
251
+ { name: "web_search_results", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", description: "Web search results" },
252
+ ],
253
+ whenToUse: "When you need real-time external information not in the knowledge base",
254
+ example: "Current events, live data, external research",
255
+ },
256
+ {
257
+ actionName: "combine_search_results",
258
+ displayName: "Combine Search Results",
259
+ category: "search",
260
+ description: "Merges results from multiple search sources with deduplication.",
261
+ inputs: [
262
+ { name: "search_results_1", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", required: true, description: "First result set" },
263
+ { name: "search_results_2", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", required: true, description: "Second result set" },
264
+ ],
265
+ outputs: [
266
+ { name: "combined_results", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", description: "Merged, deduplicated results" },
267
+ ],
268
+ criticalRules: [
269
+ "AVOID using unless necessary - prefer combine_and_rerank_search_results or call_llm with named_inputs",
270
+ "combine_and_rerank_search_results provides intelligent relevance ranking",
271
+ "call_llm with named_inputs allows prompt-based ranking control",
272
+ ],
273
+ whenToUse: "When combining local + web search, or multiple knowledge bases",
274
+ whenNotToUse: "When you need intelligent ranking - use combine_and_rerank_search_results instead",
275
+ },
276
+ {
277
+ actionName: "combine_and_rerank_search_results",
278
+ displayName: "Rerank Search Results",
279
+ category: "search",
280
+ description: "Combines and reranks search results by relevance.",
281
+ inputs: [
282
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Original query for relevance scoring" },
283
+ { name: "search_results_lists", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", required: true, description: "Results to rerank" },
284
+ ],
285
+ outputs: [
286
+ { name: "reranked_results", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", description: "Reranked results" },
287
+ ],
288
+ whenToUse: "When you need to prioritize results by relevance after combining multiple sources",
289
+ },
290
+ {
291
+ actionName: "document_metasearch",
292
+ displayName: "Document Metasearch",
293
+ category: "search",
294
+ description: "Searches across document metadata.",
295
+ inputs: [
296
+ { name: "template", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Search template" },
297
+ ],
298
+ outputs: [
299
+ { name: "document_metasearch_results", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", description: "Metadata search results" },
300
+ ],
301
+ whenToUse: "When searching by document metadata rather than content",
302
+ },
303
+ // Generation & Response
304
+ {
305
+ actionName: "call_llm",
306
+ displayName: "Respond",
307
+ category: "generation",
308
+ description: "Generates response using LLM with custom instructions. Accepts ANY type via named_inputs. Also used for LLM templating.",
309
+ inputs: [
310
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "User query" },
311
+ { name: "named_inputs", type: "WELL_KNOWN_TYPE_ANY", required: false, description: "Additional context (use named_inputs_<Name> suffix)" },
312
+ ],
313
+ outputs: [
314
+ { name: "response_with_sources", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Generated response" },
315
+ ],
316
+ whenToUse: "When you need custom response generation, content synthesis, or LLM templating for documents",
317
+ whenNotToUse: "For strict regulatory formats requiring pixel-perfect layouts - use template engine instead",
318
+ criticalRules: [
319
+ "named_inputs accepts ANY type - use for tool results, search results, etc.",
320
+ "Use suffix pattern: named_inputs_<Descriptive_Name>",
321
+ "LLM TEMPLATING: Include structured section headers (## Section) in prompt - let LLM determine appropriate sections based on content",
322
+ "For document generation: Set temperature 0.3-0.5 for consistent formatting",
323
+ "Use structured prompts with clear formatting rules and examples",
324
+ ],
325
+ example: "named_inputs_Market_Context, named_inputs_Client_Data | " +
326
+ "LLM Template prompt: 'Generate structured content with clear ## section headers, organized by themes'",
327
+ },
328
+ {
329
+ actionName: "generate_document",
330
+ displayName: "Generate Document",
331
+ category: "generation",
332
+ description: "Converts markdown content to formatted document (.docx). Use after call_llm for document generation workflows.",
333
+ inputs: [
334
+ { name: "markdown_file_contents", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Well-structured markdown to convert" },
335
+ { name: "template", type: "WELL_KNOWN_TYPE_DOCUMENT", required: false, description: "Optional template from data source for styling" },
336
+ ],
337
+ outputs: [
338
+ { name: "document_link", type: "WELL_KNOWN_TYPE_DOCUMENT", description: "Link to generated document" },
339
+ ],
340
+ whenToUse: "When converting LLM-generated markdown to professional document format",
341
+ criticalRules: [
342
+ "Ensure input markdown has clear headers (## Section) and structure",
343
+ "For email attachment: use named_inputs (not attachment_links) due to DOCUMENT type",
344
+ "Chain pattern: call_llm (content generation) → generate_document → send_email (via named_inputs)",
345
+ "Optional: Add CSS/styling in markdown for professional formatting",
346
+ ],
347
+ example: "detailed_content (call_llm) → generate_doc → send_email.named_inputs_Attachment",
348
+ },
349
+ {
350
+ actionName: "respond_with_sources",
351
+ displayName: "Respond using Search Results",
352
+ category: "generation",
353
+ description: "Generates response grounded in search results with citations.",
354
+ inputs: [
355
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "User query" },
356
+ { name: "search_results", type: "WELL_KNOWN_TYPE_SEARCH_RESULT", required: true, description: "Search results to ground response" },
357
+ ],
358
+ outputs: [
359
+ { name: "response_with_sources", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Response with citations" },
360
+ ],
361
+ whenToUse: "When you want responses grounded in specific search results with source citations",
362
+ criticalRules: [
363
+ "Enable use_citation_based_filtering for trust",
364
+ "Implement confidence thresholds for quality control",
365
+ ],
366
+ },
367
+ {
368
+ actionName: "respond_for_external_actions",
369
+ displayName: "Respond using Tool Result",
370
+ category: "generation",
371
+ description: "Generates response explaining external tool/action results.",
372
+ inputs: [
373
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Original query" },
374
+ { name: "external_action_result", type: "WELL_KNOWN_TYPE_ANY", required: true, description: "Tool execution result" },
375
+ ],
376
+ outputs: [
377
+ { name: "response", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Human-friendly explanation" },
378
+ ],
379
+ whenToUse: "After external_action_caller to explain results to user",
380
+ },
381
+ {
382
+ actionName: "fixed_response",
383
+ displayName: "Fixed Response",
384
+ category: "generation",
385
+ description: "Returns a static/template response with {{variable}} substitution. No LLM call.",
386
+ inputs: [
387
+ { name: "named_inputs", type: "WELL_KNOWN_TYPE_ANY", required: false, description: "Variables to substitute into template (use named_inputs_<Variable_Name>)" },
388
+ { name: "extracted_variables", type: "WELL_KNOWN_TYPE_ANY", required: false, description: "JSON key-value pairs for variable substitution" },
389
+ { name: "fixed_response", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Template with {{variable_name}} placeholders" },
390
+ ],
391
+ outputs: [
392
+ { name: "fixed_response_with_sources", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Response with variables substituted" },
393
+ ],
394
+ whenToUse: "For short, structured messages with variables: confirmations, acknowledgments, simple notifications",
395
+ whenNotToUse: "For complex templates - use data source templates with generate_document or fill_document_template instead",
396
+ criticalRules: [
397
+ "Use {{variable_name}} syntax for dynamic content (case-sensitive)",
398
+ "named_inputs takes precedence over extracted_variables when same variable name exists",
399
+ "Keep templates SHORT - for long emails/documents use data source templates",
400
+ "Variables must match exactly including case",
401
+ ],
402
+ example: "Template: 'Dear {{Customer_Name}}, your order {{Order_ID}} has been {{Status}}.' " +
403
+ "→ Connect entity_extraction.customer_name to named_inputs_Customer_Name",
404
+ },
405
+ {
406
+ actionName: "send_email_agent",
407
+ displayName: "Send Email",
408
+ category: "external",
409
+ description: "Sends email via configured email provider.",
410
+ inputs: [
411
+ { name: "email_to", type: "WELL_KNOWN_TYPE_ANY", required: true, description: "Recipient email address - MUST be from entity_extraction" },
412
+ { name: "email_subject", type: "WELL_KNOWN_TYPE_ANY", required: false, description: "Email subject line" },
413
+ { name: "email_body", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Email body content" },
414
+ { name: "attachment_links", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: false, description: "Document links to attach (use named_inputs for DOCUMENT type)" },
415
+ ],
416
+ outputs: [
417
+ { name: "send_status", type: "WELL_KNOWN_TYPE_ANY", description: "Send confirmation" },
418
+ ],
419
+ whenToUse: "When you need to send email after user confirmation",
420
+ criticalRules: [
421
+ "CRITICAL: email_to MUST be from entity_extraction.email_address - NOT from text outputs",
422
+ "ALWAYS use HITL confirmation before sending (runIf: 'HITL Success')",
423
+ "For attachments with DOCUMENT type, use named_inputs instead of attachment_links",
424
+ "Never connect summarized_conversation, response_with_sources, or search_results to email_to",
425
+ ],
426
+ example: "entity_extraction.email_address → send_email.email_to (CORRECT) | " +
427
+ "summarized_conversation → send_email.email_to (WRONG - this is text, not an email address)",
428
+ },
429
+ // External Actions
430
+ {
431
+ actionName: "external_action_caller",
432
+ displayName: "External Tool Caller / Intelligent Actions",
433
+ category: "external",
434
+ description: "Calls external APIs/tools (ServiceNow, Salesforce, Workday, etc.).",
435
+ inputs: [
436
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Context for tool call" },
437
+ { name: "conversation", type: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", required: false, description: "Conversation history" },
438
+ ],
439
+ outputs: [
440
+ { name: "tool_execution_result", type: "WELL_KNOWN_TYPE_ANY", description: "Tool execution result" },
441
+ ],
442
+ whenToUse: "When you need to call external systems (create ticket, send email, lookup CRM, update records)",
443
+ criticalRules: [
444
+ "Check conversation history before creating records to avoid duplicates",
445
+ "Use HITL for actions with external side effects",
446
+ ],
447
+ example: "ServiceNow ticket creation, Salesforce record update, Email sending",
448
+ },
449
+ // Entity & Rule Processing
450
+ {
451
+ actionName: "entity_extraction_with_documents",
452
+ displayName: "Entity Extraction",
453
+ category: "entity",
454
+ description: "Extracts structured entities from documents for cross-document linking or API calls.",
455
+ inputs: [
456
+ { name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Documents to extract from" },
457
+ { name: "extraction_columns", type: "WELL_KNOWN_TYPE_ANY", required: true, description: "Schema defining what to extract" },
458
+ ],
459
+ outputs: [
460
+ { name: "extraction_columns", type: "WELL_KNOWN_TYPE_ANY", description: "Extracted structured data" },
461
+ ],
462
+ criticalRules: [
463
+ "Works on DOCUMENTS, not text",
464
+ "Requires schema definition for extraction columns",
465
+ ],
466
+ whenToUse: "When you need structured data extraction for cross-document linking, API calls, or audit trails with citations",
467
+ whenNotToUse: "For simple conversational queries—use LLM resolution instead",
468
+ },
469
+ {
470
+ actionName: "rule_validation_with_documents",
471
+ displayName: "Rule Validation",
472
+ category: "validation",
473
+ description: "Validates extracted data against business rules.",
474
+ inputs: [
475
+ { name: "primary_docs", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Primary documents" },
476
+ { name: "map_of_extracted_columns", type: "WELL_KNOWN_TYPE_ANY", required: true, description: "Extracted data to validate" },
477
+ ],
478
+ outputs: [
479
+ { name: "ruleset_output", type: "WELL_KNOWN_TYPE_ANY", description: "Validation results" },
480
+ ],
481
+ whenToUse: "For compliance checks, business rule validation, threshold checking",
482
+ },
483
+ // Human Collaboration
484
+ {
485
+ actionName: "general_hitl",
486
+ displayName: "Human Collaboration",
487
+ category: "collaboration",
488
+ description: "Routes for human review/approval. Critical for sensitive actions. Special categorizer with fixed categories.",
489
+ inputs: [
490
+ { name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Context for review" },
491
+ ],
492
+ outputs: [
493
+ // NOTE: Edge source_output uses space: "hitl_status_HITL Success" (not underscore)
494
+ // The runIf enumValue also uses space: "HITL Success" / "HITL Failure"
495
+ { name: "hitl_status_HITL Success", type: "WELL_KNOWN_TYPE_ANY", description: "Human approved path" },
496
+ { name: "hitl_status_HITL Failure", type: "WELL_KNOWN_TYPE_ANY", description: "Human rejected path" },
497
+ ],
498
+ criticalRules: [
499
+ "MUST have both 'hitl_status_HITL Success' AND 'hitl_status_HITL Failure' paths (note: space, not underscore)",
500
+ "Both paths must lead to valid response nodes and WORKFLOW_OUTPUT",
501
+ "Missing HITL on sensitive actions = unintended actions taken",
502
+ "Unlike regular categorizers, NO Fallback category - only Success/Failure",
503
+ "Connect HITL outputs to downstream node's trigger_when input",
504
+ ],
505
+ whenToUse: "For high-impact actions requiring human oversight, any action with external side effects",
506
+ example: "source_output: 'hitl_status_HITL Success' → target_input: 'trigger_when'",
507
+ },
508
+ // Validation & Guardrails
509
+ {
510
+ actionName: "response_validator",
511
+ displayName: "Response Validator",
512
+ category: "validation",
513
+ description: "Validates LLM output against criteria. Guardrail for response quality.",
514
+ inputs: [
515
+ { name: "reference_query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Original query" },
516
+ { name: "response_to_validate", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Response to check" },
517
+ ],
518
+ outputs: [
519
+ { name: "abstain_reason", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Reason if validation fails" },
520
+ ],
521
+ whenToUse: "For guardrails, compliance checking on generated responses, quality control",
522
+ },
523
+ {
524
+ actionName: "abstain_action",
525
+ displayName: "Abstain from Answering",
526
+ category: "validation",
527
+ description: "Declines to answer when appropriate. Graceful handling of out-of-scope queries.",
528
+ inputs: [
529
+ { name: "abstain_reason", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Why abstaining" },
530
+ ],
531
+ outputs: [
532
+ { name: "abstain_reason", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Abstention message" },
533
+ ],
534
+ whenToUse: "When the AI should explicitly decline to answer or redirect",
535
+ },
536
+ // Sentiment & Analysis
537
+ {
538
+ actionName: "sentiment_analyzer",
539
+ displayName: "Sentiment Analyzer",
540
+ category: "analytics",
541
+ description: "Analyzes sentiment/emotion in user input.",
542
+ inputs: [
543
+ { name: "text", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Text to analyze" },
544
+ ],
545
+ outputs: [
546
+ { name: "sentiment", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Sentiment analysis" },
547
+ ],
548
+ whenToUse: "When you need to gauge user emotion for routing or response tone adjustment",
549
+ },
550
+ // Domain Agents - Finance
551
+ {
552
+ actionName: "financial_risk_assessor",
553
+ displayName: "Financial Risk Assessor",
554
+ category: "finance",
555
+ description: "Analyzes financial risk factors for investment/credit decisions.",
556
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Risk query" }],
557
+ outputs: [{ name: "risk_assessment", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Risk analysis" }],
558
+ whenToUse: "Financial services: portfolio risk, credit assessment, exposure analysis",
559
+ },
560
+ {
561
+ actionName: "financial_statement_analyzer",
562
+ displayName: "Financial Statement Analyzer",
563
+ category: "finance",
564
+ description: "Analyzes financial statements and reports.",
565
+ inputs: [{ name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Financial documents" }],
566
+ outputs: [{ name: "analysis", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Financial analysis" }],
567
+ whenToUse: "Analyzing P&L, balance sheets, annual reports",
568
+ },
569
+ {
570
+ actionName: "tax_advisor",
571
+ displayName: "Tax Advisor",
572
+ category: "finance",
573
+ description: "Provides tax-related guidance and analysis.",
574
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Tax question" }],
575
+ outputs: [{ name: "tax_advice", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Tax guidance" }],
576
+ whenToUse: "Tax planning, compliance questions, deduction analysis",
577
+ },
578
+ {
579
+ actionName: "audit_evidence_verifier",
580
+ displayName: "Audit Evidence Verifier",
581
+ category: "finance",
582
+ description: "Verifies audit evidence and documentation.",
583
+ inputs: [{ name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Audit documents" }],
584
+ outputs: [{ name: "verification", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Verification results" }],
585
+ whenToUse: "Audit support, evidence verification, compliance documentation",
586
+ },
587
+ // Domain Agents - Healthcare
588
+ {
589
+ actionName: "medical_record_summarizer",
590
+ displayName: "Medical Record Summarizer",
591
+ category: "healthcare",
592
+ description: "Summarizes medical records and patient history.",
593
+ inputs: [{ name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Medical records" }],
594
+ outputs: [{ name: "summary", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Record summary" }],
595
+ whenToUse: "Healthcare: summarizing patient records, medical history, clinical notes",
596
+ },
597
+ {
598
+ actionName: "medical_research_assistant",
599
+ displayName: "Medical Research Assistant",
600
+ category: "healthcare",
601
+ description: "Assists with medical research queries.",
602
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Research query" }],
603
+ outputs: [{ name: "research", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Research findings" }],
604
+ whenToUse: "Medical research, literature review, clinical guidance",
605
+ },
606
+ {
607
+ actionName: "insurance_claim_summarizer",
608
+ displayName: "Insurance Claim Summarizer",
609
+ category: "healthcare",
610
+ description: "Summarizes insurance claims and documentation.",
611
+ inputs: [{ name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Claim documents" }],
612
+ outputs: [{ name: "summary", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Claim summary" }],
613
+ whenToUse: "Insurance processing, claim analysis",
614
+ },
615
+ // Domain Agents - Legal
616
+ {
617
+ actionName: "compliance_document_analyzer",
618
+ displayName: "Compliance Document Analyzer",
619
+ category: "legal",
620
+ description: "Analyzes documents for compliance issues.",
621
+ inputs: [{ name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Documents to analyze" }],
622
+ outputs: [{ name: "compliance_findings", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Compliance analysis" }],
623
+ whenToUse: "Regulatory compliance, contract review, policy compliance",
624
+ },
625
+ {
626
+ actionName: "legal_expert",
627
+ displayName: "Legal Expert",
628
+ category: "legal",
629
+ description: "Provides legal analysis and guidance.",
630
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Legal question" }],
631
+ outputs: [{ name: "legal_analysis", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Legal guidance" }],
632
+ whenToUse: "Legal research, contract analysis, regulatory questions",
633
+ },
634
+ {
635
+ actionName: "information_redacter",
636
+ displayName: "Information Redacter",
637
+ category: "legal",
638
+ description: "Redacts sensitive information from documents.",
639
+ inputs: [{ name: "documents", type: "WELL_KNOWN_TYPE_DOCUMENT", required: true, description: "Documents to redact" }],
640
+ outputs: [{ name: "redacted", type: "WELL_KNOWN_TYPE_DOCUMENT", description: "Redacted documents" }],
641
+ whenToUse: "Privacy compliance, document sanitization",
642
+ },
643
+ // Domain Agents - IT/Security
644
+ {
645
+ actionName: "phishing_email_detector",
646
+ displayName: "Phishing Email Detector",
647
+ category: "it_security",
648
+ description: "Detects phishing attempts in emails.",
649
+ inputs: [{ name: "email_content", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Email to analyze" }],
650
+ outputs: [{ name: "detection_result", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Phishing analysis" }],
651
+ whenToUse: "Email security, security operations",
652
+ },
653
+ {
654
+ actionName: "cybersecurity_expert",
655
+ displayName: "Cybersecurity Expert",
656
+ category: "it_security",
657
+ description: "Provides cybersecurity analysis and guidance.",
658
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Security query" }],
659
+ outputs: [{ name: "security_analysis", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Security guidance" }],
660
+ whenToUse: "Security incident analysis, vulnerability assessment",
661
+ },
662
+ {
663
+ actionName: "technical_support",
664
+ displayName: "Technical Support",
665
+ category: "it_security",
666
+ description: "Provides IT technical support guidance.",
667
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Support query" }],
668
+ outputs: [{ name: "support_response", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Technical guidance" }],
669
+ whenToUse: "IT helpdesk, troubleshooting, technical guidance",
670
+ },
671
+ // Domain Agents - Sales
672
+ {
673
+ actionName: "sales_intelligence",
674
+ displayName: "Sales Intelligence",
675
+ category: "sales",
676
+ description: "Provides sales insights and account intelligence.",
677
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Sales query" }],
678
+ outputs: [{ name: "intelligence", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Sales insights" }],
679
+ whenToUse: "Account research, competitive analysis, opportunity assessment",
680
+ },
681
+ {
682
+ actionName: "email_writer",
683
+ displayName: "Email Writer",
684
+ category: "sales",
685
+ description: "Generates professional email content.",
686
+ inputs: [{ name: "query", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Email context" }],
687
+ outputs: [{ name: "email", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Generated email" }],
688
+ whenToUse: "Sales outreach, customer communication, follow-ups",
689
+ },
690
+ // Formatting Agents
691
+ {
692
+ actionName: "json_formatter",
693
+ displayName: "JSON Formatter",
694
+ category: "formatting",
695
+ description: "Formats output as JSON.",
696
+ inputs: [{ name: "content", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Content to format" }],
697
+ outputs: [{ name: "json", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "JSON formatted output" }],
698
+ whenToUse: "When structured JSON output is required",
699
+ },
700
+ {
701
+ actionName: "json_extractor",
702
+ displayName: "JSON Extractor",
703
+ category: "formatting",
704
+ description: "Extracts JSON from LLM output.",
705
+ inputs: [{ name: "content", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Content containing JSON" }],
706
+ outputs: [{ name: "extracted_json", type: "WELL_KNOWN_TYPE_ANY", description: "Extracted JSON object" }],
707
+ whenToUse: "When you need to parse JSON from LLM responses",
708
+ },
709
+ {
710
+ actionName: "markdown_formatter",
711
+ displayName: "Markdown Formatter",
712
+ category: "formatting",
713
+ description: "Formats output as Markdown.",
714
+ inputs: [{ name: "content", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", required: true, description: "Content to format" }],
715
+ outputs: [{ name: "markdown", type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", description: "Markdown formatted output" }],
716
+ whenToUse: "When rich text formatting is needed",
717
+ },
718
+ ];
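// Illustrative sketch only: helpers for querying AGENT_CATALOG when generating
// or validating a workflow. Neither function is part of this module.
function agentsByCategory(category) {
  return AGENT_CATALOG.filter((a) => a.category === category).map((a) => a.actionName);
}

function hasOutput(actionName, outputName) {
  const agent = AGENT_CATALOG.find((a) => a.actionName === actionName);
  return Boolean(agent && agent.outputs.some((o) => o.name === outputName));
}
// e.g. agentsByCategory("routing") lists chat_categorizer, text_categorizer,
// document_categorizer, conversation_to_search_query;
// hasOutput("search", "search_results") → true.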
719
+ // ─────────────────────────────────────────────────────────────────────────────
720
+ // Widget Reference
721
+ // ─────────────────────────────────────────────────────────────────────────────
722
+ export const WIDGET_CATALOG = [
723
+ // Voice AI Widgets
724
+ { id: 38, name: "voiceSettings", description: "Language hints, voice model selection", requiredFor: ["voice"], fields: ["languageHints", "voiceModel"] },
725
+ { id: 39, name: "conversationSettings", description: "Identity, purpose, action instructions, hangup rules - core persona configuration", requiredFor: ["voice"], fields: ["welcomeMessage", "identityAndPurpose", "takeActionInstructions", "hangupInstructions", "transferCallInstructions", "speechCharacteristics", "systemPrompt", "formFillingInstructions", "waitMessage"] },
726
+ { id: 43, name: "vadSettings", description: "Voice activity detection settings", requiredFor: ["voice"], fields: ["turnTimeout", "silenceEndCallTimeout", "maxConversationDuration"] },
727
+ { id: 42, name: "dataStorageSettings", description: "Audio/transcript recording settings", requiredFor: ["voice"], fields: ["storeAudioRecording", "storeTranscripts", "storeAgentTranscript"] },
728
+ // Chat AI Widgets
729
+ { id: 28, name: "chatbotSdkConfig", description: "Chat widget configuration and theming", requiredFor: ["chat"], fields: ["theme", "position", "initialMessage"] },
730
+ { id: 33, name: "feedbackMessage", description: "Feedback collection settings", requiredFor: ["chat"], fields: ["enabled", "prompt"] },
731
+ // Common Widgets (all types)
732
+ { id: 3, name: "fileUpload", description: "Document upload configuration", requiredFor: ["voice", "chat", "dashboard"], fields: ["allowedTypes", "maxSize"] },
733
+ { id: 6, name: "fusionModel", description: "EmaFusion model selection (GPT-4, Claude, etc.)", requiredFor: ["voice", "chat", "dashboard"], fields: ["allModels", "selectedModels"] },
734
+ { id: 8, name: "dataProtection", description: "PII redaction settings", requiredFor: ["voice", "chat", "dashboard"], fields: ["protectedClasses"] },
735
+ ];
736
+ // Project type mapping
737
+ export const PROJECT_TYPES = {
738
+ voice: 5,
739
+ chat: 4,
740
+ dashboard: 2,
741
+ document: 3,
742
+ };
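// Illustrative sketch only: combining WIDGET_CATALOG and PROJECT_TYPES to
// scaffold a persona. personaScaffold is not defined in this package.
function personaScaffold(personaType) {
  return {
    projectType: PROJECT_TYPES[personaType],
    widgets: WIDGET_CATALOG.filter((w) => w.requiredFor.includes(personaType)),
  };
}
// e.g. personaScaffold("voice").projectType === 5, and its widgets include the
// voice-specific entries (voiceSettings, conversationSettings, vadSettings,
// dataStorageSettings) plus the common ones (fileUpload, fusionModel, dataProtection).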
743
+ // ─────────────────────────────────────────────────────────────────────────────
744
+ // Type Compatibility
745
+ // ─────────────────────────────────────────────────────────────────────────────
746
+ export const TYPE_COMPATIBILITY = [
747
+ // Chat conversation compatibility
748
+ { sourceType: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", targetType: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", compatible: true },
749
+ { sourceType: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", targetType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", compatible: false, note: "Use conversation_to_search_query to convert" },
750
+ { sourceType: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", targetType: "WELL_KNOWN_TYPE_ANY", compatible: true, note: "call_llm.named_inputs accepts ANY" },
751
+ // Text with sources compatibility
752
+ { sourceType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", targetType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", compatible: true },
753
+ { sourceType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", targetType: "WELL_KNOWN_TYPE_ANY", compatible: true },
754
+ { sourceType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", targetType: "WELL_KNOWN_TYPE_CHAT_CONVERSATION", compatible: false, note: "Cannot convert text to conversation" },
755
+ // Search result compatibility
756
+ { sourceType: "WELL_KNOWN_TYPE_SEARCH_RESULT", targetType: "WELL_KNOWN_TYPE_SEARCH_RESULT", compatible: true },
757
+ { sourceType: "WELL_KNOWN_TYPE_SEARCH_RESULT", targetType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", compatible: false, note: "Use respond_with_sources instead of call_llm for search results" },
758
+ { sourceType: "WELL_KNOWN_TYPE_SEARCH_RESULT", targetType: "WELL_KNOWN_TYPE_ANY", compatible: true, note: "call_llm.named_inputs accepts ANY" },
759
+ // Document compatibility
760
+ { sourceType: "WELL_KNOWN_TYPE_DOCUMENT", targetType: "WELL_KNOWN_TYPE_DOCUMENT", compatible: true },
761
+ { sourceType: "WELL_KNOWN_TYPE_DOCUMENT", targetType: "WELL_KNOWN_TYPE_ANY", compatible: true },
762
+ { sourceType: "WELL_KNOWN_TYPE_DOCUMENT", targetType: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", compatible: false, note: "Use entity_extraction or document-specific agents" },
763
+ // Any type compatibility
764
+ { sourceType: "WELL_KNOWN_TYPE_ANY", targetType: "WELL_KNOWN_TYPE_ANY", compatible: true },
765
+ ];
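// Illustrative sketch only: checking a proposed edge against TYPE_COMPATIBILITY
// and surfacing the suggested workaround. checkConnection is not part of this module.
function checkConnection(sourceType, targetType) {
  const rule = TYPE_COMPATIBILITY.find(
    (r) => r.sourceType === sourceType && r.targetType === targetType
  );
  if (!rule) return { compatible: false, note: "No rule defined for this pair" };
  return { compatible: rule.compatible, note: rule.note };
}
// e.g. checkConnection("WELL_KNOWN_TYPE_CHAT_CONVERSATION", "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES")
// → { compatible: false, note: "Use conversation_to_search_query to convert" }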
766
+ // ─────────────────────────────────────────────────────────────────────────────
767
+ // Workflow Patterns
768
+ // ─────────────────────────────────────────────────────────────────────────────
769
+ export const WORKFLOW_PATTERNS = [
770
+ {
771
+ name: "simple-kb-search",
772
+ personaType: "chat",
773
+ description: "Basic knowledge base Q&A with search and response",
774
+ nodes: ["chat_trigger", "conversation_to_search_query", "search", "respond_with_sources"],
775
+ connections: [
776
+ "chat_trigger.chat_conversation → conversation_to_search_query.conversation",
777
+ "conversation_to_search_query.summarized_conversation → search.query",
778
+ "search.search_results → respond_with_sources.search_results",
779
+ "chat_trigger.user_query → respond_with_sources.query",
780
+ "respond_with_sources.response_with_sources → WORKFLOW_OUTPUT",
781
+ ],
782
+ useCase: "FAQ bot, documentation assistant, policy lookup",
783
+ },
784
+ {
785
+ name: "intent-routing",
786
+ personaType: "chat",
787
+ description: "Route conversations to different handlers based on intent classification",
788
+ nodes: ["chat_trigger", "chat_categorizer", "handler_per_category", "fallback_response"],
789
+ connections: [
790
+ "chat_trigger.chat_conversation → chat_categorizer.conversation",
791
+ "chat_categorizer.category_<Category1> → handler_1.trigger_when",
792
+ "chat_categorizer.category_<Category2> → handler_2.trigger_when",
793
+ "chat_categorizer.category_Fallback → fallback_response.trigger_when",
794
+ "handler_*.response → WORKFLOW_OUTPUT",
795
+ "fallback_response.response → WORKFLOW_OUTPUT",
796
+ ],
797
+ useCase: "Multi-purpose assistant with distinct capabilities (HR + IT + General)",
798
+ antiPatterns: [
799
+ "Missing Fallback category",
800
+ "Categories without outgoing edges",
801
+ "Not all paths leading to WORKFLOW_OUTPUT",
802
+ ],
803
+ },
804
+ {
805
+ name: "multi-source-search",
806
+ personaType: "chat",
807
+ description: "Combine local and web search for comprehensive answers",
808
+ nodes: ["chat_trigger", "conversation_to_search_query", "search", "live_web_search", "combine_search_results", "respond_with_sources"],
809
+ connections: [
810
+ "chat_trigger.chat_conversation → conversation_to_search_query.conversation",
811
+ "conversation_to_search_query.summarized_conversation → search.query",
812
+ "conversation_to_search_query.summarized_conversation → live_web_search.query",
813
+ "search.search_results → combine_search_results.search_results_1",
814
+ "live_web_search.web_search_results → combine_search_results.search_results_2",
815
+ "combine_search_results.combined_results → respond_with_sources.search_results",
816
+ "chat_trigger.user_query → respond_with_sources.query",
817
+ "respond_with_sources.response_with_sources → WORKFLOW_OUTPUT",
818
+ ],
819
+ useCase: "Research assistant needing both internal docs and current web info",
820
+ },
821
+ {
822
+ name: "tool-calling",
823
+ personaType: "voice",
824
+ description: "Voice AI that calls external tools (ServiceNow, Salesforce, CRM)",
825
+ nodes: ["chat_trigger", "chat_categorizer", "external_action_caller", "call_llm"],
826
+ connections: [
827
+ "chat_trigger.chat_conversation → chat_categorizer.conversation",
828
+ "chat_categorizer.category_<Action> → external_action_caller.trigger_when",
829
+ "chat_trigger.chat_conversation → external_action_caller.conversation",
830
+ "external_action_caller.tool_execution_result → call_llm.named_inputs_Tool_Result",
831
+ "chat_trigger.user_query → call_llm.query",
832
+ "call_llm.response_with_sources → WORKFLOW_OUTPUT",
833
+ ],
834
+ useCase: "IT helpdesk, customer service with ticketing, CRM operations",
835
+ antiPatterns: [
836
+ "Creating duplicate records on follow-up questions",
837
+ "Not checking conversation history before actions",
838
+ ],
839
+ },
840
+ {
841
+ name: "hitl-approval",
842
+ personaType: "chat",
843
+ description: "Human-in-the-loop approval workflow for sensitive actions",
844
+ nodes: ["chat_trigger", "external_action_caller", "general_hitl", "call_llm_approved", "call_llm_rejected"],
845
+ connections: [
846
+ "chat_trigger.user_query → external_action_caller.query",
847
+ "external_action_caller.tool_execution_result → general_hitl.query",
848
+ "general_hitl.'hitl_status_HITL Success' → call_llm_approved.trigger_when",
849
+ "general_hitl.'hitl_status_HITL Failure' → call_llm_rejected.trigger_when",
850
+ "call_llm_approved.response → WORKFLOW_OUTPUT",
851
+ "call_llm_rejected.response → WORKFLOW_OUTPUT",
852
+ ],
853
+ useCase: "High-value transactions, sensitive operations, escalation paths",
854
+ antiPatterns: [
855
+ "Missing either success or failure path",
856
+ "Not terminating both paths at WORKFLOW_OUTPUT",
857
+ ],
858
+ },
859
+ {
860
+ name: "document-processing",
861
+ personaType: "dashboard",
862
+ description: "Document upload → extraction → validation → response",
863
+ nodes: ["document_trigger", "entity_extraction_with_documents", "rule_validation_with_documents", "call_llm"],
864
+ connections: [
865
+ "document_trigger.user_query → entity_extraction_with_documents.documents",
866
+ "entity_extraction_with_documents.extraction_columns → rule_validation_with_documents.map_of_extracted_columns",
867
+ "document_trigger.user_query → rule_validation_with_documents.primary_docs",
868
+ "rule_validation_with_documents.ruleset_output → call_llm.named_inputs_Validation_Results",
869
+ "call_llm.response_with_sources → WORKFLOW_OUTPUT",
870
+ ],
871
+ useCase: "Invoice processing, contract analysis, compliance checking",
872
+ },
873
+ {
874
+ name: "guardrails-pattern",
875
+ personaType: "chat",
876
+ description: "Response generation with validation guardrails",
877
+ nodes: ["chat_trigger", "search", "respond_with_sources", "response_validator", "abstain_action"],
878
+ connections: [
879
+ "chat_trigger.user_query → search.query",
880
+ "search.search_results → respond_with_sources.search_results",
881
+ "respond_with_sources.response_with_sources → response_validator.response_to_validate",
882
+ "chat_trigger.user_query → response_validator.reference_query",
883
+ "response_validator.abstain_reason → [conditional: if valid] → WORKFLOW_OUTPUT",
884
+ "response_validator.abstain_reason → [conditional: if invalid] → abstain_action → WORKFLOW_OUTPUT",
885
+ ],
886
+ useCase: "Regulated industries, compliance-sensitive responses",
887
+ },
888
+ ];
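// Illustrative sketch only: expanding a pattern's "node.output → node.input"
// connection strings into edge objects. The edge shape shown here is assumed,
// not defined by this file; WORKFLOW_OUTPUT and conditional entries are skipped.
function parseConnections(pattern) {
  return pattern.connections
    .filter((c) => !c.includes("WORKFLOW_OUTPUT") && !c.includes("["))
    .map((c) => {
      const [source, target] = c.split("→").map((s) => s.trim());
      const [sourceNode, sourceOutput] = source.split(".");
      const [targetNode, targetInput] = target.split(".");
      return { sourceNode, sourceOutput, targetNode, targetInput };
    });
}
// e.g. parseConnections(WORKFLOW_PATTERNS[0])[0]
// → { sourceNode: "chat_trigger", sourceOutput: "chat_conversation",
//     targetNode: "conversation_to_search_query", targetInput: "conversation" }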
889
+ // ─────────────────────────────────────────────────────────────────────────────
890
+ // Qualifying Questions
891
+ // ─────────────────────────────────────────────────────────────────────────────
892
+ export const QUALIFYING_QUESTIONS = [
893
+ // Round 1: Core Context (Always ask first)
894
+ { category: "AI Type", question: "Is this a Voice AI, Chat AI, or Dashboard AI?", whyItMatters: "Determines trigger type, persona settings, UI widgets, and project type", required: true },
895
+ { category: "AI Type", question: "What triggers the workflow? (phone call, chat message, document upload, email, scheduled)", whyItMatters: "Affects chat_trigger vs document_trigger vs voice_trigger configuration", required: true },
896
+ { category: "Intent", question: "What are the main intents/categories the AI should recognize? (list 3-5 plus Fallback)", whyItMatters: "Required for chat_categorizer categories and routing logic", required: true },
897
+ { category: "Intent", question: "What should happen for each category?", whyItMatters: "Determines branching logic and agent paths for each intent", required: true },
898
+ { category: "Data", question: "What data sources does the AI need to access? (files, APIs, databases, knowledge bases)", whyItMatters: "Determines which search/lookup agents to use", required: true },
899
+ // Round 2: Workflow Details
900
+ { category: "Data", question: "Are the data sources static files or live APIs?", whyItMatters: "Static = search agent, Live = external_action_caller", required: false },
901
+ { category: "Data", question: "What specific fields/entities need to be extracted from queries?", whyItMatters: "May need entity_extraction or conversation_to_search_query", required: false },
902
+ { category: "Data", question: "Are there multiple data sources that need to be correlated?", whyItMatters: "Requires cross-document linking strategy and combine_search_results", required: false },
903
+ { category: "Actions", question: "What external tools/APIs should the AI call? (ServiceNow, Salesforce, Workday, etc.)", whyItMatters: "Configures external_action_caller tools and connectors", required: false },
904
+ { category: "Actions", question: "What actions can the AI perform? (create ticket, send email, update record)", whyItMatters: "Lists available tools and determines action scope", required: false },
905
+ { category: "Actions", question: "Which actions require human approval?", whyItMatters: "Requires general_hitl with success/failure paths", required: false },
906
+ { category: "Validation", question: "What validations must be performed? (compliance, rules, thresholds)", whyItMatters: "May need rule_validation_with_documents or response_validator", required: false },
907
+ { category: "Output", question: "What should the success response look like?", whyItMatters: "Determines call_llm output formatting and response structure", required: true },
908
+ { category: "Output", question: "What should the error/exception response look like?", whyItMatters: "Needs explicit error handling paths and fallback responses", required: false },
909
+ { category: "Output", question: "Should responses include citations/sources?", whyItMatters: "Enable use_citation_based_filtering for trust", required: false },
910
+ // Round 3: Voice AI Specific
911
+ { category: "Voice", question: "What is the welcome message when the call connects?", whyItMatters: "Required for welcomeMessage persona field", required: false },
912
+ { category: "Voice", question: "What should the AI's identity and purpose be?", whyItMatters: "Required for identityAndPurpose - defines role and responsibilities", required: false },
913
+ { category: "Voice", question: "What actions can the voice AI perform? (with trigger conditions and parameters)", whyItMatters: "Required for takeActionInstructions in </Case N> format", required: false },
914
+ { category: "Voice", question: "When should the AI hang up?", whyItMatters: "Required for hangupInstructions", required: false },
915
+ { category: "Voice", question: "When should the call transfer to a human?", whyItMatters: "Required for transferCallInstructions", required: false },
916
+ { category: "Voice", question: "What is the wait message while processing?", whyItMatters: "Required for waitMessage - keeps caller informed during delays", required: false },
917
+ { category: "Voice", question: "Any speech characteristics? (pace, tone, pronunciation rules)", whyItMatters: "Affects speechCharacteristics for natural conversation", required: false },
918
+ // Guardrails
919
+ { category: "Guardrails", question: "What should the AI NEVER do?", whyItMatters: "Defines guardrails, abstain conditions, and out-of-scope handling", required: false },
920
+ { category: "Guardrails", question: "What PII/sensitive data handling is required?", whyItMatters: "Affects data_protection_config and protectedClasses", required: false },
921
+ { category: "Guardrails", question: "Are there compliance requirements? (regulations, policies)", whyItMatters: "May need response_validator or compliance_document_analyzer", required: false },
922
+ ];
923
+ // ─────────────────────────────────────────────────────────────────────────────
924
+ // Voice AI Templates
925
+ // ─────────────────────────────────────────────────────────────────────────────
926
+ export const VOICE_PERSONA_TEMPLATE = {
927
+ welcomeMessage: "Hello, thank you for calling {Company}. This is {AI Name}. How can I help you today?",
928
+ identityAndPurpose: `You are {AI Name}, a Voice AI assistant for {Company}.
929
+
930
+ Primary responsibility: {Main purpose}
931
+
932
+ Your responsibilities:
933
+ 1. {Responsibility 1}
934
+ 2. {Responsibility 2}
935
+ 3. {Responsibility 3}
936
+
937
+ Rules:
938
+ - {Rule 1}
939
+ - {Rule 2}
940
+ - Always be professional and helpful
941
+ - Never provide information you're not certain about`,
942
+ takeActionInstructions: `</Case 1>
943
+ {Action 1 Name}
944
+
945
+ Trigger When:
946
+ {Condition that triggers this action}
947
+
948
+ Intent for tool call: "{Tool Intent Name}"
949
+
950
+ Required parameters:
951
+ { "{param1}": "", "{param2}": "" }
952
+ </Case 1>
953
+
954
+ </Case 2>
955
+ {Action 2 Name}
956
+
957
+ Trigger When:
958
+ {Condition that triggers this action}
959
+
960
+ Intent for tool call: "{Tool Intent Name}"
961
+
962
+ Required parameters:
963
+ { "{param1}": "" }
964
+ </Case 2>`,
965
+ hangupInstructions: `End the call when:
966
+ - The caller explicitly says goodbye or asks to hang up
967
+ - The caller confirms they have no more questions
968
+ - {Additional hangup conditions}
969
+
970
+ Before ending:
971
+ - Confirm all issues are resolved
972
+ - Offer any follow-up information
973
+ - Thank the caller`,
974
+ transferCallInstructions: `Transfer the call when:
975
+ - The caller explicitly requests a human agent
976
+ - The issue is beyond AI capabilities
977
+ - {Complex scenario requiring human}
978
+
979
+ Before transferring:
980
+ - Inform the caller you're transferring them
981
+ - Summarize the issue for the human agent`,
982
+ speechCharacteristics: `**Conversational Style:**
983
+ - Keep responses brief (2-3 sentences per turn)
984
+ - Use warm, professional tone
985
+ - Speak clearly at moderate pace
986
+
987
+ **Natural Speech:**
988
+ - Use brief pauses between sentences
989
+ - Acknowledge with 'I understand', 'Of course', 'Certainly'
990
+ - Avoid robotic language
991
+
992
+ **TTS Pronunciation Rules:**
993
+ - Spell out IDs: 'A-B-C-1-2-3'
994
+ - Pause between numbers: 'Your code is... 1... 2... 3... 4'
995
+ - {Domain-specific pronunciation rules}`,
996
+ systemPrompt: `Tool Calling Instructions:
997
+
998
+ 1. Always collect ALL required parameters before calling a tool
999
+ 2. Confirm parameter values with the caller before executing
1000
+ 3. Wait for tool response before calling another tool
1001
+ 4. NEVER mention tool names to the user
1002
+ 5. NEVER guess parameter values - always ask
1003
+ 6. Use plain language: "Let me look that up" not "Calling the API"
1004
+ 7. Handle delays with wait message: "One moment while I check..."
1005
+ 8. Handle errors gracefully, offer alternatives`,
1006
+ waitMessage: "One moment while I look that up for you...",
1007
+ };
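+ // Illustrative use of the template (the company and AI name values are hypothetical):
+ //   const persona = {
+ //     ...VOICE_PERSONA_TEMPLATE,
+ //     welcomeMessage: VOICE_PERSONA_TEMPLATE.welcomeMessage
+ //       .replace("{Company}", "Acme Corp")
+ //       .replace("{AI Name}", "Ava"),
+ //   };
+ //   // The remaining {placeholders} in identityAndPurpose, takeActionInstructions, etc.
+ //   // should be filled in the same way before the persona is deployed.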
1008
+ // ─────────────────────────────────────────────────────────────────────────────
1009
+ // Common Mistakes & Debugging
1010
+ // ─────────────────────────────────────────────────────────────────────────────
1011
+ export const COMMON_MISTAKES = [
1012
+ { mistake: "Not testing with edge cases", problem: "Unexpected failures in production", solution: "Test with empty inputs, long inputs, multilingual inputs" },
1013
+ { mistake: "Overloading workflows", problem: "Slow execution, timeouts", solution: "Keep workflows focused; split complex ones into multiple AI Employees" },
1014
+ { mistake: "Missing HITL on sensitive actions", problem: "Unintended actions taken with external side effects", solution: "Add HITL for any action with external side effects" },
1015
+ { mistake: "Vague agent instructions", problem: "Inconsistent responses", solution: "Be specific about tone, format, boundaries in instructions" },
1016
+ { mistake: "Not citing sources", problem: "Trust issues with users", solution: "Enable use_citation_based_filtering" },
1017
+ { mistake: "Ignoring confidence scores", problem: "Bad answers served confidently", solution: "Implement confidence thresholds and abstain when uncertain" },
1018
+ { mistake: "Creating duplicate records on follow-ups", problem: "Multiple tickets/records for same issue", solution: "Check chat_conversation for existing records before creating new ones" },
1019
+ { mistake: "Missing Fallback category", problem: "Workflow validation fails, unhandled intents", solution: "ALWAYS include Fallback category in every categorizer" },
1020
+ { mistake: "Type mismatches in connections", problem: "Validation errors, runtime failures", solution: "Check type compatibility: use conversation_to_search_query when needed" },
1021
+ { mistake: "Not mapping to WORKFLOW_OUTPUT", problem: "Responses don't reach user", solution: "Ensure ALL paths terminate at WORKFLOW_OUTPUT" },
1022
+ ];
1023
+ export const DEBUG_CHECKLIST = [
1024
+ { step: 1, action: "Check Status", description: "Is the AI Employee active/ready?", apiField: "status" },
1025
+ { step: 2, action: "Review Trigger", description: "Is the trigger correctly configured?", apiField: "trigger_type" },
1026
+ { step: 3, action: "Trace Execution", description: "Where does the workflow fail? Check execution logs" },
1027
+ { step: 4, action: "Inspect Inputs", description: "Are inputs reaching agents correctly?" },
1028
+ { step: 5, action: "Check Integrations", description: "Are connectors authenticated and working?" },
1029
+ { step: 6, action: "Review Logs", description: "What errors appear in execution trace?" },
1030
+ { step: 7, action: "Test Isolation", description: "Does the agent work in isolation?" },
1031
+ { step: 8, action: "Verify Categorizer", description: "Does categorizer have all category edges including Fallback?" },
1032
+ { step: 9, action: "Check WORKFLOW_OUTPUT", description: "Do all paths lead to WORKFLOW_OUTPUT?" },
1033
+ { step: 10, action: "Validate Types", description: "Are all connections type-compatible?" },
1034
+ ];
1035
+ // ─────────────────────────────────────────────────────────────────────────────
1036
+ // Guidance Topics
1037
+ // ─────────────────────────────────────────────────────────────────────────────
1038
+ export const GUIDANCE_TOPICS = {
1039
+ "categorizer-routing": {
1040
+ title: "Categorizer Routing Best Practices",
1041
+ content: "Every categorizer must have runIf conditions for each category. The runIf compares output 'category' to enumValue. Always include Fallback.",
1042
+ examples: [
1043
+ "runIf: {lhs: {actionOutput: {actionName: 'chat_categorizer', output: 'category'}, autoDetectedBinding: false}, operator: 1, rhs: {inline: {enumValue: 'Market_Impact'}, autoDetectedBinding: false}}",
1044
+ "runIf: {lhs: {actionOutput: {actionName: 'chat_categorizer', output: 'category'}, autoDetectedBinding: false}, operator: 1, rhs: {inline: {enumValue: 'Fallback'}, autoDetectedBinding: false}}",
1045
+ ],
1046
+ criticalRules: [
1047
+ "MUST have runIf condition for each category",
1048
+ "runIf format: output='category', operator=1 (numeric), enumValue='<CategoryName>'",
1049
+ "DO NOT use 'category_<Name>' as output - use 'category' and compare to enum value",
1050
+ "ALWAYS include Fallback category",
1051
+ "Create handler node with runIf for EACH category",
1052
+ ],
1053
+ },
1054
+ "type-compatibility": {
1055
+ title: "Type Compatibility Rules",
1056
+ content: "Auto Builder validates type compatibility. Common mistake: connecting chat_conversation directly to search.query.",
1057
+ examples: [
1058
+ "✅ trigger.user_query → search.query (TEXT_WITH_SOURCES → TEXT_WITH_SOURCES)",
1059
+ "❌ trigger.chat_conversation → search.query (CHAT_CONVERSATION ≠ TEXT_WITH_SOURCES)",
1060
+ "✅ search.search_results → respond_with_sources.search_results (SEARCH_RESULT → SEARCH_RESULT)",
1061
+ "❌ search.search_results → call_llm.query (SEARCH_RESULT ≠ TEXT_WITH_SOURCES)",
1062
+ "✅ search.search_results → call_llm.named_inputs_* (SEARCH_RESULT → ANY)",
1063
+ ],
1064
+ criticalRules: [
1065
+ "CHAT_CONVERSATION only compatible with chat_categorizer.conversation",
1066
+ "SEARCH_RESULT only compatible with respond_with_sources.search_results or named_inputs",
1067
+ "call_llm.named_inputs accepts ANY type",
1068
+ "Use conversation_to_search_query to convert CHAT_CONVERSATION → TEXT_WITH_SOURCES",
1069
+ ],
1070
+ },
1071
+ "named-inputs": {
1072
+ title: "Named Inputs Pattern",
1073
+ content: "When connecting to call_llm.named_inputs, use suffix: named_inputs_<Descriptive_Name>. The name appears as a label in the workflow UI.",
1074
+ examples: [
1075
+ "named_inputs_Market_Context",
1076
+ "named_inputs_Client_Data",
1077
+ "named_inputs_Tool_Result",
1078
+ "named_inputs_Web_Search_Results",
1079
+ "named_inputs_Validation_Output",
1080
+ ],
1081
+ },
1082
+ "hitl-patterns": {
1083
+ title: "Human-in-the-Loop Patterns",
1084
+ content: "HITL nodes MUST have both success and failure paths. Both paths must lead to valid response nodes and terminate at WORKFLOW_OUTPUT. NOTE: Output names use SPACE not underscore: 'HITL Success' not 'HITL_Success'.",
1085
+ examples: [
1086
+ "general_hitl.'hitl_status_HITL Success' → approved_response.trigger_when",
1087
+ "general_hitl.'hitl_status_HITL Failure' → rejected_response.trigger_when",
1088
+ ],
1089
+ criticalRules: [
1090
+ "MUST define 'hitl_status_HITL Success' path (note: space, not underscore)",
1091
+ "MUST define 'hitl_status_HITL Failure' path (note: space, not underscore)",
1092
+ "Both paths must terminate at WORKFLOW_OUTPUT",
1093
+ "Use for any action with external side effects",
1094
+ "Unlike regular categorizers, NO Fallback category",
1095
+ ],
1096
+ },
1097
+ "multi-source-search": {
1098
+ title: "Multi-Source Search Architecture",
1099
+ content: "For comprehensive answers, combine local search with web search. Use combine_search_results to merge and deduplicate.",
1100
+ examples: [
1101
+ "search (local) + live_web_search → combine_search_results → respond_with_sources",
1102
+ "For cross-document linking: entity_extraction → knowledge_graph_generator → document_synthesis",
1103
+ ],
1104
+ criticalRules: [
1105
+ "Use entity extraction when cross-document linking is needed",
1106
+ "Use LLM resolution for single conversational queries",
1107
+ "Entity extraction requires schema definition",
1108
+ ],
1109
+ },
1110
+ "voice-tool-calling": {
1111
+ title: "Voice AI Tool Calling Rules",
1112
+ content: "Voice AI has specific rules for tool calling to ensure natural conversation flow.",
1113
+ criticalRules: [
1114
+ "Collect ALL required parameters before calling a tool",
1115
+ "Wait for response before calling another tool",
1116
+ "NEVER mention tool names to the user",
1117
+ "NEVER guess parameter values - ask the user",
1118
+ "Use plain language: 'Let me look that up' not 'Calling search API'",
1119
+ "Handle delays with waitMessage: 'One moment while I check...'",
1120
+ "Handle errors gracefully, offer alternatives",
1121
+ ],
1122
+ },
1123
+ "guardrails": {
1124
+ title: "Implementing Guardrails",
1125
+ content: "Use response_validator to check LLM output before sending to user. Use abstain_action when the AI should decline to answer.",
1126
+ examples: [
1127
+ "call_llm → response_validator → (valid) → WORKFLOW_OUTPUT",
1128
+ "call_llm → response_validator → (invalid) → abstain_action → WORKFLOW_OUTPUT",
1129
+ ],
1130
+ criticalRules: [
1131
+ "Enable use_citation_based_filtering for trust",
1132
+ "Implement confidence thresholds",
1133
+ "Define clear abstain conditions",
1134
+ ],
1135
+ },
1136
+ "output-mapping": {
1137
+ title: "Output Mapping to WORKFLOW_OUTPUT",
1138
+ content: "ALL paths must eventually lead to WORKFLOW_OUTPUT. Every response node should have an edge to WORKFLOW_OUTPUT.",
1139
+ examples: [
1140
+ "respond_market.response_with_sources → WORKFLOW_OUTPUT",
1141
+ "fallback_response.response_with_sources → WORKFLOW_OUTPUT",
1142
+ "approved_response.response → WORKFLOW_OUTPUT",
1143
+ "rejected_response.response → WORKFLOW_OUTPUT",
1144
+ ],
1145
+ },
1146
+ "workflow-execution": {
1147
+ title: "Workflow Execution Model",
1148
+ content: "CRITICAL: Each user_query triggers a NEW workflow execution. The workflow runs ONCE per user message, not once per conversation.",
1149
+ examples: [
1150
+ "❌ User: 'Create ticket' → creates ticket; User: 'Add my phone' → creates ANOTHER ticket",
1151
+ "✅ Check chat_conversation for existing ticket, route to 'update' instead of 'create'",
1152
+ ],
1153
+ criticalRules: [
1154
+ "Each user message triggers new workflow execution",
1155
+ "chat_conversation accumulates; user_query is current message only",
1156
+ "Check context before creating records to avoid duplicates",
1157
+ "Use runIf conditions to skip redundant operations",
1158
+ ],
1159
+ },
1160
+ "conversation-vs-query": {
1161
+ title: "Conversation vs Query Usage",
1162
+ content: "Use user_query for current message, chat_conversation for full history, conversation_summarizer for controlled context.",
1163
+ examples: [
1164
+ "user_query: Simple queries where history doesn't matter",
1165
+ "chat_conversation: When you need full context",
1166
+ "conversation_summarizer: Long conversations, context window management",
1167
+ ],
1168
+ criticalRules: [
1169
+ "user_query = current message only (TEXT_WITH_SOURCES)",
1170
+ "chat_conversation = all history (CHAT_CONVERSATION)",
1171
+ "conversation_summarizer may still be needed for downstream agent format requirements",
1172
+ ],
1173
+ },
1174
+ };
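+ // Illustrative lookup (keys are the literal topic ids defined above):
+ //   const topic = GUIDANCE_TOPICS["hitl-patterns"];
+ //   console.log(topic.title);                      // "Human-in-the-Loop Patterns"
+ //   topic.criticalRules.forEach(r => console.log("-", r));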
1175
+ // ─────────────────────────────────────────────────────────────────────────────
1176
+ // Helper Functions
1177
+ // ─────────────────────────────────────────────────────────────────────────────
1178
+ export function getAgentsByCategory(category) {
1179
+ return AGENT_CATALOG.filter(a => a.category === category);
1180
+ }
1181
+ export function getAgentByName(actionName) {
1182
+ return AGENT_CATALOG.find(a => a.actionName === actionName || a.actionName === actionName.toLowerCase());
1183
+ }
1184
+ export function getWidgetsForPersonaType(type) {
1185
+ return WIDGET_CATALOG.filter(w => w.requiredFor.includes(type));
1186
+ }
1187
+ export function checkTypeCompatibility(sourceType, targetType) {
1188
+ return TYPE_COMPATIBILITY.find(t => t.sourceType === sourceType && t.targetType === targetType);
1189
+ }
1190
+ export function getQualifyingQuestionsByCategory(category) {
1191
+ return QUALIFYING_QUESTIONS.filter(q => q.category.toLowerCase() === category.toLowerCase());
1192
+ }
1193
+ export function getRequiredQualifyingQuestions() {
1194
+ return QUALIFYING_QUESTIONS.filter(q => q.required);
1195
+ }
1196
+ export function getConceptByTerm(term) {
1197
+ return PLATFORM_CONCEPTS.find(c => c.term.toLowerCase() === term.toLowerCase() ||
1198
+ c.aliases?.some(a => a.toLowerCase() === term.toLowerCase()));
1199
+ }
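+ // Illustrative lookups against the catalogs defined earlier in this module
+ // (the example category and action names are assumptions about AGENT_CATALOG contents):
+ //   getAgentsByCategory("Search");        // agents whose category === "Search"
+ //   getAgentByName("chat_categorizer");   // single catalog entry, or undefined
+ //   getRequiredQualifyingQuestions();     // QUALIFYING_QUESTIONS entries with required: true
+ //   getConceptByTerm("persona");          // matches term or alias, case-insensitively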
1200
+ export function suggestAgentsForUseCase(useCase) {
1201
+ const keywords = useCase.toLowerCase();
1202
+ const suggestions = [];
1203
+ const added = new Set();
1204
+ const addIfNotPresent = (name) => {
1205
+ if (!added.has(name)) {
1206
+ const agent = getAgentByName(name);
1207
+ if (agent) {
1208
+ suggestions.push(agent);
1209
+ added.add(name);
1210
+ }
1211
+ }
1212
+ };
1213
+ // Always include trigger
1214
+ addIfNotPresent("chat_trigger");
1215
+ // Routing
1216
+ if (keywords.includes("intent") || keywords.includes("route") || keywords.includes("categor") || keywords.includes("multi") || keywords.includes("different")) {
1217
+ addIfNotPresent("chat_categorizer");
1218
+ }
1219
+ // Search
1220
+ if (keywords.includes("search") || keywords.includes("document") || keywords.includes("knowledge") || keywords.includes("faq") || keywords.includes("lookup") || keywords.includes("find")) {
1221
+ addIfNotPresent("conversation_to_search_query");
1222
+ addIfNotPresent("search");
1223
+ addIfNotPresent("respond_with_sources");
1224
+ }
1225
+ // Web search
1226
+ if (keywords.includes("web") || keywords.includes("real-time") || keywords.includes("current") || keywords.includes("live")) {
1227
+ addIfNotPresent("live_web_search");
1228
+ if (added.has("search")) {
1229
+ addIfNotPresent("combine_search_results");
1230
+ }
1231
+ }
1232
+ // External tools
1233
+ if (keywords.includes("ticket") || keywords.includes("servicenow") || keywords.includes("salesforce") || keywords.includes("api") || keywords.includes("create") || keywords.includes("update") || keywords.includes("crm") || keywords.includes("email")) {
1234
+ addIfNotPresent("external_action_caller");
1235
+ }
1236
+ // HITL
1237
+ if (keywords.includes("approval") || keywords.includes("review") || keywords.includes("human") || keywords.includes("sensitive") || keywords.includes("verify")) {
1238
+ addIfNotPresent("general_hitl");
1239
+ }
1240
+ // Compliance/Validation
1241
+ if (keywords.includes("compliance") || keywords.includes("validate") || keywords.includes("rule") || keywords.includes("guardrail")) {
1242
+ addIfNotPresent("response_validator");
1243
+ }
1244
+ // Entity extraction
1245
+ if (keywords.includes("extract") || keywords.includes("entity") || keywords.includes("structure")) {
1246
+ addIfNotPresent("entity_extraction_with_documents");
1247
+ }
1248
+ // Voice specific
1249
+ if (keywords.includes("voice") || keywords.includes("phone") || keywords.includes("call")) {
1250
+ suggestions[0] = getAgentByName("voice_trigger") || suggestions[0];
1251
+ }
1252
+ // Document processing
1253
+ if (keywords.includes("document") || keywords.includes("upload") || keywords.includes("process") || keywords.includes("invoice") || keywords.includes("contract")) {
1254
+ if (!added.has("chat_trigger")) {
1255
+ suggestions[0] = getAgentByName("document_trigger") || suggestions[0];
1256
+ }
1257
+ addIfNotPresent("entity_extraction_with_documents");
1258
+ }
1259
+ // Default response if no response agent added
1260
+ if (!added.has("respond_with_sources") && !added.has("call_llm")) {
1261
+ addIfNotPresent("call_llm");
1262
+ }
1263
+ return suggestions;
1264
+ }
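+ // Illustrative call (assumes the listed action names exist in AGENT_CATALOG):
+ //   const agents = suggestAgentsForUseCase("Answer FAQs from the knowledge base");
+ //   // "faq"/"knowledge" match the search branch, so the suggestions would be:
+ //   // chat_trigger, conversation_to_search_query, search, respond_with_sources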
1265
+ export function validateWorkflowPrompt(prompt) {
1266
+ const lowerPrompt = prompt.toLowerCase();
1267
+ const issues = [];
1268
+ const warnings = [];
1269
+ // Check for categorizer without fallback
1270
+ if ((lowerPrompt.includes("categoriz") || lowerPrompt.includes("classif") || lowerPrompt.includes("route")) &&
1271
+ !lowerPrompt.includes("fallback")) {
1272
+ issues.push("Missing Fallback category - every categorizer MUST have a Fallback");
1273
+ }
1274
+ // Check for HITL without both paths
1275
+ if (lowerPrompt.includes("hitl") || lowerPrompt.includes("human collaboration") || lowerPrompt.includes("approval")) {
1276
+ if (!lowerPrompt.includes("success") || !lowerPrompt.includes("fail")) {
1277
+ issues.push("HITL node requires both success AND failure paths");
1278
+ }
1279
+ }
1280
+ // Check for output mapping
1281
+ if (!lowerPrompt.includes("workflow_output") && !lowerPrompt.includes("output")) {
1282
+ warnings.push("No explicit WORKFLOW_OUTPUT mapping found - ensure all paths lead to output");
1283
+ }
1284
+ // Check for trigger
1285
+ if (!lowerPrompt.includes("trigger") && !lowerPrompt.includes("chat") && !lowerPrompt.includes("voice") && !lowerPrompt.includes("document")) {
1286
+ issues.push("No trigger type specified - include chat_trigger, voice_trigger, or document_trigger");
1287
+ }
1288
+ // Check for common type issues
1289
+ if (lowerPrompt.includes("chat_conversation") && lowerPrompt.includes("search.query")) {
1290
+ warnings.push("Potential type mismatch: chat_conversation → search.query. Use conversation_to_search_query first.");
1291
+ }
1292
+ // Check for persona type
1293
+ if (!lowerPrompt.includes("voice ai") && !lowerPrompt.includes("chat ai") && !lowerPrompt.includes("dashboard ai") && !lowerPrompt.includes("persona type")) {
1294
+ warnings.push("No persona type specified - should specify Voice AI, Chat AI, or Dashboard AI");
1295
+ }
1296
+ return {
1297
+ valid: issues.length === 0,
1298
+ issues,
1299
+ warnings,
1300
+ };
1301
+ }
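+ // Illustrative check: a prompt that routes intents but never mentions Fallback.
+ //   const result = validateWorkflowPrompt(
+ //     "Chat AI with chat_trigger. Categorize requests into Billing and Support, " +
+ //     "then respond and map every path to WORKFLOW_OUTPUT."
+ //   );
+ //   // result.valid  → false
+ //   // result.issues → ["Missing Fallback category - every categorizer MUST have a Fallback"]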
1302
+ // ─────────────────────────────────────────────────────────────────────────────
1303
+ // Workflow Analysis Functions
1304
+ // ─────────────────────────────────────────────────────────────────────────────
1305
+ // Import validation rules from single source of truth
1306
+ import { INPUT_SOURCE_RULES as VALIDATION_INPUT_RULES, ANTI_PATTERNS, OPTIMIZATION_RULES, findInputSourceRule, findAntiPatternByIssueType, generateMarkdownDocumentation, exportRulesAsJSON, } from "./validation-rules.js";
1307
+ // Re-export for consumers who want access to rules
1308
+ export { VALIDATION_INPUT_RULES, ANTI_PATTERNS, OPTIMIZATION_RULES, findInputSourceRule, findAntiPatternByIssueType, generateMarkdownDocumentation, exportRulesAsJSON, };
1309
+ /**
1310
+ * Parse a workflow_def object (from persona.workflow_def) into normalized nodes
1311
+ * Supports multiple formats:
1312
+ * - Ema format: { actions: [...], results: {...} }
1313
+ * - Generic format: { nodes: [...] }
1314
+ * - Object format: { nodeId: { action_name: ..., incoming_edges: [...] } }
1315
+ */
1316
+ export function parseWorkflowDef(workflowDef) {
1317
+ if (!workflowDef || typeof workflowDef !== "object") {
1318
+ return [];
1319
+ }
1320
+ const nodes = [];
1321
+ const def = workflowDef;
1322
+ // Handle Ema format: { actions: [...] } - This is the primary format from Ema platform
1323
+ if (Array.isArray(def.actions)) {
1324
+ for (const action of def.actions) {
1325
+ if (action && typeof action === "object") {
1326
+ // Ema action structure:
1327
+ // { name: "nodeId", action: { name: { namespaces: [...], name: "action_name" } }, inputs: {...}, displaySettings: {...} }
1328
+ const actionName = action.name;
1329
+ const actionDef = action.action;
1330
+ const actionType = actionDef?.name;
1331
+ const displaySettings = action.displaySettings;
1332
+ // Extract action type name (handles nested { namespaces: [], name: "..." } format)
1333
+ let actionTypeName;
1334
+ if (typeof actionType === "string") {
1335
+ actionTypeName = actionType;
1336
+ }
1337
+ else if (actionType && typeof actionType === "object") {
1338
+ actionTypeName = actionType.name;
1339
+ }
1340
+ // Convert Ema inputs to incoming_edges format
1341
+ const inputs = action.inputs;
1342
+ const incomingEdges = [];
1343
+ if (inputs) {
1344
+ for (const [inputName, inputValue] of Object.entries(inputs)) {
1345
+ if (inputValue && typeof inputValue === "object") {
1346
+ const inputObj = inputValue;
1347
+ // Handle actionOutput reference
1348
+ if (inputObj.actionOutput) {
1349
+ const actionOutput = inputObj.actionOutput;
1350
+ incomingEdges.push({
1351
+ source_node_id: actionOutput.actionName,
1352
+ source_output: actionOutput.output,
1353
+ target_input: inputName,
1354
+ });
1355
+ }
1356
+ // Handle multiBinding (multiple inputs)
1357
+ if (inputObj.multiBinding) {
1358
+ const multiBinding = inputObj.multiBinding;
1359
+ const elements = multiBinding.elements;
1360
+ if (elements) {
1361
+ for (const elem of elements) {
1362
+ if (elem.namedBinding) {
1363
+ const namedBinding = elem.namedBinding;
1364
+ const value = namedBinding.value;
1365
+ if (value?.actionOutput) {
1366
+ const actionOutput = value.actionOutput;
1367
+ incomingEdges.push({
1368
+ source_node_id: actionOutput.actionName,
1369
+ source_output: actionOutput.output,
1370
+ target_input: `${inputName}_${namedBinding.name}`,
1371
+ });
1372
+ }
1373
+ }
1374
+ else if (elem.actionOutput) {
1375
+ const actionOutput = elem.actionOutput;
1376
+ incomingEdges.push({
1377
+ source_node_id: actionOutput.actionName,
1378
+ source_output: actionOutput.output,
1379
+ target_input: inputName,
1380
+ });
1381
+ }
1382
+ }
1383
+ }
1384
+ }
1385
+ }
1386
+ }
1387
+ }
1388
+ nodes.push({
1389
+ id: actionName,
1390
+ action_name: actionTypeName,
1391
+ display_name: displaySettings?.displayName,
1392
+ incoming_edges: incomingEdges.length > 0 ? incomingEdges : undefined,
1393
+ parameters: action.inputs,
1394
+ runIf: action.runIf,
1395
+ });
1396
+ }
1397
+ }
1398
+ return nodes;
1399
+ }
1400
+ // Handle generic array of nodes format: { nodes: [...] }
1401
+ if (Array.isArray(def.nodes)) {
1402
+ for (const node of def.nodes) {
1403
+ if (node && typeof node === "object") {
1404
+ nodes.push(node);
1405
+ }
1406
+ }
1407
+ return nodes;
1408
+ }
1409
+ // Handle object format where keys are node IDs
1410
+ for (const [key, value] of Object.entries(def)) {
1411
+ if (value && typeof value === "object" && !Array.isArray(value)) {
1412
+ const nodeObj = value;
1413
+ // Check if this looks like a node (has action_name or incoming_edges)
1414
+ if (nodeObj.action_name || nodeObj.incoming_edges || nodeObj.action) {
1415
+ nodes.push({
1416
+ id: key,
1417
+ action_name: nodeObj.action_name ?? nodeObj.action?.name,
1418
+ display_name: nodeObj.display_name,
1419
+ incoming_edges: nodeObj.incoming_edges,
1420
+ parameters: nodeObj.parameters,
1421
+ runIf: nodeObj.runIf,
1422
+ });
1423
+ }
1424
+ }
1425
+ }
1426
+ return nodes;
1427
+ }
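+ // Illustrative parse of a minimal Ema-format workflow_def (node and output names are hypothetical):
+ //   const nodes = parseWorkflowDef({
+ //     actions: [
+ //       { name: "trigger", action: { name: { namespaces: [], name: "chat_trigger" } }, inputs: {} },
+ //       { name: "search_kb", action: { name: { namespaces: [], name: "search" } },
+ //         inputs: { query: { actionOutput: { actionName: "trigger", output: "user_query" } } } },
+ //     ],
+ //   });
+ //   // nodes[1].incoming_edges →
+ //   //   [{ source_node_id: "trigger", source_output: "user_query", target_input: "query" }]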
1428
+ /**
1429
+ * Build an adjacency list from workflow nodes
1430
+ */
1431
+ function buildAdjacencyList(nodes) {
1432
+ const adj = new Map();
1433
+ // Initialize all nodes
1434
+ for (const node of nodes) {
1435
+ if (!adj.has(node.id)) {
1436
+ adj.set(node.id, new Set());
1437
+ }
1438
+ }
1439
+ // Build edges from incoming_edges (reverse direction for adjacency)
1440
+ for (const node of nodes) {
1441
+ if (node.incoming_edges) {
1442
+ for (const edge of node.incoming_edges) {
1443
+ const sourceId = edge.source_node_id;
1444
+ if (!adj.has(sourceId)) {
1445
+ adj.set(sourceId, new Set());
1446
+ }
1447
+ adj.get(sourceId).add(node.id);
1448
+ }
1449
+ }
1450
+ }
1451
+ return adj;
1452
+ }
1453
+ /**
1454
+ * Detect cycles in the workflow graph using DFS
1455
+ */
1456
+ function detectCycles(nodes) {
1457
+ const issues = [];
1458
+ const adj = buildAdjacencyList(nodes);
1459
+ const visited = new Set();
1460
+ const recStack = new Set();
1461
+ const cycleNodes = [];
1462
+ function dfs(nodeId, path) {
1463
+ visited.add(nodeId);
1464
+ recStack.add(nodeId);
1465
+ const neighbors = adj.get(nodeId) ?? new Set();
1466
+ for (const neighbor of neighbors) {
1467
+ if (!visited.has(neighbor)) {
1468
+ if (dfs(neighbor, [...path, nodeId])) {
1469
+ return true;
1470
+ }
1471
+ }
1472
+ else if (recStack.has(neighbor)) {
1473
+ // Found cycle
1474
+ const cycleStart = path.indexOf(neighbor);
1475
+ if (cycleStart >= 0) {
1476
+ cycleNodes.push(...path.slice(cycleStart), nodeId);
1477
+ }
1478
+ else {
1479
+ cycleNodes.push(neighbor, nodeId);
1480
+ }
1481
+ return true;
1482
+ }
1483
+ }
1484
+ recStack.delete(nodeId);
1485
+ return false;
1486
+ }
1487
+ for (const node of nodes) {
1488
+ if (!visited.has(node.id)) {
1489
+ if (dfs(node.id, [])) {
1490
+ issues.push({
1491
+ type: "cycle",
1492
+ severity: "critical",
1493
+ nodes: [...new Set(cycleNodes)],
1494
+ reason: `Circular dependency detected: ${cycleNodes.join(" → ")}`,
1495
+ });
1496
+ break; // One cycle is enough to report
1497
+ }
1498
+ }
1499
+ }
1500
+ return issues;
1501
+ }
1502
+ /**
1503
+ * Detect orphan nodes (not reachable from trigger)
1504
+ */
1505
+ function detectOrphanNodes(nodes) {
1506
+ const issues = [];
1507
+ const adj = buildAdjacencyList(nodes);
1508
+ // Find trigger node
1509
+ const triggerNode = nodes.find(n => n.action_name?.includes("trigger") ||
1510
+ n.id === "trigger" ||
1511
+ (n.id && n.id.includes("trigger")));
1512
+ if (!triggerNode) {
1513
+ return []; // Can't detect orphans without trigger
1514
+ }
1515
+ // BFS from trigger
1516
+ const reachable = new Set();
1517
+ const queue = [triggerNode.id];
1518
+ reachable.add(triggerNode.id);
1519
+ while (queue.length > 0) {
1520
+ const current = queue.shift();
1521
+ const neighbors = adj.get(current) ?? new Set();
1522
+ for (const neighbor of neighbors) {
1523
+ if (!reachable.has(neighbor)) {
1524
+ reachable.add(neighbor);
1525
+ queue.push(neighbor);
1526
+ }
1527
+ }
1528
+ }
1529
+ // Find unreachable nodes
1530
+ for (const node of nodes) {
1531
+ if (!reachable.has(node.id) && node.id !== "WORKFLOW_OUTPUT") {
1532
+ issues.push({
1533
+ type: "orphan",
1534
+ severity: "warning",
1535
+ node: node.id,
1536
+ reason: `Node "${node.id}" is not reachable from trigger`,
1537
+ });
1538
+ }
1539
+ }
1540
+ return issues;
1541
+ }
1542
+ /**
1543
+ * Detect dead-end nodes (nodes that don't lead to WORKFLOW_OUTPUT)
1544
+ */
1545
+ function detectDeadEnds(nodes) {
1546
+ const issues = [];
1547
+ // Build reverse adjacency (target -> sources)
1548
+ const reverseAdj = new Map();
1549
+ for (const node of nodes) {
1550
+ if (!reverseAdj.has(node.id)) {
1551
+ reverseAdj.set(node.id, new Set());
1552
+ }
1553
+ if (node.incoming_edges) {
1554
+ for (const edge of node.incoming_edges) {
1555
+ if (!reverseAdj.has(edge.source_node_id)) {
1556
+ reverseAdj.set(edge.source_node_id, new Set());
1557
+ }
1558
+ reverseAdj.get(node.id).add(edge.source_node_id);
1559
+ }
1560
+ }
1561
+ }
1562
+ // Find WORKFLOW_OUTPUT or nodes that connect to it
1563
+ const outputNode = nodes.find(n => n.id === "WORKFLOW_OUTPUT" ||
1564
+ n.action_name === "WorkflowOutputSink");
1565
+ if (!outputNode) {
1566
+ issues.push({
1567
+ type: "missing_workflow_output",
1568
+ severity: "critical",
1569
+ reason: "No WORKFLOW_OUTPUT node found - workflow responses won't reach user",
1570
+ });
1571
+ return issues;
1572
+ }
1573
+ // BFS backwards from WORKFLOW_OUTPUT to find all nodes that can reach it
1574
+ const canReachOutput = new Set();
1575
+ const queue = [outputNode.id];
1576
+ canReachOutput.add(outputNode.id);
1577
+ // Also add nodes that have edges TO WORKFLOW_OUTPUT
1578
+ for (const node of nodes) {
1579
+ if (node.incoming_edges) {
1580
+ for (const edge of node.incoming_edges) {
1581
+ if (node.id === outputNode.id) {
1582
+ canReachOutput.add(edge.source_node_id);
1583
+ queue.push(edge.source_node_id);
1584
+ }
1585
+ }
1586
+ }
1587
+ }
1588
+ // Continue BFS
1589
+ while (queue.length > 0) {
1590
+ const current = queue.shift();
1591
+ const sources = reverseAdj.get(current) ?? new Set();
1592
+ for (const source of sources) {
1593
+ if (!canReachOutput.has(source)) {
1594
+ canReachOutput.add(source);
1595
+ queue.push(source);
1596
+ }
1597
+ }
1598
+ }
1599
+ // Find leaf nodes (no outgoing edges) that can't reach output
1600
+ const adj = buildAdjacencyList(nodes);
1601
+ for (const node of nodes) {
1602
+ const neighbors = adj.get(node.id) ?? new Set();
1603
+ if (neighbors.size === 0 &&
1604
+ node.id !== outputNode.id &&
1605
+ !node.id?.includes("trigger") &&
1606
+ !canReachOutput.has(node.id)) {
1607
+ issues.push({
1608
+ type: "dead_end",
1609
+ severity: "critical",
1610
+ node: node.id,
1611
+ missing: "WORKFLOW_OUTPUT connection",
1612
+ reason: `Node "${node.id}" doesn't lead to WORKFLOW_OUTPUT - responses won't reach user`,
1613
+ });
1614
+ }
1615
+ }
1616
+ return issues;
1617
+ }
1618
+ /**
1619
+ * Detect nodes whose outputs are not consumed by any other node
1620
+ * This catches anti-patterns like "combine_search_results but output not used"
1621
+ */
1622
+ function detectUnusedOutputs(nodes, workflowDef) {
1623
+ const issues = [];
1624
+ // Build set of all consumed node outputs
1625
+ const consumedOutputs = new Set(); // Format: "nodeId.outputName"
1626
+ for (const node of nodes) {
1627
+ if (node.incoming_edges) {
1628
+ for (const edge of node.incoming_edges) {
1629
+ consumedOutputs.add(`${edge.source_node_id}.${edge.source_output}`);
1630
+ // Also mark node as having at least one output consumed
1631
+ consumedOutputs.add(`${edge.source_node_id}.*`);
1632
+ }
1633
+ }
1634
+ }
1635
+ // Check results mapping - outputs that go to WORKFLOW_OUTPUT are consumed
1636
+ const def = workflowDef;
1637
+ const results = def?.results;
1638
+ if (results) {
1639
+ for (const [, result] of Object.entries(results)) {
1640
+ if (result?.actionName) {
1641
+ consumedOutputs.add(`${result.actionName}.${result.outputName ?? "*"}`);
1642
+ consumedOutputs.add(`${result.actionName}.*`);
1643
+ }
1644
+ }
1645
+ }
1646
+ // Nodes that produce output but should have downstream consumers
1647
+ // Especially combiners, generators, and transformers
1648
+ const NODES_THAT_PRODUCE_OUTPUT = [
1649
+ "combine_search_results",
1650
+ "personalized_content_generator",
1651
+ "generate_document",
1652
+ "json_mapper",
1653
+ "entity_extraction",
1654
+ "entity_extraction_with_documents",
1655
+ "conversation_summarizer",
1656
+ "custom_agent",
1657
+ "creative_ideation_agent",
1658
+ "response_validator",
1659
+ ];
1660
+ for (const node of nodes) {
1661
+ const actionName = node.action_name?.toLowerCase()?.replace(/_/g, "") ?? "";
1662
+ const nodeId = node.id ?? "";
1663
+ const nodeIdNormalized = nodeId.toLowerCase().replace(/_/g, "");
1664
+ // Skip trigger and output nodes
1665
+ if (nodeId.includes("trigger") || nodeId === "WORKFLOW_OUTPUT") {
1666
+ continue;
1667
+ }
1668
+ // Check if this is a node that should have its output consumed
1669
+ // Normalize both sides by removing underscores for comparison
1670
+ const shouldHaveOutputConsumed = NODES_THAT_PRODUCE_OUTPUT.some(name => {
1671
+ const normalized = name.replace(/_/g, "");
1672
+ return actionName.includes(normalized) || nodeIdNormalized.includes(normalized);
1673
+ });
1674
+ if (shouldHaveOutputConsumed) {
1675
+ // Check if any output from this node is consumed
1676
+ const isConsumed = consumedOutputs.has(`${nodeId}.*`);
1677
+ if (!isConsumed) {
1678
+ // Determine what the expected output would be
1679
+ let expectedOutput = "output";
1680
+ if (actionName.includes("combine")) {
1681
+ expectedOutput = "combined_results";
1682
+ }
1683
+ else if (actionName.includes("generate_document") || nodeId.toLowerCase().includes("generate_document")) {
1684
+ expectedOutput = "document_link";
1685
+ }
1686
+ else if (actionName.includes("entity_extraction") || nodeId.toLowerCase().includes("entity")) {
1687
+ expectedOutput = "extracted_entities";
1688
+ }
1689
+ else if (actionName.includes("json_mapper") || nodeId.toLowerCase().includes("mapper")) {
1690
+ expectedOutput = "mapped_output";
1691
+ }
1692
+ else if (actionName.includes("content_generator") || nodeId.toLowerCase().includes("content")) {
1693
+ expectedOutput = "generated_content";
1694
+ }
1695
+ issues.push({
1696
+ type: "unused_output",
1697
+ severity: "warning",
1698
+ node: nodeId,
1699
+ current: expectedOutput,
1700
+ reason: `Node "${nodeId}" produces output "${expectedOutput}" but it's not consumed by any downstream node. This node is doing work that goes nowhere.`,
1701
+ });
1702
+ }
1703
+ }
1704
+ }
1705
+ return issues;
1706
+ }
1707
+ /**
1708
+ * Detect categorizer issues (missing fallback, missing category edges)
1709
+ */
1710
+ function detectCategorizerIssues(nodes) {
1711
+ const issues = [];
1712
+ const categorizers = nodes.filter(n => n.action_name?.includes("categorizer") ||
1713
+ n.id?.includes("categorizer") ||
1714
+ n.id?.includes("classifier"));
1715
+ for (const categorizer of categorizers) {
1716
+ // Check if there are any outgoing edges (category routes)
1717
+ const adj = buildAdjacencyList(nodes);
1718
+ const outgoing = adj.get(categorizer.id) ?? new Set();
1719
+ if (outgoing.size === 0) {
1720
+ issues.push({
1721
+ type: "missing_category_edge",
1722
+ severity: "critical",
1723
+ node: categorizer.id,
1724
+ reason: `Categorizer "${categorizer.id}" has no outgoing category edges - routing won't work`,
1725
+ });
1726
+ }
1727
+ // Check for Fallback - look for edges or enumType options
1728
+ let hasFallback = false;
1729
+ for (const node of nodes) {
1730
+ if (node.incoming_edges) {
1731
+ for (const edge of node.incoming_edges) {
1732
+ if (edge.source_node_id === categorizer.id &&
1733
+ (edge.source_output?.toLowerCase().includes("fallback"))) {
1734
+ hasFallback = true;
1735
+ break;
1736
+ }
1737
+ }
1738
+ }
1739
+ }
1740
+ if (!hasFallback && outgoing.size > 0) {
1741
+ issues.push({
1742
+ type: "missing_fallback",
1743
+ severity: "critical",
1744
+ node: categorizer.id,
1745
+ reason: `Categorizer "${categorizer.id}" appears to be missing a Fallback category`,
1746
+ });
1747
+ }
1748
+ }
1749
+ return issues;
1750
+ }
1751
+ /**
1752
+ * Detect HITL issues (missing success or failure paths)
1753
+ */
1754
+ function detectHitlIssues(nodes) {
1755
+ const issues = [];
1756
+ const hitlNodes = nodes.filter(n => n.action_name?.includes("hitl") ||
1757
+ n.action_name?.includes("human_collaboration") ||
1758
+ n.id?.includes("hitl") ||
1759
+ n.id?.includes("approval"));
1760
+ for (const hitl of hitlNodes) {
1761
+ let hasSuccess = false;
1762
+ let hasFailure = false;
1763
+ // Look for edges from this HITL node
1764
+ for (const node of nodes) {
1765
+ if (node.incoming_edges) {
1766
+ for (const edge of node.incoming_edges) {
1767
+ if (edge.source_node_id === hitl.id) {
1768
+ const output = edge.source_output?.toLowerCase() ?? "";
1769
+ if (output.includes("success")) {
1770
+ hasSuccess = true;
1771
+ }
1772
+ if (output.includes("fail")) {
1773
+ hasFailure = true;
1774
+ }
1775
+ }
1776
+ }
1777
+ }
1778
+ }
1779
+ if (!hasSuccess && !hasFailure) {
1780
+ issues.push({
1781
+ type: "incomplete_hitl",
1782
+ severity: "critical",
1783
+ node: hitl.id,
1784
+ missing: "both success and failure paths",
1785
+ reason: `HITL node "${hitl.id}" missing both success AND failure paths`,
1786
+ });
1787
+ }
1788
+ else if (!hasSuccess) {
1789
+ issues.push({
1790
+ type: "incomplete_hitl",
1791
+ severity: "critical",
1792
+ node: hitl.id,
1793
+ missing: "success_path",
1794
+ reason: `HITL node "${hitl.id}" missing success path`,
1795
+ });
1796
+ }
1797
+ else if (!hasFailure) {
1798
+ issues.push({
1799
+ type: "incomplete_hitl",
1800
+ severity: "critical",
1801
+ node: hitl.id,
1802
+ missing: "failure_path",
1803
+ reason: `HITL node "${hitl.id}" missing failure path`,
1804
+ });
1805
+ }
1806
+ }
1807
+ return issues;
1808
+ }
1809
+ /**
1810
+ * Detect wrong input source issues (e.g., user_query for categorizer instead of chat_conversation)
1811
+ */
1812
+ function detectWrongInputSource(nodes) {
1813
+ const issues = [];
1814
+ for (const node of nodes) {
1815
+ const actionName = node.action_name ?? node.id;
1816
+ // Use the shared validation rules (single source of truth)
1817
+ const rule = findInputSourceRule(actionName);
1818
+ if (!rule)
1819
+ continue;
1820
+ if (node.incoming_edges) {
1821
+ for (const edge of node.incoming_edges) {
1822
+ const sourceOutput = edge.source_output?.toLowerCase() ?? "";
1823
+ const targetInput = edge.target_input?.toLowerCase() ?? "";
1824
+ // Check if using an avoided input
1825
+ for (const avoid of rule.avoid) {
1826
+ if (sourceOutput.includes(avoid.toLowerCase()) ||
1827
+ (targetInput.includes("query") && sourceOutput.includes("chat_conversation"))) {
1828
+ issues.push({
1829
+ type: "wrong_input_source",
1830
+ severity: rule.severity,
1831
+ node: node.id,
1832
+ current: sourceOutput,
1833
+ recommended: rule.recommended,
1834
+ reason: rule.reason,
1835
+ recommendation: rule.fix,
1836
+ });
1837
+ }
1838
+ }
1839
+ }
1840
+ }
1841
+ }
1842
+ return issues;
1843
+ }
1844
+ /**
1845
+ * Detect email-specific issues:
1846
+ * 1. Email recipient from text output (should be entity_extraction)
1847
+ * 2. Email without HITL confirmation
1848
+ * 3. Missing entity_extraction before email
1849
+ */
1850
+ function detectEmailIssues(nodes) {
1851
+ const issues = [];
1852
+ // Text outputs that should NEVER be connected to email_to
1853
+ const INVALID_EMAIL_SOURCES = [
1854
+ "summarized_conversation",
1855
+ "response_with_sources",
1856
+ "search_results",
1857
+ "combined_results",
1858
+ "generated_content",
1859
+ "text_with_sources",
1860
+ "user_query",
1861
+ "chat_conversation",
1862
+ ];
1863
+ // Find email sending nodes
1864
+ const emailNodes = nodes.filter(n => n.action_name?.includes("send_email") ||
1865
+ n.action_name?.includes("email_agent") ||
1866
+ n.id?.toLowerCase().includes("send_email") ||
1867
+ n.id?.toLowerCase().includes("email"));
1868
+ // Find entity extraction nodes
1869
+ const entityExtractionNodes = nodes.filter(n => n.action_name?.includes("entity_extraction") ||
1870
+ n.id?.toLowerCase().includes("entity_extraction") ||
1871
+ n.id?.toLowerCase().includes("extract"));
1872
+ // Find HITL nodes
1873
+ const hitlNodes = nodes.filter(n => n.action_name?.includes("hitl") ||
1874
+ n.action_name?.includes("human_collaboration") ||
1875
+ n.id?.toLowerCase().includes("hitl") ||
1876
+ n.id?.toLowerCase().includes("approval"));
1877
+ for (const emailNode of emailNodes) {
1878
+ // Check 1: Is email_to connected to invalid text source?
1879
+ if (emailNode.incoming_edges) {
1880
+ for (const edge of emailNode.incoming_edges) {
1881
+ const targetInput = edge.target_input?.toLowerCase() ?? "";
1882
+ const sourceOutput = edge.source_output?.toLowerCase() ?? "";
1883
+ // Check if this is the email_to input
1884
+ if (targetInput.includes("email_to") || targetInput.includes("recipient") || targetInput.includes("to_address")) {
1885
+ // Check if source is a text output (INVALID for email addresses)
1886
+ const isInvalidSource = INVALID_EMAIL_SOURCES.some(invalid => sourceOutput.includes(invalid.toLowerCase().replace(/_/g, "")) ||
1887
+ sourceOutput.replace(/_/g, "").includes(invalid.toLowerCase().replace(/_/g, "")));
1888
+ if (isInvalidSource) {
1889
+ issues.push({
1890
+ type: "unsafe_email_recipient",
1891
+ severity: "critical",
1892
+ node: emailNode.id,
1893
+ source: edge.source_node_id,
1894
+ current: sourceOutput,
1895
+ recommended: "entity_extraction.email_address",
1896
+ reason: `Email recipient "${targetInput}" is connected to "${sourceOutput}" which is TEXT content, not an email address. ` +
1897
+ `This will cause emails to fail or send to invalid addresses. ` +
1898
+ `Use entity_extraction to extract the email address from conversation.`,
1899
+ });
1900
+ }
1901
+ }
1902
+ }
1903
+ }
1904
+ // Check 2: Is there entity_extraction upstream of this email node?
1905
+ const hasEntityExtraction = entityExtractionNodes.length > 0;
1906
+ if (!hasEntityExtraction) {
1907
+ issues.push({
1908
+ type: "missing_entity_extraction",
1909
+ severity: "warning",
1910
+ node: emailNode.id,
1911
+ reason: `Email node "${emailNode.id}" has no entity_extraction upstream. ` +
1912
+ `Best practice: Use entity_extraction to extract recipient email, subject, and other required fields from conversation. ` +
1913
+ `This ensures structured data extraction instead of passing raw text.`,
1914
+ });
1915
+ }
1916
+ // Check 3: Is there HITL confirmation before email?
1917
+ // Check if any HITL node's output leads to this email node
1918
+ let hasHitlUpstream = false;
1919
+ if (emailNode.incoming_edges) {
1920
+ for (const edge of emailNode.incoming_edges) {
1921
+ const sourceNode = nodes.find(n => n.id === edge.source_node_id);
1922
+ if (sourceNode && (sourceNode.action_name?.includes("hitl") ||
1923
+ sourceNode.id?.toLowerCase().includes("hitl"))) {
1924
+ hasHitlUpstream = true;
1925
+ break;
1926
+ }
1927
+ }
1928
+ }
1929
+ // Also check runIf for HITL Success
1930
+ const hasHitlRunIf = emailNode.runIf &&
1931
+ typeof emailNode.runIf === "object" &&
1932
+ JSON.stringify(emailNode.runIf).toLowerCase().includes("hitl");
1933
+ if (!hasHitlUpstream && !hasHitlRunIf && hitlNodes.length === 0) {
1934
+ issues.push({
1935
+ type: "side_effect_without_hitl",
1936
+ severity: "warning",
1937
+ node: emailNode.id,
1938
+ reason: `Email node "${emailNode.id}" has no HITL confirmation upstream. ` +
1939
+ `Emails are high-impact actions with external side effects. ` +
1940
+ `Best practice: Add HITL node before send_email to confirm recipient and content with user.`,
1941
+ });
1942
+ }
1943
+ }
1944
+ return issues;
1945
+ }
1946
+ /**
1947
+ * Validate all edge connections for type compatibility
1948
+ */
1949
+ export function validateWorkflowConnections(workflowDef) {
1950
+ const nodes = parseWorkflowDef(workflowDef);
1951
+ const validations = [];
1952
+ for (const node of nodes) {
1953
+ if (!node.incoming_edges)
1954
+ continue;
1955
+ for (const edge of node.incoming_edges) {
1956
+ const sourceNode = nodes.find(n => n.id === edge.source_node_id);
1957
+ const validation = {
1958
+ edge_id: `${edge.source_node_id}.${edge.source_output} → ${node.id}.${edge.target_input}`,
1959
+ source_node: edge.source_node_id,
1960
+ source_output: edge.source_output,
1961
+ target_node: node.id,
1962
+ target_input: edge.target_input,
1963
+ compatible: true, // Default to true, validate below
1964
+ };
1965
+ // Determine types from output/input names
1966
+ const sourceOutput = edge.source_output?.toLowerCase() ?? "";
1967
+ const targetInput = edge.target_input?.toLowerCase() ?? "";
1968
+ // Infer types from naming conventions
1969
+ if (sourceOutput.includes("chat_conversation")) {
1970
+ validation.source_type = "WELL_KNOWN_TYPE_CHAT_CONVERSATION";
1971
+ }
1972
+ else if (sourceOutput.includes("search_result")) {
1973
+ validation.source_type = "WELL_KNOWN_TYPE_SEARCH_RESULT";
1974
+ }
1975
+ else if (sourceOutput.includes("document")) {
1976
+ validation.source_type = "WELL_KNOWN_TYPE_DOCUMENT";
1977
+ }
1978
+ else {
1979
+ validation.source_type = "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES";
1980
+ }
1981
+ // Check named_inputs FIRST - they accept ANY type
1982
+ // This must come before "conversation" check since named_inputs_conversation contains both
1983
+ if (targetInput.includes("named_input")) {
1984
+ validation.target_type = "WELL_KNOWN_TYPE_ANY";
1985
+ }
1986
+ else if (targetInput.includes("conversation")) {
1987
+ validation.target_type = "WELL_KNOWN_TYPE_CHAT_CONVERSATION";
1988
+ }
1989
+ else if (targetInput.includes("search_result")) {
1990
+ validation.target_type = "WELL_KNOWN_TYPE_SEARCH_RESULT";
1991
+ }
1992
+ else if (targetInput.includes("document")) {
1993
+ validation.target_type = "WELL_KNOWN_TYPE_DOCUMENT";
1994
+ }
1995
+ else {
1996
+ validation.target_type = "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES";
1997
+ }
1998
+ // Check compatibility
1999
+ if (validation.source_type && validation.target_type) {
2000
+ const compat = checkTypeCompatibility(validation.source_type, validation.target_type);
2001
+ if (compat) {
2002
+ validation.compatible = compat.compatible;
2003
+ validation.note = compat.note;
2004
+ }
2005
+ else if (validation.source_type !== validation.target_type &&
2006
+ validation.target_type !== "WELL_KNOWN_TYPE_ANY") {
2007
+ validation.compatible = false;
2008
+ validation.note = `Type mismatch: ${validation.source_type} → ${validation.target_type}`;
2009
+ }
2010
+ }
2011
+ validations.push(validation);
2012
+ }
2013
+ }
2014
+ return validations;
2015
+ }
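+ // Illustrative result for an incompatible edge (types are inferred from output/input names;
+ // the note text depends on the TYPE_COMPATIBILITY table defined earlier in this module):
+ //   const validations = validateWorkflowConnections(workflowDef);
+ //   // An edge like trigger.chat_conversation → search.query would be reported as:
+ //   // { compatible: false,
+ //   //   source_type: "WELL_KNOWN_TYPE_CHAT_CONVERSATION",
+ //   //   target_type: "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES", ... }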
2016
+ /**
2017
+ * Detect performance issues (redundant searches, consolidation opportunities)
2018
+ *
2019
+ * This checks for:
2020
+ * 1. Multiple search nodes using the same query source - should consolidate
2021
+ * 2. Conditional searches that could be a single search with filtered results
2022
+ * 3. Sequential operations that could run in parallel
2023
+ */
2024
+ function detectPerformanceIssues(nodes) {
2025
+ const issues = [];
2026
+ // Find all search nodes
2027
+ const searchNodes = nodes.filter(n => n.action_name?.includes("search") ||
2028
+ n.id?.includes("search"));
2029
+ if (searchNodes.length <= 1) {
2030
+ return issues; // No consolidation opportunity with 0-1 search nodes
2031
+ }
2032
+ // Group search nodes by their query source
2033
+ const searchesByQuerySource = new Map();
2034
+ for (const search of searchNodes) {
2035
+ // Find the query input edge
2036
+ const queryEdge = search.incoming_edges?.find(e => e.target_input?.toLowerCase().includes("query"));
2037
+ if (queryEdge) {
2038
+ const querySource = `${queryEdge.source_node_id}.${queryEdge.source_output}`;
2039
+ const existing = searchesByQuerySource.get(querySource) ?? [];
2040
+ existing.push(search);
2041
+ searchesByQuerySource.set(querySource, existing);
2042
+ }
2043
+ }
2044
+ // Check for redundant searches (same query source)
2045
+ for (const [querySource, searches] of searchesByQuerySource) {
2046
+ if (searches.length > 1) {
2047
+ // Check if these are conditional (runIf) searches
2048
+ const conditionalSearches = searches.filter(s =>
2049
+ // Heuristic: branch-specific node IDs suggest conditional (runIf-gated) execution
2050
+ s.id?.includes("client_update") ||
2051
+ s.id?.includes("client_review") ||
2052
+ s.id?.includes("market_impact") ||
2053
+ s.id?.includes("_1") || s.id?.includes("_2") || s.id?.includes("_3"));
2054
+ if (conditionalSearches.length > 1) {
2055
+ // Multiple conditional searches with same query = consolidation opportunity
2056
+ issues.push({
2057
+ type: "redundant_search",
2058
+ severity: "warning",
2059
+ nodes: searches.map(s => s.id),
2060
+ query_source: querySource,
2061
+ reason: `${searches.length} search nodes all use the same query source (${querySource}). Consider consolidating into a single search - only one branch executes at a time, so multiple searches add complexity without benefit.`,
2062
+ recommendation: "Replace multiple conditional searches with a single search node. Pass results to all response nodes via named_inputs. This reduces workflow complexity and maintenance burden.",
2063
+ });
2064
+ }
2065
+ else {
2066
+ // Non-conditional redundant searches (actual duplicates)
2067
+ issues.push({
2068
+ type: "redundant_search",
2069
+ severity: "info",
2070
+ nodes: searches.map(s => s.id),
2071
+ query_source: querySource,
2072
+ reason: `${searches.length} search nodes share query source (${querySource}). Verify these are intentionally different (e.g., different file filters) or consolidate.`,
2073
+ recommendation: "If searches have different file filters, consider using a single search with broader filters and letting the LLM filter relevant results.",
2074
+ });
2075
+ }
2076
+ }
2077
+ }
2078
+ // Check for sequential searches that could parallelize
2079
+ for (const search of searchNodes) {
2080
+ // Find nodes that depend on this search
2081
+ const dependentSearches = searchNodes.filter(s => s.id !== search.id &&
2082
+ s.incoming_edges?.some(e => e.source_node_id === search.id));
2083
+ if (dependentSearches.length > 0) {
2084
+ issues.push({
2085
+ type: "sequential_search",
2086
+ severity: "info",
2087
+ node: search.id,
2088
+ dependent_nodes: dependentSearches.map(s => s.id),
2089
+ reason: `Search "${search.id}" has dependent searches that run sequentially. If searches are independent, they could run in parallel.`,
2090
+ recommendation: "Review if sequential dependency is necessary. Independent searches should branch from the same source to enable parallel execution.",
2091
+ });
2092
+ }
2093
+ }
2094
+ // Check for multiple LLM calls that could consolidate
2095
+ const llmNodes = nodes.filter(n => n.action_name?.includes("call_llm") ||
2096
+ n.action_name?.includes("respond") ||
2097
+ n.id?.includes("respond"));
2098
+ // Group LLM nodes by their search input source
2099
+ const llmsBySearchSource = new Map();
2100
+ for (const llm of llmNodes) {
2101
+ const searchInputEdge = llm.incoming_edges?.find(e => e.source_output?.toLowerCase().includes("search_result"));
2102
+ if (searchInputEdge) {
2103
+ const source = searchInputEdge.source_node_id;
2104
+ const existing = llmsBySearchSource.get(source) ?? [];
2105
+ existing.push(llm);
2106
+ llmsBySearchSource.set(source, existing);
2107
+ }
2108
+ }
2109
+ // Report if multiple LLM nodes use same search results
2110
+ for (const [searchSource, llms] of llmsBySearchSource) {
2111
+ if (llms.length > 1) {
2112
+ // This is often OK (conditional responses based on categorizer)
2113
+ // Only flag if they're not conditional
2114
+ const nonConditionalCount = llms.filter(l => !l.id?.includes("update") &&
2115
+ !l.id?.includes("review") &&
2116
+ !l.id?.includes("impact") &&
2117
+ !l.id?.includes("fallback")).length;
2118
+ if (nonConditionalCount > 1) {
2119
+ issues.push({
2120
+ type: "duplicate_llm_processing",
2121
+ severity: "info",
2122
+ nodes: llms.map(l => l.id),
2123
+ search_source: searchSource,
2124
+ reason: `Multiple LLM nodes process results from "${searchSource}". Consider consolidating if they produce similar outputs.`,
2125
+ recommendation: "Use a single call_llm with comprehensive instructions instead of multiple calls. This produces more coherent responses.",
2126
+ });
2127
+ }
2128
+ }
2129
+ }
2130
+ return issues;
2131
+ }
2132
+ /**
2133
+ * Detect all workflow issues
2134
+ */
2135
+ /**
2136
+ * Detect malformed runIf conditions
2137
+ *
2138
+ * Common mistake: using "category_<Name>" as output and comparing to "true"
2139
+ * Correct format: output="category", compare to enumValue="<Name>"
2140
+ */
2141
+ function detectMalformedRunIf(workflowDef) {
2142
+ const issues = [];
2143
+ const def = workflowDef;
2144
+ if (!def)
2145
+ return issues;
2146
+ const actions = def.actions;
2147
+ if (!actions)
2148
+ return issues;
2149
+ for (const action of actions) {
2150
+ const runIf = action.runIf;
2151
+ if (!runIf)
2152
+ continue;
2153
+ const lhs = runIf.lhs;
2154
+ const rhs = runIf.rhs;
2155
+ if (!lhs || !rhs)
2156
+ continue;
2157
+ const actionOutput = lhs.actionOutput;
2158
+ if (!actionOutput)
2159
+ continue;
2160
+ const output = String(actionOutput.output ?? "");
2161
+ const inlineRhs = rhs.inline;
2162
+ const enumValue = String(inlineRhs?.enumValue ?? "");
2163
+ // Detect malformed pattern: output="category_<Name>" compared to enumValue="true"
2164
+ if (output.startsWith("category_") && (enumValue === "true" || enumValue === "false")) {
2165
+ // Extract the category name from the malformed output
2166
+ const categoryName = output.replace(/^category_/, "");
2167
+ issues.push({
2168
+ type: "malformed_runif",
2169
+ severity: "critical",
2170
+ node: String(action.name ?? ""),
2171
+ current: `output="${output}" compared to enumValue="${enumValue}"`,
2172
+ recommended: `output="category" compared to enumValue="${categoryName}"`,
2173
+ reason: `Malformed runIf condition: comparing "${output}" to "${enumValue}" won't work. The categorizer output is "category", not "category_<Name>".`,
2174
+ auto_fixable: true,
2175
+ });
2176
+ }
2177
+ }
2178
+ return issues;
2179
+ }
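+ // Illustrative shapes this check distinguishes (node and category names are hypothetical):
+ //   Malformed:  runIf: { lhs: { actionOutput: { actionName: "chat_categorizer", output: "category_Billing" } },
+ //                        operator: 1, rhs: { inline: { enumValue: "true" } } }
+ //   Flagged as auto_fixable with recommended: output="category" compared to enumValue="Billing"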
2180
+ export function detectWorkflowIssues(workflowDef) {
2181
+ const nodes = parseWorkflowDef(workflowDef);
2182
+ if (nodes.length === 0) {
2183
+ return [{
2184
+ type: "orphan",
2185
+ severity: "critical",
2186
+ reason: "No workflow nodes found - workflow definition may be empty or invalid format",
2187
+ }];
2188
+ }
2189
+ let issues = [
2190
+ ...detectCycles(nodes),
2191
+ ...detectOrphanNodes(nodes),
2192
+ ...detectDeadEnds(nodes),
2193
+ ...detectUnusedOutputs(nodes, workflowDef),
2194
+ ...detectCategorizerIssues(nodes),
2195
+ ...detectHitlIssues(nodes),
2196
+ ...detectWrongInputSource(nodes),
2197
+ ...detectEmailIssues(nodes),
2198
+ ...detectPerformanceIssues(nodes),
2199
+ ...detectMalformedRunIf(workflowDef),
2200
+ ];
2201
+ // Add type mismatch issues from connection validation
2202
+ const connections = validateWorkflowConnections(workflowDef);
2203
+ for (const conn of connections) {
2204
+ if (!conn.compatible) {
2205
+ issues.push({
2206
+ type: "type_mismatch",
2207
+ severity: "critical",
2208
+ source: `${conn.source_node}.${conn.source_output}`,
2209
+ target: `${conn.target_node}.${conn.target_input}`,
2210
+ expected: conn.target_type,
2211
+ got: conn.source_type,
2212
+ reason: conn.note ?? `Type mismatch: ${conn.source_type} → ${conn.target_type}`,
2213
+ });
2214
+ }
2215
+ }
2216
+ // Filter out false positives for Voice AI workflows
2217
+ const def = workflowDef;
2218
+ if (def) {
2219
+ const results = def.results;
2220
+ const actions = def.actions;
2221
+ // If workflow has results mapping, it's Voice AI - WORKFLOW_OUTPUT is not required
2222
+ if (results && Object.keys(results).length > 0) {
2223
+ issues = issues.filter(i => i.type !== "missing_workflow_output");
2224
+ }
2225
+ // Check if categorizers use runIf pattern (valid alternative to explicit edges)
2226
+ if (actions) {
2227
+ const categorizerIssues = issues.filter(i => i.type === "missing_category_edge");
2228
+ for (const catIssue of categorizerIssues) {
2229
+ const categorizerName = catIssue.node;
2230
+ // Count nodes that have runIf referencing this categorizer
2231
+ const nodesUsingCategorizer = actions.filter(a => {
2232
+ const runIf = a.runIf;
2233
+ if (!runIf?.lhs)
2234
+ return false;
2235
+ const lhs = runIf.lhs;
2236
+ const actionOutput = lhs.actionOutput;
2237
+ return actionOutput?.actionName === categorizerName;
2238
+ });
2239
+ // If there are nodes with runIf conditions using this categorizer, it's valid
2240
+ if (nodesUsingCategorizer.length > 0) {
2241
+ issues = issues.filter(i => !(i.type === "missing_category_edge" && i.node === categorizerName));
2242
+ }
2243
+ }
2244
+ }
2245
+ }
2246
+ return issues;
2247
+ }
2248
+ /**
2249
+ * Analyze a workflow comprehensively
2250
+ */
2251
+ export function analyzeWorkflow(workflowDef, metadata) {
2252
+ const nodes = parseWorkflowDef(workflowDef);
2253
+ const issues = detectWorkflowIssues(workflowDef);
2254
+ // Count edges
2255
+ let totalEdges = 0;
2256
+ for (const node of nodes) {
2257
+ totalEdges += node.incoming_edges?.length ?? 0;
2258
+ }
2259
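+ // The role counts below are heuristic: they match on action names and node ids.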
+ // Check for trigger
2260
+ const hasTrigger = nodes.some(n => n.action_name?.includes("trigger") ||
2261
+ n.id === "trigger" ||
2262
+ n.id?.includes("trigger"));
2263
+ // Check for WORKFLOW_OUTPUT
2264
+ const hasWorkflowOutput = nodes.some(n => n.id === "WORKFLOW_OUTPUT" ||
2265
+ n.action_name === "WorkflowOutputSink");
2266
+ // Count categorizers
2267
+ const categorizersCount = nodes.filter(n => n.action_name?.includes("categorizer") ||
2268
+ n.id?.includes("categorizer") ||
2269
+ n.id?.includes("classifier")).length;
2270
+ // Count HITL nodes
2271
+ const hitlNodesCount = nodes.filter(n => n.action_name?.includes("hitl") ||
2272
+ n.action_name?.includes("human_collaboration") ||
2273
+ n.id?.includes("hitl") ||
2274
+ n.id?.includes("approval")).length;
2275
+ // Summarize issues by severity
2276
+ const issueSummary = {
2277
+ critical: issues.filter(i => i.severity === "critical").length,
2278
+ warning: issues.filter(i => i.severity === "warning").length,
2279
+ info: issues.filter(i => i.severity === "info").length,
2280
+ };
2281
+ return {
2282
+ ...metadata,
2283
+ summary: {
2284
+ total_nodes: nodes.length,
2285
+ total_edges: totalEdges,
2286
+ has_trigger: hasTrigger,
2287
+ has_workflow_output: hasWorkflowOutput,
2288
+ categorizers_count: categorizersCount,
2289
+ hitl_nodes_count: hitlNodesCount,
2290
+ },
2291
+ issues,
2292
+ issue_summary: issueSummary,
2293
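+ // The workflow only "passes" when no critical issues remain; warnings and info are advisory.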
+ validation_passed: issueSummary.critical === 0,
2294
+ };
2295
+ }
2296
+ /**
2297
+ * Suggest fixes for detected workflow issues
2298
+ */
2299
+ export function suggestWorkflowFixes(issues) {
2300
+ const fixes = [];
2301
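+ // Each fix mirrors the issue it addresses: issue_type, description, optional before/after snippets, and a validation hint.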
+ for (const issue of issues) {
2302
+ let fix = null;
2303
+ switch (issue.type) {
2304
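+ // Structural and routing fixes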
+ case "missing_fallback":
2305
+ fix = {
2306
+ issue_type: issue.type,
2307
+ description: `Add a Fallback category to categorizer "${issue.node}"`,
2308
+ after: `# Add to enumType.options for ${issue.node}:
2309
+ - name: "Fallback"
2310
+ description: "Default for unclear or ambiguous requests"
2311
+ examples:
2312
+ - "Hello"
2313
+ - "Help"
2314
+ - "I'm not sure"
2315
+
2316
+ # Add edge for Fallback category:
2317
+ - source_node: ${issue.node}
2318
+ source_output: category_Fallback
2319
+ target_node: fallback_handler
2320
+ target_input: trigger_when`,
2321
+ validation: "Verify categorizer has Fallback in enumType.options AND has outgoing edge for category_Fallback",
2322
+ };
2323
+ break;
2324
+ case "incomplete_hitl":
2325
+ fix = {
2326
+ issue_type: issue.type,
2327
+ description: `Add ${issue.missing} for HITL node "${issue.node}"`,
2328
+ after: `# Add ${issue.missing === "success_path" ? "success" : issue.missing === "failure_path" ? "failure" : "both"} handler(s):
2329
+ ${issue.missing?.includes("success") || issue.missing?.includes("both") ? `
2330
+ - name: "${issue.node}_success_response"
2331
+ runIf:
2332
+ lhs:
2333
+ actionOutput:
2334
+ actionName: "${issue.node}"
2335
+ output: "hitl_status"
2336
+ autoDetectedBinding: false
2337
+ operator: 1 # OPERATOR_EQ
2338
+ rhs:
2339
+ inline:
2340
+ enumValue: "HITL Success"
2341
+ autoDetectedBinding: false
2342
+ action:
2343
+ name:
2344
+ namespaces: ["actions", "emainternal"]
2345
+ name: "call_llm"
2346
+ # Connect to WORKFLOW_OUTPUT` : ""}
2347
+ ${issue.missing?.includes("failure") || issue.missing?.includes("both") ? `
2348
+ - name: "${issue.node}_failure_response"
2349
+ runIf:
2350
+ lhs:
2351
+ actionOutput:
2352
+ actionName: "${issue.node}"
2353
+ output: "hitl_status"
2354
+ autoDetectedBinding: false
2355
+ operator: 1 # OPERATOR_EQ
2356
+ rhs:
2357
+ inline:
2358
+ enumValue: "HITL Failure"
2359
+ autoDetectedBinding: false
2360
+ action:
2361
+ name:
2362
+ namespaces: ["actions", "emainternal"]
2363
+ name: "call_llm"
2364
+ # Connect to WORKFLOW_OUTPUT` : ""}`,
2365
+ validation: "Verify both 'hitl_status_HITL Success' and 'hitl_status_HITL Failure' (with space, not underscore) have handler nodes and connect to WORKFLOW_OUTPUT",
2366
+ };
2367
+ break;
2368
+ case "dead_end":
2369
+ fix = {
2370
+ issue_type: issue.type,
2371
+ description: `Connect "${issue.node}" to WORKFLOW_OUTPUT`,
2372
+ after: `# Add edge from ${issue.node} to WORKFLOW_OUTPUT:
2373
+ - source_node: ${issue.node}
2374
+ source_output: response_with_sources # or appropriate output
2375
+ target_node: WORKFLOW_OUTPUT
2376
+ target_input: ${issue.node}.response_with_sources`,
2377
+ validation: "Verify node's output is mapped to WORKFLOW_OUTPUT",
2378
+ };
2379
+ break;
2380
+ case "type_mismatch":
2381
+ fix = {
2382
+ issue_type: issue.type,
2383
+ description: `Fix type mismatch: ${issue.source} → ${issue.target}`,
2384
+ before: `# Current: ${issue.source} (${issue.got}) → ${issue.target} (${issue.expected})`,
2385
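+ // Special-case the common CHAT_CONVERSATION → TEXT_WITH_SOURCES mismatch with a concrete conversion snippet.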
+ after: issue.got === "WELL_KNOWN_TYPE_CHAT_CONVERSATION" && issue.expected === "WELL_KNOWN_TYPE_TEXT_WITH_SOURCES"
2386
+ ? `# Insert conversation_to_search_query between source and target:
2387
+ - name: "summarizer"
2388
+ action:
2389
+ name:
2390
+ namespaces: ["actions", "emainternal"]
2391
+ name: "conversation_to_search_query"
2392
+ inputs:
2393
+ conversation:
2394
+ actionOutput:
2395
+ actionName: "[source_trigger]"
2396
+ output: "chat_conversation"
2397
+
2398
+ # Then update target to use summarizer output:
2399
+ - target input should reference summarizer.summarized_conversation`
2400
+ : `# Either:
2401
+ # 1. Use an intermediate conversion node
2402
+ # 2. Connect to a named_inputs_* (accepts ANY type)
2403
+ # 3. Use a different source output that matches the expected type`,
2404
+ validation: `Verify source output type (${issue.got}) matches target input type (${issue.expected})`,
2405
+ };
2406
+ break;
2407
+ case "wrong_input_source":
2408
+ fix = {
2409
+ issue_type: issue.type,
2410
+ description: `Change input for "${issue.node}" from ${issue.current} to ${issue.recommended}`,
2411
+ before: `# Current: using ${issue.current}`,
2412
+ after: `# Update incoming edge to use ${issue.recommended}:
2413
+ - source_node: trigger # or appropriate source
2414
+ source_output: ${issue.recommended}
2415
+ target_node: ${issue.node}
2416
+ target_input: ${issue.recommended === "chat_conversation" ? "conversation" : "query"}`,
2417
+ validation: `Verify ${issue.node} receives ${issue.recommended} - ${issue.reason}`,
2418
+ };
2419
+ break;
2420
+ case "missing_category_edge":
2421
+ fix = {
2422
+ issue_type: issue.type,
2423
+ description: `Add outgoing category edges for categorizer "${issue.node}"`,
2424
+ after: `# Add edges for each category in the categorizer:
2425
+ # For each category in enumType.options, add:
2426
+ - source_node: ${issue.node}
2427
+ source_output: category_<CategoryName>
2428
+ target_node: <handler_for_category>
2429
+ target_input: trigger_when
2430
+
2431
+ # Example categories: Product_Info, Support, Sales, Fallback`,
2432
+ validation: "Verify each category in enumType.options has a corresponding outgoing edge",
2433
+ };
2434
+ break;
2435
+ case "orphan":
2436
+ fix = {
2437
+ issue_type: issue.type,
2438
+ description: `Connect orphan node "${issue.node}" to the workflow`,
2439
+ after: `# Option 1: Connect as target of a categorizer:
2440
+ - source_node: <categorizer>
2441
+ source_output: category_<SomeCategory>
2442
+ target_node: ${issue.node}
2443
+ target_input: trigger_when
2444
+
2445
+ # Option 2: Remove the node if no longer needed`,
2446
+ validation: "Verify node is reachable from trigger via edges",
2447
+ };
2448
+ break;
2449
+ case "unused_output":
2450
+ fix = {
2451
+ issue_type: issue.type,
2452
+ description: `Connect the output of "${issue.node}" to a downstream consumer or remove the node`,
2453
+ after: `# The node "${issue.node}" produces "${issue.current}" but nothing uses it.
2454
+ #
2455
+ # Option 1: Connect output to a downstream node that needs it:
2456
+ - node: <downstream_node>
2457
+ inputs:
2458
+ <input_name>:
2459
+ actionOutput:
2460
+ actionName: ${issue.node}
2461
+ output: ${issue.current}
2462
+
2463
+ # Option 2: If using combine_search_results, connect to respond_with_sources:
2464
+ # NOTE: respond_with_sources.search_results expects SEARCH_RESULT type
2465
+ # combined_results is TEXT_WITH_SOURCES - use original search output OR named_inputs
2466
+ - node: respond_with_sources
2467
+ inputs:
2468
+ search_results:
2469
+ actionOutput:
2470
+ actionName: <original_search_node> # NOT ${issue.node}
2471
+ output: search_results
2472
+ named_inputs: # Add combined for context
2473
+ multiBinding:
2474
+ elements:
2475
+ - namedBinding:
2476
+ name: combined_context
2477
+ value:
2478
+ actionOutput:
2479
+ actionName: ${issue.node}
2480
+ output: ${issue.current}
2481
+
2482
+ # Option 3: Remove the node if not needed:
2483
+ # Delete the "${issue.node}" action from the workflow`,
2484
+ validation: `Verify "${issue.node}.${issue.current}" is consumed by a downstream node or WORKFLOW_OUTPUT`,
2485
+ };
2486
+ break;
2487
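+ // Email and side-effect safety fixes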
+ case "unsafe_email_recipient":
2488
+ fix = {
2489
+ issue_type: issue.type,
2490
+ description: `Use entity_extraction to get the email address instead of text content`,
2491
+ before: `# WRONG: Connecting text output to email recipient
2492
+ - node: ${issue.node}
2493
+ inputs:
2494
+ email_to:
2495
+ actionOutput:
2496
+ actionName: ${issue.source}
2497
+ output: ${issue.current} # ❌ This is TEXT, not an email address!`,
2498
+ after: `# CORRECT: Extract email address via entity_extraction
2499
+ # Step 1: Add entity_extraction node to extract email from conversation
2500
+ - name: extract_email
2501
+ action:
2502
+ name:
2503
+ namespaces: ["agents"]
2504
+ name: entity_extraction
2505
+ inputs:
2506
+ text:
2507
+ actionOutput:
2508
+ actionName: trigger
2509
+ output: chat_conversation
2510
+ displaySettings:
2511
+ displayName: "Extract Email Address"
2512
+ # Configure extraction schema for email
2513
+ parameters:
2514
+ extraction_schema:
2515
+ email_address:
2516
+ type: string
2517
+ required: true
2518
+ recipient_name:
2519
+ type: string
2520
+ required: false
2521
+
2522
+ # Step 2: Add HITL to confirm before sending
2523
+ - name: confirm_email
2524
+ action:
2525
+ name:
2526
+ namespaces: ["agents"]
2527
+ name: hitl
2528
+ inputs:
2529
+ request_text:
2530
+ actionOutput:
2531
+ actionName: extract_email
2532
+ output: extracted_entities
2533
+ displaySettings:
2534
+ displayName: "Confirm Email Recipient"
2535
+
2536
+ # Step 3: Send email ONLY after HITL approval with extracted email
2537
+ - name: ${issue.node}
2538
+ action:
2539
+ name:
2540
+ namespaces: ["agents"]
2541
+ name: send_email_agent
2542
+ inputs:
2543
+ email_to:
2544
+ actionOutput:
2545
+ actionName: extract_email
2546
+ output: email_address # ✅ Structured email from extraction
2547
+ email_body:
2548
+ actionOutput:
2549
+ actionName: ${issue.source}
2550
+ output: ${issue.current} # Text content is fine for body
2551
+ runIf:
2552
+ enum:
2553
+ enumType: HITL_STATUS
2554
+ enumValue: "HITL Success"`,
2555
+ validation: "Verify email_to receives output from entity_extraction.email_address, not text content",
2556
+ };
2557
+ break;
2558
+ case "missing_entity_extraction":
2559
+ fix = {
2560
+ issue_type: issue.type,
2561
+ description: `Add entity_extraction upstream of the email node to extract structured data`,
2562
+ after: `# Add entity_extraction before email to extract required fields
2563
+ - name: extract_email_data
2564
+ action:
2565
+ name:
2566
+ namespaces: ["agents"]
2567
+ name: entity_extraction
2568
+ inputs:
2569
+ text:
2570
+ actionOutput:
2571
+ actionName: trigger
2572
+ output: chat_conversation
2573
+ displaySettings:
2574
+ displayName: "Extract Email Data"
2575
+ # Define what to extract
2576
+ parameters:
2577
+ extraction_schema:
2578
+ email_address:
2579
+ type: string
2580
+ required: true
2581
+ description: "Recipient email address"
2582
+ subject:
2583
+ type: string
2584
+ required: false
2585
+ description: "Email subject line"
2586
+ recipient_name:
2587
+ type: string
2588
+ required: false
2589
+
2590
+ # Then connect extracted data to email node:
2591
+ - node: ${issue.node}
2592
+ inputs:
2593
+ email_to:
2594
+ actionOutput:
2595
+ actionName: extract_email_data
2596
+ output: email_address`,
2597
+ validation: "Verify entity_extraction exists upstream of email node",
2598
+ };
2599
+ break;
2600
+ case "side_effect_without_hitl":
2601
+ fix = {
2602
+ issue_type: issue.type,
2603
+ description: `Add HITL confirmation before executing an action with side effects`,
2604
+ after: `# Add HITL node before ${issue.node} to confirm with user
2605
+ - name: confirm_before_send
2606
+ action:
2607
+ name:
2608
+ namespaces: ["agents"]
2609
+ name: hitl
2610
+ inputs:
2611
+ request_text:
2612
+ constant:
2613
+ value: "Please confirm you want to proceed with this action."
2614
+ displaySettings:
2615
+ displayName: "Confirm Before Sending"
2616
+
2617
+ # Update ${issue.node} to only run after HITL approval
2618
+ - name: ${issue.node}
2619
+ # ... existing config ...
2620
+ runIf:
2621
+ enum:
2622
+ enumType: HITL_STATUS
2623
+ enumValue: "HITL Success"
2624
+
2625
+ # Add failure handling (IMPORTANT: don't leave HITL incomplete)
2626
+ - name: cancelled_response
2627
+ action:
2628
+ name:
2629
+ namespaces: ["generation"]
2630
+ name: call_llm
2631
+ inputs:
2632
+ query:
2633
+ constant:
2634
+ value: "The action was cancelled by the user."
2635
+ runIf:
2636
+ enum:
2637
+ enumType: HITL_STATUS
2638
+ enumValue: "HITL Failure"`,
2639
+ validation: "Verify HITL node exists before action and both success/failure paths are handled",
2640
+ };
2641
+ break;
2642
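+ // Graph integrity fixes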
+ case "cycle":
2643
+ fix = {
2644
+ issue_type: issue.type,
2645
+ description: `Remove circular dependency in: ${issue.nodes?.join(" → ")}`,
2646
+ after: `# Identify and remove the edge creating the cycle.
2647
+ # The cycle involves: ${issue.nodes?.join(", ")}
2648
+ #
2649
+ # Common solutions:
2650
+ # 1. Remove the back-edge (edge pointing to earlier node)
2651
+ # 2. Restructure to use proper exit conditions
2652
+ # 3. Use a separate node for the "loop back" case that terminates properly`,
2653
+ validation: "Verify no circular paths exist in the workflow graph",
2654
+ };
2655
+ break;
2656
+ case "missing_workflow_output":
2657
+ fix = {
2658
+ issue_type: issue.type,
2659
+ description: "Add WORKFLOW_OUTPUT node and connect all response paths",
2660
+ after: `# Add WORKFLOW_OUTPUT node:
2661
+ - id: WORKFLOW_OUTPUT
2662
+ action_name: WorkflowOutputSink
2663
+ incoming_edges: []
2664
+
2665
+ # Connect all response nodes to it:
2666
+ # For each response/call_llm node that produces final output:
2667
+ - source_node: <response_node>
2668
+ source_output: response_with_sources
2669
+ target_node: WORKFLOW_OUTPUT
2670
+ target_input: <response_node>.response_with_sources`,
2671
+ validation: "Verify WORKFLOW_OUTPUT exists and all terminal response nodes connect to it",
2672
+ };
2673
+ break;
2674
+ // Performance optimization fixes
2675
+ case "redundant_search":
2676
+ fix = {
2677
+ issue_type: issue.type,
2678
+ description: `Consolidate ${issue.nodes?.length ?? 0} redundant search nodes into a single search`,
2679
+ before: `# Current: ${issue.nodes?.length ?? 0} separate search nodes, all using ${issue.query_source}
2680
+ # Nodes: ${issue.nodes?.join(", ")}`,
2681
+ after: `# RECOMMENDED: Replace multiple searches with a single consolidated search:
2682
+
2683
+ - name: "knowledge_search"
2684
+ action:
2685
+ name:
2686
+ namespaces: ["actions", "emainternal"]
2687
+ name: "search"
2688
+ inputs:
2689
+ query:
2690
+ actionOutput:
2691
+ actionName: "${issue.query_source?.split(".")[0] ?? "conversation_summarizer"}"
2692
+ output: "${issue.query_source?.split(".")[1] ?? "summarized_conversation"}"
2693
+ # Remove file_name_filters to search all files
2694
+ # OR use broad filters that cover all needed data
2695
+ max_extractive_segment_count:
2696
+ inline:
2697
+ wellKnown:
2698
+ int64Value: "25" # Increase to get comprehensive results
2699
+
2700
+ # Then update ALL response nodes to use this single search:
2701
+ - name: "respond_client_update" # and other response nodes
2702
+ inputs:
2703
+ named_inputs:
2704
+ multiBinding:
2705
+ elements:
2706
+ - namedBinding:
2707
+ name: "Search Results"
2708
+ value:
2709
+ actionOutput:
2710
+ actionName: "knowledge_search"
2711
+ output: "search_results"`,
2712
+ validation: "Verify single search provides sufficient results for all response branches",
2713
+ };
2714
+ break;
2715
+ case "sequential_search":
2716
+ fix = {
2717
+ issue_type: issue.type,
2718
+ description: `Parallelize sequential searches starting from "${issue.node}"`,
2719
+ before: `# Current: Sequential execution
2720
+ # ${issue.node} → ${issue.dependent_nodes?.join(" → ")}`,
2721
+ after: `# RECOMMENDED: Run independent searches in parallel by branching from same source:
2722
+
2723
+ # Instead of:
2724
+ # search_1 → search_2 → search_3
2725
+ #
2726
+ # Use:
2727
+ # ┌─> search_1 ─┐
2728
+ # │ │
2729
+ # source ─> search_2 ─> combine_search_results
2730
+ # │ │
2731
+ # └─> search_3 ─┘
2732
+
2733
+ # If searches need different inputs, ensure they branch from the categorizer
2734
+ # or conversation_summarizer rather than depending on each other.`,
2735
+ validation: "Verify searches don't actually depend on each other's results",
2736
+ };
2737
+ break;
2738
+ case "duplicate_llm_processing":
2739
+ fix = {
2740
+ issue_type: issue.type,
2741
+ description: `Consider consolidating ${issue.nodes?.length ?? 0} LLM nodes processing "${issue.search_source}"`,
2742
+ before: `# Current: ${issue.nodes?.length ?? 0} LLM nodes all process results from ${issue.search_source}
2743
+ # Nodes: ${issue.nodes?.join(", ")}`,
2744
+ after: `# OPTION 1: If these are conditional responses (different intents), keep as-is
2745
+ # This is the CORRECT pattern for categorizer-based routing
2746
+
2747
+ # OPTION 2: If these produce similar outputs, consolidate into one:
2748
+ - name: "unified_response"
2749
+ action:
2750
+ name:
2751
+ namespaces: ["actions", "emainternal"]
2752
+ name: "call_llm"
2753
+ inputs:
2754
+ query:
2755
+ actionOutput:
2756
+ actionName: "trigger"
2757
+ output: "user_query"
2758
+ named_inputs:
2759
+ multiBinding:
2760
+ elements:
2761
+ - namedBinding:
2762
+ name: "Search Results"
2763
+ value:
2764
+ actionOutput:
2765
+ actionName: "${issue.search_source}"
2766
+ output: "search_results"
2767
+ instructions:
2768
+ inline:
2769
+ wellKnown:
2770
+ stringValue: "Comprehensive instructions that handle all response types..."`,
2771
+ validation: "Verify consolidation doesn't lose intent-specific response quality",
2772
+ };
2773
+ break;
2774
+ }
2775
+ if (fix) {
2776
+ fixes.push(fix);
2777
+ }
2778
+ }
2779
+ return fixes;
2780
+ }
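+ // Usage sketch (hypothetical caller, not part of this module; metadata keys are illustrative):
+ //   const report = analyzeWorkflow(workflowDef, { workflow_name: "example" });
+ //   const fixes = suggestWorkflowFixes(report.issues);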