@ema.co/mcp-toolkit 2026.2.23 → 2026.2.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of @ema.co/mcp-toolkit might be problematic. Click here for more details.

@@ -191,8 +191,8 @@ export async function handleData(args, client, readFile) {
191
191
  }
192
192
  if (filePath) {
193
193
  // File upload - use provided readFile or fall back to fs
194
+ let fileContent;
194
195
  try {
195
- let fileContent;
196
196
  if (readFile) {
197
197
  fileContent = await readFile(filePath);
198
198
  }
@@ -200,25 +200,84 @@ export async function handleData(args, client, readFile) {
200
200
  const fs = await import("fs/promises");
201
201
  fileContent = await fs.readFile(filePath);
202
202
  }
203
- const path = await import("path");
204
- const filename = path.basename(filePath);
205
- const result = await client.uploadDataSource(personaId, fileContent, filename, {
206
- widgetName: widgetName, // Pass through widget_name for doc gen personas
203
+ }
204
+ catch (error) {
205
+ return { error: `Failed to read file: ${error instanceof Error ? error.message : String(error)}` };
206
+ }
207
+ const path = await import("path");
208
+ const filename = path.basename(filePath);
209
+ const effectiveWidgetName = widgetName ?? "fileUpload";
210
+ let result;
211
+ try {
212
+ result = await client.uploadDataSource(personaId, fileContent, filename, {
213
+ widgetName: effectiveWidgetName,
207
214
  });
208
- return {
209
- method: "upload",
210
- persona_id: personaId,
211
- path: filePath,
212
- widget_name: widgetName ?? "fileUpload",
213
- ...result,
214
- _warning: "IMPORTANT: Uploaded documents will NOT be used unless your workflow has a search node (search/v2).",
215
- _next_step: `Verify workflow has search: workflow(mode='get', persona_id='${personaId}') → check for search node. If missing, add one.`,
216
- _validation: "Deploy will BLOCK if you have documents but no search node in your workflow.",
217
- };
218
215
  }
219
216
  catch (error) {
220
217
  return { error: `Failed to upload file: ${error instanceof Error ? error.message : String(error)}` };
221
218
  }
219
+ // Auto-create the widget in proto_config if it doesn't exist yet.
220
+ // The upload API stores the file under the given widget_name/tags, but the
221
+ // widget entry in proto_config is what makes it searchable from a workflow.
222
+ // Flow from HAR: POST /api/v2/upload/files → update_persona adds widget entry.
223
+ const existingWidgets = (protoConfig?.widgets ?? []);
224
+ const widgetAlreadyExists = existingWidgets.some(w => w.name === effectiveWidgetName);
225
+ let widgetCreated = false;
226
+ if (!widgetAlreadyExists) {
227
+ const newWidget = {
228
+ name: effectiveWidgetName,
229
+ type: 3,
230
+ title: effectiveWidgetName,
231
+ editable: true,
232
+ fileUpload: {
233
+ localFiles: [],
234
+ tags: [effectiveWidgetName],
235
+ useChunking: true,
236
+ mergeFiles: [],
237
+ transforms: [],
238
+ fileTagMappings: [],
239
+ acceptedMimeTypes: [],
240
+ },
241
+ subtitle: "",
242
+ required: false,
243
+ subProjectType: 0,
244
+ };
245
+ const updatedProtoConfig = {
246
+ ...(protoConfig ?? {}),
247
+ widgets: [...existingWidgets, newWidget],
248
+ };
249
+ try {
250
+ await client.updateAiEmployee({
251
+ persona_id: personaId,
252
+ proto_config: updatedProtoConfig,
253
+ workflow: persona?.workflow_def,
254
+ });
255
+ widgetCreated = true;
256
+ }
257
+ catch (widgetError) {
258
+ return {
259
+ method: "upload",
260
+ persona_id: personaId,
261
+ path: filePath,
262
+ uploaded: true,
263
+ widget_created: false,
264
+ ...result,
265
+ error: `File uploaded successfully but widget registration failed: ${widgetError instanceof Error ? widgetError.message : String(widgetError)}`,
266
+ _tip: "The file is stored. Retry the upload with the same path — it will re-attempt widget creation.",
267
+ };
268
+ }
269
+ }
270
+ return {
271
+ method: "upload",
272
+ persona_id: personaId,
273
+ path: filePath,
274
+ widget_name: effectiveWidgetName,
275
+ widget_created: widgetCreated,
276
+ ...result,
277
+ _warning: "IMPORTANT: Uploaded documents will NOT be used unless your workflow has a search node (search/v2).",
278
+ _next_step: `Verify workflow has search: workflow(mode='get', persona_id='${personaId}') → check for search node wired to widget '${effectiveWidgetName}'. If missing, add one.`,
279
+ _validation: "Deploy will BLOCK if you have documents but no search node in your workflow.",
280
+ };
222
281
  }
223
282
  else if (items && items.length > 0) {
224
283
  // Dashboard row upload (LLM-generated content or file attachments)
@@ -187,24 +187,23 @@ export async function handleReference(args, context) {
187
187
  // type="patterns" - Workflow patterns
188
188
  // ─────────────────────────────────────────────────────────────────────────
189
189
  if (type === "patterns") {
190
- // Get specific pattern
190
+ // Get specific pattern by name
191
191
  if (args.pattern) {
192
- const patternName = args.pattern;
193
- const pattern = WORKFLOW_PATTERNS[patternName];
192
+ const pattern = WORKFLOW_PATTERNS.find(p => p.name === args.pattern);
194
193
  if (!pattern) {
195
- return { error: `Pattern not found: ${args.pattern}` };
194
+ return { error: `Pattern not found: ${args.pattern}`, available: WORKFLOW_PATTERNS.map(p => p.name) };
196
195
  }
197
196
  return pattern;
198
197
  }
199
- // List patterns
200
- let patterns = Object.entries(WORKFLOW_PATTERNS);
198
+ // List patterns (optionally filtered by persona_type)
199
+ let patterns = [...WORKFLOW_PATTERNS];
201
200
  if (args.persona_type) {
202
- patterns = patterns.filter(([_, p]) => !p.personaType || p.personaType === args.persona_type);
201
+ patterns = patterns.filter(p => !p.personaType || p.personaType === args.persona_type);
203
202
  }
204
203
  return {
205
204
  count: patterns.length,
206
- patterns: patterns.map(([name, p]) => ({
207
- name,
205
+ patterns: patterns.map(p => ({
206
+ name: p.name,
208
207
  description: p.description,
209
208
  use_case: p.useCase,
210
209
  persona_type: p.personaType,
@@ -321,8 +320,16 @@ export async function handleReference(args, context) {
321
320
  tags: ["support", "voice", "chat"],
322
321
  description: "Tier 1 customer support automation",
323
322
  },
323
+ {
324
+ id: "finance-dunning",
325
+ name: "Finance - Automated Dunning",
326
+ domain: "finance",
327
+ personas: 2,
328
+ tags: ["finance", "ar", "dunning", "collections", "receivables"],
329
+ description: "AR dunning assistant: balance inquiries, payment links, disputes, payment plans, escalation.",
330
+ },
324
331
  ],
325
- count: 3,
332
+ count: 4,
326
333
  _tip: "Use reference(demo_kit=\"finance-ap\") to get full kit details",
327
334
  };
328
335
  }
@@ -370,7 +377,23 @@ export async function handleReference(args, context) {
370
377
  _tip: "Use persona(method=\"analyze\", id=\"...\") to analyze each persona's workflow",
371
378
  };
372
379
  }
373
- return { error: `Demo kit not found: ${kitId}`, available: ["finance-ap", "sales-sdr", "support-tier1"] };
380
+ if (kitId === "finance-dunning") {
381
+ return {
382
+ id: "finance-dunning",
383
+ name: "Finance - Automated Dunning",
384
+ version: "1.0.0",
385
+ domain: "finance",
386
+ tags: ["finance", "ar", "dunning", "collections", "receivables", "billing"],
387
+ description: "AR dunning assistant: balance inquiries, payment links, disputes, payment plans, escalation. Chat or voice.",
388
+ intents: ["balance_inquiry", "payment_made", "dispute", "payment_plan", "how_to_pay", "escalate", "Fallback"],
389
+ persona_types: ["chat", "voice"],
390
+ demo_script: "docs/demos/finance-dunning.md",
391
+ design_doc: ".context/core/designs/2026-02-23-finance-automated-dunning.md",
392
+ scenario_id: "finance-dunning",
393
+ _tip: "Use demo(mode=\"kit\", persona_id=\"<id>\", scenario=\"finance-dunning\") to generate KB docs and demo script for a dunning persona",
394
+ };
395
+ }
396
+ return { error: `Demo kit not found: ${kitId}`, available: ["finance-ap", "finance-dunning", "sales-sdr", "support-tier1"] };
374
397
  }
375
398
  // ─────────────────────────────────────────────────────────────────────────
376
399
  // tags=true - Get tagging taxonomy
@@ -10,6 +10,7 @@ import { fingerprintPersona } from "../../../sync.js";
10
10
  import { createVersionStorage } from "../../../sync/version-storage.js";
11
11
  import { createVersionPolicyEngine } from "../../../sync/version-policy.js";
12
12
  import { handleWorkflow } from "./index.js";
13
+ import { handleWorkflowOptimize } from "./optimize.js";
13
14
  export async function handleWorkflowAdapter(args, createClient, getDefaultEnvName) {
14
15
  const normalizedArgs = { ...(args ?? {}) };
15
16
  const personaId = normalizedArgs.persona_id ? String(normalizedArgs.persona_id) : undefined;
@@ -152,21 +153,23 @@ export async function handleWorkflowAdapter(args, createClient, getDefaultEnvNam
152
153
  }
153
154
  return deployResult;
154
155
  }
156
+ case "optimize": {
157
+ return handleWorkflowOptimize(normalizedArgs, client);
158
+ }
155
159
  case "analyze":
156
160
  case "compare":
157
161
  case "compile":
158
- case "optimize":
159
162
  case "generate": {
160
163
  return {
161
164
  error: `Mode "${mode}" removed - LLM does this thinking`,
162
165
  hint: "Use workflow(mode='get') to fetch data, then analyze/generate in your reasoning. Deploy with workflow(mode='deploy').",
163
- valid_modes: ["get", "validate", "deploy"],
166
+ valid_modes: ["get", "validate", "deploy", "optimize"],
164
167
  };
165
168
  }
166
169
  default: {
167
170
  return {
168
- error: `Mode required. Valid modes: get, validate, deploy`,
169
- hint: "workflow(mode='get') returns data for LLM. workflow(mode='validate') validates specs. workflow(mode='deploy') executes LLM's workflow_def.",
171
+ error: `Mode required. Valid modes: get, validate, deploy, optimize`,
172
+ hint: "workflow(mode='get') returns data for LLM. workflow(mode='validate') validates specs. workflow(mode='deploy') executes LLM's workflow_def. workflow(mode='optimize') runs structural graph optimization.",
170
173
  example: `workflow(mode="get", persona_id="...")`,
171
174
  };
172
175
  }
@@ -64,11 +64,11 @@ export async function handleWorkflowOptimize(args, client) {
64
64
  // Core result
65
65
  modified: result.modified,
66
66
  workflow_def: result.workflowDef,
67
- // What was done
67
+ // What was done (includes pass number and flow_context for each change)
68
68
  applied_transforms: result.appliedTransforms.length > 0
69
69
  ? result.appliedTransforms
70
70
  : undefined,
71
- // What the LLM should review
71
+ // What the LLM should review manually
72
72
  advisories: result.advisories.length > 0
73
73
  ? result.advisories
74
74
  : undefined,
@@ -76,15 +76,19 @@ export async function handleWorkflowOptimize(args, client) {
76
76
  metrics: result.metrics,
77
77
  // Post-optimization validation
78
78
  validation: result.validation,
79
+ // Compact edge-list of the final workflow (trigger → nodes → outputs)
80
+ flow_diagram: result.flow_diagram,
79
81
  // Guidance
80
82
  _tip: result.modified
81
83
  ? "Optimization applied transforms. Review the changes, then deploy with: workflow(mode='deploy', persona_id='...', base_fingerprint='<fingerprint>', workflow_def={...})"
82
84
  : result.advisories.length > 0
83
85
  ? "No auto-transforms applied, but advisories found. Review them and modify the workflow_def manually if needed."
84
86
  : "Workflow is already optimal. No changes needed.",
85
- _next_step: result.modified
86
- ? "workflow(mode='get', persona_id='...') to get fresh fingerprint, then workflow(mode='deploy', persona_id='...', base_fingerprint='<fingerprint>', workflow_def={optimized_workflow_def})"
87
- : undefined,
87
+ _next_step: result.modified && personaId
88
+ ? `workflow(mode='get', persona_id='${personaId}') to get fresh fingerprint, then workflow(mode='deploy', persona_id='${personaId}', base_fingerprint='<fingerprint>', workflow_def={optimized_workflow_def})`
89
+ : result.modified
90
+ ? "Deploy the optimized workflow_def with: workflow(mode='deploy', persona_id='<target>', base_fingerprint='<fingerprint>', workflow_def={optimized_workflow_def})"
91
+ : undefined,
88
92
  };
89
93
  return response;
90
94
  }
@@ -183,6 +183,11 @@ export const WORKFLOW_PATTERNS = [
183
183
  "respond_for_external_actions.response → WORKFLOW_OUTPUT",
184
184
  ],
185
185
  useCase: "FAQ bot, documentation assistant, policy lookup",
186
+ antiPatterns: [
187
+ "Using call_llm instead of respond_for_external_actions (loses citation and conversation awareness)",
188
+ "Connecting chat_conversation directly to search.query (type mismatch — use conversation_to_search_query)",
189
+ "Forgetting to upload data sources to the persona (search returns empty results)",
190
+ ],
186
191
  },
187
192
  {
188
193
  name: "intent-routing",
@@ -223,6 +228,11 @@ export const WORKFLOW_PATTERNS = [
223
228
  "respond_for_external_actions.response → WORKFLOW_OUTPUT",
224
229
  ],
225
230
  useCase: "Research assistant needing both internal docs and current web info",
231
+ antiPatterns: [
232
+ "Using web search as the primary/only data source (slower, less reliable, uncontrolled content)",
233
+ "Not wiring combine_search_results.combined_results to response node (combined results go unused)",
234
+ "Forgetting to upload internal KB documents (search returns empty, only web results used)",
235
+ ],
226
236
  },
227
237
  {
228
238
  name: "tool-calling",
@@ -241,17 +251,20 @@ export const WORKFLOW_PATTERNS = [
241
251
  antiPatterns: [
242
252
  "Creating duplicate records on follow-up questions",
243
253
  "Not checking conversation history before actions",
254
+ "Forgetting voice-specific widgets (conversationSettings, voiceSettings, callSettings, vadSettings)",
255
+ "external_action_caller does NOT support HITL — cannot gate tool calls with human approval",
244
256
  ],
245
257
  },
246
258
  {
247
259
  name: "hitl-approval",
248
260
  personaType: "chat",
249
261
  description: "Human-in-the-loop approval — enable HITL flag on send_email_agent or entity_extraction_with_documents (only nodes that support HITL)",
250
- nodes: ["chat_trigger", "send_email_agent (with HITL flag)", "respond_for_external_actions"],
262
+ nodes: ["chat_trigger", "entity_extraction_with_documents", "send_email_agent", "respond_for_external_actions"],
251
263
  connections: [
252
- "chat_trigger.user_query → external_action_caller.query",
253
- "chat_trigger.chat_conversation → external_action_caller.conversation",
254
- "external_action_caller.tool_execution_result → respond_for_external_actions.external_action_result",
264
+ "chat_trigger.user_query → entity_extraction_with_documents.query",
265
+ "chat_trigger.chat_conversation → entity_extraction_with_documents.conversation",
266
+ "entity_extraction_with_documents.extraction_columns → send_email_agent (HITL flag enabled: disable_human_interaction: false)",
267
+ "send_email_agent.confirmation → respond_for_external_actions.external_action_result",
255
268
  "chat_trigger.user_query → respond_for_external_actions.query",
256
269
  "chat_trigger.chat_conversation → respond_for_external_actions.conversation",
257
270
  "respond_for_external_actions.response → WORKFLOW_OUTPUT",
@@ -260,6 +273,8 @@ export const WORKFLOW_PATTERNS = [
260
273
  antiPatterns: [
261
274
  "Adding general_hitl as a standalone node (it is NOT deployable)",
262
275
  "Not wiring conversation context to response node",
276
+ "Using external_action_caller for HITL — it does NOT support the HITL flag",
277
+ "Only send_email_agent and entity_extraction_with_documents support HITL (disable_human_interaction: false)",
263
278
  ],
264
279
  },
265
280
  {
@@ -272,31 +287,38 @@ export const WORKFLOW_PATTERNS = [
272
287
  "entity_extraction_with_documents.extraction_columns → rule_validation_with_documents.map_of_extracted_columns",
273
288
  "workflowInput.document-mmf2 → rule_validation_with_documents.primary_docs",
274
289
  "rule_validation_with_documents.ruleset_output → call_llm.named_inputs_Validation_Results",
275
- "entity_extraction_node.extraction_columns → results (dot-notation key)",
276
- "rule_validation_node.ruleset_output → results (dot-notation key)",
277
- "call_llm.llm_output → results (dot-notation key)",
290
+ "entity_extraction_with_documents.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
291
+ "rule_validation_with_documents.ruleset_output → results (dot-notation: '<nodeId>.ruleset_output')",
292
+ "call_llm.llm_output → results (dot-notation: '<nodeId>.llm_output')",
278
293
  ],
279
294
  useCase: "Invoice processing, contract analysis, compliance checking",
295
+ antiPatterns: [
296
+ "Not mapping extraction/validation outputs to results (dashboard columns won't appear)",
297
+ "Missing primary_docs on rule_validation_with_documents (validator needs original documents for context)",
298
+ "Using a single call_llm without passing validation results (analysis lacks validation context)",
299
+ ],
280
300
  },
281
301
  {
282
302
  name: "dashboard-email-notification",
283
303
  personaType: "dashboard",
284
- description: "Document upload → extraction → email notification (with intermediary for type conversion)",
285
- nodes: ["document_trigger", "entity_extraction_with_documents", "json_mapper", "fixed_response", "send_email_agent", "call_llm"],
304
+ description: "Document upload → extraction → email notification (with intermediary for type conversion). Production workflows often add a body generator (call_llm/custom_agent) and dual send paths (auto + HITL) with CC config.",
305
+ nodes: ["document_trigger", "entity_extraction_with_documents", "json_mapper", "fixed_response", "call_llm (body generator)", "send_email_agent"],
286
306
  connections: [
287
307
  "workflowInput.document-mmf2 → entity_extraction_with_documents.documents",
288
308
  "entity_extraction_with_documents.extraction_columns → json_mapper.input_json",
289
309
  "json_mapper.output_json → fixed_response.named_inputs_Extracted_Data (template: '{{to}}', '{{subject}}', etc.)",
290
310
  "fixed_response.response → send_email_agent.email_to (one fixed_response per email field)",
291
- "send_email_agent.confirmation → call_llm.named_inputs_Email_Result",
292
- "entity_extraction_node.extraction_columns → results (dot-notation key)",
293
- "call_llm.llm_output → results (dot-notation key)",
311
+ "entity_extraction_with_documents.extraction_columns → call_llm.named_inputs_Extracted_Data (for body generation)",
312
+ "call_llm.llm_output → send_email_agent.email_body (LLM-generated body)",
313
+ "send_email_agent.confirmation → results (dot-notation: '<nodeId>.confirmation')",
314
+ "entity_extraction_with_documents.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
294
315
  ],
295
- useCase: "Invoice receipt notification, contract alerts, document-triggered emails",
316
+ useCase: "Invoice receipt notification, contract alerts, document-triggered emails, payment confirmations",
296
317
  antiPatterns: [
297
318
  "DO NOT wire entity_extraction directly to send_email — type mismatch (ANY vs TEXT_WITH_SOURCES)",
298
319
  "Use json_mapper + fixed_response as intermediary for type conversion",
299
320
  "Enable HITL flag on send_email_agent (disable_human_interaction: false) if approval needed",
321
+ "For CC/BCC: extract additional recipients via entity_extraction columns, route through separate fixed_response nodes",
300
322
  ],
301
323
  },
302
324
  {
@@ -314,6 +336,11 @@ export const WORKFLOW_PATTERNS = [
314
336
  "response_validator.abstain_reason → [conditional: if invalid] → abstain_action → WORKFLOW_OUTPUT",
315
337
  ],
316
338
  useCase: "Regulated industries, compliance-sensitive responses",
339
+ antiPatterns: [
340
+ "Not connecting both response and abstain paths to WORKFLOW_OUTPUT",
341
+ "Using guardrails without search results (validator has nothing to check against)",
342
+ "Skipping the abstain_action fallback (invalid responses return nothing to user)",
343
+ ],
317
344
  },
318
345
  {
319
346
  name: "externalized-instructions",
@@ -357,6 +384,140 @@ export const WORKFLOW_PATTERNS = [
357
384
  "Not gating fallback separately (fixed_response should handle fallback, not the LLM)",
358
385
  ],
359
386
  },
387
+ // ─── Composite Dashboard Patterns (from FX2 production analysis) ──────────
388
+ {
389
+ name: "multi-phase-validation",
390
+ personaType: "dashboard",
391
+ description: "Document upload → extraction → N sequential rule_validation_with_documents phases. Each phase checks a different concern (format → compliance → cross-reference) and feeds its output to the next. All phase outputs mapped to dashboard columns for per-phase visibility.",
392
+ nodes: ["document_trigger", "entity_extraction_with_documents", "rule_validation_phase_1", "rule_validation_phase_2", "rule_validation_phase_3", "call_llm"],
393
+ connections: [
394
+ "workflowInput.document-mmf2 → entity_extraction_with_documents.documents",
395
+ "entity_extraction_with_documents.extraction_columns → rule_validation_phase_1.map_of_extracted_columns",
396
+ "workflowInput.document-mmf2 → rule_validation_phase_1.primary_docs",
397
+ "rule_validation_phase_1.ruleset_output → rule_validation_phase_2.map_of_extracted_columns",
398
+ "workflowInput.document-mmf2 → rule_validation_phase_2.primary_docs",
399
+ "rule_validation_phase_2.ruleset_output → rule_validation_phase_3.map_of_extracted_columns",
400
+ "workflowInput.document-mmf2 → rule_validation_phase_3.primary_docs",
401
+ "rule_validation_phase_3.ruleset_output → call_llm.named_inputs_Final_Validation",
402
+ "entity_extraction_with_documents.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
403
+ "rule_validation_phase_1.ruleset_output → results (dot-notation: '<nodeId>.ruleset_output')",
404
+ "rule_validation_phase_2.ruleset_output → results (dot-notation: '<nodeId>.ruleset_output')",
405
+ "rule_validation_phase_3.ruleset_output → results (dot-notation: '<nodeId>.ruleset_output')",
406
+ "call_llm.llm_output → results (dot-notation: '<nodeId>.llm_output')",
407
+ ],
408
+ useCase: "Invoice processing with multi-step validation (format → compliance → PO matching), regulatory document review, dunning letter compliance checks",
409
+ antiPatterns: [
410
+ "Running all validation rules in a single phase (loses granularity, hard to debug which phase failed)",
411
+ "Not passing primary_docs to each validation phase (validator needs original documents for context)",
412
+ "Forgetting to map intermediate phase outputs to results (dashboard won't show per-phase status)",
413
+ ],
414
+ },
415
+ {
416
+ name: "confidence-dual-path",
417
+ personaType: "dashboard",
418
+ description: "Dashboard: after extraction and validation, fork into AUTO path (send_email_agent without HITL) and ESCALATE path (send_email_agent with HITL enabled). A confidence/risk score from validation determines which path fires via runIf conditions.",
419
+ nodes: ["document_trigger", "entity_extraction_with_documents", "rule_validation_with_documents", "call_llm (confidence scorer)", "send_email_auto (no HITL)", "send_email_escalate (HITL enabled)"],
420
+ connections: [
421
+ "workflowInput.document-mmf2 → entity_extraction_with_documents.documents",
422
+ "entity_extraction_with_documents.extraction_columns → rule_validation_with_documents.map_of_extracted_columns",
423
+ "workflowInput.document-mmf2 → rule_validation_with_documents.primary_docs",
424
+ "rule_validation_with_documents.ruleset_output → call_llm.named_inputs_Validation_Results",
425
+ "call_llm.llm_output → send_email_auto (runIf: validation_status == PASS)",
426
+ "call_llm.llm_output → send_email_escalate (runIf: validation_status != PASS)",
427
+ "send_email_auto.confirmation → results (dot-notation: '<nodeId>.confirmation')",
428
+ "send_email_escalate.confirmation → results (dot-notation: '<nodeId>.confirmation')",
429
+ "entity_extraction_with_documents.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
430
+ "rule_validation_with_documents.ruleset_output → results (dot-notation: '<nodeId>.ruleset_output')",
431
+ ],
432
+ useCase: "AP invoice processing (clean invoices auto-process, exceptions need human review), dunning workflows (high-confidence auto-send, ambiguous escalate), contract approvals",
433
+ antiPatterns: [
434
+ "Using a single send_email_agent for both paths (loses ability to gate high-risk sends separately)",
435
+ "Not having the ESCALATE path (all documents auto-process with no human oversight for edge cases)",
436
+ "Hardcoding threshold in node config — use validation rules output to drive the routing decision",
437
+ "Forgetting to use intermediary (json_mapper + fixed_response) between extraction and send_email inputs",
438
+ ],
439
+ },
440
+ {
441
+ name: "document-intake-resolution",
442
+ personaType: "dashboard",
443
+ description: "Dashboard: full document intake pipeline — extract entities → convert/normalize → search knowledge base for matching records → LLM resolves/reconciles against master data. The resolution chain ensures extracted entities are validated against existing records before downstream processing.",
444
+ nodes: ["document_trigger", "entity_extraction_with_documents", "json_mapper", "search", "call_llm (resolver)"],
445
+ connections: [
446
+ "workflowInput.document-mmf2 → entity_extraction_with_documents.documents",
447
+ "entity_extraction_with_documents.extraction_columns → json_mapper.input_json",
448
+ "json_mapper.output_json → search.query (lookup extracted entity in KB)",
449
+ "search.search_results → call_llm.named_inputs_Matching_Records",
450
+ "entity_extraction_with_documents.extraction_columns → call_llm.named_inputs_Extracted_Data",
451
+ "call_llm.llm_output → results (dot-notation: '<nodeId>.llm_output' — resolution status + matched record)",
452
+ "entity_extraction_with_documents.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
453
+ ],
454
+ useCase: "Invoice vendor matching against vendor master, contract party resolution, employee onboarding verification against HR records, PO line-item matching",
455
+ antiPatterns: [
456
+ "Skipping the search/resolution step (extracted data goes unvalidated against master data)",
457
+ "Using entity_extraction output directly as the resolved entity (extraction ≠ resolution)",
458
+ "Not handling 'no match found' case in the resolver LLM (must surface unresolved items)",
459
+ ],
460
+ },
461
+ {
462
+ name: "hitl-decision-form",
463
+ personaType: "dashboard",
464
+ description: "Dashboard: use entity_extraction_with_documents with HITL flag as a human review/decision form — presenting processed data for human verification or correction before downstream actions. The extraction columns define the form fields the reviewer sees and can modify.",
465
+ nodes: ["document_trigger", "entity_extraction_with_documents (processing)", "call_llm (prepare review)", "entity_extraction_with_documents (HITL review form)", "send_email_agent"],
466
+ connections: [
467
+ "workflowInput.document-mmf2 → entity_extraction_processing.documents",
468
+ "entity_extraction_processing.extraction_columns → call_llm.named_inputs_Extracted_Data",
469
+ "call_llm.llm_output → entity_extraction_review.named_inputs_Summary (HITL enabled)",
470
+ "entity_extraction_review.extraction_columns → send_email_agent (human-verified data)",
471
+ "entity_extraction_review.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
472
+ ],
473
+ useCase: "Invoice approval where reviewer corrects extracted amounts before payment, contract review where legal team verifies extracted terms, compliance review with sign-off",
474
+ antiPatterns: [
475
+ "Using general_hitl (NOT deployable) — use HITL flag on entity_extraction_with_documents",
476
+ "Confusing extraction-for-processing with extraction-as-review-form (different roles, different column configs)",
477
+ "Not passing processed data to the review form's named_inputs (reviewer sees empty form)",
478
+ ],
479
+ },
480
+ {
481
+ name: "document-generation-pipeline",
482
+ personaType: "dashboard",
483
+ description: "Dashboard: generate formatted documents from processed data — extraction → LLM drafts content → generate_document creates formatted output (PDF) → send_email_agent delivers as attachment.",
484
+ nodes: ["document_trigger", "entity_extraction_with_documents", "call_llm (content drafter)", "generate_document", "send_email_agent"],
485
+ connections: [
486
+ "workflowInput.document-mmf2 → entity_extraction_with_documents.documents",
487
+ "entity_extraction_with_documents.extraction_columns → call_llm.named_inputs_Extracted_Data",
488
+ "call_llm.llm_output → generate_document.markdown_file_contents",
489
+ "generate_document.document_link → send_email_agent.named_inputs_Attachment",
490
+ "send_email_agent.confirmation → results (dot-notation: '<nodeId>.confirmation')",
491
+ "entity_extraction_with_documents.extraction_columns → results (dot-notation: '<nodeId>.extraction_columns')",
492
+ ],
493
+ useCase: "Dunning letter generation, invoice creation from PO data, compliance report generation, customer correspondence, welcome packets",
494
+ antiPatterns: [
495
+ "Putting the full template in call_llm instructions (use data source templates for strict regulatory formats)",
496
+ "Skipping generate_document and sending raw LLM text as attachment (no formatting, no PDF)",
497
+ "Not including extracted entity data in the LLM's named_inputs (generated document lacks specifics)",
498
+ ],
499
+ },
500
+ // ─── Voice Patterns ──────────────────────────────────────────────────────
501
+ {
502
+ name: "voice-kb-search",
503
+ personaType: "voice",
504
+ description: "Voice AI with knowledge base search only — no external actions or side effects. Clean 4-node pattern for informational help desks. Requires voice-specific widgets.",
505
+ nodes: ["chat_trigger", "conversation_to_search_query", "search", "respond_for_external_actions"],
506
+ connections: [
507
+ "chat_trigger.chat_conversation → conversation_to_search_query.conversation",
508
+ "conversation_to_search_query.summarized_conversation → search.query",
509
+ "search.search_results → respond_for_external_actions.external_action_result",
510
+ "chat_trigger.user_query → respond_for_external_actions.query",
511
+ "chat_trigger.chat_conversation → respond_for_external_actions.conversation",
512
+ "respond_for_external_actions.response → WORKFLOW_OUTPUT",
513
+ ],
514
+ useCase: "FX rate inquiries, policy Q&A hotline, product information line, internal help desk for common questions",
515
+ antiPatterns: [
516
+ "Adding external_action_caller when no side effects are needed (over-engineering)",
517
+ "Using call_llm instead of respond_for_external_actions (loses citation and conversation awareness)",
518
+ "Forgetting voice-specific widgets (conversationSettings, voiceSettings, callSettings, vadSettings)",
519
+ ],
520
+ },
360
521
  ];
361
522
  // ─────────────────────────────────────────────────────────────────────────────
362
523
  // Qualifying Questions
@@ -1523,8 +1523,13 @@ persona(method="update", id="<ID>", config={widgets: [...]})
1523
1523
 
1524
1524
  ### 6. Upload Knowledge
1525
1525
  \`\`\`
1526
+ # Default widget ('fileUpload') — exists on every chat/voice persona
1526
1527
  persona(id="<ID>", data={method:"upload", path:"your-data.txt"})
1528
+
1529
+ # Custom widget — auto-created in proto_config on first upload (widget_created: true in response)
1530
+ persona(id="<ID>", data={method:"upload", path:"policies.pdf", widget_name:"policies"})
1527
1531
  \`\`\`
1532
+ Wire any custom widget to a search node: \`search/v2\` → \`datastore_configs\` → \`widgetConfig: { widgetName: "<widget_name>" }\`.
1528
1533
 
1529
1534
  ## Hard Requirements
1530
1535
 
package/dist/mcp/tools.js CHANGED
@@ -278,7 +278,7 @@ persona(
278
278
  // Especially important for Document Generation personas with multiple upload widgets
279
279
  widget_name: {
280
280
  type: "string",
281
- description: "Target widget for upload OR filter for stats. For Document Proposal Manager: 'upload' (Content Repository), 'upload1' (Service Line Docs), 'upload2' (Style Guide). Default: 'fileUpload'. See catalog(type='widgets') for reference."
281
+ description: "Target widget for upload OR filter for stats. Default: 'fileUpload'. If the named widget doesn't exist in the persona's proto_config, it is auto-created (type 3, fileUpload). Use a custom name to create a second knowledge base alongside the default one. For Document Proposal Manager: 'upload' (Content Repository), 'upload1' (Service Line Docs), 'upload2' (Style Guide). See catalog(type='widgets') for reference."
282
282
  },
283
283
  // delete params
284
284
  file_id: { type: "string", description: "File/item ID to delete (for method=delete)" },
@@ -704,9 +704,9 @@ persona(
704
704
  type: "number",
705
705
  description: "Max results to return (for method=conversations)",
706
706
  },
707
- offset: {
708
- type: "number",
709
- description: "Pagination offset (for method=conversations)",
707
+ pagination_token: {
708
+ type: "string",
709
+ description: "Pagination token from previous response (for method=conversations)",
710
710
  },
711
711
  channel: {
712
712
  type: "string",
@@ -1,7 +1,7 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
- import { createSseClient } from '../core/serverSentEvents.gen';
3
- import { getValidRequestBody } from '../core/utils.gen';
4
- import { buildUrl, createConfig, createInterceptors, getParseAs, mergeConfigs, mergeHeaders, setAuthParams, } from './utils.gen';
2
+ import { createSseClient } from '../core/serverSentEvents.gen.js';
3
+ import { getValidRequestBody } from '../core/utils.gen.js';
4
+ import { buildUrl, createConfig, createInterceptors, getParseAs, mergeConfigs, mergeHeaders, setAuthParams, } from './utils.gen.js';
5
5
  export const createClient = (config = {}) => {
6
6
  let _config = mergeConfigs(createConfig(), config);
7
7
  const getConfig = () => ({ ..._config });
@@ -1,6 +1,6 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
- export { formDataBodySerializer, jsonBodySerializer, urlSearchParamsBodySerializer, } from '../core/bodySerializer.gen';
3
- export { buildClientParams } from '../core/params.gen';
4
- export { serializeQueryKeyValue } from '../core/queryKeySerializer.gen';
5
- export { createClient } from './client.gen';
6
- export { createConfig, mergeHeaders } from './utils.gen';
2
+ export { formDataBodySerializer, jsonBodySerializer, urlSearchParamsBodySerializer, } from '../core/bodySerializer.gen.js';
3
+ export { buildClientParams } from '../core/params.gen.js';
4
+ export { serializeQueryKeyValue } from '../core/queryKeySerializer.gen.js';
5
+ export { createClient } from './client.gen.js';
6
+ export { createConfig, mergeHeaders } from './utils.gen.js';
@@ -1,8 +1,8 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
- import { getAuthToken } from '../core/auth.gen';
3
- import { jsonBodySerializer } from '../core/bodySerializer.gen';
4
- import { serializeArrayParam, serializeObjectParam, serializePrimitiveParam, } from '../core/pathSerializer.gen';
5
- import { getUrl } from '../core/utils.gen';
2
+ import { getAuthToken } from '../core/auth.gen.js';
3
+ import { jsonBodySerializer } from '../core/bodySerializer.gen.js';
4
+ import { serializeArrayParam, serializeObjectParam, serializePrimitiveParam, } from '../core/pathSerializer.gen.js';
5
+ import { getUrl } from '../core/utils.gen.js';
6
6
  export const createQuerySerializer = ({ parameters = {}, ...args } = {}) => {
7
7
  const querySerializer = (queryParams) => {
8
8
  const search = [];
@@ -1,3 +1,3 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
- import { createClient, createConfig } from './client';
2
+ import { createClient, createConfig } from './client/index.js';
3
3
  export const client = createClient(createConfig({ baseUrl: 'https://api.ema.co' }));
@@ -1,5 +1,5 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
- import { serializeArrayParam, serializeObjectParam, serializePrimitiveParam, } from './pathSerializer.gen';
2
+ import { serializeArrayParam, serializeObjectParam, serializePrimitiveParam, } from './pathSerializer.gen.js';
3
3
  export const PATH_PARAM_RE = /\{[^{}]+\}/g;
4
4
  export const defaultPathSerializer = ({ path, url: _url }) => {
5
5
  let url = _url;
@@ -1,2 +1,2 @@
1
1
  // This file is auto-generated by @hey-api/openapi-ts
2
- export { archiveProjectById, createChatbotConversation, createConversation, createDocument, createPersona, createPersonaTemplate, createProject, createProjectTemplate, deleteConversation, deletePersona, deletePersonaTemplate, deleteProjectTemplate, editChartSnippet, emailActionImage, emailActionLink, emailActionUnsubscribe, fetchTicketUpdates, getChatbotConfig, getChatHistory, getConversationMessages, getConversationMessagesPaginated, getDefaultPersonaTemplateAccessSettingsForTenant, getLinkedInAuthUrl, getLogoUploadUrl, getMailLoginUrlForPersona, getPersonaAccessLevelById, getPersonaById, getPersonaTemplateById, getProjectById, getProjectsForPersona, getProjectTemplateByPersonaId, getWelcomeMessage, grantPersonaTemplateAccess, healthBasicCheck, healthCheck, importPersona, linkedinAccountNotification, linkedinActionLink, listAllPersonaTemplates, listAvailablePhoneNumbers, listConversations, listConversationsForPersona, listConversationsPaginated, listPersonas, listPersonaTemplates, listPersonaTemplatesPost, listSavedSnippets, metrics, processWebPrompt, provisionPhoneNumber, regenerateDocument, retrieveDocument, revokePersonaTemplateAccess, saveSnippet, sendChatMessage, sendChatMessageAsync, setDefaultPersonaTemplateAccessSetting, setPersonaTemplateAccessSettingsForNewSubTenant, streamChatbotConversationsCsv, triggerProjectWorkflow, unsaveSnippet, updateConversationDisplayName, updateDataUploadStatus, updateDocument, updateMessageFeedback, updatePersona, updatePersonaTemplate, updateProject, updateProjectTemplate, uploadLogo, verifyLogoUpload } from './sdk.gen';
2
+ export { archiveProjectById, createChatbotConversation, createConversation, createDocument, createPersona, createPersonaTemplate, createProject, createProjectTemplate, deleteConversation, deletePersona, deletePersonaTemplate, deleteProjectTemplate, editChartSnippet, emailActionImage, emailActionLink, emailActionUnsubscribe, fetchTicketUpdates, getChatbotConfig, getChatHistory, getConversationMessages, getConversationMessagesPaginated, getDefaultPersonaTemplateAccessSettingsForTenant, getLinkedInAuthUrl, getLogoUploadUrl, getMailLoginUrlForPersona, getPersonaAccessLevelById, getPersonaById, getPersonaTemplateById, getProjectById, getProjectsForPersona, getProjectTemplateByPersonaId, getWelcomeMessage, grantPersonaTemplateAccess, healthBasicCheck, healthCheck, importPersona, linkedinAccountNotification, linkedinActionLink, listAllPersonaTemplates, listAvailablePhoneNumbers, listConversations, listConversationsForPersona, listConversationsPaginated, listPersonas, listPersonaTemplates, listPersonaTemplatesPost, listSavedSnippets, metrics, processWebPrompt, provisionPhoneNumber, regenerateDocument, retrieveDocument, revokePersonaTemplateAccess, saveSnippet, sendChatMessage, sendChatMessageAsync, setDefaultPersonaTemplateAccessSetting, setPersonaTemplateAccessSettingsForNewSubTenant, streamChatbotConversationsCsv, triggerProjectWorkflow, unsaveSnippet, updateConversationDisplayName, updateDataUploadStatus, updateDocument, updateMessageFeedback, updatePersona, updatePersonaTemplate, updateProject, updateProjectTemplate, uploadLogo, verifyLogoUpload } from './sdk.gen.js';