@hotmeshio/long-tail 0.1.5 → 0.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +78 -8
- package/build/api/auth.d.ts +5 -0
- package/build/api/auth.js +42 -0
- package/build/api/bot-accounts.d.ts +50 -0
- package/build/api/bot-accounts.js +215 -0
- package/build/api/controlplane.d.ts +20 -0
- package/build/api/controlplane.js +110 -0
- package/build/api/dba.d.ts +15 -0
- package/build/api/dba.js +68 -0
- package/build/api/escalations.d.ts +70 -0
- package/build/api/escalations.js +656 -0
- package/build/api/exports.d.ts +32 -0
- package/build/api/exports.js +146 -0
- package/build/api/index.d.ts +18 -0
- package/build/api/index.js +54 -0
- package/build/api/insight.d.ts +29 -0
- package/build/api/insight.js +90 -0
- package/build/api/maintenance.d.ts +7 -0
- package/build/api/maintenance.js +28 -0
- package/build/api/mcp-runs.d.ts +16 -0
- package/build/api/mcp-runs.js +62 -0
- package/build/api/mcp.d.ts +52 -0
- package/build/api/mcp.js +212 -0
- package/build/api/namespaces.d.ts +7 -0
- package/build/{routes/escalations/helpers.js → api/namespaces.js} +24 -12
- package/build/api/roles.d.ts +25 -0
- package/build/api/roles.js +159 -0
- package/build/api/settings.d.ts +2 -0
- package/build/api/settings.js +35 -0
- package/build/api/tasks.d.ts +27 -0
- package/build/api/tasks.js +96 -0
- package/build/api/users.d.ts +44 -0
- package/build/api/users.js +162 -0
- package/build/api/workflow-sets.d.ts +26 -0
- package/build/api/workflow-sets.js +119 -0
- package/build/api/workflows.d.ts +48 -0
- package/build/api/workflows.js +298 -0
- package/build/api/yaml-workflows.d.ts +87 -0
- package/build/api/yaml-workflows.js +556 -0
- package/build/index.d.ts +4 -0
- package/build/index.js +6 -1
- package/build/lib/db/migrate.js +3 -6
- package/{lib → build/lib}/db/schemas/001_schema.sql +3 -0
- package/build/lib/db/schemas/004_workflow_sets.sql +29 -0
- package/build/lib/db/schemas/005_unique_graph_topic.sql +7 -0
- package/{lib → build/lib}/db/schemas/011_system_workflow_configs.sql +14 -0
- package/build/lib/db/schemas/016_streamable_http.sql +7 -0
- package/build/lib/events/callback.d.ts +41 -0
- package/build/lib/events/callback.js +98 -0
- package/build/modules/config.js +1 -1
- package/build/routes/auth.js +37 -36
- package/build/routes/bot-accounts.js +34 -164
- package/build/routes/controlplane.js +20 -60
- package/build/routes/dba.js +18 -28
- package/build/routes/docs.js +25 -7
- package/build/routes/escalations/bulk.js +17 -192
- package/build/routes/escalations/list.js +29 -75
- package/build/routes/escalations/resolve.js +3 -193
- package/build/routes/escalations/single.js +13 -122
- package/build/routes/exports.js +44 -95
- package/build/routes/index.js +2 -0
- package/build/routes/insight.js +61 -40
- package/build/routes/maintenance.js +41 -17
- package/build/routes/mcp-runs.js +52 -60
- package/build/routes/mcp.js +55 -161
- package/build/routes/namespaces.js +9 -20
- package/build/routes/roles.js +23 -97
- package/build/routes/settings.js +37 -25
- package/build/routes/tasks.js +28 -64
- package/build/routes/users.js +24 -113
- package/build/routes/workflow-sets.d.ts +2 -0
- package/build/routes/workflow-sets.js +98 -0
- package/build/routes/workflows/config.js +23 -57
- package/build/routes/workflows/discovery.js +11 -85
- package/build/routes/workflows/invocation.js +16 -84
- package/build/routes/yaml-workflows/cron.d.ts +2 -0
- package/build/routes/yaml-workflows/cron.js +68 -0
- package/build/routes/yaml-workflows/crud.js +38 -193
- package/build/routes/yaml-workflows/deployment.js +15 -140
- package/build/routes/yaml-workflows/index.js +4 -1
- package/build/routes/yaml-workflows/versions.js +20 -58
- package/build/sdk/index.d.ts +327 -0
- package/build/sdk/index.js +298 -0
- package/build/services/controlplane/index.d.ts +1 -2
- package/build/services/controlplane/index.js +3 -3
- package/build/services/controlplane/sql.d.ts +2 -2
- package/build/services/controlplane/sql.js +4 -5
- package/build/services/controlplane/types.d.ts +1 -0
- package/build/services/cron/index.d.ts +17 -0
- package/build/services/cron/index.js +94 -1
- package/build/services/export/index.js +6 -1
- package/build/services/hotmesh-utils.js +2 -4
- package/build/services/insight/index.d.ts +18 -0
- package/build/services/insight/index.js +60 -0
- package/build/services/mcp/client/connection.d.ts +9 -0
- package/build/services/mcp/client/connection.js +49 -1
- package/build/services/mcp/client/tools.js +13 -1
- package/build/services/mcp/db-server/schemas.d.ts +2 -2
- package/build/services/mcp/db.d.ts +1 -1
- package/build/services/mcp/db.js +10 -0
- package/build/services/mcp/playwright-server/schemas.d.ts +4 -4
- package/build/services/mcp/sql.d.ts +1 -1
- package/build/services/mcp/sql.js +2 -2
- package/build/services/mcp/types.d.ts +3 -1
- package/build/services/mcp-runs/sql.js +1 -1
- package/build/services/workflow-sets/db.d.ts +16 -0
- package/build/services/workflow-sets/db.js +78 -0
- package/build/services/workflow-sets/index.d.ts +1 -0
- package/build/services/workflow-sets/index.js +11 -0
- package/build/services/workflow-sets/sql.d.ts +6 -0
- package/build/services/workflow-sets/sql.js +24 -0
- package/build/services/yaml-workflow/db-utils.d.ts +1 -0
- package/build/services/yaml-workflow/db-utils.js +4 -0
- package/build/services/yaml-workflow/db.d.ts +8 -0
- package/build/services/yaml-workflow/db.js +41 -0
- package/build/services/yaml-workflow/invoke.d.ts +19 -0
- package/build/services/yaml-workflow/invoke.js +80 -0
- package/build/services/yaml-workflow/pipeline/build/dag.js +1 -1
- package/build/services/yaml-workflow/pipeline/build/wiring.d.ts +1 -1
- package/build/services/yaml-workflow/pipeline/build/wiring.js +102 -2
- package/build/services/yaml-workflow/pipeline/prompts.d.ts +1 -1
- package/build/services/yaml-workflow/pipeline/prompts.js +44 -1
- package/build/services/yaml-workflow/sql.d.ts +5 -1
- package/build/services/yaml-workflow/sql.js +23 -3
- package/build/services/yaml-workflow/types.d.ts +16 -1
- package/build/services/yaml-workflow/workers/callbacks.js +16 -2
- package/build/services/yaml-workflow/workers/register.js +36 -1
- package/build/start/adapters.js +4 -0
- package/build/system/index.js +12 -0
- package/build/system/mcp-servers/admin/schemas.d.ts +12 -12
- package/build/system/mcp-servers/db-query/schemas.d.ts +2 -2
- package/build/system/mcp-servers/knowledge.js +1 -1
- package/build/system/mcp-servers/playwright/schemas.d.ts +18 -18
- package/build/system/mcp-servers/playwright-cli/schemas.d.ts +34 -34
- package/build/system/mcp-servers/playwright-cli/tools-capture.js +5 -1
- package/build/system/mcp-servers/vision.js +54 -17
- package/build/system/seed/server-definitions.d.ts +7 -0
- package/build/system/seed/server-definitions.js +6 -2
- package/build/system/seed/tool-manifests-workflows.d.ts +7 -0
- package/build/system/seed/tool-manifests-workflows.js +10 -3
- package/build/system/workflows/mcp-workflow-builder/activities/caches.d.ts +5 -0
- package/build/system/workflows/mcp-workflow-builder/activities/caches.js +8 -0
- package/build/system/workflows/mcp-workflow-builder/activities/index.d.ts +2 -0
- package/build/system/workflows/mcp-workflow-builder/activities/index.js +8 -0
- package/build/system/workflows/mcp-workflow-builder/activities/llm.d.ts +2 -0
- package/build/system/workflows/mcp-workflow-builder/activities/llm.js +25 -0
- package/build/system/workflows/mcp-workflow-builder/activities/tool-loader.d.ts +11 -0
- package/build/system/workflows/mcp-workflow-builder/activities/tool-loader.js +34 -0
- package/build/system/workflows/mcp-workflow-builder/index.d.ts +16 -0
- package/build/system/workflows/mcp-workflow-builder/index.js +253 -0
- package/build/system/workflows/mcp-workflow-builder/prompts.d.ts +8 -0
- package/build/system/workflows/mcp-workflow-builder/prompts.js +316 -0
- package/build/system/workflows/mcp-workflow-planner/activities/analyze.d.ts +11 -0
- package/build/system/workflows/mcp-workflow-planner/activities/analyze.js +36 -0
- package/build/system/workflows/mcp-workflow-planner/activities/index.d.ts +3 -0
- package/build/system/workflows/mcp-workflow-planner/activities/index.js +12 -0
- package/build/system/workflows/mcp-workflow-planner/activities/persist.d.ts +19 -0
- package/build/system/workflows/mcp-workflow-planner/activities/persist.js +55 -0
- package/build/system/workflows/mcp-workflow-planner/activities/plan.d.ts +10 -0
- package/build/system/workflows/mcp-workflow-planner/activities/plan.js +43 -0
- package/build/system/workflows/mcp-workflow-planner/index.d.ts +7 -0
- package/build/system/workflows/mcp-workflow-planner/index.js +152 -0
- package/build/system/workflows/mcp-workflow-planner/prompts.d.ts +7 -0
- package/build/system/workflows/mcp-workflow-planner/prompts.js +77 -0
- package/build/system/workflows/shared/tool-loader.js +3 -0
- package/build/tsconfig.tsbuildinfo +1 -1
- package/build/types/index.d.ts +1 -0
- package/build/types/mcp.d.ts +4 -3
- package/build/types/sdk.d.ts +27 -0
- package/build/types/sdk.js +2 -0
- package/build/types/workflow-set.d.ts +44 -0
- package/build/types/workflow-set.js +5 -0
- package/build/types/yaml-workflow.d.ts +6 -2
- package/dashboard/dist/assets/AdminDashboard-DRjkRSjJ.js +2 -0
- package/dashboard/dist/assets/{AdminDashboard-CTyAMUJR.js.map → AdminDashboard-DRjkRSjJ.js.map} +1 -1
- package/dashboard/dist/assets/AvailableEscalationsPage-CnivX4Tz.js +2 -0
- package/dashboard/dist/assets/AvailableEscalationsPage-CnivX4Tz.js.map +1 -0
- package/dashboard/dist/assets/BotPicker-DwwaBhTH.js +2 -0
- package/dashboard/dist/assets/{BotPicker-C51nKFEu.js.map → BotPicker-DwwaBhTH.js.map} +1 -1
- package/dashboard/dist/assets/{CollapsibleSection-BSyfd8uL.js → CollapsibleSection-DQpaVA0M.js} +2 -2
- package/dashboard/dist/assets/{CollapsibleSection-BSyfd8uL.js.map → CollapsibleSection-DQpaVA0M.js.map} +1 -1
- package/dashboard/dist/assets/{ConfirmDeleteModal-CBdhia5T.js → ConfirmDeleteModal-B7JoDNvt.js} +2 -2
- package/dashboard/dist/assets/{ConfirmDeleteModal-CBdhia5T.js.map → ConfirmDeleteModal-B7JoDNvt.js.map} +1 -1
- package/dashboard/dist/assets/{CopyableId-dGlewBCS.js → CopyableId-AqoZayBG.js} +2 -2
- package/dashboard/dist/assets/{CopyableId-dGlewBCS.js.map → CopyableId-AqoZayBG.js.map} +1 -1
- package/dashboard/dist/assets/CredentialsPage-qGw1kQzi.js +2 -0
- package/dashboard/dist/assets/CredentialsPage-qGw1kQzi.js.map +1 -0
- package/dashboard/dist/assets/{CustomDurationPicker-BataWFj8.js → CustomDurationPicker-D1HUQcd0.js} +2 -2
- package/dashboard/dist/assets/{CustomDurationPicker-BataWFj8.js.map → CustomDurationPicker-D1HUQcd0.js.map} +1 -1
- package/dashboard/dist/assets/{DataTable-B3uf5CCo.js → DataTable-DKvSKoVG.js} +2 -2
- package/dashboard/dist/assets/{DataTable-B3uf5CCo.js.map → DataTable-DKvSKoVG.js.map} +1 -1
- package/dashboard/dist/assets/{ElapsedCell-G5oSwTpT.js → ElapsedCell-B0yrReGQ.js} +2 -2
- package/dashboard/dist/assets/{ElapsedCell-G5oSwTpT.js.map → ElapsedCell-B0yrReGQ.js.map} +1 -1
- package/dashboard/dist/assets/{EmptyState-BChBJNGS.js → EmptyState-X0fIzYID.js} +2 -2
- package/dashboard/dist/assets/{EmptyState-BChBJNGS.js.map → EmptyState-X0fIzYID.js.map} +1 -1
- package/dashboard/dist/assets/{EscalationsOverview-CxUv8xjG.js → EscalationsOverview-BQAT9W7r.js} +2 -2
- package/dashboard/dist/assets/{EscalationsOverview-CxUv8xjG.js.map → EscalationsOverview-BQAT9W7r.js.map} +1 -1
- package/dashboard/dist/assets/{EventTable-CVt8B0BZ.js → EventTable-CX1KNLhZ.js} +2 -2
- package/dashboard/dist/assets/{EventTable-CVt8B0BZ.js.map → EventTable-CX1KNLhZ.js.map} +1 -1
- package/dashboard/dist/assets/{FilterBar-CShf0oe7.js → FilterBar-DMTvuQy-.js} +2 -2
- package/dashboard/dist/assets/{FilterBar-CShf0oe7.js.map → FilterBar-DMTvuQy-.js.map} +1 -1
- package/dashboard/dist/assets/ListToolbar-DTOSxoEy.js +2 -0
- package/dashboard/dist/assets/ListToolbar-DTOSxoEy.js.map +1 -0
- package/dashboard/dist/assets/{McpOverview-CbaZRnJl.js → McpOverview-BaKTIWrG.js} +2 -2
- package/dashboard/dist/assets/{McpOverview-CbaZRnJl.js.map → McpOverview-BaKTIWrG.js.map} +1 -1
- package/dashboard/dist/assets/McpQueryDetailPage-CC08T5k8.js +5 -0
- package/dashboard/dist/assets/McpQueryDetailPage-CC08T5k8.js.map +1 -0
- package/dashboard/dist/assets/McpQueryPage-CVfF9dYg.js +2 -0
- package/dashboard/dist/assets/McpQueryPage-CVfF9dYg.js.map +1 -0
- package/dashboard/dist/assets/McpRunDetailPage-CKs1RWeV.js +2 -0
- package/dashboard/dist/assets/McpRunDetailPage-CKs1RWeV.js.map +1 -0
- package/dashboard/dist/assets/McpRunsPage-CcPD_tY1.js +2 -0
- package/dashboard/dist/assets/McpRunsPage-CcPD_tY1.js.map +1 -0
- package/dashboard/dist/assets/{Modal-CI5RBPOQ.js → Modal-_2AbWxJT.js} +2 -2
- package/dashboard/dist/assets/{Modal-CI5RBPOQ.js.map → Modal-_2AbWxJT.js.map} +1 -1
- package/dashboard/dist/assets/OperatorDashboard-BGiRaRDr.js +2 -0
- package/dashboard/dist/assets/OperatorDashboard-BGiRaRDr.js.map +1 -0
- package/dashboard/dist/assets/{PageHeader-SMD9qtOO.js → PageHeader-DVr5Qyzm.js} +2 -2
- package/dashboard/dist/assets/{PageHeader-SMD9qtOO.js.map → PageHeader-DVr5Qyzm.js.map} +1 -1
- package/dashboard/dist/assets/{PageHeaderWithStats-TikLQsTp.js → PageHeaderWithStats-D0KRASML.js} +2 -2
- package/dashboard/dist/assets/{PageHeaderWithStats-TikLQsTp.js.map → PageHeaderWithStats-D0KRASML.js.map} +1 -1
- package/dashboard/dist/assets/{PriorityBadge-CQ0EsLTA.js → PriorityBadge-Bx2559OU.js} +2 -2
- package/dashboard/dist/assets/{PriorityBadge-CQ0EsLTA.js.map → PriorityBadge-Bx2559OU.js.map} +1 -1
- package/dashboard/dist/assets/ProcessDetailPage-69I--sry.js +2 -0
- package/dashboard/dist/assets/ProcessDetailPage-69I--sry.js.map +1 -0
- package/dashboard/dist/assets/ProcessesListPage-BDpUbua2.js +2 -0
- package/dashboard/dist/assets/ProcessesListPage-BDpUbua2.js.map +1 -0
- package/dashboard/dist/assets/{RolePill-Crj4TH5p.js → RolePill-CcAqEaSt.js} +2 -2
- package/dashboard/dist/assets/{RolePill-Crj4TH5p.js.map → RolePill-CcAqEaSt.js.map} +1 -1
- package/dashboard/dist/assets/{RolesPage-C_RInUwS.js → RolesPage-Cl23Hjet.js} +2 -2
- package/dashboard/dist/assets/{RolesPage-C_RInUwS.js.map → RolesPage-Cl23Hjet.js.map} +1 -1
- package/dashboard/dist/assets/{RowActions-Cp5HyK_w.js → RowActions-B4mqIt3Z.js} +2 -2
- package/dashboard/dist/assets/{RowActions-Cp5HyK_w.js.map → RowActions-B4mqIt3Z.js.map} +1 -1
- package/dashboard/dist/assets/{StatCard-BKZLSgNV.js → StatCard-Cz_2OjAZ.js} +2 -2
- package/dashboard/dist/assets/{StatCard-BKZLSgNV.js.map → StatCard-Cz_2OjAZ.js.map} +1 -1
- package/dashboard/dist/assets/{StatusBadge-BYNGGZK5.js → StatusBadge-Wi2FJZsn.js} +2 -2
- package/dashboard/dist/assets/{StatusBadge-BYNGGZK5.js.map → StatusBadge-Wi2FJZsn.js.map} +1 -1
- package/dashboard/dist/assets/StepIndicator-PW5NRDMb.js +2 -0
- package/dashboard/dist/assets/StepIndicator-PW5NRDMb.js.map +1 -0
- package/dashboard/dist/assets/{StickyPagination-CTosgiU2.js → StickyPagination-Bl2Uzz65.js} +2 -2
- package/dashboard/dist/assets/{StickyPagination-CTosgiU2.js.map → StickyPagination-Bl2Uzz65.js.map} +1 -1
- package/dashboard/dist/assets/{SwimlaneTimeline-ylG5Ps1s.js → SwimlaneTimeline-CUPqMd0z.js} +2 -2
- package/dashboard/dist/assets/{SwimlaneTimeline-ylG5Ps1s.js.map → SwimlaneTimeline-CUPqMd0z.js.map} +1 -1
- package/dashboard/dist/assets/TagInput-BLtf86Ly.js +2 -0
- package/dashboard/dist/assets/TagInput-BLtf86Ly.js.map +1 -0
- package/dashboard/dist/assets/{TaskDetailPage-C9pDGdD2.js → TaskDetailPage-BXJFX74D.js} +2 -2
- package/dashboard/dist/assets/{TaskDetailPage-C9pDGdD2.js.map → TaskDetailPage-BXJFX74D.js.map} +1 -1
- package/dashboard/dist/assets/{TaskQueuePill-BtJbZTT0.js → TaskQueuePill-CWYj3xKe.js} +2 -2
- package/dashboard/dist/assets/{TaskQueuePill-BtJbZTT0.js.map → TaskQueuePill-CWYj3xKe.js.map} +1 -1
- package/dashboard/dist/assets/{TasksListPage-DtFLUEhg.js → TasksListPage-C3cX94Mw.js} +2 -2
- package/dashboard/dist/assets/{TasksListPage-DtFLUEhg.js.map → TasksListPage-C3cX94Mw.js.map} +1 -1
- package/dashboard/dist/assets/{TimeAgo-WuM6xImZ.js → TimeAgo-B_5yDDHV.js} +2 -2
- package/dashboard/dist/assets/{TimeAgo-WuM6xImZ.js.map → TimeAgo-B_5yDDHV.js.map} +1 -1
- package/dashboard/dist/assets/{TimestampCell-IVL_-Upy.js → TimestampCell-DRX724uU.js} +2 -2
- package/dashboard/dist/assets/{TimestampCell-IVL_-Upy.js.map → TimestampCell-DRX724uU.js.map} +1 -1
- package/dashboard/dist/assets/{UserName-DU9qeg13.js → UserName-Ca8FA469.js} +2 -2
- package/dashboard/dist/assets/{UserName-DU9qeg13.js.map → UserName-Ca8FA469.js.map} +1 -1
- package/dashboard/dist/assets/WorkflowExecutionPage-BBYWEV2P.js +2 -0
- package/dashboard/dist/assets/WorkflowExecutionPage-BBYWEV2P.js.map +1 -0
- package/dashboard/dist/assets/WorkflowPill-BXifAuLi.js +2 -0
- package/dashboard/dist/assets/{WorkflowPill-Diw8iWBP.js.map → WorkflowPill-BXifAuLi.js.map} +1 -1
- package/dashboard/dist/assets/WorkflowsDashboard-Drl3juz9.js +2 -0
- package/dashboard/dist/assets/WorkflowsDashboard-Drl3juz9.js.map +1 -0
- package/dashboard/dist/assets/{WorkflowsOverview-CPuvA4t3.js → WorkflowsOverview-03IRrDLg.js} +2 -2
- package/dashboard/dist/assets/{WorkflowsOverview-CPuvA4t3.js.map → WorkflowsOverview-03IRrDLg.js.map} +1 -1
- package/dashboard/dist/assets/YamlWorkflowsPage-DC2cLxVi.js +2 -0
- package/dashboard/dist/assets/YamlWorkflowsPage-DC2cLxVi.js.map +1 -0
- package/dashboard/dist/assets/{bots-BPiZXf2h.js → bots-DZEXcgiJ.js} +2 -2
- package/dashboard/dist/assets/{bots-BPiZXf2h.js.map → bots-DZEXcgiJ.js.map} +1 -1
- package/dashboard/dist/assets/{escalation-DTY_OKRh.js → escalation-Cw48lNaF.js} +2 -2
- package/dashboard/dist/assets/{escalation-DTY_OKRh.js.map → escalation-Cw48lNaF.js.map} +1 -1
- package/dashboard/dist/assets/{escalation-columns-C91fHSkp.js → escalation-columns-NINpo3qf.js} +2 -2
- package/dashboard/dist/assets/{escalation-columns-C91fHSkp.js.map → escalation-columns-NINpo3qf.js.map} +1 -1
- package/dashboard/dist/assets/helpers-Cuu3xKfr.js +2 -0
- package/dashboard/dist/assets/helpers-Cuu3xKfr.js.map +1 -0
- package/dashboard/dist/assets/helpers-fk_qr729.js +2 -0
- package/dashboard/dist/assets/helpers-fk_qr729.js.map +1 -0
- package/dashboard/dist/assets/index-B98ipWxE.js +2 -0
- package/dashboard/dist/assets/{index-CDWOfCmi.js.map → index-B98ipWxE.js.map} +1 -1
- package/dashboard/dist/assets/{index-D_qEAYrg.js → index-BIG3KooI.js} +2 -2
- package/dashboard/dist/assets/{index-D_qEAYrg.js.map → index-BIG3KooI.js.map} +1 -1
- package/dashboard/dist/assets/{index-DSzSoku1.js → index-BwN3KP_L.js} +91 -93
- package/dashboard/dist/assets/index-BwN3KP_L.js.map +1 -0
- package/dashboard/dist/assets/index-Bxe8h1x4.js +17 -0
- package/dashboard/dist/assets/index-Bxe8h1x4.js.map +1 -0
- package/dashboard/dist/assets/index-CNI7k7oB.js +6 -0
- package/dashboard/dist/assets/index-CNI7k7oB.js.map +1 -0
- package/dashboard/dist/assets/index-CORHB0WC.js +2 -0
- package/dashboard/dist/assets/index-CORHB0WC.js.map +1 -0
- package/dashboard/dist/assets/index-DcIKW-cZ.css +1 -0
- package/dashboard/dist/assets/index-Dj-z-x8M.js +2 -0
- package/dashboard/dist/assets/index-Dj-z-x8M.js.map +1 -0
- package/dashboard/dist/assets/index-DwRytW9O.js +5 -0
- package/dashboard/dist/assets/index-DwRytW9O.js.map +1 -0
- package/dashboard/dist/assets/index-aRvL-dXp.js +2 -0
- package/dashboard/dist/assets/index-aRvL-dXp.js.map +1 -0
- package/dashboard/dist/assets/index-b03HlbnH.js +2 -0
- package/dashboard/dist/assets/{index-BtOwLI0K.js.map → index-b03HlbnH.js.map} +1 -1
- package/dashboard/dist/assets/mcp-BZoFryNc.js +2 -0
- package/dashboard/dist/assets/mcp-BZoFryNc.js.map +1 -0
- package/dashboard/dist/assets/mcp-query-wiw1kwm8.js +2 -0
- package/dashboard/dist/assets/mcp-query-wiw1kwm8.js.map +1 -0
- package/dashboard/dist/assets/{mcp-runs-DmXYJD19.js → mcp-runs-BaEKnf5v.js} +2 -2
- package/dashboard/dist/assets/{mcp-runs-DmXYJD19.js.map → mcp-runs-BaEKnf5v.js.map} +1 -1
- package/dashboard/dist/assets/{namespaces-DoGa7jc7.js → namespaces-BwnZI4_A.js} +2 -2
- package/dashboard/dist/assets/{namespaces-DoGa7jc7.js.map → namespaces-BwnZI4_A.js.map} +1 -1
- package/dashboard/dist/assets/{roles-wCdQ2Z7k.js → roles-Bgn1K8zU.js} +2 -2
- package/dashboard/dist/assets/{roles-wCdQ2Z7k.js.map → roles-Bgn1K8zU.js.map} +1 -1
- package/dashboard/dist/assets/{settings-DDe_L7JT.js → settings-CizYiutL.js} +2 -2
- package/dashboard/dist/assets/{settings-DDe_L7JT.js.map → settings-CizYiutL.js.map} +1 -1
- package/dashboard/dist/assets/{tasks-3Hih8Bt7.js → tasks-Bmte_hc4.js} +2 -2
- package/dashboard/dist/assets/{tasks-3Hih8Bt7.js.map → tasks-Bmte_hc4.js.map} +1 -1
- package/dashboard/dist/assets/useEventHooks-CUCxpiI2.js +2 -0
- package/dashboard/dist/assets/useEventHooks-CUCxpiI2.js.map +1 -0
- package/dashboard/dist/assets/useExpandedRows-Cg9iq6Vy.js +2 -0
- package/dashboard/dist/assets/useExpandedRows-Cg9iq6Vy.js.map +1 -0
- package/dashboard/dist/assets/{useYamlActivityEvents-B5dHec6Y.js → useYamlActivityEvents-Cum02Ej9.js} +2 -2
- package/dashboard/dist/assets/useYamlActivityEvents-Cum02Ej9.js.map +1 -0
- package/dashboard/dist/assets/{users-BTagPmGW.js → users-NSDgTt-z.js} +2 -2
- package/dashboard/dist/assets/{users-BTagPmGW.js.map → users-NSDgTt-z.js.map} +1 -1
- package/dashboard/dist/assets/{vendor-icons-DCLlGYO9.js → vendor-icons-D1DdudfH.js} +141 -66
- package/dashboard/dist/assets/vendor-icons-D1DdudfH.js.map +1 -0
- package/dashboard/dist/assets/{workflows-B20dR3NE.js → workflows-k0XRdGXx.js} +2 -2
- package/dashboard/dist/assets/{workflows-B20dR3NE.js.map → workflows-k0XRdGXx.js.map} +1 -1
- package/dashboard/dist/assets/yaml-workflows-DAre8I78.js +2 -0
- package/dashboard/dist/assets/yaml-workflows-DAre8I78.js.map +1 -0
- package/dashboard/dist/index.html +3 -3
- package/docs/api/mcp-servers.md +60 -2
- package/docs/architecture.md +3 -3
- package/docs/dashboard.md +1 -1
- package/docs/epic-integration.md +224 -0
- package/docs/escalation-strategies.md +2 -3
- package/docs/events.md +28 -0
- package/docs/mcp.md +301 -31
- package/docs/sdk.md +177 -0
- package/docs/story.md +157 -0
- package/docs/workflow-builder.md +371 -0
- package/package.json +5 -4
- package/build/routes/escalations/helpers.d.ts +0 -5
- package/build/routes/resolve.d.ts +0 -9
- package/build/routes/resolve.js +0 -19
- package/build/routes/yaml-workflows/helpers.d.ts +0 -2
- package/build/routes/yaml-workflows/helpers.js +0 -8
- package/dashboard/dist/assets/AdminDashboard-CTyAMUJR.js +0 -2
- package/dashboard/dist/assets/AvailableEscalationsPage-BMXV3Q4l.js +0 -2
- package/dashboard/dist/assets/AvailableEscalationsPage-BMXV3Q4l.js.map +0 -1
- package/dashboard/dist/assets/BotPicker-C51nKFEu.js +0 -2
- package/dashboard/dist/assets/CredentialsPage-CoBNFSAu.js +0 -2
- package/dashboard/dist/assets/CredentialsPage-CoBNFSAu.js.map +0 -1
- package/dashboard/dist/assets/McpQueryDetailPage-CGoR9XK6.js +0 -5
- package/dashboard/dist/assets/McpQueryDetailPage-CGoR9XK6.js.map +0 -1
- package/dashboard/dist/assets/McpQueryPage-BjXoYQuU.js +0 -2
- package/dashboard/dist/assets/McpQueryPage-BjXoYQuU.js.map +0 -1
- package/dashboard/dist/assets/McpRunDetailPage-DLkA5Aar.js +0 -2
- package/dashboard/dist/assets/McpRunDetailPage-DLkA5Aar.js.map +0 -1
- package/dashboard/dist/assets/McpRunsPage-DCh9n11D.js +0 -2
- package/dashboard/dist/assets/McpRunsPage-DCh9n11D.js.map +0 -1
- package/dashboard/dist/assets/OperatorDashboard-Dc80suXd.js +0 -2
- package/dashboard/dist/assets/OperatorDashboard-Dc80suXd.js.map +0 -1
- package/dashboard/dist/assets/ProcessDetailPage-B2GKuGzk.js +0 -2
- package/dashboard/dist/assets/ProcessDetailPage-B2GKuGzk.js.map +0 -1
- package/dashboard/dist/assets/ProcessesListPage-CTjI3Wn6.js +0 -2
- package/dashboard/dist/assets/ProcessesListPage-CTjI3Wn6.js.map +0 -1
- package/dashboard/dist/assets/RefreshButton-BcQDObrv.js +0 -2
- package/dashboard/dist/assets/RefreshButton-BcQDObrv.js.map +0 -1
- package/dashboard/dist/assets/RunAsSelector-BhyWtofX.js +0 -2
- package/dashboard/dist/assets/RunAsSelector-BhyWtofX.js.map +0 -1
- package/dashboard/dist/assets/WorkflowExecutionPage-DOocX81f.js +0 -2
- package/dashboard/dist/assets/WorkflowExecutionPage-DOocX81f.js.map +0 -1
- package/dashboard/dist/assets/WorkflowPill-Diw8iWBP.js +0 -2
- package/dashboard/dist/assets/WorkflowsDashboard-DDtUIrTy.js +0 -2
- package/dashboard/dist/assets/WorkflowsDashboard-DDtUIrTy.js.map +0 -1
- package/dashboard/dist/assets/YamlWorkflowsPage-DlwwkluN.js +0 -2
- package/dashboard/dist/assets/YamlWorkflowsPage-DlwwkluN.js.map +0 -1
- package/dashboard/dist/assets/helpers-DBUZ9pnG.js +0 -2
- package/dashboard/dist/assets/helpers-DBUZ9pnG.js.map +0 -1
- package/dashboard/dist/assets/index-BOeA-gfK.js +0 -17
- package/dashboard/dist/assets/index-BOeA-gfK.js.map +0 -1
- package/dashboard/dist/assets/index-BZ6K_kmL.js +0 -3
- package/dashboard/dist/assets/index-BZ6K_kmL.js.map +0 -1
- package/dashboard/dist/assets/index-Bpm0yeoi.js +0 -2
- package/dashboard/dist/assets/index-Bpm0yeoi.js.map +0 -1
- package/dashboard/dist/assets/index-BtOwLI0K.js +0 -2
- package/dashboard/dist/assets/index-CBF3ZvRZ.js +0 -6
- package/dashboard/dist/assets/index-CBF3ZvRZ.js.map +0 -1
- package/dashboard/dist/assets/index-CDWOfCmi.js +0 -2
- package/dashboard/dist/assets/index-Ce6sL__n.js +0 -2
- package/dashboard/dist/assets/index-Ce6sL__n.js.map +0 -1
- package/dashboard/dist/assets/index-DSzSoku1.js.map +0 -1
- package/dashboard/dist/assets/index-gCy9XX3W.css +0 -1
- package/dashboard/dist/assets/mcp-BzVpaaKF.js +0 -2
- package/dashboard/dist/assets/mcp-BzVpaaKF.js.map +0 -1
- package/dashboard/dist/assets/mcp-query-wTuxTTCV.js +0 -2
- package/dashboard/dist/assets/mcp-query-wTuxTTCV.js.map +0 -1
- package/dashboard/dist/assets/useNatsEvents-DeGKHFTX.js +0 -2
- package/dashboard/dist/assets/useNatsEvents-DeGKHFTX.js.map +0 -1
- package/dashboard/dist/assets/useYamlActivityEvents-B5dHec6Y.js.map +0 -1
- package/dashboard/dist/assets/vendor-icons-DCLlGYO9.js.map +0 -1
- package/dashboard/dist/assets/yaml-workflows-CaLPMQha.js +0 -2
- package/dashboard/dist/assets/yaml-workflows-CaLPMQha.js.map +0 -1
- package/docs/img/01-login.png +0 -0
- package/docs/img/02-dashboard-home.png +0 -0
- package/docs/img/03-processes-list.png +0 -0
- package/docs/img/04-escalations-list.png +0 -0
- package/docs/img/05-mcp-servers.png +0 -0
- package/docs/img/06-mcp-pipelines.png +0 -0
- package/docs/img/07-workflows-list.png +0 -0
- package/docs/img/compilation/01-query-submit.png +0 -0
- package/docs/img/compilation/02-mcp-servers.png +0 -0
- package/docs/img/compilation/03-query-completed.png +0 -0
- package/docs/img/compilation/04-wizard-original.png +0 -0
- package/docs/img/compilation/05-wizard-timeline.png +0 -0
- package/docs/img/compilation/06-wizard-profile.png +0 -0
- package/docs/img/compilation/07-wizard-deploy.png +0 -0
- package/docs/img/compilation/08-wizard-test-modal.png +0 -0
- package/docs/img/compilation/09-wizard-test-compare.png +0 -0
- package/docs/img/compilation/10-wizard-verify.png +0 -0
- package/lib/db/README.md +0 -8
- /package/{lib → build/lib}/db/schemas/002_seed.sql +0 -0
- /package/{lib → build/lib}/db/schemas/003_workflow_discovery.sql +0 -0
- /package/{lib → build/lib}/db/schemas/004_query_router.sql +0 -0
- /package/{lib → build/lib}/db/schemas/005_triage_router.sql +0 -0
- /package/{lib → build/lib}/db/schemas/006_oauth.sql +0 -0
- /package/{lib → build/lib}/db/schemas/007_security.sql +0 -0
- /package/{lib → build/lib}/db/schemas/008_bot_accounts.sql +0 -0
- /package/{lib → build/lib}/db/schemas/009_audit_trail.sql +0 -0
- /package/{lib → build/lib}/db/schemas/010_credential_providers.sql +0 -0
- /package/{lib → build/lib}/db/schemas/012_drop_modality.sql +0 -0
- /package/{lib → build/lib}/db/schemas/013_execute_as.sql +0 -0
- /package/{lib → build/lib}/db/schemas/014_ephemeral_credentials.sql +0 -0
- /package/{lib → build/lib}/db/schemas/015_knowledge.sql +0 -0
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.createWorkflowSet = createWorkflowSet;
|
|
4
|
+
exports.getWorkflowSet = getWorkflowSet;
|
|
5
|
+
exports.updateWorkflowSetPlan = updateWorkflowSetPlan;
|
|
6
|
+
exports.updateWorkflowSetStatus = updateWorkflowSetStatus;
|
|
7
|
+
exports.updateWorkflowSetSourceWorkflow = updateWorkflowSetSourceWorkflow;
|
|
8
|
+
exports.deleteWorkflowSet = deleteWorkflowSet;
|
|
9
|
+
exports.listWorkflowSets = listWorkflowSets;
|
|
10
|
+
const db_1 = require("../../lib/db");
|
|
11
|
+
const sql_1 = require("./sql");
|
|
12
|
+
const DEFAULT_LIMIT = 20;
|
|
13
|
+
/**
 * Insert a new workflow set row and return it.
 *
 * Optional fields are normalized before insert: description and
 * source_workflow_id fall back to NULL, plan to an empty array
 * (stored as JSON text), namespaces to an empty array.
 *
 * @param {object} input - name, specification, and optional
 *   description / plan / namespaces / source_workflow_id.
 * @returns {Promise<object>} the inserted row (RETURNING *).
 */
async function createWorkflowSet(input) {
    const { name, description, specification, plan, namespaces, source_workflow_id } = input;
    const params = [
        name,
        description || null,
        specification,
        JSON.stringify(plan || []),
        namespaces || [],
        source_workflow_id || null,
    ];
    const result = await (0, db_1.getPool)().query(sql_1.CREATE_WORKFLOW_SET, params);
    return result.rows[0];
}
|
|
25
|
+
/**
 * Fetch a single workflow set by primary key.
 *
 * @param {string} id - workflow set id.
 * @returns {Promise<object|null>} the row, or null when not found.
 */
async function getWorkflowSet(id) {
    const result = await (0, db_1.getPool)().query(sql_1.GET_WORKFLOW_SET, [id]);
    return result.rows[0] ?? null;
}
|
|
30
|
+
/**
 * Replace a workflow set's plan and namespaces.
 *
 * The plan is serialized to JSON text before being bound; the SQL
 * statement also moves the row to 'planned' status (see ./sql).
 *
 * @param {string} id - workflow set id.
 * @param {Array} plan - plan entries to store.
 * @param {string[]} namespaces - namespaces to associate.
 * @returns {Promise<object|null>} updated row, or null when not found.
 */
async function updateWorkflowSetPlan(id, plan, namespaces) {
    const pool = (0, db_1.getPool)();
    const serializedPlan = JSON.stringify(plan);
    const result = await pool.query(sql_1.UPDATE_WORKFLOW_SET_PLAN, [id, serializedPlan, namespaces]);
    return result.rows[0] ?? null;
}
|
|
39
|
+
/**
 * Set a workflow set's status.
 *
 * @param {string} id - workflow set id.
 * @param {string} status - new status value.
 * @returns {Promise<object|null>} updated row, or null when not found.
 */
async function updateWorkflowSetStatus(id, status) {
    const result = await (0, db_1.getPool)().query(sql_1.UPDATE_WORKFLOW_SET_STATUS, [id, status]);
    return result.rows[0] ?? null;
}
|
|
44
|
+
/**
 * Point a workflow set at its generated source workflow.
 *
 * Fire-and-forget update: returns nothing and does not report whether
 * a row matched. NOTE(review): unlike its siblings this uses an inline
 * statement rather than a constant in ./sql.
 *
 * @param {string} id - workflow set id.
 * @param {string} sourceWorkflowId - workflow id to link.
 * @returns {Promise<void>}
 */
async function updateWorkflowSetSourceWorkflow(id, sourceWorkflowId) {
    await (0, db_1.getPool)().query('UPDATE lt_workflow_sets SET source_workflow_id = $2 WHERE id = $1', [id, sourceWorkflowId]);
}
|
|
48
|
+
/**
 * Delete a workflow set by id.
 *
 * @param {string} id - workflow set id.
 * @returns {Promise<boolean>} true when a row was deleted; rowCount
 *   of null/undefined (driver quirk) is treated as zero.
 */
async function deleteWorkflowSet(id) {
    const result = await (0, db_1.getPool)().query(sql_1.DELETE_WORKFLOW_SET, [id]);
    const deleted = result.rowCount ?? 0;
    return deleted > 0;
}
|
|
53
|
+
/**
 * List workflow sets with optional filtering and pagination.
 *
 * Builds a WHERE clause from the provided filters (exact status match,
 * case-insensitive substring match on name/description), then runs the
 * COUNT and page queries in parallel against the same bind values.
 *
 * @param {object} filters - optional status, search, limit, offset.
 * @returns {Promise<{sets: object[], total: number}>} page of rows plus
 *   the total matching-row count.
 */
async function listWorkflowSets(filters) {
    const pool = (0, db_1.getPool)();
    const values = [];
    const conditions = [];
    if (filters.status) {
        values.push(filters.status);
        conditions.push(`status = $${values.length}`);
    }
    if (filters.search) {
        // One bind parameter reused for both ILIKE comparisons.
        values.push(`%${filters.search}%`);
        conditions.push(`(name ILIKE $${values.length} OR description ILIKE $${values.length})`);
    }
    const where = conditions.length ? `WHERE ${conditions.join(' AND ')}` : '';
    const limit = filters.limit || DEFAULT_LIMIT;
    const offset = filters.offset || 0;
    // LIMIT/OFFSET placeholders follow immediately after the filter binds.
    const limitIdx = values.length + 1;
    const offsetIdx = values.length + 2;
    const countQuery = pool.query(`SELECT COUNT(*) FROM lt_workflow_sets ${where}`, values);
    const pageQuery = pool.query(`${sql_1.LIST_WORKFLOW_SETS_BASE} ${where} ORDER BY created_at DESC LIMIT $${limitIdx} OFFSET $${offsetIdx}`, [...values, limit, offset]);
    const [countResult, dataResult] = await Promise.all([countQuery, pageQuery]);
    return {
        sets: dataResult.rows,
        total: Number.parseInt(countResult.rows[0].count, 10),
    };
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/** Public type surface of the workflow-sets service: re-exports the CRUD/list helpers from ./db. */
export { createWorkflowSet, getWorkflowSet, updateWorkflowSetPlan, updateWorkflowSetStatus, updateWorkflowSetSourceWorkflow, deleteWorkflowSet, listWorkflowSets, } from './db';
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.listWorkflowSets = exports.deleteWorkflowSet = exports.updateWorkflowSetSourceWorkflow = exports.updateWorkflowSetStatus = exports.updateWorkflowSetPlan = exports.getWorkflowSet = exports.createWorkflowSet = void 0;
|
|
4
|
+
var db_1 = require("./db");
|
|
5
|
+
Object.defineProperty(exports, "createWorkflowSet", { enumerable: true, get: function () { return db_1.createWorkflowSet; } });
|
|
6
|
+
Object.defineProperty(exports, "getWorkflowSet", { enumerable: true, get: function () { return db_1.getWorkflowSet; } });
|
|
7
|
+
Object.defineProperty(exports, "updateWorkflowSetPlan", { enumerable: true, get: function () { return db_1.updateWorkflowSetPlan; } });
|
|
8
|
+
Object.defineProperty(exports, "updateWorkflowSetStatus", { enumerable: true, get: function () { return db_1.updateWorkflowSetStatus; } });
|
|
9
|
+
Object.defineProperty(exports, "updateWorkflowSetSourceWorkflow", { enumerable: true, get: function () { return db_1.updateWorkflowSetSourceWorkflow; } });
|
|
10
|
+
Object.defineProperty(exports, "deleteWorkflowSet", { enumerable: true, get: function () { return db_1.deleteWorkflowSet; } });
|
|
11
|
+
Object.defineProperty(exports, "listWorkflowSets", { enumerable: true, get: function () { return db_1.listWorkflowSets; } });
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
// Parameterized SQL for the lt_workflow_sets table (declaration mirror of sql.js).
/** $1 name, $2 description, $3 specification, $4 plan, $5 namespaces, $6 source_workflow_id */
export declare const CREATE_WORKFLOW_SET = "\n    INSERT INTO lt_workflow_sets (name, description, specification, plan, namespaces, source_workflow_id)\n    VALUES ($1, $2, $3, $4, $5, $6)\n    RETURNING *";
/** $1 id */
export declare const GET_WORKFLOW_SET = "\n    SELECT * FROM lt_workflow_sets WHERE id = $1";
/** $1 id, $2 plan, $3 namespaces — also transitions status to 'planned'. */
export declare const UPDATE_WORKFLOW_SET_PLAN = "\n    UPDATE lt_workflow_sets\n    SET plan = $2, namespaces = $3, status = 'planned', updated_at = NOW()\n    WHERE id = $1\n    RETURNING *";
/** $1 id, $2 status */
export declare const UPDATE_WORKFLOW_SET_STATUS = "\n    UPDATE lt_workflow_sets\n    SET status = $2, updated_at = NOW()\n    WHERE id = $1\n    RETURNING *";
/** $1 id */
export declare const DELETE_WORKFLOW_SET = "\n    DELETE FROM lt_workflow_sets WHERE id = $1";
/** Unfiltered base SELECT; listWorkflowSets appends WHERE/ORDER BY/LIMIT/OFFSET. */
export declare const LIST_WORKFLOW_SETS_BASE = "\n    SELECT * FROM lt_workflow_sets";
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// ─── Workflow set CRUD ──────────────────────────────────────────────────────
|
|
3
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
4
|
+
exports.LIST_WORKFLOW_SETS_BASE = exports.DELETE_WORKFLOW_SET = exports.UPDATE_WORKFLOW_SET_STATUS = exports.UPDATE_WORKFLOW_SET_PLAN = exports.GET_WORKFLOW_SET = exports.CREATE_WORKFLOW_SET = void 0;
|
|
5
|
+
exports.CREATE_WORKFLOW_SET = `
|
|
6
|
+
INSERT INTO lt_workflow_sets (name, description, specification, plan, namespaces, source_workflow_id)
|
|
7
|
+
VALUES ($1, $2, $3, $4, $5, $6)
|
|
8
|
+
RETURNING *`;
|
|
9
|
+
exports.GET_WORKFLOW_SET = `
|
|
10
|
+
SELECT * FROM lt_workflow_sets WHERE id = $1`;
|
|
11
|
+
exports.UPDATE_WORKFLOW_SET_PLAN = `
|
|
12
|
+
UPDATE lt_workflow_sets
|
|
13
|
+
SET plan = $2, namespaces = $3, status = 'planned', updated_at = NOW()
|
|
14
|
+
WHERE id = $1
|
|
15
|
+
RETURNING *`;
|
|
16
|
+
exports.UPDATE_WORKFLOW_SET_STATUS = `
|
|
17
|
+
UPDATE lt_workflow_sets
|
|
18
|
+
SET status = $2, updated_at = NOW()
|
|
19
|
+
WHERE id = $1
|
|
20
|
+
RETURNING *`;
|
|
21
|
+
exports.DELETE_WORKFLOW_SET = `
|
|
22
|
+
DELETE FROM lt_workflow_sets WHERE id = $1`;
|
|
23
|
+
exports.LIST_WORKFLOW_SETS_BASE = `
|
|
24
|
+
SELECT * FROM lt_workflow_sets`;
|
|
@@ -69,6 +69,10 @@ async function listYamlWorkflows(filters) {
|
|
|
69
69
|
conditions.push(`source_workflow_id = $${idx++}`);
|
|
70
70
|
values.push(filters.source_workflow_id);
|
|
71
71
|
}
|
|
72
|
+
if (filters.set_id) {
|
|
73
|
+
conditions.push(`set_id = $${idx++}`);
|
|
74
|
+
values.push(filters.set_id);
|
|
75
|
+
}
|
|
72
76
|
const where = conditions.length ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
73
77
|
const limit = filters.limit || defaults_1.YAML_LIST_LIMIT;
|
|
74
78
|
const offset = filters.offset || 0;
|
|
@@ -1,6 +1,11 @@
|
|
|
1
1
|
import type { LTYamlWorkflowRecord, LTYamlWorkflowVersionRecord, ActivityManifestEntry } from '../../types/yaml-workflow';
|
|
2
2
|
import type { CreateYamlWorkflowInput } from './types';
|
|
3
3
|
export { parseVersionFromYaml, updateYamlWorkflowStatus, listYamlWorkflows, findYamlWorkflowsByTags, } from './db-utils';
|
|
4
|
+
/**
|
|
5
|
+
* Check whether a graph_topic is already in use by a non-archived workflow
|
|
6
|
+
* in the same namespace. Returns the conflicting workflow name, or null.
|
|
7
|
+
*/
|
|
8
|
+
export declare function checkTopicConflict(appId: string, graphTopic: string, excludeId?: string): Promise<string | null>;
|
|
4
9
|
export declare function createYamlWorkflow(input: CreateYamlWorkflowInput): Promise<LTYamlWorkflowRecord>;
|
|
5
10
|
export declare function getYamlWorkflow(id: string): Promise<LTYamlWorkflowRecord | null>;
|
|
6
11
|
export declare function getYamlWorkflowByName(name: string): Promise<LTYamlWorkflowRecord | null>;
|
|
@@ -25,3 +30,6 @@ export declare function getVersionHistory(workflowId: string, limit?: number, of
|
|
|
25
30
|
export declare function getVersionSnapshot(workflowId: string, version: number): Promise<LTYamlWorkflowVersionRecord | null>;
|
|
26
31
|
export declare function markContentDeployed(workflowId: string): Promise<void>;
|
|
27
32
|
export declare function markAppIdContentDeployed(appId: string): Promise<void>;
|
|
33
|
+
/** Set or replace a workflow's cron schedule, envelope, and execute-as identity; resolves to the updated record, or null when the id is unknown. */
export declare function updateCronSchedule(id: string, cronSchedule: string, cronEnvelope: Record<string, unknown> | null, executeAs: string | null): Promise<LTYamlWorkflowRecord | null>;
/** Clear a workflow's cron schedule; resolves to the updated record, or null when the id is unknown. */
export declare function clearCronSchedule(id: string): Promise<LTYamlWorkflowRecord | null>;
/** List all workflows that currently have a cron schedule (per GET_CRON_SCHEDULED_WORKFLOWS). */
export declare function getCronScheduledWorkflows(): Promise<LTYamlWorkflowRecord[]>;
|
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
exports.findYamlWorkflowsByTags = exports.listYamlWorkflows = exports.updateYamlWorkflowStatus = exports.parseVersionFromYaml = void 0;
|
|
4
|
+
exports.checkTopicConflict = checkTopicConflict;
|
|
4
5
|
exports.createYamlWorkflow = createYamlWorkflow;
|
|
5
6
|
exports.getYamlWorkflow = getYamlWorkflow;
|
|
6
7
|
exports.getYamlWorkflowByName = getYamlWorkflowByName;
|
|
@@ -16,6 +17,9 @@ exports.getVersionHistory = getVersionHistory;
|
|
|
16
17
|
exports.getVersionSnapshot = getVersionSnapshot;
|
|
17
18
|
exports.markContentDeployed = markContentDeployed;
|
|
18
19
|
exports.markAppIdContentDeployed = markAppIdContentDeployed;
|
|
20
|
+
exports.updateCronSchedule = updateCronSchedule;
|
|
21
|
+
exports.clearCronSchedule = clearCronSchedule;
|
|
22
|
+
exports.getCronScheduledWorkflows = getCronScheduledWorkflows;
|
|
19
23
|
const db_1 = require("../../lib/db");
|
|
20
24
|
const defaults_1 = require("../../modules/defaults");
|
|
21
25
|
const sql_1 = require("./sql");
|
|
@@ -26,6 +30,19 @@ Object.defineProperty(exports, "parseVersionFromYaml", { enumerable: true, get:
|
|
|
26
30
|
Object.defineProperty(exports, "updateYamlWorkflowStatus", { enumerable: true, get: function () { return db_utils_2.updateYamlWorkflowStatus; } });
|
|
27
31
|
Object.defineProperty(exports, "listYamlWorkflows", { enumerable: true, get: function () { return db_utils_2.listYamlWorkflows; } });
|
|
28
32
|
Object.defineProperty(exports, "findYamlWorkflowsByTags", { enumerable: true, get: function () { return db_utils_2.findYamlWorkflowsByTags; } });
|
|
33
|
+
/**
 * Determine whether another non-archived workflow in the same namespace
 * already claims the given graph_topic.
 * @param {string} appId - Namespace/app the topic is scoped to.
 * @param {string} graphTopic - Candidate topic to check.
 * @param {string} [excludeId] - Workflow id to ignore (the one being edited).
 * @returns {Promise<string|null>} Name of the conflicting workflow, or null when the topic is free.
 */
async function checkTopicConflict(appId, graphTopic, excludeId) {
    const { rows } = await (0, db_1.getPool)().query(sql_1.CHECK_TOPIC_UNIQUE, [appId, graphTopic]);
    const match = rows[0];
    if (!match)
        return null;
    // The workflow being edited is allowed to keep its own topic.
    const isSelf = Boolean(excludeId) && match.id === excludeId;
    return isSelf ? null : match.name;
}
|
|
29
46
|
async function createYamlWorkflow(input) {
|
|
30
47
|
const pool = (0, db_1.getPool)();
|
|
31
48
|
const { rows } = await pool.query(sql_1.CREATE_YAML_WORKFLOW, [
|
|
@@ -45,6 +62,9 @@ async function createYamlWorkflow(input) {
|
|
|
45
62
|
input.category || null,
|
|
46
63
|
input.tags || [],
|
|
47
64
|
input.metadata ? JSON.stringify(input.metadata) : null,
|
|
65
|
+
input.set_id || null,
|
|
66
|
+
input.set_role || null,
|
|
67
|
+
input.set_build_order ?? null,
|
|
48
68
|
]);
|
|
49
69
|
const record = rows[0];
|
|
50
70
|
await createVersionSnapshot(record.id, 1, record.yaml_content, input.activity_manifest || [], input.input_schema || {}, input.output_schema || {}, input.input_field_meta || [], 'Initial version');
|
|
@@ -194,3 +214,24 @@ async function markAppIdContentDeployed(appId) {
|
|
|
194
214
|
const pool = (0, db_1.getPool)();
|
|
195
215
|
await pool.query(sql_1.MARK_APP_ID_CONTENT_DEPLOYED, [appId]);
|
|
196
216
|
}
|
|
217
|
+
// -- Cron scheduling ---------------------------------------------------------
/**
 * Persist a cron schedule (plus optional envelope and execute-as identity)
 * on a workflow record.
 * @param {string} id - Workflow id.
 * @param {string} cronSchedule - Cron expression to store.
 * @param {Record<string, unknown>|null} cronEnvelope - Payload template; serialized to JSON when present.
 * @param {string|null} executeAs - Identity the scheduled run executes as.
 * @returns {Promise<object|null>} Updated record, or null when the id is unknown.
 */
async function updateCronSchedule(id, cronSchedule, cronEnvelope, executeAs) {
    const envelopeJson = cronEnvelope ? JSON.stringify(cronEnvelope) : null;
    const params = [id, cronSchedule, envelopeJson, executeAs];
    const { rows } = await (0, db_1.getPool)().query(sql_1.UPDATE_CRON_SCHEDULE, params);
    return rows[0] || null;
}
|
|
228
|
+
/**
 * Remove a workflow's cron schedule.
 * @param {string} id - Workflow id.
 * @returns {Promise<object|null>} Updated record, or null when the id is unknown.
 */
async function clearCronSchedule(id) {
    const result = await (0, db_1.getPool)().query(sql_1.CLEAR_CRON_SCHEDULE, [id]);
    return result.rows[0] || null;
}
|
|
233
|
+
/**
 * Fetch every workflow that currently has a cron schedule
 * (as selected by GET_CRON_SCHEDULED_WORKFLOWS).
 * @returns {Promise<object[]>} Matching workflow records.
 */
async function getCronScheduledWorkflows() {
    const result = await (0, db_1.getPool)().query(sql_1.GET_CRON_SCHEDULED_WORKFLOWS);
    return result.rows;
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import type { LTYamlWorkflowRecord } from '../../types/yaml-workflow';
interface InvokeOptions {
    /** Trigger payload; `_scope` and `_metadata` keys may be injected before dispatch. */
    data?: Record<string, unknown>;
    /** When true, block until the workflow completes and include `result`. */
    sync?: boolean;
    /** Sync-mode wait budget passed through to the deployer (units per deployer contract — confirm). */
    timeout?: number;
    /** Bot/service identity to execute as; its resolved principal becomes `_scope.principal`. */
    execute_as?: string;
    /** Invoking user's id; becomes the principal, or `initiatedBy` when execute_as is set. */
    userId?: string;
    /** Source identifier for metadata injection (e.g., 'cron') */
    source?: string;
}
/**
 * Invoke a YAML workflow with scope injection.
 * Shared by HTTP route and cron callback.
 * Resolves with the job id, plus `result` when invoked synchronously.
 */
export declare function invokeYamlWorkflow(wf: LTYamlWorkflowRecord, options?: InvokeOptions): Promise<{
    job_id: string;
    result?: unknown;
}>;
export {};
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
Object.defineProperty(exports, "__esModule", { value: true });
exports.invokeYamlWorkflow = invokeYamlWorkflow;
// Deployer performs the actual workflow dispatch (sync and async variants).
const yamlDeployer = __importStar(require("./deployer"));
// Resolves a user/bot id to a principal object for _scope injection.
const principal_1 = require("../iam/principal");
|
|
39
|
+
/**
 * Invoke a YAML workflow with scope injection.
 * Shared by HTTP route and cron callback.
 * @param {object} wf - Workflow record (uses app_id and graph_topic).
 * @param {object} [options] - data / sync / timeout / execute_as / userId / source.
 * @returns {Promise<{job_id: string, result?: unknown}>} Job id, plus result in sync mode.
 */
async function invokeYamlWorkflow(wf, options = {}) {
    const payload = { ...(options.data || {}) };
    // Inject _scope so compiled workflow activities have identity context;
    // a caller-supplied _scope always wins.
    if (!payload._scope) {
        const scope = await resolveInvocationScope(options.execute_as, options.userId);
        if (scope) {
            payload._scope = scope;
        }
    }
    // Tag the invocation source (e.g. 'cron') into _metadata when requested.
    if (options.source) {
        if (!payload._metadata) {
            payload._metadata = {};
        }
        payload._metadata.source = options.source;
    }
    if (options.sync) {
        const { job_id, result } = await yamlDeployer.invokeYamlWorkflowSync(wf.app_id, wf.graph_topic, payload, options.timeout, wf.graph_topic);
        return { job_id, result };
    }
    const job_id = await yamlDeployer.invokeYamlWorkflow(wf.app_id, wf.graph_topic, payload, wf.graph_topic);
    return { job_id };
}
/**
 * Build the _scope object for an invocation: the execute-as bot principal
 * (recording the invoking user when known), or the invoking user's own
 * principal. Returns null when no principal could be resolved.
 */
async function resolveInvocationScope(executeAs, userId) {
    if (executeAs) {
        const [botPrincipal, invokerPrincipal] = await Promise.all([
            (0, principal_1.resolvePrincipal)(executeAs),
            userId ? (0, principal_1.resolvePrincipal)(userId) : Promise.resolve(null),
        ]);
        if (!botPrincipal)
            return null;
        const scope = { principal: botPrincipal, scopes: ['mcp:tool:call'] };
        if (invokerPrincipal) {
            scope.initiatedBy = userId;
            scope.initiatingPrincipal = invokerPrincipal;
        }
        return scope;
    }
    if (userId) {
        const principal = await (0, principal_1.resolvePrincipal)(userId);
        return principal ? { principal, scopes: ['mcp:tool:call'] } : null;
    }
    return null;
}
|
|
@@ -78,7 +78,7 @@ function appendNormalStep(dag, idx, step, prefix, graphTopic, plan, steps, trigg
|
|
|
78
78
|
if (plan) {
|
|
79
79
|
dag.prevActivityId = (0, transform_1.insertTransformActivities)(idx, plan, steps, prefix, graphTopic, dag.triggerId, triggerInputKeys, dag.stepIndexToActivityId, collapsedToCoreIndex, dag.activities, dag.transitions, dag.manifest, dag.prevActivityId);
|
|
80
80
|
}
|
|
81
|
-
// Wire input mappings from upstream steps
|
|
81
|
+
// Wire input mappings from upstream steps (may include @pipe objects for derivations)
|
|
82
82
|
const inputMappings = (0, wiring_1.wireStepInputs)(idx, step, plan, dag.stepIndexToActivityId, dag.triggerId, triggerInputKeys, steps, prefix, dag.prevActivityId, dag.prevResult, collapsedToCoreIndex);
|
|
83
83
|
// Build the activity definition
|
|
84
84
|
const resultSchema = step.result ? (0, utils_1.inferSchema)(step.result) : { type: 'object' };
|
|
@@ -6,4 +6,4 @@ import type { ExtractedStep, EnhancedCompilationPlan } from '../../types';
|
|
|
6
6
|
* Build input mappings for a step using the compilation plan's data flow edges.
|
|
7
7
|
* Falls back to mechanical backward-scan when no plan is available.
|
|
8
8
|
*/
|
|
9
|
-
export declare function wireStepInputs(stepIdx: number, step: ExtractedStep, plan: EnhancedCompilationPlan | null, stepIndexToActivityId: Map<number, string>, triggerId: string, triggerInputKeys: Set<string>, steps: ExtractedStep[], prefix: string, prevActivityId: string, prevResult: unknown, collapsedToCoreIndex?: Map<number, number>): Record<string,
|
|
9
|
+
export declare function wireStepInputs(stepIdx: number, step: ExtractedStep, plan: EnhancedCompilationPlan | null, stepIndexToActivityId: Map<number, string>, triggerId: string, triggerInputKeys: Set<string>, steps: ExtractedStep[], prefix: string, prevActivityId: string, prevResult: unknown, collapsedToCoreIndex?: Map<number, number>): Record<string, unknown>;
|
|
@@ -4,6 +4,94 @@
|
|
|
4
4
|
*/
|
|
5
5
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
6
|
exports.wireStepInputs = wireStepInputs;
|
|
7
|
+
/**
 * HotMesh @pipe sub-pipe for today's date as YYYY-MM-DD.
 * Follows RPN convention: operands row, then operator row.
 * Row 1: date.now() → epoch (operator, no extra operands)
 * Row 2: [isoString, 0, 10] — three operands for substring
 * Row 3: substring(isoString, 0, 10) → "YYYY-MM-DD"
 */
const DATE_SUB_PIPE = {
    '@pipe': [
        ['{@date.now}'],
        ['{@date.toISOString}', 0, 10],
        ['{@string.substring}'],
    ],
};
/**
 * Build a concat @pipe from an ordered list of string segments.
 * Segments equal to '{value}' resolve to sourceRef; segments equal to
 * '{date}' resolve to DATE_SUB_PIPE; anything else is a literal.
 *
 * Without a '{date}' segment a single flat concat row suffices; with one,
 * uses the fan-out/fan-in pattern: each segment becomes its own row-level
 * sub-pipe (NOT nested inside array rows), followed by a concat row.
 * Returns sourceRef unchanged when the segments reduce to just the source.
 */
function buildConcatPipe(segments, sourceRef) {
    const hasDate = segments.includes('{date}');
    if (!hasDate) {
        // Simple concat — no sub-pipes needed
        const args = segments.map((s) => s === '{value}' ? sourceRef : s);
        if (args.length === 1 && args[0] === sourceRef)
            return sourceRef;
        return { '@pipe': [args, ['{@string.concat}']] };
    }
    // Fan-out/fan-in: each segment gets its own sub-pipe row
    const rows = [];
    for (const s of segments) {
        if (s === '{value}') {
            rows.push({ '@pipe': [[sourceRef]] });
        }
        else if (s === '{date}') {
            rows.push(DATE_SUB_PIPE);
        }
        else {
            rows.push({ '@pipe': [[s]] });
        }
    }
    rows.push(['{@string.concat}']);
    return { '@pipe': rows };
}
/**
 * Convert a scalar derivation spec into a HotMesh @pipe expression.
 *
 * Fix: the 'concat' and 'template' strategies previously duplicated the
 * entire fan-out/fan-in row-building logic; both now delegate to the shared
 * buildConcatPipe helper (behavior unchanged — 'template' simply splits its
 * template string into segments first).
 *
 * @param {string} sourceRef - Upstream activity/trigger output reference.
 * @param {object|null|undefined} derivation - Optional spec with a `strategy`
 *   of 'concat' (parts array), 'template' (template string), or 'prefix'
 *   (prefix/suffix strings). Unknown strategies fall through to sourceRef.
 * @returns {string|object} sourceRef unchanged, or a '@pipe' expression.
 */
function buildDerivationPipe(sourceRef, derivation) {
    if (!derivation)
        return sourceRef;
    switch (derivation.strategy) {
        case 'concat': {
            return buildConcatPipe(derivation.parts || ['{value}'], sourceRef);
        }
        case 'template': {
            const tpl = derivation.template || '{value}';
            // Split keeps the '{value}'/'{date}' tokens as their own segments.
            const segments = tpl.split(/(\{value\}|\{date\})/).filter(Boolean);
            return buildConcatPipe(segments, sourceRef);
        }
        case 'prefix': {
            const concatParts = [];
            if (derivation.prefix)
                concatParts.push(derivation.prefix);
            concatParts.push(sourceRef);
            if (derivation.suffix)
                concatParts.push(derivation.suffix);
            if (concatParts.length === 1)
                return sourceRef;
            return { '@pipe': [concatParts, ['{@string.concat}']] };
        }
        default:
            return sourceRef;
    }
}
|
|
7
95
|
/**
|
|
8
96
|
* Build input mappings for a step using the compilation plan's data flow edges.
|
|
9
97
|
* Falls back to mechanical backward-scan when no plan is available.
|
|
@@ -23,6 +111,16 @@ function wireStepInputs(stepIdx, step, plan, stepIndexToActivityId, triggerId, t
|
|
|
23
111
|
: stepIdx;
|
|
24
112
|
const edgesForStep = plan.dataFlow.filter(e => e.toStep === collapsedIdx);
|
|
25
113
|
for (const edge of edgesForStep) {
|
|
114
|
+
// Skip edges that target a complex nested object argument — these are stored
|
|
115
|
+
// defaults in tool_arguments (e.g., a nested `login` object with selectors)
|
|
116
|
+
// and must not be overridden by a flat scalar from an upstream step.
|
|
117
|
+
if (step.kind === 'tool' && !edge.transform) {
|
|
118
|
+
const argValue = step.arguments[edge.toField];
|
|
119
|
+
if (argValue && typeof argValue === 'object' && !Array.isArray(argValue) &&
|
|
120
|
+
Object.keys(argValue).length > 2) {
|
|
121
|
+
continue;
|
|
122
|
+
}
|
|
123
|
+
}
|
|
26
124
|
if (edge.transform && Object.keys(edge.transform.fieldMap).length > 0) {
|
|
27
125
|
// This edge has a transform — the reshape activity was inserted before this step.
|
|
28
126
|
// Wire from the transform activity's output (which uses toField as the output key).
|
|
@@ -30,14 +128,16 @@ function wireStepInputs(stepIdx, step, plan, stepIndexToActivityId, triggerId, t
|
|
|
30
128
|
inputMappings[edge.toField] = `{${transformActId}.output.data.${edge.toField}}`;
|
|
31
129
|
}
|
|
32
130
|
else if (edge.fromStep === 'trigger') {
|
|
33
|
-
|
|
131
|
+
const rawRef = `{${triggerId}.output.data.${edge.fromField}}`;
|
|
132
|
+
inputMappings[edge.toField] = buildDerivationPipe(rawRef, edge.derivation);
|
|
34
133
|
}
|
|
35
134
|
else {
|
|
36
135
|
// Remap the source step from collapsed to core index
|
|
37
136
|
const remappedFrom = collapsedToCoreIndex?.get(edge.fromStep) ?? edge.fromStep;
|
|
38
137
|
const sourceActId = stepIndexToActivityId.get(remappedFrom);
|
|
39
138
|
if (sourceActId) {
|
|
40
|
-
|
|
139
|
+
const rawRef = `{${sourceActId}.output.data.${edge.fromField}}`;
|
|
140
|
+
inputMappings[edge.toField] = buildDerivationPipe(rawRef, edge.derivation);
|
|
41
141
|
}
|
|
42
142
|
}
|
|
43
143
|
}
|
|
@@ -4,7 +4,7 @@
|
|
|
4
4
|
* Consolidates all system/user prompt strings used by compile, validate,
|
|
5
5
|
* and extract stages so they can be reviewed and tuned in one place.
|
|
6
6
|
*/
|
|
7
|
-
export declare const COMPILATION_PROMPT = "You are a workflow compiler. You analyze MCP tool execution traces and produce a COMPILATION PLAN \u2014 a complete specification for building a deterministic YAML DAG workflow.\n\nGiven:\n1. The user's ORIGINAL PROMPT \u2014 the single most important signal for understanding intent\n2. EXECUTION STEPS \u2014 tool calls with arguments, result structure samples, and server IDs\n3. PATTERN ANNOTATIONS \u2014 pre-detected iteration candidates from static analysis\n4. NAIVE INPUT CLASSIFICATION \u2014 initial argument classification\n\nYour job: produce a plan that makes the workflow truly reusable and deterministic.\n\n## Critical: Understand Intent\n\nThe original prompt describes what the user wanted. The execution trace shows HOW an LLM accomplished it, but may include exploratory detours. Your compilation captures INTENT, not a blind replay.\n\nFor example, if the prompt says \"login to site X and take screenshots of all pages\":\n- INTENT: login \u2192 discover pages \u2192 iterate and screenshot each one\n- Execution may have included probing steps \u2014 exclude those\n- Deterministic version: accept credentials \u2192 login \u2192 extract links \u2192 iterate taking screenshots\n\n## Critical: Preserve Discovery Steps\n\nMany workflows follow a \"discover then act\" pattern: one step DISCOVERS data (e.g., extract navigation links, query a database, list files) and a later step ACTS on that data (e.g., screenshot each page, process each record, transform each file).\n\n**NEVER collapse discovery + action into a single step with the discovered data as a user input.** If the execution trace shows:\n1. Step A: extract_content \u2192 produces `links: [{text, href}, ...]`\n2. Step B: capture_pages(pages=[...array built from step A's links...])\n\nThe compiled workflow MUST keep BOTH steps: A produces the array, B consumes it. 
Do NOT make the array a trigger input \u2014 it was runtime-discovered, not user-provided.\n\n**How to detect**: If a step's argument contains a large array of items that closely mirrors a prior step's output array (same URLs, same items, possibly reshaped), that array was DERIVED from the prior step. Keep both steps and wire them with a data_flow edge (with a transform if formats differ).\n\n## Rules\n\n### Step Dispositions\n- **core**: Directly serves the workflow intent. Produces data consumed by later steps. **Discovery steps that produce arrays consumed by later action steps are ALWAYS core.**\n- **exploratory**: Probing/debugging/discovery steps that don't produce data needed by the workflow. Exclude these:\n - Checking if compiled workflows exist (list_workflows, list_yaml_workflows)\n - Listing files to see what exists (list_files, read_file)\n - Initial tool calls that failed and were retried with different parameters\n - Any step whose result is not consumed by a subsequent core step\n - **NEVER mark a discovery step as exploratory if its output was used to build arguments for a later step**\n\n### Signal Steps (Human-in-the-Loop)\nSteps with kind \"signal\" represent a durable pause where the workflow waits for human input (e.g., credentials, approval). These are ALWAYS core \u2014 they are essential to the workflow's data flow. The signal step receives data from a human and makes it available to subsequent steps. Do NOT mark signal steps as exploratory. The escalation tool call (escalate_and_wait) that precedes a signal step is also always core.\n\n**Signal data flow**: The signal step result contains the human response fields (e.g., password). These MUST be wired via data_flow edges to every downstream step that needs them. 
Add a data_flow edge from the signal step index to the consuming step with the matching field name.\n\n**Credentials from signals**: When the signal provides a credential (format: password in the schema), downstream tools that need it should receive it as a separate named input argument. The runtime exchanges ephemeral credential tokens automatically. For tools with complex stored arguments (like run_script steps arrays), wire the credential as a top-level argument name \u2014 the runtime merges it with stored defaults.\n\n### Iteration Specifications\nWhen the execution shows repeated tool calls with varying arguments (the pattern detector may have already collapsed these):\n- Identify the SOURCE: which prior step's result contains the array being iterated. This is the step that PRODUCED the list of items \u2014 look for a step whose result contains an array field with items matching the iteration's varying values. For example, if the iteration visits multiple URLs, find the step that returned those URLs (e.g., extract_content with links).\n- The source is NEVER a step that doesn't have the array in its output. 
Double-check: does the source step's resultKeys include the source_field?\n- Specify the source_field: the dot-path to the array (e.g., \"links\", \"results.pages\")\n- List varying_keys (change per item) vs constant_args (shared)\n- **KEY MAPPINGS are critical**: array items often use different key names than the tool expects.\n E.g., extract_content returns `links: [{text, href}, ...]` but the screenshot tool wants `url`.\n Map: `{ \"url\": \"href\" }` \u2014 tool arg name \u2192 array item key name.\n Use null for keys that are COMPUTED at runtime, not sourced from the array.\n For example, screenshot_path is often derived from the link text or URL \u2014 it's not a field in the source array directly:\n `{ \"screenshot_path\": null }` \u2014 the value must be computed or provided by the trigger.\n\n### Tool Simplification for Iterations (CRITICAL)\nThe iteration pattern works by extracting individual values from array items and passing them as simple key=value arguments to the iterated tool. This means:\n\n**The iterated tool MUST accept simple, flat arguments** (url, path, page_id \u2014 not complex nested structures like a `steps` array).\n\nIf the execution used a complex multi-step scripting tool (e.g., `run_script` with a `steps: [{action, url}, {action, path}]` array) for each iteration, you MUST replace it with a simpler tool from the same server that accepts flat arguments. Check the server's tool inventory for a simpler alternative.\n\nFor example:\n- `run_script(steps=[navigate, wait, screenshot])` per page \u2192 replace with `capture_page(url, path, full_page, wait_ms)` (1 call, flat args)\n- `run_script(steps=[navigate, fill, click])` per item \u2192 replace with `submit_form(url, fields)` (1 call, flat args)\n\nWhen replacing: use the same server_id but the simpler tool_name. 
The varying_keys and key_mappings should map directly to the simple tool's argument names.\n\n**If no simpler tool alternative exists**, use a data_flow edge with a transform to feed the array into a batch/composite tool that accepts the full array (like `capture_authenticated_pages` which takes a `pages` array).\n\n### Data Flow Graph\nSpecify directed edges showing how data flows between steps:\n- from_step: \"trigger\" (user input) or step index number\n- from_field: the output field name (or trigger input key)\n- to_step: the consuming step index\n- to_field: the argument key name\n- is_session_wire: true for session handles (page_id, _handle, session_id)\n\nSession handles are critical \u2014 they maintain authenticated browser sessions, database connections, etc. They must be threaded from their producer through ALL subsequent steps that need them.\n\n### Data Flow Transforms (CRITICAL for array reshaping)\nWhen a source step produces an array of objects in one format but the consuming step expects a DIFFERENT format, add a `transform` to the data_flow edge. Compare the source step's result structure with the consuming step's actual arguments from the trace.\n\n**Choosing the correct source field**: When a step produces multiple output fields, check the result sample to determine which field actually contains an ARRAY OF OBJECTS suitable for iteration/reshaping. Prefer structured array fields over raw/unstructured fields. Check the Tool-Specific Compilation Hints section (if present) for guidance on which fields to use for specific tools.\n\nFor example: extract_content returns `links: [{text, href}]` but capture tool expects `pages: [{url, screenshot_path, wait_ms, full_page}]`.\nAdd a transform with:\n- `field_map`: maps target keys \u2192 source keys (e.g., `{\"url\": \"href\"}`). 
Use null for keys not in the source.\n- `defaults`: static values to inject (e.g., `{\"wait_ms\": 3000, \"full_page\": true}`)\n- `derivations`: for computed keys (null in field_map), how to derive them from source data\n - strategy: \"slugify\" (lowercase, replace spaces/special with hyphens), \"prefix\", \"template\"\n - source_key: which source field to derive from\n - prefix/suffix/template: string manipulation params\n\nExample edge with transform:\n```\n{\n \"from_step\": 1, \"from_field\": \"links\", \"to_step\": 2, \"to_field\": \"pages\",\n \"is_session_wire\": false,\n \"transform\": {\n \"field_map\": { \"url\": \"href\", \"screenshot_path\": null },\n \"defaults\": { \"wait_ms\": 3000, \"full_page\": true },\n \"derivations\": {\n \"screenshot_path\": {\n \"source_key\": \"href\",\n \"strategy\": \"slugify\",\n \"prefix\": \"screenshots/\",\n \"suffix\": \".png\"\n }\n }\n }\n}\n```\n\nIMPORTANT: Check EVERY array-typed data_flow edge. Compare the source step's result item keys with the consuming step's argument item keys. If they differ, add a transform. Look at the actual tool_arguments in the execution trace to determine the correct field_map, defaults, and derivations.\n\n### Input Classification\n- **dynamic**: Simple values callers MUST provide: URLs, credentials, file paths, queries, search terms. These are always scalar strings, numbers, or booleans \u2014 NEVER complex objects or arrays.\n- **fixed**: Implementation details with sensible defaults: selectors, timeouts, boolean flags, AND complex structured arguments like `steps` arrays, `login` objects, or `pages` arrays. These are baked into stored tool_arguments.\n\n**Complex tool arguments (arrays of objects, nested structures) are ALWAYS fixed.** They represent the implementation recipe, not user input. 
For example:\n- A `steps` array describing browser actions (navigate, fill, click, screenshot) \u2192 **fixed**\n- A `login` object with selectors and credentials \u2192 flatten the credentials (username, password) as dynamic, but the selectors as fixed\n- A `pages` array of URLs to capture \u2192 **fixed** if hardcoded from the trace, or a data_flow edge if discovered at runtime\n\nFlatten nested objects containing dynamic values. E.g., `login: {url, username, password}` \u2192 separate `login_url`, `username`, `password` fields. But NEVER expose the full nested object or array as a trigger input.\n\n**Arrays that were DISCOVERED at runtime (by a prior step) are NOT inputs.** They flow between steps via data_flow edges. Only make an array a trigger input if the user explicitly provided it in their prompt. If the array was produced by a discovery step (extract_content, query, list), keep the discovery step as core and wire its output to the consuming step.\n\n### Data Flow Wiring Precision\n- **Only wire inputs that semantically match.** A directory name (e.g., `screenshot_dir = \"screenshots\"`) must NOT be wired to a file path argument (e.g., `screenshot_path` which expects `\"screenshots/home.png\"`). If a tool argument needs a specific file path but the trigger only provides a directory, leave that argument unwired \u2014 the stored tool_arguments default will provide the correct value.\n- **Trigger inputs should map to the EXACT tool argument they represent.** Don't reuse a trigger input for a different-purpose argument just because the names are vaguely related.\n- **When in doubt, don't wire.** An unwired argument falls back to the stored tool_arguments default from the original execution \u2014 this is always correct. 
An incorrectly wired argument overrides the correct default with a wrong value.\n\n### Session Fields and Threading Rules\nList all fields that represent session tokens/handles that must flow through the DAG (e.g., page_id, _handle, session_id).\n\n**Critical**: When a login/setup step produces a page_id or _handle, ALL subsequent browser/page steps must receive that session wire \u2014 including steps inside iterations. The data_flow graph must include session wire edges from the producing step to EVERY downstream step that operates on the same session, not just the immediately next one. For iterations: wire the session from the setup step directly to the iteration body step.\n\n**COMPLETENESS REQUIREMENT**: For EACH step that uses a session field (check the step's argumentKeys \u2014 if it includes page_id, _handle, or session_id), you MUST emit a data_flow edge wiring that field from its producer. If step 0 produces _handle and steps 1, 2, and 3 all use it, you need THREE edges: 0\u21921, 0\u21922, 0\u21923. Do NOT assume downstream steps will \"inherit\" session fields \u2014 each consumer needs an explicit edge.\n\n### Data Flow Completeness Check\nBefore finalizing the plan, verify:\n1. Every step that has a session field in its argumentKeys has a corresponding is_session_wire edge\n2. Every step that consumes data from a prior step has a data_flow edge for that field\n3. Every dynamic trigger input is wired to at least one step via a data_flow edge from \"trigger\"\n4. 
Transform edges include the source field AND the consuming step can access all fields it needs\n\n## Output Format\n\nReturn a JSON object (no markdown fences):\n{\n \"intent\": \"Brief generic description of what this workflow does\",\n \"description\": \"Suggested workflow description for discovery\",\n \"steps\": [\n { \"index\": 0, \"purpose\": \"Navigate to the target site\", \"disposition\": \"core\" },\n { \"index\": 1, \"purpose\": \"Extract navigation links from the page\", \"disposition\": \"core\" },\n { \"index\": 2, \"purpose\": \"List files to check directory structure\", \"disposition\": \"exploratory\" }\n ],\n \"core_step_indices\": [0, 1, 3],\n \"inputs\": [\n { \"key\": \"base_url\", \"type\": \"string\", \"classification\": \"dynamic\", \"description\": \"The base URL of the site\" },\n { \"key\": \"username\", \"type\": \"string\", \"classification\": \"dynamic\", \"description\": \"Login username\" },\n { \"key\": \"timeout\", \"type\": \"number\", \"classification\": \"fixed\", \"description\": \"Page load timeout\", \"default\": 30000 }\n ],\n \"iterations\": [\n {\n \"body_step_index\": 3,\n \"tool_name\": \"screenshot\",\n \"server_id\": \"playwright\",\n \"source_step_index\": 1,\n \"source_field\": \"links\",\n \"varying_keys\": [\"url\", \"screenshot_path\"],\n \"constant_args\": { \"full_page\": true },\n \"key_mappings\": { \"url\": \"href\", \"screenshot_path\": null }\n }\n ],\n \"data_flow\": [\n { \"from_step\": \"trigger\", \"from_field\": \"base_url\", \"to_step\": 0, \"to_field\": \"url\", \"is_session_wire\": false },\n { \"from_step\": 0, \"from_field\": \"page_id\", \"to_step\": 1, \"to_field\": \"page_id\", \"is_session_wire\": true },\n { \"from_step\": 0, \"from_field\": \"_handle\", \"to_step\": 1, \"to_field\": \"_handle\", \"is_session_wire\": true }\n ],\n \"session_fields\": [\"page_id\", \"_handle\"]\n}";
|
|
7
|
+
export declare const COMPILATION_PROMPT = "You are a workflow compiler. You analyze MCP tool execution traces and produce a COMPILATION PLAN \u2014 a complete specification for building a deterministic YAML DAG workflow.\n\nGiven:\n1. The user's ORIGINAL PROMPT \u2014 the single most important signal for understanding intent\n2. EXECUTION STEPS \u2014 tool calls with arguments, result structure samples, and server IDs\n3. PATTERN ANNOTATIONS \u2014 pre-detected iteration candidates from static analysis\n4. NAIVE INPUT CLASSIFICATION \u2014 initial argument classification\n\nYour job: produce a plan that makes the workflow truly reusable and deterministic.\n\n## Critical: Understand Intent\n\nThe original prompt describes what the user wanted. The execution trace shows HOW an LLM accomplished it, but may include exploratory detours. Your compilation captures INTENT, not a blind replay.\n\nFor example, if the prompt says \"login to site X and take screenshots of all pages\":\n- INTENT: login \u2192 discover pages \u2192 iterate and screenshot each one\n- Execution may have included probing steps \u2014 exclude those\n- Deterministic version: accept credentials \u2192 login \u2192 extract links \u2192 iterate taking screenshots\n\n## Critical: Preserve Discovery Steps\n\nMany workflows follow a \"discover then act\" pattern: one step DISCOVERS data (e.g., extract navigation links, query a database, list files) and a later step ACTS on that data (e.g., screenshot each page, process each record, transform each file).\n\n**NEVER collapse discovery + action into a single step with the discovered data as a user input.** If the execution trace shows:\n1. Step A: extract_content \u2192 produces `links: [{text, href}, ...]`\n2. Step B: capture_pages(pages=[...array built from step A's links...])\n\nThe compiled workflow MUST keep BOTH steps: A produces the array, B consumes it. 
Do NOT make the array a trigger input \u2014 it was runtime-discovered, not user-provided.\n\n**How to detect**: If a step's argument contains a large array of items that closely mirrors a prior step's output array (same URLs, same items, possibly reshaped), that array was DERIVED from the prior step. Keep both steps and wire them with a data_flow edge (with a transform if formats differ).\n\n## Rules\n\n### Step Dispositions\n- **core**: Directly serves the workflow intent. Produces data consumed by later steps. **Discovery steps that produce arrays consumed by later action steps are ALWAYS core.**\n- **exploratory**: Probing/debugging/discovery steps that don't produce data needed by the workflow. Exclude these:\n - Checking if compiled workflows exist (list_workflows, list_yaml_workflows)\n - Listing files to see what exists (list_files, read_file)\n - Initial tool calls that failed and were retried with different parameters\n - Any step whose result is not consumed by a subsequent core step\n - **NEVER mark a discovery step as exploratory if its output was used to build arguments for a later step**\n\n### Signal Steps (Human-in-the-Loop)\nSteps with kind \"signal\" represent a durable pause where the workflow waits for human input (e.g., credentials, approval). These are ALWAYS core \u2014 they are essential to the workflow's data flow. The signal step receives data from a human and makes it available to subsequent steps. Do NOT mark signal steps as exploratory. The escalation tool call (escalate_and_wait) that precedes a signal step is also always core.\n\n**Signal data flow**: The signal step result contains the human response fields (e.g., password). These MUST be wired via data_flow edges to every downstream step that needs them. 
Add a data_flow edge from the signal step index to the consuming step with the matching field name.\n\n**Credentials from signals**: When the signal provides a credential (format: password in the schema), downstream tools that need it should receive it as a separate named input argument. The runtime exchanges ephemeral credential tokens automatically. For tools with complex stored arguments (like run_script steps arrays), wire the credential as a top-level argument name \u2014 the runtime merges it with stored defaults.\n\n### Iteration Specifications\nWhen the execution shows repeated tool calls with varying arguments (the pattern detector may have already collapsed these):\n- Identify the SOURCE: which prior step's result contains the array being iterated. This is the step that PRODUCED the list of items \u2014 look for a step whose result contains an array field with items matching the iteration's varying values. For example, if the iteration visits multiple URLs, find the step that returned those URLs (e.g., extract_content with links).\n- The source is NEVER a step that doesn't have the array in its output. 
Double-check: does the source step's resultKeys include the source_field?\n- Specify the source_field: the dot-path to the array (e.g., \"links\", \"results.pages\")\n- List varying_keys (change per item) vs constant_args (shared)\n- **KEY MAPPINGS are critical**: array items often use different key names than the tool expects.\n E.g., extract_content returns `links: [{text, href}, ...]` but the screenshot tool wants `url`.\n Map: `{ \"url\": \"href\" }` \u2014 tool arg name \u2192 array item key name.\n Use null for keys that are COMPUTED at runtime, not sourced from the array.\n For example, screenshot_path is often derived from the link text or URL \u2014 it's not a field in the source array directly:\n `{ \"screenshot_path\": null }` \u2014 the value must be computed or provided by the trigger.\n\n### Tool Simplification for Iterations (CRITICAL)\nThe iteration pattern works by extracting individual values from array items and passing them as simple key=value arguments to the iterated tool. This means:\n\n**The iterated tool MUST accept simple, flat arguments** (url, path, page_id \u2014 not complex nested structures like a `steps` array).\n\nIf the execution used a complex multi-step scripting tool (e.g., `run_script` with a `steps: [{action, url}, {action, path}]` array) for each iteration, you MUST replace it with a simpler tool from the same server that accepts flat arguments. Check the server's tool inventory for a simpler alternative.\n\nFor example:\n- `run_script(steps=[navigate, wait, screenshot])` per page \u2192 replace with `capture_page(url, path, full_page, wait_ms)` (1 call, flat args)\n- `run_script(steps=[navigate, fill, click])` per item \u2192 replace with `submit_form(url, fields)` (1 call, flat args)\n\nWhen replacing: use the same server_id but the simpler tool_name. 
The varying_keys and key_mappings should map directly to the simple tool's argument names.\n\n**If no simpler tool alternative exists**, use a data_flow edge with a transform to feed the array into a batch/composite tool that accepts the full array (like `capture_authenticated_pages` which takes a `pages` array).\n\n### Data Flow Graph\nSpecify directed edges showing how data flows between steps:\n- from_step: \"trigger\" (user input) or step index number\n- from_field: the output field name (or trigger input key)\n- to_step: the consuming step index\n- to_field: the argument key name\n- is_session_wire: true for session handles (page_id, _handle, session_id)\n\nSession handles are critical \u2014 they maintain authenticated browser sessions, database connections, etc. They must be threaded from their producer through ALL subsequent steps that need them.\n\n### Chain Analysis to Downstream Steps\nWhen a step produces a meaningful result (analysis, extraction, description, computed value) and a later step consumes related data (saving, storing, forwarding, reporting), there MUST be a data_flow edge connecting them. Match by semantic intent, not just field name:\n- Step produces `description` \u2192 downstream step takes `value` \u2192 edge with from_field: \"description\", to_field: \"value\"\n- Step produces `analysis.summary` \u2192 downstream step takes `content` \u2192 edge with appropriate field mapping\n\nIf the original execution trace shows that a step's output was used (even indirectly) as input to a later step, the compiled version must preserve that data chain. A broken chain means the downstream step receives no data \u2014 the worst possible compilation error.\n\n### Data Flow Transforms (CRITICAL for array reshaping)\nWhen a source step produces an array of objects in one format but the consuming step expects a DIFFERENT format, add a `transform` to the data_flow edge. 
Compare the source step's result structure with the consuming step's actual arguments from the trace.\n\n**Choosing the correct source field**: When a step produces multiple output fields, check the result sample to determine which field actually contains an ARRAY OF OBJECTS suitable for iteration/reshaping. Prefer structured array fields over raw/unstructured fields. Check the Tool-Specific Compilation Hints section (if present) for guidance on which fields to use for specific tools.\n\nFor example: extract_content returns `links: [{text, href}]` but capture tool expects `pages: [{url, screenshot_path, wait_ms, full_page}]`.\nAdd a transform with:\n- `field_map`: maps target keys \u2192 source keys (e.g., `{\"url\": \"href\"}`). Use null for keys not in the source.\n- `defaults`: static values to inject (e.g., `{\"wait_ms\": 3000, \"full_page\": true}`)\n- `derivations`: for computed keys (null in field_map), how to derive them from source data\n - strategy: \"slugify\" (lowercase, replace spaces/special with hyphens), \"prefix\", \"template\", \"concat\"\n - source_key: which source field to derive from\n - prefix/suffix/template: string manipulation params\n\nAvailable derivation strategies:\n- **slugify**: Lowercase, replace spaces/special chars with hyphens. Optionally add prefix/suffix.\n- **prefix**: Prepend a static string.\n- **template**: Format string with `{value}` (source field) and `{date}` (today's date as YYYY-MM-DD via @date.yyyymmdd).\n- **concat**: Join multiple parts. 
Each part can use `{value}` and `{date}` placeholders.\n Example: `{ \"strategy\": \"concat\", \"parts\": [\"{value}\", \"-\", \"{date}\"] }` produces `my-slug-2026-04-17`.\n- **passthrough**: No transformation.\n\nUse these when the workflow needs runtime-computed values (date-stamped filenames, slugified URLs, templated paths).\n\nExample edge with transform:\n```\n{\n \"from_step\": 1, \"from_field\": \"links\", \"to_step\": 2, \"to_field\": \"pages\",\n \"is_session_wire\": false,\n \"transform\": {\n \"field_map\": { \"url\": \"href\", \"screenshot_path\": null },\n \"defaults\": { \"wait_ms\": 3000, \"full_page\": true },\n \"derivations\": {\n \"screenshot_path\": {\n \"source_key\": \"href\",\n \"strategy\": \"slugify\",\n \"prefix\": \"screenshots/\",\n \"suffix\": \".png\"\n }\n }\n }\n}\n```\n\nIMPORTANT: Check EVERY array-typed data_flow edge. Compare the source step's result item keys with the consuming step's argument item keys. If they differ, add a transform. Look at the actual tool_arguments in the execution trace to determine the correct field_map, defaults, and derivations.\n\n### Scalar Derivations on Data Flow Edges\nWhen a scalar value needs runtime transformation before reaching its consumer, add a `derivation` to the data_flow edge (NOT a `transform` \u2014 transforms are for array reshaping). Derivations generate runtime expressions for string manipulation.\n\nCommon use case: the user's prompt mentions a pattern like \"save with key slug-{date}\" or \"name it {something}-{today's date}\". The trigger provides the base value, and the derivation appends or formats it at runtime.\n\nAdd a `derivation` field to the data_flow edge:\n```\n{\n \"from_step\": \"trigger\", \"from_field\": \"key\", \"to_step\": 3, \"to_field\": \"key\",\n \"is_session_wire\": false,\n \"derivation\": { \"strategy\": \"concat\", \"parts\": [\"{value}\", \"-\", \"{date}\"] }\n}\n```\n\nThis produces a runtime-computed key like `my-slug-2026-04-17`. 
The `{value}` placeholder is the source field value; `{date}` is today's ISO date (YYYY-MM-DD).\n\nUse derivations when:\n- The user wants date-stamped keys, filenames, or identifiers\n- A value needs a prefix/suffix added at runtime\n- Two values need to be concatenated\n\n### Input Classification\n- **dynamic**: Simple values callers MUST provide: URLs, credentials, file paths, queries, search terms. These are always scalar strings, numbers, or booleans \u2014 NEVER complex objects or arrays.\n- **fixed**: Implementation details with sensible defaults: selectors, timeouts, boolean flags, AND complex structured arguments like `steps` arrays, `login` objects, or `pages` arrays. These are baked into stored tool_arguments.\n\n**Complex tool arguments (arrays of objects, nested structures) are ALWAYS fixed.** They represent the implementation recipe, not user input. For example:\n- A `steps` array describing browser actions (navigate, fill, click, screenshot) \u2192 **fixed**\n- A `login` object with selectors and credentials \u2192 flatten the credentials (username, password) as dynamic, but the selectors as fixed\n- A `pages` array of URLs to capture \u2192 **fixed** if hardcoded from the trace, or a data_flow edge if discovered at runtime\n\nFlatten nested objects containing dynamic values. E.g., `login: {url, username, password}` \u2192 separate `login_url`, `username`, `password` fields. But NEVER expose the full nested object or array as a trigger input.\n\n**Arrays that were DISCOVERED at runtime (by a prior step) are NOT inputs.** They flow between steps via data_flow edges. Only make an array a trigger input if the user explicitly provided it in their prompt. 
If the array was produced by a discovery step (extract_content, query, list), keep the discovery step as core and wire its output to the consuming step.\n\n### Prompt-Mentioned Values Are Dynamic\nIf a scalar value (URL, domain name, file path, key name, slug, query string) appears verbatim or closely paraphrased in the user's original prompt, classify it as **dynamic**. The user explicitly chose that value for this execution and will want to change it next time. Only classify a prompt-mentioned value as fixed if it is unambiguously an implementation constant (a CSS selector, a timeout, a boolean flag).\n\nThis is the most common compilation error: treating the user's specific request values as universal defaults. When in doubt, make it dynamic.\n\n### Data Flow Wiring Precision\n- **Only wire inputs that semantically match.** A directory name (e.g., `screenshot_dir = \"screenshots\"`) must NOT be wired to a file path argument (e.g., `screenshot_path` which expects `\"screenshots/home.png\"`). If a tool argument needs a specific file path but the trigger only provides a directory, leave that argument unwired \u2014 the stored tool_arguments default will provide the correct value.\n- **Trigger inputs should map to the EXACT tool argument they represent.** Don't reuse a trigger input for a different-purpose argument just because the names are vaguely related.\n- **When in doubt, don't wire.** An unwired argument falls back to the stored tool_arguments default from the original execution \u2014 this is always correct. An incorrectly wired argument overrides the correct default with a wrong value.\n\n### Session Fields and Threading Rules\nList all fields that represent session tokens/handles that must flow through the DAG (e.g., page_id, _handle, session_id).\n\n**Critical**: When a login/setup step produces a page_id or _handle, ALL subsequent browser/page steps must receive that session wire \u2014 including steps inside iterations. 
The data_flow graph must include session wire edges from the producing step to EVERY downstream step that operates on the same session, not just the immediately next one. For iterations: wire the session from the setup step directly to the iteration body step.\n\n**COMPLETENESS REQUIREMENT**: For EACH step that uses a session field (check the step's argumentKeys \u2014 if it includes page_id, _handle, or session_id), you MUST emit a data_flow edge wiring that field from its producer. If step 0 produces _handle and steps 1, 2, and 3 all use it, you need THREE edges: 0\u21921, 0\u21922, 0\u21923. Do NOT assume downstream steps will \"inherit\" session fields \u2014 each consumer needs an explicit edge.\n\n### Data Flow Completeness Check\nBefore finalizing the plan, verify:\n1. Every step that has a session field in its argumentKeys has a corresponding is_session_wire edge\n2. Every step that consumes data from a prior step has a data_flow edge for that field\n3. Every dynamic trigger input is wired to at least one step via a data_flow edge from \"trigger\"\n4. 
Transform edges include the source field AND the consuming step can access all fields it needs\n\n## Output Format\n\nReturn a JSON object (no markdown fences):\n{\n \"intent\": \"Brief generic description of what this workflow does\",\n \"description\": \"Suggested workflow description for discovery\",\n \"steps\": [\n { \"index\": 0, \"purpose\": \"Navigate to the target site\", \"disposition\": \"core\" },\n { \"index\": 1, \"purpose\": \"Extract navigation links from the page\", \"disposition\": \"core\" },\n { \"index\": 2, \"purpose\": \"List files to check directory structure\", \"disposition\": \"exploratory\" }\n ],\n \"core_step_indices\": [0, 1, 3],\n \"inputs\": [\n { \"key\": \"base_url\", \"type\": \"string\", \"classification\": \"dynamic\", \"description\": \"The base URL of the site\" },\n { \"key\": \"username\", \"type\": \"string\", \"classification\": \"dynamic\", \"description\": \"Login username\" },\n { \"key\": \"timeout\", \"type\": \"number\", \"classification\": \"fixed\", \"description\": \"Page load timeout\", \"default\": 30000 }\n ],\n \"iterations\": [\n {\n \"body_step_index\": 3,\n \"tool_name\": \"screenshot\",\n \"server_id\": \"playwright\",\n \"source_step_index\": 1,\n \"source_field\": \"links\",\n \"varying_keys\": [\"url\", \"screenshot_path\"],\n \"constant_args\": { \"full_page\": true },\n \"key_mappings\": { \"url\": \"href\", \"screenshot_path\": null }\n }\n ],\n \"data_flow\": [\n { \"from_step\": \"trigger\", \"from_field\": \"base_url\", \"to_step\": 0, \"to_field\": \"url\", \"is_session_wire\": false },\n { \"from_step\": 0, \"from_field\": \"page_id\", \"to_step\": 1, \"to_field\": \"page_id\", \"is_session_wire\": true },\n { \"from_step\": 0, \"from_field\": \"_handle\", \"to_step\": 1, \"to_field\": \"_handle\", \"is_session_wire\": true }\n ],\n \"session_fields\": [\"page_id\", \"_handle\"]\n}";
|
|
8
8
|
/**
|
|
9
9
|
* Build the retry hint injected into the compile stage when
|
|
10
10
|
* user feedback or a prior deployment error triggers recompilation.
|