windmill-components 1.687.0 → 1.695.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (264)
  1. package/package/components/ArgInput.svelte +2 -0
  2. package/package/components/AutoscalingConfigEditor.svelte +18 -4
  3. package/package/components/CompareWorkspaces.svelte +206 -157
  4. package/package/components/DatatableSchemaDiff.svelte +2 -2
  5. package/package/components/Dev.svelte +401 -85
  6. package/package/components/EditableSchemaForm.svelte +4 -0
  7. package/package/components/ErrorOrRecoveryHandler.svelte +2 -2
  8. package/package/components/FlowPreviewContent.svelte +32 -30
  9. package/package/components/FlowRestartButton.svelte +143 -61
  10. package/package/components/FlowRestartButton.svelte.d.ts +37 -0
  11. package/package/components/FlowStatusViewer.svelte +15 -1
  12. package/package/components/FlowStatusViewer.svelte.d.ts +10 -2
  13. package/package/components/FlowStatusViewerInner.svelte +1 -2
  14. package/package/components/FlowStatusViewerInner.svelte.d.ts +6 -2
  15. package/package/components/ForkConflictModal.svelte +57 -0
  16. package/package/components/ForkConflictModal.svelte.d.ts +3 -0
  17. package/package/components/GitRepoViewer.svelte +251 -97
  18. package/package/components/InputTransformSchemaForm.svelte +1 -1
  19. package/package/components/InstanceSettings.svelte +36 -16
  20. package/package/components/Login.svelte +113 -28
  21. package/package/components/Login.svelte.d.ts +1 -0
  22. package/package/components/Path.svelte +7 -1
  23. package/package/components/Path.svelte.d.ts +1 -1
  24. package/package/components/RunsPage.svelte +2 -1
  25. package/package/components/S3FilePickerInner.svelte +89 -89
  26. package/package/components/ScriptEditor.svelte +18 -5
  27. package/package/components/ShareModal.svelte.d.ts +1 -1
  28. package/package/components/apps/components/helpers/RunnableComponent.svelte.d.ts +3 -0
  29. package/package/components/apps/components/helpers/executeRunnable.js +2 -1
  30. package/package/components/apps/editor/AppReportsDrawerInner.svelte +1 -1
  31. package/package/components/apps/editor/appPolicy.js +2 -1
  32. package/package/components/apps/editor/commonAppUtils.d.ts +3 -0
  33. package/package/components/apps/editor/inlineScriptsPanel/CacheTtlPopup.svelte +1 -1
  34. package/package/components/apps/editor/inlineScriptsPanel/InlineScriptEditor.svelte +7 -0
  35. package/package/components/apps/editor/inlineScriptsPanel/TagPopup.svelte +49 -0
  36. package/package/components/apps/editor/inlineScriptsPanel/TagPopup.svelte.d.ts +9 -0
  37. package/package/components/apps/inputType.d.ts +1 -0
  38. package/package/components/apps/sharedTypes.d.ts +1 -0
  39. package/package/components/auditLogs/AuditLogsFilters.svelte +8 -3
  40. package/package/components/common/fileUpload/S3ArgInput.svelte +12 -10
  41. package/package/components/common/fileUpload/S3ArgInput.svelte.d.ts +2 -0
  42. package/package/components/copilot/chat/AIChatDisplay.svelte +5 -36
  43. package/package/components/copilot/chat/AIChatInput.svelte +56 -47
  44. package/package/components/copilot/chat/AIChatManager.svelte.js +48 -46
  45. package/package/components/copilot/chat/ContextElementBadge.svelte +6 -4
  46. package/package/components/copilot/chat/app/core.d.ts +12 -20
  47. package/package/components/copilot/chat/app/core.js +103 -160
  48. package/package/components/copilot/chat/app/core.test.js +234 -9
  49. package/package/components/copilot/chat/context.js +44 -0
  50. package/package/components/copilot/chat/flow/FlowAIChat.svelte +5 -3
  51. package/package/components/copilot/chat/flow/core.d.ts +2 -1
  52. package/package/components/copilot/chat/flow/core.js +48 -21
  53. package/package/components/copilot/chat/flow/helperUtils.d.ts +5 -2
  54. package/package/components/copilot/chat/flow/helperUtils.js +33 -1
  55. package/package/components/copilot/chat/flow/helperUtils.test.js +116 -1
  56. package/package/components/copilot/chat/flow/openFlow.json +1 -1
  57. package/package/components/copilot/chat/flow/openFlowZod.gen.js +24 -0
  58. package/package/components/copilot/chat/script/core.js +3 -0
  59. package/package/components/copilot/chat/shared.d.ts +6 -0
  60. package/package/components/copilot/chat/shared.js +22 -1
  61. package/package/components/copilot/chat/shared.test.d.ts +1 -0
  62. package/package/components/copilot/chat/shared.test.js +412 -0
  63. package/package/components/copilot/chat/workspaceTools.d.ts +7 -0
  64. package/package/components/copilot/chat/workspaceTools.js +239 -0
  65. package/package/components/copilot/chat/workspaceToolsZod.gen.d.ts +1295 -0
  66. package/package/components/copilot/chat/workspaceToolsZod.gen.js +424 -0
  67. package/package/components/copilot/lib.js +3 -1
  68. package/package/components/copilot/lib.test.d.ts +1 -0
  69. package/package/components/copilot/lib.test.js +19 -0
  70. package/package/components/copilot/modelConfig.d.ts +3 -0
  71. package/package/components/copilot/modelConfig.js +10 -0
  72. package/package/components/flows/FlowProgressBar.svelte +5 -2
  73. package/package/components/flows/content/FlowModuleComponent.svelte +636 -599
  74. package/package/components/flows/conversations/FlowChatManager.svelte.js +21 -10
  75. package/package/components/flows/flowStateUtils.svelte.js +5 -1
  76. package/package/components/flows/map/FlowModuleSchemaMap.svelte +3 -2
  77. package/package/components/flows/map/FlowModuleSchemaMap.svelte.d.ts +1 -0
  78. package/package/components/git_sync/GitSyncContext.svelte.js +0 -2
  79. package/package/components/graph/FlowGraphV2.svelte +7 -3
  80. package/package/components/graph/FlowGraphV2.svelte.d.ts +1 -0
  81. package/package/components/graph/renderers/triggers/TriggersBadge.svelte +3 -0
  82. package/package/components/home/deploy_ui.js +1 -1
  83. package/package/components/icons/AzureIcon.svelte +12 -25
  84. package/package/components/icons/AzureIcon.svelte.d.ts +3 -2
  85. package/package/components/instanceSettings.js +24 -0
  86. package/package/components/mcp/McpScopeSelector.svelte +119 -9
  87. package/package/components/mcp/McpScopeSelector.svelte.d.ts +1 -0
  88. package/package/components/offboarding-utils.js +2 -0
  89. package/package/components/progressBar/ProgressBar.svelte +9 -5
  90. package/package/components/progressBar/ProgressBar.svelte.d.ts +1 -0
  91. package/package/components/raw_apps/DeleteAfterUsePopup.svelte +52 -0
  92. package/package/components/raw_apps/DeleteAfterUsePopup.svelte.d.ts +9 -0
  93. package/package/components/raw_apps/RawAppBackgroundRunner.svelte +5 -1
  94. package/package/components/raw_apps/RawAppEditor.svelte +159 -102
  95. package/package/components/raw_apps/RawAppInlineScriptEditor.svelte +9 -3
  96. package/package/components/raw_apps/RawAppInlineScriptEditor.svelte.d.ts +2 -1
  97. package/package/components/raw_apps/RawAppInlineScriptRunnable.svelte +1 -0
  98. package/package/components/raw_apps/RawAppInlineScriptRunnable.svelte.d.ts +1 -0
  99. package/package/components/raw_apps/RawAppInputsSpecEditor.svelte +48 -5
  100. package/package/components/raw_apps/RawAppSharedUiDrawer.svelte +129 -0
  101. package/package/components/raw_apps/RawAppSharedUiDrawer.svelte.d.ts +5 -0
  102. package/package/components/raw_apps/RawAppSidebar.svelte +12 -0
  103. package/package/components/raw_apps/dataTableRefUtils.d.ts +7 -0
  104. package/package/components/raw_apps/dataTableRefUtils.js +34 -0
  105. package/package/components/raw_apps/dataTableRefUtils.test.d.ts +1 -0
  106. package/package/components/raw_apps/dataTableRefUtils.test.js +29 -0
  107. package/package/components/raw_apps/rawAppPolicy.d.ts +1 -0
  108. package/package/components/raw_apps/rawAppPolicy.js +17 -2
  109. package/package/components/resources/resourceTypesFilter.d.ts +19 -0
  110. package/package/components/resources/resourceTypesFilter.js +21 -0
  111. package/package/components/restartFromStepPath.d.ts +39 -0
  112. package/package/components/restartFromStepPath.js +89 -0
  113. package/package/components/runs/JobDetailFieldConfig.d.ts +1 -0
  114. package/package/components/runs/JobDetailFieldConfig.js +57 -10
  115. package/package/components/runs/JobDetailHeader.svelte +24 -3
  116. package/package/components/runs/runsFilter.d.ts +1 -1
  117. package/package/components/schema/FlowPropertyEditor.svelte +30 -1
  118. package/package/components/schema/FlowPropertyEditor.svelte.d.ts +5 -2
  119. package/package/components/search/GlobalSearchModal.svelte +8 -1
  120. package/package/components/select/Select.svelte +1 -1
  121. package/package/components/settings/CreateToken.svelte +48 -77
  122. package/package/components/settings/EditTokenScopesModal.svelte +57 -0
  123. package/package/components/settings/EditTokenScopesModal.svelte.d.ts +10 -0
  124. package/package/components/settings/ScopesPicker.svelte +43 -0
  125. package/package/components/settings/ScopesPicker.svelte.d.ts +11 -0
  126. package/package/components/settings/TokensTable.svelte +51 -15
  127. package/package/components/sidebar/OperatorMenu.svelte +6 -0
  128. package/package/components/sidebar/SidebarContent.svelte +11 -1
  129. package/package/components/triggers/AddTriggersButton.svelte +6 -0
  130. package/package/components/triggers/CaptureWrapper.svelte +19 -1
  131. package/package/components/triggers/TriggerEditorToolbar.svelte.d.ts +1 -1
  132. package/package/components/triggers/TriggerModeToggle.svelte +36 -7
  133. package/package/components/triggers/TriggerModeToggle.svelte.d.ts +1 -1
  134. package/package/components/triggers/TriggerSuspendedJobsModal.svelte.d.ts +1 -1
  135. package/package/components/triggers/TriggersEditor.svelte +5 -1
  136. package/package/components/triggers/TriggersWrapper.svelte +10 -0
  137. package/package/components/triggers/azure/AzureCapture.svelte +41 -0
  138. package/package/components/triggers/azure/AzureCapture.svelte.d.ts +44 -0
  139. package/package/components/triggers/azure/AzureTriggerEditor.svelte +20 -0
  140. package/package/components/triggers/azure/AzureTriggerEditor.svelte.d.ts +9 -0
  141. package/package/components/triggers/azure/AzureTriggerEditorConfigSection.svelte +301 -0
  142. package/package/components/triggers/azure/AzureTriggerEditorConfigSection.svelte.d.ts +16 -0
  143. package/package/components/triggers/azure/AzureTriggerEditorInner.svelte +422 -0
  144. package/package/components/triggers/azure/AzureTriggerEditorInner.svelte.d.ts +25 -0
  145. package/package/components/triggers/azure/AzureTriggerPanel.svelte +55 -0
  146. package/package/components/triggers/azure/AzureTriggerPanel.svelte.d.ts +10 -0
  147. package/{dist/sharedUtils/components/triggers/kafka → package/components/triggers/azure}/utils.d.ts +1 -1
  148. package/package/components/triggers/azure/utils.js +56 -0
  149. package/package/components/triggers/email/EmailTriggerEditorInner.svelte +2 -0
  150. package/package/components/triggers/gcp/GcpTriggerEditorInner.svelte +9 -3
  151. package/package/components/triggers/http/RouteEditorInner.svelte +2 -0
  152. package/package/components/triggers/kafka/KafkaTriggerEditorInner.svelte +9 -3
  153. package/package/components/triggers/mqtt/MqttTriggerEditorInner.svelte +9 -3
  154. package/package/components/triggers/nats/NatsTriggerEditorInner.svelte +9 -3
  155. package/package/components/triggers/postgres/PostgresTriggerEditorInner.svelte +9 -3
  156. package/package/components/triggers/schedules/ScheduleEditorInner.svelte +9 -3
  157. package/package/components/triggers/sqs/SqsTriggerEditorInner.svelte +9 -3
  158. package/package/components/triggers/triggers.svelte.d.ts +1 -0
  159. package/package/components/triggers/triggers.svelte.js +23 -1
  160. package/package/components/triggers/utils.js +20 -0
  161. package/package/components/triggers/websocket/WebsocketTriggerEditorInner.svelte +9 -3
  162. package/package/components/triggers.d.ts +1 -1
  163. package/package/components/useNestedRestartState.svelte.d.ts +56 -0
  164. package/package/components/useNestedRestartState.svelte.js +320 -0
  165. package/package/components/workspaceSettings/SharedUiSettings.svelte +175 -0
  166. package/package/components/workspaceSettings/SharedUiSettings.svelte.d.ts +3 -0
  167. package/package/gen/core/OpenAPI.js +1 -1
  168. package/package/gen/schemas.gen.d.ts +294 -24
  169. package/package/gen/schemas.gen.js +297 -25
  170. package/package/gen/services.gen.d.ts +247 -4
  171. package/package/gen/services.gen.js +498 -7
  172. package/package/gen/types.gen.d.ts +990 -37
  173. package/package/hubPaths.json +2 -5
  174. package/package/infer.d.ts +1 -1
  175. package/package/infer.js +37 -51
  176. package/package/mcpEndpointTools.js +60 -4
  177. package/package/script_helpers.js +17 -0
  178. package/package/stores.d.ts +7 -0
  179. package/package/stores.js +6 -1
  180. package/package/system_prompts/index.d.ts +1 -0
  181. package/package/system_prompts/index.js +8 -0
  182. package/package/system_prompts/prompts.d.ts +16 -13
  183. package/package/system_prompts/prompts.js +653 -43
  184. package/package/templates/ci_test_bun.ts.template +8 -0
  185. package/package/templates/ci_test_python.py.template +8 -0
  186. package/package/utils/forkConflict.d.ts +26 -0
  187. package/package/utils/forkConflict.js +56 -0
  188. package/package/utils_deployable.d.ts +164 -121
  189. package/package/utils_deployable.js +61 -11
  190. package/package/utils_workspace_deploy.js +3 -1
  191. package/package.json +29 -5
  192. package/dist/sharedUtils/assets/tokens/colorTokensConfig.d.ts +0 -2
  193. package/dist/sharedUtils/base.d.ts +0 -1
  194. package/dist/sharedUtils/cloud.d.ts +0 -1
  195. package/dist/sharedUtils/common.d.ts +0 -111
  196. package/dist/sharedUtils/components/apps/components/display/dbtable/queries/count.d.ts +0 -5
  197. package/dist/sharedUtils/components/apps/components/display/dbtable/queries/delete.d.ts +0 -5
  198. package/dist/sharedUtils/components/apps/components/display/dbtable/queries/insert.d.ts +0 -5
  199. package/dist/sharedUtils/components/apps/components/display/dbtable/queries/select.d.ts +0 -13
  200. package/dist/sharedUtils/components/apps/components/display/dbtable/queries/update.d.ts +0 -11
  201. package/dist/sharedUtils/components/apps/components/display/dbtable/utils.d.ts +0 -95
  202. package/dist/sharedUtils/components/apps/editor/appPolicy.d.ts +0 -6
  203. package/dist/sharedUtils/components/apps/editor/appUtilsCore.d.ts +0 -7
  204. package/dist/sharedUtils/components/apps/editor/appUtilsS3.d.ts +0 -33
  205. package/dist/sharedUtils/components/apps/editor/commonAppUtils.d.ts +0 -10
  206. package/dist/sharedUtils/components/apps/editor/component/components.d.ts +0 -5371
  207. package/dist/sharedUtils/components/apps/editor/component/default-codes.d.ts +0 -3
  208. package/dist/sharedUtils/components/apps/editor/component/index.d.ts +0 -3
  209. package/dist/sharedUtils/components/apps/editor/component/sets.d.ts +0 -7
  210. package/dist/sharedUtils/components/apps/editor/componentsPanel/componentDefaultProps.d.ts +0 -3
  211. package/dist/sharedUtils/components/apps/gridUtils.d.ts +0 -14
  212. package/dist/sharedUtils/components/apps/inputType.d.ts +0 -178
  213. package/dist/sharedUtils/components/apps/rx.d.ts +0 -29
  214. package/dist/sharedUtils/components/apps/sharedTypes.d.ts +0 -21
  215. package/dist/sharedUtils/components/apps/types.d.ts +0 -274
  216. package/dist/sharedUtils/components/assets/lib.d.ts +0 -25
  217. package/dist/sharedUtils/components/common/alert/model.d.ts +0 -2
  218. package/dist/sharedUtils/components/common/badge/model.d.ts +0 -8
  219. package/dist/sharedUtils/components/common/button/model.d.ts +0 -45
  220. package/dist/sharedUtils/components/common/fileInput/model.d.ts +0 -1
  221. package/dist/sharedUtils/components/common/index.d.ts +0 -24
  222. package/dist/sharedUtils/components/common/skeleton/model.d.ts +0 -21
  223. package/dist/sharedUtils/components/dbTypes.d.ts +0 -14
  224. package/dist/sharedUtils/components/diff_drawer.d.ts +0 -26
  225. package/dist/sharedUtils/components/ducklake.d.ts +0 -1
  226. package/dist/sharedUtils/components/flows/scheduleUtils.d.ts +0 -7
  227. package/dist/sharedUtils/components/icons/index.d.ts +0 -101
  228. package/dist/sharedUtils/components/random_positive_adjetive.d.ts +0 -1
  229. package/dist/sharedUtils/components/raw_apps/rawAppPolicy.d.ts +0 -10
  230. package/dist/sharedUtils/components/raw_apps/utils.d.ts +0 -15
  231. package/dist/sharedUtils/components/triggers/email/utils.d.ts +0 -4
  232. package/dist/sharedUtils/components/triggers/gcp/utils.d.ts +0 -2
  233. package/dist/sharedUtils/components/triggers/http/utils.d.ts +0 -11
  234. package/dist/sharedUtils/components/triggers/mqtt/utils.d.ts +0 -2
  235. package/dist/sharedUtils/components/triggers/nats/utils.d.ts +0 -2
  236. package/dist/sharedUtils/components/triggers/postgres/utils.d.ts +0 -8
  237. package/dist/sharedUtils/components/triggers/sqs/utils.d.ts +0 -2
  238. package/dist/sharedUtils/components/triggers/triggers.svelte.d.ts +0 -32
  239. package/dist/sharedUtils/components/triggers/utils.d.ts +0 -80
  240. package/dist/sharedUtils/components/triggers/websocket/utils.d.ts +0 -2
  241. package/dist/sharedUtils/components/triggers.d.ts +0 -20
  242. package/dist/sharedUtils/gen/core/ApiError.d.ts +0 -10
  243. package/dist/sharedUtils/gen/core/ApiRequestOptions.d.ts +0 -13
  244. package/dist/sharedUtils/gen/core/ApiResult.d.ts +0 -7
  245. package/dist/sharedUtils/gen/core/CancelablePromise.d.ts +0 -26
  246. package/dist/sharedUtils/gen/core/OpenAPI.d.ts +0 -27
  247. package/dist/sharedUtils/gen/core/request.d.ts +0 -29
  248. package/dist/sharedUtils/gen/index.d.ts +0 -6
  249. package/dist/sharedUtils/gen/schemas.gen.d.ts +0 -7036
  250. package/dist/sharedUtils/gen/services.gen.d.ts +0 -6047
  251. package/dist/sharedUtils/gen/types.gen.d.ts +0 -21881
  252. package/dist/sharedUtils/history.svelte.d.ts +0 -9
  253. package/dist/sharedUtils/hub.d.ts +0 -49
  254. package/dist/sharedUtils/jsr.json +0 -6
  255. package/dist/sharedUtils/lib.d.ts +0 -5
  256. package/dist/sharedUtils/lib.es.js +0 -1588
  257. package/dist/sharedUtils/package.json +0 -12
  258. package/dist/sharedUtils/schema.d.ts +0 -3
  259. package/dist/sharedUtils/stores.d.ts +0 -97
  260. package/dist/sharedUtils/svelte5Utils.svelte.d.ts +0 -80
  261. package/dist/sharedUtils/toast.d.ts +0 -8
  262. package/dist/sharedUtils/utils.d.ts +0 -265
  263. package/package/components/copilot/chat/flow/openFlowZod.js +0 -24
  264. /package/package/components/copilot/chat/flow/{openFlowZod.d.ts → openFlowZod.gen.d.ts} +0 -0
@@ -1,29 +1,32 @@
  export declare const SCRIPT_BASE = "# Windmill Script Writing Guide\n\n## General Principles\n\n- Scripts must export a main function (do not call it)\n- Libraries are installed automatically - do not show installation instructions\n- Credentials and configuration are stored in resources and passed as parameters\n- The windmill client (`wmill`) provides APIs for interacting with the platform\n\n## Function Naming\n\n- Main function: `main` (or `preprocessor` for preprocessor scripts)\n- Must be async for TypeScript variants\n\n## Return Values\n\n- Scripts can return any JSON-serializable value\n- Return values become available to subsequent flow steps via `results.step_id`\n\n## Preprocessor Scripts\n\nPreprocessor scripts process raw trigger data from various sources (webhook, custom HTTP route, SQS, WebSocket, Kafka, NATS, MQTT, Postgres, or email) before passing it to the flow. This separates the trigger logic from the flow logic and keeps the auto-generated UI clean.\n\nThe returned object determines the parameter values passed to the flow.\ne.g., `{ b: 1, a: 2 }` calls the flow with `a = 2` and `b = 1`, assuming the flow has two inputs called `a` and `b`.\n\nThe preprocessor receives a single parameter called `event`.\n";
- export declare const FLOW_BASE = "# Windmill Flow Building Guide\n\n## CLI Commands\n\nCreate a folder ending with `__flow` and add a `flow.yaml` file with the flow definition.\nFor rawscript modules, use `!inline path/to/script.ts` for the content key. Inline script files should NOT include `.inline_script.` in their names (e.g. use `a.ts`, not `a.inline_script.ts`).\nAfter writing, tell the user they can run:\n- `wmill generate-metadata` - Generate lock files for the flow you modified\n- `wmill sync push` - Deploy to Windmill\n\nDo NOT run these commands yourself. Instead, inform the user that they should run them.\n\n## OpenFlow Schema\n\nThe OpenFlow schema (openflow.openapi.yaml) is the source of truth for flow structure. Refer to OPENFLOW_SCHEMA for the complete type definitions.\n\n## Reserved Module IDs\n\n- `failure` - Reserved for failure handler module\n- `preprocessor` - Reserved for preprocessor module\n- `Input` - Reserved for flow input reference\n\n## Hard Structural Rules\n\nThese are strict Windmill schema rules. Follow them exactly.\n\n- `value.modules` is only for normal sequential steps\n- `value.preprocessor_module` and `value.failure_module` are special top-level fields inside `value`, not entries in `value.modules`\n- If a flow needs a preprocessor, create `value.preprocessor_module` with `id: preprocessor`\n- If a flow needs a failure handler, create `value.failure_module` with `id: failure`\n- Do NOT create regular modules inside `value.modules` named `preprocessor` or `failure`\n- `preprocessor_module` and `failure_module` only support `script` or `rawscript`\n- `preprocessor_module` runs before normal modules and cannot reference `results.*`\n- `failure_module` can use the `error` object with `error.message`, `error.step_id`, `error.name`, and `error.stack`\n\nCorrect shape:\n\n```yaml\nvalue:\n preprocessor_module:\n id: preprocessor\n value:\n type: rawscript\n ...\n failure_module:\n id: failure\n value:\n type: rawscript\n ...\n modules:\n - id: process_event\n value:\n type: rawscript\n ...\n```\n\nIncorrect shape:\n\n```yaml\nvalue:\n modules:\n - id: preprocessor\n ...\n - id: process_event\n ...\n - id: failure\n ...\n```\n\n## Module ID Rules\n\n- Must be unique across the entire flow\n- Use underscores, not spaces (e.g., `fetch_data` not `fetch data`)\n- Use descriptive names that reflect the step's purpose\n\n## Common Mistakes to Avoid\n\n- Missing `input_transforms` - Rawscript parameters won't receive values without them\n- Referencing future steps - `results.step_id` only works for steps that execute before the current one\n- Duplicate module IDs - Each module ID must be unique in the flow\n\n## Data Flow Between Steps\n\n- `flow_input.property` - Access flow input parameters\n- `results.step_id` - Access output from a previous step only when that step result is in scope\n- `results.step_id.property` - Access specific property from a previous step output only when that step result is in scope\n- `flow_input.iter.value` - Current iteration value when inside a loop (`forloopflow` or `whileloopflow`)\n- `flow_input.iter.index` - Current loop index when inside a loop (`forloopflow` or `whileloopflow`)\n\n## Loop Structure Rules\n\n- For `whileloopflow`, use module-level `stop_after_if` on the loop module itself when the loop should stop after an iteration result\n- Do NOT put `stop_after_if` inside `value` of a `whileloopflow`\n- `stop_after_all_iters_if` is for checks after the whole loop finishes, not the normal per-iteration break condition\n- When a `whileloopflow` carries state forward between iterations, use `flow_input.iter.value` as the current loop value and provide an explicit first-iteration fallback when needed\n- Use `flow_input.iter.index` only when the loop logic is truly based on the iteration index, not as a replacement for the current loop value\n- If the user asks for a final scalar/object after a loop, add a normal step after the loop that extracts the final value from the loop result instead of returning the whole loop result array\n\nCorrect `whileloopflow` shape:\n\n```yaml\n- id: loop_until_done\n stop_after_if:\n expr: result.done === true\n skip_if_stopped: false\n value:\n type: whileloopflow\n skip_failures: false\n modules:\n - id: advance_state\n value:\n type: rawscript\n input_transforms:\n state:\n type: javascript\n expr: flow_input.iter && flow_input.iter.value !== undefined ? flow_input.iter.value : flow_input.initial_state\n- id: return_final_state\n value:\n type: rawscript\n input_transforms:\n final_state:\n type: javascript\n expr: results.loop_until_done[results.loop_until_done.length - 1]\n```\n\nIncorrect `whileloopflow` patterns:\n\n```yaml\n- id: loop_until_done\n value:\n type: whileloopflow\n stop_after_if:\n expr: result.done === true\n```\n\n```yaml\ninput_transforms:\n state:\n type: javascript\n expr: flow_input.iter.index\n```\n\n```yaml\ninput_transforms:\n final_state:\n type: javascript\n expr: results.loop_until_done\n```\n\n## Approval / Suspend Structure\n\n- `suspend` belongs on the flow module object itself, as a sibling of `id` and `value`\n- Never put `suspend` inside `value`\n\nCorrect shape:\n\n```yaml\n- id: request_approval\n suspend:\n required_events: 1\n resume_form:\n schema:\n type: object\n properties:\n comment:\n type: string\n required: [comment]\n value:\n type: identity\n```\n\nIncorrect shape:\n\n```yaml\n- id: request_approval\n value:\n type: rawscript\n suspend:\n required_events: 1\n```\n\n## Branch Result Scope Rules\n\n- Inside a branch, you may reference earlier outer steps and earlier steps in the same branch\n- Outside a `branchone`, do NOT reference ids of steps that only exist inside its branches or default branch. Use `results.<branchone_module_id>` instead\n- Outside a `branchall`, do NOT reference ids of steps inside its branches. Use `results.<branchall_module_id>` instead\n- If downstream steps need a stable shape after a branch, make each branch return the same fields\n- When needed, add a normalization step immediately after the branch and consume `results.<branch_module_id>` there\n\nCorrect after `branchone`:\n\n```yaml\n- id: route_order\n value:\n type: branchone\n ...\n- id: send_confirmation\n value:\n input_transforms:\n routed:\n type: javascript\n expr: results.route_order\n```\n\nIncorrect after `branchone`:\n\n```yaml\nexpr: results.create_shipment\nexpr: results.create_backorder\n```\n\nCorrect after `branchall`:\n\n```yaml\n- id: enrich_parallel\n value:\n type: branchall\n parallel: true\n ...\n- id: combine_data\n value:\n input_transforms:\n enrichments:\n type: javascript\n expr: results.enrich_parallel\n```\n\n## Input Transforms\n\nEvery rawscript module needs `input_transforms` to map function parameters to values:\n\nStatic transform (fixed value):\n{\"param_name\": {\"type\": \"static\", \"value\": \"fixed_string\"}}\n\nJavaScript transform (dynamic expression):\n{\"param_name\": {\"type\": \"javascript\", \"expr\": \"results.previous_step.data\"}}\n\n## Resource References\n\n- For flow inputs: Use type `\"object\"` with format `\"resource-{type}\"` (e.g., `\"resource-postgresql\"`)\n- For step inputs: Use static value `\"$res:path/to/resource\"`\n\n## Final Structural Self-Check\n\nBefore finalizing a flow, verify:\n\n- any preprocessor is in `value.preprocessor_module`\n- any failure handler is in `value.failure_module`\n- any approval step has module-level `suspend`\n- no downstream step references inner branch step ids from outside the branch\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\nTo accept an S3 object as flow input:\n\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"file\": {\n \"type\": \"object\",\n \"format\": \"resource-s3_object\",\n \"description\": \"File to process\"\n }\n }\n}\n```\n\n## Using Resources in Flows\n\nOn Windmill, credentials and configuration are stored in resources. Resource types define the format of the resource.\n\n### As Flow Input\n\nIn the flow schema, set the property type to `\"object\"` with format `\"resource-{type}\"`:\n\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"database\": {\n \"type\": \"object\",\n \"format\": \"resource-postgresql\",\n \"description\": \"Database connection\"\n }\n }\n}\n```\n\n### As Step Input (Static Reference)\n\nReference a specific resource using `$res:` prefix:\n\n```json\n{\n \"database\": {\n \"type\": \"static\",\n \"value\": \"$res:f/folder/my_database\"\n }\n}\n```\n";
+ export declare const FLOW_BASE = "# Windmill Flow Building Guide\n\n## OpenFlow Schema\n\nThe OpenFlow schema (openflow.openapi.yaml) is the source of truth for flow structure. Refer to OPENFLOW_SCHEMA for the complete type definitions.\n\n## Reserved Module IDs\n\n- `failure` - Reserved for failure handler module\n- `preprocessor` - Reserved for preprocessor module\n- `Input` - Reserved for flow input reference\n\n## Hard Structural Rules\n\nThese are strict Windmill schema rules. Follow them exactly.\n\n- `value.modules` is only for normal sequential steps\n- `value.preprocessor_module` and `value.failure_module` are special top-level fields inside `value`, not entries in `value.modules`\n- If a flow needs a preprocessor, create `value.preprocessor_module` with `id: preprocessor`\n- If a flow needs a failure handler, create `value.failure_module` with `id: failure`\n- Do NOT create regular modules inside `value.modules` named `preprocessor` or `failure`\n- `preprocessor_module` and `failure_module` only support `script` or `rawscript`\n- `preprocessor_module` runs before normal modules and cannot reference `results.*`\n- `failure_module` can use the `error` object with `error.message`, `error.step_id`, `error.name`, and `error.stack`\n\nCorrect shape:\n\n```yaml\nvalue:\n preprocessor_module:\n id: preprocessor\n value:\n type: rawscript\n ...\n failure_module:\n id: failure\n value:\n type: rawscript\n ...\n modules:\n - id: process_event\n value:\n type: rawscript\n ...\n```\n\nIncorrect shape:\n\n```yaml\nvalue:\n modules:\n - id: preprocessor\n ...\n - id: process_event\n ...\n - id: failure\n ...\n```\n\n## Module ID Rules\n\n- Must be unique across the entire flow\n- Use underscores, not spaces (e.g., `fetch_data` not `fetch data`)\n- Use descriptive names that reflect the step's purpose\n\n## Common Mistakes to Avoid\n\n- Missing `input_transforms` - Rawscript parameters won't receive values without them\n- Referencing future steps - `results.step_id` only works for steps that execute before the current one\n- Duplicate module IDs - Each module ID must be unique in the flow\n\n## Data Flow Between Steps\n\n- `flow_input.property` - Access flow input parameters\n- `results.step_id` - Access output from a previous step only when that step result is in scope\n- `results.step_id.property` - Access specific property from a previous step output only when that step result is in scope\n- `flow_input.iter.value` - Current iteration value when inside a loop (`forloopflow` or `whileloopflow`)\n- `flow_input.iter.index` - Current loop index when inside a loop (`forloopflow` or `whileloopflow`)\n\n## Loop Structure Rules\n\n- For `whileloopflow`, use module-level `stop_after_if` on the loop module itself when the loop should stop after an iteration result\n- Do NOT put `stop_after_if` inside `value` of a `whileloopflow`\n- `stop_after_all_iters_if` is for checks after the whole loop finishes, not the normal per-iteration break condition\n- When a `whileloopflow` carries state forward between iterations, use `flow_input.iter.value` as the current loop value and provide an explicit first-iteration fallback when needed\n- Use `flow_input.iter.index` only when the loop logic is truly based on the iteration index, not as a replacement for the current loop value\n- If the user asks for a final scalar/object after a loop, add a normal step after the loop that extracts the final value from the loop result instead of returning the whole loop result array\n\nCorrect `whileloopflow` shape:\n\n```yaml\n- id: loop_until_done\n stop_after_if:\n expr: result.done === true\n skip_if_stopped: false\n value:\n type: whileloopflow\n skip_failures: false\n modules:\n - id: advance_state\n value:\n type: rawscript\n input_transforms:\n state:\n type: javascript\n expr: flow_input.iter && flow_input.iter.value !== undefined ? flow_input.iter.value : flow_input.initial_state\n- id: return_final_state\n value:\n type: rawscript\n input_transforms:\n final_state:\n type: javascript\n expr: results.loop_until_done[results.loop_until_done.length - 1]\n```\n\nIncorrect `whileloopflow` patterns:\n\n```yaml\n- id: loop_until_done\n value:\n type: whileloopflow\n stop_after_if:\n expr: result.done === true\n```\n\n```yaml\ninput_transforms:\n state:\n type: javascript\n expr: flow_input.iter.index\n```\n\n```yaml\ninput_transforms:\n final_state:\n type: javascript\n expr: results.loop_until_done\n```\n\n## Approval / Suspend Structure\n\n- `suspend` belongs on the flow module object itself, as a sibling of `id` and `value`\n- Never put `suspend` inside `value`\n\nCorrect shape:\n\n```yaml\n- id: request_approval\n suspend:\n required_events: 1\n resume_form:\n schema:\n type: object\n properties:\n comment:\n type: string\n required: [comment]\n value:\n type: identity\n```\n\nIncorrect shape:\n\n```yaml\n- id: request_approval\n value:\n type: rawscript\n suspend:\n required_events: 1\n```\n\n## Branch Result Scope Rules\n\n- Inside a branch, you may reference earlier outer steps and earlier steps in the same branch\n- Outside a `branchone`, do NOT reference ids of steps that only exist inside its branches or default branch. Use `results.<branchone_module_id>` instead\n- Outside a `branchall`, do NOT reference ids of steps inside its branches. Use `results.<branchall_module_id>` instead\n- If downstream steps need a stable shape after a branch, make each branch return the same fields\n- When needed, add a normalization step immediately after the branch and consume `results.<branch_module_id>` there\n\nCorrect after `branchone`:\n\n```yaml\n- id: route_order\n value:\n type: branchone\n ...\n- id: send_confirmation\n value:\n input_transforms:\n routed:\n type: javascript\n expr: results.route_order\n```\n\nIncorrect after `branchone`:\n\n```yaml\nexpr: results.create_shipment\nexpr: results.create_backorder\n```\n\nCorrect after `branchall`:\n\n```yaml\n- id: enrich_parallel\n value:\n type: branchall\n parallel: true\n ...\n- id: combine_data\n value:\n input_transforms:\n enrichments:\n type: javascript\n expr: results.enrich_parallel\n```\n\n## Input Transforms\n\nEvery rawscript module needs `input_transforms` to map function parameters to values:\n\nStatic transform (fixed value):\n{\"param_name\": {\"type\": \"static\", \"value\": \"fixed_string\"}}\n\nJavaScript transform (dynamic expression):\n{\"param_name\": {\"type\": \"javascript\", \"expr\": \"results.previous_step.data\"}}\n\n## Resource References\n\n- For flow inputs: Use type `\"object\"` with format `\"resource-{type}\"` (e.g., `\"resource-postgresql\"`)\n- For step inputs: Use static value `\"$res:path/to/resource\"`\n\n## Final Structural Self-Check\n\nBefore finalizing a flow, verify:\n\n- any preprocessor is in `value.preprocessor_module`\n- any failure handler is in `value.failure_module`\n- any approval step has module-level `suspend`\n- no downstream step references inner branch step ids from outside the branch\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\nTo accept an S3 object as flow input:\n\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"file\": {\n \"type\": \"object\",\n \"format\": \"resource-s3_object\",\n \"description\": \"File to process\"\n }\n }\n}\n```\n\n## Using Resources in Flows\n\nOn Windmill, credentials and configuration are stored in resources. Resource types define the format of the resource.\n\n### As Flow Input\n\nIn the flow schema, set the property type to `\"object\"` with format `\"resource-{type}\"`:\n\n```json\n{\n \"type\": \"object\",\n \"properties\": {\n \"database\": {\n \"type\": \"object\",\n \"format\": \"resource-postgresql\",\n \"description\": \"Database connection\"\n }\n }\n}\n```\n\n### As Step Input (Static Reference)\n\nReference a specific resource using `$res:` prefix:\n\n```json\n{\n \"database\": {\n \"type\": \"static\",\n \"value\": \"$res:f/folder/my_database\"\n }\n}\n```\n";
+ export declare const WORKFLOW_AS_CODE_BASE = "# Windmill Workflow-as-Code Writing Guide\n\n## Scope\n\nUse this guide when writing or modifying Windmill Workflow-as-Code (WAC) scripts.\nWAC is authored as a Windmill script and deployed with the normal script workflow. It is not an OpenFlow YAML flow.\n\nSupported WAC authoring targets:\n- TypeScript scripts that import from `windmill-client`\n- Python 3 scripts that import from `wmill`\n\n## File Shape\n\nTypeScript:\n\n```typescript\nimport {\n task,\n taskScript,\n taskFlow,\n step,\n sleep,\n waitForApproval,\n getResumeUrls,\n parallel,\n workflow,\n} from \"windmill-client\";\n\nconst process = task(async (x: string): Promise<string> => {\n return `processed: ${x}`;\n});\n\nexport const main = workflow(async (x: string) => {\n const result = await process(x);\n return { result };\n});\n```\n\nPython:\n\n```python\nfrom wmill import task, task_script, task_flow, step, sleep, wait_for_approval, get_resume_urls, parallel, workflow\n\n@task()\nasync def process(x: str) -> str:\n return f\"processed: {x}\"\n\n@workflow\nasync def main(x: str):\n result = await process(x)\n return {\"result\": result}\n```\n\nRules:\n- Do not call `main`.\n- TypeScript should export the workflow entrypoint, preferably `export const main = workflow(async (...) => { ... })`.\n- Python must use `@workflow` on an async top-level function, usually `main`.\n- Define task functions and `taskScript`/`task_script` or `taskFlow`/`task_flow` assignments at module top level with stable names.\n- Use the exact SDK names. Do not alias `workflow`, `task`, `taskScript`, `taskFlow`, `step`, `sleep`, `waitForApproval`, `task_script`, `task_flow`, or `wait_for_approval`; the WAC parser recognizes these names directly.\n\n## Checkpoint And Replay Model\n\nThe parent workflow may rerun from the top after any suspension, retry, approval, or child task completion. Completed durable steps are replayed from the checkpoint.\n\nPut every side effect or non-deterministic value behind a durable WAC boundary:\n- Use `task()` / `@task()` for substantial work that should run as its own child job.\n- Use `taskScript()` / `task_script()` for an existing script or a relative module file.\n- Use `taskFlow()` / `task_flow()` for an existing Windmill flow.\n- Use `step(name, fn)` for lightweight inline work whose result must be checkpointed.\n- Use `sleep(seconds)` for server-side sleeps that do not hold a worker.\n- Use `waitForApproval()` / `wait_for_approval()` for external approval suspension.\n\nNever put API calls, database writes, notifications, random values, timestamps, or irreversible changes directly in the top-level workflow body. The workflow body can be rerun. Put those operations in a task or in `step()`.\n\nBranching on task or step results is safe because those results are checkpointed. Branching on current time, random data, environment reads, or external state is unsafe unless the value is first captured with `step()`.\n\n## Tasks\n\nUse `task()` / `@task()` for inline functions that become workflow steps:\n\n```typescript\nconst enrich = task(async (customerId: string) => {\n return await fetchCustomer(customerId);\n});\n```\n\n```python\n@task(timeout=600, tag=\"etl\")\nasync def enrich(customer_id: str):\n return await fetch_customer(customer_id)\n```\n\nIn TypeScript, prefer assigning each task to a named top-level const. In Python, prefer top-level async functions decorated with `@task()` or `@task`.\n\nFor existing scripts:\n\n```typescript\nconst helper = taskScript(\"./helper.ts\");\nconst existing = taskScript(\"f/data/extract\", { timeout: 600 });\nconst value = await helper({ input: x });\n```\n\n```python\nhelper = task_script(\"./helper.py\")\nexisting = task_script(\"f/data/extract\", timeout=600)\nvalue = await helper(input=x)\n```\n\nFor existing flows:\n\n```typescript\nconst pipeline = taskFlow(\"f/etl/pipeline\");\nconst output = await pipeline({ input: data });\n```\n\n```python\npipeline = task_flow(\"f/etl/pipeline\")\noutput = await pipeline(input=data)\n```\n\n## Inline Steps\n\nUse `step()` for lightweight inline values that must not change during replay:\n\n```typescript\nconst urls = await step(\"get_urls\", () => getResumeUrls());\nconst startedAt = await step(\"started_at\", () => new Date().toISOString());\n```\n\n```python\nurls = await step(\"get_urls\", lambda: get_resume_urls())\n```\n\nUse stable, descriptive step names. Do not generate step names dynamically.\n\n## Parallelism\n\nTo run independent work in parallel, start task promises/coroutines before awaiting them together:\n\n```typescript\nconst [a, b] = await Promise.all([process(\"a\"), process(\"b\")]);\nconst many = await parallel(items, process, { concurrency: 5 });\n```\n\n```python\nimport asyncio\n\na, b = await asyncio.gather(process(\"a\"), process(\"b\"))\nmany = await parallel(items, process, concurrency=5)\n```\n\nOnly parallelize independent steps. Do not read the result of a task before it is awaited.\n\n## Approvals\n\nGenerate resume URLs inside `step()` before sending them:\n\n```typescript\nconst urls = await step(\"get_urls\", () => getResumeUrls());\nawait step(\"notify\", () => sendApprovalEmail(urls.approvalPage));\nconst approval = await waitForApproval({ timeout: 3600 });\n```\n\n```python\nurls = await step(\"get_urls\", lambda: get_resume_urls())\nawait step(\"notify\", lambda: send_approval_email(urls[\"approvalPage\"]))\napproval = await wait_for_approval(timeout=3600)\n```\n\n`selfApproval: false` and `self_approval=False` are Enterprise-only approval behavior. Do not use them unless the user asks for that behavior.\n\n## Error Handling\n\nLet task errors fail the workflow unless the user asks for recovery logic.\n\nPython: `except Exception` is safe around WAC calls because internal suspension inherits from `BaseException`. Avoid bare `except:` in workflow code. If the user asks for recovery logic around failed child work, catch `TaskError` from `wmill` for task failures.\n\nTypeScript: avoid broad `try/catch` around WAC SDK calls. The SDK uses an internal suspension error during initial dispatch; catching it can break workflow suspension. If a broad catch is unavoidable, rethrow internal suspension errors before handling business errors.\n";
  export declare const FLOW_CHAT_SPECIAL_MODULES = "## Special Modules\n\n- Use `set_preprocessor_module` to add, replace, or remove the top-level `value.preprocessor_module`\n- Use `set_failure_module` to add, replace, or remove the top-level `value.failure_module`\n- Use `set_flow_json` only when you are replacing the whole flow, including normal modules and optional special modules\n\n**Example - Update only the special modules:**\n```javascript\nset_preprocessor_module({\n module: JSON.stringify({\n id: \"preprocessor\",\n value: {\n type: \"rawscript\",\n language: \"bun\",\n content: \"export async function preprocessor(payload: string) { const trimmed = payload.trim(); if (!trimmed) { throw new Error('payload must not be empty'); } return { payload: trimmed }; }\",\n input_transforms: {\n payload: { type: \"javascript\", expr: \"flow_input.payload\" }\n }\n }\n })\n})\n\nset_failure_module({\n module: JSON.stringify({\n id: \"failure\",\n value: {\n type: \"rawscript\",\n language: \"bun\",\n content: \"export async function main(message: string, name: string, step_id: string) { return { message, name, step_id }; }\",\n input_transforms: {\n message: { type: \"javascript\", expr: \"error.message\" },\n name: { type: \"javascript\", expr: \"error.name\" },\n step_id: { type: \"javascript\", expr: \"error.step_id\" }\n }\n }\n })\n})\n```\n";
  export declare const SDK_TYPESCRIPT = "# TypeScript SDK (windmill-client)\n\nImport: import * as wmill from 'windmill-client'\n\nworkerHasInternalServer(): boolean\n\n/**\n * Initialize the Windmill client with authentication token and base URL\n * @param token - Authentication token (defaults to WM_TOKEN env variable)\n * @param baseUrl - API base URL (defaults to BASE_INTERNAL_URL or BASE_URL env variable)\n */\nsetClient(token?: string, baseUrl?: string): void\n\n/**\n * Create a client configuration from env variables\n * @returns client configuration\n */\ngetWorkspace(): string\n\n/**\n * Get a resource value by path\n * @param path path of the resource, default to internal state path\n * @param undefinedIfEmpty if the resource does not exist, return undefined instead of throwing an error\n * @returns resource value\n */\nasync getResource(path?: string, undefinedIfEmpty?: boolean): Promise<any>\n\n/**\n * Get the true root job id\n * @param jobId job id to get the root job id from (default to current job)\n * @returns root job id\n */\nasync getRootJobId(jobId?: string): Promise<string>\n\n/**\n * @deprecated Use runScriptByPath or runScriptByHash instead\n */\nasync runScript(path: string | null = null, hash_: string | null = null, args: Record<string, any> | null = null, verbose: boolean = false): Promise<any>\n\n/**\n * Run a script synchronously by its path and wait for the result\n * @param path - Script path in Windmill\n * @param args - Arguments to pass to the script\n * @param verbose - Enable verbose logging\n * @returns Script execution result\n */\nasync runScriptByPath(path: string, args: Record<string, any> | null = null, verbose: boolean = false): Promise<any>\n\n/**\n * Run a script synchronously by its hash and wait for the result\n * @param hash_ - Script hash in Windmill\n * @param args - Arguments to pass to the script\n * @param verbose - Enable verbose logging\n * @returns Script execution result\n */\nasync runScriptByHash(hash_: string, args: Record<string, any> | null = null, verbose: boolean = false): Promise<any>\n\n/**\n * Append a text to the result stream\n * @param text text to append to the result stream\n */\nappendToResultStream(text: string): void\n\n/**\n * Stream to the result stream\n * @param stream stream to stream to the result stream\n */\nasync streamResult(stream: AsyncIterable<string>): Promise<void>\n\n/**\n * Run a flow synchronously by its path and wait for the result\n * @param path - Flow path in Windmill\n * @param args - Arguments to pass to the flow\n * @param verbose - Enable verbose logging\n * @returns Flow execution result\n */\nasync runFlow(path: string | null = null, args: Record<string, any> | null = null, verbose: boolean = false): Promise<any>\n\n/**\n * Wait for a job to complete and return its result\n * @param jobId - ID of the job to wait for\n * @param verbose - Enable verbose logging\n * @returns Job result when completed\n */\nasync waitJob(jobId: string, verbose: boolean = false): Promise<any>\n\n/**\n * Get the result of a completed job\n * @param jobId - ID of the completed job\n * @returns Job result\n */\nasync getResult(jobId: string): Promise<any>\n\n/**\n * Get the result of a job if completed, or its current status\n * @param jobId - ID of the job\n * @returns Object with started, completed, success, and result properties\n */\nasync getResultMaybe(jobId: string): Promise<any>\n\n/**\n * @deprecated Use runScriptByPathAsync or runScriptByHashAsync instead\n */\nasync runScriptAsync(path: string | null, 
hash_: string | null, args: Record<string, any> | null, scheduledInSeconds: number | null = null): Promise<string>\n\n/**\n * Run a script asynchronously by its path\n * @param path - Script path in Windmill\n * @param args - Arguments to pass to the script\n * @param scheduledInSeconds - Schedule execution for a future time (in seconds)\n * @returns Job ID of the created job\n */\nasync runScriptByPathAsync(path: string, args: Record<string, any> | null = null, scheduledInSeconds: number | null = null): Promise<string>\n\n/**\n * Run a script asynchronously by its hash\n * @param hash_ - Script hash in Windmill\n * @param args - Arguments to pass to the script\n * @param scheduledInSeconds - Schedule execution for a future time (in seconds)\n * @returns Job ID of the created job\n */\nasync runScriptByHashAsync(hash_: string, args: Record<string, any> | null = null, scheduledInSeconds: number | null = null): Promise<string>\n\n/**\n * Run a flow asynchronously by its path\n * @param path - Flow path in Windmill\n * @param args - Arguments to pass to the flow\n * @param scheduledInSeconds - Schedule execution for a future time (in seconds)\n * @param doNotTrackInParent - If false, tracks state in parent job (only use when fully awaiting the job)\n * @returns Job ID of the created job\n */\nasync runFlowAsync(path: string | null, args: Record<string, any> | null, scheduledInSeconds: number | null = null, // can only be set to false if this the job will be fully await and not concurrent with any other job // as otherwise the child flow and its own child will store their state in the parent job which will // lead to incorrectness and failures doNotTrackInParent: boolean = true): Promise<string>\n\n/**\n * Resolve a resource value in case the default value was picked because the input payload was undefined\n * @param obj resource value or path of the resource under the format `$res:path`\n * @returns resource value\n */\nasync resolveDefaultResource(obj: any): Promise<any>\n\n/**\n * Get the state file path from environment variables\n * @returns State path string\n */\ngetStatePath(): string\n\n/**\n * Set a resource value by path\n * @param path path of the resource to set, default to state path\n * @param value new value of the resource to set\n * @param initializeToTypeIfNotExist if the resource does not exist, initialize it with this type\n */\nasync setResource(value: any, path?: string, initializeToTypeIfNotExist?: string): Promise<void>\n\n/**\n * Set the state\n * @param state state to set\n * @deprecated use setState instead\n */\nasync setInternalState(state: any): Promise<void>\n\n/**\n * Set the state\n * @param state state to set\n * @param path Optional state resource path override. Defaults to `getStatePath()`.\n */\nasync setState(state: any, path?: string): Promise<void>\n\n/**\n * Set the progress\n * Progress cannot go back and limited to 0% to 99% range\n * @param percent Progress to set in %\n * @param jobId? Job to set progress for\n */\nasync setProgress(percent: number, jobId?: any): Promise<void>\n\n/**\n * Get the progress\n * @param jobId? 
Job to get progress from\n * @returns Optional clamped between 0 and 100 progress value\n */\nasync getProgress(jobId?: any): Promise<number | null>\n\n/**\n * Set a flow user state\n * @param key key of the state\n * @param value value of the state\n */\nasync setFlowUserState(key: string, value: any, errorIfNotPossible?: boolean): Promise<void>\n\n/**\n * Get a flow user state\n * @param path path of the variable\n */\nasync getFlowUserState(key: string, errorIfNotPossible?: boolean): Promise<any>\n\n/**\n * Get the internal state\n * @deprecated use getState instead\n */\nasync getInternalState(): Promise<any>\n\n/**\n * Get the state shared across executions\n * @param path Optional state resource path override. Defaults to `getStatePath()`.\n */\nasync getState(path?: string): Promise<any>\n\n/**\n * Get a variable by path\n * @param path path of the variable\n * @returns variable value\n */\nasync getVariable(path: string): Promise<string>\n\n/**\n * Set a variable by path, create if not exist\n * @param path path of the variable\n * @param value value of the variable\n * @param isSecretIfNotExist if the variable does not exist, create it as secret or not (default: false)\n * @param descriptionIfNotExist if the variable does not exist, create it with this description (default: \"\")\n */\nasync setVariable(path: string, value: string, isSecretIfNotExist?: boolean, descriptionIfNotExist?: string): Promise<void>\n\n/**\n * Build a PostgreSQL connection URL from a database resource\n * @param path - Path to the database resource\n * @returns PostgreSQL connection URL string\n */\nasync databaseUrlFromResource(path: string): Promise<string>\n\nasync polarsConnectionSettings(s3_resource_path: string | undefined): Promise<any>\n\nasync duckdbConnectionSettings(s3_resource_path: string | undefined): Promise<any>\n\n/**\n * Get S3 client settings from a resource or workspace default\n * @param s3_resource_path - Path to S3 resource (uses workspace default if undefined)\n * @returns S3 client configuration settings\n */\nasync denoS3LightClientSettings(s3_resource_path: string | undefined): Promise<DenoS3LightClientSettings>\n\n/**\n * Load the content of a file stored in S3. If the s3ResourcePath is undefined, it will default to the workspace S3 resource.\n * \n * ```typescript\n * let fileContent = await wmill.loadS3FileContent(inputFile)\n * // if the file is a raw text file, it can be decoded and printed directly:\n * const text = new TextDecoder().decode(fileContentStream)\n * console.log(text);\n * ```\n */\nasync loadS3File(s3object: S3Object, s3ResourcePath: string | undefined = undefined): Promise<Uint8Array | undefined>\n\n/**\n * Load the content of a file stored in S3 as a stream. If the s3ResourcePath is undefined, it will default to the workspace S3 resource.\n * \n * ```typescript\n * let fileContentBlob = await wmill.loadS3FileStream(inputFile)\n * // if the content is plain text, the blob can be read directly:\n * console.log(await fileContentBlob.text());\n * ```\n */\nasync loadS3FileStream(s3object: S3Object, s3ResourcePath: string | undefined = undefined): Promise<Blob | undefined>\n\n/**\n * Persist a file to the S3 bucket. 
If the s3ResourcePath is undefined, it will default to the workspace S3 resource.\n * \n * ```typescript\n * const s3object = await writeS3File(s3Object, \"Hello Windmill!\")\n * const fileContentAsUtf8Str = (await s3object.toArray()).toString('utf-8')\n * console.log(fileContentAsUtf8Str)\n * ```\n */\nasync writeS3File(s3object: S3Object | undefined, fileContent: string | Blob, s3ResourcePath: string | undefined = undefined, contentType: string | undefined = undefined, contentDisposition: string | undefined = undefined): Promise<S3Object>\n\n/**\n * Sign S3 objects to be used by anonymous users in public apps\n * @param s3objects s3 objects to sign\n * @returns signed s3 objects\n */\nasync signS3Objects(s3objects: S3Object[]): Promise<S3Object[]>\n\n/**\n * Sign S3 object to be used by anonymous users in public apps\n * @param s3object s3 object to sign\n * @returns signed s3 object\n */\nasync signS3Object(s3object: S3Object): Promise<S3Object>\n\n/**\n * Generate a presigned public URL for an array of S3 objects.\n * If an S3 object is not signed yet, it will be signed first.\n * @param s3Objects s3 objects to sign\n * @returns list of signed public URLs\n */\nasync getPresignedS3PublicUrls(s3Objects: S3Object[], { baseUrl }: { baseUrl?: string } = {}): Promise<string[]>\n\n/**\n * Generate a presigned public URL for an S3 object. If the S3 object is not signed yet, it will be signed first.\n * @param s3Object s3 object to sign\n * @returns signed public URL\n */\nasync getPresignedS3PublicUrl(s3Objects: S3Object, { baseUrl }: { baseUrl?: string } = {}): Promise<string>\n\n/**\n * Get URLs needed for resuming a flow after this step\n * @param approver approver name\n * @param flowLevel if true, generate resume URLs for the parent flow instead of the specific step.\n * This allows pre-approvals that can be consumed by any later suspend step in the same flow.\n * @returns approval page UI URL, resume and cancel API URLs for resuming the flow\n */\nasync getResumeUrls(approver?: string, flowLevel?: boolean): Promise<{\n approvalPage: string;\n resume: string;\n cancel: string;\n}>\n\n/**\n * @deprecated use getResumeUrls instead\n */\ngetResumeEndpoints(approver?: string): Promise<{\n approvalPage: string;\n resume: string;\n cancel: string;\n}>\n\n/**\n * Get an OIDC jwt token for auth to external services (e.g: Vault, AWS) (ee only)\n * @param audience audience of the token\n * @param expiresIn Optional number of seconds until the token expires\n * @returns jwt token\n */\nasync getIdToken(audience: string, expiresIn?: number): Promise<string>\n\n/**\n * Convert a base64-encoded string to Uint8Array\n * @param data - Base64-encoded string\n * @returns Decoded Uint8Array\n */\nbase64ToUint8Array(data: string): Uint8Array\n\n/**\n * Convert a Uint8Array to base64-encoded string\n * @param arrayBuffer - Uint8Array to encode\n * @returns Base64-encoded string\n */\nuint8ArrayToBase64(arrayBuffer: Uint8Array): string\n\n/**\n * Get email from workspace username\n * This method is particularly useful for apps that require the email address of the viewer.\n * Indeed, in the viewer context, WM_USERNAME is set to the username of the viewer but WM_EMAIL is set to the email of the creator of the app.\n * @param username\n * @returns email address\n */\nasync usernameToEmail(username: string): Promise<string>\n\n/**\n * Sends an interactive approval request via Slack, allowing optional customization of the message, approver, and form fields.\n * \n * **[Enterprise Edition Only]** To include form 
fields in the Slack approval request, go to **Advanced -> Suspend -> Form**\n * and define a form. Learn more at [Windmill Documentation](https://www.windmill.dev/docs/flows/flow_approval#form).\n * \n * @param {Object} options - The configuration options for the Slack approval request.\n * @param {string} options.slackResourcePath - The path to the Slack resource in Windmill.\n * @param {string} options.channelId - The Slack channel ID where the approval request will be sent.\n * @param {string} [options.message] - Optional custom message to include in the Slack approval request.\n * @param {string} [options.approver] - Optional user ID or name of the approver for the request.\n * @param {DefaultArgs} [options.defaultArgsJson] - Optional object defining or overriding the default arguments for form fields.\n * @param {Enums} [options.dynamicEnumsJson] - Optional object overriding the default values of enum form fields.\n * @param {string} [options.resumeButtonText] - Optional text for the resume button.\n * @param {string} [options.cancelButtonText] - Optional text for the cancel button.\n * \n * @returns {Promise<void>} Resolves when the Slack approval request is successfully sent.\n * \n * @throws {Error} If the function is not called within a flow or flow preview.\n * @throws {Error} If the `JobService.getSlackApprovalPayload` call fails.\n * \n * **Usage Example:**\n * ```typescript\n * await requestInteractiveSlackApproval({\n * slackResourcePath: \"/u/alex/my_slack_resource\",\n * channelId: \"admins-slack-channel\",\n * message: \"Please approve this request\",\n * approver: \"approver123\",\n * defaultArgsJson: { key1: \"value1\", key2: 42 },\n * dynamicEnumsJson: { foo: [\"choice1\", \"choice2\"], bar: [\"optionA\", \"optionB\"] },\n * resumeButtonText: \"Resume\",\n * cancelButtonText: \"Cancel\",\n * });\n * ```\n * \n * **Note:** This function requires execution within a Windmill flow or flow preview.\n */\nasync requestInteractiveSlackApproval({ slackResourcePath, channelId, message, approver, defaultArgsJson, dynamicEnumsJson, resumeButtonText, cancelButtonText, }: SlackApprovalOptions): Promise<void>\n\n/**\n * Sends an interactive approval request via Teams, allowing optional customization of the message, approver, and form fields.\n * \n * **[Enterprise Edition Only]** To include form fields in the Teams approval request, go to **Advanced -> Suspend -> Form**\n * and define a form. 
Learn more at [Windmill Documentation](https://www.windmill.dev/docs/flows/flow_approval#form).\n * \n * @param {Object} options - The configuration options for the Teams approval request.\n * @param {string} options.teamName - The Teams team name where the approval request will be sent.\n * @param {string} options.channelName - The Teams channel name where the approval request will be sent.\n * @param {string} [options.message] - Optional custom message to include in the Teams approval request.\n * @param {string} [options.approver] - Optional user ID or name of the approver for the request.\n * @param {DefaultArgs} [options.defaultArgsJson] - Optional object defining or overriding the default arguments for form fields.\n * @param {Enums} [options.dynamicEnumsJson] - Optional object overriding the default values of enum form fields.\n * \n * @returns {Promise<void>} Resolves when the Teams approval request is successfully sent.\n * \n * @throws {Error} If the function is not called within a flow or flow preview.\n * @throws {Error} If the `JobService.getTeamsApprovalPayload` call fails.\n * \n * **Usage Example:**\n * ```typescript\n * await requestInteractiveTeamsApproval({\n * teamName: \"admins-teams\",\n * channelName: \"admins-teams-channel\",\n * message: \"Please approve this request\",\n * approver: \"approver123\",\n * defaultArgsJson: { key1: \"value1\", key2: 42 },\n * dynamicEnumsJson: { foo: [\"choice1\", \"choice2\"], bar: [\"optionA\", \"optionB\"] },\n * });\n * ```\n * \n * **Note:** This function requires execution within a Windmill flow or flow preview.\n */\nasync requestInteractiveTeamsApproval({ teamName, channelName, message, approver, defaultArgsJson, dynamicEnumsJson, }: TeamsApprovalOptions): Promise<void>\n\n/**\n * Parse an S3 object from URI string or record format\n * @param s3Object - S3 object as URI string (s3://storage/key) or record\n * @returns S3 object record with storage and s3 key\n */\nparseS3Object(s3Object: S3Object): S3ObjectRecord\n\nsetWorkflowCtx(ctx: WorkflowCtx | null): void\n\nasync sleep(seconds: number): Promise<void>\n\nasync step<T>(name: string, fn: () => T | Promise<T>): Promise<T>\n\n/**\n * Create a task that dispatches to a separate Windmill script.\n * \n * @example\n * const extract = taskScript(\"f/data/extract\");\n * // inside workflow: await extract({ url: \"https://...\" })\n */\ntaskScript(path: string, options?: TaskOptions): (...args: any[]) => PromiseLike<any>\n\n/**\n * Create a task that dispatches to a separate Windmill flow.\n * \n * @example\n * const pipeline = taskFlow(\"f/etl/pipeline\");\n * // inside workflow: await pipeline({ input: data })\n */\ntaskFlow(path: string, options?: TaskOptions): (...args: any[]) => PromiseLike<any>\n\n/**\n * Mark an async function as a workflow-as-code entry point.\n * \n * The function must be **deterministic**: given the same inputs it must call\n * tasks in the same order on every replay. 
Branching on task results is fine\n * (results are replayed from checkpoint), but branching on external state\n * (current time, random values, external API calls) must use `step()` to\n * checkpoint the value so replays see the same result.\n */\nworkflow<T>(fn: (...args: any[]) => Promise<T>): void\n\n/**\n * Suspend the workflow and wait for an external approval.\n * \n * Use `getResumeUrls()` (wrapped in `step()`) to obtain resume/cancel/approvalPage\n * URLs before calling this function.\n * \n * @example\n * const urls = await step(\"urls\", () => getResumeUrls());\n * await step(\"notify\", () => sendEmail(urls.approvalPage));\n * const { value, approver } = await waitForApproval({ timeout: 3600 });\n */\nwaitForApproval(options?: { timeout?: number; form?: object; selfApproval?: boolean; }): PromiseLike<{ value: any; approver: string; approved: boolean }>\n\n/**\n * Process items in parallel with optional concurrency control.\n * \n * Each item is processed by calling `fn(item)`, which should be a task().\n * Items are dispatched in batches of `concurrency` (default: all at once).\n * \n * @example\n * const process = task(async (item: string) => { ... });\n * const results = await parallel(items, process, { concurrency: 5 });\n */\nasync parallel<T, R>(items: T[], fn: (item: T) => PromiseLike<R> | R, options?: { concurrency?: number },): Promise<R[]>\n\n/**\n * Commit Kafka offsets for a trigger with auto_commit disabled.\n * @param triggerPath - Path to the Kafka trigger (from event.wm_trigger.trigger_path)\n * @param topic - Kafka topic name (from event.topic)\n * @param partition - Partition number (from event.partition)\n * @param offset - Message offset to commit (from event.offset)\n */\nasync commitKafkaOffsets(triggerPath: string, topic: string, partition: number, offset: number,): Promise<void>\n\n/**\n * Create a SQL template function for PostgreSQL/datatable queries\n * @param name - Database/datatable name (default: \"main\")\n * @returns SQL template function for building parameterized queries\n * @example\n * let sql = wmill.datatable()\n * let name = 'Robin'\n * let age = 21\n * await sql`\n * SELECT * FROM friends\n * WHERE name = ${name} AND age = ${age}::int\n * `.fetch()\n */\ndatatable(name: string = \"main\"): DatatableSqlTemplateFunction\n\n/**\n * Create a SQL template function for DuckDB/ducklake queries\n * @param name - DuckDB database name (default: \"main\")\n * @returns SQL template function for building parameterized queries\n * @example\n * let sql = wmill.ducklake()\n * let name = 'Robin'\n * let age = 21\n * await sql`\n * SELECT * FROM friends\n * WHERE name = ${name} AND age = ${age}\n * `.fetch()\n */\nducklake(name: string = \"main\"): SqlTemplateFunction\n";
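A minimal usage sketch of the S3 helpers above. The file key is illustrative, and the inline object literal assumes the `{ s3: ... }` record shape that `parseS3Object` documents:

```typescript
import * as wmill from 'windmill-client'

export async function main() {
  // Write a text file; leaving s3ResourcePath undefined targets the workspace S3 resource
  const obj = await wmill.writeS3File(
    { s3: 'reports/hello.txt' }, // illustrative key
    'Hello Windmill!',
    undefined,
    'text/plain'
  )

  // Read it back and decode the bytes
  const bytes = await wmill.loadS3File(obj)
  console.log(new TextDecoder().decode(bytes!))

  // Presign it so anonymous users of a public app can fetch it
  const [url] = await wmill.getPresignedS3PublicUrls([obj])
  return url
}
```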
  export declare const SDK_PYTHON = "# Python SDK (wmill)\n\nImport: import wmill\n\ndef worker_has_internal_server() -> bool\n\ndef get_mocked_api() -> Optional[dict]\n\n# Get the HTTP client instance.\n# \n# Returns:\n# Configured httpx.Client for API requests\ndef get_client() -> httpx.Client\n\n# Make an HTTP GET request to the Windmill API.\n# \n# Args:\n# endpoint: API endpoint path\n# raise_for_status: Whether to raise an exception on HTTP errors\n# **kwargs: Additional arguments passed to httpx.get\n# \n# Returns:\n# HTTP response object\ndef get(endpoint, raise_for_status = True, **kwargs) -> httpx.Response\n\n# Make an HTTP POST request to the Windmill API.\n# \n# Args:\n# endpoint: API endpoint path\n# raise_for_status: Whether to raise an exception on HTTP errors\n# **kwargs: Additional arguments passed to httpx.post\n# \n# Returns:\n# HTTP response object\ndef post(endpoint, raise_for_status = True, **kwargs) -> httpx.Response\n\n# Create a new authentication token.\n# \n# Args:\n# duration: Token validity duration (default: 1 day)\n# \n# Returns:\n# New authentication token string\ndef create_token(duration = dt.timedelta(days=1)) -> str\n\n# Create a script job and return its job id.\n# \n# .. deprecated:: Use run_script_by_path_async or run_script_by_hash_async instead.\ndef run_script_async(path: str = None, hash_: str = None, args: dict = None, scheduled_in_secs: int = None) -> str\n\n# Create a script job by path and return its job id.\ndef run_script_by_path_async(path: str, args: dict = None, scheduled_in_secs: int = None) -> str\n\n# Create a script job by hash and return its job id.\ndef run_script_by_hash_async(hash_: str, args: dict = None, scheduled_in_secs: int = None) -> str\n\n# Create a flow job and return its job id.\ndef run_flow_async(path: str, args: dict = None, scheduled_in_secs: int = None, do_not_track_in_parent: bool = True) -> str\n\n# Run script synchronously and return its result.\n# \n# .. 
deprecated:: Use run_script_by_path or run_script_by_hash instead.\ndef run_script(path: str = None, hash_: str = None, args: dict = None, timeout: dt.timedelta | int | float | None = None, verbose: bool = False, cleanup: bool = True, assert_result_is_not_none: bool = False) -> Any\n\n# Run script by path synchronously and return its result.\ndef run_script_by_path(path: str, args: dict = None, timeout: dt.timedelta | int | float | None = None, verbose: bool = False, cleanup: bool = True, assert_result_is_not_none: bool = False) -> Any\n\n# Run script by hash synchronously and return its result.\ndef run_script_by_hash(hash_: str, args: dict = None, timeout: dt.timedelta | int | float | None = None, verbose: bool = False, cleanup: bool = True, assert_result_is_not_none: bool = False) -> Any\n\n# Run a script on the current worker without creating a job.\n# \n# On agent workers (no internal server), falls back to running a normal\n# preview job and waiting for the result.\ndef run_inline_script_preview(content: str, language: str, args: dict = None) -> Any\n\n# Wait for a job to complete and return its result.\n# \n# Args:\n# job_id: ID of the job to wait for\n# timeout: Maximum time to wait (seconds or timedelta)\n# verbose: Enable verbose logging\n# cleanup: Register cleanup handler to cancel job on exit\n# assert_result_is_not_none: Raise exception if result is None\n# \n# Returns:\n# Job result when completed\n# \n# Raises:\n# TimeoutError: If timeout is reached\n# Exception: If job fails\ndef wait_job(job_id, timeout: dt.timedelta | int | float | None = None, verbose: bool = False, cleanup: bool = True, assert_result_is_not_none: bool = False)\n\n# Cancel a specific job by ID.\n# \n# Args:\n# job_id: UUID of the job to cancel\n# reason: Optional reason for cancellation\n# \n# Returns:\n# Response message from the cancel endpoint\ndef cancel_job(job_id: str, reason: str = None) -> str\n\n# Cancel currently running executions of the same script.\ndef cancel_running() -> dict\n\n# Get job details by ID.\n# \n# Args:\n# job_id: UUID of the job\n# \n# Returns:\n# Job details dictionary\ndef get_job(job_id: str) -> dict\n\n# Get the root job ID for a flow hierarchy.\n# \n# Args:\n# job_id: Job ID (defaults to current WM_JOB_ID)\n# \n# Returns:\n# Root job ID\ndef get_root_job_id(job_id: str | None = None) -> str\n\n# Get an OIDC JWT token for authentication to external services.\n# \n# Args:\n# audience: Token audience (e.g., \"vault\", \"aws\")\n# expires_in: Optional expiration time in seconds\n# \n# Returns:\n# JWT token string\ndef get_id_token(audience: str, expires_in: int | None = None) -> str\n\n# Get the status of a job.\n# \n# Args:\n# job_id: UUID of the job\n# \n# Returns:\n# Job status: \"RUNNING\", \"WAITING\", or \"COMPLETED\"\ndef get_job_status(job_id: str) -> JobStatus\n\n# Get the result of a completed job.\n# \n# Args:\n# job_id: UUID of the completed job\n# assert_result_is_not_none: Raise exception if result is None\n# \n# Returns:\n# Job result\ndef get_result(job_id: str, assert_result_is_not_none: bool = True) -> Any\n\n# Get a variable value by path.\n# \n# Args:\n# path: Variable path in Windmill\n# \n# Returns:\n# Variable value as string\ndef get_variable(path: str) -> str\n\n# Set a variable value by path, creating it if it doesn't exist.\n# \n# Args:\n# path: Variable path in Windmill\n# value: Variable value to set\n# is_secret: Whether the variable should be secret (default: False)\ndef set_variable(path: str, value: str, is_secret: bool = False) -> 
None\n\n# Get a resource value by path.\n# \n# Args:\n# path: Resource path in Windmill\n# none_if_undefined: Return None instead of raising if not found\n# interpolated: whether variables and resources are fully unrolled\n# \n# Returns:\n# Resource value dictionary or None\ndef get_resource(path: str, none_if_undefined: bool = False, interpolated: bool = True) -> dict | None\n\n# Set a resource value by path, creating it if it doesn't exist.\n# \n# Args:\n# value: Resource value to set\n# path: Resource path in Windmill\n# resource_type: Resource type for creation\ndef set_resource(value: Any, path: str, resource_type: str)\n\n# List resources from Windmill workspace.\n# \n# Args:\n# resource_type: Optional resource type to filter by (e.g., \"postgresql\", \"mysql\", \"s3\")\n# page: Optional page number for pagination\n# per_page: Optional number of results per page\n# \n# Returns:\n# List of resource dictionaries\ndef list_resources(resource_type: str = None, page: int = None, per_page: int = None) -> list[dict]\n\n# Set the workflow state.\n# \n# Args:\n# value: State value to set\n# path: Optional state resource path override.\ndef set_state(value: Any, path: str | None = None) -> None\n\n# Get the workflow state.\n# \n# Args:\n# path: Optional state resource path override.\n# \n# Returns:\n# State value or None if not set\ndef get_state(path: str | None = None) -> Any\n\n# Set job progress percentage (0-99).\n# \n# Args:\n# value: Progress percentage\n# job_id: Job ID (defaults to current WM_JOB_ID)\ndef set_progress(value: int, job_id: Optional[str] = None)\n\n# Get job progress percentage.\n# \n# Args:\n# job_id: Job ID (defaults to current WM_JOB_ID)\n# \n# Returns:\n# Progress value (0-100) or None if not set\ndef get_progress(job_id: Optional[str] = None) -> Any\n\n# Set the user state of a flow at a given key\ndef set_flow_user_state(key: str, value: Any) -> None\n\n# Get the user state of a flow at a given key\ndef get_flow_user_state(key: str) -> Any\n\n# Get the Windmill server version.\n# \n# Returns:\n# Version string\ndef version()\n\n# Convenient helper that takes an S3 resource as input and returns the settings necessary to\n# initiate an S3 connection from DuckDB\ndef get_duckdb_connection_settings(s3_resource_path: str = '') -> DuckDbConnectionSettings | None\n\n# Convenient helper that takes an S3 resource as input and returns the settings necessary to\n# initiate an S3 connection from Polars\ndef get_polars_connection_settings(s3_resource_path: str = '') -> PolarsConnectionSettings\n\n# Convenient helper that takes an S3 resource as input and returns the settings necessary to\n# initiate an S3 connection using boto3\ndef get_boto3_connection_settings(s3_resource_path: str = '') -> Boto3ConnectionSettings\n\n# Load a file from the workspace s3 bucket and return its content as bytes.\n# \n# '''python\n# from wmill import S3Object\n# \n# s3_obj = S3Object(s3=\"/path/to/my_file.txt\")\n# my_obj_content = client.load_s3_file(s3_obj)\n# file_content = my_obj_content.decode(\"utf-8\")\n# '''\ndef load_s3_file(s3object: S3Object | str, s3_resource_path: str | None) -> bytes\n\n# Load a file from the workspace s3 bucket and return the bytes stream.\n# \n# '''python\n# from wmill import S3Object\n# \n# s3_obj = S3Object(s3=\"/path/to/my_file.txt\")\n# with client.load_s3_file_reader(s3_obj, None) as file_reader:\n# print(file_reader.read())\n# '''\ndef load_s3_file_reader(s3object: S3Object | str, s3_resource_path: str | None) -> BufferedReader\n\n# Write a file 
to the workspace S3 bucket\n# \n# '''python\n# from wmill import S3Object\n# \n# s3_obj = S3Object(s3=\"/path/to/my_file.txt\")\n# \n# # for an in-memory bytes array:\n# file_content = b'Hello Windmill!'\n# client.write_s3_file(s3_obj, file_content)\n# \n# # for a file:\n# with open(\"my_file.txt\", \"rb\") as my_file:\n# client.write_s3_file(s3_obj, my_file)\n# '''\ndef write_s3_file(s3object: S3Object | str | None, file_content: BufferedReader | bytes, s3_resource_path: str | None, content_type: str | None = None, content_disposition: str | None = None) -> S3Object\n\n# Permanently delete a file from the workspace S3 bucket.\n# \n# '''python\n# from wmill import S3Object\n# \n# s3_obj = S3Object(s3=\"/path/to/my_file.txt\")\n# client.delete_s3_object(s3_obj)\n# '''\ndef delete_s3_object(s3object: S3Object | str, s3_resource_path: str | None = None) -> None\n\n# Sign S3 objects for use by anonymous users in public apps.\n# \n# Args:\n# s3_objects: List of S3 objects to sign\n# \n# Returns:\n# List of signed S3 objects\ndef sign_s3_objects(s3_objects: list[S3Object | str]) -> list[S3Object]\n\n# Sign a single S3 object for use by anonymous users in public apps.\n# \n# Args:\n# s3_object: S3 object to sign\n# \n# Returns:\n# Signed S3 object\ndef sign_s3_object(s3_object: S3Object | str) -> S3Object\n\n# Generate presigned public URLs for an array of S3 objects.\n# If an S3 object is not signed yet, it will be signed first.\n# \n# Args:\n# s3_objects: List of S3 objects to sign\n# base_url: Optional base URL for the presigned URLs (defaults to WM_BASE_URL)\n# \n# Returns:\n# List of signed public URLs\n# \n# Example:\n# >>> s3_objs = [S3Object(s3=\"/path/to/file1.txt\"), S3Object(s3=\"/path/to/file2.txt\")]\n# >>> urls = client.get_presigned_s3_public_urls(s3_objs)\ndef get_presigned_s3_public_urls(s3_objects: list[S3Object | str], base_url: str | None = None) -> list[str]\n\n# Generate a presigned public URL for an S3 object.\n# If the S3 object is not signed yet, it will be signed first.\n# \n# Args:\n# s3_object: S3 object to sign\n# base_url: Optional base URL for the presigned URL (defaults to WM_BASE_URL)\n# \n# Returns:\n# Signed public URL\n# \n# Example:\n# >>> s3_obj = S3Object(s3=\"/path/to/file.txt\")\n# >>> url = client.get_presigned_s3_public_url(s3_obj)\ndef get_presigned_s3_public_url(s3_object: S3Object | str, base_url: str | None = None) -> str\n\n# Get the current user information.\n# \n# Returns:\n# User details dictionary\ndef whoami() -> dict\n\n# Get the current user information (alias for whoami).\n# \n# Returns:\n# User details dictionary\ndef user() -> dict\n\n# Get the state resource path from environment.\n# \n# Returns:\n# State path string\ndef state_path() -> str\n\n# Get the workflow state.\n# \n# Returns:\n# State value or None if not set\ndef state() -> Any\n\n# Set the state in the shared folder using pickle\ndef set_shared_state_pickle(value: Any, path: str = 'state.pickle') -> None\n\n# Get the state in the shared folder using pickle\ndef get_shared_state_pickle(path: str = 'state.pickle') -> Any\n\n# Set the state in the shared folder using JSON\ndef set_shared_state(value: Any, path: str = 'state.json') -> None\n\n# Get the state in the shared folder using JSON\ndef get_shared_state(path: str = 'state.json') -> Any\n\n# Get URLs needed for resuming a flow after suspension.\n# \n# Args:\n# approver: Optional approver name\n# flow_level: If True, generate resume URLs for the parent flow instead of the\n# specific step. 
This allows pre-approvals that can be consumed by any later\n# suspend step in the same flow.\n# \n# Returns:\n# Dictionary with approvalPage, resume, and cancel URLs\ndef get_resume_urls(approver: str = None, flow_level: bool = None) -> dict\n\n# Sends an interactive approval request via Slack, allowing optional customization of the message, approver, and form fields.\n# \n# **[Enterprise Edition Only]** To include form fields in the Slack approval request, use the \"Advanced -> Suspend -> Form\" functionality.\n# Learn more at: https://www.windmill.dev/docs/flows/flow_approval#form\n# \n# :param slack_resource_path: The path to the Slack resource in Windmill.\n# :type slack_resource_path: str\n# :param channel_id: The Slack channel ID where the approval request will be sent.\n# :type channel_id: str\n# :param message: Optional custom message to include in the Slack approval request.\n# :type message: str, optional\n# :param approver: Optional user ID or name of the approver for the request.\n# :type approver: str, optional\n# :param default_args_json: Optional dictionary defining or overriding the default arguments for form fields.\n# :type default_args_json: dict, optional\n# :param dynamic_enums_json: Optional dictionary overriding the enum default values of enum form fields.\n# :type dynamic_enums_json: dict, optional\n# \n# :raises Exception: If the function is not called within a flow or flow preview.\n# :raises Exception: If the required flow job or flow step environment variables are not set.\n# \n# :return: None\n# \n# **Usage Example:**\n# >>> client.request_interactive_slack_approval(\n# ... slack_resource_path=\"/u/alex/my_slack_resource\",\n# ... channel_id=\"admins-slack-channel\",\n# ... message=\"Please approve this request\",\n# ... approver=\"approver123\",\n# ... default_args_json={\"key1\": \"value1\", \"key2\": 42},\n# ... dynamic_enums_json={\"foo\": [\"choice1\", \"choice2\"], \"bar\": [\"optionA\", \"optionB\"]},\n# ... 
)\n# \n# **Notes:**\n# - This function must be executed within a Windmill flow or flow preview.\n# - The function checks for required environment variables (`WM_FLOW_JOB_ID`, `WM_FLOW_STEP_ID`) to ensure it is run in the appropriate context.\ndef request_interactive_slack_approval(slack_resource_path: str, channel_id: str, message: str = None, approver: str = None, default_args_json: dict = None, dynamic_enums_json: dict = None) -> None\n\n# Get email from workspace username\n# This method is particularly useful for apps that require the email address of the viewer.\n# Indeed, in the viewer context WM_USERNAME is set to the username of the viewer but WM_EMAIL is set to the email of the creator of the app.\ndef username_to_email(username: str) -> str\n\n# Send a message to a Microsoft Teams conversation with conversation_id, where success is used to style the message\ndef send_teams_message(conversation_id: str, text: str, success: bool = True, card_block: dict = None)\n\n# Get a DataTable client for SQL queries.\n# \n# Args:\n# name: Database name (default: \"main\")\n# \n# Returns:\n# DataTableClient instance\ndef datatable(name: str = 'main')\n\n# Get a DuckLake client for DuckDB queries.\n# \n# Args:\n# name: Database name (default: \"main\")\n# \n# Returns:\n# DucklakeClient instance\ndef ducklake(name: str = 'main')\n\ndef init_global_client(f)\n\ndef deprecate(in_favor_of: str)\n\n# Get the current workspace ID.\n# \n# Returns:\n# Workspace ID string\ndef get_workspace() -> str\n\ndef get_version() -> str\n\n# Run a script synchronously by hash and return its result.\n# \n# Args:\n# hash: Script hash\n# args: Script arguments\n# verbose: Enable verbose logging\n# assert_result_is_not_none: Raise exception if result is None\n# cleanup: Register cleanup handler to cancel job on exit\n# timeout: Maximum time to wait\n# \n# Returns:\n# Script result\ndef run_script_sync(hash: str, args: Dict[str, Any] = None, verbose: bool = False, assert_result_is_not_none: bool = True, cleanup: bool = True, timeout: dt.timedelta = None) -> Any\n\n# Run a script synchronously by path and return its result.\n# \n# Args:\n# path: Script path\n# args: Script arguments\n# verbose: Enable verbose logging\n# assert_result_is_not_none: Raise exception if result is None\n# cleanup: Register cleanup handler to cancel job on exit\n# timeout: Maximum time to wait\n# \n# Returns:\n# Script result\ndef run_script_by_path_sync(path: str, args: Dict[str, Any] = None, verbose: bool = False, assert_result_is_not_none: bool = True, cleanup: bool = True, timeout: dt.timedelta = None) -> Any\n\n# Convenient helper that takes an S3 resource as input and returns the settings necessary to\n# initiate an S3 connection from DuckDB\ndef duckdb_connection_settings(s3_resource_path: str = '') -> DuckDbConnectionSettings\n\n# Convenient helper that takes an S3 resource as input and returns the settings necessary to\n# initiate an S3 connection from Polars\ndef polars_connection_settings(s3_resource_path: str = '') -> PolarsConnectionSettings\n\n# Convenient helper that takes an S3 resource as input and returns the settings necessary to\n# initiate an S3 connection using boto3\ndef boto3_connection_settings(s3_resource_path: str = '') -> Boto3ConnectionSettings\n\n# Get the state resource path from environment.\n# \n# Returns:\n# State path string\ndef get_state_path() -> str\n\n# Parse resource syntax from string.\ndef parse_resource_syntax(s: str) -> Optional[str]\n\n# Parse S3 object from string or S3Object format.\ndef 
parse_s3_object(s3_object: S3Object | str) -> S3Object\n\n# Parse variable syntax from string.\ndef parse_variable_syntax(s: str) -> Optional[str]\n\n# Append text to the result stream.\n# \n# Args:\n# text: text to append to the result stream\ndef append_to_result_stream(text: str) -> None\n\n# Pipe a stream into the result stream.\n# \n# Args:\n# stream: stream whose content is forwarded to the result stream\ndef stream_result(stream) -> None\n\n# Execute a SQL query against the DataTable.\n# \n# Args:\n# sql: SQL query string with $1, $2, etc. placeholders\n# *args: Positional arguments to bind to query placeholders\n# \n# Returns:\n# SqlQuery instance for fetching results\ndef query(sql: str, *args) -> SqlQuery\n\n# Execute query and fetch results.\n# \n# Args:\n# result_collection: Optional result collection mode\n# \n# Returns:\n# Query results\ndef fetch(result_collection: str | None = None)\n\n# Execute query and fetch first row of results.\n# \n# Returns:\n# First row of query results\ndef fetch_one()\n\n# Execute query and fetch first row of results. Return result as a scalar value.\n# \n# Returns:\n# First row of query result as a scalar value\ndef fetch_one_scalar()\n\n# Execute query and don't return any results.\n# \ndef execute()\n\n# DuckDB executor requires explicit argument types at declaration\n# These types exist in both DuckDB and Postgres\n# Check that the types exist if you plan to extend this function for other SQL engines.\ndef infer_sql_type(value) -> str\n\ndef parse_sql_client_name(name: str) -> tuple[str, Optional[str]]\n\n# Decorator that marks a function as a workflow task.\n# \n# Works in both WAC v1 (sync, HTTP-based dispatch) and WAC v2\n# (async, checkpoint/replay) modes:\n# \n# - **v2 (inside @workflow)**: dispatches as a checkpoint step.\n# - **v1 (WM_JOB_ID set, no @workflow)**: dispatches via HTTP API.\n# - **Standalone**: executes the function body directly.\n# \n# Usage::\n# \n# @task\n# async def extract_data(url: str): ...\n# \n# @task(path=\"f/external_script\", timeout=600, tag=\"gpu\")\n# async def run_external(x: int): ...\ndef task(_func = None, path: Optional[str] = None, tag: Optional[str] = None, timeout: Optional[int] = None, cache_ttl: Optional[int] = None, priority: Optional[int] = None, concurrency_limit: Optional[int] = None, concurrency_key: Optional[str] = None, concurrency_time_window_s: Optional[int] = None)\n\n# Create a task that dispatches to a separate Windmill script.\n# \n# Usage::\n# \n# extract = task_script(\"f/data/extract\", timeout=600)\n# \n# @workflow\n# async def main():\n# data = await extract(url=\"https://...\")\ndef task_script(path: str, timeout: Optional[int] = None, tag: Optional[str] = None, cache_ttl: Optional[int] = None, priority: Optional[int] = None, concurrency_limit: Optional[int] = None, concurrency_key: Optional[str] = None, concurrency_time_window_s: Optional[int] = None)\n\n# Create a task that dispatches to a separate Windmill flow.\n# \n# Usage::\n# \n# pipeline = task_flow(\"f/etl/pipeline\", priority=10)\n# \n# @workflow\n# async def main():\n# result = await pipeline(input=data)\ndef task_flow(path: str, timeout: Optional[int] = None, tag: Optional[str] = None, cache_ttl: Optional[int] = None, priority: Optional[int] = None, concurrency_limit: Optional[int] = None, concurrency_key: Optional[str] = None, concurrency_time_window_s: Optional[int] = None)\n\n# Decorator marking an async function as a workflow-as-code entry point.\n# \n# The function must be **deterministic**: given the same inputs it must call\n# tasks 
in the same order on every replay. Branching on task results is fine\n# (results are replayed from checkpoint), but branching on external state\n# (current time, random values, external API calls) must use ``step()`` to\n# checkpoint the value so replays see the same result.\ndef workflow(func)\n\n# Execute ``fn`` inline and checkpoint the result.\n# \n# On replay the cached value is returned without re-executing ``fn``.\n# Use for lightweight deterministic operations (timestamps, random IDs,\n# config reads) that should not incur the overhead of a child job.\nasync def step(name: str, fn)\n\n# Server-side sleep \u2014 suspend the workflow for the given duration without holding a worker.\n# \n# Inside a @workflow, the parent job suspends and auto-resumes after ``seconds``.\n# Outside a workflow, falls back to ``asyncio.sleep``.\nasync def sleep(seconds: int)\n\n# Suspend the workflow and wait for an external approval.\n# \n# Use ``get_resume_urls()`` (wrapped in ``step()``) to obtain\n# resume/cancel/approval URLs before calling this function.\n# \n# Returns a dict with ``value`` (form data), ``approver``, and ``approved``.\n# \n# Args:\n# timeout: Approval timeout in seconds (default 1800).\n# form: Optional form schema for the approval page.\n# self_approval: Whether the user who triggered the flow can approve it (default True).\n# \n# Example::\n# \n# urls = await step(\"urls\", lambda: get_resume_urls())\n# await step(\"notify\", lambda: send_email(urls[\"approvalPage\"]))\n# result = await wait_for_approval(timeout=3600)\nasync def wait_for_approval(timeout: int = 1800, form: dict | None = None, self_approval: bool = True) -> dict\n\n# Process items in parallel with optional concurrency control.\n# \n# Each item is processed by calling ``fn(item)``, which should be a @task.\n# Items are dispatched in batches of ``concurrency`` (default: all at once).\n# \n# Example::\n# \n# @task\n# async def process(item: str):\n# ...\n# \n# results = await parallel(items, process, concurrency=5)\nasync def parallel(items, fn, concurrency: Optional[int] = None)\n\n# Commit Kafka offsets for a trigger with auto_commit disabled.\n# \n# Args:\n# trigger_path: Path to the Kafka trigger (from event['wm_trigger']['trigger_path'])\n# topic: Kafka topic name (from event['topic'])\n# partition: Partition number (from event['partition'])\n# offset: Message offset to commit (from event['offset'])\ndef commit_kafka_offsets(trigger_path: str, topic: str, partition: int, offset: int) -> None\n\n";
+ export declare const WAC_SDK_TYPESCRIPT = "## TypeScript Workflow-as-Code API (windmill-client)\n\nImport: `import { workflow, task, taskScript, taskFlow, step, sleep, waitForApproval, getResumeUrls, parallel } from \"windmill-client\"`\n\n```typescript\nexport interface TaskOptions {\n timeout?: number;\n tag?: string;\n cache_ttl?: number;\n priority?: number;\n concurrency_limit?: number;\n concurrency_key?: string;\n concurrency_time_window_s?: number;\n}\n\n/**\n * Get URLs needed for resuming a flow after this step\n * @param approver approver name\n * @param flowLevel if true, generate resume URLs for the parent flow instead of the specific step.\n * This allows pre-approvals that can be consumed by any later suspend step in the same flow.\n * @returns approval page UI URL, resume and cancel API URLs for resuming the flow\n */\nexport async function getResumeUrls(approver?: string, flowLevel?: boolean): Promise<{ approvalPage: string; resume: string; cancel: string; }>\n\n/**\n * Wrap an async function as a workflow task.\n *\n * @example\n * const extract_data = task(async (url: string) => { ... });\n * const run_external = task(\"f/external_script\", async (x: number) => { ... });\n *\n * Inside a `workflow()`, calling a task dispatches it as a step.\n * Outside a workflow, the function body executes directly.\n */\nexport function task<T extends (...args: any[]) => Promise<any>>(fnOrPath: T | string, maybeFnOrOptions?: T | TaskOptions, maybeOptions?: TaskOptions,): T\n\n/**\n * Create a task that dispatches to a separate Windmill script.\n *\n * @example\n * const extract = taskScript(\"f/data/extract\");\n * // inside workflow: await extract({ url: \"https://...\" })\n */\nexport function taskScript(path: string, options?: TaskOptions): (...args: any[]) => PromiseLike<any>\n\n/**\n * Create a task that dispatches to a separate Windmill flow.\n *\n * @example\n * const pipeline = taskFlow(\"f/etl/pipeline\");\n * // inside workflow: await pipeline({ input: data })\n */\nexport function taskFlow(path: string, options?: TaskOptions): (...args: any[]) => PromiseLike<any>\n\n/**\n * Mark an async function as a workflow-as-code entry point.\n *\n * The function must be **deterministic**: given the same inputs it must call\n * tasks in the same order on every replay. 
Branching on task results is fine\n * (results are replayed from checkpoint), but branching on external state\n * (current time, random values, external API calls) must use `step()` to\n * checkpoint the value so replays see the same result.\n */\nexport function workflow<T>(fn: (...args: any[]) => Promise<T>)\n\nexport async function step<T>(name: string, fn: () => T | Promise<T>): Promise<T>\n\nexport async function sleep(seconds: number): Promise<void>\n\n/**\n * Suspend the workflow and wait for an external approval.\n *\n * Use `getResumeUrls()` (wrapped in `step()`) to obtain resume/cancel/approvalPage\n * URLs before calling this function.\n *\n * @example\n * const urls = await step(\"urls\", () => getResumeUrls());\n * await step(\"notify\", () => sendEmail(urls.approvalPage));\n * const { value, approver } = await waitForApproval({ timeout: 3600 });\n */\nexport function waitForApproval(options?: { timeout?: number; form?: object; selfApproval?: boolean; }): PromiseLike<{ value: any; approver: string; approved: boolean }>\n\n/**\n * Process items in parallel with optional concurrency control.\n *\n * Each item is processed by calling `fn(item)`, which should be a task().\n * Items are dispatched in batches of `concurrency` (default: all at once).\n *\n * @example\n * const process = task(async (item: string) => { ... });\n * const results = await parallel(items, process, { concurrency: 5 });\n */\nexport async function parallel<T, R>(items: T[], fn: (item: T) => PromiseLike<R> | R, options?: { concurrency?: number },): Promise<R[]>\n```\n";
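Taken together, a sketch of how these primitives compose into a deterministic workflow; the task bodies and the `region` argument are placeholders:

```typescript
import {
  workflow, task, step, sleep, waitForApproval, getResumeUrls, parallel
} from 'windmill-client'

// Placeholder tasks: real implementations would do actual work
const fetchUsers = task(async (region: string): Promise<string[]> => {
  return [`alice@${region}`, `bob@${region}`]
})
const notify = task(async (email: string) => {
  console.log(`notifying ${email}`)
})

workflow(async (region: string) => {
  // Non-deterministic value checkpointed via step() so replays agree
  const startedAt = await step('startedAt', () => Date.now())

  const users = await fetchUsers(region)

  // Server-side sleep: suspends without holding a worker
  await sleep(60)

  // Obtain resume URLs (in a step), notify out-of-band, then suspend for approval
  const urls = await step('urls', () => getResumeUrls())
  console.log(`approve at ${urls.approvalPage}`)
  const { approved } = await waitForApproval({ timeout: 3600 })
  if (!approved) return { startedAt, notified: 0 }

  // Fan out in batches of 2
  await parallel(users, notify, { concurrency: 2 })
  return { startedAt, notified: users.length }
})
```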
+ export declare const WAC_SDK_PYTHON = "## Python Workflow-as-Code API (wmill)\n\nImport: `from wmill import workflow, task, task_script, task_flow, step, sleep, wait_for_approval, get_resume_urls, parallel, TaskError`\n\n```python\n# Raised when a WAC task step failed.\n#\n# Attributes:\n# step_key: The checkpoint key of the failed step.\n# child_job_id: The UUID of the failed child job.\n# result: The error result from the child job.\nclass TaskError(Exception):\n def __init__(self, message: str, *, step_key: str = '', child_job_id: str = '', result = None)\n\n# Get URLs needed for resuming a flow after suspension.\n#\n# Args:\n# approver: Optional approver name\n# flow_level: If True, generate resume URLs for the parent flow instead of the\n# specific step. This allows pre-approvals that can be consumed by any later\n# suspend step in the same flow.\n#\n# Returns:\n# Dictionary with approvalPage, resume, and cancel URLs\ndef get_resume_urls(approver: str = None, flow_level: bool = None) -> dict\n\n# Decorator that marks a function as a workflow task.\n#\n# Works in both WAC v1 (sync, HTTP-based dispatch) and WAC v2\n# (async, checkpoint/replay) modes:\n#\n# - **v2 (inside @workflow)**: dispatches as a checkpoint step.\n# - **v1 (WM_JOB_ID set, no @workflow)**: dispatches via HTTP API.\n# - **Standalone**: executes the function body directly.\n#\n# Usage::\n#\n# @task\n# async def extract_data(url: str): ...\n#\n# @task(path=\"f/external_script\", timeout=600, tag=\"gpu\")\n# async def run_external(x: int): ...\ndef task(_func = None, *, path: Optional[str] = None, tag: Optional[str] = None, timeout: Optional[int] = None, cache_ttl: Optional[int] = None, priority: Optional[int] = None, concurrency_limit: Optional[int] = None, concurrency_key: Optional[str] = None, concurrency_time_window_s: Optional[int] = None)\n\n# Create a task that dispatches to a separate Windmill script.\n#\n# Usage::\n#\n# extract = task_script(\"f/data/extract\", timeout=600)\n#\n# @workflow\n# async def main():\n# data = await extract(url=\"https://...\")\ndef task_script(path: str, *, timeout: Optional[int] = None, tag: Optional[str] = None, cache_ttl: Optional[int] = None, priority: Optional[int] = None, concurrency_limit: Optional[int] = None, concurrency_key: Optional[str] = None, concurrency_time_window_s: Optional[int] = None)\n\n# Create a task that dispatches to a separate Windmill flow.\n#\n# Usage::\n#\n# pipeline = task_flow(\"f/etl/pipeline\", priority=10)\n#\n# @workflow\n# async def main():\n# result = await pipeline(input=data)\ndef task_flow(path: str, *, timeout: Optional[int] = None, tag: Optional[str] = None, cache_ttl: Optional[int] = None, priority: Optional[int] = None, concurrency_limit: Optional[int] = None, concurrency_key: Optional[str] = None, concurrency_time_window_s: Optional[int] = None)\n\n# Decorator marking an async function as a workflow-as-code entry point.\n#\n# The function must be **deterministic**: given the same inputs it must call\n# tasks in the same order on every replay. 
Branching on task results is fine\n# (results are replayed from checkpoint), but branching on external state\n# (current time, random values, external API calls) must use ``step()`` to\n# checkpoint the value so replays see the same result.\ndef workflow(func)\n\n# Execute ``fn`` inline and checkpoint the result.\n#\n# On replay the cached value is returned without re-executing ``fn``.\n# Use for lightweight deterministic operations (timestamps, random IDs,\n# config reads) that should not incur the overhead of a child job.\nasync def step(name: str, fn)\n\n# Server-side sleep \u2014 suspend the workflow for the given duration without holding a worker.\n#\n# Inside a @workflow, the parent job suspends and auto-resumes after ``seconds``.\n# Outside a workflow, falls back to ``asyncio.sleep``.\nasync def sleep(seconds: int)\n\n# Suspend the workflow and wait for an external approval.\n#\n# Use ``get_resume_urls()`` (wrapped in ``step()``) to obtain\n# resume/cancel/approval URLs before calling this function.\n#\n# Returns a dict with ``value`` (form data), ``approver``, and ``approved``.\n#\n# Args:\n# timeout: Approval timeout in seconds (default 1800).\n# form: Optional form schema for the approval page.\n# self_approval: Whether the user who triggered the flow can approve it (default True).\n#\n# Example::\n#\n# urls = await step(\"urls\", lambda: get_resume_urls())\n# await step(\"notify\", lambda: send_email(urls[\"approvalPage\"]))\n# result = await wait_for_approval(timeout=3600)\nasync def wait_for_approval(timeout: int = 1800, form: dict | None = None, self_approval: bool = True) -> dict\n\n# Process items in parallel with optional concurrency control.\n#\n# Each item is processed by calling ``fn(item)``, which should be a @task.\n# Items are dispatched in batches of ``concurrency`` (default: all at once).\n#\n# Example::\n#\n# @task\n# async def process(item: str):\n# ...\n#\n# results = await parallel(items, process, concurrency=5)\nasync def parallel(items, fn, *, concurrency: Optional[int] = None)\n```\n";
  export declare const DATATABLE_SDK_TYPESCRIPT = "## TypeScript Datatable API (windmill-client)\n\nImport: `import * as wmill from 'windmill-client'`\n\nSQL statement object with query content, arguments, and execution methods\n```typescript\ntype SqlStatement<T> = {\n /** Raw SQL content with formatted arguments */\n content: string;\n\n /** Argument values keyed by parameter name */\n args: Record<string, any>;\n\n /**\n * Execute the SQL query and return results\n * @param params - Optional parameters including result collection mode\n * @returns Query results based on the result collection mode\n */\n fetch<ResultCollectionT extends ResultCollection = \"last_statement_all_rows\">(\n params?: FetchParams<ResultCollectionT | ResultCollection> // The union is for auto-completion\n ): Promise<SqlResult<T, ResultCollectionT>>;\n\n /**\n * Execute the SQL query and return only the first row\n * @param params - Optional parameters\n * @returns First row of the query result\n */\n fetchOne(\n params?: Omit<FetchParams<\"last_statement_first_row\">, \"resultCollection\">\n ): Promise<SqlResult<T, \"last_statement_first_row\">>;\n\n /**\n * Execute the SQL query and return only the first row as a scalar value\n * @param params - Optional parameters\n * @returns First row of the query result as a scalar value\n */\n fetchOneScalar(\n params?: Omit<\n FetchParams<\"last_statement_first_row_scalar\">,\n \"resultCollection\"\n >\n ): Promise<SqlResult<T, \"last_statement_first_row_scalar\">>;\n\n /**\n * Execute the SQL query without fetching rows\n * @param params - Optional parameters\n */\n execute(\n params?: Omit<FetchParams<\"last_statement_first_row\">, \"resultCollection\">\n ): Promise<void>;\n};\n```\n\n```typescript\n// Template tag function: sql`SELECT * FROM table WHERE id = ${id}`.fetch()\ninterface DatatableSqlTemplateFunction {\n // Tagged template usage:\n <T = any>(strings: TemplateStringsArray, ...values: any[]): SqlStatement<T>;\n query<T = any>(sql: string, ...params: any[]): SqlStatement<T>;\n};\n```\n\nCreate a SQL template function for PostgreSQL/datatable queries\n@param name - Database/datatable name (default: \"main\")\n@returns SQL template function for building parameterized queries\n@example\nlet sql = wmill.datatable()\nlet name = 'Robin'\nlet age = 21\nawait sql`\n SELECT * FROM friends\n WHERE name = ${name} AND age = ${age}::int\n`.fetch()\n```typescript\nfunction datatable(name: string = \"main\"): DatatableSqlTemplateFunction\n```\n";
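A short usage sketch, assuming a hypothetical `friends(name text, age int)` table in the default datatable:

```typescript
import * as wmill from 'windmill-client'

export async function main(name: string, age: number) {
  const sql = wmill.datatable()

  // Parameterized insert; execute() skips row fetching
  await sql`INSERT INTO friends (name, age) VALUES (${name}, ${age}::int)`.execute()

  // Tagged-template select returning all rows of the last statement
  const rows = await sql<{ name: string; age: number }>`
    SELECT name, age FROM friends WHERE age >= ${age}::int
  `.fetch()

  // Plain-string form via query(), fetching only the first row
  const oldest = await sql
    .query<{ name: string }>('SELECT name FROM friends ORDER BY age DESC LIMIT 1')
    .fetchOne()

  return { rows, oldest }
}
```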
  export declare const DATATABLE_SDK_PYTHON = "## Python Datatable API (wmill)\n\nImport: `import wmill`\n\n# Get a DataTable client for SQL queries.\n# \n# Args:\n# name: Database name (default: \"main\")\n# \n# Returns:\n# DataTableClient instance\ndef datatable(name: str = 'main') -> DataTableClient\n\n# Client for executing SQL queries against Windmill DataTables.\nclass DataTableClient:\n # Initialize DataTableClient.\n # \n # Args:\n # client: Windmill client instance\n # name: DataTable name\n def __init__(client: Windmill, name: str)\n\n # Execute a SQL query against the DataTable.\n # \n # Args:\n # sql: SQL query string with $1, $2, etc. placeholders\n # *args: Positional arguments to bind to query placeholders\n # \n # Returns:\n # SqlQuery instance for fetching results\n def query(sql: str, *args) -> SqlQuery\n\n\n# Query result handler for DataTable and DuckLake queries.\nclass SqlQuery:\n # Initialize SqlQuery.\n # \n # Args:\n # sql: SQL query string\n # fetch_fn: Function to execute the query\n def __init__(sql: str, fetch_fn)\n\n # Execute query and fetch results.\n # \n # Args:\n # result_collection: Optional result collection mode\n # \n # Returns:\n # Query results\n def fetch(result_collection: str | None = None)\n\n # Execute query and fetch first row of results.\n # \n # Returns:\n # First row of query results\n def fetch_one()\n\n # Execute query and fetch first row of results. Return result as a scalar value.\n # \n # Returns:\n # First row of query result as a scalar value\n def fetch_one_scalar()\n\n # Execute query and don't return any results.\n # \n def execute()\n\n\n";
- export declare const OPENFLOW_SCHEMA = "## OpenFlow Schema\n\n{\"OpenFlow\":{\"type\":\"object\",\"description\":\"Top-level flow definition containing metadata, configuration, and the flow structure\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Short description of what this flow does\"},\"description\":{\"type\":\"string\",\"description\":\"Detailed documentation for this flow\"},\"value\":{\"$ref\":\"#/components/schemas/FlowValue\"},\"schema\":{\"type\":\"object\",\"description\":\"JSON Schema for flow inputs. Use this to define input parameters, their types, defaults, and validation. For resource inputs, set type to 'object' and format to 'resource-<type>' (e.g., 'resource-stripe')\"},\"on_behalf_of_email\":{\"type\":\"string\",\"description\":\"The flow will be run with the permissions of the user with this email.\"}},\"required\":[\"summary\",\"value\"]},\"FlowValue\":{\"type\":\"object\",\"description\":\"The flow structure containing modules and optional preprocessor/failure handlers\",\"properties\":{\"modules\":{\"type\":\"array\",\"description\":\"Array of steps that execute in sequence. Each step can be a script, subflow, loop, or branch\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"failure_module\":{\"description\":\"Special module that executes when the flow fails. Receives error object with message, name, stack, and step_id. Must have id 'failure'. Only supports script/rawscript types\",\"$ref\":\"#/components/schemas/FlowModule\"},\"preprocessor_module\":{\"description\":\"Special module that runs before the first step on external triggers. Must have id 'preprocessor'. Only supports script/rawscript types. Cannot reference other step results\",\"$ref\":\"#/components/schemas/FlowModule\"},\"same_worker\":{\"type\":\"boolean\",\"description\":\"If true, all steps run on the same worker for better performance\"},\"concurrent_limit\":{\"type\":\"number\",\"description\":\"Maximum number of concurrent executions of this flow\"},\"concurrency_key\":{\"type\":\"string\",\"description\":\"Expression to group concurrent executions (e.g., by user ID)\"},\"concurrency_time_window_s\":{\"type\":\"number\",\"description\":\"Time window in seconds for concurrent_limit\"},\"debounce_delay_s\":{\"type\":\"integer\",\"description\":\"Delay in seconds to debounce flow executions\"},\"debounce_key\":{\"type\":\"string\",\"description\":\"Expression to group debounced executions\"},\"debounce_args_to_accumulate\":{\"type\":\"array\",\"description\":\"Arguments to accumulate across debounced executions\",\"items\":{\"type\":\"string\"}},\"max_total_debouncing_time\":{\"type\":\"integer\",\"description\":\"Maximum total time in seconds that a job can be debounced\"},\"max_total_debounces_amount\":{\"type\":\"integer\",\"description\":\"Maximum number of times a job can be debounced\"},\"skip_expr\":{\"type\":\"string\",\"description\":\"JavaScript expression to conditionally skip the entire flow\"},\"cache_ttl\":{\"type\":\"number\",\"description\":\"Cache duration in seconds for flow results\"},\"cache_ignore_s3_path\":{\"type\":\"boolean\"},\"delete_after_secs\":{\"type\":\"integer\",\"description\":\"If set, delete the flow job's args, result and logs after this many seconds following job completion\"},\"flow_env\":{\"type\":\"object\",\"description\":\"Environment variables available to all steps. 
Values can be strings, JSON values, or special references: '$var:path' (workspace variable) or '$res:path' (resource).\",\"additionalProperties\":{}},\"priority\":{\"type\":\"number\",\"description\":\"Execution priority (higher numbers run first)\"},\"early_return\":{\"type\":\"string\",\"description\":\"JavaScript expression to return early from the flow\"},\"chat_input_enabled\":{\"type\":\"boolean\",\"description\":\"Whether this flow accepts chat-style input\"},\"notes\":{\"type\":\"array\",\"description\":\"Sticky notes attached to the flow\",\"items\":{\"$ref\":\"#/components/schemas/FlowNote\"}},\"groups\":{\"type\":\"array\",\"description\":\"Semantic groups of modules for organizational purposes\",\"items\":{\"$ref\":\"#/components/schemas/FlowGroup\"}}},\"required\":[\"modules\"]},\"Retry\":{\"type\":\"object\",\"description\":\"Retry configuration for failed module executions\",\"properties\":{\"constant\":{\"type\":\"object\",\"description\":\"Retry with constant delay between attempts\",\"properties\":{\"attempts\":{\"type\":\"integer\",\"description\":\"Number of retry attempts\"},\"seconds\":{\"type\":\"integer\",\"description\":\"Seconds to wait between retries\"}}},\"exponential\":{\"type\":\"object\",\"description\":\"Retry with exponential backoff (delay doubles each time)\",\"properties\":{\"attempts\":{\"type\":\"integer\",\"description\":\"Number of retry attempts\"},\"multiplier\":{\"type\":\"integer\",\"description\":\"Multiplier for exponential backoff\"},\"seconds\":{\"type\":\"integer\",\"minimum\":1,\"description\":\"Initial delay in seconds\"},\"random_factor\":{\"type\":\"integer\",\"minimum\":0,\"maximum\":100,\"description\":\"Random jitter percentage (0-100) to avoid thundering herd\"}}},\"retry_if\":{\"$ref\":\"#/components/schemas/RetryIf\"}}},\"FlowNote\":{\"type\":\"object\",\"description\":\"A sticky note attached to a flow for documentation and annotation\",\"properties\":{\"id\":{\"type\":\"string\",\"description\":\"Unique identifier for the note\"},\"text\":{\"type\":\"string\",\"description\":\"Content of the note\"},\"position\":{\"type\":\"object\",\"description\":\"Position of the note in the flow editor\",\"properties\":{\"x\":{\"type\":\"number\",\"description\":\"X coordinate\"},\"y\":{\"type\":\"number\",\"description\":\"Y coordinate\"}},\"required\":[\"x\",\"y\"]},\"size\":{\"type\":\"object\",\"description\":\"Size of the note in the flow editor\",\"properties\":{\"width\":{\"type\":\"number\",\"description\":\"Width in pixels\"},\"height\":{\"type\":\"number\",\"description\":\"Height in pixels\"}},\"required\":[\"width\",\"height\"]},\"color\":{\"type\":\"string\",\"description\":\"Color of the note (e.g., \\\"yellow\\\", \\\"#ffff00\\\")\"},\"type\":{\"type\":\"string\",\"enum\":[\"free\",\"group\"],\"description\":\"Type of note - 'free' for standalone notes, 'group' for notes that group other nodes\"},\"locked\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Whether the note is locked and cannot be edited or moved\"},\"contained_node_ids\":{\"type\":\"array\",\"items\":{\"type\":\"string\"},\"description\":\"For group notes, the IDs of nodes contained within this group\"}},\"required\":[\"id\",\"text\",\"color\",\"type\"]},\"FlowGroup\":{\"type\":\"object\",\"description\":\"A semantic group of flow modules for organizational purposes. Does not affect execution \\u2014 modules remain in their original position in the flow. Groups provide naming and collapsibility in the editor. 
Members are computed dynamically from all nodes on paths between start_id and end_id.\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Display name for this group\"},\"note\":{\"type\":\"string\",\"description\":\"Markdown note shown below the group header\"},\"autocollapse\":{\"type\":\"boolean\",\"default\":false,\"description\":\"If true, this group is collapsed by default in the flow editor. UI hint only.\"},\"start_id\":{\"type\":\"string\",\"description\":\"ID of the first flow module in this group (topological entry point)\"},\"end_id\":{\"type\":\"string\",\"description\":\"ID of the last flow module in this group (topological exit point)\"},\"color\":{\"type\":\"string\",\"description\":\"Color for the group in the flow editor\"}},\"required\":[\"start_id\",\"end_id\"]},\"RetryIf\":{\"type\":\"object\",\"description\":\"Conditional retry based on error or result\",\"properties\":{\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression that returns true to retry. Has access to 'result' and 'error' variables\"}},\"required\":[\"expr\"]},\"StopAfterIf\":{\"type\":\"object\",\"description\":\"Early termination condition for a module\",\"properties\":{\"skip_if_stopped\":{\"type\":\"boolean\",\"description\":\"If true, following steps are skipped when this condition triggers\"},\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression evaluated after the module runs. Can use 'result' (step's result) or 'flow_input'. Return true to stop\"},\"error_message\":{\"type\":\"string\",\"nullable\":true,\"description\":\"Custom error message when stopping with an error. Mutually exclusive with skip_if_stopped. If set to a non-empty string, the flow stops with this error. If empty string, a default error message is used. If null or omitted, no error is raised.\"}},\"required\":[\"expr\"]},\"FlowModule\":{\"type\":\"object\",\"description\":\"A single step in a flow. Can be a script, subflow, loop, or branch\",\"properties\":{\"id\":{\"type\":\"string\",\"description\":\"Unique identifier for this step. Used to reference results via 'results.step_id'. Must be a valid identifier (alphanumeric, underscore, hyphen)\"},\"value\":{\"$ref\":\"#/components/schemas/FlowModuleValue\"},\"stop_after_if\":{\"description\":\"Early termination condition evaluated after this step completes\",\"$ref\":\"#/components/schemas/StopAfterIf\"},\"stop_after_all_iters_if\":{\"description\":\"For loops only - early termination condition evaluated after all iterations complete\",\"$ref\":\"#/components/schemas/StopAfterIf\"},\"skip_if\":{\"type\":\"object\",\"description\":\"Conditionally skip this step based on previous results or flow inputs\",\"properties\":{\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression that returns true to skip. 
Can use 'flow_input' or 'results.<step_id>'\"}},\"required\":[\"expr\"]},\"sleep\":{\"description\":\"Delay before executing this step (in seconds or as expression)\",\"$ref\":\"#/components/schemas/InputTransform\"},\"cache_ttl\":{\"type\":\"number\",\"description\":\"Cache duration in seconds for this step's results\"},\"cache_ignore_s3_path\":{\"type\":\"boolean\"},\"timeout\":{\"description\":\"Maximum execution time in seconds (static value or expression)\",\"$ref\":\"#/components/schemas/InputTransform\"},\"delete_after_secs\":{\"type\":\"integer\",\"description\":\"If set, delete the step's args, result and logs after this many seconds following job completion\"},\"summary\":{\"type\":\"string\",\"description\":\"Short description of what this step does\"},\"mock\":{\"type\":\"object\",\"description\":\"Mock configuration for testing without executing the actual step\",\"properties\":{\"enabled\":{\"type\":\"boolean\",\"description\":\"If true, return mock value instead of executing\"},\"return_value\":{\"description\":\"Value to return when mocked\"}}},\"suspend\":{\"type\":\"object\",\"description\":\"Configuration for approval/resume steps that wait for user input\",\"properties\":{\"required_events\":{\"type\":\"integer\",\"description\":\"Number of approvals required before continuing\"},\"timeout\":{\"type\":\"integer\",\"description\":\"Timeout in seconds before auto-continuing or canceling\"},\"resume_form\":{\"type\":\"object\",\"description\":\"Form schema for collecting input when resuming\",\"properties\":{\"schema\":{\"type\":\"object\",\"description\":\"JSON Schema for the resume form\"}}},\"user_auth_required\":{\"type\":\"boolean\",\"description\":\"If true, only authenticated users can approve\"},\"user_groups_required\":{\"description\":\"Expression or list of groups that can approve\",\"$ref\":\"#/components/schemas/InputTransform\"},\"self_approval_disabled\":{\"type\":\"boolean\",\"description\":\"If true, the user who started the flow cannot approve\"},\"hide_cancel\":{\"type\":\"boolean\",\"description\":\"If true, hide the cancel button on the approval form\"},\"continue_on_disapprove_timeout\":{\"type\":\"boolean\",\"description\":\"If true, continue flow on timeout instead of canceling\"}}},\"priority\":{\"type\":\"number\",\"description\":\"Execution priority for this step (higher numbers run first)\"},\"continue_on_error\":{\"type\":\"boolean\",\"description\":\"If true, flow continues even if this step fails\"},\"retry\":{\"description\":\"Retry configuration if this step fails\",\"$ref\":\"#/components/schemas/Retry\"},\"debouncing\":{\"description\":\"Debounce configuration for this step (EE only)\",\"type\":\"object\",\"properties\":{\"debounce_delay_s\":{\"type\":\"integer\",\"description\":\"Delay in seconds to debounce this step's executions across flow runs\"},\"debounce_key\":{\"type\":\"string\",\"description\":\"Expression to group debounced executions. Supports $workspace and $args[name]. 
Default: $workspace/flow/<flow_path>-<step_id>\"},\"debounce_args_to_accumulate\":{\"type\":\"array\",\"description\":\"Array-type arguments to accumulate across debounced executions\",\"items\":{\"type\":\"string\"}},\"max_total_debouncing_time\":{\"type\":\"integer\",\"description\":\"Maximum total time in seconds before forced execution\"},\"max_total_debounces_amount\":{\"type\":\"integer\",\"description\":\"Maximum number of debounces before forced execution\"}}}},\"required\":[\"value\",\"id\"]},\"InputTransform\":{\"description\":\"Maps input parameters for a step. Can be a static value or a JavaScript expression that references previous results or flow inputs\",\"oneOf\":[{\"$ref\":\"#/components/schemas/StaticTransform\"},{\"$ref\":\"#/components/schemas/JavascriptTransform\"},{\"$ref\":\"#/components/schemas/AiTransform\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"static\":\"#/components/schemas/StaticTransform\",\"javascript\":\"#/components/schemas/JavascriptTransform\",\"ai\":\"#/components/schemas/AiTransform\"}}},\"StaticTransform\":{\"type\":\"object\",\"description\":\"Static value passed directly to the step. Use for hardcoded values or resource references like '$res:path/to/resource'\",\"properties\":{\"value\":{\"description\":\"The static value. For resources, use format '$res:path/to/resource'\"},\"type\":{\"type\":\"string\",\"enum\":[\"static\"]}},\"required\":[\"type\"]},\"JavascriptTransform\":{\"type\":\"object\",\"description\":\"JavaScript expression evaluated at runtime. Can reference previous step results via 'results.step_id' or flow inputs via 'flow_input.property'. Inside loops, use 'flow_input.iter.value' for the current iteration value\",\"properties\":{\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression returning the value. Available variables - results (object with all previous step results), flow_input (flow inputs), flow_input.iter (in loops)\"},\"type\":{\"type\":\"string\",\"enum\":[\"javascript\"]}},\"required\":[\"expr\",\"type\"]},\"AiTransform\":{\"type\":\"object\",\"description\":\"Value resolved by the AI runtime for this input. 
The AI engine decides how to satisfy the parameter.\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"ai\"]}},\"required\":[\"type\"]},\"AIProviderKind\":{\"type\":\"string\",\"description\":\"Supported AI provider types\",\"enum\":[\"openai\",\"azure_openai\",\"anthropic\",\"mistral\",\"deepseek\",\"googleai\",\"groq\",\"openrouter\",\"togetherai\",\"customai\",\"aws_bedrock\"]},\"ProviderConfig\":{\"type\":\"object\",\"description\":\"Complete AI provider configuration with resource reference and model selection\",\"properties\":{\"kind\":{\"$ref\":\"#/components/schemas/AIProviderKind\"},\"resource\":{\"type\":\"string\",\"description\":\"Resource reference in format '$res:{resource_path}' pointing to provider credentials\"},\"model\":{\"type\":\"string\",\"description\":\"Model identifier (e.g., 'gpt-4', 'claude-3-opus-20240229', 'gemini-pro')\"}},\"required\":[\"kind\",\"resource\",\"model\"]},\"StaticProviderTransform\":{\"type\":\"object\",\"description\":\"Static provider configuration passed directly to the AI agent\",\"properties\":{\"value\":{\"$ref\":\"#/components/schemas/ProviderConfig\"},\"type\":{\"type\":\"string\",\"enum\":[\"static\"]}},\"required\":[\"type\",\"value\"]},\"ProviderTransform\":{\"description\":\"Provider configuration - can be static (ProviderConfig), JavaScript expression, or AI-determined\",\"oneOf\":[{\"$ref\":\"#/components/schemas/StaticProviderTransform\"},{\"$ref\":\"#/components/schemas/JavascriptTransform\"},{\"$ref\":\"#/components/schemas/AiTransform\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"static\":\"#/components/schemas/StaticProviderTransform\",\"javascript\":\"#/components/schemas/JavascriptTransform\",\"ai\":\"#/components/schemas/AiTransform\"}}},\"MemoryOff\":{\"type\":\"object\",\"description\":\"No conversation memory/context\",\"properties\":{\"kind\":{\"type\":\"string\",\"enum\":[\"off\"]}},\"required\":[\"kind\"]},\"MemoryAuto\":{\"type\":\"object\",\"description\":\"Automatic context management\",\"properties\":{\"kind\":{\"type\":\"string\",\"enum\":[\"auto\"]},\"context_length\":{\"type\":\"integer\",\"description\":\"Maximum number of messages to retain in context\"},\"memory_id\":{\"type\":\"string\",\"description\":\"Identifier for persistent memory across agent invocations\"}},\"required\":[\"kind\"]},\"MemoryMessage\":{\"type\":\"object\",\"description\":\"A single message in conversation history\",\"properties\":{\"role\":{\"type\":\"string\",\"enum\":[\"user\",\"assistant\",\"system\"]},\"content\":{\"type\":\"string\"}},\"required\":[\"role\",\"content\"]},\"MemoryManual\":{\"type\":\"object\",\"description\":\"Explicit message history\",\"properties\":{\"kind\":{\"type\":\"string\",\"enum\":[\"manual\"]},\"messages\":{\"type\":\"array\",\"items\":{\"$ref\":\"#/components/schemas/MemoryMessage\"}}},\"required\":[\"kind\",\"messages\"]},\"MemoryConfig\":{\"description\":\"Conversation memory configuration\",\"oneOf\":[{\"$ref\":\"#/components/schemas/MemoryOff\"},{\"$ref\":\"#/components/schemas/MemoryAuto\"},{\"$ref\":\"#/components/schemas/MemoryManual\"}],\"discriminator\":{\"propertyName\":\"kind\",\"mapping\":{\"off\":\"#/components/schemas/MemoryOff\",\"auto\":\"#/components/schemas/MemoryAuto\",\"manual\":\"#/components/schemas/MemoryManual\"}}},\"StaticMemoryTransform\":{\"type\":\"object\",\"description\":\"Static memory configuration passed directly to the AI 
agent\",\"properties\":{\"value\":{\"$ref\":\"#/components/schemas/MemoryConfig\"},\"type\":{\"type\":\"string\",\"enum\":[\"static\"]}},\"required\":[\"type\",\"value\"]},\"MemoryTransform\":{\"description\":\"Memory configuration - can be static (MemoryConfig), JavaScript expression, or AI-determined\",\"oneOf\":[{\"$ref\":\"#/components/schemas/StaticMemoryTransform\"},{\"$ref\":\"#/components/schemas/JavascriptTransform\"},{\"$ref\":\"#/components/schemas/AiTransform\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"static\":\"#/components/schemas/StaticMemoryTransform\",\"javascript\":\"#/components/schemas/JavascriptTransform\",\"ai\":\"#/components/schemas/AiTransform\"}}},\"FlowModuleValue\":{\"description\":\"The actual implementation of a flow step. Can be a script (inline or referenced), subflow, loop, branch, or special module type\",\"oneOf\":[{\"$ref\":\"#/components/schemas/RawScript\"},{\"$ref\":\"#/components/schemas/PathScript\"},{\"$ref\":\"#/components/schemas/PathFlow\"},{\"$ref\":\"#/components/schemas/ForloopFlow\"},{\"$ref\":\"#/components/schemas/WhileloopFlow\"},{\"$ref\":\"#/components/schemas/BranchOne\"},{\"$ref\":\"#/components/schemas/BranchAll\"},{\"$ref\":\"#/components/schemas/Identity\"},{\"$ref\":\"#/components/schemas/AiAgent\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"rawscript\":\"#/components/schemas/RawScript\",\"script\":\"#/components/schemas/PathScript\",\"flow\":\"#/components/schemas/PathFlow\",\"forloopflow\":\"#/components/schemas/ForloopFlow\",\"whileloopflow\":\"#/components/schemas/WhileloopFlow\",\"branchone\":\"#/components/schemas/BranchOne\",\"branchall\":\"#/components/schemas/BranchAll\",\"identity\":\"#/components/schemas/Identity\",\"aiagent\":\"#/components/schemas/AiAgent\"}}},\"RawScript\":{\"type\":\"object\",\"description\":\"Inline script with code defined directly in the flow. Use 'bun' as default language if unspecified. The script receives arguments from input_transforms\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Map of parameter names to their values (static or JavaScript expressions). These become the script's input arguments\",\"additionalProperties\":{\"$ref\":\"#/components/schemas/InputTransform\"}},\"content\":{\"type\":\"string\",\"description\":\"The script source code. 
Should export a 'main' function\"},\"language\":{\"type\":\"string\",\"description\":\"Programming language for this script\",\"enum\":[\"deno\",\"bun\",\"python3\",\"go\",\"bash\",\"powershell\",\"postgresql\",\"mysql\",\"bigquery\",\"snowflake\",\"mssql\",\"oracledb\",\"graphql\",\"nativets\",\"php\",\"rust\",\"ansible\",\"csharp\",\"nu\",\"java\",\"ruby\",\"rlang\",\"duckdb\"]},\"path\":{\"type\":\"string\",\"description\":\"Optional path for saving this script\"},\"lock\":{\"type\":\"string\",\"description\":\"Lock file content for dependencies\"},\"type\":{\"type\":\"string\",\"enum\":[\"rawscript\"]},\"tag\":{\"type\":\"string\",\"description\":\"Worker group tag for execution routing\"},\"concurrent_limit\":{\"type\":\"number\",\"description\":\"Maximum concurrent executions of this script\"},\"concurrency_time_window_s\":{\"type\":\"number\",\"description\":\"Time window for concurrent_limit\"},\"custom_concurrency_key\":{\"type\":\"string\",\"description\":\"Custom key for grouping concurrent executions\"},\"is_trigger\":{\"type\":\"boolean\",\"description\":\"If true, this script is a trigger that can start the flow\"},\"assets\":{\"type\":\"array\",\"description\":\"External resources this script accesses (S3 objects, resources, etc.)\",\"items\":{\"type\":\"object\",\"required\":[\"path\",\"kind\"],\"properties\":{\"path\":{\"type\":\"string\",\"description\":\"Path to the asset\"},\"kind\":{\"type\":\"string\",\"description\":\"Type of asset\",\"enum\":[\"s3object\",\"resource\",\"ducklake\",\"datatable\",\"volume\"]},\"access_type\":{\"type\":\"string\",\"nullable\":true,\"description\":\"Access level for this asset\",\"enum\":[\"r\",\"w\",\"rw\"]},\"alt_access_type\":{\"type\":\"string\",\"nullable\":true,\"description\":\"Alternative access level\",\"enum\":[\"r\",\"w\",\"rw\"]}}}}},\"required\":[\"type\",\"content\",\"language\",\"input_transforms\"]},\"PathScript\":{\"type\":\"object\",\"description\":\"Reference to an existing script by path. Use this when calling a previously saved script instead of writing inline code\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Map of parameter names to their values (static or JavaScript expressions). These become the script's input arguments\",\"additionalProperties\":{\"$ref\":\"#/components/schemas/InputTransform\"}},\"path\":{\"type\":\"string\",\"description\":\"Path to the script in the workspace (e.g., 'f/scripts/send_email')\"},\"hash\":{\"type\":\"string\",\"description\":\"Optional specific version hash of the script to use\"},\"type\":{\"type\":\"string\",\"enum\":[\"script\"]},\"tag_override\":{\"type\":\"string\",\"description\":\"Override the script's default worker group tag\"},\"is_trigger\":{\"type\":\"boolean\",\"description\":\"If true, this script is a trigger that can start the flow\"}},\"required\":[\"type\",\"path\",\"input_transforms\"]},\"PathFlow\":{\"type\":\"object\",\"description\":\"Reference to an existing flow by path. Use this to call another flow as a subflow\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Map of parameter names to their values (static or JavaScript expressions). 
These become the subflow's input arguments\",\"additionalProperties\":{\"$ref\":\"#/components/schemas/InputTransform\"}},\"path\":{\"type\":\"string\",\"description\":\"Path to the flow in the workspace (e.g., 'f/flows/process_user')\"},\"type\":{\"type\":\"string\",\"enum\":[\"flow\"]}},\"required\":[\"type\",\"path\",\"input_transforms\"]},\"ForloopFlow\":{\"type\":\"object\",\"description\":\"Executes nested modules in a loop over an iterator. Inside the loop, use 'flow_input.iter.value' to access the current iteration value, and 'flow_input.iter.index' for the index. Supports parallel execution for better performance on I/O-bound operations\",\"properties\":{\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute for each iteration. These can reference the iteration value via 'flow_input.iter.value'\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"iterator\":{\"description\":\"JavaScript expression that returns an array to iterate over. Can reference 'results.step_id' or 'flow_input'\",\"$ref\":\"#/components/schemas/InputTransform\"},\"skip_failures\":{\"type\":\"boolean\",\"description\":\"If true, iteration failures don't stop the loop. Failed iterations return null\"},\"type\":{\"type\":\"string\",\"enum\":[\"forloopflow\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, iterations run concurrently (faster for I/O-bound operations). Use with parallelism to control concurrency\"},\"parallelism\":{\"description\":\"Maximum number of concurrent iterations when parallel=true. Limits resource usage. Can be static number or expression\",\"$ref\":\"#/components/schemas/InputTransform\"},\"squash\":{\"type\":\"boolean\"}},\"required\":[\"modules\",\"iterator\",\"skip_failures\",\"type\"]},\"WhileloopFlow\":{\"type\":\"object\",\"description\":\"Executes nested modules repeatedly while a condition is true. The loop checks the condition after each iteration. Use stop_after_if on modules to control loop termination\",\"properties\":{\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute in each iteration. Use stop_after_if to control when the loop ends\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"skip_failures\":{\"type\":\"boolean\",\"description\":\"If true, iteration failures don't stop the loop. Failed iterations return null\"},\"type\":{\"type\":\"string\",\"enum\":[\"whileloopflow\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, iterations run concurrently (use with caution in while loops)\"},\"parallelism\":{\"description\":\"Maximum number of concurrent iterations when parallel=true\",\"$ref\":\"#/components/schemas/InputTransform\"},\"squash\":{\"type\":\"boolean\"}},\"required\":[\"modules\",\"skip_failures\",\"type\"]},\"BranchOne\":{\"type\":\"object\",\"description\":\"Conditional branching where only the first matching branch executes. Branches are evaluated in order, and the first one with a true expression runs. If no branches match, the default branch executes\",\"properties\":{\"branches\":{\"type\":\"array\",\"description\":\"Array of branches to evaluate in order. The first branch with expr evaluating to true executes\",\"items\":{\"type\":\"object\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Short description of this branch condition\"},\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression that returns boolean. Can use 'results.step_id' or 'flow_input'. 
First true expr wins\"},\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute if this branch's expr is true\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}}},\"required\":[\"modules\",\"expr\"]}},\"default\":{\"type\":\"array\",\"description\":\"Steps to execute if no branch expressions match\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"type\":{\"type\":\"string\",\"enum\":[\"branchone\"]}},\"required\":[\"branches\",\"default\",\"type\"]},\"BranchAll\":{\"type\":\"object\",\"description\":\"Parallel branching where all branches execute simultaneously. Unlike BranchOne, all branches run regardless of conditions. Useful for executing independent tasks concurrently\",\"properties\":{\"branches\":{\"type\":\"array\",\"description\":\"Array of branches that all execute (either in parallel or sequentially)\",\"items\":{\"type\":\"object\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Short description of this branch's purpose\"},\"skip_failure\":{\"type\":\"boolean\",\"description\":\"If true, failure in this branch doesn't fail the entire flow\"},\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute in this branch\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}}},\"required\":[\"modules\"]}},\"type\":{\"type\":\"string\",\"enum\":[\"branchall\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, all branches execute concurrently. If false, they execute sequentially\"}},\"required\":[\"branches\",\"type\"]},\"AgentTool\":{\"type\":\"object\",\"description\":\"A tool available to an AI agent. Can be a flow module or an external MCP (Model Context Protocol) tool\",\"properties\":{\"id\":{\"type\":\"string\",\"description\":\"Unique identifier for this tool. Cannot contain spaces - use underscores instead (e.g., 'get_user_data' not 'get user data')\"},\"summary\":{\"type\":\"string\",\"description\":\"Short description of what this tool does (shown to the AI)\"},\"value\":{\"$ref\":\"#/components/schemas/ToolValue\"}},\"required\":[\"id\",\"value\"]},\"ToolValue\":{\"description\":\"The implementation of a tool. Can be a flow module (script/flow) or an MCP tool reference\",\"oneOf\":[{\"$ref\":\"#/components/schemas/FlowModuleTool\"},{\"$ref\":\"#/components/schemas/McpToolValue\"},{\"$ref\":\"#/components/schemas/WebsearchToolValue\"}],\"discriminator\":{\"propertyName\":\"tool_type\",\"mapping\":{\"flowmodule\":\"#/components/schemas/FlowModuleTool\",\"mcp\":\"#/components/schemas/McpToolValue\",\"websearch\":\"#/components/schemas/WebsearchToolValue\"}}},\"FlowModuleTool\":{\"description\":\"A tool implemented as a flow module (script, flow, etc.). The AI can call this like any other flow module\",\"allOf\":[{\"type\":\"object\",\"properties\":{\"tool_type\":{\"type\":\"string\",\"enum\":[\"flowmodule\"]}},\"required\":[\"tool_type\"]},{\"$ref\":\"#/components/schemas/FlowModuleValue\"}]},\"WebsearchToolValue\":{\"type\":\"object\",\"description\":\"A tool implemented as a websearch tool. The AI can call this like any other websearch tool\",\"properties\":{\"tool_type\":{\"type\":\"string\",\"enum\":[\"websearch\"]}},\"required\":[\"tool_type\"]},\"McpToolValue\":{\"type\":\"object\",\"description\":\"Reference to an external MCP (Model Context Protocol) tool. 
The AI can call tools from MCP servers\",\"properties\":{\"tool_type\":{\"type\":\"string\",\"enum\":[\"mcp\"]},\"resource_path\":{\"type\":\"string\",\"description\":\"Path to the MCP resource/server configuration\"},\"include_tools\":{\"type\":\"array\",\"description\":\"Whitelist of specific tools to include from this MCP server\",\"items\":{\"type\":\"string\"}},\"exclude_tools\":{\"type\":\"array\",\"description\":\"Blacklist of tools to exclude from this MCP server\",\"items\":{\"type\":\"string\"}}},\"required\":[\"tool_type\",\"resource_path\"]},\"AiAgent\":{\"type\":\"object\",\"description\":\"AI agent step that can use tools to accomplish tasks. The agent receives inputs and can call any of its configured tools to complete the task\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Input parameters for the AI agent mapped to their values\",\"properties\":{\"provider\":{\"$ref\":\"#/components/schemas/ProviderTransform\"},\"output_type\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Output format type.\\nValid values: 'text' (default) - plain text response, 'image' - image generation\\n\"},\"user_message\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"The user's prompt/message to the AI agent. Supports variable interpolation with flow.input syntax.\"},\"system_prompt\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"System instructions that guide the AI's behavior, persona, and response style. Optional.\"},\"streaming\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Boolean. If true, stream the AI response incrementally.\\nStreaming events include: token_delta, tool_call, tool_call_arguments, tool_execution, tool_result\\n\"},\"memory\":{\"$ref\":\"#/components/schemas/MemoryTransform\"},\"output_schema\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"JSON Schema object defining structured output format. Used when you need the AI to return data in a specific shape.\\nSupports standard JSON Schema properties: type, properties, required, items, enum, pattern, minLength, maxLength, minimum, maximum, etc.\\nExample: { type: 'object', properties: { name: { type: 'string' }, age: { type: 'integer' } }, required: ['name'] }\\n\"},\"user_attachments\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Array of file references (images or PDFs) for the AI agent.\\nFormat: Array<{ bucket: string, key: string }> - S3 object references\\nExample: [{ bucket: 'my-bucket', key: 'documents/report.pdf' }]\\n\"},\"max_completion_tokens\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Integer. Maximum number of tokens the AI will generate in its response.\\nRange: 1 to 4,294,967,295. Typical values: 256-4096 for most use cases.\\n\"},\"temperature\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Float. Controls randomness/creativity of responses.\\nRange: 0.0 to 2.0 (provider-dependent)\\n- 0.0 = deterministic, focused responses\\n- 0.7 = balanced (common default)\\n- 1.0+ = more creative/random\\n\"}},\"required\":[\"provider\",\"user_message\",\"output_type\"]},\"tools\":{\"type\":\"array\",\"description\":\"Array of tools the agent can use. 
The agent decides which tools to call based on the task\",\"items\":{\"$ref\":\"#/components/schemas/AgentTool\"}},\"type\":{\"type\":\"string\",\"enum\":[\"aiagent\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, the agent can execute multiple tool calls in parallel\"}},\"required\":[\"tools\",\"type\",\"input_transforms\"]},\"Identity\":{\"type\":\"object\",\"description\":\"Pass-through module that returns its input unchanged. Useful for flow structure or as a placeholder\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"identity\"]},\"flow\":{\"type\":\"boolean\",\"description\":\"If true, marks this as a flow identity (special handling)\"}},\"required\":[\"type\"]},\"FlowStatus\":{\"type\":\"object\",\"properties\":{\"step\":{\"type\":\"integer\"},\"modules\":{\"type\":\"array\",\"items\":{\"$ref\":\"#/components/schemas/FlowStatusModule\"}},\"user_states\":{\"additionalProperties\":true},\"preprocessor_module\":{\"allOf\":[{\"$ref\":\"#/components/schemas/FlowStatusModule\"}]},\"failure_module\":{\"allOf\":[{\"$ref\":\"#/components/schemas/FlowStatusModule\"},{\"type\":\"object\",\"properties\":{\"parent_module\":{\"type\":\"string\"}}}]},\"retry\":{\"type\":\"object\",\"properties\":{\"fail_count\":{\"type\":\"integer\"},\"failed_jobs\":{\"type\":\"array\",\"items\":{\"type\":\"string\",\"format\":\"uuid\"}}}}},\"required\":[\"step\",\"modules\",\"failure_module\"]},\"FlowStatusModule\":{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"WaitingForPriorSteps\",\"WaitingForEvents\",\"WaitingForExecutor\",\"InProgress\",\"Success\",\"Failure\"]},\"id\":{\"type\":\"string\"},\"job\":{\"type\":\"string\",\"format\":\"uuid\"},\"count\":{\"type\":\"integer\"},\"progress\":{\"type\":\"integer\"},\"iterator\":{\"type\":\"object\",\"properties\":{\"index\":{\"type\":\"integer\"},\"itered\":{\"type\":\"array\",\"items\":{}},\"itered_len\":{\"type\":\"integer\"},\"args\":{}}},\"flow_jobs\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"flow_jobs_success\":{\"type\":\"array\",\"items\":{\"type\":\"boolean\"}},\"flow_jobs_duration\":{\"type\":\"object\",\"properties\":{\"started_at\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"duration_ms\":{\"type\":\"array\",\"items\":{\"type\":\"integer\"}}}},\"branch_chosen\":{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"branch\",\"default\"]},\"branch\":{\"type\":\"integer\"}},\"required\":[\"type\"]},\"branchall\":{\"type\":\"object\",\"properties\":{\"branch\":{\"type\":\"integer\"},\"len\":{\"type\":\"integer\"}},\"required\":[\"branch\",\"len\"]},\"approvers\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"resume_id\":{\"type\":\"integer\"},\"approver\":{\"type\":\"string\"}},\"required\":[\"resume_id\",\"approver\"]}},\"failed_retries\":{\"type\":\"array\",\"items\":{\"type\":\"string\",\"format\":\"uuid\"}},\"skipped\":{\"type\":\"boolean\"},\"agent_actions\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"oneOf\":[{\"type\":\"object\",\"properties\":{\"job_id\":{\"type\":\"string\",\"format\":\"uuid\"},\"function_name\":{\"type\":\"string\"},\"type\":{\"type\":\"string\",\"enum\":[\"tool_call\"]},\"module_id\":{\"type\":\"string\"}},\"required\":[\"job_id\",\"function_name\",\"type\",\"module_id\"]},{\"type\":\"object\",\"properties\":{\"call_id\":{\"type\":\"string\",\"format\":\"uuid\"},\"function_name\":{\"type\":\"string\"},\"resource_path\":{\"type\":\"string\"},\"type\":{\"type\":\"string\",\"enum\":[\"mcp_tool_cal
l\"]},\"arguments\":{\"type\":\"object\"}},\"required\":[\"call_id\",\"function_name\",\"resource_path\",\"type\"]},{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"web_search\"]}},\"required\":[\"type\"]},{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"message\"]}},\"required\":[\"content\",\"type\"]}]}},\"agent_actions_success\":{\"type\":\"array\",\"items\":{\"type\":\"boolean\"}}},\"required\":[\"type\"]}}";
- export declare const CLI_COMMANDS = "# Windmill CLI Commands\n\nThe Windmill CLI (`wmill`) provides commands for managing scripts, flows, apps, and other resources.\n\n## Global Options\n\n- `--workspace <workspace:string>` - Specify the target workspace. This overrides the default workspace.\n- `--debug --verbose` - Show debug/verbose logs\n- `--show-diffs` - Show diff information when syncing (may show sensitive information)\n- `--token <token:string>` - Specify an API token. This will override any stored token.\n- `--base-url <baseUrl:string>` - Specify the base URL of the API. If used, --token and --workspace are required and no local remote/workspace already set will be used.\n- `--config-dir <configDir:string>` - Specify a custom config directory. Overrides WMILL_CONFIG_DIR environment variable and default ~/.config location.\n\n## Commands\n\n### app\n\napp related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `app list` - list all apps\n - `--json` - Output as JSON (for piping to jq)\n- `app get <path:string>` - get an app's details\n - `--json` - Output as JSON (for piping to jq)\n- `app push <file_path:string> <remote_path:string>` - push a local app\n- `app dev [app_folder:string]` - Start a development server for building apps with live reload and hot module replacement\n - `--port <port:number>` - Port to run the dev server on (will find next available port if occupied)\n - `--host <host:string>` - Host to bind the dev server to\n - `--entry <entry:string>` - Entry point file (default: index.ts for Svelte/Vue, index.tsx otherwise)\n - `--no-open` - Don't automatically open the browser\n- `app lint [app_folder:string]` - Lint a raw app folder to validate structure and buildability\n - `--fix` - Attempt to fix common issues (not implemented yet)\n- `app new` - create a new raw app from a template\n- `app generate-agents [app_folder:string]` - regenerate AGENTS.md and DATATABLES.md from remote workspace\n- `app set-permissioned-as <path:string> <email:string>` - Set the on_behalf_of_email for an app (requires admin or wm_deployers group)\n\n### audit\n\nView audit logs (requires admin)\n\n**Subcommands:**\n\n- `audit list` - List audit log entries\n- `audit get <id:string>` - Get a specific audit log entry\n - `--json` - Output as JSON (for piping to jq)\n\n### config\n\nShow all available wmill.yaml configuration options\n\n**Options:**\n- `--json` - Output as JSON for programmatic consumption\n\n**Subcommands:**\n\n- `config migrate` - Migrate wmill.yaml from gitBranches/environments to workspaces format\n\n### dependencies\n\nworkspace dependencies related commands\n\n**Alias:** `deps`\n\n**Subcommands:**\n\n- `dependencies push <file_path:string>` - Push workspace dependencies from a local file\n\n### dev\n\nLaunch a dev server that watches for local file changes and auto-pushes them to the remote workspace. 
Provides live reload for scripts and flows during development.\n\n**Options:**\n- `--includes <pattern...:string>` - Filter paths given a glob pattern or path\n\n### docs\n\nSearch Windmill documentation.\n\n**Arguments:** `<query:string>`\n\n**Options:**\n- `--json` - Output results as JSON.\n\n### flow\n\nflow related commands\n\n**Options:**\n- `--show-archived` - Enable archived flows in output\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `flow list` - list all flows\n - `--show-archived` - Enable archived flows in output\n - `--json` - Output as JSON (for piping to jq)\n- `flow get <path:string>` - get a flow's details\n - `--json` - Output as JSON (for piping to jq)\n- `flow push <file_path:string> <remote_path:string>` - push a local flow spec. This overrides any remote versions.\n - `--message <message:string>` - Deployment message\n- `flow run <path:string>` - run a flow by path.\n - `-d --data <data:string>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n- `flow preview <flow_path:string>` - preview a local flow without deploying it. Runs the flow definition from local files and uses local PathScripts by default.\n - `-d --data <data:string>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n - `--remote` - Use deployed workspace scripts for PathScript steps instead of local files.\n- `flow new <flow_path:string>` - create a new empty flow\n - `--summary <summary:string>` - flow summary\n - `--description <description:string>` - flow description\n- `flow bootstrap <flow_path:string>` - create a new empty flow (alias for new)\n - `--summary <summary:string>` - flow summary\n - `--description <description:string>` - flow description\n- `flow history <path:string>` - Show version history for a flow\n - `--json` - Output as JSON (for piping to jq)\n- `flow show-version <path:string> <version:string>` - Show a specific version of a flow\n - `--json` - Output as JSON (for piping to jq)\n- `flow set-permissioned-as <path:string> <email:string>` - Set the on_behalf_of_email for a flow (requires admin or wm_deployers group)\n\n### folder\n\nfolder related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `folder list` - list all folders\n - `--json` - Output as JSON (for piping to jq)\n- `folder get <name:string>` - get a folder's details\n - `--json` - Output as JSON (for piping to jq)\n- `folder new <name:string>` - create a new folder locally\n - `--summary <summary:string>` - folder summary\n- `folder push <name:string>` - push a local folder to the remote by name. This overrides any remote versions.\n- `folder add-missing` - create default folder.meta.yaml for all subdirectories of f/ that are missing one\n - `-y, --yes` - skip confirmation prompt\n- `folder show-rules <name:string>` - Show default_permissioned_as rules for a folder. Use --test-path to see which rule matches a given item path.\n - `--test-path <path:string>` - Test which rule matches this item path (e.g. 
f/prod/jobs/my_script)\n - `--json` - Output as JSON\n\n### generate-metadata\n\nGenerate metadata (locks, schemas) for all scripts, flows, and apps\n\n**Arguments:** `[folder:string]`\n\n**Options:**\n- `--yes` - Skip confirmation prompt\n- `--dry-run` - Show what would be updated without making changes\n- `--lock-only` - Re-generate only the lock files\n- `--schema-only` - Re-generate only script schemas (skips flows and apps)\n- `--skip-scripts` - Skip processing scripts\n- `--skip-flows` - Skip processing flows\n- `--skip-apps` - Skip processing apps\n- `--strict-folder-boundaries` - Only update items inside the specified folder (requires folder argument)\n- `-i --includes <patterns:file[]>` - Comma separated patterns to specify which files to include\n- `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which files to exclude\n\n### gitsync-settings\n\nManage git-sync settings between local wmill.yaml and Windmill backend\n\n**Subcommands:**\n\n- `gitsync-settings pull` - Pull git-sync settings from Windmill backend to local wmill.yaml\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo)\n - `--default` - Write settings to top-level defaults instead of overrides\n - `--replace` - Replace existing settings (non-interactive mode)\n - `--override` - Add branch-specific override (non-interactive mode)\n - `--diff` - Show differences without applying changes\n - `--json-output` - Output in JSON format\n - `--with-backend-settings <json:string>` - Use provided JSON settings instead of querying backend (for testing)\n - `--yes` - Skip interactive prompts and use default behavior\n - `--promotion <branch:string>` - Use promotionOverrides from the specified branch instead of regular overrides\n- `gitsync-settings push` - Push git-sync settings from local wmill.yaml to Windmill backend\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo)\n - `--diff` - Show what would be pushed without applying changes\n - `--json-output` - Output in JSON format\n - `--with-backend-settings <json:string>` - Use provided JSON settings instead of querying backend (for testing)\n - `--yes` - Skip interactive prompts and use default behavior\n - `--promotion <branch:string>` - Use promotionOverrides from the specified branch instead of regular overrides\n\n### group\n\nManage workspace groups\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `group list` - List all groups in the workspace\n - `--json` - Output as JSON (for piping to jq)\n- `group get <name:string>` - Get group details and members\n - `--json` - Output as JSON (for piping to jq)\n- `group create <name:string>` - Create a new group\n - `--summary <summary:string>` - Group summary/description\n- `group delete <name:string>` - Delete a group\n- `group add-user <name:string> <username:string>` - Add a user to a group\n- `group remove-user <name:string> <username:string>` - Remove a user from a group\n\n### hub\n\nHub related commands. EXPERIMENTAL. INTERNAL USE ONLY.\n\n**Subcommands:**\n\n- `hub pull` - pull any supported definitions. 
EXPERIMENTAL.\n\n### init\n\nBootstrap a windmill project with a wmill.yaml file\n\n**Options:**\n- `--use-default` - Use default settings without checking backend\n- `--use-backend` - Use backend git-sync settings if available\n- `--repository <repo:string>` - Specify repository path (e.g., u/user/repo) when using backend settings\n- `--bind-profile` - Automatically bind active workspace profile to current Git branch\n- `--no-bind-profile` - Skip workspace profile binding prompt\n\n### instance\n\nsync local with a remote instance or the opposite (push or pull)\n\n**Subcommands:**\n\n- `instance add [instance_name:string] [remote:string] [token:string]` - Add a new instance\n- `instance remove <instance:string:instance>` - Remove an instance\n- `instance switch <instance:string:instance>` - Switch the current instance\n- `instance pull` - Pull instance settings, users, configs, instance groups and overwrite local\n - `--yes` - Pull without needing confirmation\n - `--dry-run` - Perform a dry run without making changes\n - `--skip-users` - Skip pulling users\n - `--skip-settings` - Skip pulling settings\n - `--skip-configs` - Skip pulling configs (worker groups)\n - `--skip-groups` - Skip pulling instance groups\n - `--include-workspaces` - Also pull workspaces\n - `--folder-per-instance` - Create a folder per instance\n - `--instance <instance:string>` - Name of the instance to pull from, override the active instance\n - `--prefix <prefix:string>` - Prefix of the local workspaces to pull, used to create the folders when using --include-workspaces\n - `--prefix-settings` - Store instance yamls inside prefixed folders when using --prefix and --folder-per-instance\n- `instance push` - Push instance settings, users, configs, instance groups and overwrite remote\n - `--yes` - Push without needing confirmation\n - `--dry-run` - Perform a dry run without making changes\n - `--skip-users` - Skip pushing users\n - `--skip-settings` - Skip pushing settings\n - `--skip-configs` - Skip pushing configs (worker groups)\n - `--skip-groups` - Skip pushing instance groups\n - `--include-workspaces` - Also push workspaces\n - `--folder-per-instance` - Create a folder per instance\n - `--instance <instance:string>` - Name of the instance to push to, override the active instance\n - `--prefix <prefix:string>` - Prefix of the local workspaces folders to push\n - `--prefix-settings` - Store instance yamls inside prefixed folders when using --prefix and --folder-per-instance\n- `instance whoami` - Display information about the currently logged-in user\n- `instance get-config` - Dump the current instance config (global settings + worker configs) as YAML\n - `-o, --output-file <file:string>` - Write YAML to a file instead of stdout\n - `--show-secrets` - Include sensitive fields (license key, JWT secret) without prompting\n - `--instance <instance:string>` - Name of the instance, override the active instance\n\n### job\n\nManage jobs (list, inspect, cancel)\n\n**Subcommands:**\n\n- `job list` - List recent jobs\n- `job get <id:string>` - Get job details. For flows: shows step tree with sub-job IDs\n - `--json` - Output as JSON (for piping to jq)\n- `job result <id:string>` - Get the result of a completed job (machine-friendly)\n- `job logs <id:string>` - Get job logs. 
For flows: aggregates all step logs\n- `job cancel <id:string>` - Cancel a running or queued job\n - `--reason <reason:string>` - Reason for cancellation\n\n### jobs\n\nPull completed and queued jobs from workspace\n\n**Arguments:** `[workspace:string]`\n\n**Options:**\n- `-c, --completed-output <file:string>` - Completed jobs output file (default: completed_jobs.json)\n- `-q, --queued-output <file:string>` - Queued jobs output file (default: queued_jobs.json)\n- `--skip-worker-check` - Skip checking for active workers before export\n\n**Subcommands:**\n\n- `jobs pull`\n- `jobs push`\n\n### lint\n\nValidate Windmill flow, schedule, and trigger YAML files in a directory\n\n**Arguments:** `[directory:string]`\n\n**Options:**\n- `--json` - Output results in JSON format\n- `--fail-on-warn` - Exit with code 1 when warnings are emitted\n- `--locks-required` - Fail if scripts or flow inline scripts that need locks have no locks\n- `-w, --watch` - Watch for file changes and re-lint automatically\n\n### queues\n\nList all queues with their metrics\n\n**Arguments:** `[workspace:string] the optional workspace to filter by (defaults to all workspaces)`\n\n**Options:**\n- `--instance [instance]` - Name of the instance to use, override the active instance\n- `--base-url [baseUrl]` - If used with --token, will be used as the base url for the instance\n\n### resource\n\nresource related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `resource list` - list all resources\n - `--json` - Output as JSON (for piping to jq)\n- `resource get <path:string>` - get a resource's details\n - `--json` - Output as JSON (for piping to jq)\n- `resource new <path:string>` - create a new resource locally\n- `resource push <file_path:string> <remote_path:string>` - push a local resource spec. This overrides any remote versions.\n\n### resource-type\n\nresource type related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `resource-type list` - list all resource types\n - `--schema` - Show schema in the output\n - `--json` - Output as JSON (for piping to jq)\n- `resource-type get <path:string>` - get a resource type's details\n - `--json` - Output as JSON (for piping to jq)\n- `resource-type new <name:string>` - create a new resource type locally\n- `resource-type push <file_path:string> <name:string>` - push a local resource type spec. This overrides any remote versions.\n- `resource-type generate-namespace` - Create a TypeScript definition file with the RT namespace generated from the resource types\n\n### schedule\n\nschedule related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `schedule list` - list all schedules\n - `--json` - Output as JSON (for piping to jq)\n- `schedule get <path:string>` - get a schedule's details\n - `--json` - Output as JSON (for piping to jq)\n- `schedule new <path:string>` - create a new schedule locally\n- `schedule push <file_path:string> <remote_path:string>` - push a local schedule spec. 
This overrides any remote versions.\n- `schedule enable <path:string>` - Enable a schedule\n- `schedule disable <path:string>` - Disable a schedule\n- `schedule set-permissioned-as <path:string> <email:string>` - Set the email (run-as user) for a schedule (requires admin or wm_deployers group)\n\n### script\n\nscript related commands\n\n**Options:**\n- `--show-archived` - Show archived scripts instead of active ones\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `script list` - list all scripts\n - `--show-archived` - Show archived scripts instead of active ones\n - `--json` - Output as JSON (for piping to jq)\n- `script push <path:file>` - push a local script spec. This overrides any remote versions. Use the script file (.ts, .js, .py, .sh)\n - `--message <message:string>` - Deployment message\n- `script get <path:file>` - get a script's details\n - `--json` - Output as JSON (for piping to jq)\n- `script show <path:file>` - show a script's content (alias for get)\n- `script run <path:file>` - run a script by path\n - `-d --data <data:file>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n- `script preview <path:file>` - preview a local script without deploying it. Supports both regular and codebase scripts.\n - `-d --data <data:file>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n- `script new <path:file> <language:string>` - create a new script\n - `--summary <summary:string>` - script summary\n - `--description <description:string>` - script description\n- `script bootstrap <path:file> <language:string>` - create a new script (alias for new)\n - `--summary <summary:string>` - script summary\n - `--description <description:string>` - script description\n- `script set-permissioned-as <path:string> <email:string>` - Set the on_behalf_of_email for a script (requires admin or wm_deployers group)\n- `script history <path:string>` - show version history for a script\n - `--json` - Output as JSON (for piping to jq)\n\n### sync\n\nsync local with a remote workspace or the opposite (push or pull)\n\n**Subcommands:**\n\n- `sync pull` - Pull any remote changes and apply them locally.\n - `--yes` - Pull without needing confirmation\n - `--dry-run` - Show changes that would be pulled without actually pulling\n - `--plain-secrets` - Pull secrets as plain text\n - `--json` - Use JSON instead of YAML\n - `--skip-variables` - Skip syncing variables (including secrets)\n - `--skip-secrets` - Skip syncing only secret variables\n - `--include-secrets` - Include secrets in sync (overrides skipSecrets in wmill.yaml)\n - `--skip-resources` - Skip syncing resources\n - `--skip-resource-types` - Skip syncing resource types\n - `--skip-scripts` - Skip syncing scripts\n - `--skip-flows` - Skip syncing flows\n - `--skip-apps` - Skip syncing apps\n - `--skip-folders` - Skip syncing folders\n - `--skip-workspace-dependencies` - Skip syncing workspace dependencies\n - `--skip-scripts-metadata` - Skip syncing scripts metadata, focus solely on logic\n - `--include-schedules` - Include syncing schedules\n - `--include-triggers` - Include syncing triggers\n - `--include-users` - Include syncing users\n - `--include-groups` - Include syncing groups\n - `--include-settings` - Include syncing workspace settings\n - `--include-key` - Include 
workspace encryption key\n - `--skip-branch-validation` - Skip git branch validation and prompts\n - `--json-output` - Output results in JSON format\n - `-i --includes <patterns:file[]>` - Comma separated patterns to specify which files to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string). Overrides wmill.yaml includes\n - `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which files to NOT take into account. Overrides wmill.yaml excludes\n - `--extra-includes <patterns:file[]>` - Comma separated patterns to specify which files to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string). Useful to still take wmill.yaml into account and act as a second pattern to satisfy\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo) when multiple repositories exist\n - `--promotion <branch:string>` - Use promotionOverrides from the specified branch instead of regular overrides\n - `--branch, --env <branch:string>` - [Deprecated: use --workspace] Override the current git branch/environment\n- `sync push` - Push any local changes and apply them remotely.\n - `--yes` - Push without needing confirmation\n - `--dry-run` - Show changes that would be pushed without actually pushing\n - `--plain-secrets` - Push secrets as plain text\n - `--json` - Use JSON instead of YAML\n - `--skip-variables` - Skip syncing variables (including secrets)\n - `--skip-secrets` - Skip syncing only secret variables\n - `--include-secrets` - Include secrets in sync (overrides skipSecrets in wmill.yaml)\n - `--skip-resources` - Skip syncing resources\n - `--skip-resource-types` - Skip syncing resource types\n - `--skip-scripts` - Skip syncing scripts\n - `--skip-flows` - Skip syncing flows\n - `--skip-apps` - Skip syncing apps\n - `--skip-folders` - Skip syncing folders\n - `--skip-workspace-dependencies` - Skip syncing workspace dependencies\n - `--skip-scripts-metadata` - Skip syncing scripts metadata, focus solely on logic\n - `--include-schedules` - Include syncing schedules\n - `--include-triggers` - Include syncing triggers\n - `--include-users` - Include syncing users\n - `--include-groups` - Include syncing groups\n - `--include-settings` - Include syncing workspace settings\n - `--include-key` - Include workspace encryption key\n - `--skip-branch-validation` - Skip git branch validation and prompts\n - `--json-output` - Output results in JSON format\n - `-i --includes <patterns:file[]>` - Comma separated patterns to specify which files to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string)\n - `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which files to NOT take into account.\n - `--extra-includes <patterns:file[]>` - Comma separated patterns to specify which files to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string). 
Useful to still take wmill.yaml into account and act as a second pattern to satisfy\n - `--message <message:string>` - Include a message that will be added to all scripts/flows/apps updated during this push\n - `--parallel <number>` - Number of changes to process in parallel\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo) when multiple repositories exist\n - `--branch, --env <branch:string>` - [Deprecated: use --workspace] Override the current git branch/environment\n - `--lint` - Run lint validation before pushing\n - `--locks-required` - Fail if scripts or flow inline scripts that need locks have no locks\n - `--auto-metadata` - Automatically regenerate stale metadata (locks and schemas) before pushing\n - `--accept-overriding-permissioned-as-with-self` - Accept that items with a different permissioned_as will be updated with your own user\n\n### token\n\nManage API tokens\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `token list` - List API tokens\n - `--json` - Output as JSON (for piping to jq)\n- `token create` - Create a new API token\n - `--label <label:string>` - Token label\n - `--expiration <expiration:string>` - Token expiration (ISO 8601 timestamp)\n- `token delete <token_prefix:string>` - Delete a token by its prefix\n\n### trigger\n\ntrigger related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `trigger list` - list all triggers\n - `--json` - Output as JSON (for piping to jq)\n- `trigger get <path:string>` - get a trigger's details\n - `--json` - Output as JSON (for piping to jq)\n - `--kind <kind:string>` - Trigger kind (http, websocket, kafka, nats, postgres, mqtt, sqs, gcp, email). Recommended for faster lookup\n- `trigger new <path:string>` - create a new trigger locally\n - `--kind <kind:string>` - Trigger kind (required: http, websocket, kafka, nats, postgres, mqtt, sqs, gcp, email)\n- `trigger push <file_path:string> <remote_path:string>` - push a local trigger spec. This overrides any remote versions.\n- `trigger set-permissioned-as <path:string> <email:string>` - Set the email (run-as user) for a trigger (requires admin or wm_deployers group)\n - `--kind <kind:string>` - Trigger kind (required: http, websocket, kafka, nats, postgres, mqtt, sqs, gcp, email)\n\n### user\n\nuser related commands\n\n**Subcommands:**\n\n- `user add <email:string> [password:string]` - Create a user\n - `--superadmin` - Specify to make the new user superadmin.\n - `--company <company:string>` - Specify to set the company of the new user.\n - `--name <name:string>` - Specify to set the name of the new user.\n- `user remove <email:string>` - Delete a user\n- `user create-token` - Create a new API token for the authenticated user\n - `--email <email:string>` - Specify credentials to use for authentication. This will not be stored. It will only be used to exchange for a token with the API server, which will not be stored either.\n - `--password <password:string>` - Specify credentials to use for authentication. This will not be stored. 
It will only be used to exchange for a token with the API server, which will not be stored either.\n\n### variable\n\nvariable related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `variable list` - list all variables\n - `--json` - Output as JSON (for piping to jq)\n- `variable get <path:string>` - get a variable's details\n - `--json` - Output as JSON (for piping to jq)\n- `variable new <path:string>` - create a new variable locally\n- `variable push <file_path:string> <remote_path:string>` - Push a local variable spec. This overrides any remote versions.\n - `--plain-secrets` - Push secrets as plain text\n- `variable add <value:string> <remote_path:string>` - Create a new variable on the remote. This will update the variable if it already exists.\n - `--plain-secrets` - Push secrets as plain text\n - `--public` - Legacy option, use --plain-secrets instead\n\n### version\n\nShow version information\n\n### worker-groups\n\ndisplay worker groups, pull and push worker group configs\n\n**Subcommands:**\n\n- `worker-groups pull` - Pull worker groups (similar to `wmill instance pull --skip-users --skip-settings --skip-groups`)\n - `--instance` - Name of the instance to pull from, override the active instance\n - `--base-url` - Base url to be passed to the instance settings instead of the local one\n - `--yes` - Pull without needing confirmation\n- `worker-groups push` - Push worker groups (similar to `wmill instance push --skip-users --skip-settings --skip-groups`)\n - `--instance [instance]` - Name of the instance to push to, override the active instance\n - `--base-url [baseUrl]` - If used with --token, will be used as the base url for the instance\n - `--yes` - Push without needing confirmation\n\n### workers\n\nList all workers grouped by worker groups\n\n**Options:**\n- `--instance [instance]` - Name of the instance to use, override the active instance\n- `--base-url [baseUrl]` - If used with --token, will be used as the base url for the instance\n\n### workspace\n\nworkspace related commands\n\n**Alias:** `profile`\n\n**Subcommands:**\n\n- `workspace switch <workspace_name:string:workspace>` - Switch to another workspace\n- `workspace add [workspace_name:string] [workspace_id:string] [remote:string]` - Add a workspace\n - `-c --create` - Create the workspace if it does not exist\n - `--create-workspace-name <workspace_name:string>` - Specify the workspace name. Ignored if --create is not specified or the workspace already exists. Will default to the workspace id.\n - `--create-username <username:string>` - Specify your own username in the newly created workspace. 
Ignored if --create is not specified, the workspace already exists or automatic username creation is enabled on the instance.\n- `workspace remove <workspace_name:string>` - Remove a workspace\n- `workspace whoami` - Show the currently active user\n- `workspace list` - List local workspace profiles\n- `workspace list-remote` - List workspaces on the remote server that you have access to\n- `workspace list-forks` - List forked workspaces on the remote server\n- `workspace bind` - Create or update a workspace entry in wmill.yaml from the active profile\n - `--workspace <name:string>` - Workspace name (default: current branch or workspaceId)\n - `--branch <branch:string>` - Git branch to associate (default: workspace name)\n- `workspace unbind` - Remove baseUrl and workspaceId from a workspace entry\n - `--workspace <name:string>` - Workspace to unbind\n- `workspace fork [workspace_name:string] [workspace_id:string]` - Create a forked workspace\n - `--create-workspace-name <workspace_name:string>` - Specify the workspace name. Ignored if the workspace already exists. Will default to the workspace id.\n - `--color <color:string>` - Workspace color (hex code, e.g. #ff0000)\n - `--datatable-behavior <behavior:string>` - How to handle datatables: skip, schema_only, or schema_and_data (default: interactive prompt)\n - `-y --yes` - Skip interactive prompts (defaults datatable behavior to 'skip')\n- `workspace delete-fork <fork_name:string>` - Delete a forked workspace and git branch\n - `-y --yes` - Skip confirmation prompt\n- `workspace merge` - Compare and deploy changes between a fork and its parent workspace\n - `--direction <direction:string>` - Deploy direction: to-parent or to-fork\n - `--all` - Deploy all changed items including conflicts\n - `--skip-conflicts` - Skip items modified in both workspaces\n - `--include <items:string>` - Comma-separated kind:path items to include (e.g. script:f/test/main,flow:f/my/flow)\n - `--exclude <items:string>` - Comma-separated kind:path items to exclude\n - `--preserve-on-behalf-of` - Preserve original on_behalf_of/permissioned_as values\n - `-y --yes` - Non-interactive mode (deploy without prompts)\n\n";
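As a quick usage sketch tying these commands together, a typical session might look like the following. The workspace name, URL, flow path, and inputs are placeholders; only flags documented above are used.

```bash
# Illustrative session; 'demo', the URL, and the flow path are placeholders.
wmill workspace add demo demo https://app.windmill.dev/
wmill sync pull --dry-run        # preview what would change locally
wmill sync pull --yes            # apply remote changes locally
wmill flow run f/flows/process_user -d '{"user_id": 42}'
wmill sync push --message "update process_user flow"
```

Per the `-d --data` convention above, inputs can also come from a file with `@inputs.json` or from stdin with `@-`.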
+ export declare const OPENFLOW_SCHEMA = "## OpenFlow Schema\n\n{\"OpenFlow\":{\"type\":\"object\",\"description\":\"Top-level flow definition containing metadata, configuration, and the flow structure\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Short description of what this flow does\"},\"description\":{\"type\":\"string\",\"description\":\"Detailed documentation for this flow\"},\"value\":{\"$ref\":\"#/components/schemas/FlowValue\"},\"schema\":{\"type\":\"object\",\"description\":\"JSON Schema for flow inputs. Use this to define input parameters, their types, defaults, and validation. For resource inputs, set type to 'object' and format to 'resource-<type>' (e.g., 'resource-stripe')\"},\"on_behalf_of_email\":{\"type\":\"string\",\"description\":\"The flow will be run with the permissions of the user with this email.\"}},\"required\":[\"summary\",\"value\"]},\"FlowValue\":{\"type\":\"object\",\"description\":\"The flow structure containing modules and optional preprocessor/failure handlers\",\"properties\":{\"modules\":{\"type\":\"array\",\"description\":\"Array of steps that execute in sequence. Each step can be a script, subflow, loop, or branch\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"failure_module\":{\"description\":\"Special module that executes when the flow fails. Receives error object with message, name, stack, and step_id. Must have id 'failure'. Only supports script/rawscript types\",\"$ref\":\"#/components/schemas/FlowModule\"},\"preprocessor_module\":{\"description\":\"Special module that runs before the first step on external triggers. Must have id 'preprocessor'. Only supports script/rawscript types. Cannot reference other step results\",\"$ref\":\"#/components/schemas/FlowModule\"},\"same_worker\":{\"type\":\"boolean\",\"description\":\"If true, all steps run on the same worker for better performance\"},\"concurrent_limit\":{\"type\":\"number\",\"description\":\"Maximum number of concurrent executions of this flow\"},\"concurrency_key\":{\"type\":\"string\",\"description\":\"Expression to group concurrent executions (e.g., by user ID)\"},\"concurrency_time_window_s\":{\"type\":\"number\",\"description\":\"Time window in seconds for concurrent_limit\"},\"debounce_delay_s\":{\"type\":\"integer\",\"description\":\"Delay in seconds to debounce flow executions\"},\"debounce_key\":{\"type\":\"string\",\"description\":\"Expression to group debounced executions\"},\"debounce_args_to_accumulate\":{\"type\":\"array\",\"description\":\"Arguments to accumulate across debounced executions\",\"items\":{\"type\":\"string\"}},\"max_total_debouncing_time\":{\"type\":\"integer\",\"description\":\"Maximum total time in seconds that a job can be debounced\"},\"max_total_debounces_amount\":{\"type\":\"integer\",\"description\":\"Maximum number of times a job can be debounced\"},\"skip_expr\":{\"type\":\"string\",\"description\":\"JavaScript expression to conditionally skip the entire flow\"},\"cache_ttl\":{\"type\":\"number\",\"description\":\"Cache duration in seconds for flow results\"},\"cache_ignore_s3_path\":{\"type\":\"boolean\"},\"delete_after_secs\":{\"type\":\"integer\",\"description\":\"If set, delete the flow job's args, result and logs after this many seconds following job completion\"},\"flow_env\":{\"type\":\"object\",\"description\":\"Environment variables available to all steps. 
Values can be strings, JSON values, or special references: '$var:path' (workspace variable) or '$res:path' (resource).\",\"additionalProperties\":{}},\"priority\":{\"type\":\"number\",\"description\":\"Execution priority (higher numbers run first)\"},\"early_return\":{\"type\":\"string\",\"description\":\"JavaScript expression to return early from the flow\"},\"chat_input_enabled\":{\"type\":\"boolean\",\"description\":\"Whether this flow accepts chat-style input\"},\"notes\":{\"type\":\"array\",\"description\":\"Sticky notes attached to the flow\",\"items\":{\"$ref\":\"#/components/schemas/FlowNote\"}},\"groups\":{\"type\":\"array\",\"description\":\"Semantic groups of modules for organizational purposes\",\"items\":{\"$ref\":\"#/components/schemas/FlowGroup\"}}},\"required\":[\"modules\"]},\"Retry\":{\"type\":\"object\",\"description\":\"Retry configuration for failed module executions\",\"properties\":{\"constant\":{\"type\":\"object\",\"description\":\"Retry with constant delay between attempts\",\"properties\":{\"attempts\":{\"type\":\"integer\",\"description\":\"Number of retry attempts\"},\"seconds\":{\"type\":\"integer\",\"description\":\"Seconds to wait between retries\"}}},\"exponential\":{\"type\":\"object\",\"description\":\"Retry with exponential backoff (delay doubles each time)\",\"properties\":{\"attempts\":{\"type\":\"integer\",\"description\":\"Number of retry attempts\"},\"multiplier\":{\"type\":\"integer\",\"description\":\"Multiplier for exponential backoff\"},\"seconds\":{\"type\":\"integer\",\"minimum\":1,\"description\":\"Initial delay in seconds\"},\"random_factor\":{\"type\":\"integer\",\"minimum\":0,\"maximum\":100,\"description\":\"Random jitter percentage (0-100) to avoid thundering herd\"}}},\"retry_if\":{\"$ref\":\"#/components/schemas/RetryIf\"}}},\"FlowNote\":{\"type\":\"object\",\"description\":\"A sticky note attached to a flow for documentation and annotation\",\"properties\":{\"id\":{\"type\":\"string\",\"description\":\"Unique identifier for the note\"},\"text\":{\"type\":\"string\",\"description\":\"Content of the note\"},\"position\":{\"type\":\"object\",\"description\":\"Position of the note in the flow editor\",\"properties\":{\"x\":{\"type\":\"number\",\"description\":\"X coordinate\"},\"y\":{\"type\":\"number\",\"description\":\"Y coordinate\"}},\"required\":[\"x\",\"y\"]},\"size\":{\"type\":\"object\",\"description\":\"Size of the note in the flow editor\",\"properties\":{\"width\":{\"type\":\"number\",\"description\":\"Width in pixels\"},\"height\":{\"type\":\"number\",\"description\":\"Height in pixels\"}},\"required\":[\"width\",\"height\"]},\"color\":{\"type\":\"string\",\"description\":\"Color of the note (e.g., \\\"yellow\\\", \\\"#ffff00\\\")\"},\"type\":{\"type\":\"string\",\"enum\":[\"free\",\"group\"],\"description\":\"Type of note - 'free' for standalone notes, 'group' for notes that group other nodes\"},\"locked\":{\"type\":\"boolean\",\"default\":false,\"description\":\"Whether the note is locked and cannot be edited or moved\"},\"contained_node_ids\":{\"type\":\"array\",\"items\":{\"type\":\"string\"},\"description\":\"For group notes, the IDs of nodes contained within this group\"}},\"required\":[\"id\",\"text\",\"color\",\"type\"]},\"FlowGroup\":{\"type\":\"object\",\"description\":\"A semantic group of flow modules for organizational purposes. Does not affect execution \\u2014 modules remain in their original position in the flow. Groups provide naming and collapsibility in the editor. 
Members are computed dynamically from all nodes on paths between start_id and end_id.\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Display name for this group\"},\"note\":{\"type\":\"string\",\"description\":\"Markdown note shown below the group header\"},\"autocollapse\":{\"type\":\"boolean\",\"default\":false,\"description\":\"If true, this group is collapsed by default in the flow editor. UI hint only.\"},\"start_id\":{\"type\":\"string\",\"description\":\"ID of the first flow module in this group (topological entry point)\"},\"end_id\":{\"type\":\"string\",\"description\":\"ID of the last flow module in this group (topological exit point)\"},\"color\":{\"type\":\"string\",\"description\":\"Color for the group in the flow editor\"}},\"required\":[\"start_id\",\"end_id\"]},\"RetryIf\":{\"type\":\"object\",\"description\":\"Conditional retry based on error or result\",\"properties\":{\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression that returns true to retry. Has access to 'result' and 'error' variables\"}},\"required\":[\"expr\"]},\"StopAfterIf\":{\"type\":\"object\",\"description\":\"Early termination condition for a module\",\"properties\":{\"skip_if_stopped\":{\"type\":\"boolean\",\"description\":\"If true, following steps are skipped when this condition triggers\"},\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression evaluated after the module runs. Can use 'result' (step's result) or 'flow_input'. Return true to stop\"},\"error_message\":{\"type\":\"string\",\"nullable\":true,\"description\":\"Custom error message when stopping with an error. Mutually exclusive with skip_if_stopped. If set to a non-empty string, the flow stops with this error. If empty string, a default error message is used. If null or omitted, no error is raised.\"}},\"required\":[\"expr\"]},\"FlowModule\":{\"type\":\"object\",\"description\":\"A single step in a flow. Can be a script, subflow, loop, or branch\",\"properties\":{\"id\":{\"type\":\"string\",\"description\":\"Unique identifier for this step. Used to reference results via 'results.step_id'. Must be a valid identifier (alphanumeric, underscore, hyphen)\"},\"value\":{\"$ref\":\"#/components/schemas/FlowModuleValue\"},\"stop_after_if\":{\"description\":\"Early termination condition evaluated after this step completes\",\"$ref\":\"#/components/schemas/StopAfterIf\"},\"stop_after_all_iters_if\":{\"description\":\"For loops only - early termination condition evaluated after all iterations complete\",\"$ref\":\"#/components/schemas/StopAfterIf\"},\"skip_if\":{\"type\":\"object\",\"description\":\"Conditionally skip this step based on previous results or flow inputs\",\"properties\":{\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression that returns true to skip. 
Can use 'flow_input' or 'results.<step_id>'\"}},\"required\":[\"expr\"]},\"sleep\":{\"description\":\"Delay before executing this step (in seconds or as expression)\",\"$ref\":\"#/components/schemas/InputTransform\"},\"cache_ttl\":{\"type\":\"number\",\"description\":\"Cache duration in seconds for this step's results\"},\"cache_ignore_s3_path\":{\"type\":\"boolean\"},\"timeout\":{\"description\":\"Maximum execution time in seconds (static value or expression)\",\"$ref\":\"#/components/schemas/InputTransform\"},\"delete_after_secs\":{\"type\":\"integer\",\"description\":\"If set, delete the step's args, result and logs after this many seconds following job completion\"},\"summary\":{\"type\":\"string\",\"description\":\"Short description of what this step does\"},\"mock\":{\"type\":\"object\",\"description\":\"Mock configuration for testing without executing the actual step\",\"properties\":{\"enabled\":{\"type\":\"boolean\",\"description\":\"If true, return mock value instead of executing\"},\"return_value\":{\"description\":\"Value to return when mocked\"}}},\"suspend\":{\"type\":\"object\",\"description\":\"Configuration for approval/resume steps that wait for user input\",\"properties\":{\"required_events\":{\"type\":\"integer\",\"description\":\"Number of approvals required before continuing\"},\"timeout\":{\"type\":\"integer\",\"description\":\"Timeout in seconds before auto-continuing or canceling\"},\"resume_form\":{\"type\":\"object\",\"description\":\"Form schema for collecting input when resuming\",\"properties\":{\"schema\":{\"type\":\"object\",\"description\":\"JSON Schema for the resume form\"}}},\"user_auth_required\":{\"type\":\"boolean\",\"description\":\"If true, only authenticated users can approve\"},\"user_groups_required\":{\"description\":\"Expression or list of groups that can approve\",\"$ref\":\"#/components/schemas/InputTransform\"},\"self_approval_disabled\":{\"type\":\"boolean\",\"description\":\"If true, the user who started the flow cannot approve\"},\"hide_cancel\":{\"type\":\"boolean\",\"description\":\"If true, hide the cancel button on the approval form\"},\"continue_on_disapprove_timeout\":{\"type\":\"boolean\",\"description\":\"If true, continue flow on timeout instead of canceling\"}}},\"priority\":{\"type\":\"number\",\"description\":\"Execution priority for this step (higher numbers run first)\"},\"continue_on_error\":{\"type\":\"boolean\",\"description\":\"If true, flow continues even if this step fails\"},\"retry\":{\"description\":\"Retry configuration if this step fails\",\"$ref\":\"#/components/schemas/Retry\"},\"debouncing\":{\"description\":\"Debounce configuration for this step (EE only)\",\"type\":\"object\",\"properties\":{\"debounce_delay_s\":{\"type\":\"integer\",\"description\":\"Delay in seconds to debounce this step's executions across flow runs\"},\"debounce_key\":{\"type\":\"string\",\"description\":\"Expression to group debounced executions. Supports $workspace and $args[name]. 
Default: $workspace/flow/<flow_path>-<step_id>\"},\"debounce_args_to_accumulate\":{\"type\":\"array\",\"description\":\"Array-type arguments to accumulate across debounced executions\",\"items\":{\"type\":\"string\"}},\"max_total_debouncing_time\":{\"type\":\"integer\",\"description\":\"Maximum total time in seconds before forced execution\"},\"max_total_debounces_amount\":{\"type\":\"integer\",\"description\":\"Maximum number of debounces before forced execution\"}}}},\"required\":[\"value\",\"id\"]},\"InputTransform\":{\"description\":\"Maps input parameters for a step. Can be a static value or a JavaScript expression that references previous results or flow inputs\",\"oneOf\":[{\"$ref\":\"#/components/schemas/StaticTransform\"},{\"$ref\":\"#/components/schemas/JavascriptTransform\"},{\"$ref\":\"#/components/schemas/AiTransform\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"static\":\"#/components/schemas/StaticTransform\",\"javascript\":\"#/components/schemas/JavascriptTransform\",\"ai\":\"#/components/schemas/AiTransform\"}}},\"StaticTransform\":{\"type\":\"object\",\"description\":\"Static value passed directly to the step. Use for hardcoded values or resource references like '$res:path/to/resource'\",\"properties\":{\"value\":{\"description\":\"The static value. For resources, use format '$res:path/to/resource'\"},\"type\":{\"type\":\"string\",\"enum\":[\"static\"]}},\"required\":[\"type\"]},\"JavascriptTransform\":{\"type\":\"object\",\"description\":\"JavaScript expression evaluated at runtime. Can reference previous step results via 'results.step_id' or flow inputs via 'flow_input.property'. Inside loops, use 'flow_input.iter.value' for the current iteration value\",\"properties\":{\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression returning the value. Available variables - results (object with all previous step results), flow_input (flow inputs), flow_input.iter (in loops)\"},\"type\":{\"type\":\"string\",\"enum\":[\"javascript\"]}},\"required\":[\"expr\",\"type\"]},\"AiTransform\":{\"type\":\"object\",\"description\":\"Value resolved by the AI runtime for this input. 
The AI engine decides how to satisfy the parameter.\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"ai\"]}},\"required\":[\"type\"]},\"AIProviderKind\":{\"type\":\"string\",\"description\":\"Supported AI provider types\",\"enum\":[\"openai\",\"azure_openai\",\"anthropic\",\"mistral\",\"deepseek\",\"googleai\",\"groq\",\"openrouter\",\"togetherai\",\"customai\",\"aws_bedrock\"]},\"ProviderConfig\":{\"type\":\"object\",\"description\":\"Complete AI provider configuration with resource reference and model selection\",\"properties\":{\"kind\":{\"$ref\":\"#/components/schemas/AIProviderKind\"},\"resource\":{\"type\":\"string\",\"description\":\"Resource reference in format '$res:{resource_path}' pointing to provider credentials\"},\"model\":{\"type\":\"string\",\"description\":\"Model identifier (e.g., 'gpt-4', 'claude-3-opus-20240229', 'gemini-pro')\"}},\"required\":[\"kind\",\"resource\",\"model\"]},\"StaticProviderTransform\":{\"type\":\"object\",\"description\":\"Static provider configuration passed directly to the AI agent\",\"properties\":{\"value\":{\"$ref\":\"#/components/schemas/ProviderConfig\"},\"type\":{\"type\":\"string\",\"enum\":[\"static\"]}},\"required\":[\"type\",\"value\"]},\"ProviderTransform\":{\"description\":\"Provider configuration - can be static (ProviderConfig), JavaScript expression, or AI-determined\",\"oneOf\":[{\"$ref\":\"#/components/schemas/StaticProviderTransform\"},{\"$ref\":\"#/components/schemas/JavascriptTransform\"},{\"$ref\":\"#/components/schemas/AiTransform\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"static\":\"#/components/schemas/StaticProviderTransform\",\"javascript\":\"#/components/schemas/JavascriptTransform\",\"ai\":\"#/components/schemas/AiTransform\"}}},\"MemoryOff\":{\"type\":\"object\",\"description\":\"No conversation memory/context\",\"properties\":{\"kind\":{\"type\":\"string\",\"enum\":[\"off\"]}},\"required\":[\"kind\"]},\"MemoryAuto\":{\"type\":\"object\",\"description\":\"Automatic context management\",\"properties\":{\"kind\":{\"type\":\"string\",\"enum\":[\"auto\"]},\"context_length\":{\"type\":\"integer\",\"description\":\"Maximum number of messages to retain in context\"},\"memory_id\":{\"type\":\"string\",\"description\":\"Identifier for persistent memory across agent invocations\"}},\"required\":[\"kind\"]},\"MemoryMessage\":{\"type\":\"object\",\"description\":\"A single message in conversation history\",\"properties\":{\"role\":{\"type\":\"string\",\"enum\":[\"user\",\"assistant\",\"system\"]},\"content\":{\"type\":\"string\"}},\"required\":[\"role\",\"content\"]},\"MemoryManual\":{\"type\":\"object\",\"description\":\"Explicit message history\",\"properties\":{\"kind\":{\"type\":\"string\",\"enum\":[\"manual\"]},\"messages\":{\"type\":\"array\",\"items\":{\"$ref\":\"#/components/schemas/MemoryMessage\"}}},\"required\":[\"kind\",\"messages\"]},\"MemoryConfig\":{\"description\":\"Conversation memory configuration\",\"oneOf\":[{\"$ref\":\"#/components/schemas/MemoryOff\"},{\"$ref\":\"#/components/schemas/MemoryAuto\"},{\"$ref\":\"#/components/schemas/MemoryManual\"}],\"discriminator\":{\"propertyName\":\"kind\",\"mapping\":{\"off\":\"#/components/schemas/MemoryOff\",\"auto\":\"#/components/schemas/MemoryAuto\",\"manual\":\"#/components/schemas/MemoryManual\"}}},\"StaticMemoryTransform\":{\"type\":\"object\",\"description\":\"Static memory configuration passed directly to the AI 
agent\",\"properties\":{\"value\":{\"$ref\":\"#/components/schemas/MemoryConfig\"},\"type\":{\"type\":\"string\",\"enum\":[\"static\"]}},\"required\":[\"type\",\"value\"]},\"MemoryTransform\":{\"description\":\"Memory configuration - can be static (MemoryConfig), JavaScript expression, or AI-determined\",\"oneOf\":[{\"$ref\":\"#/components/schemas/StaticMemoryTransform\"},{\"$ref\":\"#/components/schemas/JavascriptTransform\"},{\"$ref\":\"#/components/schemas/AiTransform\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"static\":\"#/components/schemas/StaticMemoryTransform\",\"javascript\":\"#/components/schemas/JavascriptTransform\",\"ai\":\"#/components/schemas/AiTransform\"}}},\"FlowModuleValue\":{\"description\":\"The actual implementation of a flow step. Can be a script (inline or referenced), subflow, loop, branch, or special module type\",\"oneOf\":[{\"$ref\":\"#/components/schemas/RawScript\"},{\"$ref\":\"#/components/schemas/PathScript\"},{\"$ref\":\"#/components/schemas/PathFlow\"},{\"$ref\":\"#/components/schemas/ForloopFlow\"},{\"$ref\":\"#/components/schemas/WhileloopFlow\"},{\"$ref\":\"#/components/schemas/BranchOne\"},{\"$ref\":\"#/components/schemas/BranchAll\"},{\"$ref\":\"#/components/schemas/Identity\"},{\"$ref\":\"#/components/schemas/AiAgent\"}],\"discriminator\":{\"propertyName\":\"type\",\"mapping\":{\"rawscript\":\"#/components/schemas/RawScript\",\"script\":\"#/components/schemas/PathScript\",\"flow\":\"#/components/schemas/PathFlow\",\"forloopflow\":\"#/components/schemas/ForloopFlow\",\"whileloopflow\":\"#/components/schemas/WhileloopFlow\",\"branchone\":\"#/components/schemas/BranchOne\",\"branchall\":\"#/components/schemas/BranchAll\",\"identity\":\"#/components/schemas/Identity\",\"aiagent\":\"#/components/schemas/AiAgent\"}}},\"RawScript\":{\"type\":\"object\",\"description\":\"Inline script with code defined directly in the flow. Use 'bun' as default language if unspecified. The script receives arguments from input_transforms\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Map of parameter names to their values (static or JavaScript expressions). These become the script's input arguments\",\"additionalProperties\":{\"$ref\":\"#/components/schemas/InputTransform\"}},\"content\":{\"type\":\"string\",\"description\":\"The script source code. 
Should export a 'main' function\"},\"language\":{\"type\":\"string\",\"description\":\"Programming language for this script\",\"enum\":[\"deno\",\"bun\",\"python3\",\"go\",\"bash\",\"powershell\",\"postgresql\",\"mysql\",\"bigquery\",\"snowflake\",\"mssql\",\"oracledb\",\"graphql\",\"nativets\",\"php\",\"rust\",\"ansible\",\"csharp\",\"nu\",\"java\",\"ruby\",\"rlang\",\"duckdb\"]},\"path\":{\"type\":\"string\",\"description\":\"Optional path for saving this script\"},\"lock\":{\"type\":\"string\",\"description\":\"Lock file content for dependencies\"},\"type\":{\"type\":\"string\",\"enum\":[\"rawscript\"]},\"tag\":{\"type\":\"string\",\"description\":\"Worker group tag for execution routing\"},\"concurrent_limit\":{\"type\":\"number\",\"description\":\"Maximum concurrent executions of this script\"},\"concurrency_time_window_s\":{\"type\":\"number\",\"description\":\"Time window for concurrent_limit\"},\"custom_concurrency_key\":{\"type\":\"string\",\"description\":\"Custom key for grouping concurrent executions\"},\"is_trigger\":{\"type\":\"boolean\",\"description\":\"If true, this script is a trigger that can start the flow\"},\"assets\":{\"type\":\"array\",\"description\":\"External resources this script accesses (S3 objects, resources, etc.)\",\"items\":{\"type\":\"object\",\"required\":[\"path\",\"kind\"],\"properties\":{\"path\":{\"type\":\"string\",\"description\":\"Path to the asset\"},\"kind\":{\"type\":\"string\",\"description\":\"Type of asset\",\"enum\":[\"s3object\",\"resource\",\"ducklake\",\"datatable\",\"volume\"]},\"access_type\":{\"type\":\"string\",\"nullable\":true,\"description\":\"Access level for this asset\",\"enum\":[\"r\",\"w\",\"rw\"]},\"alt_access_type\":{\"type\":\"string\",\"nullable\":true,\"description\":\"Alternative access level\",\"enum\":[\"r\",\"w\",\"rw\"]}}}}},\"required\":[\"type\",\"content\",\"language\",\"input_transforms\"]},\"PathScript\":{\"type\":\"object\",\"description\":\"Reference to an existing script by path. Use this when calling a previously saved script instead of writing inline code\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Map of parameter names to their values (static or JavaScript expressions). These become the script's input arguments\",\"additionalProperties\":{\"$ref\":\"#/components/schemas/InputTransform\"}},\"path\":{\"type\":\"string\",\"description\":\"Path to the script in the workspace (e.g., 'f/scripts/send_email')\"},\"hash\":{\"type\":\"string\",\"description\":\"Optional specific version hash of the script to use\"},\"type\":{\"type\":\"string\",\"enum\":[\"script\"]},\"tag_override\":{\"type\":\"string\",\"description\":\"Override the script's default worker group tag\"},\"is_trigger\":{\"type\":\"boolean\",\"description\":\"If true, this script is a trigger that can start the flow\"}},\"required\":[\"type\",\"path\",\"input_transforms\"]},\"PathFlow\":{\"type\":\"object\",\"description\":\"Reference to an existing flow by path. Use this to call another flow as a subflow\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Map of parameter names to their values (static or JavaScript expressions). 
These become the subflow's input arguments\",\"additionalProperties\":{\"$ref\":\"#/components/schemas/InputTransform\"}},\"path\":{\"type\":\"string\",\"description\":\"Path to the flow in the workspace (e.g., 'f/flows/process_user')\"},\"type\":{\"type\":\"string\",\"enum\":[\"flow\"]}},\"required\":[\"type\",\"path\",\"input_transforms\"]},\"ForloopFlow\":{\"type\":\"object\",\"description\":\"Executes nested modules in a loop over an iterator. Inside the loop, use 'flow_input.iter.value' to access the current iteration value, and 'flow_input.iter.index' for the index. Supports parallel execution for better performance on I/O-bound operations\",\"properties\":{\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute for each iteration. These can reference the iteration value via 'flow_input.iter.value'\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"iterator\":{\"description\":\"JavaScript expression that returns an array to iterate over. Can reference 'results.step_id' or 'flow_input'\",\"$ref\":\"#/components/schemas/InputTransform\"},\"skip_failures\":{\"type\":\"boolean\",\"description\":\"If true, iteration failures don't stop the loop. Failed iterations return null\"},\"type\":{\"type\":\"string\",\"enum\":[\"forloopflow\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, iterations run concurrently (faster for I/O-bound operations). Use with parallelism to control concurrency\"},\"parallelism\":{\"description\":\"Maximum number of concurrent iterations when parallel=true. Limits resource usage. Can be static number or expression\",\"$ref\":\"#/components/schemas/InputTransform\"},\"squash\":{\"type\":\"boolean\"}},\"required\":[\"modules\",\"iterator\",\"skip_failures\",\"type\"]},\"WhileloopFlow\":{\"type\":\"object\",\"description\":\"Executes nested modules repeatedly while a condition is true. The loop checks the condition after each iteration. Use stop_after_if on modules to control loop termination\",\"properties\":{\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute in each iteration. Use stop_after_if to control when the loop ends\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"skip_failures\":{\"type\":\"boolean\",\"description\":\"If true, iteration failures don't stop the loop. Failed iterations return null\"},\"type\":{\"type\":\"string\",\"enum\":[\"whileloopflow\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, iterations run concurrently (use with caution in while loops)\"},\"parallelism\":{\"description\":\"Maximum number of concurrent iterations when parallel=true\",\"$ref\":\"#/components/schemas/InputTransform\"},\"squash\":{\"type\":\"boolean\"}},\"required\":[\"modules\",\"skip_failures\",\"type\"]},\"BranchOne\":{\"type\":\"object\",\"description\":\"Conditional branching where only the first matching branch executes. Branches are evaluated in order, and the first one with a true expression runs. If no branches match, the default branch executes\",\"properties\":{\"branches\":{\"type\":\"array\",\"description\":\"Array of branches to evaluate in order. The first branch with expr evaluating to true executes\",\"items\":{\"type\":\"object\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Short description of this branch condition\"},\"expr\":{\"type\":\"string\",\"description\":\"JavaScript expression that returns boolean. Can use 'results.step_id' or 'flow_input'. 
First true expr wins\"},\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute if this branch's expr is true\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}}},\"required\":[\"modules\",\"expr\"]}},\"default\":{\"type\":\"array\",\"description\":\"Steps to execute if no branch expressions match\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}},\"type\":{\"type\":\"string\",\"enum\":[\"branchone\"]}},\"required\":[\"branches\",\"default\",\"type\"]},\"BranchAll\":{\"type\":\"object\",\"description\":\"Parallel branching where all branches execute simultaneously. Unlike BranchOne, all branches run regardless of conditions. Useful for executing independent tasks concurrently\",\"properties\":{\"branches\":{\"type\":\"array\",\"description\":\"Array of branches that all execute (either in parallel or sequentially)\",\"items\":{\"type\":\"object\",\"properties\":{\"summary\":{\"type\":\"string\",\"description\":\"Short description of this branch's purpose\"},\"skip_failure\":{\"type\":\"boolean\",\"description\":\"If true, failure in this branch doesn't fail the entire flow\"},\"modules\":{\"type\":\"array\",\"description\":\"Steps to execute in this branch\",\"items\":{\"$ref\":\"#/components/schemas/FlowModule\"}}},\"required\":[\"modules\"]}},\"type\":{\"type\":\"string\",\"enum\":[\"branchall\"]},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, all branches execute concurrently. If false, they execute sequentially\"}},\"required\":[\"branches\",\"type\"]},\"AgentTool\":{\"type\":\"object\",\"description\":\"A tool available to an AI agent. Can be a flow module or an external MCP (Model Context Protocol) tool\",\"properties\":{\"id\":{\"type\":\"string\",\"description\":\"Unique identifier for this tool. Cannot contain spaces - use underscores instead (e.g., 'get_user_data' not 'get user data')\"},\"summary\":{\"type\":\"string\",\"description\":\"Short description of what this tool does (shown to the AI)\"},\"value\":{\"$ref\":\"#/components/schemas/ToolValue\"}},\"required\":[\"id\",\"value\"]},\"ToolValue\":{\"description\":\"The implementation of a tool. Can be a flow module (script/flow) or an MCP tool reference\",\"oneOf\":[{\"$ref\":\"#/components/schemas/FlowModuleTool\"},{\"$ref\":\"#/components/schemas/McpToolValue\"},{\"$ref\":\"#/components/schemas/WebsearchToolValue\"}],\"discriminator\":{\"propertyName\":\"tool_type\",\"mapping\":{\"flowmodule\":\"#/components/schemas/FlowModuleTool\",\"mcp\":\"#/components/schemas/McpToolValue\",\"websearch\":\"#/components/schemas/WebsearchToolValue\"}}},\"FlowModuleTool\":{\"description\":\"A tool implemented as a flow module (script, flow, etc.). The AI can call this like any other flow module\",\"allOf\":[{\"type\":\"object\",\"properties\":{\"tool_type\":{\"type\":\"string\",\"enum\":[\"flowmodule\"]}},\"required\":[\"tool_type\"]},{\"$ref\":\"#/components/schemas/FlowModuleValue\"}]},\"WebsearchToolValue\":{\"type\":\"object\",\"description\":\"A tool implemented as a websearch tool. The AI can call this like any other websearch tool\",\"properties\":{\"tool_type\":{\"type\":\"string\",\"enum\":[\"websearch\"]}},\"required\":[\"tool_type\"]},\"McpToolValue\":{\"type\":\"object\",\"description\":\"Reference to an external MCP (Model Context Protocol) tool. 
The AI can call tools from MCP servers\",\"properties\":{\"tool_type\":{\"type\":\"string\",\"enum\":[\"mcp\"]},\"resource_path\":{\"type\":\"string\",\"description\":\"Path to the MCP resource/server configuration\"},\"include_tools\":{\"type\":\"array\",\"description\":\"Whitelist of specific tools to include from this MCP server\",\"items\":{\"type\":\"string\"}},\"exclude_tools\":{\"type\":\"array\",\"description\":\"Blacklist of tools to exclude from this MCP server\",\"items\":{\"type\":\"string\"}}},\"required\":[\"tool_type\",\"resource_path\"]},\"AiAgent\":{\"type\":\"object\",\"description\":\"AI agent step that can use tools to accomplish tasks. The agent receives inputs and can call any of its configured tools to complete the task\",\"properties\":{\"input_transforms\":{\"type\":\"object\",\"description\":\"Input parameters for the AI agent mapped to their values\",\"properties\":{\"provider\":{\"$ref\":\"#/components/schemas/ProviderTransform\"},\"output_type\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Output format type.\\nValid values: 'text' (default) - plain text response, 'image' - image generation\\n\"},\"user_message\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"The user's prompt/message to the AI agent. Supports variable interpolation with flow.input syntax.\"},\"system_prompt\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"System instructions that guide the AI's behavior, persona, and response style. Optional.\"},\"streaming\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Boolean. If true, stream the AI response incrementally.\\nStreaming events include: token_delta, tool_call, tool_call_arguments, tool_execution, tool_result\\n\"},\"memory\":{\"$ref\":\"#/components/schemas/MemoryTransform\"},\"output_schema\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"JSON Schema object defining structured output format. Used when you need the AI to return data in a specific shape.\\nSupports standard JSON Schema properties: type, properties, required, items, enum, pattern, minLength, maxLength, minimum, maximum, etc.\\nExample: { type: 'object', properties: { name: { type: 'string' }, age: { type: 'integer' } }, required: ['name'] }\\n\"},\"user_attachments\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Array of file references (images or PDFs) for the AI agent.\\nFormat: Array<{ bucket: string, key: string }> - S3 object references\\nExample: [{ bucket: 'my-bucket', key: 'documents/report.pdf' }]\\n\"},\"max_completion_tokens\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Integer. Maximum number of tokens the AI will generate in its response.\\nRange: 1 to 4,294,967,295. Typical values: 256-4096 for most use cases.\\n\"},\"temperature\":{\"allOf\":[{\"$ref\":\"#/components/schemas/InputTransform\"}],\"description\":\"Float. Controls randomness/creativity of responses.\\nRange: 0.0 to 2.0 (provider-dependent)\\n- 0.0 = deterministic, focused responses\\n- 0.7 = balanced (common default)\\n- 1.0+ = more creative/random\\n\"}},\"required\":[\"provider\",\"user_message\",\"output_type\"]},\"tools\":{\"type\":\"array\",\"description\":\"Array of tools the agent can use. 
The agent decides which tools to call based on the task\",\"items\":{\"$ref\":\"#/components/schemas/AgentTool\"}},\"type\":{\"type\":\"string\",\"enum\":[\"aiagent\"]},\"omit_output_from_conversation\":{\"type\":\"boolean\",\"default\":false,\"description\":\"If true, this AI agent step does not persist its assistant or tool messages to the flow conversation when chat mode is enabled.\"},\"parallel\":{\"type\":\"boolean\",\"description\":\"If true, the agent can execute multiple tool calls in parallel\"}},\"required\":[\"tools\",\"type\",\"input_transforms\"]},\"Identity\":{\"type\":\"object\",\"description\":\"Pass-through module that returns its input unchanged. Useful for flow structure or as a placeholder\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"identity\"]},\"flow\":{\"type\":\"boolean\",\"description\":\"If true, marks this as a flow identity (special handling)\"}},\"required\":[\"type\"]},\"FlowStatus\":{\"type\":\"object\",\"properties\":{\"step\":{\"type\":\"integer\"},\"modules\":{\"type\":\"array\",\"items\":{\"$ref\":\"#/components/schemas/FlowStatusModule\"}},\"user_states\":{\"additionalProperties\":true},\"preprocessor_module\":{\"allOf\":[{\"$ref\":\"#/components/schemas/FlowStatusModule\"}]},\"failure_module\":{\"allOf\":[{\"$ref\":\"#/components/schemas/FlowStatusModule\"},{\"type\":\"object\",\"properties\":{\"parent_module\":{\"type\":\"string\"}}}]},\"retry\":{\"type\":\"object\",\"properties\":{\"fail_count\":{\"type\":\"integer\"},\"failed_jobs\":{\"type\":\"array\",\"items\":{\"type\":\"string\",\"format\":\"uuid\"}}}}},\"required\":[\"step\",\"modules\",\"failure_module\"]},\"FlowStatusModule\":{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"WaitingForPriorSteps\",\"WaitingForEvents\",\"WaitingForExecutor\",\"InProgress\",\"Success\",\"Failure\"]},\"id\":{\"type\":\"string\"},\"job\":{\"type\":\"string\",\"format\":\"uuid\"},\"count\":{\"type\":\"integer\"},\"progress\":{\"type\":\"integer\"},\"iterator\":{\"type\":\"object\",\"properties\":{\"index\":{\"type\":\"integer\"},\"itered\":{\"type\":\"array\",\"items\":{}},\"itered_len\":{\"type\":\"integer\"},\"args\":{}}},\"flow_jobs\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"flow_jobs_success\":{\"type\":\"array\",\"items\":{\"type\":\"boolean\"}},\"flow_jobs_duration\":{\"type\":\"object\",\"properties\":{\"started_at\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}},\"duration_ms\":{\"type\":\"array\",\"items\":{\"type\":\"integer\"}}}},\"branch_chosen\":{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"branch\",\"default\"]},\"branch\":{\"type\":\"integer\"}},\"required\":[\"type\"]},\"branchall\":{\"type\":\"object\",\"properties\":{\"branch\":{\"type\":\"integer\"},\"len\":{\"type\":\"integer\"}},\"required\":[\"branch\",\"len\"]},\"approvers\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"properties\":{\"resume_id\":{\"type\":\"integer\"},\"approver\":{\"type\":\"string\"}},\"required\":[\"resume_id\",\"approver\"]}},\"failed_retries\":{\"type\":\"array\",\"items\":{\"type\":\"string\",\"format\":\"uuid\"}},\"skipped\":{\"type\":\"boolean\"},\"agent_actions\":{\"type\":\"array\",\"items\":{\"type\":\"object\",\"oneOf\":[{\"type\":\"object\",\"properties\":{\"job_id\":{\"type\":\"string\",\"format\":\"uuid\"},\"function_name\":{\"type\":\"string\"},\"type\":{\"type\":\"string\",\"enum\":[\"tool_call\"]},\"module_id\":{\"type\":\"string\"}},\"required\":[\"job_id\",\"function_name\",\"type\",\"module_id\"]},{\
"type\":\"object\",\"properties\":{\"call_id\":{\"type\":\"string\",\"format\":\"uuid\"},\"function_name\":{\"type\":\"string\"},\"resource_path\":{\"type\":\"string\"},\"type\":{\"type\":\"string\",\"enum\":[\"mcp_tool_call\"]},\"arguments\":{\"type\":\"object\"}},\"required\":[\"call_id\",\"function_name\",\"resource_path\",\"type\"]},{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"web_search\"]}},\"required\":[\"type\"]},{\"type\":\"object\",\"properties\":{\"type\":{\"type\":\"string\",\"enum\":[\"message\"]}},\"required\":[\"content\",\"type\"]}]}},\"agent_actions_success\":{\"type\":\"array\",\"items\":{\"type\":\"boolean\"}}},\"required\":[\"type\"]}}";
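To orient the reader in the schema above, here is a minimal sketch of a two-step flow object that would validate against it. All paths, code, and values are illustrative, not taken from the package.

```ts
// A minimal flow conforming to the OpenFlow schema above.
// OpenFlow requires `summary` and `value`; FlowValue requires `modules`;
// each RawScript module requires type, content, language, input_transforms.
export const exampleFlow = {
  summary: "Greet a user",
  schema: {
    // JSON Schema for flow inputs
    type: "object",
    properties: { name: { type: "string" } },
    required: ["name"],
  },
  value: {
    modules: [
      {
        id: "a", // referenced later as results.a
        value: {
          type: "rawscript",
          language: "bun", // per the schema, 'bun' is the default language
          content: "export function main(name: string) { return `Hello ${name}` }",
          input_transforms: {
            // JavaScript transform reading a flow input
            name: { type: "javascript", expr: "flow_input.name" },
          },
        },
      },
      {
        id: "b",
        value: {
          type: "rawscript",
          language: "bun",
          content: "export function main(msg: string) { return msg.toUpperCase() }",
          input_transforms: {
            // Reference the previous step's result
            msg: { type: "javascript", expr: "results.a" },
          },
        },
      },
    ],
  },
};
```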
+ export declare const CLI_COMMANDS = "# Windmill CLI Commands\n\nThe Windmill CLI (`wmill`) provides commands for managing scripts, flows, apps, and other resources.\n\n## Global Options\n\n- `--workspace <workspace:string>` - Specify the target workspace. This overrides the default workspace.\n- `--debug --verbose` - Show debug/verbose logs\n- `--show-diffs` - Show diff information when syncing (may show sensitive information)\n- `--token <token:string>` - Specify an API token. This will override any stored token.\n- `--base-url <baseUrl:string>` - Specify the base URL of the API. If used, --token and --workspace are required, and any locally set remote/workspace will not be used.\n- `--config-dir <configDir:string>` - Specify a custom config directory. Overrides WMILL_CONFIG_DIR environment variable and default ~/.config location.\n\n## Commands\n\n### app\n\napp related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `app list` - list all apps\n - `--json` - Output as JSON (for piping to jq)\n- `app get <path:string>` - get an app's details\n - `--json` - Output as JSON (for piping to jq)\n- `app push <file_path:string> <remote_path:string>` - push a local app \n- `app dev [app_folder:string]` - Start a development server for building apps with live reload and hot module replacement\n - `--port <port:number>` - Port to run the dev server on (will find next available port if occupied)\n - `--host <host:string>` - Host to bind the dev server to\n - `--entry <entry:string>` - Entry point file (default: index.ts for Svelte/Vue, index.tsx otherwise)\n - `--no-open` - Don't automatically open the browser\n- `app lint [app_folder:string]` - Lint a raw app folder to validate structure and buildability\n - `--fix` - Attempt to fix common issues (not implemented yet)\n- `app new` - create a new raw app from a template\n - `--summary <summary:string>` - App summary (short description). Skips the prompt when provided. Triggers non-interactive mode.\n - `--path <path:string>` - App path (e.g., f/folder/my_app or u/username/my_app). Skips the prompt when provided. Triggers non-interactive mode.\n - `--framework <framework:string>` - Framework template: react19 | react18 | svelte5 | vue. Skips the prompt when provided. Triggers non-interactive mode.\n - `--datatable <datatable:string>` - Datatable to wire up. Without this flag in non-interactive mode, no datatable is configured.\n - `--schema <schema:string>` - Schema to use with --datatable. 
Created (CREATE SCHEMA IF NOT EXISTS) if it doesn't already exist.\n - `--overwrite` - Overwrite the target directory if it already exists, without prompting.\n - `--no-open-in-desktop` - Do not prompt to open the new app in Claude Desktop.\n- `app generate-agents [app_folder:string]` - regenerate AGENTS.md and DATATABLES.md from remote workspace\n- `app set-permissioned-as <path:string> <email:string>` - Set the on_behalf_of_email for an app (requires admin or wm_deployers group)\n\n### audit\n\nView audit logs (requires admin)\n\n**Subcommands:**\n\n- `audit list` - List audit log entries\n- `audit get <id:string>` - Get a specific audit log entry\n - `--json` - Output as JSON (for piping to jq)\n\n### config\n\nShow all available wmill.yaml configuration options\n\n**Options:**\n- `--json` - Output as JSON for programmatic consumption\n\n**Subcommands:**\n\n- `config migrate` - Migrate wmill.yaml from gitBranches/environments to workspaces format\n\n### dependencies\n\nworkspace dependencies related commands\n\n**Alias:** `deps`\n\n**Subcommands:**\n\n- `dependencies push <file_path:string>` - Push workspace dependencies from a local file\n\n### dev\n\nWatch local file changes and live-reload the dev page for preview. Does NOT deploy to the remote workspace \u2014 use wmill sync push for that.\n\n**Options:**\n- `--includes <pattern...:string>` - Filter paths given a glob pattern or path\n- `--proxy-port <port:number>` - Port for a localhost reverse proxy to the remote Windmill server\n- `--path <path:string>` - Watch a specific windmill path (e.g., u/admin/my_script or f/my_flow)\n- `--no-open` - Do not open the browser automatically\n\n### docs\n\nSearch Windmill documentation.\n\n**Arguments:** `<query:string>`\n\n**Options:**\n- `--json` - Output results as JSON.\n\n### flow\n\nflow related commands\n\n**Options:**\n- `--show-archived` - Enable archived flows in output\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `flow list` - list all flows\n - `--show-archived` - Enable archived flows in output\n - `--json` - Output as JSON (for piping to jq)\n- `flow get <path:string>` - get a flow's details\n - `--json` - Output as JSON (for piping to jq)\n- `flow push <file_path:string> <remote_path:string>` - push a local flow spec. This overrides any remote versions.\n - `--message <message:string>` - Deployment message\n- `flow run <path:string>` - run a flow by path.\n - `-d --data <data:string>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n- `flow preview <flow_path:string>` - preview a local flow without deploying it. Runs the flow definition from local files and uses local PathScripts by default.\n - `-d --data <data:string>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. 
Useful for scripting.\n - `--remote` - Use deployed workspace scripts for PathScript steps instead of local files.\n- `flow new <flow_path:string>` - create a new empty flow\n - `--summary <summary:string>` - flow summary\n - `--description <description:string>` - flow description\n- `flow bootstrap <flow_path:string>` - create a new empty flow (alias for new)\n - `--summary <summary:string>` - flow summary\n - `--description <description:string>` - flow description\n- `flow history <path:string>` - Show version history for a flow\n - `--json` - Output as JSON (for piping to jq)\n- `flow show-version <path:string> <version:string>` - Show a specific version of a flow\n - `--json` - Output as JSON (for piping to jq)\n- `flow set-permissioned-as <path:string> <email:string>` - Set the on_behalf_of_email for a flow (requires admin or wm_deployers group)\n\n### folder\n\nfolder related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `folder list` - list all folders\n - `--json` - Output as JSON (for piping to jq)\n- `folder get <name:string>` - get a folder's details\n - `--json` - Output as JSON (for piping to jq)\n- `folder new <name:string>` - create a new folder locally\n - `--summary <summary:string>` - folder summary\n- `folder push <name:string>` - push a local folder to the remote by name. This overrides any remote versions.\n- `folder add-missing` - create default folder.meta.yaml for all subdirectories of f/ that are missing one\n - `-y, --yes` - skip confirmation prompt\n- `folder show-rules <name:string>` - Show default_permissioned_as rules for a folder. Use --test-path to see which rule matches a given item path.\n - `--test-path <path:string>` - Test which rule matches this item path (e.g. 
f/prod/jobs/my_script)\n - `--json` - Output as JSON\n\n### generate-metadata\n\nGenerate metadata (locks, schemas) for all scripts, flows, and apps\n\n**Arguments:** `[folder:string]`\n\n**Options:**\n- `--yes` - Skip confirmation prompt\n- `--dry-run` - Show what would be updated without making changes\n- `--lock-only` - Re-generate only the lock files\n- `--schema-only` - Re-generate only script schemas (skips flows and apps)\n- `--skip-scripts` - Skip processing scripts\n- `--skip-flows` - Skip processing flows\n- `--skip-apps` - Skip processing apps\n- `--strict-folder-boundaries` - Only update items inside the specified folder (requires folder argument)\n- `-i --includes <patterns:file[]>` - Comma separated patterns to specify which files to include\n- `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which files to exclude\n\n**Subcommands:**\n\n- `generate-metadata rehash [folder:string]`\n - `--skip-scripts` - Skip processing scripts\n - `--skip-flows` - Skip processing flows\n - `--skip-apps` - Skip processing apps\n - `-i --includes <patterns:file[]>` - Comma separated patterns to specify which files to include\n - `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which files to exclude\n\n### gitsync-settings\n\nManage git-sync settings between local wmill.yaml and Windmill backend\n\n**Subcommands:**\n\n- `gitsync-settings pull` - Pull git-sync settings from Windmill backend to local wmill.yaml\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo)\n - `--default` - Write settings to top-level defaults instead of overrides\n - `--replace` - Replace existing settings (non-interactive mode)\n - `--override` - Add branch-specific override (non-interactive mode)\n - `--diff` - Show differences without applying changes\n - `--json-output` - Output in JSON format\n - `--with-backend-settings <json:string>` - Use provided JSON settings instead of querying backend (for testing)\n - `--yes` - Skip interactive prompts and use default behavior\n - `--promotion <branch:string>` - Use promotionOverrides from the specified branch instead of regular overrides\n- `gitsync-settings push` - Push git-sync settings from local wmill.yaml to Windmill backend\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo)\n - `--diff` - Show what would be pushed without applying changes\n - `--json-output` - Output in JSON format\n - `--with-backend-settings <json:string>` - Use provided JSON settings instead of querying backend (for testing)\n - `--yes` - Skip interactive prompts and use default behavior\n - `--promotion <branch:string>` - Use promotionOverrides from the specified branch instead of regular overrides\n\n### group\n\nManage workspace groups\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `group list` - List all groups in the workspace\n - `--json` - Output as JSON (for piping to jq)\n- `group get <name:string>` - Get group details and members\n - `--json` - Output as JSON (for piping to jq)\n- `group create <name:string>` - Create a new group\n - `--summary <summary:string>` - Group summary/description\n- `group delete <name:string>` - Delete a group\n- `group add-user <name:string> <username:string>` - Add a user to a group\n- `group remove-user <name:string> <username:string>` - Remove a user from a group\n\n### hub\n\nHub related commands. EXPERIMENTAL. INTERNAL USE ONLY.\n\n**Subcommands:**\n\n- `hub pull` - pull any supported definitions. 
EXPERIMENTAL.\n\n### init\n\nBootstrap a windmill project with a wmill.yaml file\n\n**Options:**\n- `--use-default` - Use default settings without checking backend\n- `--use-backend` - Use backend git-sync settings if available\n- `--repository <repo:string>` - Specify repository path (e.g., u/user/repo) when using backend settings\n- `--bind-profile` - Automatically bind active workspace profile to current Git branch\n- `--no-bind-profile` - Skip workspace profile binding prompt\n\n### instance\n\nsync local with a remote instance or the opposite (push or pull)\n\n**Subcommands:**\n\n- `instance add [instance_name:string] [remote:string] [token:string]` - Add a new instance\n- `instance remove <instance:string:instance>` - Remove an instance\n- `instance switch <instance:string:instance>` - Switch the current instance\n- `instance pull` - Pull instance settings, users, configs, instance groups and overwrite local\n - `--yes` - Pull without needing confirmation\n - `--dry-run` - Perform a dry run without making changes\n - `--skip-users` - Skip pulling users\n - `--skip-settings` - Skip pulling settings\n - `--skip-configs` - Skip pulling configs (worker groups)\n - `--skip-groups` - Skip pulling instance groups\n - `--include-workspaces` - Also pull workspaces\n - `--folder-per-instance` - Create a folder per instance\n - `--instance <instance:string>` - Name of the instance to pull from, override the active instance\n - `--prefix <prefix:string>` - Prefix of the local workspaces to pull, used to create the folders when using --include-workspaces\n - `--prefix-settings` - Store instance yamls inside prefixed folders when using --prefix and --folder-per-instance\n- `instance push` - Push instance settings, users, configs, groups and overwrite remote\n - `--yes` - Push without needing confirmation\n - `--dry-run` - Perform a dry run without making changes\n - `--skip-users` - Skip pushing users\n - `--skip-settings` - Skip pushing settings\n - `--skip-configs` - Skip pushing configs (worker groups)\n - `--skip-groups` - Skip pushing instance groups\n - `--include-workspaces` - Also push workspaces\n - `--folder-per-instance` - Create a folder per instance\n - `--instance <instance:string>` - Name of the instance to push to, override the active instance\n - `--prefix <prefix:string>` - Prefix of the local workspaces folders to push\n - `--prefix-settings` - Store instance yamls inside prefixed folders when using --prefix and --folder-per-instance\n- `instance whoami` - Display information about the currently logged-in user\n- `instance get-config` - Dump the current instance config (global settings + worker configs) as YAML\n - `-o, --output-file <file:string>` - Write YAML to a file instead of stdout\n - `--show-secrets` - Include sensitive fields (license key, JWT secret) without prompting\n - `--instance <instance:string>` - Name of the instance, override the active instance\n- `instance connect-slack`\n - `--bot-token <bot_token:string>` - Slack bot token (xoxb-...)\n - `--team-id <team_id:string>` - Slack team id\n - `--team-name <team_name:string>` - Slack team name\n - `--instance <instance:string>` - Instance profile to connect against (defaults to the active instance)\n\n### job\n\nManage jobs (list, inspect, cancel)\n\n**Subcommands:**\n\n- `job list` - List recent jobs\n- `job get <id:string>` - Get job details. 
For flows: shows step tree with sub-job IDs\n - `--json` - Output as JSON (for piping to jq)\n- `job result <id:string>` - Get the result of a completed job (machine-friendly)\n- `job logs <id:string>` - Get job logs. For flows: aggregates all step logs\n- `job cancel <id:string>` - Cancel a running or queued job\n - `--reason <reason:string>` - Reason for cancellation\n\n### jobs\n\nPull completed and queued jobs from workspace\n\n**Arguments:** `[workspace:string]`\n\n**Options:**\n- `-c, --completed-output <file:string>` - Completed jobs output file (default: completed_jobs.json)\n- `-q, --queued-output <file:string>` - Queued jobs output file (default: queued_jobs.json)\n- `--skip-worker-check` - Skip checking for active workers before export\n\n**Subcommands:**\n\n- `jobs pull`\n- `jobs push`\n\n### lint\n\nValidate Windmill flow, schedule, and trigger YAML files in a directory\n\n**Arguments:** `[directory:string]`\n\n**Options:**\n- `--json` - Output results in JSON format\n- `--fail-on-warn` - Exit with code 1 when warnings are emitted\n- `--locks-required` - Fail if scripts or flow inline scripts that need locks have no locks\n- `-w, --watch` - Watch for file changes and re-lint automatically\n\n### queues\n\nList all queues with their metrics\n\n**Arguments:** `[workspace:string] the optional workspace to filter by (defaults to all workspaces)`\n\n**Options:**\n- `--instance [instance]` - Name of the instance to push to, override the active instance\n- `--base-url [baseUrl]` - If used with --token, will be used as the base url for the instance\n\n### resource\n\nresource related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `resource list` - list all resources\n - `--json` - Output as JSON (for piping to jq)\n- `resource get <path:string>` - get a resource's details\n - `--json` - Output as JSON (for piping to jq)\n- `resource new <path:string>` - create a new resource locally\n- `resource push <file_path:string> <remote_path:string>` - push a local resource spec. This overrides any remote versions.\n\n### resource-type\n\nresource type related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `resource-type list` - list all resource types\n - `--schema` - Show schema in the output\n - `--json` - Output as JSON (for piping to jq)\n- `resource-type get <path:string>` - get a resource type's details\n - `--json` - Output as JSON (for piping to jq)\n- `resource-type new <name:string>` - create a new resource type locally\n- `resource-type push <file_path:string> <name:string>` - push a local resource-type spec. This overrides any remote versions.\n- `resource-type generate-namespace` - Create a TypeScript definition file with the RT namespace generated from the resource types\n\n### schedule\n\nschedule related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `schedule list` - list all schedules\n - `--json` - Output as JSON (for piping to jq)\n- `schedule get <path:string>` - get a schedule's details\n - `--json` - Output as JSON (for piping to jq)\n- `schedule new <path:string>` - create a new schedule locally\n- `schedule push <file_path:string> <remote_path:string>` - push a local schedule spec. 
This overrides any remote versions.\n- `schedule enable <path:string>` - Enable a schedule\n - `--force` - Bypass the fork-conflict warning when the parent workspace has the same schedule (acknowledges that both crons will fire)\n- `schedule disable <path:string>` - Disable a schedule\n- `schedule set-permissioned-as <path:string> <email:string>` - Set the email (run-as user) for a schedule (requires admin or wm_deployers group)\n\n### script\n\nscript related commands\n\n**Options:**\n- `--show-archived` - Show archived scripts instead of active ones\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `script list` - list all scripts\n - `--show-archived` - Show archived scripts instead of active ones\n - `--json` - Output as JSON (for piping to jq)\n- `script push <path:file>` - push a local script spec. This overrides any remote versions. Use the script file (.ts, .js, .py, .sh)\n - `--message <message:string>` - Deployment message\n- `script get <path:file>` - get a script's details\n - `--json` - Output as JSON (for piping to jq)\n- `script show <path:file>` - show a script's content (alias for get)\n- `script run <path:file>` - run a script by path\n - `-d --data <data:file>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n- `script preview <path:file>` - preview a local script without deploying it. Supports both regular and codebase scripts.\n - `-d --data <data:file>` - Inputs specified as a JSON string or a file using @<filename> or stdin using @-.\n - `-s --silent` - Do not output anything other than the final output. Useful for scripting.\n- `script new <path:file> <language:string>` - create a new script\n - `--summary <summary:string>` - script summary\n - `--description <description:string>` - script description\n- `script bootstrap <path:file> <language:string>` - create a new script (alias for new)\n - `--summary <summary:string>` - script summary\n - `--description <description:string>` - script description\n- `script set-permissioned-as <path:string> <email:string>` - Set the on_behalf_of_email for a script (requires admin or wm_deployers group)\n- `script history <path:string>` - show version history for a script\n - `--json` - Output as JSON (for piping to jq)\n\n### sync\n\nsync local with a remote workspace or the opposite (push or pull)\n\n**Subcommands:**\n\n- `sync pull` - Pull any remote changes and apply them locally.\n - `--yes` - Pull without needing confirmation\n - `--dry-run` - Show changes that would be pulled without actually pulling\n - `--plain-secrets` - Pull secrets as plain text\n - `--json` - Use JSON instead of YAML\n - `--skip-variables` - Skip syncing variables (including secrets)\n - `--skip-secrets` - Skip syncing only secrets variables\n - `--include-secrets` - Include secrets in sync (overrides skipSecrets in wmill.yaml)\n - `--skip-resources` - Skip syncing resources\n - `--skip-resource-types` - Skip syncing resource types\n - `--skip-scripts` - Skip syncing scripts\n - `--skip-flows` - Skip syncing flows\n - `--skip-apps` - Skip syncing apps\n - `--skip-folders` - Skip syncing folders\n - `--skip-workspace-dependencies` - Skip syncing workspace dependencies\n - `--skip-scripts-metadata` - Skip syncing scripts metadata, focus solely on logic\n - `--include-schedules` - Include syncing schedules\n - `--include-triggers` - Include syncing triggers\n - `--include-users` - Include syncing users\n 
- `--include-groups` - Include syncing groups\n - `--include-settings` - Include syncing workspace settings\n - `--include-key` - Include workspace encryption key\n - `--skip-branch-validation` - Skip git branch validation and prompts\n - `--json-output` - Output results in JSON format\n - `-i --includes <patterns:file[]>` - Comma separated patterns to specify which file to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string). Overrides wmill.yaml includes\n - `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which file to NOT take into account. Overrides wmill.yaml excludes\n - `--extra-includes <patterns:file[]>` - Comma separated patterns to specify which file to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string). Useful to still take wmill.yaml into account and act as a second pattern to satisfy\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo) when multiple repositories exist\n - `--promotion <branch:string>` - Use promotionOverrides from the specified branch instead of regular overrides\n - `--branch, --env <branch:string>` - [Deprecated: use --workspace] Override the current git branch/environment\n- `sync push` - Push any local changes and apply them remotely.\n - `--yes` - Push without needing confirmation\n - `--dry-run` - Show changes that would be pushed without actually pushing\n - `--plain-secrets` - Push secrets as plain text\n - `--json` - Use JSON instead of YAML\n - `--skip-variables` - Skip syncing variables (including secrets)\n - `--skip-secrets` - Skip syncing only secrets variables\n - `--include-secrets` - Include secrets in sync (overrides skipSecrets in wmill.yaml)\n - `--skip-resources` - Skip syncing resources\n - `--skip-resource-types` - Skip syncing resource types\n - `--skip-scripts` - Skip syncing scripts\n - `--skip-flows` - Skip syncing flows\n - `--skip-apps` - Skip syncing apps\n - `--skip-folders` - Skip syncing folders\n - `--skip-workspace-dependencies` - Skip syncing workspace dependencies\n - `--skip-scripts-metadata` - Skip syncing scripts metadata, focus solely on logic\n - `--include-schedules` - Include syncing schedules\n - `--include-triggers` - Include syncing triggers\n - `--include-users` - Include syncing users\n - `--include-groups` - Include syncing groups\n - `--include-settings` - Include syncing workspace settings\n - `--include-key` - Include workspace encryption key\n - `--skip-branch-validation` - Skip git branch validation and prompts\n - `--json-output` - Output results in JSON format\n - `-i --includes <patterns:file[]>` - Comma separated patterns to specify which file to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string)\n - `-e --excludes <patterns:file[]>` - Comma separated patterns to specify which file to NOT take into account.\n - `--extra-includes <patterns:file[]>` - Comma separated patterns to specify which file to take into account (among files that are compatible with windmill). Patterns can include * (any string until '/') and ** (any string). 
Useful to still take wmill.yaml into account and act as a second pattern to satisfy\n - `--message <message:string>` - Include a message that will be added to all scripts/flows/apps updated during this push\n - `--parallel <number>` - Number of changes to process in parallel\n - `--repository <repo:string>` - Specify repository path (e.g., u/user/repo) when multiple repositories exist\n - `--branch, --env <branch:string>` - [Deprecated: use --workspace] Override the current git branch/environment\n - `--lint` - Run lint validation before pushing\n - `--locks-required` - Fail if scripts or flow inline scripts that need locks have no locks\n - `--auto-metadata` - Automatically regenerate stale metadata (locks and schemas) before pushing\n - `--accept-overriding-permissioned-as-with-self` - Accept that items with a different permissioned_as will be updated with your own user\n\n### token\n\nManage API tokens\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `token list` - List API tokens\n - `--json` - Output as JSON (for piping to jq)\n- `token create` - Create a new API token\n - `--label <label:string>` - Token label\n - `--expiration <expiration:string>` - Token expiration (ISO 8601 timestamp)\n- `token delete <token_prefix:string>` - Delete a token by its prefix\n\n### trigger\n\ntrigger related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `trigger list` - list all triggers\n - `--json` - Output as JSON (for piping to jq)\n- `trigger get <path:string>` - get a trigger's details\n - `--json` - Output as JSON (for piping to jq)\n - `--kind <kind:string>` - Trigger kind (http, websocket, kafka, nats, postgres, mqtt, sqs, gcp, azure, email). Recommended for faster lookup\n- `trigger new <path:string>` - create a new trigger locally\n - `--kind <kind:string>` - Trigger kind (required: http, websocket, kafka, nats, postgres, mqtt, sqs, gcp, azure, email)\n- `trigger push <file_path:string> <remote_path:string>` - push a local trigger spec. This overrides any remote versions.\n- `trigger set-permissioned-as <path:string> <email:string>` - Set the email (run-as user) for a trigger (requires admin or wm_deployers group)\n - `--kind <kind:string>` - Trigger kind (required: http, websocket, kafka, nats, postgres, mqtt, sqs, gcp, azure, email)\n\n### user\n\nuser related commands\n\n**Subcommands:**\n\n- `user add <email:string> [password:string]` - Create a user\n - `--superadmin` - Specify to make the new user superadmin.\n - `--company <company:string>` - Specify to set the company of the new user.\n - `--name <name:string>` - Specify to set the name of the new user.\n- `user remove <email:string>` - Delete a user\n- `user create-token` - Create a new API token for the authenticated user\n - `--email <email:string>` - Specify credentials to use for authentication. This will not be stored. It will only be used to exchange for a token with the API server, which will not be stored either.\n - `--password <password:string>` - Specify credentials to use for authentication. This will not be stored. 
It will only be used to exchange for a token with the API server, which will not be stored either.\n\n### variable\n\nvariable related commands\n\n**Options:**\n- `--json` - Output as JSON (for piping to jq)\n\n**Subcommands:**\n\n- `variable list` - list all variables\n - `--json` - Output as JSON (for piping to jq)\n- `variable get <path:string>` - get a variable's details\n - `--json` - Output as JSON (for piping to jq)\n- `variable new <path:string>` - create a new variable locally\n- `variable push <file_path:string> <remote_path:string>` - Push a local variable spec. This overrides any remote versions.\n - `--plain-secrets` - Push secrets as plain text\n- `variable add <value:string> <remote_path:string>` - Create a new variable on the remote. This will update the variable if it already exists.\n - `--plain-secrets` - Push secrets as plain text\n - `--public` - Legacy option, use --plain-secrets instead\n\n### version\n\nShow version information\n\n### worker-groups\n\ndisplay worker groups, pull and push worker group configs\n\n**Subcommands:**\n\n- `worker-groups pull` - Pull worker groups (similar to `wmill instance pull --skip-users --skip-settings --skip-groups`)\n - `--instance` - Name of the instance to pull from, overrides the active instance\n - `--base-url` - Base url to be passed to the instance settings instead of the local one\n - `--yes` - Pull without needing confirmation\n- `worker-groups push` - Push worker groups (similar to `wmill instance push --skip-users --skip-settings --skip-groups`)\n - `--instance [instance]` - Name of the instance to push to, overrides the active instance\n - `--base-url [baseUrl]` - If used with --token, will be used as the base url for the instance\n - `--yes` - Push without needing confirmation\n\n### workers\n\nList all workers grouped by worker groups\n\n**Options:**\n- `--instance [instance]` - Name of the instance to query, overrides the active instance\n- `--base-url [baseUrl]` - If used with --token, will be used as the base url for the instance\n\n### workspace\n\nworkspace related commands\n\n**Alias:** `profile`\n\n**Subcommands:**\n\n- `workspace switch <workspace_name:string:workspace>` - Switch to another workspace\n- `workspace add [workspace_name:string] [workspace_id:string] [remote:string]` - Add a workspace\n - `-c --create` - Create the workspace if it does not exist\n - `--create-workspace-name <workspace_name:string>` - Specify the workspace name. Ignored if --create is not specified or the workspace already exists. Will default to the workspace id.\n - `--create-username <username:string>` - Specify your own username in the newly created workspace. 
Ignored if --create is not specified, the workspace already exists or automatic username creation is enabled on the instance.\n- `workspace remove <workspace_name:string>` - Remove a workspace\n- `workspace whoami` - Show the currently active user\n- `workspace list` - List local workspace profiles\n- `workspace list-remote` - List workspaces on the remote server that you have access to\n- `workspace list-forks` - List forked workspaces on the remote server\n- `workspace bind` - Create or update a workspace entry in wmill.yaml from the active profile\n - `--workspace <name:string>` - Workspace name (default: current branch or workspaceId)\n - `--branch <branch:string>` - Git branch to associate (default: workspace name)\n- `workspace unbind` - Remove baseUrl and workspaceId from a workspace entry\n - `--workspace <name:string>` - Workspace to unbind\n- `workspace fork [workspace_name:string] [workspace_id:string]` - Create a forked workspace\n - `--create-workspace-name <workspace_name:string>` - Specify the workspace name. Ignored if --create is not specified or the workspace already exists. Will default to the workspace id.\n - `--color <color:string>` - Workspace color (hex code, e.g. #ff0000)\n - `--datatable-behavior <behavior:string>` - How to handle datatables: skip, schema_only, or schema_and_data (default: interactive prompt)\n - `-y --yes` - Skip interactive prompts (defaults datatable behavior to 'skip')\n- `workspace delete-fork <fork_name:string>` - Delete a forked workspace and git branch\n - `-y --yes` - Skip confirmation prompt\n- `workspace merge` - Compare and deploy changes between a fork and its parent workspace\n - `--direction <direction:string>` - Deploy direction: to-parent or to-fork\n - `--all` - Deploy all changed items including conflicts\n - `--skip-conflicts` - Skip items modified in both workspaces\n - `--include <items:string>` - Comma-separated kind:path items to include (e.g. script:f/test/main,flow:f/my/flow)\n - `--exclude <items:string>` - Comma-separated kind:path items to exclude\n - `--preserve-on-behalf-of` - Preserve original on_behalf_of/permissioned_as values\n - `-y --yes` - Non-interactive mode (deploy without prompts)\n- `workspace connect-slack` - Non-interactively connect Slack to the active workspace using a pre-minted bot token (xoxb-...). Produces the same artifacts as the UI OAuth flow: workspace_settings fields, g/slack group, f/slack_bot folder, and the encrypted bot token variable + resource at f/slack_bot/bot_token.\n - `--bot-token <bot_token:string>` - Slack bot token (xoxb-...)\n - `--team-id <team_id:string>` - Slack team id\n - `--team-name <team_name:string>` - Slack team name\n- `workspace disconnect-slack`\n\n";
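For anyone scripting these commands, here is a minimal TypeScript sketch of the dry-run-then-apply pattern described under `sync pull`. It assumes the CLI is installed on PATH as `wmill` and that an active workspace profile is configured; the flag names come from the reference above, and everything else is illustrative.

```typescript
// Sketch: preview a pull, then apply it non-interactively.
import { execSync } from "node:child_process";

function wmill(args: string): string {
  // Inherits the environment, so the active workspace profile is used.
  return execSync(`wmill ${args}`, { encoding: "utf-8" });
}

// 1. Show what would change, without touching local files.
const plan = wmill("sync pull --dry-run --json-output");
console.log("planned changes:", plan);

// 2. Apply the pull, skipping the confirmation prompt.
wmill("sync pull --yes");
```

The same pattern applies to `sync push`: run it once with `--dry-run` and inspect the plan before pushing for real.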
  export declare const LANG_BASH = "# Bash\n\n## Structure\n\nDo not include `#!/bin/bash`. Arguments are obtained as positional parameters:\n\n```bash\n# Get arguments\nvar1=\"$1\"\nvar2=\"$2\"\n\necho \"Processing $var1 and $var2\"\n\n# Return JSON by echoing to stdout\necho \"{\\\"result\\\": \\\"$var1\\\", \\\"count\\\": $var2}\"\n```\n\n**Important:**\n- Do not include shebang (`#!/bin/bash`)\n- Arguments are always strings\n- Access with `$1`, `$2`, etc.\n\n## Output\n\nThe script output is captured as the result. For structured data, output valid JSON:\n\n```bash\nname=\"$1\"\ncount=\"$2\"\n\n# Output JSON result\ncat << EOF\n{\n \"name\": \"$name\",\n \"count\": $count,\n \"timestamp\": \"$(date -Iseconds)\"\n}\nEOF\n```\n\n## Environment Variables\n\nEnvironment variables set in Windmill are available:\n\n```bash\n# Access environment variable\necho \"Workspace: $WM_WORKSPACE\"\necho \"Job ID: $WM_JOB_ID\"\n```\n";
- export declare const LANG_BIGQUERY = "# BigQuery\n\nArguments use `@name` syntax.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- @name1 (string)\n-- @name2 (int64) = 0\nSELECT * FROM users WHERE name = @name1 AND age > @name2;\n```\n";
- export declare const LANG_BUN = "# TypeScript (Bun)\n\nBun runtime with full npm ecosystem and fastest execution.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function. Libraries are installed automatically.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n```typescript\nimport Stripe from \"stripe\";\nimport { someFunction } from \"some-package\";\n```\n\n## Windmill Client\n\nImport the windmill client for platform interactions:\n\n```typescript\nimport * as wmill from \"windmill-client\";\n```\n\nSee the SDK documentation for available methods.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id,\n };\n}\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\n### S3Object Type\n\nThe S3Object type represents a file in S3 storage:\n\n```typescript\ntype S3Object = {\n s3: string; // Path within the bucket\n};\n```\n\n## TypeScript Operations\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\n// Load file content from S3\nconst content: Uint8Array = await wmill.loadS3File(s3object);\n\n// Load file as stream\nconst blob: Blob = await wmill.loadS3FileStream(s3object);\n\n// Write file to S3\nconst result: S3Object = await wmill.writeS3File(\n s3object, // Target path (or undefined to auto-generate)\n fileContent, // string or Blob\n s3ResourcePath // Optional: specific S3 resource to use\n);\n```\n";
- export declare const LANG_BUNNATIVE = "# TypeScript (Bun Native)\n\nNative TypeScript execution with fetch only - no external imports allowed.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n**No imports allowed.** Use the globally available `fetch` function:\n\n```typescript\nexport async function main(url: string) {\n const response = await fetch(url);\n return await response.json();\n}\n```\n\n## Windmill Client\n\nThe windmill client is not available in native TypeScript mode. Use fetch to call APIs directly.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id,\n };\n}\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\n### S3Object Type\n\nThe S3Object type represents a file in S3 storage:\n\n```typescript\ntype S3Object = {\n s3: string; // Path within the bucket\n};\n```\n\n## TypeScript Operations\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\n// Load file content from S3\nconst content: Uint8Array = await wmill.loadS3File(s3object);\n\n// Load file as stream\nconst blob: Blob = await wmill.loadS3FileStream(s3object);\n\n// Write file to S3\nconst result: S3Object = await wmill.writeS3File(\n s3object, // Target path (or undefined to auto-generate)\n fileContent, // string or Blob\n s3ResourcePath // Optional: specific S3 resource to use\n);\n```\n";
+ export declare const LANG_BIGQUERY = "# BigQuery\n\nArguments use `@name` syntax.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- @name1 (string)\n-- @name2 (int64) = 0\nSELECT * FROM users WHERE name = @name1 AND age > @name2;\n```\n\n## Receiving an S3Object as a script parameter\n\nDeclare the arg with type `(s3object)`. Windmill renders an S3 file picker for\nit, downloads the file, and binds it as a `STRING` JSON parameter \u2014 Parquet/CSV\nfiles are decoded server-side into a JSON array of records, JSON/JSONL pass\nthrough. Consume with `JSON_EXTRACT_ARRAY` / `JSON_VALUE`:\n\n```sql\n-- @file (s3object)\nSELECT\n CAST(JSON_VALUE(row, '$.id') AS INT64) AS id,\n JSON_VALUE(row, '$.name') AS name\nFROM UNNEST(JSON_EXTRACT_ARRAY(@file)) AS row;\n```\n\n## Streaming query results to S3\n\nAdd a `-- s3` directive at the top of the script to stream the result set to S3\ninstead of returning rows. Windmill writes the file and returns its `S3Object`\nas the script result.\n\n```sql\n-- s3 prefix=exports/users format=parquet\nSELECT id, name FROM users;\n```\n\nAll keys are optional: `prefix` (object key prefix), `storage` (named storage \u2014\nomit to use the workspace default), `format` (`json` (default), `parquet`, or\n`csv`). Use this for large result sets \u2014 rows stream directly to S3 instead of\nbeing buffered, bypassing the 10000-row return cap.\n";
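Because the `-- s3` directive makes the script return an `S3Object` rather than rows, a downstream step can pick the exported file up with the client functions documented for the TypeScript runtimes below. A sketch, assuming the upstream step used `format=json`; treating `exported` as a step input is illustrative wiring:

```typescript
import * as wmill from "windmill-client";

// `exported` is assumed to be the S3Object result of a step that
// streamed its result set to S3 with the `-- s3` directive.
export async function main(exported: wmill.S3Object) {
  const bytes = await wmill.loadS3File(exported);
  // format=json upstream, so the bytes decode to an array of row objects.
  const rows: unknown[] = JSON.parse(new TextDecoder().decode(bytes));
  return { rowCount: rows.length };
}
```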
+ export declare const LANG_BUN = "# TypeScript (Bun)\n\nBun runtime with full npm ecosystem and fastest execution.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function. Libraries are installed automatically.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n```typescript\nimport Stripe from \"stripe\";\nimport { someFunction } from \"some-package\";\n```\n\n## Windmill Client\n\nImport the windmill client for platform interactions:\n\n```typescript\nimport * as wmill from \"windmill-client\";\n```\n\nSee the SDK documentation for available methods.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id,\n };\n}\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations. The `wmill.S3Object` type covers both the `s3://storage/key` URI form (`s3:///key` for the workspace default storage) and the `{ s3, storage? }` record form \u2014 always use it instead of redefining your own.\n\n### Receiving an S3Object as a script parameter\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\nexport async function main(file: wmill.S3Object) {\n const content = await wmill.loadS3File(file);\n // ...\n}\n```\n\n### S3 operations\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\n// Load file content from S3\nconst content: Uint8Array = await wmill.loadS3File(s3object);\n\n// Load file as stream\nconst blob: Blob = await wmill.loadS3FileStream(s3object);\n\n// Write file to S3\nconst result: wmill.S3Object = await wmill.writeS3File(\n s3object, // Target path (or undefined to auto-generate)\n fileContent, // string or Blob\n s3ResourcePath // Optional: specific S3 resource to use\n);\n```\n";
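The note above says `wmill.S3Object` covers both a URI form and a record form. Here is a short sketch passing each form to the documented client functions, assuming the URI form is a plain string as described; the key names are illustrative:

```typescript
import * as wmill from "windmill-client";

export async function main() {
  // Record form: a key within the workspace default storage.
  const asRecord: wmill.S3Object = { s3: "reports/today.json" };

  // URI form: the empty storage segment targets the workspace default;
  // a named storage would look like "s3://my_storage/reports/today.json".
  const asUri: wmill.S3Object = "s3:///reports/today.json";

  // Both forms are accepted by the same functions.
  const written = await wmill.writeS3File(asRecord, JSON.stringify({ ok: true }));
  const bytes = await wmill.loadS3File(asUri);
  return { written, size: bytes.length };
}
```

Per the note above, the record form also takes an optional `storage` field to target a named storage instead of the workspace default.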
+ export declare const LANG_BUNNATIVE = "# TypeScript (Bun Native)\n\nNative TypeScript execution with fetch only - no external imports allowed.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n**No imports allowed.** Use the globally available `fetch` function:\n\n```typescript\nexport async function main(url: string) {\n const response = await fetch(url);\n return await response.json();\n}\n```\n\n## Windmill Client\n\nThe windmill client is not available in native TypeScript mode. Use fetch to call APIs directly.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id,\n };\n}\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations. The `wmill.S3Object` type covers both the `s3://storage/key` URI form (`s3:///key` for the workspace default storage) and the `{ s3, storage? }` record form \u2014 always use it instead of redefining your own.\n\n### Receiving an S3Object as a script parameter\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\nexport async function main(file: wmill.S3Object) {\n const content = await wmill.loadS3File(file);\n // ...\n}\n```\n\n### S3 operations\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\n// Load file content from S3\nconst content: Uint8Array = await wmill.loadS3File(s3object);\n\n// Load file as stream\nconst blob: Blob = await wmill.loadS3FileStream(s3object);\n\n// Write file to S3\nconst result: wmill.S3Object = await wmill.writeS3File(\n s3object, // Target path (or undefined to auto-generate)\n fileContent, // string or Blob\n s3ResourcePath // Optional: specific S3 resource to use\n);\n```\n";
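Since native mode forbids imports and the windmill client, every external call goes through `fetch`. A slightly fuller sketch than the one in the constant above, adding status handling; the endpoint and response shape are hypothetical:

```typescript
// Native mode: no imports. fetch is globally available.
export async function main(baseUrl: string, userId: string) {
  // Hypothetical REST endpoint, purely for illustration.
  const response = await fetch(`${baseUrl}/users/${encodeURIComponent(userId)}`);
  if (!response.ok) {
    // Throwing fails the job with a readable message.
    throw new Error(`request failed: ${response.status} ${response.statusText}`);
  }
  return await response.json();
}
```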
  export declare const LANG_CSHARP = "# C#\n\nThe script must contain a public static `Main` method inside a class:\n\n```csharp\npublic class Script\n{\n public static object Main(string name, int count)\n {\n return new { Name = name, Count = count };\n }\n}\n```\n\n**Important:**\n- Class name is irrelevant\n- Method must be `public static`\n- Return type can be `object` or specific type\n\n## NuGet Packages\n\nAdd packages using the `#r` directive at the top:\n\n```csharp\n#r \"nuget: Newtonsoft.Json, 13.0.3\"\n#r \"nuget: RestSharp, 110.2.0\"\n\nusing Newtonsoft.Json;\nusing RestSharp;\n\npublic class Script\n{\n public static object Main(string url)\n {\n var client = new RestClient(url);\n var request = new RestRequest();\n var response = client.Get(request);\n return JsonConvert.DeserializeObject(response.Content);\n }\n}\n```\n";
- export declare const LANG_DENO = "# TypeScript (Deno)\n\nDeno runtime with npm support via `npm:` prefix and native Deno libraries.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function. Libraries are installed automatically.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n```typescript\n// npm packages use npm: prefix\nimport Stripe from \"npm:stripe\";\nimport { someFunction } from \"npm:some-package\";\n\n// Deno standard library\nimport { serve } from \"https://deno.land/std/http/server.ts\";\n```\n\n## Windmill Client\n\nImport the windmill client for platform interactions:\n\n```typescript\nimport * as wmill from \"windmill-client\";\n```\n\nSee the SDK documentation for available methods.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id,\n };\n}\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\n### S3Object Type\n\nThe S3Object type represents a file in S3 storage:\n\n```typescript\ntype S3Object = {\n s3: string; // Path within the bucket\n};\n```\n\n## TypeScript Operations\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\n// Load file content from S3\nconst content: Uint8Array = await wmill.loadS3File(s3object);\n\n// Load file as stream\nconst blob: Blob = await wmill.loadS3FileStream(s3object);\n\n// Write file to S3\nconst result: S3Object = await wmill.writeS3File(\n s3object, // Target path (or undefined to auto-generate)\n fileContent, // string or Blob\n s3ResourcePath // Optional: specific S3 resource to use\n);\n```\n";
- export declare const LANG_DUCKDB = "# DuckDB\n\nArguments are defined with comments and used with `$name` syntax:\n\n```sql\n-- $name (text) = default\n-- $age (integer)\nSELECT * FROM users WHERE name = $name AND age > $age;\n```\n\n## Ducklake Integration\n\nAttach Ducklake for data lake operations:\n\n```sql\n-- Main ducklake\nATTACH 'ducklake' AS dl;\n\n-- Named ducklake\nATTACH 'ducklake://my_lake' AS dl;\n\n-- Then query\nSELECT * FROM dl.schema.table;\n```\n\n## External Database Connections\n\nConnect to external databases using resources:\n\n```sql\nATTACH '$res:path/to/resource' AS db (TYPE postgres);\nSELECT * FROM db.schema.table;\n```\n\n## S3 File Operations\n\nRead files from S3 storage:\n\n```sql\n-- Default storage\nSELECT * FROM read_csv('s3:///path/to/file.csv');\n\n-- Named storage\nSELECT * FROM read_csv('s3://storage_name/path/to/file.csv');\n\n-- Parquet files\nSELECT * FROM read_parquet('s3:///path/to/file.parquet');\n\n-- JSON files\nSELECT * FROM read_json('s3:///path/to/file.json');\n```\n";
+ export declare const LANG_DENO = "# TypeScript (Deno)\n\nDeno runtime with npm support via `npm:` prefix and native Deno libraries.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function. Libraries are installed automatically.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n```typescript\n// npm packages use npm: prefix\nimport Stripe from \"npm:stripe\";\nimport { someFunction } from \"npm:some-package\";\n\n// Deno standard library\nimport { serve } from \"https://deno.land/std/http/server.ts\";\n```\n\n## Windmill Client\n\nImport the windmill client for platform interactions:\n\n```typescript\nimport * as wmill from \"windmill-client\";\n```\n\nSee the SDK documentation for available methods.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id,\n };\n}\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations. The `wmill.S3Object` type covers both the `s3://storage/key` URI form (`s3:///key` for the workspace default storage) and the `{ s3, storage? }` record form \u2014 always use it instead of redefining your own.\n\n### Receiving an S3Object as a script parameter\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\nexport async function main(file: wmill.S3Object) {\n const content = await wmill.loadS3File(file);\n // ...\n}\n```\n\n### S3 operations\n\n```typescript\nimport * as wmill from \"windmill-client\";\n\n// Load file content from S3\nconst content: Uint8Array = await wmill.loadS3File(s3object);\n\n// Load file as stream\nconst blob: Blob = await wmill.loadS3FileStream(s3object);\n\n// Write file to S3\nconst result: wmill.S3Object = await wmill.writeS3File(\n s3object, // Target path (or undefined to auto-generate)\n fileContent, // string or Blob\n s3ResourcePath // Optional: specific S3 resource to use\n);\n```\n";
+ export declare const LANG_DUCKDB = "# DuckDB\n\nArguments are defined with comments and used with `$name` syntax:\n\n```sql\n-- $name (text) = default\n-- $age (integer)\nSELECT * FROM users WHERE name = $name AND age > $age;\n```\n\n## Ducklake Integration\n\nAttach Ducklake for data lake operations:\n\n```sql\n-- Main ducklake\nATTACH 'ducklake' AS dl;\n\n-- Named ducklake\nATTACH 'ducklake://my_lake' AS dl;\n\n-- Then query\nSELECT * FROM dl.schema.table;\n```\n\n## External Database Connections\n\nConnect to external databases using resources:\n\n```sql\nATTACH '$res:path/to/resource' AS db (TYPE postgres);\nSELECT * FROM db.schema.table;\n```\n\n## S3 File Operations\n\nRead files from S3 storage:\n\n```sql\n-- Default storage\nSELECT * FROM read_csv('s3:///path/to/file.csv');\n\n-- Named storage\nSELECT * FROM read_csv('s3://storage_name/path/to/file.csv');\n\n-- Parquet files\nSELECT * FROM read_parquet('s3:///path/to/file.parquet');\n\n-- JSON files\nSELECT * FROM read_json('s3:///path/to/file.json');\n```\n\n### Receiving an S3Object as a script parameter\n\nDeclare the arg with type `(s3object)`. Windmill renders an S3 file picker for it\nand binds the arg as the bare `s3://storage/key` URI, which DuckDB's reader\nfunctions consume directly:\n\n```sql\n-- $file (s3object)\nSELECT * FROM read_parquet($file);\n```\n\nWorks with any DuckDB reader: `read_csv($file)`, `read_json($file)`, etc.\n\n### Writing query results to S3\n\nDuckDB writes to S3 natively via `COPY ... TO`:\n\n```sql\nCOPY (SELECT * FROM users) TO 's3:///exports/users.parquet' (FORMAT PARQUET);\n```\n\nUse this instead of the `-- s3` streaming directive supported by the other SQL\ndialects \u2014 that directive is not available in DuckDB.\n";
  export declare const LANG_GO = "# Go\n\n## Structure\n\nThe file package must be `inner` and export a function called `main`:\n\n```go\npackage inner\n\nfunc main(param1 string, param2 int) (map[string]interface{}, error) {\n return map[string]interface{}{\n \"result\": param1,\n \"count\": param2,\n }, nil\n}\n```\n\n**Important:**\n- Package must be `inner`\n- Return type must be `({return_type}, error)`\n- Function name is `main` (lowercase)\n\n## Return Types\n\nThe return type can be any Go type that can be serialized to JSON:\n\n```go\npackage inner\n\ntype Result struct {\n Name string `json:\"name\"`\n Count int `json:\"count\"`\n}\n\nfunc main(name string, count int) (Result, error) {\n return Result{\n Name: name,\n Count: count,\n }, nil\n}\n```\n\n## Error Handling\n\nReturn errors as the second return value:\n\n```go\npackage inner\n\nimport \"errors\"\n\nfunc main(value int) (string, error) {\n if value < 0 {\n return \"\", errors.New(\"value must be positive\")\n }\n return \"success\", nil\n}\n```\n";
  export declare const LANG_GRAPHQL = "# GraphQL\n\n## Structure\n\nWrite GraphQL queries or mutations. Arguments can be added as query parameters:\n\n```graphql\nquery GetUser($id: ID!) {\n user(id: $id) {\n id\n name\n email\n }\n}\n```\n\n## Variables\n\nVariables are passed as script arguments and automatically bound to the query:\n\n```graphql\nquery SearchProducts($query: String!, $limit: Int = 10) {\n products(search: $query, first: $limit) {\n edges {\n node {\n id\n name\n price\n }\n }\n }\n}\n```\n\n## Mutations\n\n```graphql\nmutation CreateUser($input: CreateUserInput!) {\n createUser(input: $input) {\n id\n name\n createdAt\n }\n}\n```\n";
  export declare const LANG_JAVA = "# Java\n\nThe script must contain a public class named `Main` with a `public static main()` method:\n\n```java\npublic class Main {\n public static Object main(String name, int count) {\n java.util.Map<String, Object> result = new java.util.HashMap<>();\n result.put(\"name\", name);\n result.put(\"count\", count);\n return result;\n }\n}\n```\n\n**Important:**\n- Class must be named `Main`\n- Method must be `public static Object main(...)`\n- Return type is `Object` or `void`\n\n## Maven Dependencies\n\nAdd dependencies using comments at the top:\n\n```java\n//requirements:\n//com.google.code.gson:gson:2.10.1\n//org.apache.httpcomponents:httpclient:4.5.14\n\nimport com.google.gson.Gson;\n\npublic class Main {\n public static Object main(String input) {\n Gson gson = new Gson();\n return gson.fromJson(input, Object.class);\n }\n}\n```\n";
- export declare const LANG_MSSQL = "# Microsoft SQL Server (MSSQL)\n\nArguments use `@P1`, `@P2`, etc.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- @P1 name1 (varchar)\n-- @P2 name2 (int) = 0\nSELECT * FROM users WHERE name = @P1 AND age > @P2;\n```\n";
- export declare const LANG_MYSQL = "# MySQL\n\nArguments use `?` placeholders.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- ? name1 (text)\n-- ? name2 (int) = 0\nSELECT * FROM users WHERE name = ? AND age > ?;\n```\n";
+ export declare const LANG_MSSQL = "# Microsoft SQL Server (MSSQL)\n\nArguments use `@P1`, `@P2`, etc.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- @P1 name1 (varchar)\n-- @P2 name2 (int) = 0\nSELECT * FROM users WHERE name = @P1 AND age > @P2;\n```\n\n## Receiving an S3Object as a script parameter\n\nDeclare the arg with type `(s3object)`. Windmill renders an S3 file picker for\nit, downloads the file, and binds it as `nvarchar(max)` JSON text \u2014 Parquet/CSV\nfiles are decoded server-side into a JSON array of records, JSON/JSONL pass\nthrough. Consume with `OPENJSON`:\n\n```sql\n-- @P1 file (s3object)\nSELECT id, name\nFROM OPENJSON(@P1)\nWITH (id INT, name NVARCHAR(200));\n```\n\n## Streaming query results to S3\n\nAdd a `-- s3` directive at the top of the script to stream the result set to S3\ninstead of returning rows. Windmill writes the file and returns its `S3Object`\nas the script result.\n\n```sql\n-- s3 prefix=exports/users format=parquet\nSELECT id, name FROM users;\n```\n\nAll keys are optional: `prefix` (object key prefix), `storage` (named storage \u2014\nomit to use the workspace default), `format` (`json` (default), `parquet`, or\n`csv`). Use this for large result sets \u2014 rows stream directly to S3 instead of\nbeing buffered as the script return value.\n";
+ export declare const LANG_MYSQL = "# MySQL\n\nArguments use `?` placeholders.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- ? name1 (text)\n-- ? name2 (int) = 0\nSELECT * FROM users WHERE name = ? AND age > ?;\n```\n\n## Receiving an S3Object as a script parameter\n\nDeclare the arg with type `(s3object)`. Windmill renders an S3 file picker for\nit, downloads the file, and binds it as JSON text \u2014 Parquet/CSV files are\ndecoded server-side into a JSON array of records, JSON/JSONL pass through.\nConsume with `JSON_TABLE`:\n\n```sql\n-- ? file (s3object)\nSELECT id, name\nFROM JSON_TABLE(?, '$[*]'\n COLUMNS (id INT PATH '$.id', name VARCHAR(200) PATH '$.name')\n) AS r;\n```\n\n## Streaming query results to S3\n\nAdd a `-- s3` directive at the top of the script to stream the result set to S3\ninstead of returning rows. Windmill writes the file and returns its `S3Object`\nas the script result.\n\n```sql\n-- s3 prefix=exports/users format=parquet\nSELECT id, name FROM users;\n```\n\nAll keys are optional: `prefix` (object key prefix), `storage` (named storage \u2014\nomit to use the workspace default), `format` (`json` (default), `parquet`, or\n`csv`). Use this for large result sets \u2014 rows stream directly to S3 instead of\nbeing buffered as the script return value.\n";
  export declare const LANG_NATIVETS = "# TypeScript (Native)\n\nNative TypeScript execution with fetch only - no external imports allowed.\n\n## Structure\n\nExport a single **async** function called `main`:\n\n```typescript\nexport async function main(param1: string, param2: number) {\n // Your code here\n return { result: param1, count: param2 };\n}\n```\n\nDo not call the main function.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nUse the `RT` namespace for resource types:\n\n```typescript\nexport async function main(stripe: RT.Stripe) {\n // stripe contains API key and config from the resource\n}\n```\n\nOnly use resource types if you need them to satisfy the instructions. Always use the RT namespace.\n\nBefore using a resource type, check the `rt.d.ts` file in the project root to see all available resource types and their fields. This file is generated by `wmill resource-type generate-namespace`.\n\n## Imports\n\n**No imports allowed.** Use the globally available `fetch` function:\n\n```typescript\nexport async function main(url: string) {\n const response = await fetch(url);\n return await response.json();\n}\n```\n\n## Windmill Client\n\nThe windmill client is not available in native TypeScript mode. Use fetch to call APIs directly.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```typescript\ntype Event = {\n kind:\n | \"webhook\"\n | \"http\"\n | \"websocket\"\n | \"kafka\"\n | \"email\"\n | \"nats\"\n | \"postgres\"\n | \"sqs\"\n | \"mqtt\"\n | \"gcp\";\n body: any;\n headers: Record<string, string>;\n query: Record<string, string>;\n};\n\nexport async function preprocessor(event: Event) {\n return {\n param1: event.body.field1,\n param2: event.query.id\n };\n}\n```\n";
  export declare const LANG_PHP = "# PHP\n\n## Structure\n\nThe script must start with `<?php` and contain at least one function called `main`:\n\n```php\n<?php\n\nfunction main(string $param1, int $param2) {\n return [\"result\" => $param1, \"count\" => $param2];\n}\n```\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nYou need to **redefine** the type of the resources that are needed before the main function. Always check if the class already exists using `class_exists`:\n\n```php\n<?php\n\nif (!class_exists('Postgresql')) {\n class Postgresql {\n public string $host;\n public int $port;\n public string $user;\n public string $password;\n public string $dbname;\n }\n}\n\nfunction main(Postgresql $db) {\n // $db contains the database connection details\n}\n```\n\nThe resource type name has to be exactly as specified.\n\n## Library Dependencies\n\nSpecify library dependencies as comments before the main function:\n\n```php\n<?php\n\n// require:\n// guzzlehttp/guzzle\n// stripe/stripe-php@^10.0\n\nfunction main() {\n // Libraries are available\n}\n```\n\nOne dependency per line. No need to require autoload, it is already done.\n";
- export declare const LANG_POSTGRESQL = "# PostgreSQL\n\nArguments are obtained directly in the statement with `$1::{type}`, `$2::{type}`, etc.\n\nName the parameters by adding comments at the beginning of the script (without specifying the type):\n\n```sql\n-- $1 name1\n-- $2 name2 = default_value\nSELECT * FROM users WHERE name = $1::TEXT AND age > $2::INT;\n```\n";
+ export declare const LANG_POSTGRESQL = "# PostgreSQL\n\nArguments are obtained directly in the statement with `$1::{type}`, `$2::{type}`, etc.\n\nName the parameters by adding comments at the beginning of the script (without specifying the type):\n\n```sql\n-- $1 name1\n-- $2 name2 = default_value\nSELECT * FROM users WHERE name = $1::TEXT AND age > $2::INT;\n```\n\n## Receiving an S3Object as a script parameter\n\nDeclare the arg with type `(s3object)`. Windmill renders an S3 file picker for\nit, downloads the file, and binds it as a `jsonb` parameter \u2014 Parquet/CSV files\nare decoded server-side into a JSON array of records, JSON/JSONL pass through.\nConsume with `jsonb_to_recordset` (or any `jsonb` API):\n\n```sql\n-- $1 file (s3object)\nSELECT *\nFROM jsonb_to_recordset($1::jsonb) AS r(id INT, name TEXT);\n```\n\n## Streaming query results to S3\n\nAdd a `-- s3` directive at the top of the script to stream the result set to S3\ninstead of returning rows. Windmill writes the file and returns its `S3Object`\nas the script result.\n\n```sql\n-- s3 prefix=exports/users format=parquet\nSELECT id, name FROM users;\n```\n\nAll keys are optional: `prefix` (object key prefix), `storage` (named storage \u2014\nomit to use the workspace default), `format` (`json` (default), `parquet`, or\n`csv`). Use this for large result sets \u2014 rows stream directly to S3 instead of\nbeing buffered as the script return value.\n";
  export declare const LANG_POWERSHELL = "# PowerShell\n\n## Structure\n\nArguments are declared in a `param` block on the first line:\n\n```powershell\nparam($Name, $Count = 0, [int]$Age)\n\n# Your code here\nWrite-Output \"Processing $Name, count: $Count, age: $Age\"\n\n# Return object\n@{\n name = $Name\n count = $Count\n age = $Age\n}\n```\n\n## Parameter Types\n\nYou can specify types for parameters:\n\n```powershell\nparam(\n [string]$Name,\n [int]$Count = 0,\n [bool]$Enabled = $true,\n [array]$Items\n)\n\n@{\n name = $Name\n count = $Count\n enabled = $Enabled\n items = $Items\n}\n```\n\n## Return Values\n\nReturn values by outputting them at the end of the script:\n\n```powershell\nparam($InputData)\n\n$result = @{\n processed = $true\n data = $InputData\n timestamp = Get-Date -Format \"o\"\n}\n\n$result\n```\n";
- export declare const LANG_PYTHON3 = "# Python\n\n## Structure\n\nThe script must contain at least one function called `main`:\n\n```python\ndef main(param1: str, param2: int):\n # Your code here\n return {\"result\": param1, \"count\": param2}\n```\n\nDo not call the main function. Libraries are installed automatically.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nYou need to **redefine** the type of the resources that are needed before the main function as TypedDict:\n\n```python\nfrom typing import TypedDict\n\nclass postgresql(TypedDict):\n host: str\n port: int\n user: str\n password: str\n dbname: str\n\ndef main(db: postgresql):\n # db contains the database connection details\n pass\n```\n\n**Important rules:**\n\n- The resource type name must be **IN LOWERCASE**\n- Only include resource types if they are actually needed\n- If an import conflicts with a resource type name, **rename the imported object, not the type name**\n- Make sure to import TypedDict from typing **if you're using it**\n\n## Imports\n\nLibraries are installed automatically. Do not show installation instructions.\n\n```python\nimport requests\nimport pandas as pd\nfrom datetime import datetime\n```\n\nIf an import name conflicts with a resource type:\n\n```python\n# Wrong - don't rename the type\nimport stripe as stripe_lib\nclass stripe_type(TypedDict): ...\n\n# Correct - rename the import\nimport stripe as stripe_sdk\nclass stripe(TypedDict):\n api_key: str\n```\n\n## Windmill Client\n\nImport the windmill client for platform interactions:\n\n```python\nimport wmill\n```\n\nSee the SDK documentation for available methods.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```python\nfrom typing import TypedDict, Literal, Any\n\nclass Event(TypedDict):\n kind: Literal[\"webhook\", \"http\", \"websocket\", \"kafka\", \"email\", \"nats\", \"postgres\", \"sqs\", \"mqtt\", \"gcp\"]\n body: Any\n headers: dict[str, str]\n query: dict[str, str]\n\ndef preprocessor(event: Event):\n # Transform the event into flow input parameters\n return {\n \"param1\": event[\"body\"][\"field1\"],\n \"param2\": event[\"query\"][\"id\"]\n }\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\n```python\nimport wmill\n\n# Load file content from S3\ncontent: bytes = wmill.load_s3_file(s3object)\n\n# Load file as stream reader\nreader: BufferedReader = wmill.load_s3_file_reader(s3object)\n\n# Write file to S3\nresult: S3Object = wmill.write_s3_file(\n s3object, # Target path (or None to auto-generate)\n file_content, # bytes or BufferedReader\n s3_resource_path, # Optional: specific S3 resource\n content_type, # Optional: MIME type\n content_disposition # Optional: Content-Disposition header\n)\n```\n";
+ export declare const LANG_PYTHON3 = "# Python\n\n## Structure\n\nThe script must contain at least one function called `main`:\n\n```python\ndef main(param1: str, param2: int):\n # Your code here\n return {\"result\": param1, \"count\": param2}\n```\n\nDo not call the main function. Libraries are installed automatically.\n\n## Resource Types\n\nOn Windmill, credentials and configuration are stored in resources and passed as parameters to main.\n\nYou need to **redefine** the type of the resources that are needed before the main function as TypedDict:\n\n```python\nfrom typing import TypedDict\n\nclass postgresql(TypedDict):\n host: str\n port: int\n user: str\n password: str\n dbname: str\n\ndef main(db: postgresql):\n # db contains the database connection details\n pass\n```\n\n**Important rules:**\n\n- The resource type name must be **IN LOWERCASE**\n- Only include resource types if they are actually needed\n- If an import conflicts with a resource type name, **rename the imported object, not the type name**\n- Make sure to import TypedDict from typing **if you're using it**\n\n## Imports\n\nLibraries are installed automatically. Do not show installation instructions.\n\n```python\nimport requests\nimport pandas as pd\nfrom datetime import datetime\n```\n\nIf an import name conflicts with a resource type:\n\n```python\n# Wrong - don't rename the type\nimport stripe as stripe_lib\nclass stripe_type(TypedDict): ...\n\n# Correct - rename the import\nimport stripe as stripe_sdk\nclass stripe(TypedDict):\n api_key: str\n```\n\n## Windmill Client\n\nImport the windmill client for platform interactions:\n\n```python\nimport wmill\n```\n\nSee the SDK documentation for available methods.\n\n## Preprocessor Scripts\n\nFor preprocessor scripts, the function should be named `preprocessor` and receives an `event` parameter:\n\n```python\nfrom typing import TypedDict, Literal, Any\n\nclass Event(TypedDict):\n kind: Literal[\"webhook\", \"http\", \"websocket\", \"kafka\", \"email\", \"nats\", \"postgres\", \"sqs\", \"mqtt\", \"gcp\"]\n body: Any\n headers: dict[str, str]\n query: dict[str, str]\n\ndef preprocessor(event: Event):\n # Transform the event into flow input parameters\n return {\n \"param1\": event[\"body\"][\"field1\"],\n \"param2\": event[\"query\"][\"id\"]\n }\n```\n\n## S3 Object Operations\n\nWindmill provides built-in support for S3-compatible storage operations.\n\n### Receiving an S3Object as a script parameter\n\nTo accept a file from S3 as input to a script, type the parameter with `S3Object` (imported from `wmill`):\n\n```python\nimport wmill\nfrom wmill import S3Object\n\ndef main(file: S3Object):\n content = wmill.load_s3_file(file)\n # ...\n```\n\n### S3 operations\n\n```python\nimport wmill\n\n# Load file content from S3\ncontent: bytes = wmill.load_s3_file(s3object)\n\n# Load file as stream reader\nreader: BufferedReader = wmill.load_s3_file_reader(s3object)\n\n# Write file to S3\nresult: S3Object = wmill.write_s3_file(\n s3object, # Target path (or None to auto-generate)\n file_content, # bytes or BufferedReader\n s3_resource_path, # Optional: specific S3 resource\n content_type, # Optional: MIME type\n content_disposition # Optional: Content-Disposition header\n)\n```\n";
  export declare const LANG_RLANG = "# R\n\n## Structure\n\nDefine a `main` function using `<-` or `=` assignment. Parameters become the script inputs:\n\n```r\nlibrary(dplyr)\nlibrary(jsonlite)\n\nmain <- function(x, name = \"default\", flag = TRUE) {\n df <- tibble(x = x, name = name)\n result <- df %>% mutate(greeting = paste(\"Hello\", name))\n return(toJSON(result, auto_unbox = TRUE))\n}\n```\n\n**Important:**\n- The `main` function is required\n- Use `library()` to load packages \u2014 they are resolved and installed automatically\n- `jsonlite` is always available (used internally for argument parsing)\n- Return values must be JSON-serializable\n\n## Parameters\n\nR types map to Windmill types:\n- `numeric` \u2192 float/int\n- `character` \u2192 string\n- `logical` \u2192 bool (use `TRUE`/`FALSE`)\n- `list` \u2192 object/dict\n- `NULL` \u2192 null\n\nDefault values are inferred from the function signature:\n\n```r\nmain <- function(\n name, # required string\n count = 10, # optional int, default 10\n verbose = FALSE # optional bool, default FALSE\n) {\n # ...\n}\n```\n\n## Resources and Variables\n\nUse the built-in Windmill helpers (no import needed):\n\n```r\nmain <- function() {\n # Get a variable\n api_key <- get_variable(\"f/my_folder/api_key\")\n\n # Get a resource (returns a list)\n db <- get_resource(\"f/my_folder/postgres_config\")\n host <- db$host\n port <- db$port\n\n return(list(host = host, port = port))\n}\n```\n\n## Output\n\nReturn any JSON-serializable value from `main`. The return value becomes the step result:\n\n```r\nmain <- function(x) {\n # Return a scalar\n return(x + 1)\n\n # Or a list (becomes JSON object)\n return(list(result = x + 1, status = \"ok\"))\n}\n```\n\n## Annotations\n\nControl execution behavior with comment annotations:\n\n```r\n#renv_verbose = true # Show verbose renv output during resolution\n#renv_install_verbose = true # Show verbose output during package installation\n#sandbox = true # Run in nsjail sandbox (requires nsjail)\n```\n";
  export declare const LANG_RUST = "# Rust\n\n## Structure\n\nThe script must contain a function called `main` with proper return type:\n\n```rust\nuse anyhow::anyhow;\nuse serde::Serialize;\n\n#[derive(Serialize, Debug)]\nstruct ReturnType {\n result: String,\n count: i32,\n}\n\nfn main(param1: String, param2: i32) -> anyhow::Result<ReturnType> {\n Ok(ReturnType {\n result: param1,\n count: param2,\n })\n}\n```\n\n**Important:**\n- Arguments should be owned types\n- Return type must be serializable (`#[derive(Serialize)]`)\n- Return type is `anyhow::Result<T>`\n\n## Dependencies\n\nPackages must be specified with a partial cargo.toml at the beginning of the script:\n\n```rust\n//! ```cargo\n//! [dependencies]\n//! anyhow = \"1.0.86\"\n//! reqwest = { version = \"0.11\", features = [\"json\"] }\n//! tokio = { version = \"1\", features = [\"full\"] }\n//! ```\n\nuse anyhow::anyhow;\n// ... rest of the code\n```\n\n**Note:** Serde is already included, no need to add it again.\n\n## Async Functions\n\nIf you need to handle async functions (e.g., using tokio), keep the main function sync and create the runtime inside:\n\n```rust\n//! ```cargo\n//! [dependencies]\n//! anyhow = \"1.0.86\"\n//! tokio = { version = \"1\", features = [\"full\"] }\n//! reqwest = { version = \"0.11\", features = [\"json\"] }\n//! ```\n\nuse anyhow::anyhow;\nuse serde::Serialize;\n\n#[derive(Serialize, Debug)]\nstruct Response {\n data: String,\n}\n\nfn main(url: String) -> anyhow::Result<Response> {\n let rt = tokio::runtime::Runtime::new()?;\n rt.block_on(async {\n let resp = reqwest::get(&url).await?.text().await?;\n Ok(Response { data: resp })\n })\n}\n```\n";
- export declare const LANG_SNOWFLAKE = "# Snowflake\n\nArguments use `?` placeholders.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- ? name1 (text)\n-- ? name2 (number) = 0\nSELECT * FROM users WHERE name = ? AND age > ?;\n```\n";
+ export declare const LANG_SNOWFLAKE = "# Snowflake\n\nArguments use `?` placeholders.\n\nName the parameters by adding comments before the statement:\n\n```sql\n-- ? name1 (text)\n-- ? name2 (number) = 0\nSELECT * FROM users WHERE name = ? AND age > ?;\n```\n\n## Receiving an S3Object as a script parameter\n\nDeclare the arg with type `(s3object)`. Windmill renders an S3 file picker for\nit, downloads the file, and binds it as JSON text \u2014 Parquet/CSV files are\ndecoded server-side into a JSON array of records, JSON/JSONL pass through.\nWrap the bind with `PARSE_JSON(?)` and walk it with `LATERAL FLATTEN`:\n\n```sql\n-- ? file (s3object)\nSELECT\n v.value:id::NUMBER AS id,\n v.value:name::STRING AS name\nFROM LATERAL FLATTEN(input => PARSE_JSON(?)) v;\n```\n\n## Streaming query results to S3\n\nAdd a `-- s3` directive at the top of the script to stream the result set to S3\ninstead of returning rows. Windmill writes the file and returns its `S3Object`\nas the script result.\n\n```sql\n-- s3 prefix=exports/users format=parquet\nSELECT id, name FROM users;\n```\n\nAll keys are optional: `prefix` (object key prefix), `storage` (named storage \u2014\nomit to use the workspace default), `format` (`json` (default), `parquet`, or\n`csv`). Use this for large result sets \u2014 rows stream directly to S3 instead of\nbeing buffered, bypassing the 10000-row return cap.\n";
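Taken together, these `LANG_*` exports read as per-language prompt material. Below is a hedged sketch of how a consumer might stitch them into a system prompt; only the constant names come from this file, while the import specifier and the language keys are assumptions:

```typescript
// Illustrative only: the module specifier and the key names are assumptions.
import { LANG_BASH, LANG_BUN, LANG_DENO, LANG_PYTHON3 } from "./core";

const DOCS_BY_LANGUAGE: Record<string, string> = {
  bash: LANG_BASH,
  bun: LANG_BUN,
  deno: LANG_DENO,
  python3: LANG_PYTHON3,
};

// Prepend the language guide to the user's task to form a system prompt.
export function buildSystemPrompt(language: string, instructions: string): string {
  const doc = DOCS_BY_LANGUAGE[language] ?? "";
  return `${doc}\n\n## Task\n\n${instructions}`;
}
```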