@mindstudio-ai/agent 0.1.29 → 0.1.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -30,7 +30,7 @@ var init_metadata = __esm({
30
30
  stepType: "activeCampaignCreateContact",
31
31
  description: "Create or sync a contact in ActiveCampaign.",
32
32
  usageNotes: "- Requires an ActiveCampaign OAuth connection (connectionId).\n- If a contact with the email already exists, it may be updated depending on ActiveCampaign settings.\n- Custom fields are passed as a key-value map where keys are field IDs.",
33
- inputSchema: { "type": "object", "properties": { "email": { "type": "string", "description": "Contact email address" }, "firstName": { "type": "string", "description": "Contact first name" }, "lastName": { "type": "string", "description": "Contact last name" }, "phone": { "type": "string", "description": "Contact phone number" }, "accountId": { "type": "string", "description": "ActiveCampaign account ID to associate the contact with" }, "customFields": { "type": "object", "description": "Custom field values keyed by field ID" }, "connectionId": { "type": "string", "description": "ActiveCampaign OAuth connection ID" } }, "required": ["email", "firstName", "lastName", "phone", "accountId", "customFields"] },
33
+ inputSchema: { "type": "object", "properties": { "email": { "type": "string", "description": "Contact email address" }, "firstName": { "type": "string", "description": "Contact first name" }, "lastName": { "type": "string", "description": "Contact last name" }, "phone": { "type": "string", "description": "Contact phone number" }, "accountId": { "type": "string", "description": "ActiveCampaign account ID to associate the contact with" }, "customFields": { "type": "object", "properties": {}, "required": [], "description": "Custom field values keyed by field ID" }, "connectionId": { "type": "string", "description": "ActiveCampaign OAuth connection ID" } }, "required": ["email", "firstName", "lastName", "phone", "accountId", "customFields"] },
34
34
  outputSchema: { "type": "object", "properties": { "contactId": { "type": "string", "description": "ActiveCampaign contact ID of the created contact" } }, "required": ["contactId"] }
35
35
  },
36
36
  "addSubtitlesToVideo": {
@@ -44,7 +44,7 @@ var init_metadata = __esm({
44
44
  stepType: "airtableCreateUpdateRecord",
45
45
  description: "Create a new record or update an existing record in an Airtable table.",
46
46
  usageNotes: '- If recordId is provided, updates that record. Otherwise, creates a new one.\n- When updating with updateMode "onlySpecified", unspecified fields are left as-is. With "all", unspecified fields are cleared.\n- Array fields (e.g. multipleAttachments) accept arrays of values.',
47
- inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Airtable OAuth connection ID" }, "baseId": { "type": "string", "description": "Airtable base ID" }, "tableId": { "type": "string", "description": "Airtable table ID" }, "recordId": { "type": "string", "description": "Record ID to update. Omit to create a new record" }, "updateMode": { "enum": ["onlySpecified", "all"], "type": "string", "description": "How to handle unspecified fields on update. 'onlySpecified' leaves them as-is, 'all' clears them" }, "fields": { "description": "Field schema metadata used for type resolution" }, "recordData": { "type": "object", "description": "Field values to set, keyed by field ID" } }, "required": ["baseId", "tableId", "fields", "recordData"] },
47
+ inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Airtable OAuth connection ID" }, "baseId": { "type": "string", "description": "Airtable base ID" }, "tableId": { "type": "string", "description": "Airtable table ID" }, "recordId": { "type": "string", "description": "Record ID to update. Omit to create a new record" }, "updateMode": { "enum": ["onlySpecified", "all"], "type": "string", "description": "How to handle unspecified fields on update. 'onlySpecified' leaves them as-is, 'all' clears them" }, "fields": { "description": "Field schema metadata used for type resolution" }, "recordData": { "type": "object", "properties": {}, "required": [], "description": "Field values to set, keyed by field ID" } }, "required": ["baseId", "tableId", "fields", "recordData"] },
48
48
  outputSchema: { "type": "object", "properties": { "recordId": { "type": "string", "description": "The Airtable record ID of the created or updated record" } }, "required": ["recordId"] }
49
49
  },
50
50
  "airtableDeleteRecord": {
@@ -59,27 +59,27 @@ var init_metadata = __esm({
59
59
  description: "Fetch a single record from an Airtable table by its record ID.",
60
60
  usageNotes: "- Requires an active Airtable OAuth connection (connectionId).\n- If the record is not found, returns a string message instead of a record object.",
61
61
  inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Airtable OAuth connection ID" }, "baseId": { "type": "string", "description": 'Airtable base ID (e.g. "appXXXXXX")' }, "tableId": { "type": "string", "description": 'Airtable table ID (e.g. "tblXXXXXX")' }, "recordId": { "type": "string", "description": 'Record ID to fetch (e.g. "recXXXXXX")' } }, "required": ["baseId", "tableId", "recordId"] },
62
- outputSchema: { "type": "object", "properties": { "record": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Airtable record ID" }, "createdTime": { "type": "string", "description": "ISO 8601 timestamp when the record was created" }, "fields": { "type": "object", "description": "Field values keyed by field name" } }, "required": ["id", "createdTime", "fields"] }, { "type": "null" }] } }, "required": ["record"] }
62
+ outputSchema: { "type": "object", "properties": { "record": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Airtable record ID" }, "createdTime": { "type": "string", "description": "ISO 8601 timestamp when the record was created" }, "fields": { "type": "object", "properties": {}, "required": [], "description": "Field values keyed by field name" } }, "required": ["id", "createdTime", "fields"] }, { "type": "null" }] } }, "required": ["record"] }
63
63
  },
64
64
  "airtableGetTableRecords": {
65
65
  stepType: "airtableGetTableRecords",
66
66
  description: "Fetch multiple records from an Airtable table with optional pagination.",
67
67
  usageNotes: "- Requires an active Airtable OAuth connection (connectionId).\n- Default limit is 100 records. Maximum is 1000.\n- When outputFormat is 'csv', the variable receives CSV text. The direct execution output always returns parsed records.",
68
68
  inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Airtable OAuth connection ID" }, "baseId": { "type": "string", "description": 'Airtable base ID (e.g. "appXXXXXX")' }, "tableId": { "type": "string", "description": 'Airtable table ID (e.g. "tblXXXXXX")' }, "outputFormat": { "enum": ["json", "csv"], "type": "string", "description": "Output format for the result. Defaults to 'json'" }, "limit": { "type": "number", "description": "Maximum number of records to return. Defaults to 100, max 1000" } }, "required": ["baseId", "tableId"] },
69
- outputSchema: { "type": "object", "properties": { "records": { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", "description": "Airtable record ID" }, "createdTime": { "type": "string", "description": "ISO 8601 timestamp when the record was created" }, "fields": { "type": "object", "description": "Field values keyed by field name" } }, "required": ["id", "createdTime", "fields"] }, "description": "The list of records retrieved from the Airtable table" } }, "required": ["records"] }
69
+ outputSchema: { "type": "object", "properties": { "records": { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", "description": "Airtable record ID" }, "createdTime": { "type": "string", "description": "ISO 8601 timestamp when the record was created" }, "fields": { "type": "object", "properties": {}, "required": [], "description": "Field values keyed by field name" } }, "required": ["id", "createdTime", "fields"] }, "description": "The list of records retrieved from the Airtable table" } }, "required": ["records"] }
70
70
  },
71
71
  "analyzeImage": {
72
72
  stepType: "analyzeImage",
73
73
  description: "Analyze an image using a vision model based on a text prompt.",
74
74
  usageNotes: "- Uses the configured vision model to generate a text analysis of the image.\n- The prompt should describe what to look for or extract from the image.",
75
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Instructions describing what to look for or extract from the image" }, "imageUrl": { "type": "string", "description": "URL of the image to analyze" }, "visionModelOverride": { "anyOf": [{ "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"] }, { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", 
"temperature", "maxResponseTokens"] }] } }, "required": ["prompt", "imageUrl"] },
75
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Instructions describing what to look for or extract from the image" }, "imageUrl": { "type": "string", "description": "URL of the image to analyze" }, "visionModelOverride": { "anyOf": [{ "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object", "properties": {}, "required": [] } }, "required": ["model"] }, { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": 
"Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"] }] } }, "required": ["prompt", "imageUrl"] },
76
76
  outputSchema: { "type": "object", "properties": { "analysis": { "type": "string", "description": "Text analysis of the image generated by the vision model" } }, "required": ["analysis"] }
77
77
  },
78
78
  "analyzeVideo": {
79
79
  stepType: "analyzeVideo",
80
80
  description: "Analyze a video using a video analysis model based on a text prompt.",
81
81
  usageNotes: "- Uses the configured video analysis model to generate a text analysis of the video.\n- The prompt should describe what to look for or extract from the video.",
82
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Instructions describing what to look for or extract from the video" }, "videoUrl": { "type": "string", "description": "URL of the video to analyze" }, "videoAnalysisModelOverride": { "anyOf": [{ "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"] }, { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", 
"temperature", "maxResponseTokens"] }] } }, "required": ["prompt", "videoUrl"] },
82
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Instructions describing what to look for or extract from the video" }, "videoUrl": { "type": "string", "description": "URL of the video to analyze" }, "videoAnalysisModelOverride": { "anyOf": [{ "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object", "properties": {}, "required": [] } }, "required": ["model"] }, { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": 
"Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"] }] } }, "required": ["prompt", "videoUrl"] },
83
83
  outputSchema: { "type": "object", "properties": { "analysis": { "type": "string", "description": "Text analysis of the video generated by the video analysis model" } }, "required": ["analysis"] }
84
84
  },
85
85
  "captureThumbnail": {
@@ -107,15 +107,15 @@ var init_metadata = __esm({
107
107
  stepType: "codaCreateUpdateRow",
108
108
  description: "Create a new row or update an existing row in a Coda table.",
109
109
  usageNotes: "- Requires a Coda OAuth connection (connectionId).\n- If rowId is provided, updates that row. Otherwise, creates a new one.\n- Row data keys are column IDs. Empty values are excluded.",
110
- inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Coda OAuth connection ID" }, "docId": { "type": "string", "description": "Coda document ID" }, "tableId": { "type": "string", "description": "Table ID within the document" }, "rowId": { "type": "string", "description": "Row ID to update. Omit to create a new row" }, "rowData": { "type": "object", "description": "Column values to set, keyed by column ID" } }, "required": ["docId", "tableId", "rowData"] },
110
+ inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Coda OAuth connection ID" }, "docId": { "type": "string", "description": "Coda document ID" }, "tableId": { "type": "string", "description": "Table ID within the document" }, "rowId": { "type": "string", "description": "Row ID to update. Omit to create a new row" }, "rowData": { "type": "object", "properties": {}, "required": [], "description": "Column values to set, keyed by column ID" } }, "required": ["docId", "tableId", "rowData"] },
111
111
  outputSchema: { "type": "object", "properties": { "rowId": { "type": "string", "description": "The Coda row ID of the created or updated row" } }, "required": ["rowId"] }
112
112
  },
113
113
  "codaFindRow": {
114
114
  stepType: "codaFindRow",
115
115
  description: "Search for a row in a Coda table by matching column values.",
116
116
  usageNotes: "- Requires a Coda OAuth connection (connectionId).\n- Returns the first row matching all specified column values, or null if no match.\n- Search criteria in rowData are ANDed together.",
117
- inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Coda OAuth connection ID" }, "docId": { "type": "string", "description": "Coda document ID" }, "tableId": { "type": "string", "description": "Table ID to search within" }, "rowData": { "type": "object", "description": "Column values to match against, keyed by column ID. All criteria are ANDed together" } }, "required": ["docId", "tableId", "rowData"] },
118
- outputSchema: { "type": "object", "properties": { "row": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Coda row ID" }, "values": { "type": "object", "description": "Column values keyed by column name" } }, "required": ["id", "values"] }, { "type": "null" }] } }, "required": ["row"] }
117
+ inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Coda OAuth connection ID" }, "docId": { "type": "string", "description": "Coda document ID" }, "tableId": { "type": "string", "description": "Table ID to search within" }, "rowData": { "type": "object", "properties": {}, "required": [], "description": "Column values to match against, keyed by column ID. All criteria are ANDed together" } }, "required": ["docId", "tableId", "rowData"] },
118
+ outputSchema: { "type": "object", "properties": { "row": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Coda row ID" }, "values": { "type": "object", "properties": {}, "required": [], "description": "Column values keyed by column name" } }, "required": ["id", "values"] }, { "type": "null" }] } }, "required": ["row"] }
119
119
  },
120
120
  "codaGetPage": {
121
121
  stepType: "codaGetPage",
@@ -129,7 +129,7 @@ var init_metadata = __esm({
129
129
  description: "Fetch rows from a Coda table with optional pagination.",
130
130
  usageNotes: "- Requires a Coda OAuth connection (connectionId).\n- Default limit is 10000 rows. Rows are fetched in pages of 500.\n- When outputFormat is 'csv', the variable receives CSV text. The direct execution output always returns parsed rows.",
131
131
  inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "Coda OAuth connection ID" }, "docId": { "type": "string", "description": "Coda document ID" }, "tableId": { "type": "string", "description": "Table ID within the document" }, "limit": { "type": ["number", "string"] }, "outputFormat": { "enum": ["json", "csv"], "type": "string", "description": "Output format for the result. Defaults to 'json'" } }, "required": ["docId", "tableId"] },
132
- outputSchema: { "type": "object", "properties": { "rows": { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", "description": "Coda row ID" }, "values": { "type": "object", "description": "Column values keyed by column name" } }, "required": ["id", "values"] }, "description": "The list of rows retrieved from the Coda table" } }, "required": ["rows"] }
132
+ outputSchema: { "type": "object", "properties": { "rows": { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", "description": "Coda row ID" }, "values": { "type": "object", "properties": {}, "required": [], "description": "Column values keyed by column name" } }, "required": ["id", "values"] }, "description": "The list of rows retrieved from the Coda table" } }, "required": ["rows"] }
133
133
  },
134
134
  "convertPdfToImages": {
135
135
  stepType: "convertPdfToImages",
@@ -214,7 +214,7 @@ var init_metadata = __esm({
214
214
  stepType: "detectChanges",
215
215
  description: "Detect changes between runs by comparing current input against previously stored state. Routes execution based on whether a change occurred.",
216
216
  usageNotes: '- Persists state across runs using a global variable keyed to the step ID.\n- Two modes: "comparison" (default) uses strict string inequality; "ai" uses an LLM to determine if a meaningful change occurred.\n- First run always treats the value as "changed" since there is no previous state.\n- Each mode supports transitions to different steps/workflows for the "changed" and "unchanged" paths.\n- AI mode bills normally for the LLM call.',
217
- inputSchema: { "type": "object", "properties": { "mode": { "enum": ["ai", "comparison"], "type": "string", "description": "Detection mode: 'comparison' for strict string inequality, 'ai' for LLM-based. Default: 'comparison'" }, "input": { "type": "string", "description": "Current value to check (variable template)" }, "prompt": { "type": "string", "description": "AI mode: what constitutes a meaningful change" }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, 
"required": ["model", "temperature", "maxResponseTokens"], "description": "AI mode: model settings override" }, "previousValueVariable": { "type": "string", "description": "Optional variable name to store the previous value into for downstream access" }, "changedStepId": { "type": "string", "description": "Step to transition to if changed (same workflow)" }, "changedWorkflowId": { "type": "string", "description": "Workflow to jump to if changed (cross workflow)" }, "unchangedStepId": { "type": "string", "description": "Step to transition to if unchanged (same workflow)" }, "unchangedWorkflowId": { "type": "string", "description": "Workflow to jump to if unchanged (cross workflow)" } }, "required": ["mode", "input"], "description": "Configuration for the detect changes step" },
217
+ inputSchema: { "type": "object", "properties": { "mode": { "enum": ["ai", "comparison"], "type": "string", "description": "Detection mode: 'comparison' for strict string inequality, 'ai' for LLM-based. Default: 'comparison'" }, "input": { "type": "string", "description": "Current value to check (variable template)" }, "prompt": { "type": "string", "description": "AI mode: what constitutes a meaningful change" }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional 
model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "AI mode: model settings override" }, "previousValueVariable": { "type": "string", "description": "Optional variable name to store the previous value into for downstream access" }, "changedStepId": { "type": "string", "description": "Step to transition to if changed (same workflow)" }, "changedWorkflowId": { "type": "string", "description": "Workflow to jump to if changed (cross workflow)" }, "unchangedStepId": { "type": "string", "description": "Step to transition to if unchanged (same workflow)" }, "unchangedWorkflowId": { "type": "string", "description": "Workflow to jump to if unchanged (cross workflow)" } }, "required": ["mode", "input"], "description": "Configuration for the detect changes step" },
218
218
  outputSchema: { "type": "object", "properties": { "hasChanged": { "type": "boolean", "description": "Whether a change was detected" }, "currentValue": { "type": "string", "description": "The resolved input value" }, "previousValue": { "type": "string", "description": "The stored value from last run (empty string on first run)" }, "isFirstRun": { "type": "boolean", "description": "True when no previous state exists" } }, "required": ["hasChanged", "currentValue", "previousValue", "isFirstRun"] }
219
219
  },
220
220
  "detectPII": {
@@ -327,7 +327,7 @@ var init_metadata = __esm({
327
327
  description: "Retrieve metadata and recent videos for a YouTube channel.",
328
328
  usageNotes: "- Accepts a YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID).\n- Returns channel info and video listings as a JSON object.",
329
329
  inputSchema: { "type": "object", "properties": { "channelUrl": { "type": "string", "description": "YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID)" } }, "required": ["channelUrl"] },
330
- outputSchema: { "type": "object" }
330
+ outputSchema: { "type": "object", "properties": {}, "required": [] }
331
331
  },
332
332
  "fetchYoutubeComments": {
333
333
  stepType: "fetchYoutubeComments",
@@ -341,13 +341,13 @@ var init_metadata = __esm({
341
341
  description: "Retrieve metadata for a YouTube video (title, description, stats, channel info).",
342
342
  usageNotes: "- Returns video metadata, channel info, and engagement stats.\n- Video format data is excluded from the response.",
343
343
  inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "YouTube video URL to fetch metadata for" } }, "required": ["videoUrl"] },
344
- outputSchema: { "type": "object" }
344
+ outputSchema: { "type": "object", "properties": {}, "required": [] }
345
345
  },
346
346
  "generateAsset": {
347
347
  stepType: "generatePdf",
348
348
  description: "Generate an HTML asset and export it as a webpage, PDF, or image",
349
349
  usageNotes: '- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)',
350
- inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
350
+ inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "properties": {}, "required": [], "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "properties": {}, "required": [], "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in 
the SPA" }, "root": { "type": "string", "description": "Root URL of the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether 
the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
351
351
  outputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat)" } }, "required": ["url"] }
352
352
  },
353
353
  "generateChart": {
@@ -361,28 +361,28 @@ var init_metadata = __esm({
361
361
  stepType: "generateImage",
362
362
  description: "Generate an image from a text prompt using an AI model.",
363
363
  usageNotes: "- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Images are automatically hosted on a CDN.\n- In foreground mode, the image is displayed to the user. In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple images are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.",
364
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the image to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "imageModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Image generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default image model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple image variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated image" } }, "required": ["prompt"] },
364
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the image to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "imageModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Image generation model identifier" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default image model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple image variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated image" } }, "required": ["prompt"] },
365
365
  outputSchema: { "type": "object", "properties": { "imageUrl": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["imageUrl"] }
366
366
  },
367
367
  "generateLipsync": {
368
368
  stepType: "generateLipsync",
369
369
  description: "Generate a lip sync video from provided audio and image.",
370
370
  usageNotes: "- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.",
371
- inputSchema: { "type": "object", "properties": { "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" }, "lipsyncModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default lipsync model if not specified" } } },
371
+ inputSchema: { "type": "object", "properties": { "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" }, "lipsyncModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object", "properties": {}, "required": [] } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default lipsync model if not specified" } } },
372
372
  outputSchema: { "description": "This step does not produce output data." }
373
373
  },
374
374
  "generateMusic": {
375
375
  stepType: "generateMusic",
376
376
  description: "Generate an audio file from provided instructions (text) using a music model.",
377
377
  usageNotes: "- The text field contains the instructions (prompt) for the music generation.\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.",
378
- inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The instructions (prompt) for the music generation" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "musicModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default music model if not specified" } }, "required": ["text"] },
378
+ inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The instructions (prompt) for the music generation" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "musicModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object", "properties": {}, "required": [] } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default music model if not specified" } }, "required": ["text"] },
379
379
  outputSchema: { "description": "This step does not produce output data." }
380
380
  },
381
381
  "generatePdf": {
382
382
  stepType: "generatePdf",
383
383
  description: "Generate an HTML asset and export it as a webpage, PDF, or image",
384
384
  usageNotes: '- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)',
385
- inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
385
+ inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "properties": {}, "required": [], "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "properties": {}, "required": [], "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in 
the SPA" }, "root": { "type": "string", "description": "Root URL of the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether 
the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
386
386
  outputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat)" } }, "required": ["url"] }
387
387
  },
388
388
  "generateStaticVideoFromImage": {
@@ -407,14 +407,14 @@ var init_metadata = __esm({
407
407
  The method code itself is unchanged \u2014 streaming is transparent to the
408
408
  developer. See V2ExecutionService.ts and the invoke handler in V2Apps for
409
409
  the server-side plumbing.`,
410
- inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The message to send (prompt for AI, or text for system echo)" }, "source": { "enum": ["user", "system"], "type": "string", "description": 'Message source: "user" sends to AI model, "system" echoes message content directly. Defaults to "user"' }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": 
"Model configuration override. Optional; uses the workflow's default model if not specified" }, "structuredOutputType": { "enum": ["text", "json", "csv"], "type": "string", "description": "Output format constraint for structured responses" }, "structuredOutputExample": { "type": "string", "description": "Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types." }, "chatHistoryMode": { "enum": ["include", "exclude"], "type": "string", "description": "Whether to include or exclude prior chat history in the AI context" } }, "required": ["message"], "description": "Configuration for the user message step" },
410
+ inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The message to send (prompt for AI, or text for system echo)" }, "source": { "enum": ["user", "system"], "type": "string", "description": 'Message source: "user" sends to AI model, "system" echoes message content directly. Defaults to "user"' }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", 
"maxResponseTokens"], "description": "Model configuration override. Optional; uses the workflow's default model if not specified" }, "structuredOutputType": { "enum": ["text", "json", "csv"], "type": "string", "description": "Output format constraint for structured responses" }, "structuredOutputExample": { "type": "string", "description": "Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types." }, "chatHistoryMode": { "enum": ["include", "exclude"], "type": "string", "description": "Whether to include or exclude prior chat history in the AI context" } }, "required": ["message"], "description": "Configuration for the user message step" },
411
411
  outputSchema: { "type": "object", "properties": { "content": { "type": "string", "description": "The AI model's response or echoed system message content" } }, "required": ["content"] }
412
412
  },
413
413
  "generateVideo": {
414
414
  stepType: "generateVideo",
415
415
  description: "Generate a video from a text prompt using an AI model.",
416
416
  usageNotes: "- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Videos are automatically hosted on a CDN.\n- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.",
417
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the video to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "videoModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Video generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default video model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple video variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" } }, "required": ["prompt"] },
417
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the video to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "videoModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Video generation model identifier" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default video model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple video variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" } }, "required": ["prompt"] },
418
418
  outputSchema: { "type": "object", "properties": { "videoUrl": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["videoUrl"] }
419
419
  },
420
420
  "getGmailAttachments": {
@@ -477,7 +477,7 @@ var init_metadata = __esm({
477
477
  stepType: "httpRequest",
478
478
  description: "Make an HTTP request to an external endpoint and return the response.",
479
479
  usageNotes: "- Supports GET, POST, PATCH, DELETE, and PUT methods.\n- Body can be raw JSON/text, URL-encoded form data, or multipart form data.",
480
- inputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "The request URL" }, "method": { "type": "string", "description": "HTTP method (GET, POST, PATCH, DELETE, or PUT)" }, "headers": { "type": "object", "description": "Custom request headers as key-value pairs" }, "queryParams": { "type": "object", "description": "Query string parameters as key-value pairs" }, "body": { "type": "string", "description": "Raw request body (used for JSON or custom content types)" }, "bodyItems": { "type": "object", "description": "Key-value body items (used for form data or URL-encoded content types)" }, "contentType": { "enum": ["none", "application/json", "application/x-www-form-urlencoded", "multipart/form-data", "custom"], "type": "string", "description": "The content type for the request body" }, "customContentType": { "type": "string", "description": 'Custom Content-Type header value (used when contentType is "custom")' }, "testData": { "type": "object", "description": "Test data for debug/preview mode" } }, "required": ["url", "method", "headers", "queryParams", "body", "bodyItems", "contentType", "customContentType"], "description": "HTTP request configuration" },
480
+ inputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "The request URL" }, "method": { "type": "string", "description": "HTTP method (GET, POST, PATCH, DELETE, or PUT)" }, "headers": { "type": "object", "properties": {}, "required": [], "description": "Custom request headers as key-value pairs" }, "queryParams": { "type": "object", "properties": {}, "required": [], "description": "Query string parameters as key-value pairs" }, "body": { "type": "string", "description": "Raw request body (used for JSON or custom content types)" }, "bodyItems": { "type": "object", "properties": {}, "required": [], "description": "Key-value body items (used for form data or URL-encoded content types)" }, "contentType": { "enum": ["none", "application/json", "application/x-www-form-urlencoded", "multipart/form-data", "custom"], "type": "string", "description": "The content type for the request body" }, "customContentType": { "type": "string", "description": 'Custom Content-Type header value (used when contentType is "custom")' }, "testData": { "type": "object", "properties": {}, "required": [], "description": "Test data for debug/preview mode" } }, "required": ["url", "method", "headers", "queryParams", "body", "bodyItems", "contentType", "customContentType"], "description": "HTTP request configuration" },
481
481
  outputSchema: { "type": "object", "properties": { "ok": { "type": "boolean", "description": "Whether the HTTP response status code is in the 2xx range" }, "status": { "type": "number", "description": "HTTP response status code" }, "statusText": { "type": "string", "description": "HTTP response status text" }, "response": { "type": "string", "description": "Response body as a string" } }, "required": ["ok", "status", "statusText", "response"] }
482
482
  },
483
483
  "hubspotCreateCompany": {
@@ -499,14 +499,14 @@ var init_metadata = __esm({
499
499
  description: "Look up a HubSpot company by domain name or company ID.",
500
500
  usageNotes: "- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the company is not found.\n- When searching by domain, performs a search query then fetches the full company record.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.",
501
501
  inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "HubSpot OAuth connection ID" }, "searchBy": { "enum": ["domain", "id"], "type": "string", "description": "How to look up the company: by domain name or HubSpot company ID" }, "companyDomain": { "type": "string", "description": "Domain to search by (used when searchBy is 'domain')" }, "companyId": { "type": "string", "description": "HubSpot company ID (used when searchBy is 'id')" }, "additionalProperties": { "type": "array", "items": { "type": "string" }, "description": "Extra HubSpot property names to include in the response beyond the defaults" } }, "required": ["searchBy", "companyDomain", "companyId", "additionalProperties"] },
502
- outputSchema: { "type": "object", "properties": { "company": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string" }, "properties": { "type": "object" }, "createdAt": { "type": "string" }, "updatedAt": { "type": "string" }, "archived": { "type": "boolean" } }, "required": ["id", "properties", "createdAt", "updatedAt", "archived"] }, { "type": "null" }] } }, "required": ["company"] }
502
+ outputSchema: { "type": "object", "properties": { "company": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string" }, "properties": { "type": "object", "properties": {}, "required": [] }, "createdAt": { "type": "string" }, "updatedAt": { "type": "string" }, "archived": { "type": "boolean" } }, "required": ["id", "properties", "createdAt", "updatedAt", "archived"] }, { "type": "null" }] } }, "required": ["company"] }
503
503
  },
504
504
  "hubspotGetContact": {
505
505
  stepType: "hubspotGetContact",
506
506
  description: "Look up a HubSpot contact by email address or contact ID.",
507
507
  usageNotes: "- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the contact is not found.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.",
508
508
  inputSchema: { "type": "object", "properties": { "connectionId": { "type": "string", "description": "HubSpot OAuth connection ID" }, "searchBy": { "enum": ["email", "id"], "type": "string", "description": "How to look up the contact: by email address or HubSpot contact ID" }, "contactEmail": { "type": "string", "description": "Email address to search by (used when searchBy is 'email')" }, "contactId": { "type": "string", "description": "HubSpot contact ID (used when searchBy is 'id')" }, "additionalProperties": { "type": "array", "items": { "type": "string" }, "description": "Extra HubSpot property names to include in the response beyond the defaults" } }, "required": ["searchBy", "contactEmail", "contactId", "additionalProperties"] },
509
- outputSchema: { "type": "object", "properties": { "contact": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string" }, "properties": { "type": "object" }, "createdAt": { "type": "string" }, "updatedAt": { "type": "string" }, "archived": { "type": "boolean" } }, "required": ["id", "properties", "createdAt", "updatedAt", "archived"] }, { "type": "null" }] } }, "required": ["contact"] }
509
+ outputSchema: { "type": "object", "properties": { "contact": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string" }, "properties": { "type": "object", "properties": {}, "required": [] }, "createdAt": { "type": "string" }, "updatedAt": { "type": "string" }, "archived": { "type": "boolean" } }, "required": ["id", "properties", "createdAt", "updatedAt", "archived"] }, { "type": "null" }] } }, "required": ["contact"] }
510
510
  },
511
511
  "hunterApiCompanyEnrichment": {
512
512
  stepType: "hunterApiCompanyEnrichment",
@@ -568,7 +568,7 @@ var init_metadata = __esm({
568
568
  stepType: "listDataSources",
569
569
  description: "List all data sources for the current app.",
570
570
  usageNotes: "- Returns metadata for every data source associated with the current app version.\n- Each entry includes the data source ID, name, description, status, and document list.",
571
- inputSchema: { "type": "object" },
571
+ inputSchema: { "type": "object", "properties": {}, "required": [] },
572
572
  outputSchema: { "description": "This step does not produce output data." }
573
573
  },
574
574
  "listGmailDrafts": {
@@ -614,21 +614,21 @@ var init_metadata = __esm({
614
614
  - In comparison mode, the context is the left operand and each case's condition is the right operand. First matching case wins. Use operator "default" as a fallback.
615
615
  - Requires at least two cases.
616
616
  - Each case can transition to a step in the current workflow (destinationStepId) or jump to another workflow (destinationWorkflowId).`,
617
- inputSchema: { "type": "object", "properties": { "mode": { "enum": ["ai", "comparison"], "type": "string", "description": "Evaluation mode: 'ai' for LLM-based, 'comparison' for operator-based. Default: 'ai'" }, "context": { "type": "string", "description": "AI mode: prompt context. Comparison mode: left operand (resolved via variables)." }, "cases": { "type": "array", "items": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Unique case identifier" }, "condition": { "type": "string", "description": "AI mode: statement to evaluate. Comparison mode: right operand value." }, "operator": { "enum": ["eq", "neq", "gt", "lt", "gte", "lte", "exists", "not_exists", "contains", "not_contains", "default"], "type": "string", "description": "Comparison operator (comparison mode only)" }, "destinationStepId": { "type": "string", "description": "Step to transition to if this case wins (workflow mode only)" }, "destinationWorkflowId": { "type": "string", "description": "Workflow to jump to if this case wins (uses that workflow's initial step)" } }, "required": ["id", "condition"] }, { "type": "string" }] }, "description": "List of conditions to evaluate (objects for managed UIs, strings for code)" }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. 
"gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Optional model settings override; uses the organization default if not specified (AI mode only)" } }, "required": ["context", "cases"], "description": "Configuration for the router step" },
617
+ inputSchema: { "type": "object", "properties": { "mode": { "enum": ["ai", "comparison"], "type": "string", "description": "Evaluation mode: 'ai' for LLM-based, 'comparison' for operator-based. Default: 'ai'" }, "context": { "type": "string", "description": "AI mode: prompt context. Comparison mode: left operand (resolved via variables)." }, "cases": { "type": "array", "items": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Unique case identifier" }, "condition": { "type": "string", "description": "AI mode: statement to evaluate. Comparison mode: right operand value." }, "operator": { "enum": ["eq", "neq", "gt", "lt", "gte", "lte", "exists", "not_exists", "contains", "not_contains", "default"], "type": "string", "description": "Comparison operator (comparison mode only)" }, "destinationStepId": { "type": "string", "description": "Step to transition to if this case wins (workflow mode only)" }, "destinationWorkflowId": { "type": "string", "description": "Workflow to jump to if this case wins (uses that workflow's initial step)" } }, "required": ["id", "condition"] }, { "type": "string" }] }, "description": "List of conditions to evaluate (objects for managed UIs, strings for code)" }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. 
"gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Optional model settings override; uses the organization default if not specified (AI mode only)" } }, "required": ["context", "cases"], "description": "Configuration for the router step" },
618
618
  outputSchema: { "type": "object", "properties": { "selectedCase": { "type": "number", "description": "The index of the winning case" } }, "required": ["selectedCase"] }
619
619
  },
620
620
  "makeDotComRunScenario": {
621
621
  stepType: "makeDotComRunScenario",
622
622
  description: "Trigger a Make.com (formerly Integromat) scenario via webhook and return the response.",
623
623
  usageNotes: "- The webhook URL must be configured in your Make.com scenario.\n- Input key-value pairs are sent as JSON in the POST body.\n- Response format depends on the Make.com scenario configuration.",
624
- inputSchema: { "type": "object", "properties": { "webhookUrl": { "type": "string", "description": "Make.com webhook URL for the scenario" }, "input": { "type": "object", "description": "Key-value pairs to send as the JSON POST body" } }, "required": ["webhookUrl", "input"] },
624
+ inputSchema: { "type": "object", "properties": { "webhookUrl": { "type": "string", "description": "Make.com webhook URL for the scenario" }, "input": { "type": "object", "properties": {}, "required": [], "description": "Key-value pairs to send as the JSON POST body" } }, "required": ["webhookUrl", "input"] },
625
625
  outputSchema: { "type": "object", "properties": { "data": { "description": "Response from the Make.com scenario (JSON or string depending on scenario configuration)" } }, "required": ["data"] }
626
626
  },
627
627
  "mergeAudio": {
628
628
  stepType: "mergeAudio",
629
629
  description: "Merge one or more clips into a single audio file.",
630
630
  usageNotes: "",
631
- inputSchema: { "type": "object", "properties": { "mp3Urls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the MP3 audio clips to merge in order" }, "fileMetadata": { "type": "object", "description": "FFmpeg MP3 metadata key-value pairs to embed in the output file" }, "albumArtUrl": { "type": "string", "description": "URL of an image to embed as album art in the output file" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["mp3Urls"] },
631
+ inputSchema: { "type": "object", "properties": { "mp3Urls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the MP3 audio clips to merge in order" }, "fileMetadata": { "type": "object", "properties": {}, "required": [], "description": "FFmpeg MP3 metadata key-value pairs to embed in the output file" }, "albumArtUrl": { "type": "string", "description": "URL of an image to embed as album art in the output file" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["mp3Urls"] },
632
632
  outputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the merged audio file" } }, "required": ["audioUrl"] }
633
633
  },
634
634
  "mergeVideos": {
@@ -656,7 +656,7 @@ var init_metadata = __esm({
656
656
  stepType: "n8nRunNode",
657
657
  description: "Trigger an n8n workflow node via webhook and return the response.",
658
658
  usageNotes: "- The webhook URL must be configured in your n8n workflow.\n- Supports GET and POST methods with optional Basic authentication.\n- For GET requests, input values are sent as query parameters. For POST, they are sent as JSON body.",
659
- inputSchema: { "type": "object", "properties": { "method": { "type": "string", "description": "HTTP method to use (GET or POST)" }, "authentication": { "enum": ["none", "basic", "string"], "type": "string", "description": "Authentication type for the webhook request" }, "user": { "type": "string", "description": "Username for Basic authentication" }, "password": { "type": "string", "description": "Password for Basic authentication" }, "webhookUrl": { "type": "string", "description": "n8n webhook URL for the workflow node" }, "input": { "type": "object", "description": "Key-value pairs sent as query params (GET) or JSON body (POST)" } }, "required": ["method", "authentication", "user", "password", "webhookUrl", "input"] },
659
+ inputSchema: { "type": "object", "properties": { "method": { "type": "string", "description": "HTTP method to use (GET or POST)" }, "authentication": { "enum": ["none", "basic", "string"], "type": "string", "description": "Authentication type for the webhook request" }, "user": { "type": "string", "description": "Username for Basic authentication" }, "password": { "type": "string", "description": "Password for Basic authentication" }, "webhookUrl": { "type": "string", "description": "n8n webhook URL for the workflow node" }, "input": { "type": "object", "properties": {}, "required": [], "description": "Key-value pairs sent as query params (GET) or JSON body (POST)" } }, "required": ["method", "authentication", "user", "password", "webhookUrl", "input"] },
660
660
  outputSchema: { "type": "object", "properties": { "data": { "description": "Response from the n8n node (JSON or string depending on node configuration)" } }, "required": ["data"] }
661
661
  },
662
662
  "notionCreatePage": {
@@ -705,7 +705,7 @@ var init_metadata = __esm({
705
705
  stepType: "postToZapier",
706
706
  description: "Send data to a Zapier Zap via webhook and return the response.",
707
707
  usageNotes: "- The webhook URL must be configured in the Zapier Zap settings\n- Input keys and values are sent as the JSON body of the POST request\n- The webhook response (JSON or plain text) is returned as the output",
708
- inputSchema: { "type": "object", "properties": { "webhookUrl": { "type": "string", "description": "Zapier webhook URL to send data to" }, "input": { "type": "object", "description": "Key-value pairs to send as the JSON POST body" } }, "required": ["webhookUrl", "input"] },
708
+ inputSchema: { "type": "object", "properties": { "webhookUrl": { "type": "string", "description": "Zapier webhook URL to send data to" }, "input": { "type": "object", "properties": {}, "required": [], "description": "Key-value pairs to send as the JSON POST body" } }, "required": ["webhookUrl", "input"] },
709
709
  outputSchema: { "type": "object", "properties": { "data": { "description": "Parsed webhook response from Zapier (JSON object, array, or string)" } }, "required": ["data"] }
710
710
  },
711
711
  "queryAppDatabase": {
@@ -761,8 +761,8 @@ var init_metadata = __esm({
761
761
  stepType: "runFromConnectorRegistry",
762
762
  description: "Run a raw API connector to a third-party service",
763
763
  usageNotes: '- Use the /developer/v2/helpers/connectors endpoint to list available services and actions.\n- Use /developer/v2/helpers/connectors/{serviceId}/{actionId} to get the full input configuration for an action.\n- Use /developer/v2/helpers/connections to list your available OAuth connections.\n- The actionId format is "serviceId/actionId" (e.g., "slack/send-message").\n- Pass a __connectionId to authenticate the request with a specific OAuth connection, otherwise the default will be used (if configured).',
764
- inputSchema: { "type": "object", "properties": { "actionId": { "type": "string", "description": "The connector action identifier in the format serviceId/actionId" }, "displayName": { "type": "string", "description": "Human-readable name of the connector action" }, "icon": { "type": "string", "description": "Icon URL for the connector" }, "configurationValues": { "type": "object", "description": "Key-value configuration parameters for the connector action" }, "__connectionId": { "type": "string", "description": "OAuth connection ID used to authenticate the connector request" } }, "required": ["actionId", "displayName", "icon", "configurationValues"], "description": "Configuration for the connector registry step" },
765
- outputSchema: { "type": "object", "properties": { "data": { "type": "object", "description": "Key-value map of output variables set by the connector" } }, "required": ["data"] }
764
+ inputSchema: { "type": "object", "properties": { "actionId": { "type": "string", "description": "The connector action identifier in the format serviceId/actionId" }, "displayName": { "type": "string", "description": "Human-readable name of the connector action" }, "icon": { "type": "string", "description": "Icon URL for the connector" }, "configurationValues": { "type": "object", "properties": {}, "required": [], "description": "Key-value configuration parameters for the connector action" }, "__connectionId": { "type": "string", "description": "OAuth connection ID used to authenticate the connector request" } }, "required": ["actionId", "displayName", "icon", "configurationValues"], "description": "Configuration for the connector registry step" },
765
+ outputSchema: { "type": "object", "properties": { "data": { "type": "object", "properties": {}, "required": [], "description": "Key-value map of output variables set by the connector" } }, "required": ["data"] }
766
766
  },
767
767
  "runPackagedWorkflow": {
768
768
  stepType: "runPackagedWorkflow",
@@ -770,7 +770,7 @@ var init_metadata = __esm({
770
770
  usageNotes: `- From the user's perspective, packaged workflows are just ordinary blocks. Behind the scenes, they operate like packages/libraries in a programming language, letting the user execute custom functionality.
771
771
  - Some of these packaged workflows are available as part of MindStudio's "Standard Library" and available to every user.
772
772
  - Available packaged workflows are documented here as individual blocks, but the runPackagedWorkflow block is how they need to be wrapped in order to be executed correctly.`,
773
- inputSchema: { "type": "object", "properties": { "appId": { "type": "string", "description": "The app ID of the packaged workflow source" }, "workflowId": { "type": "string", "description": "The source workflow ID to execute" }, "inputVariables": { "type": "object", "description": "Variables to pass as input to the packaged workflow" }, "outputVariables": { "type": "object", "description": "Variables to capture from the packaged workflow output" }, "name": { "type": "string", "description": "Display name of the packaged workflow" } }, "required": ["appId", "workflowId", "inputVariables", "outputVariables", "name"], "description": "Configuration for the packaged workflow step" },
773
+ inputSchema: { "type": "object", "properties": { "appId": { "type": "string", "description": "The app ID of the packaged workflow source" }, "workflowId": { "type": "string", "description": "The source workflow ID to execute" }, "inputVariables": { "type": "object", "properties": {}, "required": [], "description": "Variables to pass as input to the packaged workflow" }, "outputVariables": { "type": "object", "properties": {}, "required": [], "description": "Variables to capture from the packaged workflow output" }, "name": { "type": "string", "description": "Display name of the packaged workflow" } }, "required": ["appId", "workflowId", "inputVariables", "outputVariables", "name"], "description": "Configuration for the packaged workflow step" },
774
774
  outputSchema: { "type": "object", "properties": { "data": { "description": "The result data returned from the packaged workflow" } }, "required": ["data"] }
775
775
  },
776
776
  "scrapeFacebookPage": {
@@ -847,22 +847,22 @@ var init_metadata = __esm({
847
847
  stepType: "scrapeUrl",
848
848
  description: "Extract text, HTML, or structured content from one or more web pages.",
849
849
  usageNotes: '- Accepts a single URL or multiple URLs (as a JSON array, comma-separated, or newline-separated).\n- Output format controls the result shape: "text" returns markdown, "html" returns raw HTML, "json" returns structured scraper data.\n- Can optionally capture a screenshot of each page.',
850
- inputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "URL(s) to scrape. Accepts a single URL, JSON array, or comma/newline-separated list" }, "service": { "enum": ["default", "firecrawl"], "type": "string", "description": "Scraping service to use" }, "autoEnhance": { "type": "boolean", "description": "Whether to enable enhanced scraping for social media URLs (e.g. Twitter, LinkedIn)" }, "pageOptions": { "type": "object", "properties": { "onlyMainContent": { "type": "boolean", "description": "Whether to extract only the main content of the page, excluding navigation, footers, etc." }, "screenshot": { "type": "boolean", "description": "Whether to capture a screenshot of the page" }, "waitFor": { "type": "number", "description": "Milliseconds to wait before scraping (0 for immediate)" }, "replaceAllPathsWithAbsolutePaths": { "type": "boolean", "description": "Whether to convert relative URLs to absolute URLs in the result" }, "headers": { "type": "object", "description": "Custom HTTP request headers as key-value pairs" }, "removeTags": { "type": "array", "items": { "type": "string" }, "description": "HTML tags to remove from the scraped result" }, "mobile": { "type": "boolean", "description": "Whether to scrape using a mobile user-agent" } }, "required": ["onlyMainContent", "screenshot", "waitFor", "replaceAllPathsWithAbsolutePaths", "headers", "removeTags", "mobile"], "description": "Page-level scraping options (content filtering, screenshots, headers, etc.)" } }, "required": ["url"] },
851
- outputSchema: { "type": "object", "properties": { "content": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }, { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"] }, { "type": "array", "items": { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"] } }] }, "screenshot": { "type": "string", "description": "Screenshot URL, only present 
when screenshot was requested via pageOptions" } }, "required": ["content"] }
850
+ inputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "URL(s) to scrape. Accepts a single URL, JSON array, or comma/newline-separated list" }, "service": { "enum": ["default", "firecrawl"], "type": "string", "description": "Scraping service to use" }, "autoEnhance": { "type": "boolean", "description": "Whether to enable enhanced scraping for social media URLs (e.g. Twitter, LinkedIn)" }, "pageOptions": { "type": "object", "properties": { "onlyMainContent": { "type": "boolean", "description": "Whether to extract only the main content of the page, excluding navigation, footers, etc." }, "screenshot": { "type": "boolean", "description": "Whether to capture a screenshot of the page" }, "waitFor": { "type": "number", "description": "Milliseconds to wait before scraping (0 for immediate)" }, "replaceAllPathsWithAbsolutePaths": { "type": "boolean", "description": "Whether to convert relative URLs to absolute URLs in the result" }, "headers": { "type": "object", "properties": {}, "required": [], "description": "Custom HTTP request headers as key-value pairs" }, "removeTags": { "type": "array", "items": { "type": "string" }, "description": "HTML tags to remove from the scraped result" }, "mobile": { "type": "boolean", "description": "Whether to scrape using a mobile user-agent" } }, "required": ["onlyMainContent", "screenshot", "waitFor", "replaceAllPathsWithAbsolutePaths", "headers", "removeTags", "mobile"], "description": "Page-level scraping options (content filtering, screenshots, headers, etc.)" } }, "required": ["url"] },
851
+ outputSchema: { "type": "object", "properties": { "content": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }, { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "properties": {}, "required": [], "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"] }, { "type": "array", "items": { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "properties": {}, "required": [], "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"] } }] }, 
"screenshot": { "type": "string", "description": "Screenshot URL, only present when screenshot was requested via pageOptions" } }, "required": ["content"] }
852
852
  },
853
853
  "scrapeXPost": {
854
854
  stepType: "scrapeXPost",
855
855
  description: "Scrape data from a single X (Twitter) post by URL.",
856
856
  usageNotes: "- Returns structured post data (text, html, optional json/screenshot/metadata).\n- Optionally saves the text content to a variable.",
857
857
  inputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "Full URL to the X post (e.g. https://x.com/elonmusk/status/1655608985058267139)" } }, "required": ["url"] },
858
- outputSchema: { "type": "object", "properties": { "post": { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"], "description": "Scraped post data including text, HTML, and optional structured JSON" } }, "required": ["post"] }
858
+ outputSchema: { "type": "object", "properties": { "post": { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "properties": {}, "required": [], "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"], "description": "Scraped post data including text, HTML, and optional structured JSON" } }, "required": ["post"] }
859
859
  },
860
860
  "scrapeXProfile": {
861
861
  stepType: "scrapeXProfile",
862
862
  description: "Scrape public profile data from an X (Twitter) account by URL.",
863
863
  usageNotes: "- Returns structured profile data.\n- Optionally saves the result to a variable.",
864
864
  inputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "Full URL or username for the X profile (e.g. https://x.com/elonmusk)" } }, "required": ["url"] },
865
- outputSchema: { "type": "object", "properties": { "profile": { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"], "description": "Scraped profile data including text, HTML, and optional structured JSON" } }, "required": ["profile"] }
865
+ outputSchema: { "type": "object", "properties": { "profile": { "type": "object", "properties": { "text": { "type": "string", "description": "Markdown/plain-text content of the scraped page" }, "html": { "type": "string", "description": "Raw HTML content of the scraped page" }, "json": { "type": "object", "properties": {}, "required": [], "description": "Structured data extracted from the page" }, "screenshotUrl": { "type": "string", "description": "Screenshot URL of the page (if requested)" }, "metadata": { "type": "object", "properties": { "title": { "type": "string", "description": "Page title" }, "description": { "type": "string", "description": "Page meta description" }, "url": { "type": "string", "description": "Canonical URL" }, "image": { "type": "string", "description": "Open Graph image URL" } }, "required": ["title", "description", "url", "image"], "description": "Page metadata (Open Graph / meta tags)" } }, "required": ["text", "html"], "description": "Scraped profile data including text, HTML, and optional structured JSON" } }, "required": ["profile"] }
866
866
  },
867
867
  "screenshotUrl": {
868
868
  stepType: "screenshotUrl",
@@ -918,7 +918,7 @@ var init_metadata = __esm({
918
918
  description: "Fetch Google Trends data for a search term.",
919
919
  usageNotes: '- date accepts shorthand ("now 1-H", "today 1-m", "today 5-y", etc.) or custom "yyyy-mm-dd yyyy-mm-dd" ranges.\n- data_type controls the shape of returned data: TIMESERIES, GEO_MAP, GEO_MAP_0, RELATED_TOPICS, or RELATED_QUERIES.',
920
920
  inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The search term to look up on Google Trends" }, "hl": { "type": "string", "description": 'Language code (e.g. "en")' }, "geo": { "type": "string", "description": "Geographic region: empty string for worldwide, or a two-letter country code" }, "data_type": { "enum": ["TIMESERIES", "GEO_MAP", "GEO_MAP_0", "RELATED_TOPICS", "RELATED_QUERIES"], "type": "string", "description": "Type of trend data to return" }, "cat": { "type": "string", "description": 'Category filter ("0" for all categories)' }, "date": { "type": "string", "description": 'Date range for trend data. Available options: - "now 1-H" - Past hour - "now 4-H" - Past 4 hours - "now 1-d" - Past day - "now 7-d" - Past 7 days - "today 1-m" - Past 30 days - "today 3-m" - Past 90 days - "today 12-m" - Past 12 months - "today 5-y" - Past 5 years - "all - 2004" - present - You can also pass custom values: "yyyy-mm-dd yyyy-mm-dd"' }, "ts": { "type": "string", "description": "Timezone offset in minutes (-1439 to 1439, default: 420 for PDT)" } }, "required": ["text", "hl", "geo", "data_type", "cat", "date", "ts"] },
921
- outputSchema: { "type": "object", "properties": { "trends": { "type": "object", "description": "Google Trends data for the searched term" } }, "required": ["trends"] }
921
+ outputSchema: { "type": "object", "properties": { "trends": { "type": "object", "properties": {}, "required": [], "description": "Google Trends data for the searched term" } }, "required": ["trends"] }
922
922
  },
923
923
  "searchPerplexity": {
924
924
  stepType: "searchPerplexity",
@@ -939,20 +939,20 @@ var init_metadata = __esm({
939
939
  description: "Search for YouTube videos by keyword.",
940
940
  usageNotes: "- Supports pagination (up to 5 pages) and country/language filters.\n- Use the filter/filterType fields for YouTube search parameter (sp) filters.",
941
941
  inputSchema: { "type": "object", "properties": { "query": { "type": "string", "description": "Search query for YouTube videos" }, "limitPages": { "type": "string", "description": "Maximum number of pages to fetch (1-5)" }, "filter": { "type": "string", "description": "YouTube search parameter (sp) filter value" }, "filterType": { "type": "string", "description": "Filter type identifier" }, "countryCode": { "type": "string", "description": 'Google gl country code for regional results (default: "US")' }, "languageCode": { "type": "string", "description": 'Google hl language code for result language (default: "en")' } }, "required": ["query", "limitPages", "filter", "filterType"] },
942
- outputSchema: { "type": "object", "properties": { "results": { "type": "object", "description": "YouTube search results including video_results, channel_results, etc." } }, "required": ["results"] }
942
+ outputSchema: { "type": "object", "properties": { "results": { "type": "object", "properties": {}, "required": [], "description": "YouTube search results including video_results, channel_results, etc." } }, "required": ["results"] }
943
943
  },
944
944
  "searchYoutubeTrends": {
945
945
  stepType: "searchYoutubeTrends",
946
946
  description: "Retrieve trending videos on YouTube by category and region.",
947
947
  usageNotes: '- Categories: "now" (trending now), "music", "gaming", "films".\n- Supports country and language filtering.',
948
948
  inputSchema: { "type": "object", "properties": { "bp": { "enum": ["now", "music", "gaming", "films"], "type": "string", "description": 'Trending category: "now" (trending now), "music", "gaming", or "films"' }, "hl": { "type": "string", "description": 'Language code (e.g. "en")' }, "gl": { "type": "string", "description": 'Country code (e.g. "US")' } }, "required": ["bp", "hl", "gl"] },
949
- outputSchema: { "type": "object" }
949
+ outputSchema: { "type": "object", "properties": {}, "required": [] }
950
950
  },
951
951
  "sendEmail": {
952
952
  stepType: "sendEmail",
953
953
  description: "Send an email to one or more configured recipient addresses.",
954
954
  usageNotes: '- Recipient email addresses are resolved from OAuth connections configured by the app creator. The user running the workflow does not specify the recipient directly.\n- If the body is a URL to a hosted HTML file on the CDN, the HTML is fetched and used as the email body.\n- When generateHtml is enabled, the body text is converted to a styled HTML email using an AI model.\n- connectionId can be a comma-separated list to send to multiple recipients.\n- The special connectionId "trigger_email" uses the email address that triggered the workflow.',
955
- inputSchema: { "type": "object", "properties": { "subject": { "type": "string", "description": "Email subject line" }, "body": { "type": "string", "description": "Email body content (plain text, markdown, HTML, or a CDN URL to an HTML file)" }, "connectionId": { "type": "string", "description": "OAuth connection ID(s) for the recipient(s), comma-separated for multiple" }, "generateHtml": { "type": "boolean", "description": "When true, auto-convert the body text into a styled HTML email using AI" }, "generateHtmlInstructions": { "type": "string", "description": "Natural language instructions for the HTML generation style" }, "generateHtmlModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate 
generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model settings override for HTML generation" }, "attachments": { "type": "array", "items": { "type": "string" }, "description": "URLs of files to attach to the email" } }, "required": ["subject", "body"] },
955
+ inputSchema: { "type": "object", "properties": { "subject": { "type": "string", "description": "Email subject line" }, "body": { "type": "string", "description": "Email body content (plain text, markdown, HTML, or a CDN URL to an HTML file)" }, "connectionId": { "type": "string", "description": "OAuth connection ID(s) for the recipient(s), comma-separated for multiple" }, "generateHtml": { "type": "boolean", "description": "When true, auto-convert the body text into a styled HTML email using AI" }, "generateHtmlInstructions": { "type": "string", "description": "Natural language instructions for the HTML generation style" }, "generateHtmlModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate 
generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model settings override for HTML generation" }, "attachments": { "type": "array", "items": { "type": "string" }, "description": "URLs of files to attach to the email" } }, "required": ["subject", "body"] },
956
956
  outputSchema: { "type": "object", "properties": { "recipients": { "type": "array", "items": { "type": "string" }, "description": "Email addresses the message was sent to" } }, "required": ["recipients"] }
957
957
  },
958
958
  "sendGmailDraft": {
@@ -995,7 +995,7 @@ var init_metadata = __esm({
995
995
  description: "Explicitly set a variable to a given value.",
996
996
  usageNotes: "- Useful for bootstrapping global variables or setting constants.\n- The variable name and value both support variable interpolation.\n- The type field is a UI hint only (controls input widget in the editor).",
997
997
  inputSchema: { "type": "object", "properties": { "value": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["value"], "description": "Configuration for the set variable step" },
998
- outputSchema: { "type": "object" }
998
+ outputSchema: { "type": "object", "properties": {}, "required": [] }
999
999
  },
1000
1000
  "telegramEditMessage": {
1001
1001
  stepType: "telegramEditMessage",
@@ -1057,14 +1057,14 @@ var init_metadata = __esm({
1057
1057
  stepType: "textToSpeech",
1058
1058
  description: "Generate an audio file from provided text using a speech model.",
1059
1059
  usageNotes: "- The text field contains the exact words to be spoken (not instructions).\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.",
1060
- inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The text to convert to speech" }, "intermediateAsset": { "type": "boolean" }, "speechModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Speech synthesis model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default speech model if not specified" } }, "required": ["text"] },
1060
+ inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The text to convert to speech" }, "intermediateAsset": { "type": "boolean" }, "speechModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Speech synthesis model identifier" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default speech model if not specified" } }, "required": ["text"] },
1061
1061
  outputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the generated audio file" } }, "required": ["audioUrl"] }
1062
1062
  },
1063
1063
  "transcribeAudio": {
1064
1064
  stepType: "transcribeAudio",
1065
1065
  description: "Convert an audio file to text using a transcription model.",
1066
1066
  usageNotes: "- The prompt field provides optional context to improve transcription accuracy (e.g. language, speaker names, domain).",
1067
- inputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the audio file to transcribe" }, "prompt": { "type": "string", "description": "Optional context to improve transcription accuracy (e.g. language, speaker names, domain terms)" }, "transcriptionModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Audio transcription model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default transcription model if not specified" } }, "required": ["audioUrl", "prompt"] },
1067
+ inputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the audio file to transcribe" }, "prompt": { "type": "string", "description": "Optional context to improve transcription accuracy (e.g. language, speaker names, domain terms)" }, "transcriptionModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Audio transcription model identifier" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default transcription model if not specified" } }, "required": ["audioUrl", "prompt"] },
1068
1068
  outputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The transcribed text from the audio file" } }, "required": ["text"] }
1069
1069
  },
1070
1070
  "trimMedia": {
@@ -1138,7 +1138,7 @@ var init_metadata = __esm({
1138
1138
  The method code itself is unchanged \u2014 streaming is transparent to the
1139
1139
  developer. See V2ExecutionService.ts and the invoke handler in V2Apps for
1140
1140
  the server-side plumbing.`,
1141
- inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The message to send (prompt for AI, or text for system echo)" }, "source": { "enum": ["user", "system"], "type": "string", "description": 'Message source: "user" sends to AI model, "system" echoes message content directly. Defaults to "user"' }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": 
"Model configuration override. Optional; uses the workflow's default model if not specified" }, "structuredOutputType": { "enum": ["text", "json", "csv"], "type": "string", "description": "Output format constraint for structured responses" }, "structuredOutputExample": { "type": "string", "description": "Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types." }, "chatHistoryMode": { "enum": ["include", "exclude"], "type": "string", "description": "Whether to include or exclude prior chat history in the AI context" } }, "required": ["message"], "description": "Configuration for the user message step" },
1141
+ inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The message to send (prompt for AI, or text for system echo)" }, "source": { "enum": ["user", "system"], "type": "string", "description": 'Message source: "user" sends to AI model, "system" echoes message content directly. Defaults to "user"' }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "properties": {}, "required": [], "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", 
"maxResponseTokens"], "description": "Model configuration override. Optional; uses the workflow's default model if not specified" }, "structuredOutputType": { "enum": ["text", "json", "csv"], "type": "string", "description": "Output format constraint for structured responses" }, "structuredOutputExample": { "type": "string", "description": "Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types." }, "chatHistoryMode": { "enum": ["include", "exclude"], "type": "string", "description": "Whether to include or exclude prior chat history in the AI context" } }, "required": ["message"], "description": "Configuration for the user message step" },
1142
1142
  outputSchema: { "type": "object", "properties": { "content": { "type": "string", "description": "The AI model's response or echoed system message content" } }, "required": ["content"] }
1143
1143
  },
1144
1144
  "videoFaceSwap": {
@@ -4450,36 +4450,15 @@ var init_tools = __esm({
4450
4450
  }
4451
4451
  });
4452
4452
 
4453
- // src/generated/llms-content.ts
4454
- var llms_content_exports = {};
4455
- __export(llms_content_exports, {
4456
- llmsContent: () => llmsContent
4457
- });
4458
- var llmsContent;
4459
- var init_llms_content = __esm({
4460
- "src/generated/llms-content.ts"() {
4453
+ // src/ask/prompt/identity.ts
4454
+ var identity;
4455
+ var init_identity = __esm({
4456
+ "src/ask/prompt/identity.ts"() {
4461
4457
  "use strict";
4462
- llmsContent = '# @mindstudio-ai/agent\n\nTypeScript SDK, CLI, and MCP server for MindStudio. One API key gives you access to 200+ AI models (OpenAI, Anthropic, Google, Meta, xAI, DeepSeek, etc.) and 1,000+ actions including 850+ connector actions across third-party services from the open-source MindStudio Connector Registry (https://github.com/mindstudio-ai/mscr). No separate provider API keys required.\n\nThis file is the complete API reference. No other documentation is needed to use the SDK.\n\n## Recommended workflow\n\nThere are 150+ actions available. Do NOT try to read or load them all at once. Follow this discovery flow:\n\n1. **Identify yourself** \u2014 Call `changeName` to set your display name (use your name or whatever your user calls you). If you have a profile picture or icon, call `uploadFile` to upload it, then `changeProfilePicture` with the returned URL. This helps users identify your requests in their logs.\n2. **Ask** \u2014 Use `mindstudio ask "your question"` (CLI) or the `ask` MCP tool for SDK guidance. It knows every action, model, and connector and returns working TypeScript code with real model IDs and config options. Examples: `mindstudio ask "generate an image with FLUX"`, `mindstudio ask "what models support vision?"`, `mindstudio ask "how do I send a Slack message?"`.\n3. **Browse** \u2014 For manual discovery, call `listActions` (MCP tool) or `mindstudio list-actions --summary` (CLI) to get a compact `{ action: description }` map of everything available (~3k tokens). Call `mindstudio info <action>` (CLI) for parameter details.\n4. **Call it** \u2014 Invoke the action with the required parameters. All actions share the same calling convention (see below).\n\nFor specific use cases:\n\n- **OAuth third-party integrations** (Slack, Google, HubSpot, etc.): These are optional OAuth connectors from the MindStudio Connector Registry \u2014 for most tasks, use actions directly instead. 
If you need a third-party integration: call `listConnectors()` to browse services \u2192 `getConnectorAction(serviceId, actionId)` for input fields \u2192 execute via `runFromConnectorRegistry`. Requires an OAuth connection set up in MindStudio first \u2014 call `listConnections()` to check available connections.\n- **Pre-built agents**: Call `listAgents()` to see what\'s available \u2192 `runAgent({ appId })` to execute one. **Important:** Not all agents are configured for API use. Do not try to run an agent just because it appears in the list \u2014 only run agents the user specifically asks you to run.\n- **Model selection**: Call `listModelsSummary()` or `listModelsSummaryByType("llm_chat")` to browse models, then pass the model ID as `modelOverride.model` to actions like `generateText`. Use the summary endpoints (not `listModels`) to keep token usage low.\n- **Cost estimation**: AI-powered actions (text generation, image generation, video, audio, etc.) cost money. Call `estimateStepCost(stepType, stepInput)` before running these and confirm with the user before proceeding \u2014 unless they\'ve explicitly given permission to go ahead. Non-AI actions (data lookups, OAuth connectors, etc.) 
are generally free.\n\n## Install\n\nStandalone binary (CLI/MCP, no dependencies):\n```bash\ncurl -fsSL https://msagent.ai/install.sh | bash\n```\n\nnpm (SDK + CLI):\n```bash\nnpm install @mindstudio-ai/agent\n```\n\nRequires Node.js >= 18.\n\n## CLI\n\nThe package includes a CLI for executing steps from the command line or scripts:\n\n```bash\n# Execute with named flags (kebab-case)\nmindstudio generate-image --prompt "A mountain landscape"\n\n# Execute with JSON input (JSON5-tolerant)\nmindstudio generate-image \'{prompt: "A mountain landscape"}\'\n\n# Extract a single output field\nmindstudio generate-image --prompt "A sunset" --output-key imageUrl\n\n# List all methods (compact JSON \u2014 best for LLM discovery)\nmindstudio list --summary\n\n# List all methods (human-readable table)\nmindstudio list\n\n# Show method details (params, types, output)\nmindstudio info generate-image\n\n# Run via npx without installing\nnpx @mindstudio-ai/agent generate-text --message "Hello"\n```\n\nAuth: run `mindstudio login`, set `MINDSTUDIO_API_KEY` env var, or pass `--api-key <key>`.\nMethod names are kebab-case on the CLI (camelCase also accepted). Flags are kebab-case (`--video-url` for `videoUrl`).\nUse `--output-key <key>` to extract a single field, `--no-meta` to strip $-prefixed metadata.\n\n### Authentication\n\n```bash\n# Interactive login (opens browser, saves key to ~/.mindstudio/config.json)\nmindstudio login\n\n# Check current auth status\nmindstudio whoami\n\n# Clear stored credentials\nmindstudio logout\n```\n\nAuth resolution order: `--api-key` flag > `MINDSTUDIO_API_KEY` env > `~/.mindstudio/config.json` > `CALLBACK_TOKEN` env.\n\n## MCP server\n\nThe package includes an MCP server exposing all methods as tools. 
Start by calling the `listSteps` tool to discover available methods.\n\n```bash\nmindstudio mcp\n```\n\nMCP client config (standalone binary \u2014 recommended):\n```json\n{\n "mcpServers": {\n "mindstudio": {\n "command": "mindstudio",\n "args": ["mcp"],\n "env": { "MINDSTUDIO_API_KEY": "your-api-key" }\n }\n }\n}\n```\n\n## Setup\n\n```typescript\nimport { MindStudioAgent } from \'@mindstudio-ai/agent\';\n\n// With API key (or set MINDSTUDIO_API_KEY env var)\nconst agent = new MindStudioAgent({ apiKey: \'your-key\' });\n```\n\nYour MindStudio API key authenticates all requests. MindStudio routes to the correct AI provider (OpenAI, Google, Anthropic, etc.) server-side \u2014 you do NOT need separate provider API keys.\n\nConstructor options:\n```typescript\nnew MindStudioAgent({\n apiKey?: string, // Auth token. Falls back to MINDSTUDIO_API_KEY env var.\n baseUrl?: string, // API base URL. Defaults to "https://v1.mindstudio-api.com".\n maxRetries?: number, // Retries on 429 rate limit (default: 3). Uses Retry-After header for delay.\n})\n```\n\n## Models\n\nDirect access to 200+ AI models from every major provider \u2014 all through a single API key, billed at cost with no markups.\n\nUse `listModels()` or `listModelsByType()` for full model details, or `listModelsSummary()` / `listModelsSummaryByType()` for a lightweight list (id, name, type, tags) suitable for LLM context windows. 
Pass a model ID to `modelOverride.model` in methods like `generateText` to select a specific model:\n\n```typescript\nconst { models } = await agent.listModelsByType(\'llm_chat\');\nconst model = models.find(m => m.name.includes("Gemini"));\n\nconst { content } = await agent.generateText({\n message: \'Hello\',\n modelOverride: {\n model: model.id,\n temperature: 0.7,\n maxResponseTokens: 1024,\n },\n});\n```\n\n## Calling convention\n\nEvery method has the signature:\n```typescript\nagent.methodName(input: InputType, options?: { appId?: string, threadId?: string }): Promise<OutputType & StepExecutionMeta>\n```\n\nThe first argument is the step-specific input object. The optional second argument controls thread/app context.\n\n**Results are returned flat** \u2014 output fields are spread at the top level alongside metadata:\n\n```typescript\nconst { content } = await agent.generateText({ message: \'Hello\' });\n\n// Full result shape for any method:\nconst result = await agent.generateText({ message: `Hello` });\nresult.content; // step-specific output field\nresult.$appId; // string \u2014 app ID for this execution\nresult.$threadId; // string \u2014 thread ID for this execution\nresult.$rateLimitRemaining; // number | undefined \u2014 API calls remaining in rate limit window\nresult.$billingCost; // number | undefined \u2014 cost in credits for this call\nresult.$billingEvents; // object[] | undefined \u2014 itemized billing events\n```\n\n## Thread persistence\n\nPass `$appId`/`$threadId` from a previous result to maintain conversation state, variable state, or other context across calls:\n\n```typescript\nconst r1 = await agent.generateText({ message: \'My name is Alice\' });\nconst r2 = await agent.generateText(\n { message: \'What is my name?\' },\n { threadId: r1.$threadId, appId: r1.$appId },\n);\n// r2.content => "Your name is Alice"\n```\n\n## Error handling\n\nAll errors throw `MindStudioError`:\n```typescript\nimport { MindStudioError } from 
\'@mindstudio-ai/agent\';\n\ntry {\n await agent.generateImage({ prompt: \'...\' });\n} catch (err) {\n if (err instanceof MindStudioError) {\n err.message; // Human-readable error message\n err.code; // Machine-readable code: "invalid_step_config", "api_error", "call_cap_exceeded", "output_fetch_error"\n err.status; // HTTP status code (400, 401, 429, etc.)\n err.details; // Raw error body from the API\n }\n}\n```\n\n429 rate limit errors are retried automatically (configurable via `maxRetries`).\n\n## Low-level access\n\nFor action types not covered by generated methods:\n```typescript\nconst result = await agent.executeStep(\'stepType\', { ...params });\n```\n\n## Batch execution\n\nExecute multiple steps in parallel in a single request. Maximum 50 steps per batch.\nIndividual step failures do not affect other steps \u2014 partial success is possible.\n\n```typescript\nconst result = await agent.executeStepBatch([\n { stepType: \'generateImage\', step: { prompt: \'a sunset\' } },\n { stepType: \'textToSpeech\', step: { text: \'hello world\' } },\n], { appId?, threadId? });\n\n// Result:\nresult.results; // BatchStepResult[] \u2014 same order as input\nresult.results[0].stepType; // string\nresult.results[0].output; // object | undefined (step output on success)\nresult.results[0].error; // string | undefined (error message on failure)\nresult.results[0].billingCost; // number | undefined (cost on success)\nresult.totalBillingCost; // number | undefined\nresult.appId; // string\nresult.threadId; // string\n```\n\nCLI:\n```bash\nmindstudio batch \'[{"stepType":"generateImage","step":{"prompt":"a cat"}}]\'\ncat steps.json | mindstudio batch\n```\n\n## Methods\n\nAll methods below are called on a `MindStudioAgent` instance (`agent.methodName(...)`).\nInput shows the first argument object. 
Output shows the fields available on the returned result.\n\n### General\n\n#### addSubtitlesToVideo\nAutomatically add subtitles to a video\n- Can control style of text and animation\n- Input: `{ videoUrl: string, language: string, fontName: string, fontSize: number, fontWeight: "normal" | "bold" | "black", fontColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", highlightColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", strokeWidth: number, strokeColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", backgroundColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta" | "none", backgroundOpacity: number, position: "top" | "center" | "bottom", yOffset: number, wordsPerSubtitle: number, enableAnimation: boolean, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### analyzeImage\nAnalyze an image using a vision model based on a text prompt.\n- Uses the configured vision model to generate a text analysis of the image.\n- The prompt should describe what to look for or extract from the image.\n- Input: `{ prompt: string, imageUrl: string, visionModelOverride?: { model: string, config?: object } | { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ analysis: string }`\n\n#### analyzeVideo\nAnalyze a video using a video analysis model based on a text prompt.\n- Uses the configured video analysis model to generate a text analysis of the video.\n- 
The prompt should describe what to look for or extract from the video.\n- Input: `{ prompt: string, videoUrl: string, videoAnalysisModelOverride?: { model: string, config?: object } | { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ analysis: string }`\n\n#### captureThumbnail\nCapture a thumbnail from a video at a specified timestamp\n- Input: `{ videoUrl: string, at: number | string }`\n- Output: `{ thumbnailUrl: string }`\n\n#### checkAppRole\nCheck whether the current user has a specific app role and branch accordingly.\n- Checks if the current user has been assigned a specific role in this app.\n- If the user has the role, transitions to the "has role" path.\n- If the user does not have the role, transitions to the "no role" path, or errors if no path is configured.\n- Role names are defined by the app creator and assigned to users via the app roles system.\n- The roleName field supports {{variables}} for dynamic role checks.\n- Input: `{ roleName: string, hasRoleStepId?: string, hasRoleWorkflowId?: string, noRoleStepId?: string, noRoleWorkflowId?: string }`\n- Output: `{ hasRole: boolean, userRoles: string[] }`\n\n#### convertPdfToImages\nConvert each page of a PDF document into a PNG image.\n- Each page is converted to a separate PNG and re-hosted on the CDN.\n- Returns an array of image URLs, one per page.\n- Input: `{ pdfUrl: string }`\n- Output: `{ imageUrls: string[] }`\n\n#### createDataSource\nCreate a new empty vector data source for the current app.\n- Creates a new data source (vector database) associated with the current app version.\n- The data source is created empty \u2014 use the "Upload Data Source Document" block to add documents.\n- Returns the new 
data source ID which can be used in subsequent blocks.\n- Input: `{ name: string }`\n- Output: `unknown`\n\n#### createGmailDraft\nCreate a draft email in the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- The draft appears in the user\'s Gmail Drafts folder but is not sent.\n- messageType controls the body format: "plain" for plain text, "html" for raw HTML, "markdown" for auto-converted markdown.\n- Input: `{ to: string, subject: string, message: string, connectionId?: string, messageType: "plain" | "html" | "markdown" }`\n- Output: `{ draftId: string }`\n\n#### deleteDataSource\nDelete a vector data source from the current app.\n- Soft-deletes a data source (vector database) by marking it as deleted.\n- The Milvus partition is cleaned up asynchronously by a background cron job.\n- The data source must belong to the current app version.\n- Input: `{ dataSourceId: string }`\n- Output: `unknown`\n\n#### deleteDataSourceDocument\nDelete a single document from a data source.\n- Soft-deletes a document by marking it as deleted.\n- Requires both the data source ID and document ID.\n- After deletion, reloads vectors into Milvus so the data source reflects the change immediately.\n- Input: `{ dataSourceId: string, documentId: string }`\n- Output: `unknown`\n\n#### detectChanges\nDetect changes between runs by comparing current input against previously stored state. 
Routes execution based on whether a change occurred.\n- Persists state across runs using a global variable keyed to the step ID.\n- Two modes: "comparison" (default) uses strict string inequality; "ai" uses an LLM to determine if a meaningful change occurred.\n- First run always treats the value as "changed" since there is no previous state.\n- Each mode supports transitions to different steps/workflows for the "changed" and "unchanged" paths.\n- AI mode bills normally for the LLM call.\n- Input: `{ mode: "ai" | "comparison", input: string, prompt?: string, modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, previousValueVariable?: string, changedStepId?: string, changedWorkflowId?: string, unchangedStepId?: string, unchangedWorkflowId?: string }`\n- Output: `{ hasChanged: boolean, currentValue: string, previousValue: string, isFirstRun: boolean }`\n\n#### detectPII\nScan text for personally identifiable information using Microsoft Presidio.\n- In workflow mode, transitions to detectedStepId if PII is found, notDetectedStepId otherwise.\n- In direct execution, returns the detection results without transitioning.\n- If entities is empty, returns immediately with no detections.\n- Input: `{ input: string, language: string, entities: string[], detectedStepId?: string, notDetectedStepId?: string, outputLogVariable?: string | null }`\n- Output: `{ detected: boolean, detections: { entity_type: string, start: number, end: number, score: number }[] }`\n\n#### discordEditMessage\nEdit a previously sent Discord channel message. 
Use with the message ID returned by Send Discord Message.\n- Only messages sent by the bot can be edited.\n- The messageId is returned by the Send Discord Message step.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- When editing with an attachment, the new attachment replaces any previous attachments on the message.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Input: `{ botToken: string, channelId: string, messageId: string, text: string, attachmentUrl?: string }`\n- Output: `unknown`\n\n#### discordSendFollowUp\nSend a follow-up message to a Discord slash command interaction.\n- Requires the applicationId and interactionToken from the Discord trigger variables.\n- Follow-up messages appear as new messages in the channel after the initial response.\n- Returns the sent message ID.\n- Interaction tokens expire after 15 minutes.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Input: `{ applicationId: string, interactionToken: string, text: string, attachmentUrl?: string }`\n- Output: `{ messageId: string }`\n\n#### discordSendMessage\nSend a message to Discord \u2014 either edit the loading message or send a new channel message.\n- mode "edit" replaces the loading message (interaction response) with the final result. Uses applicationId and interactionToken from trigger variables. No bot permissions required.\n- mode "send" sends a new message to a channel. Uses botToken and channelId from trigger variables. Returns a messageId that can be used with Edit Discord Message.\n- Optionally attach a file by providing a URL to attachmentUrl. 
The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Interaction tokens expire after 15 minutes.\n- Input: `{ mode: "edit" | "send", text: string, applicationId?: string, interactionToken?: string, botToken?: string, channelId?: string, attachmentUrl?: string }`\n- Output: `{ messageId?: string }`\n\n#### downloadVideo\nDownload a video file\n- Works with YouTube, TikTok, etc., by using ytdlp behind the scenes\n- Can save as mp4 or mp3\n- Input: `{ videoUrl: string, format: "mp4" | "mp3" }`\n- Output: `{ videoUrl: string }`\n\n#### enhanceImageGenerationPrompt\nGenerate or enhance an image generation prompt using a language model. Optionally generates a negative prompt.\n- Rewrites the user\'s prompt with added detail about style, lighting, colors, and composition.\n- Also useful for initial generation, it doesn\'t always need to be enhancing an existing prompt\n- When includeNegativePrompt is true, a second model call generates a negative prompt.\n- Input: `{ initialPrompt: string, includeNegativePrompt: boolean, negativePromptDestinationVariableName?: string, systemPrompt: string, modelOverride?: unknown }`\n- Output: `{ prompt: string, negativePrompt?: string }`\n\n#### enhanceVideoGenerationPrompt\nGenerate or enhance a video generation prompt using a language model. 
Optionally generates a negative prompt.\n- Rewrites the user\'s prompt with added detail about style, camera movement, lighting, and composition.\n- Also useful for initial generation, it doesn\'t always need to be enhancing an existing prompt\n- When includeNegativePrompt is true, a second model call generates a negative prompt.\n- Input: `{ initialPrompt: string, includeNegativePrompt: boolean, negativePromptDestinationVariableName?: string, systemPrompt: string, modelOverride?: unknown }`\n- Output: `{ prompt: string, negativePrompt?: string }`\n\n#### extractAudioFromVideo\nExtract audio MP3 from a video file\n- Input: `{ videoUrl: string }`\n- Output: `{ audioUrl: string }`\n\n#### extractText\nDownload a file from a URL and extract its text content. Supports PDFs, plain text files, and other document formats.\n- Best suited for PDFs and raw text/document files. For web pages, use the scrapeUrl step instead.\n- Accepts a single URL, a comma-separated list of URLs, or a JSON array of URLs.\n- Files are rehosted on the MindStudio CDN before extraction.\n- Maximum file size is 50MB per URL.\n- Input: `{ url: string | string[] }`\n- Output: `{ text: string | string[] }`\n\n#### fetchDataSourceDocument\nFetch the full extracted text contents of a document in a data source.\n- Loads a document by ID and returns its full extracted text content.\n- The document must have been successfully processed (status "done").\n- Also returns document metadata (name, summary, word count).\n- Input: `{ dataSourceId: string, documentId: string }`\n- Output: `unknown`\n\n#### fetchSlackChannelHistory\nFetch recent message history from a Slack channel.\n- The user is responsible for connecting their Slack workspace and selecting the channel\n- Input: `{ connectionId?: string, channelId: string, limit?: number, startDate?: string, endDate?: string, includeImages?: boolean, includeRawMessage?: boolean }`\n- Output: `{ messages: { from: string, content: string, timestamp?: string, 
images?: string[], rawMessage?: { app_id?: string, assistant_app_thread?: { first_user_thread_reply?: string, title?: string, title_blocks?: unknown[] }, attachments?: { actions?: unknown[], app_id?: string, app_unfurl_url?: string, author_icon?: string, author_id?: string, author_link?: string, author_name?: string, author_subname?: string, blocks?: unknown[], bot_id?: string, bot_team_id?: string, callback_id?: string, channel_id?: string, channel_name?: string, channel_team?: string, color?: string, fallback?: string, fields?: unknown[], file_id?: string, filename?: string, files?: unknown[], footer?: string, footer_icon?: string, from_url?: string, hide_border?: boolean, hide_color?: boolean, id?: number, image_bytes?: number, image_height?: number, image_url?: string, image_width?: number, indent?: boolean, is_app_unfurl?: boolean, is_file_attachment?: boolean, is_msg_unfurl?: boolean, is_reply_unfurl?: boolean, is_thread_root_unfurl?: boolean, list?: unknown, list_record?: unknown, list_record_id?: string, list_records?: unknown[], list_schema?: unknown[], list_view?: unknown, list_view_id?: string, message_blocks?: unknown[], metadata?: unknown, mimetype?: string, mrkdwn_in?: string[], msg_subtype?: string, original_url?: string, pretext?: string, preview?: unknown, service_icon?: string, service_name?: string, service_url?: string, size?: number, text?: string, thumb_height?: number, thumb_url?: string, thumb_width?: number, title?: string, title_link?: string, ts?: string, url?: string, video_html?: string, video_html_height?: number, video_html_width?: number, video_url?: string }[], blocks?: { accessory?: unknown, alt_text?: string, api_decoration_available?: boolean, app_collaborators?: string[], app_id?: string, author_name?: string, block_id?: string, bot_user_id?: string, button_label?: string, call?: unknown, call_id?: string, description?: unknown, developer_trace_id?: string, dispatch_action?: boolean, element?: unknown, elements?: unknown[], 
expand?: boolean, external_id?: string, fallback?: string, fields?: unknown[], file?: unknown, file_id?: string, function_trigger_id?: string, hint?: unknown, image_bytes?: number, image_height?: number, image_url?: string, image_width?: number, is_animated?: boolean, is_workflow_app?: boolean, label?: unknown, optional?: boolean, owning_team_id?: string, provider_icon_url?: string, provider_name?: string, sales_home_workflow_app_type?: number, share_url?: string, slack_file?: unknown, source?: string, text?: unknown, thumbnail_url?: string, title?: unknown, title_url?: string, trigger_subtype?: string, trigger_type?: string, type?: unknown, url?: string, video_url?: string, workflow_id?: string }[], bot_id?: string, bot_profile?: { app_id?: string, deleted?: boolean, icons?: unknown, id?: string, name?: string, team_id?: string, updated?: number }, client_msg_id?: string, display_as_bot?: boolean, edited?: { ts?: string, user?: string }, files?: { access?: string, alt_txt?: string, app_id?: string, app_name?: string, attachments?: unknown[], blocks?: unknown[], bot_id?: string, can_toggle_canvas_lock?: boolean, canvas_printing_enabled?: boolean, canvas_template_mode?: string, cc?: unknown[], channel_actions_count?: number, channel_actions_ts?: string, channels?: string[], comments_count?: number, converted_pdf?: string, created?: number, deanimate?: string, deanimate_gif?: string, display_as_bot?: boolean, dm_mpdm_users_with_file_access?: unknown[], duration_ms?: number, edit_link?: string, edit_timestamp?: number, editable?: boolean, editor?: string, editors?: string[], external_id?: string, external_type?: string, external_url?: string, favorites?: unknown[], file_access?: string, filetype?: string, from?: unknown[], groups?: string[], has_more?: boolean, has_more_shares?: boolean, has_rich_preview?: boolean, headers?: unknown, hls?: string, hls_embed?: string, id?: string, image_exif_rotation?: number, ims?: string[], initial_comment?: unknown, 
is_channel_space?: boolean, is_external?: boolean, is_public?: boolean, is_restricted_sharing_enabled?: boolean, is_starred?: boolean, last_editor?: string, last_read?: number, lines?: number, lines_more?: number, linked_channel_id?: string, list_csv_download_url?: string, list_limits?: unknown, list_metadata?: unknown, media_display_type?: string, media_progress?: unknown, mimetype?: string, mode?: string, mp4?: string, mp4_low?: string, name?: string, non_owner_editable?: boolean, num_stars?: number, org_or_workspace_access?: string, original_attachment_count?: number, original_h?: string, original_w?: string, permalink?: string, permalink_public?: string, pinned_to?: string[], pjpeg?: string, plain_text?: string, pretty_type?: string, preview?: string, preview_highlight?: string, preview_is_truncated?: boolean, preview_plain_text?: string, private_channels_with_file_access_count?: number, private_file_with_access_count?: number, public_url_shared?: boolean, quip_thread_id?: string, reactions?: unknown[], saved?: unknown, sent_to_self?: boolean, shares?: unknown, show_badge?: boolean, simplified_html?: string, size?: number, source_team?: string, subject?: string, subtype?: string, team_pref_version_history_enabled?: boolean, teams_shared_with?: unknown[], template_conversion_ts?: number, template_description?: string, template_icon?: string, template_name?: string, template_title?: string, thumb_1024?: string, thumb_1024_gif?: string, thumb_1024_h?: string, thumb_1024_w?: string, thumb_160?: string, thumb_160_gif?: string, thumb_160_h?: string, thumb_160_w?: string, thumb_360?: string, thumb_360_gif?: string, thumb_360_h?: string, thumb_360_w?: string, thumb_480?: string, thumb_480_gif?: string, thumb_480_h?: string, thumb_480_w?: string, thumb_64?: string, thumb_64_gif?: string, thumb_64_h?: string, thumb_64_w?: string, thumb_720?: string, thumb_720_gif?: string, thumb_720_h?: string, thumb_720_w?: string, thumb_80?: string, thumb_800?: string, thumb_800_gif?: 
string, thumb_800_h?: string, thumb_800_w?: string, thumb_80_gif?: string, thumb_80_h?: string, thumb_80_w?: string, thumb_960?: string, thumb_960_gif?: string, thumb_960_h?: string, thumb_960_w?: string, thumb_gif?: string, thumb_pdf?: string, thumb_pdf_h?: string, thumb_pdf_w?: string, thumb_tiny?: string, thumb_video?: string, thumb_video_h?: number, thumb_video_w?: number, timestamp?: number, title?: string, title_blocks?: unknown[], to?: unknown[], transcription?: unknown, update_notification?: number, updated?: number, url_private?: string, url_private_download?: string, url_static_preview?: string, user?: string, user_team?: string, username?: string, vtt?: string }[], icons?: { emoji?: string, image_36?: string, image_48?: string, image_64?: string, image_72?: string }, inviter?: string, is_locked?: boolean, latest_reply?: string, metadata?: { event_payload?: unknown, event_type?: string }, parent_user_id?: string, purpose?: string, reactions?: { count?: number, name?: string, url?: string, users?: string[] }[], reply_count?: number, reply_users?: string[], reply_users_count?: number, root?: { bot_id?: string, icons?: unknown, latest_reply?: string, parent_user_id?: string, reply_count?: number, reply_users?: string[], reply_users_count?: number, subscribed?: boolean, subtype?: string, text?: string, thread_ts?: string, ts?: string, type?: string, username?: string }, subscribed?: boolean, subtype?: string, team?: string, text?: string, thread_ts?: string, topic?: string, ts?: string, type?: string, upload?: boolean, user?: string, username?: string, x_files?: string[] } }[] }`\n\n#### generateAsset\nGenerate an HTML asset and export it as a webpage, PDF, or image\n- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. 
They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). 
In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)\n- Input: `{ source: string, sourceType: "html" | "markdown" | "spa" | "raw" | "dynamic" | "customInterface", outputFormat: "pdf" | "png" | "html" | "mp4" | "openGraph", pageSize: "full" | "letter" | "A4" | "custom", testData: object, options?: { pageWidthPx?: number, pageHeightPx?: number, pageOrientation?: "portrait" | "landscape", rehostMedia?: boolean, videoDurationSeconds?: number }, spaSource?: { source?: string, lastCompiledSource?: string, files?: object, paths: string[], root: string, zipUrl: string }, rawSource?: string, dynamicPrompt?: string, dynamicSourceModelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, transitionControl?: "default" | "native", shareControl?: "default" | "hidden", shareImageUrl?: string, intermediateAsset?: boolean }`\n- Output: `{ url: string }`\n\n#### generateChart\nCreate a chart image using QuickChart (Chart.js) and return the URL.\n- The data field must be a Chart.js-compatible JSON object serialized as a string.\n- Supported chart types: bar, line, pie.\n- Input: `{ chart: { chartType: "bar" | "line" | "pie", data: string, options: { width: string, height: string } } }`\n- Output: `{ chartUrl: string }`\n\n#### generateImage\nGenerate an image from a text prompt using an AI model.\n- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Images are automatically hosted on a CDN.\n- In foreground mode, the image is displayed to the user. 
In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple images are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.\n- Input: `{ prompt: string, intermediateAsset?: boolean, imageModelOverride?: { model: string, config?: object }, generateVariants?: boolean, numVariants?: number, addWatermark?: boolean }`\n- Output: `{ imageUrl: string | string[] }`\n\n#### generateLipsync\nGenerate a lip sync video from provided audio and image.\n- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ intermediateAsset?: boolean, addWatermark?: boolean, lipsyncModelOverride?: { model: string, config?: object } }`\n- Output: `unknown`\n\n#### generateMusic\nGenerate an audio file from provided instructions (text) using a music model.\n- The text field contains the instructions (prompt) for the music generation.\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ text: string, intermediateAsset?: boolean, musicModelOverride?: { model: string, config?: object } }`\n- Output: `unknown`\n\n#### generateStaticVideoFromImage\nConvert a static image to an MP4\n- Can use to create slides/intertitles/slates for video composition\n- Input: `{ imageUrl: string, duration: string }`\n- Output: `{ videoUrl: string }`\n\n#### generateText\nSend a message to an AI model and return the response, or echo a system message.\n- Source "user" sends the message to an LLM and returns the model\'s response.\n- Source "system" echoes the message content directly (no AI call).\n- Mode "background" saves the result to a variable. 
Mode "foreground" streams it to the user (not available in direct execution).\n- Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.\n- When executed inside a v2 app method (managed sandbox or local dev tunnel),\nLLM token output can be streamed to the frontend in real time via an SSE\nside-channel. The frontend opts in by passing { stream: true } to the method\ninvocation via @mindstudio-ai/interface. Tokens are published to Redis\npub/sub as they arrive and forwarded as SSE events on the invoke response.\nThe method code itself is unchanged \u2014 streaming is transparent to the\ndeveloper. See V2ExecutionService.ts and the invoke handler in V2Apps for\nthe server-side plumbing.\n- Input: `{ message: string, source?: "user" | "system", modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, structuredOutputType?: "text" | "json" | "csv", structuredOutputExample?: string, chatHistoryMode?: "include" | "exclude" }`\n- Output: `{ content: string }`\n\n#### generateVideo\nGenerate a video from a text prompt using an AI model.\n- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Videos are automatically hosted on a CDN.\n- In foreground mode, the video is displayed to the user. 
In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.\n- Input: `{ prompt: string, intermediateAsset?: boolean, videoModelOverride?: { model: string, config?: object }, generateVariants?: boolean, numVariants?: number, addWatermark?: boolean }`\n- Output: `{ videoUrl: string | string[] }`\n\n#### getGmailAttachments\nDownload attachments from a Gmail email and re-host them on CDN.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Attachments are uploaded to CDN and returned as URLs.\n- Attachments larger than 25MB are skipped.\n- Use the message ID from Search Gmail Emails, List Recent Gmail Emails, or Get Gmail Email steps.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### getGmailUnreadCount\nGet the number of unread emails in the connected Gmail inbox.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the unread message count for the inbox label.\n- This is a lightweight call that does not fetch any email content.\n- Input: `{ connectionId?: string }`\n- Output: `unknown`\n\n#### getMediaMetadata\nGet info about a media file\n- Input: `{ mediaUrl: string }`\n- Output: `{ metadata: string }`\n\n#### httpRequest\nMake an HTTP request to an external endpoint and return the response.\n- Supports GET, POST, PATCH, DELETE, and PUT methods.\n- Body can be raw JSON/text, URL-encoded form data, or multipart form data.\n- Input: `{ url: string, method: string, headers: object, queryParams: object, body: string, bodyItems: object, contentType: "none" | "application/json" | "application/x-www-form-urlencoded" | "multipart/form-data" | "custom", customContentType: string, testData?: object }`\n- Output: `{ ok: boolean, status: number, statusText: string, response: string }`\n\n#### 
imageFaceSwap\nReplace a face in an image with a face from another image using AI.\n- Requires both a target image and a face source image.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, faceImageUrl: string, engine: string }`\n- Output: `{ imageUrl: string }`\n\n#### imageRemoveWatermark\nRemove watermarks from an image using AI.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ imageUrl: string }`\n\n#### insertVideoClips\nInsert b-roll clips into a base video at a timecode, optionally with an xfade transition.\n- Input: `{ baseVideoUrl: string, overlayVideos: { videoUrl: string, startTimeSec: number }[], transition?: string, transitionDuration?: number, useOverlayAudio?: boolean, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### listDataSources\nList all data sources for the current app.\n- Returns metadata for every data source associated with the current app version.\n- Each entry includes the data source ID, name, description, status, and document list.\n- Input: `object`\n- Output: `unknown`\n\n#### listGmailLabels\nList all labels in the connected Gmail account. Use these label IDs or names with the Update Gmail Labels step.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns both system labels (INBOX, SENT, TRASH, etc.) 
and user-created labels.\n- Label type is "system" for built-in labels or "user" for custom labels.\n- Input: `{ connectionId?: string }`\n- Output: `unknown`\n\n#### listRecentGmailEmails\nList recent emails from the connected Gmail inbox.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns up to 100 emails (default 5), ordered by most recent first.\n- Functionally equivalent to Search Gmail Emails with an "in:inbox" query.\n- Input: `{ connectionId?: string, exportType: "json" | "text", limit: string }`\n- Output: `unknown`\n\n#### logic\nRoute execution to different branches based on AI evaluation, comparison operators, or workflow jumps.\n- Supports two modes: "ai" (default) uses an AI model to pick the most accurate statement; "comparison" uses operator-based checks.\n- In AI mode, the model picks the most accurate statement from the list. All possible cases must be specified.\n- In comparison mode, the context is the left operand and each case\'s condition is the right operand. First matching case wins. 
Use operator "default" as a fallback.\n- Requires at least two cases.\n- Each case can transition to a step in the current workflow (destinationStepId) or jump to another workflow (destinationWorkflowId).\n- Input: `{ mode?: "ai" | "comparison", context: string, cases: ({ id: string, condition: string, operator?: "eq" | "neq" | "gt" | "lt" | "gte" | "lte" | "exists" | "not_exists" | "contains" | "not_contains" | "default", destinationStepId?: string, destinationWorkflowId?: string } | string)[], modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ selectedCase: number }`\n\n#### makeDotComRunScenario\nTrigger a Make.com (formerly Integromat) scenario via webhook and return the response.\n- The webhook URL must be configured in your Make.com scenario.\n- Input key-value pairs are sent as JSON in the POST body.\n- Response format depends on the Make.com scenario configuration.\n- Input: `{ webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### mergeAudio\nMerge one or more clips into a single audio file.\n- Input: `{ mp3Urls: string[], fileMetadata?: object, albumArtUrl?: string, intermediateAsset?: boolean }`\n- Output: `{ audioUrl: string }`\n\n#### mergeVideos\nMerge one or more clips into a single video.\n- Input: `{ videoUrls: string[], transition?: string, transitionDuration?: number, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### mixAudioIntoVideo\nMix an audio track into a video\n- Input: `{ videoUrl: string, audioUrl: string, options: { keepVideoAudio?: boolean, audioGainDb?: number, videoGainDb?: number, loopAudio?: boolean }, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### 
muteVideo\nMute a video file\n- Input: `{ videoUrl: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### n8nRunNode\nTrigger an n8n workflow node via webhook and return the response.\n- The webhook URL must be configured in your n8n workflow.\n- Supports GET and POST methods with optional Basic authentication.\n- For GET requests, input values are sent as query parameters. For POST, they are sent as JSON body.\n- Input: `{ method: string, authentication: "none" | "basic" | "string", user: string, password: string, webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### postToSlackChannel\nSend a message to a Slack channel via a connected bot.\n- The user is responsible for connecting their Slack workspace and selecting the channel\n- Supports both simple text messages and slack blocks messages\n- Text messages can use limited markdown (slack-only fomatting\u2014e.g., headers are just rendered as bold)\n- Input: `{ channelId: string, messageType: "string" | "blocks", message: string, connectionId?: string }`\n- Output: `unknown`\n\n#### postToZapier\nSend data to a Zapier Zap via webhook and return the response.\n- The webhook URL must be configured in the Zapier Zap settings\n- Input keys and values are sent as the JSON body of the POST request\n- The webhook response (JSON or plain text) is returned as the output\n- Input: `{ webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### queryAppDatabase\nExecute a SQL query against the app managed database.\n- Executes raw SQL against a SQLite database managed by the app.\n- For SELECT queries, returns rows as JSON.\n- For INSERT/UPDATE/DELETE, returns the number of affected rows.\n- Use {{variables}} directly in your SQL. 
By default they are automatically extracted\nand passed as safe parameterized values (preventing SQL injection).\nExample: INSERT INTO contacts (name, comment) VALUES ({{name}}, {{comment}})\n- Full MindStudio handlebars syntax is supported, including helpers like {{json myVar}},\n{{get myVar "$.path"}}, {{global.orgName}}, etc.\n- Set parameterize to false for raw/dynamic SQL where variables are interpolated directly\ninto the query string. Use this when another step generates full or partial SQL, e.g.\na bulk INSERT with a precomputed VALUES list. The user is responsible for sanitization\nwhen parameterize is false.\n- Input: `{ databaseId: string, sql: string, parameterize?: boolean }`\n- Output: `{ rows: unknown[], changes: number }`\n\n#### queryDataSource\nSearch a vector data source (RAG) and return relevant document chunks.\n- Queries a vectorized data source and returns the most relevant chunks.\n- Useful for retrieval-augmented generation (RAG) workflows.\n- Input: `{ dataSourceId: string, query: string, maxResults: number }`\n- Output: `{ text: string, chunks: string[], query: string, citations: unknown[], latencyMs: number }`\n\n#### queryExternalDatabase\nExecute a SQL query against an external database connected to the workspace.\n- Requires a database connection configured in the workspace.\n- Supports PostgreSQL (including Supabase), MySQL, and MSSQL.\n- Results can be returned as JSON or CSV.\n- Input: `{ connectionId?: string, query: string, outputFormat: "json" | "csv" }`\n- Output: `{ data: unknown }`\n\n#### redactPII\nReplace personally identifiable information in text with placeholders using Microsoft Presidio.\n- PII is replaced with entity type placeholders (e.g. 
"Call me at <PHONE_NUMBER>").\n- If entities is empty, returns empty text immediately without processing.\n- Input: `{ input: string, language: string, entities: string[] }`\n- Output: `{ text: string }`\n\n#### removeBackgroundFromImage\nRemove the background from an image using AI, producing a transparent PNG.\n- Uses the Bria background removal model via fal.ai.\n- Output is re-hosted on the CDN as a PNG with transparency.\n- Input: `{ imageUrl: string }`\n- Output: `{ imageUrl: string }`\n\n#### resizeVideo\nResize a video file\n- Input: `{ videoUrl: string, mode: "fit" | "exact", maxWidth?: number, maxHeight?: number, width?: number, height?: number, strategy?: "pad" | "crop", intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### runFromConnectorRegistry\nRun a raw API connector to a third-party service\n- Use the /developer/v2/helpers/connectors endpoint to list available services and actions.\n- Use /developer/v2/helpers/connectors/{serviceId}/{actionId} to get the full input configuration for an action.\n- Use /developer/v2/helpers/connections to list your available OAuth connections.\n- The actionId format is "serviceId/actionId" (e.g., "slack/send-message").\n- Pass a __connectionId to authenticate the request with a specific OAuth connection, otherwise the default will be used (if configured).\n- Input: `{ actionId: string, displayName: string, icon: string, configurationValues: object, __connectionId?: string }`\n- Output: `{ data: object }`\n\n#### runPackagedWorkflow\nRun a packaged workflow ("custom block")\n- From the user\'s perspective, packaged workflows are just ordinary blocks. 
Behind the scenes, they operate like packages/libraries in a programming language, letting the user execute custom functionality.\n- Some of these packaged workflows are available as part of MindStudio\'s "Standard Library" and available to every user.\n- Available packaged workflows are documented here as individual blocks, but the runPackagedWorkflow block is how they need to be wrapped in order to be executed correctly.\n- Input: `{ appId: string, workflowId: string, inputVariables: object, outputVariables: object, name: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeLinkedInCompany\nScrape public company data from a LinkedIn company page.\n- Requires a LinkedIn company URL (e.g. https://www.linkedin.com/company/mindstudioai).\n- Returns structured company data including description, employees, updates, and similar companies.\n- Input: `{ url: string }`\n- Output: `{ company: unknown }`\n\n#### scrapeLinkedInProfile\nScrape public profile data from a LinkedIn profile page.\n- Requires a LinkedIn profile URL (e.g. 
https://www.linkedin.com/in/username).\n- Returns structured profile data including experience, education, articles, and activities.\n- Input: `{ url: string }`\n- Output: `{ profile: unknown }`\n\n#### scrapeUrl\nExtract text, HTML, or structured content from one or more web pages.\n- Accepts a single URL or multiple URLs (as a JSON array, comma-separated, or newline-separated).\n- Output format controls the result shape: "text" returns markdown, "html" returns raw HTML, "json" returns structured scraper data.\n- Can optionally capture a screenshot of each page.\n- Input: `{ url: string, service?: "default" | "firecrawl", autoEnhance?: boolean, pageOptions?: { onlyMainContent: boolean, screenshot: boolean, waitFor: number, replaceAllPathsWithAbsolutePaths: boolean, headers: object, removeTags: string[], mobile: boolean } }`\n- Output: `{ content: string | string[] | { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } | { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } }[], screenshot?: string }`\n\n#### scrapeXPost\nScrape data from a single X (Twitter) post by URL.\n- Returns structured post data (text, html, optional json/screenshot/metadata).\n- Optionally saves the text content to a variable.\n- Input: `{ url: string }`\n- Output: `{ post: { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } }`\n\n#### scrapeXProfile\nScrape public profile data from an X (Twitter) account by URL.\n- Returns structured profile data.\n- Optionally saves the result to a variable.\n- Input: `{ url: string }`\n- Output: `{ profile: { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } }`\n\n#### 
screenshotUrl\nCapture a screenshot of a web page as a PNG image.\n- Takes a viewport or full-page screenshot of the given URL.\n- Returns a CDN-hosted PNG image URL.\n- Viewport mode captures only the visible area; fullPage captures the entire scrollable page.\n- You can customize viewport width/height, add a delay, or wait for a CSS selector before capturing.\n- Input: `{ url: string, mode?: "viewport" | "fullPage", width?: number, height?: number, delay?: number, waitFor?: string }`\n- Output: `{ screenshotUrl: string }`\n\n#### searchGmailEmails\nSearch for emails in the connected Gmail account using a Gmail search query. To list recent inbox emails, pass an empty query string.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Uses Gmail search syntax (e.g. "from:user@example.com", "subject:invoice", "is:unread").\n- To list recent inbox emails, use an empty query string or "in:inbox".\n- Returns up to 100 emails (default 5). The variable receives text or JSON depending on exportType.\n- The direct execution output always returns structured email objects.\n- Input: `{ query: string, connectionId?: string, exportType: "json" | "text", limit: string }`\n- Output: `{ emails: { id: string, subject: string, from: string, to: string, date: string, plainBody: string, htmlBody: string, labels: string }[] }`\n\n#### searchGoogle\nSearch the web using Google and return structured results.\n- Defaults to us/english, but can optionally specify country and/or language.\n- Defaults to any time, but can optionally specify last hour, last day, week, month, or year.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, languageCode?: string, dateRange?: "hour" | "day" | "week" | "month" | "year" | "any", numResults?: number }`\n- Output: `{ results: { title: string, description: string, url: string }[] }`\n\n#### searchGoogleImages\nSearch Google Images and 
return image results with URLs and metadata.\n- Defaults to us/english, but can optionally specify country and/or language.\n- Defaults to any time, but can optionally specify last hour, last day, week, month, or year.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, languageCode?: string, dateRange?: "hour" | "day" | "week" | "month" | "year" | "any", numResults?: number }`\n- Output: `{ images: { title: string, imageUrl: string, imageWidth: number, imageHeight: number, thumbnailUrl: string, thumbnailWidth: number, thumbnailHeight: number, source: string, domain: string, link: string, googleUrl: string, position: number }[] }`\n\n#### searchGoogleNews\nSearch Google News for recent news articles matching a query.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ text: string, exportType: "text" | "json", numResults?: number }`\n- Output: `{ articles: { title: string, link: string, date: string, source: { name: string }, snippet?: string }[] }`\n\n#### searchGoogleTrends\nFetch Google Trends data for a search term.\n- date accepts shorthand ("now 1-H", "today 1-m", "today 5-y", etc.) or custom "yyyy-mm-dd yyyy-mm-dd" ranges.\n- data_type controls the shape of returned data: TIMESERIES, GEO_MAP, GEO_MAP_0, RELATED_TOPICS, or RELATED_QUERIES.\n- Input: `{ text: string, hl: string, geo: string, data_type: "TIMESERIES" | "GEO_MAP" | "GEO_MAP_0" | "RELATED_TOPICS" | "RELATED_QUERIES", cat: string, date: string, ts: string }`\n- Output: `{ trends: object }`\n\n#### searchPerplexity\nSearch the web using the Perplexity API and return structured results.\n- Defaults to US results. Use countryCode (ISO code) to filter by country.\n- Returns 10 results by default, configurable from 1 to 20.\n- The variable receives text or JSON depending on exportType. 
The direct execution output always returns structured results.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, numResults?: number }`\n- Output: `{ results: { title: string, description: string, url: string }[] }`\n\n#### sendEmail\nSend an email to one or more configured recipient addresses.\n- Recipient email addresses are resolved from OAuth connections configured by the app creator. The user running the workflow does not specify the recipient directly.\n- If the body is a URL to a hosted HTML file on the CDN, the HTML is fetched and used as the email body.\n- When generateHtml is enabled, the body text is converted to a styled HTML email using an AI model.\n- connectionId can be a comma-separated list to send to multiple recipients.\n- The special connectionId "trigger_email" uses the email address that triggered the workflow.\n- Input: `{ subject: string, body: string, connectionId?: string, generateHtml?: boolean, generateHtmlInstructions?: string, generateHtmlModelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, attachments?: string[] }`\n- Output: `{ recipients: string[] }`\n\n#### sendGmailDraft\nSend an existing draft from the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- The draft is sent and removed from the Drafts folder.\n- Use the draft ID returned by the Create Gmail Draft or List Gmail Drafts steps.\n- Input: `{ draftId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### sendGmailMessage\nSend an email from the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- messageType controls the body format: "plain" for plain text, "html" for 
raw HTML, "markdown" for auto-converted markdown.\n- Input: `{ to: string, subject: string, message: string, connectionId?: string, messageType: "plain" | "html" | "markdown" }`\n- Output: `{ messageId: string }`\n\n#### sendSMS\nSend an SMS or MMS message to a phone number configured via OAuth connection.\n- User is responsible for configuring the connection to the number (MindStudio requires double opt-in to prevent spam)\n- If mediaUrls are provided, the message is sent as MMS instead of SMS\n- MMS supports up to 10 media URLs (images, video, audio, PDF) with a 5MB limit per file\n- MMS is only supported on US and Canadian carriers; international numbers will receive SMS only (media silently dropped)\n- Input: `{ body: string, connectionId?: string, mediaUrls?: string[] }`\n- Output: `unknown`\n\n#### setGmailReadStatus\nMark one or more Gmail emails as read or unread.\n- Requires a Google OAuth connection with Gmail modify scope.\n- Accepts one or more message IDs as a comma-separated string or array.\n- Set markAsRead to true to mark as read, false to mark as unread.\n- Input: `{ messageIds: string, markAsRead: boolean, connectionId?: string }`\n- Output: `unknown`\n\n#### setRunTitle\nSet the title of the agent run for the user\'s history\n- Input: `{ title: string }`\n- Output: `unknown`\n\n#### setVariable\nExplicitly set a variable to a given value.\n- Useful for bootstrapping global variables or setting constants.\n- The variable name and value both support variable interpolation.\n- The type field is a UI hint only (controls input widget in the editor).\n- Input: `{ value: string | string[] }`\n- Output: `object`\n\n#### telegramEditMessage\nEdit a previously sent Telegram message. Use with the message ID returned by Send Telegram Message.\n- Only text messages sent by the bot can be edited.\n- The messageId is returned by the Send Telegram Message step.\n- Common pattern: send a "Processing..." 
message, do work, then edit it with the result.\n- Input: `{ botToken: string, chatId: string, messageId: string, text: string }`\n- Output: `unknown`\n\n#### telegramReplyToMessage\nSend a reply to a specific Telegram message. The reply will be visually threaded in the chat.\n- Use the rawMessage.message_id from the incoming trigger variables to reply to the user\'s message.\n- Especially useful in group chats where replies provide context.\n- Returns the sent message ID, which can be used with Edit Telegram Message.\n- Input: `{ botToken: string, chatId: string, replyToMessageId: string, text: string }`\n- Output: `{ messageId: number }`\n\n#### telegramSendAudio\nSend an audio file to a Telegram chat as music or a voice note via a bot.\n- "audio" mode sends as a standard audio file. "voice" mode sends as a voice message (re-uploads the file for large file support).\n- Input: `{ botToken: string, chatId: string, audioUrl: string, mode: "audio" | "voice", caption?: string }`\n- Output: `unknown`\n\n#### telegramSendFile\nSend a document/file to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, fileUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSendImage\nSend an image to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, imageUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSendMessage\nSend a text message to a Telegram chat via a bot.\n- Messages are sent using MarkdownV2 formatting. 
Special characters are auto-escaped.\n- botToken format is "botId:token" \u2014 both parts are required.\n- Returns the sent message ID, which can be used with Edit Telegram Message to update the message later.\n- Input: `{ botToken: string, chatId: string, text: string }`\n- Output: `{ messageId: number }`\n\n#### telegramSendVideo\nSend a video to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, videoUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSetTyping\nShow the "typing..." indicator in a Telegram chat via a bot.\n- The typing indicator automatically expires after a few seconds. Use this right before sending a message for a natural feel.\n- Input: `{ botToken: string, chatId: string }`\n- Output: `unknown`\n\n#### textToSpeech\nGenerate an audio file from provided text using a speech model.\n- The text field contains the exact words to be spoken (not instructions).\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ text: string, intermediateAsset?: boolean, speechModelOverride?: { model: string, config?: object } }`\n- Output: `{ audioUrl: string }`\n\n#### transcribeAudio\nConvert an audio file to text using a transcription model.\n- The prompt field provides optional context to improve transcription accuracy (e.g. 
language, speaker names, domain).\n- Input: `{ audioUrl: string, prompt: string, transcriptionModelOverride?: { model: string, config?: object } }`\n- Output: `{ text: string }`\n\n#### trimMedia\nTrim an audio or video clip\n- Input: `{ inputUrl: string, start?: number | string, duration?: string | number, intermediateAsset?: boolean }`\n- Output: `{ mediaUrl: string }`\n\n#### updateGmailLabels\nAdd or remove labels on Gmail messages, identified by message IDs or a search query.\n- Requires a Google OAuth connection with Gmail modify scope.\n- Provide either a query (Gmail search syntax) or explicit messageIds to target messages.\n- Label IDs can be label names or Gmail label IDs \u2014 names are resolved automatically.\n- Input: `{ query: string, connectionId?: string, messageIds: string, addLabelIds: string, removeLabelIds: string }`\n- Output: `{ updatedMessageIds: string[] }`\n\n#### uploadDataSourceDocument\nUpload a file into an existing data source from a URL or raw text content.\n- If "file" is a single URL, the file is downloaded from that URL and uploaded.\n- If "file" is any other string, a .txt document is created from that content and uploaded.\n- The block waits (polls) for processing to complete before transitioning, up to 5 minutes.\n- Once processing finishes, vectors are loaded into Milvus so the data source is immediately queryable.\n- Supported file types (when using a URL) are the same as the data source upload UI (PDF, DOCX, TXT, etc.).\n- Input: `{ dataSourceId: string, file: string, fileName: string }`\n- Output: `unknown`\n\n#### upscaleImage\nIncrease the resolution of an image using AI upscaling.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, targetResolution: "2k" | "4k" | "8k", engine: "standard" | "pro" }`\n- Output: `{ imageUrl: string }`\n\n#### upscaleVideo\nUpscale a video file\n- Input: `{ videoUrl: string, targetResolution: "720p" | "1080p" | "2K" | "4K", engine: "standard" | "pro" | "ultimate" | 
"flashvsr" | "seedance" | "seedvr2" | "runwayml/upscale-v1", intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoFaceSwap\nSwap faces in a video file\n- Input: `{ videoUrl: string, faceImageUrl: string, targetIndex: number, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoRemoveBackground\nRemove or replace background from a video\n- Input: `{ videoUrl: string, newBackground: "transparent" | "image", newBackgroundImageUrl?: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoRemoveWatermark\nRemove a watermark from a video\n- Input: `{ videoUrl: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### watermarkImage\nOverlay a watermark image onto another image.\n- The watermark is placed at the specified corner with configurable padding and width.\n- Input: `{ imageUrl: string, watermarkImageUrl: string, corner: "top-left" | "top-right" | "bottom-left" | "bottom-right", paddingPx: number, widthPx: number, intermediateAsset?: boolean }`\n- Output: `{ imageUrl: string }`\n\n#### watermarkVideo\nAdd an image watermark to a video\n- Input: `{ videoUrl: string, imageUrl: string, corner: "top-left" | "top-right" | "bottom-left" | "bottom-right", paddingPx: number, widthPx: number, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n### ActiveCampaign\n\n#### activeCampaignAddNote\nAdd a note to an existing contact in ActiveCampaign.\n- Requires an ActiveCampaign OAuth connection (connectionId).\n- The contact must already exist \u2014 use the contact ID from a previous create or search step.\n- Input: `{ contactId: string, note: string, connectionId?: string }`\n- Output: `unknown`\n\n#### activeCampaignCreateContact\nCreate or sync a contact in ActiveCampaign.\n- Requires an ActiveCampaign OAuth connection (connectionId).\n- If a contact with the email already exists, it may be updated depending 
on ActiveCampaign settings.\n- Custom fields are passed as a key-value map where keys are field IDs.\n- Input: `{ email: string, firstName: string, lastName: string, phone: string, accountId: string, customFields: object, connectionId?: string }`\n- Output: `{ contactId: string }`\n\n### Airtable\n\n#### airtableCreateUpdateRecord\nCreate a new record or update an existing record in an Airtable table.\n- If recordId is provided, updates that record. Otherwise, creates a new one.\n- When updating with updateMode "onlySpecified", unspecified fields are left as-is. With "all", unspecified fields are cleared.\n- Array fields (e.g. multipleAttachments) accept arrays of values.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId?: string, updateMode?: "onlySpecified" | "all", fields: unknown, recordData: object }`\n- Output: `{ recordId: string }`\n\n#### airtableDeleteRecord\nDelete a record from an Airtable table by its record ID.\n- Requires an active Airtable OAuth connection (connectionId).\n- Silently succeeds if the record does not exist.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId: string }`\n- Output: `{ deleted: boolean }`\n\n#### airtableGetRecord\nFetch a single record from an Airtable table by its record ID.\n- Requires an active Airtable OAuth connection (connectionId).\n- If the record is not found, returns a string message instead of a record object.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId: string }`\n- Output: `{ record: { id: string, createdTime: string, fields: object } | null }`\n\n#### airtableGetTableRecords\nFetch multiple records from an Airtable table with optional pagination.\n- Requires an active Airtable OAuth connection (connectionId).\n- Default limit is 100 records. Maximum is 1000.\n- When outputFormat is \'csv\', the variable receives CSV text. 
The direct execution output always returns parsed records.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, outputFormat?: "json" | "csv", limit?: number }`\n- Output: `{ records: { id: string, createdTime: string, fields: object }[] }`\n\n### Apollo\n\n#### enrichPerson\nLook up professional information about a person using Apollo.io. Search by ID, name, LinkedIn URL, email, or domain.\n- At least one search parameter must be provided.\n- Returns enriched data from Apollo including contact details, employment info, and social profiles.\n- Input: `{ params: { id: string, name: string, linkedinUrl: string, email: string, domain: string } }`\n- Output: `{ data: unknown }`\n\n#### peopleSearch\nSearch for people matching specific criteria using Apollo.io. Supports natural language queries and advanced filters.\n- Can use a natural language "smartQuery" which is converted to Apollo search parameters by an AI model.\n- Advanced params can override or supplement the smart query results.\n- Optionally enriches returned people and/or their organizations for additional detail.\n- Results are paginated. Use limit and page to control the result window.\n- Input: `{ smartQuery: string, enrichPeople: boolean, enrichOrganizations: boolean, limit: string, page: string, params: { personTitles: string, includeSimilarTitles: string, qKeywords: string, personLocations: string, personSeniorities: string, organizationLocations: string, qOrganizationDomainsList: string, contactEmailStatus: string, organizationNumEmployeesRanges: string, revenueRangeMin: string, revenueRangeMax: string, currentlyUsingAllOfTechnologyUids: string, currentlyUsingAnyOfTechnologyUids: string, currentlyNotUsingAnyOfTechnologyUids: string } }`\n- Output: `{ results: unknown }`\n\n### Coda\n\n#### codaCreateUpdatePage\nCreate a new page or update an existing page in a Coda document.\n- Requires a Coda OAuth connection (connectionId).\n- If pageData.pageId is provided, updates that page. 
Otherwise, creates a new one.\n- Page content is provided as markdown and converted to Coda\'s canvas format.\n- When updating, insertionMode controls how content is applied (default: \'append\').\n- Input: `{ connectionId?: string, pageData: { docId: string, pageId?: string, name: string, subtitle: string, iconName: string, imageUrl: string, parentPageId?: string, pageContent: string | unknown, contentUpdate?: unknown, insertionMode?: string } }`\n- Output: `{ pageId: string }`\n\n#### codaCreateUpdateRow\nCreate a new row or update an existing row in a Coda table.\n- Requires a Coda OAuth connection (connectionId).\n- If rowId is provided, updates that row. Otherwise, creates a new one.\n- Row data keys are column IDs. Empty values are excluded.\n- Input: `{ connectionId?: string, docId: string, tableId: string, rowId?: string, rowData: object }`\n- Output: `{ rowId: string }`\n\n#### codaFindRow\nSearch for a row in a Coda table by matching column values.\n- Requires a Coda OAuth connection (connectionId).\n- Returns the first row matching all specified column values, or null if no match.\n- Search criteria in rowData are ANDed together.\n- Input: `{ connectionId?: string, docId: string, tableId: string, rowData: object }`\n- Output: `{ row: { id: string, values: object } | null }`\n\n#### codaGetPage\nExport and read the contents of a page from a Coda document.\n- Requires a Coda OAuth connection (connectionId).\n- Page export is asynchronous on Coda\'s side \u2014 there may be a brief delay while it processes.\n- If a page was just created in a prior step, there is an automatic 20-second retry if the first export attempt fails.\n- Input: `{ connectionId?: string, docId: string, pageId: string, outputFormat?: "html" | "markdown" }`\n- Output: `{ content: string }`\n\n#### codaGetTableRows\nFetch rows from a Coda table with optional pagination.\n- Requires a Coda OAuth connection (connectionId).\n- Default limit is 10000 rows. 
Rows are fetched in pages of 500.\n- When outputFormat is \'csv\', the variable receives CSV text. The direct execution output always returns parsed rows.\n- Input: `{ connectionId?: string, docId: string, tableId: string, limit?: number | string, outputFormat?: "json" | "csv" }`\n- Output: `{ rows: { id: string, values: object }[] }`\n\n### Facebook\n\n#### scrapeFacebookPage\nScrape a Facebook page\n- Input: `{ pageUrl: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeFacebookPosts\nGet all the posts for a Facebook page\n- Input: `{ pageUrl: string }`\n- Output: `{ data: unknown }`\n\n### Gmail\n\n#### deleteGmailEmail\nMove an email to trash in the connected Gmail account (recoverable delete).\n- Requires a Google OAuth connection with Gmail modify scope.\n- Uses trash (recoverable) rather than permanent delete.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### getGmailDraft\nRetrieve a specific draft from Gmail by draft ID.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the draft content including subject, recipients, sender, and body.\n- Input: `{ draftId: string, connectionId?: string }`\n- Output: `{ draftId: string, messageId: string, subject: string, to: string, from: string, body: string }`\n\n#### getGmailEmail\nRetrieve a specific email from Gmail by message ID.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the email subject, sender, recipient, date, body (plain text preferred, falls back to HTML), and labels.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `{ messageId: string, subject: string, from: string, to: string, date: string, body: string, labels: string }`\n\n#### listGmailDrafts\nList drafts in the connected Gmail account.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns up to 50 drafts (default 10).\n- The variable receives text or JSON depending on exportType.\n- Input: `{ connectionId?: string, 
limit?: string, exportType: "json" | "text" }`\n- Output: `{ drafts: { draftId: string, messageId: string, subject: string, to: string, snippet: string }[] }`\n\n#### replyToGmailEmail\nReply to an existing email in Gmail. The reply is threaded under the original message.\n- Requires a Google OAuth connection with Gmail compose and readonly scopes.\n- The reply is sent to the original sender and threaded under the original message.\n- messageType controls the body format: "plain", "html", or "markdown".\n- Input: `{ messageId: string, message: string, messageType: "plain" | "html" | "markdown", connectionId?: string }`\n- Output: `{ messageId: string }`\n\n### Google\n\n#### createGoogleDoc\nCreate a new Google Document and optionally populate it with content.\n- textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.\n- Input: `{ title: string, text: string, connectionId?: string, textType: "plain" | "html" | "markdown" }`\n- Output: `{ documentUrl: string }`\n\n#### createGoogleSheet\nCreate a new Google Spreadsheet and populate it with CSV data.\n- Input: `{ title: string, text: string, connectionId?: string }`\n- Output: `{ spreadsheetUrl: string }`\n\n#### deleteGoogleSheetRows\nDelete a range of rows from a Google Spreadsheet.\n- Requires a Google OAuth connection with Drive scope.\n- startRow and endRow are 1-based row numbers (inclusive).\n- If sheetName is omitted, operates on the first sheet.\n- Input: `{ documentId: string, sheetName?: string, startRow: string, endRow: string, connectionId?: string }`\n- Output: `unknown`\n\n#### fetchGoogleDoc\nFetch the contents of an existing Google Document.\n- exportType controls the output format: "html" for HTML markup, "markdown" for Markdown, "json" for structured JSON, "plain" for plain text.\n- Input: `{ documentId: string, connectionId?: string, exportType: "html" | "markdown" | "json" | "plain" }`\n- Output: `{ content: string }`\n\n#### 
fetchGoogleSheet\nFetch contents of a Google Spreadsheet range.\n- range uses A1 notation (e.g. "Sheet1!A1:C10"). Omit to fetch the entire first sheet.\n- exportType controls the output format: "csv" for comma-separated values, "json" for structured JSON.\n- Input: `{ spreadsheetId: string, range: string, connectionId?: string, exportType: "csv" | "json" }`\n- Output: `{ content: string }`\n\n#### getGoogleSheetInfo\nGet metadata about a Google Spreadsheet including sheet names, row counts, and column counts.\n- Requires a Google OAuth connection with Drive scope.\n- Returns the spreadsheet title and a list of all sheets with their dimensions.\n- Input: `{ documentId: string, connectionId?: string }`\n- Output: `{ title: string, sheets: { sheetId: number, title: string, rowCount: number, columnCount: number }[] }`\n\n#### updateGoogleDoc\nUpdate the contents of an existing Google Document.\n- operationType controls how content is applied: "addToTop" prepends, "addToBottom" appends, "overwrite" replaces all content.\n- textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.\n- Input: `{ documentId: string, connectionId?: string, text: string, textType: "plain" | "html" | "markdown", operationType: "addToTop" | "addToBottom" | "overwrite" }`\n- Output: `{ documentUrl: string }`\n\n#### updateGoogleSheet\nUpdate a Google Spreadsheet with new data.\n- operationType controls how data is written: "addToBottom" appends rows, "overwrite" replaces all data, "range" writes to a specific cell range.\n- Data should be provided as CSV in the text field.\n- Input: `{ text: string, connectionId?: string, spreadsheetId: string, range: string, operationType: "addToBottom" | "overwrite" | "range" }`\n- Output: `{ spreadsheetUrl: string }`\n\n### Google Calendar\n\n#### createGoogleCalendarEvent\nCreate a new event on a Google Calendar.\n- Requires a Google OAuth connection with Calendar events scope.\n- 
Date/time values must be ISO 8601 format (e.g. "2025-07-02T10:00:00-07:00").\n- Attendees are specified as one email address per line in a single string.\n- Set addMeetLink to true to automatically attach a Google Meet video call.\n- Input: `{ connectionId?: string, summary: string, description?: string, location?: string, startDateTime: string, endDateTime: string, attendees?: string, addMeetLink?: boolean, calendarId?: string }`\n- Output: `{ eventId: string, htmlLink: string }`\n\n#### deleteGoogleCalendarEvent\nDelete a specific event from a Google Calendar by event ID.\n- Requires a Google OAuth connection with Calendar events scope.\n- Removes the event identified by eventId from the calendar specified by calendarId.\n- Input: `{ connectionId?: string, eventId: string, calendarId?: string }`\n- Output: `unknown`\n\n#### getGoogleCalendarEvent\nRetrieve a specific event from a Google Calendar by event ID.\n- Requires a Google OAuth connection with Calendar events scope.\n- The variable receives JSON or XML-like text depending on exportType. 
The direct execution output always returns the structured event.\n- Input: `{ connectionId?: string, eventId: string, exportType: "json" | "text", calendarId?: string }`\n- Output: `{ event: { id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null } }`\n\n#### listGoogleCalendarEvents\nList upcoming events from a Google Calendar, ordered by start time.\n- Requires a Google OAuth connection with Calendar events scope.\n- Only returns future events (timeMin = now).\n- The variable receives JSON or XML-like text depending on exportType. 
The direct execution output always returns structured events.\n- Input: `{ connectionId?: string, limit: number, exportType: "json" | "text", calendarId?: string }`\n- Output: `{ events: ({ id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null })[] }`\n\n#### searchGoogleCalendarEvents\nSearch for events in a Google Calendar by keyword, date range, or both.\n- Requires a Google OAuth connection with Calendar events scope.\n- Supports keyword search via "query" and date filtering via "timeMin"/"timeMax" (ISO 8601 format).\n- Unlike "List Events" which only shows future events, this allows searching past events too.\n- Input: `{ query?: string, timeMin?: string, timeMax?: string, calendarId?: string, limit?: number, exportType: "json" | "text", connectionId?: string }`\n- Output: `{ events: ({ id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null })[] }`\n\n#### updateGoogleCalendarEvent\nUpdate an existing event on a Google Calendar. 
Only specified fields are changed.\n- Requires a Google OAuth connection with Calendar events scope.\n- Fetches the existing event first, then applies only the provided updates. Omitted fields are left unchanged.\n- Attendees are specified as one email address per line, and replace the entire attendee list.\n- Input: `{ connectionId?: string, eventId: string, summary?: string, description?: string, location?: string, startDateTime?: string, endDateTime?: string, attendees?: string, calendarId?: string }`\n- Output: `{ eventId: string, htmlLink: string }`\n\n### Google Drive\n\n#### getGoogleDriveFile\nDownload a file from Google Drive and rehost it on the CDN. Returns a public CDN URL.\n- Requires a Google OAuth connection with Drive scope.\n- Google-native files (Docs, Sheets, Slides) cannot be downloaded \u2014 use dedicated steps instead.\n- Maximum file size: 200MB.\n- The file is downloaded and re-uploaded to the CDN; the returned URL is publicly accessible.\n- Input: `{ fileId: string, connectionId?: string }`\n- Output: `{ url: string, name: string, mimeType: string, size: number }`\n\n#### listGoogleDriveFiles\nList files in a Google Drive folder.\n- Requires a Google OAuth connection with Drive scope.\n- If folderId is omitted, lists files in the root folder.\n- Returns file metadata including name, type, size, and links.\n- Input: `{ folderId?: string, limit?: number, connectionId?: string, exportType: "json" | "text" }`\n- Output: `{ files: { id: string, name: string, mimeType: string, size: string, webViewLink: string, createdTime: string, modifiedTime: string }[] }`\n\n#### searchGoogleDrive\nSearch for files in Google Drive by keyword.\n- Requires a Google OAuth connection with Drive scope.\n- Searches file content and names using Google Drive\'s fullText search.\n- Input: `{ query: string, limit?: number, connectionId?: string, exportType: "json" | "text" }`\n- Output: `{ files: { id: string, name: string, mimeType: string, size: string, webViewLink: 
string, createdTime: string, modifiedTime: string }[] }`\n\n### HubSpot\n\n#### hubspotCreateCompany\nCreate a new company or update an existing one in HubSpot. Matches by domain.\n- Requires a HubSpot OAuth connection (connectionId).\n- If a company with the given domain already exists, it is updated. Otherwise, a new one is created.\n- Property values are type-checked against enabledProperties before being sent to HubSpot.\n- Input: `{ connectionId?: string, company: { domain: string, name: string }, enabledProperties: ({ label: string, value: string, type: "string" | "number" | "bool" })[] }`\n- Output: `{ companyId: string }`\n\n#### hubspotCreateContact\nCreate a new contact or update an existing one in HubSpot. Matches by email address.\n- Requires a HubSpot OAuth connection (connectionId).\n- If a contact with the given email already exists, it is updated. Otherwise, a new one is created.\n- If companyDomain is provided, the contact is associated with that company (creating the company if needed).\n- Property values are type-checked against enabledProperties before being sent to HubSpot.\n- Input: `{ connectionId?: string, contact: { email: string, firstname: string, lastname: string }, enabledProperties: ({ label: string, value: string, type: "string" | "number" | "bool" })[], companyDomain: string }`\n- Output: `{ contactId: string }`\n\n#### hubspotGetCompany\nLook up a HubSpot company by domain name or company ID.\n- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the company is not found.\n- When searching by domain, performs a search query then fetches the full company record.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.\n- Input: `{ connectionId?: string, searchBy: "domain" | "id", companyDomain: string, companyId: string, additionalProperties: string[] }`\n- Output: `{ company: { id: string, properties: object, createdAt: string, updatedAt: string, archived: boolean } | null }`\n\n#### 
hubspotGetContact\nLook up a HubSpot contact by email address or contact ID.\n- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the contact is not found.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.\n- Input: `{ connectionId?: string, searchBy: "email" | "id", contactEmail: string, contactId: string, additionalProperties: string[] }`\n- Output: `{ contact: { id: string, properties: object, createdAt: string, updatedAt: string, archived: boolean } | null }`\n\n### Hunter.io\n\n#### hunterApiCompanyEnrichment\nLook up company information by domain using Hunter.io.\n- Returns company name, description, location, industry, size, technologies, and more.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns null if the company is not found.\n- Input: `{ domain: string }`\n- Output: `{ data: { name: string, domain: string, description: string | null, country: string | null, state: string | null, city: string | null, industry: string | null, employees_range: string | null, logo_url: string | null, technologies: string[] } | null }`\n\n#### hunterApiDomainSearch\nSearch for email addresses associated with a domain using Hunter.io.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns a list of email addresses found for the domain along with organization info.\n- Input: `{ domain: string }`\n- Output: `{ data: { domain: string, disposable: boolean, webmail: boolean, accept_all: boolean, pattern: string, organization: string, country: string | null, state: string | null, emails: ({ value: string, type: string, confidence: number, first_name: string | null, last_name: string | null, position: string | null, seniority: string | null, department: string | null, linkedin: string | null, twitter: string | null, phone_number: string | null })[], linked_domains: string[] } }`\n\n#### hunterApiEmailFinder\nFind an email address for a specific 
person at a domain using Hunter.io.\n- Requires a first name, last name, and domain.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns the most likely email address with a confidence score.\n- Input: `{ domain: string, firstName: string, lastName: string }`\n- Output: `{ data: { first_name: string, last_name: string, email: string, score: number, domain: string, accept_all: boolean, position: string | null, twitter: string | null, linkedin_url: string | null, phone_number: string | null, company: string | null, sources: { domain: string, uri: string, extracted_on: string }[] } }`\n\n#### hunterApiEmailVerification\nVerify whether an email address is valid and deliverable using Hunter.io.\n- Checks email format, MX records, SMTP server, and mailbox deliverability.\n- Returns a status ("valid", "invalid", "accept_all", "webmail", "disposable", "unknown") and a score.\n- Input: `{ email: string }`\n- Output: `{ data: { status: string, result: string, score: number, email: string, regexp: boolean, gibberish: boolean, disposable: boolean, webmail: boolean, mx_records: boolean, smtp_server: boolean, smtp_check: boolean, accept_all: boolean, block: boolean, sources: { domain: string, uri: string, extracted_on: string }[] } }`\n\n#### hunterApiPersonEnrichment\nLook up professional information about a person by their email address using Hunter.io.\n- Returns name, job title, social profiles, and company information.\n- If the person is not found, returns an object with an error message instead of throwing.\n- Input: `{ email: string }`\n- Output: `{ data: { first_name: string, last_name: string, email: string, position: string | null, seniority: string | null, department: string | null, linkedin_url: string | null, twitter: string | null, phone_number: string | null, company: { name: string, domain: string, industry: string | null } | null } | { error: string } }`\n\n### Instagram\n\n#### scrapeInstagramComments\nGet all the comments 
for an Instagram post\n- Input: `{ postUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramMentions\nScrape an Instagram profile\'s mentions\n- Input: `{ profileUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramPosts\nGet all the posts for an Instagram profile\n- Input: `{ profileUrl: string, resultsLimit: string, onlyPostsNewerThan: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramProfile\nScrape an Instagram profile\n- Input: `{ profileUrl: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramReels\nGet all the reels for an Instagram profile\n- Input: `{ profileUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n### LinkedIn\n\n#### postToLinkedIn\nCreate a post on LinkedIn from the connected account.\n- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, video posts, document posts, and article posts.\n- Attach one media type per post: image, video, document, or article.\n- Documents support PDF, PPT, PPTX, DOC, DOCX (max 100MB, 300 pages). 
Displays as a slideshow carousel.\n- Articles create a link preview with optional custom title, description, and thumbnail.\n- Visibility controls who can see the post.\n- Input: `{ message: string, visibility: "PUBLIC" | "CONNECTIONS", imageUrl?: string, videoUrl?: string, documentUrl?: string, articleUrl?: string, titleText?: string, descriptionText?: string, connectionId?: string }`\n- Output: `unknown`\n\n### Meta Threads\n\n#### scrapeMetaThreadsProfile\nScrape a Meta Threads profile\n- Input: `{ profileUrl: string }`\n- Output: `{ data: unknown }`\n\n### Notion\n\n#### notionCreatePage\nCreate a new page in Notion as a child of an existing page.\n- Requires a Notion OAuth connection (connectionId).\n- Content is provided as markdown and converted to Notion blocks (headings, paragraphs, lists, code, quotes).\n- The page is created as a child of the specified parent page (pageId).\n- Input: `{ pageId: string, content: string, title: string, connectionId?: string }`\n- Output: `{ pageId: string, pageUrl: string }`\n\n#### notionUpdatePage\nUpdate the content of an existing Notion page.\n- Requires a Notion OAuth connection (connectionId).\n- Content is provided as markdown and converted to Notion blocks.\n- "append" mode adds content to the end of the page. "overwrite" mode deletes all existing blocks first.\n- Input: `{ pageId: string, content: string, mode: "append" | "overwrite", connectionId?: string }`\n- Output: `{ pageId: string, pageUrl: string }`\n\n### X\n\n#### postToX\nCreate a post on X (Twitter) from the connected account.\n- Requires an X OAuth connection (connectionId).\n- Maximum 280 characters of text.\n- Optionally attach up to 4 media items (images, GIFs, or videos) via mediaUrls.\n- Media URLs must be publicly accessible. The service fetches and uploads them to X.\n- Supported formats: JPEG, PNG, GIF, WEBP, MP4. 
Images up to 5MB, videos up to 512MB.\n- Input: `{ text: string, connectionId?: string, mediaUrls?: string[] }`\n- Output: `unknown`\n\n#### searchXPosts\nSearch recent X (Twitter) posts matching a query.\n- Searches only the past 7 days of posts.\n- Query supports X API v2 search operators (up to 512 characters).\nAvailable search operators in query:\n| Operator | Description |\n| -----------------| -------------------------------------------------|\n| from: | Posts from a specific user (e.g., from:elonmusk) |\n| to: | Posts sent to a specific user (e.g., to:NASA) |\n| @ | Mentions a user (e.g., @openai) |\n| # | Hashtag search (e.g., #AI) |\n| is:retweet | Filters retweets |\n| is:reply | Filters replies |\n| has:media | Posts containing media (images, videos, or GIFs) |\n| has:links | Posts containing URLs |\n| lang: | Filters by language (e.g., lang:en) |\n| - | Excludes specific terms (e.g., -spam) |\n| () | Groups terms or operators (e.g., (AI OR ML)) |\n| AND, OR, NOT | Boolean logic for combining or excluding terms |\nConjunction-Required Operators (must be combined with a standalone operator):\n| Operator | Description |\n| ------------ | -----------------------------------------------|\n| has:media | Posts containing media (images, videos, or GIFs) |\n| has:links | Posts containing URLs |\n| is:retweet | Filters retweets |\n| is:reply | Filters replies |\nFor example, has:media alone is invalid, but #AI has:media is valid.\n- Input: `{ query: string, scope: "recent" | "all", options: { startTime?: string, endTime?: string, maxResults?: number } }`\n- Output: `{ posts: { id: string, authorId: string, dateCreated: string, text: string, stats: { retweets: number, replies: number, likes: number } }[] }`\n\n### YouTube\n\n#### fetchYoutubeCaptions\nRetrieve the captions/transcript for a YouTube video.\n- Supports multiple languages via the language parameter.\n- "text" export produces timestamped plain text; "json" export produces structured transcript 
data.\n- Input: `{ videoUrl: string, exportType: "text" | "json", language: string }`\n- Output: `{ transcripts: { text: string, start: number }[] }`\n\n#### fetchYoutubeChannel\nRetrieve metadata and recent videos for a YouTube channel.\n- Accepts a YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID).\n- Returns channel info and video listings as a JSON object.\n- Input: `{ channelUrl: string }`\n- Output: `object`\n\n#### fetchYoutubeComments\nRetrieve comments for a YouTube video.\n- Paginates through comments (up to 5 pages).\n- "text" export produces markdown-formatted text; "json" export produces structured comment data.\n- Input: `{ videoUrl: string, exportType: "text" | "json", limitPages: string }`\n- Output: `{ comments: { id: string, link: string, publishedDate: string, text: string, likes: number, replies: number, author: string, authorLink: string, authorImg: string }[] }`\n\n#### fetchYoutubeVideo\nRetrieve metadata for a YouTube video (title, description, stats, channel info).\n- Returns video metadata, channel info, and engagement stats.\n- Video format data is excluded from the response.\n- Input: `{ videoUrl: string }`\n- Output: `object`\n\n#### searchYoutube\nSearch for YouTube videos by keyword.\n- Supports pagination (up to 5 pages) and country/language filters.\n- Use the filter/filterType fields for YouTube search parameter (sp) filters.\n- Input: `{ query: string, limitPages: string, filter: string, filterType: string, countryCode?: string, languageCode?: string }`\n- Output: `{ results: object }`\n\n#### searchYoutubeTrends\nRetrieve trending videos on YouTube by category and region.\n- Categories: "now" (trending now), "music", "gaming", "films".\n- Supports country and language filtering.\n- Input: `{ bp: "now" | "music" | "gaming" | "films", hl: string, gl: string }`\n- Output: `object`\n\n### Helpers\n\n#### `listModels()`\nList all available AI models across all categories.\n\nOutput:\n```typescript\n{\n 
models: {\n id: string;\n name: string; // Display name\n type: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";\n maxTemperature: number;\n maxResponseSize: number;\n inputs: object[]; // Accepted input types\n }[]\n}\n```\n\n#### `listModelsByType(modelType)`\nList AI models filtered by type.\n- `modelType`: `"llm_chat"` | `"image_generation"` | `"video_generation"` | `"video_analysis"` | `"text_to_speech"` | `"vision"` | `"transcription"`\n- Output: same as `listModels()`\n\n#### `listModelsSummary()`\nList all available AI models (summary). Returns only id, name, type, and tags. Suitable for display or consumption inside a model context window.\n\nOutput:\n```typescript\n{\n models: {\n id: string;\n name: string;\n type: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";\n tags: string; // Comma-separated tags\n }[]\n}\n```\n\n#### `listModelsSummaryByType(modelType)`\nList AI models (summary) filtered by type.\n- `modelType`: `"llm_chat"` | `"image_generation"` | `"video_generation"` | `"video_analysis"` | `"text_to_speech"` | `"vision"` | `"transcription"`\n- Output: same as `listModelsSummary()`\n\n#### `listConnectors()`\nList available OAuth connector services (Slack, Google, HubSpot, etc.) and their actions. 
These are third-party integrations \u2014 for most tasks, use actions directly instead.\n\nOutput:\n```typescript\n{\n services: {\n id: string;\n name: string;\n icon: string;\n actions: { id: string; name: string }[];\n }[]\n}\n```\n\n#### `getConnector(serviceId)`\nGet details for a single OAuth connector service by ID.\n\nOutput:\n```typescript\n{\n service: {\n id: string;\n name: string;\n icon: string;\n actions: { id: string; name: string }[];\n }\n}\n```\n\n#### `getConnectorAction(serviceId, actionId)`\nGet the full configuration for an OAuth connector action, including all input fields needed to call it via `runFromConnectorRegistry`. OAuth connectors are sourced from the open-source MindStudio Connector Registry (MSCR) with 850+ actions across third-party services.\n\nOutput:\n```typescript\n{\n action: {\n id: string;\n name: string;\n description: string;\n quickHelp: string;\n configuration: { title: string; items: { label: string; helpText: string; variable: string; type: string; defaultValue: string; placeholder: string; selectOptions?: object }[] }[];\n }\n}\n```\n\n#### `listConnections()`\nList OAuth connections for the organization (authenticated third-party service links). Use the returned connection IDs when calling OAuth connector actions. Connectors require the user to connect to the third-party service in MindStudio before they can be used.\n\nOutput:\n```typescript\n{\n connections: {\n id: string; // Connection ID to pass to connector actions\n provider: string; // Integration provider (e.g. slack, google)\n name: string; // Display name or account identifier\n }[]\n}\n```\n\n#### `estimateStepCost(stepType, step?, options?)`\nEstimate the cost of executing a step before running it. Pass the same step config you would use for execution.\n\n```typescript\nconst estimate = await agent.estimateStepCost(\'generateText\', { message: \'Hello\' });\n```\n\n- `stepType`: string \u2014 The action name (e.g. 
`"generateText"`).\n- `step`: object \u2014 Optional action input parameters for more accurate estimates.\n- `options`: `{ appId?: string, workflowId?: string }` \u2014 Optional context for pricing.\n\nOutput:\n```typescript\n{\n costType?: string; // "free" when the step has no cost\n estimates?: {\n eventType: string; // Billing event type\n label: string; // Human-readable cost label\n unitPrice: number; // Price per unit in billing units\n unitType: string; // What constitutes a unit (e.g. "token", "request")\n estimatedCost?: number; // Estimated total cost, or null if not estimable\n quantity: number; // Number of billable units\n }[]\n}\n```\n\n#### `changeName(displayName)`\nUpdate the display name of the authenticated agent. Useful for agents to set their own name after connecting.\n\n```typescript\nawait agent.changeName(\'My Agent\');\n```\n\n#### `changeProfilePicture(profilePictureUrl)`\nUpdate the profile picture of the authenticated agent. Useful for agents to set their own avatar after connecting.\n\n```typescript\nawait agent.changeProfilePicture(\'https://example.com/avatar.png\');\n```\n\n#### `uploadFile(content, options)`\nUpload a file to the MindStudio CDN. Gets a signed upload URL, PUTs the file content, and returns the permanent public URL.\n\n```typescript\nimport { readFileSync } from \'fs\';\nconst { url } = await agent.uploadFile(readFileSync(\'photo.png\'), { extension: \'png\', type: \'image/png\' });\n```\n\n- `content`: `Buffer | Uint8Array` \u2014 The file content.\n- `options.extension`: string \u2014 File extension without the dot (e.g. `"png"`, `"jpg"`, `"mp4"`).\n- `options.type`: string (optional) \u2014 MIME type (e.g. `"image/png"`). Determines which CDN subdomain is used.\n\nOutput: `{ url: string }` \u2014 The permanent public CDN URL.\n';
4463
- }
4464
- });
4458
+ identity = `
4459
+ You are a senior MindStudio SDK engineer. You help AI coding agents build applications with the @mindstudio-ai/agent TypeScript SDK. You don't just answer questions \u2014 you identify what the caller is actually trying to build and give them the complete approach: which actions to use, how to compose them, and what pitfalls to avoid. Your output is consumed by developers who will implement what you propose. Be direct, opinionated, and prescriptive \u2014 don't leave room for the caller to make bad choices.
4465
4460
 
4466
- // src/ask/prompt.ts
4467
- async function buildSystemPrompt(agent) {
4468
- const [modelsResult, connectionsResult, connectorsResult, llmsResult] = await Promise.allSettled([
4469
- agent.listModelsSummary(),
4470
- agent.listConnections(),
4471
- agent.listConnectors(),
4472
- Promise.resolve().then(() => (init_llms_content(), llms_content_exports))
4473
- ]);
4474
- const modelsSummary = modelsResult.status === "fulfilled" ? modelsResult.value.models.map(
4475
- (m) => `- ${m.id} (${m.name}, type: ${m.type}${m.popularity != null ? ", popularity: " + m.popularity : ""}${m.tags ? ", tags: " + m.tags : ""})`
4476
- ).join("\n") : "(Could not load models \u2014 use the listModels tool to look them up)";
4477
- const connections = connectionsResult.status === "fulfilled" && connectionsResult.value.connections.length > 0 ? connectionsResult.value.connections.map((c) => `- ${c.provider}: ${c.name} (id: ${c.id})`).join("\n") : "No OAuth connections configured.";
4478
- const connectorServices = connectorsResult.status === "fulfilled" ? connectorsResult.value.services.map(
4479
- (s) => `- ${s.id}: ${s.name} (${s.actions?.length ?? 0} actions)`
4480
- ).join("\n") : "(Could not load connectors \u2014 use the getConnectorDetails tool)";
4481
- const llmsContent2 = llmsResult.status === "fulfilled" ? llmsResult.value.llmsContent : "(Could not load action reference \u2014 use getActionDetails tool)";
4482
- const identity = `You are a senior MindStudio SDK engineer. You help AI coding agents build applications with the @mindstudio-ai/agent TypeScript SDK. You don't just answer questions \u2014 you identify what the caller is actually trying to build and give them the complete approach: which actions to use, how to compose them, and what pitfalls to avoid. Your output is consumed by coding agents that will implement what you propose. Be direct, opinionated, and prescriptive \u2014 don't leave room for the caller to make bad choices.
4461
+ Do not offer advice or recommend products or services outside the scope of the MindStudio SDK. If something is not possible, or would be tricky to do, note it for the developer and let them figure out how to solve it - that's outside the scope of your role.
4483
4462
 
4484
4463
  ## Scope
4485
4464
 
@@ -4487,8 +4466,13 @@ async function buildSystemPrompt(agent) {
4487
4466
  2. **AI models** \u2014 model selection, config options, override patterns
4488
4467
  3. **OAuth connectors** \u2014 discovering and using the 850+ connector actions
4489
4468
  4. **Architecture** \u2014 batch execution, error handling, data flow between actions
4490
- 5. **Managed databases and auth** \u2014 db, auth, Roles, resolveUser for MindStudio apps`;
4491
- const referenceDocs = `<sdk_reference>
4469
+ 5. **Managed databases and auth** \u2014 db, auth, Roles, resolveUser for MindStudio apps`.trim();
4470
+ }
4471
+ });
4472
+
4473
+ // src/ask/prompt/reference.ts
4474
+ function buildReferenceDocs(data) {
4475
+ return `<sdk_reference>
4492
4476
  <quick_reference>
4493
4477
  Auth is always pre-configured. Use \`new MindStudioAgent()\` with no arguments in code examples.
4494
4478
  Calling convention: \`const result = await agent.methodName({ ...input })\`
@@ -4529,29 +4513,41 @@ async function buildSystemPrompt(agent) {
4529
4513
  </model_overrides>
4530
4514
 
4531
4515
  <actions>
4532
- ${llmsContent2}
4516
+ ${data.llmsContent}
4533
4517
  </actions>
4534
4518
 
4535
4519
  <models>
4536
- ${modelsSummary}
4520
+ ${data.modelsSummary}
4537
4521
  </models>
4538
4522
 
4539
4523
  <oauth_connections>
4540
- ${connections}
4524
+ ${data.connections}
4541
4525
  </oauth_connections>
4542
4526
 
4543
4527
  <connector_services>
4544
4528
  OAuth connector services from the MindStudio Connector Registry. Each service has multiple actions (850+ total). Use the getConnectorDetails tool to drill into a service's actions and get input fields. Connector actions are executed via the \`runFromConnectorRegistry\` SDK action and require the user to have an OAuth connection set up for that service.
4545
4529
 
4546
- ${connectorServices}
4530
+ ${data.connectorServices}
4547
4531
  </connector_services>
4548
4532
  </sdk_reference>`;
4549
- const instructions = `<instructions>
4533
+ }
4534
+ var init_reference = __esm({
4535
+ "src/ask/prompt/reference.ts"() {
4536
+ "use strict";
4537
+ }
4538
+ });
4539
+
4540
+ // src/ask/prompt/instructions.ts
4541
+ var instructions;
4542
+ var init_instructions = __esm({
4543
+ "src/ask/prompt/instructions.ts"() {
4544
+ "use strict";
4545
+ instructions = `<instructions>
4550
4546
  <principles>
4551
4547
  - Respond to intent, not just the question. When asked "how do I call generateText," also surface relevant configuration the caller probably doesn't know about \u2014 structured output options, response format controls, model-specific features. When asked "how do I parse JSON from a model response," recognize they're probably doing it wrong and suggest built-in structured output instead.
4552
4548
  - Think at the workflow level. When the caller describes a multi-step process ("take user input, call an LLM, extract entities, save to database"), respond with the complete architectural approach: which actions to use, how to chain them, where to use batch execution, what error handling to add. Not just the signature for one action.
4553
- - Be opinionated about SDK usage. Make concrete recommendations about the right way to build things. "Use executeStepBatch here" is better than "you could optionally batch these." But stay grounded on model claims \u2014 only state facts from model metadata, not editorial judgments about quality.
4554
- - Match depth to the question. A simple "what params does generateImage take" gets a concise answer with a code example. A workflow question gets the full architectural response. Don't over-explain simple lookups, don't under-serve complex ones.
4549
+ - Be opinionated about SDK usage. Make concrete recommendations about the right way to build things. "Use executeStepBatch here" is better than "you could optionally batch these." Stay grounded on model claims \u2014 only state facts from model metadata, not editorial judgments about quality.
4550
+ - Match depth to the question. Sometimes the user will ask you for help with a single function signature, for example - or sometimes they will want a complete architecture review. A simple "what params does generateImage take" gets a concise answer with a code example. A workflow question gets the full architectural response.
4555
4551
  </principles>
4556
4552
 
4557
4553
  <anti_patterns>
@@ -4559,7 +4555,7 @@ async function buildSystemPrompt(agent) {
4559
4555
 
4560
4556
  - **Manual JSON parsing from LLM output** \u2014 if they're calling generateText and then parsing the response, they probably want structured output / response format controls instead of \`JSON.parse(content)\`.
4561
4557
  - **Sequential calls that should be batched** \u2014 multiple independent action calls (generate image + text-to-speech + search) should use \`executeStepBatch()\`. Three round trips become one.
4562
- - **Building custom HTTP integrations when a connector exists** \u2014 if they're asking how to call the Slack API, Airtable API, HubSpot API, etc. via \`httpRequest\`, the answer is \`runFromConnectorRegistry\` with an existing OAuth connector. 850+ connector actions exist for this.
4558
+ - **Building custom HTTP integrations when a connector exists** \u2014 if they're asking how to call the Slack API, Airtable API, HubSpot API, etc. via \`httpRequest\`, the answer is \`runFromConnectorRegistry\` with an existing OAuth connector. 850+ connector actions exist for this. If the user has not configured the specific connection in MindStudio yet, that should be step one. Direct them to https://app.mindstudio.ai/services/integrations
4563
4559
  - **Missing MindStudioError handling** \u2014 the SDK has structured errors with \`code\`, \`status\`, \`details\`. Catching generic \`Error\` loses actionable information. Always include \`MindStudioError\` handling in code examples.
4564
4560
  - **One-at-a-time db writes when batch exists** \u2014 N sequential \`update()\` or \`push()\` calls should be a single \`db.batch()\` call. One round trip instead of N.
4565
4561
  - **Hardcoded model IDs without context** \u2014 model IDs can change. When writing code with a specific model, include a comment noting which model it is and why it was chosen, so the caller can swap it later.
@@ -4568,14 +4564,25 @@ async function buildSystemPrompt(agent) {
4568
4564
  <model_guidance>
4569
4565
  Each model in the reference above includes a \`popularity\` score (0.0\u20131.0) reflecting real platform usage over the last 30 days, normalized per model type. Use this to guide recommendations:
4570
4566
 
4571
- - **When the caller asks for a specific model**: use it. But if a significantly more popular or newer model exists in the same category, mention it. "Here's how to do X with Claude 3.5 Haiku \u2014 note that Claude Haiku 4.5 (popularity: 0.9) is the current generation and a better default."
4572
- - **When the caller asks generally** (no model specified): default to a high-popularity model. For text generation, show examples across multiple providers (Anthropic, Google, OpenAI) so the caller sees the breadth \u2014 pick one for the primary example and mention the others as alternatives.
4573
- - **Never rank vendors** or claim one provider is better than another. Popularity reflects usage patterns, not quality judgments. Present options with their model IDs and let the caller choose.
4574
- - **Popularity scores**: 1.0 = most used in its category, 0.5\u20130.9 = commonly used, 0.1\u20130.4 = niche, 0.0 = rarely used, null = new model with no data yet.
4575
- - **Prefer popular models as defaults** in code examples unless the caller has a reason to use something specific. A model with popularity 0.9 is a safer recommendation than one with 0.2.
4567
+ - **When the caller asks for a specific model**: use it. But if a significantly more popular or newer model exists in the same category, mention it. "Here's how to do X with Claude 3.5 Haiku \u2014 note that Claude Haiku 4.5 is the current generation and a better default."
4568
+ - **Never rank vendors** or claim one provider is better than another. Popularity reflects usage patterns, not quality judgments. Present options with their model IDs and let the caller choose. Never make assumptions about the capabilities of models, or suggest that a certain model is "good" at a specific task or better/worse than another at a task.
4569
+ - **Prefer popular models as defaults** in code examples unless the caller has a reason to use something specific.
4576
4570
  - **Always recommend latest-generation models**: For Anthropic, this is Claude 4 family, GPT-5 for OpenAI, Gemini 3 for Google, etc. MindStudio supports a ton of different models, including legacy models - but they are there for niche uses or backward compatibility/existing user requirements - they should NOT be used for new projects.
4577
- - Any image generation model that supports source images in its config supports "remixing" or "image editing". All flagship image models support image remixing and editing. Ignore the tags when recommending image models for editing - all of them support it, especially if their tags say things like "Source Image" etc.
4578
- - For image generation/editing, prefer to recommend Seedream 4.5 or Google Gemini 3.1
4571
+ - Use the names of model input objects to infer capabilities. For example, any image generation model that supports source images in its config supports "remixing" or "image editing". Any video model with a start and end frame option supports creating "looping" videos.
4572
+
4573
+ ## Explicit preferences
4574
+ MindStudio has hundreds of models. Many of them are for niche use cases. In general, prefer to recommend the following models unless the user specifies otherwise.
4575
+
4576
+ Text generation:
4577
+ - Google Gemini, Anthropic Claude, OpenAI GPT
4578
+ Image generation:
4579
+ - Seedream 4.5, Google Gemini 3.1 Flash Image (Nano Banana Pro)
4580
+ Video generation:
4581
+ - Grok Imagine, Kling O3, Google Veo 3.1
4582
+ Text to speech:
4583
+ - Elevenlabs TTS
4584
+ Image analysis:
4585
+ - Prefer using a text generation model from the recommendations above - they all support image inputs
4579
4586
  </model_guidance>
4580
4587
 
4581
4588
  <tools>
@@ -4597,6 +4604,44 @@ async function buildSystemPrompt(agent) {
4597
4604
  - Model tags in the summary are editorial labels, not technical specs. When answering questions about model capabilities (supported inputs, config options, dimensions, etc.), call listModels with details=true to check the \`inputs\` array \u2014 that is the source of truth.
4598
4605
  </response_format>
4599
4606
  </instructions>`;
4607
+ }
4608
+ });
4609
+
4610
+ // src/generated/llms-content.ts
4611
+ var llms_content_exports = {};
4612
+ __export(llms_content_exports, {
4613
+ llmsContent: () => llmsContent
4614
+ });
4615
+ var llmsContent;
4616
+ var init_llms_content = __esm({
4617
+ "src/generated/llms-content.ts"() {
4618
+ "use strict";
4619
+ llmsContent = '# @mindstudio-ai/agent\n\nTypeScript SDK, CLI, and MCP server for MindStudio. One API key gives you access to 200+ AI models (OpenAI, Anthropic, Google, Meta, xAI, DeepSeek, etc.) and 1,000+ actions including 850+ connector actions across third-party services from the open-source MindStudio Connector Registry (https://github.com/mindstudio-ai/mscr). No separate provider API keys required.\n\nThis file is the complete API reference. No other documentation is needed to use the SDK.\n\n## Recommended workflow\n\nThere are 150+ actions available. Do NOT try to read or load them all at once. Follow this discovery flow:\n\n1. **Identify yourself** \u2014 Call `changeName` to set your display name (use your name or whatever your user calls you). If you have a profile picture or icon, call `uploadFile` to upload it, then `changeProfilePicture` with the returned URL. This helps users identify your requests in their logs.\n2. **Ask** \u2014 Use `mindstudio ask "your question"` (CLI) or the `ask` MCP tool for SDK guidance. It knows every action, model, and connector and returns working TypeScript code with real model IDs and config options. Examples: `mindstudio ask "generate an image with FLUX"`, `mindstudio ask "what models support vision?"`, `mindstudio ask "how do I send a Slack message?"`.\n3. **Browse** \u2014 For manual discovery, call `listActions` (MCP tool) or `mindstudio list-actions --summary` (CLI) to get a compact `{ action: description }` map of everything available (~3k tokens). Call `mindstudio info <action>` (CLI) for parameter details.\n4. **Call it** \u2014 Invoke the action with the required parameters. All actions share the same calling convention (see below).\n\nFor specific use cases:\n\n- **OAuth third-party integrations** (Slack, Google, HubSpot, etc.): These are optional OAuth connectors from the MindStudio Connector Registry \u2014 for most tasks, use actions directly instead. 
If you need a third-party integration: call `listConnectors()` to browse services \u2192 `getConnectorAction(serviceId, actionId)` for input fields \u2192 execute via `runFromConnectorRegistry`. Requires an OAuth connection set up in MindStudio first \u2014 call `listConnections()` to check available connections.\n- **Pre-built agents**: Call `listAgents()` to see what\'s available \u2192 `runAgent({ appId })` to execute one. **Important:** Not all agents are configured for API use. Do not try to run an agent just because it appears in the list \u2014 only run agents the user specifically asks you to run.\n- **Model selection**: Call `listModelsSummary()` or `listModelsSummaryByType("llm_chat")` to browse models, then pass the model ID as `modelOverride.model` to actions like `generateText`. Use the summary endpoints (not `listModels`) to keep token usage low.\n- **Cost estimation**: AI-powered actions (text generation, image generation, video, audio, etc.) cost money. Call `estimateStepCost(stepType, stepInput)` before running these and confirm with the user before proceeding \u2014 unless they\'ve explicitly given permission to go ahead. Non-AI actions (data lookups, OAuth connectors, etc.) 
are generally free.\n\n## Install\n\nStandalone binary (CLI/MCP, no dependencies):\n```bash\ncurl -fsSL https://msagent.ai/install.sh | bash\n```\n\nnpm (SDK + CLI):\n```bash\nnpm install @mindstudio-ai/agent\n```\n\nRequires Node.js >= 18.\n\n## CLI\n\nThe package includes a CLI for executing steps from the command line or scripts:\n\n```bash\n# Execute with named flags (kebab-case)\nmindstudio generate-image --prompt "A mountain landscape"\n\n# Execute with JSON input (JSON5-tolerant)\nmindstudio generate-image \'{prompt: "A mountain landscape"}\'\n\n# Extract a single output field\nmindstudio generate-image --prompt "A sunset" --output-key imageUrl\n\n# List all methods (compact JSON \u2014 best for LLM discovery)\nmindstudio list --summary\n\n# List all methods (human-readable table)\nmindstudio list\n\n# Show method details (params, types, output)\nmindstudio info generate-image\n\n# Run via npx without installing\nnpx @mindstudio-ai/agent generate-text --message "Hello"\n```\n\nAuth: run `mindstudio login`, set `MINDSTUDIO_API_KEY` env var, or pass `--api-key <key>`.\nMethod names are kebab-case on the CLI (camelCase also accepted). Flags are kebab-case (`--video-url` for `videoUrl`).\nUse `--output-key <key>` to extract a single field, `--no-meta` to strip $-prefixed metadata.\n\n### Authentication\n\n```bash\n# Interactive login (opens browser, saves key to ~/.mindstudio/config.json)\nmindstudio login\n\n# Check current auth status\nmindstudio whoami\n\n# Clear stored credentials\nmindstudio logout\n```\n\nAuth resolution order: `--api-key` flag > `MINDSTUDIO_API_KEY` env > `~/.mindstudio/config.json` > `CALLBACK_TOKEN` env.\n\n## MCP server\n\nThe package includes an MCP server exposing all methods as tools. 
Start by calling the `listSteps` tool to discover available methods.\n\n```bash\nmindstudio mcp\n```\n\nMCP client config (standalone binary \u2014 recommended):\n```json\n{\n "mcpServers": {\n "mindstudio": {\n "command": "mindstudio",\n "args": ["mcp"],\n "env": { "MINDSTUDIO_API_KEY": "your-api-key" }\n }\n }\n}\n```\n\n## Setup\n\n```typescript\nimport { MindStudioAgent } from \'@mindstudio-ai/agent\';\n\n// With API key (or set MINDSTUDIO_API_KEY env var)\nconst agent = new MindStudioAgent({ apiKey: \'your-key\' });\n```\n\nYour MindStudio API key authenticates all requests. MindStudio routes to the correct AI provider (OpenAI, Google, Anthropic, etc.) server-side \u2014 you do NOT need separate provider API keys.\n\nConstructor options:\n```typescript\nnew MindStudioAgent({\n apiKey?: string, // Auth token. Falls back to MINDSTUDIO_API_KEY env var.\n baseUrl?: string, // API base URL. Defaults to "https://v1.mindstudio-api.com".\n maxRetries?: number, // Retries on 429 rate limit (default: 3). Uses Retry-After header for delay.\n})\n```\n\n## Models\n\nDirect access to 200+ AI models from every major provider \u2014 all through a single API key, billed at cost with no markups.\n\nUse `listModels()` or `listModelsByType()` for full model details, or `listModelsSummary()` / `listModelsSummaryByType()` for a lightweight list (id, name, type, tags) suitable for LLM context windows. 
Pass a model ID to `modelOverride.model` in methods like `generateText` to select a specific model:\n\n```typescript\nconst { models } = await agent.listModelsByType(\'llm_chat\');\nconst model = models.find(m => m.name.includes("Gemini"));\n\nconst { content } = await agent.generateText({\n message: \'Hello\',\n modelOverride: {\n model: model.id,\n temperature: 0.7,\n maxResponseTokens: 1024,\n },\n});\n```\n\n## Calling convention\n\nEvery method has the signature:\n```typescript\nagent.methodName(input: InputType, options?: { appId?: string, threadId?: string }): Promise<OutputType & StepExecutionMeta>\n```\n\nThe first argument is the step-specific input object. The optional second argument controls thread/app context.\n\n**Results are returned flat** \u2014 output fields are spread at the top level alongside metadata:\n\n```typescript\nconst { content } = await agent.generateText({ message: \'Hello\' });\n\n// Full result shape for any method:\nconst result = await agent.generateText({ message: `Hello` });\nresult.content; // step-specific output field\nresult.$appId; // string \u2014 app ID for this execution\nresult.$threadId; // string \u2014 thread ID for this execution\nresult.$rateLimitRemaining; // number | undefined \u2014 API calls remaining in rate limit window\nresult.$billingCost; // number | undefined \u2014 cost in credits for this call\nresult.$billingEvents; // object[] | undefined \u2014 itemized billing events\n```\n\n## Thread persistence\n\nPass `$appId`/`$threadId` from a previous result to maintain conversation state, variable state, or other context across calls:\n\n```typescript\nconst r1 = await agent.generateText({ message: \'My name is Alice\' });\nconst r2 = await agent.generateText(\n { message: \'What is my name?\' },\n { threadId: r1.$threadId, appId: r1.$appId },\n);\n// r2.content => "Your name is Alice"\n```\n\n## Error handling\n\nAll errors throw `MindStudioError`:\n```typescript\nimport { MindStudioError } from 
\'@mindstudio-ai/agent\';\n\ntry {\n await agent.generateImage({ prompt: \'...\' });\n} catch (err) {\n if (err instanceof MindStudioError) {\n err.message; // Human-readable error message\n err.code; // Machine-readable code: "invalid_step_config", "api_error", "call_cap_exceeded", "output_fetch_error"\n err.status; // HTTP status code (400, 401, 429, etc.)\n err.details; // Raw error body from the API\n }\n}\n```\n\n429 rate limit errors are retried automatically (configurable via `maxRetries`).\n\n## Low-level access\n\nFor action types not covered by generated methods:\n```typescript\nconst result = await agent.executeStep(\'stepType\', { ...params });\n```\n\n## Batch execution\n\nExecute multiple steps in parallel in a single request. Maximum 50 steps per batch.\nIndividual step failures do not affect other steps \u2014 partial success is possible.\n\n```typescript\nconst result = await agent.executeStepBatch([\n { stepType: \'generateImage\', step: { prompt: \'a sunset\' } },\n { stepType: \'textToSpeech\', step: { text: \'hello world\' } },\n], { appId?, threadId? });\n\n// Result:\nresult.results; // BatchStepResult[] \u2014 same order as input\nresult.results[0].stepType; // string\nresult.results[0].output; // object | undefined (step output on success)\nresult.results[0].error; // string | undefined (error message on failure)\nresult.results[0].billingCost; // number | undefined (cost on success)\nresult.totalBillingCost; // number | undefined\nresult.appId; // string\nresult.threadId; // string\n```\n\nCLI:\n```bash\nmindstudio batch \'[{"stepType":"generateImage","step":{"prompt":"a cat"}}]\'\ncat steps.json | mindstudio batch\n```\n\n## Methods\n\nAll methods below are called on a `MindStudioAgent` instance (`agent.methodName(...)`).\nInput shows the first argument object. 
Output shows the fields available on the returned result.\n\n### General\n\n#### addSubtitlesToVideo\nAutomatically add subtitles to a video\n- Can control style of text and animation\n- Input: `{ videoUrl: string, language: string, fontName: string, fontSize: number, fontWeight: "normal" | "bold" | "black", fontColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", highlightColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", strokeWidth: number, strokeColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", backgroundColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta" | "none", backgroundOpacity: number, position: "top" | "center" | "bottom", yOffset: number, wordsPerSubtitle: number, enableAnimation: boolean, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### analyzeImage\nAnalyze an image using a vision model based on a text prompt.\n- Uses the configured vision model to generate a text analysis of the image.\n- The prompt should describe what to look for or extract from the image.\n- Input: `{ prompt: string, imageUrl: string, visionModelOverride?: { model: string, config?: object } | { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ analysis: string }`\n\n#### analyzeVideo\nAnalyze a video using a video analysis model based on a text prompt.\n- Uses the configured video analysis model to generate a text analysis of the video.\n- 
The prompt should describe what to look for or extract from the video.\n- Input: `{ prompt: string, videoUrl: string, videoAnalysisModelOverride?: { model: string, config?: object } | { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ analysis: string }`\n\n#### captureThumbnail\nCapture a thumbnail from a video at a specified timestamp\n- Input: `{ videoUrl: string, at: number | string }`\n- Output: `{ thumbnailUrl: string }`\n\n#### checkAppRole\nCheck whether the current user has a specific app role and branch accordingly.\n- Checks if the current user has been assigned a specific role in this app.\n- If the user has the role, transitions to the "has role" path.\n- If the user does not have the role, transitions to the "no role" path, or errors if no path is configured.\n- Role names are defined by the app creator and assigned to users via the app roles system.\n- The roleName field supports {{variables}} for dynamic role checks.\n- Input: `{ roleName: string, hasRoleStepId?: string, hasRoleWorkflowId?: string, noRoleStepId?: string, noRoleWorkflowId?: string }`\n- Output: `{ hasRole: boolean, userRoles: string[] }`\n\n#### convertPdfToImages\nConvert each page of a PDF document into a PNG image.\n- Each page is converted to a separate PNG and re-hosted on the CDN.\n- Returns an array of image URLs, one per page.\n- Input: `{ pdfUrl: string }`\n- Output: `{ imageUrls: string[] }`\n\n#### createDataSource\nCreate a new empty vector data source for the current app.\n- Creates a new data source (vector database) associated with the current app version.\n- The data source is created empty \u2014 use the "Upload Data Source Document" block to add documents.\n- Returns the new 
data source ID which can be used in subsequent blocks.\n- Input: `{ name: string }`\n- Output: `unknown`\n\n#### createGmailDraft\nCreate a draft email in the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- The draft appears in the user\'s Gmail Drafts folder but is not sent.\n- messageType controls the body format: "plain" for plain text, "html" for raw HTML, "markdown" for auto-converted markdown.\n- Input: `{ to: string, subject: string, message: string, connectionId?: string, messageType: "plain" | "html" | "markdown" }`\n- Output: `{ draftId: string }`\n\n#### deleteDataSource\nDelete a vector data source from the current app.\n- Soft-deletes a data source (vector database) by marking it as deleted.\n- The Milvus partition is cleaned up asynchronously by a background cron job.\n- The data source must belong to the current app version.\n- Input: `{ dataSourceId: string }`\n- Output: `unknown`\n\n#### deleteDataSourceDocument\nDelete a single document from a data source.\n- Soft-deletes a document by marking it as deleted.\n- Requires both the data source ID and document ID.\n- After deletion, reloads vectors into Milvus so the data source reflects the change immediately.\n- Input: `{ dataSourceId: string, documentId: string }`\n- Output: `unknown`\n\n#### detectChanges\nDetect changes between runs by comparing current input against previously stored state. 
Routes execution based on whether a change occurred.\n- Persists state across runs using a global variable keyed to the step ID.\n- Two modes: "comparison" (default) uses strict string inequality; "ai" uses an LLM to determine if a meaningful change occurred.\n- First run always treats the value as "changed" since there is no previous state.\n- Each mode supports transitions to different steps/workflows for the "changed" and "unchanged" paths.\n- AI mode bills normally for the LLM call.\n- Input: `{ mode: "ai" | "comparison", input: string, prompt?: string, modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, previousValueVariable?: string, changedStepId?: string, changedWorkflowId?: string, unchangedStepId?: string, unchangedWorkflowId?: string }`\n- Output: `{ hasChanged: boolean, currentValue: string, previousValue: string, isFirstRun: boolean }`\n\n#### detectPII\nScan text for personally identifiable information using Microsoft Presidio.\n- In workflow mode, transitions to detectedStepId if PII is found, notDetectedStepId otherwise.\n- In direct execution, returns the detection results without transitioning.\n- If entities is empty, returns immediately with no detections.\n- Input: `{ input: string, language: string, entities: string[], detectedStepId?: string, notDetectedStepId?: string, outputLogVariable?: string | null }`\n- Output: `{ detected: boolean, detections: { entity_type: string, start: number, end: number, score: number }[] }`\n\n#### discordEditMessage\nEdit a previously sent Discord channel message. 
Use with the message ID returned by Send Discord Message.\n- Only messages sent by the bot can be edited.\n- The messageId is returned by the Send Discord Message step.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- When editing with an attachment, the new attachment replaces any previous attachments on the message.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Input: `{ botToken: string, channelId: string, messageId: string, text: string, attachmentUrl?: string }`\n- Output: `unknown`\n\n#### discordSendFollowUp\nSend a follow-up message to a Discord slash command interaction.\n- Requires the applicationId and interactionToken from the Discord trigger variables.\n- Follow-up messages appear as new messages in the channel after the initial response.\n- Returns the sent message ID.\n- Interaction tokens expire after 15 minutes.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Input: `{ applicationId: string, interactionToken: string, text: string, attachmentUrl?: string }`\n- Output: `{ messageId: string }`\n\n#### discordSendMessage\nSend a message to Discord \u2014 either edit the loading message or send a new channel message.\n- mode "edit" replaces the loading message (interaction response) with the final result. Uses applicationId and interactionToken from trigger variables. No bot permissions required.\n- mode "send" sends a new message to a channel. Uses botToken and channelId from trigger variables. Returns a messageId that can be used with Edit Discord Message.\n- Optionally attach a file by providing a URL to attachmentUrl. 
The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Interaction tokens expire after 15 minutes.\n- Input: `{ mode: "edit" | "send", text: string, applicationId?: string, interactionToken?: string, botToken?: string, channelId?: string, attachmentUrl?: string }`\n- Output: `{ messageId?: string }`\n\n#### downloadVideo\nDownload a video file\n- Works with YouTube, TikTok, etc., by using ytdlp behind the scenes\n- Can save as mp4 or mp3\n- Input: `{ videoUrl: string, format: "mp4" | "mp3" }`\n- Output: `{ videoUrl: string }`\n\n#### enhanceImageGenerationPrompt\nGenerate or enhance an image generation prompt using a language model. Optionally generates a negative prompt.\n- Rewrites the user\'s prompt with added detail about style, lighting, colors, and composition.\n- Also useful for initial generation, it doesn\'t always need to be enhancing an existing prompt\n- When includeNegativePrompt is true, a second model call generates a negative prompt.\n- Input: `{ initialPrompt: string, includeNegativePrompt: boolean, negativePromptDestinationVariableName?: string, systemPrompt: string, modelOverride?: unknown }`\n- Output: `{ prompt: string, negativePrompt?: string }`\n\n#### enhanceVideoGenerationPrompt\nGenerate or enhance a video generation prompt using a language model. 
Optionally generates a negative prompt.\n- Rewrites the user\'s prompt with added detail about style, camera movement, lighting, and composition.\n- Also useful for initial generation, it doesn\'t always need to be enhancing an existing prompt\n- When includeNegativePrompt is true, a second model call generates a negative prompt.\n- Input: `{ initialPrompt: string, includeNegativePrompt: boolean, negativePromptDestinationVariableName?: string, systemPrompt: string, modelOverride?: unknown }`\n- Output: `{ prompt: string, negativePrompt?: string }`\n\n#### extractAudioFromVideo\nExtract audio MP3 from a video file\n- Input: `{ videoUrl: string }`\n- Output: `{ audioUrl: string }`\n\n#### extractText\nDownload a file from a URL and extract its text content. Supports PDFs, plain text files, and other document formats.\n- Best suited for PDFs and raw text/document files. For web pages, use the scrapeUrl step instead.\n- Accepts a single URL, a comma-separated list of URLs, or a JSON array of URLs.\n- Files are rehosted on the MindStudio CDN before extraction.\n- Maximum file size is 50MB per URL.\n- Input: `{ url: string | string[] }`\n- Output: `{ text: string | string[] }`\n\n#### fetchDataSourceDocument\nFetch the full extracted text contents of a document in a data source.\n- Loads a document by ID and returns its full extracted text content.\n- The document must have been successfully processed (status "done").\n- Also returns document metadata (name, summary, word count).\n- Input: `{ dataSourceId: string, documentId: string }`\n- Output: `unknown`\n\n#### fetchSlackChannelHistory\nFetch recent message history from a Slack channel.\n- The user is responsible for connecting their Slack workspace and selecting the channel\n- Input: `{ connectionId?: string, channelId: string, limit?: number, startDate?: string, endDate?: string, includeImages?: boolean, includeRawMessage?: boolean }`\n- Output: `{ messages: { from: string, content: string, timestamp?: string, 
images?: string[], rawMessage?: { app_id?: string, assistant_app_thread?: { first_user_thread_reply?: string, title?: string, title_blocks?: unknown[] }, attachments?: { actions?: unknown[], app_id?: string, app_unfurl_url?: string, author_icon?: string, author_id?: string, author_link?: string, author_name?: string, author_subname?: string, blocks?: unknown[], bot_id?: string, bot_team_id?: string, callback_id?: string, channel_id?: string, channel_name?: string, channel_team?: string, color?: string, fallback?: string, fields?: unknown[], file_id?: string, filename?: string, files?: unknown[], footer?: string, footer_icon?: string, from_url?: string, hide_border?: boolean, hide_color?: boolean, id?: number, image_bytes?: number, image_height?: number, image_url?: string, image_width?: number, indent?: boolean, is_app_unfurl?: boolean, is_file_attachment?: boolean, is_msg_unfurl?: boolean, is_reply_unfurl?: boolean, is_thread_root_unfurl?: boolean, list?: unknown, list_record?: unknown, list_record_id?: string, list_records?: unknown[], list_schema?: unknown[], list_view?: unknown, list_view_id?: string, message_blocks?: unknown[], metadata?: unknown, mimetype?: string, mrkdwn_in?: string[], msg_subtype?: string, original_url?: string, pretext?: string, preview?: unknown, service_icon?: string, service_name?: string, service_url?: string, size?: number, text?: string, thumb_height?: number, thumb_url?: string, thumb_width?: number, title?: string, title_link?: string, ts?: string, url?: string, video_html?: string, video_html_height?: number, video_html_width?: number, video_url?: string }[], blocks?: { accessory?: unknown, alt_text?: string, api_decoration_available?: boolean, app_collaborators?: string[], app_id?: string, author_name?: string, block_id?: string, bot_user_id?: string, button_label?: string, call?: unknown, call_id?: string, description?: unknown, developer_trace_id?: string, dispatch_action?: boolean, element?: unknown, elements?: unknown[], 
expand?: boolean, external_id?: string, fallback?: string, fields?: unknown[], file?: unknown, file_id?: string, function_trigger_id?: string, hint?: unknown, image_bytes?: number, image_height?: number, image_url?: string, image_width?: number, is_animated?: boolean, is_workflow_app?: boolean, label?: unknown, optional?: boolean, owning_team_id?: string, provider_icon_url?: string, provider_name?: string, sales_home_workflow_app_type?: number, share_url?: string, slack_file?: unknown, source?: string, text?: unknown, thumbnail_url?: string, title?: unknown, title_url?: string, trigger_subtype?: string, trigger_type?: string, type?: unknown, url?: string, video_url?: string, workflow_id?: string }[], bot_id?: string, bot_profile?: { app_id?: string, deleted?: boolean, icons?: unknown, id?: string, name?: string, team_id?: string, updated?: number }, client_msg_id?: string, display_as_bot?: boolean, edited?: { ts?: string, user?: string }, files?: { access?: string, alt_txt?: string, app_id?: string, app_name?: string, attachments?: unknown[], blocks?: unknown[], bot_id?: string, can_toggle_canvas_lock?: boolean, canvas_printing_enabled?: boolean, canvas_template_mode?: string, cc?: unknown[], channel_actions_count?: number, channel_actions_ts?: string, channels?: string[], comments_count?: number, converted_pdf?: string, created?: number, deanimate?: string, deanimate_gif?: string, display_as_bot?: boolean, dm_mpdm_users_with_file_access?: unknown[], duration_ms?: number, edit_link?: string, edit_timestamp?: number, editable?: boolean, editor?: string, editors?: string[], external_id?: string, external_type?: string, external_url?: string, favorites?: unknown[], file_access?: string, filetype?: string, from?: unknown[], groups?: string[], has_more?: boolean, has_more_shares?: boolean, has_rich_preview?: boolean, headers?: unknown, hls?: string, hls_embed?: string, id?: string, image_exif_rotation?: number, ims?: string[], initial_comment?: unknown, 
is_channel_space?: boolean, is_external?: boolean, is_public?: boolean, is_restricted_sharing_enabled?: boolean, is_starred?: boolean, last_editor?: string, last_read?: number, lines?: number, lines_more?: number, linked_channel_id?: string, list_csv_download_url?: string, list_limits?: unknown, list_metadata?: unknown, media_display_type?: string, media_progress?: unknown, mimetype?: string, mode?: string, mp4?: string, mp4_low?: string, name?: string, non_owner_editable?: boolean, num_stars?: number, org_or_workspace_access?: string, original_attachment_count?: number, original_h?: string, original_w?: string, permalink?: string, permalink_public?: string, pinned_to?: string[], pjpeg?: string, plain_text?: string, pretty_type?: string, preview?: string, preview_highlight?: string, preview_is_truncated?: boolean, preview_plain_text?: string, private_channels_with_file_access_count?: number, private_file_with_access_count?: number, public_url_shared?: boolean, quip_thread_id?: string, reactions?: unknown[], saved?: unknown, sent_to_self?: boolean, shares?: unknown, show_badge?: boolean, simplified_html?: string, size?: number, source_team?: string, subject?: string, subtype?: string, team_pref_version_history_enabled?: boolean, teams_shared_with?: unknown[], template_conversion_ts?: number, template_description?: string, template_icon?: string, template_name?: string, template_title?: string, thumb_1024?: string, thumb_1024_gif?: string, thumb_1024_h?: string, thumb_1024_w?: string, thumb_160?: string, thumb_160_gif?: string, thumb_160_h?: string, thumb_160_w?: string, thumb_360?: string, thumb_360_gif?: string, thumb_360_h?: string, thumb_360_w?: string, thumb_480?: string, thumb_480_gif?: string, thumb_480_h?: string, thumb_480_w?: string, thumb_64?: string, thumb_64_gif?: string, thumb_64_h?: string, thumb_64_w?: string, thumb_720?: string, thumb_720_gif?: string, thumb_720_h?: string, thumb_720_w?: string, thumb_80?: string, thumb_800?: string, thumb_800_gif?: 
string, thumb_800_h?: string, thumb_800_w?: string, thumb_80_gif?: string, thumb_80_h?: string, thumb_80_w?: string, thumb_960?: string, thumb_960_gif?: string, thumb_960_h?: string, thumb_960_w?: string, thumb_gif?: string, thumb_pdf?: string, thumb_pdf_h?: string, thumb_pdf_w?: string, thumb_tiny?: string, thumb_video?: string, thumb_video_h?: number, thumb_video_w?: number, timestamp?: number, title?: string, title_blocks?: unknown[], to?: unknown[], transcription?: unknown, update_notification?: number, updated?: number, url_private?: string, url_private_download?: string, url_static_preview?: string, user?: string, user_team?: string, username?: string, vtt?: string }[], icons?: { emoji?: string, image_36?: string, image_48?: string, image_64?: string, image_72?: string }, inviter?: string, is_locked?: boolean, latest_reply?: string, metadata?: { event_payload?: unknown, event_type?: string }, parent_user_id?: string, purpose?: string, reactions?: { count?: number, name?: string, url?: string, users?: string[] }[], reply_count?: number, reply_users?: string[], reply_users_count?: number, root?: { bot_id?: string, icons?: unknown, latest_reply?: string, parent_user_id?: string, reply_count?: number, reply_users?: string[], reply_users_count?: number, subscribed?: boolean, subtype?: string, text?: string, thread_ts?: string, ts?: string, type?: string, username?: string }, subscribed?: boolean, subtype?: string, team?: string, text?: string, thread_ts?: string, topic?: string, ts?: string, type?: string, upload?: boolean, user?: string, username?: string, x_files?: string[] } }[] }`\n\n#### generateAsset\nGenerate an HTML asset and export it as a webpage, PDF, or image\n- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. 
They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). 
In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)\n- Input: `{ source: string, sourceType: "html" | "markdown" | "spa" | "raw" | "dynamic" | "customInterface", outputFormat: "pdf" | "png" | "html" | "mp4" | "openGraph", pageSize: "full" | "letter" | "A4" | "custom", testData: object, options?: { pageWidthPx?: number, pageHeightPx?: number, pageOrientation?: "portrait" | "landscape", rehostMedia?: boolean, videoDurationSeconds?: number }, spaSource?: { source?: string, lastCompiledSource?: string, files?: object, paths: string[], root: string, zipUrl: string }, rawSource?: string, dynamicPrompt?: string, dynamicSourceModelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, transitionControl?: "default" | "native", shareControl?: "default" | "hidden", shareImageUrl?: string, intermediateAsset?: boolean }`\n- Output: `{ url: string }`\n\n#### generateChart\nCreate a chart image using QuickChart (Chart.js) and return the URL.\n- The data field must be a Chart.js-compatible JSON object serialized as a string.\n- Supported chart types: bar, line, pie.\n- Input: `{ chart: { chartType: "bar" | "line" | "pie", data: string, options: { width: string, height: string } } }`\n- Output: `{ chartUrl: string }`\n\n#### generateImage\nGenerate an image from a text prompt using an AI model.\n- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Images are automatically hosted on a CDN.\n- In foreground mode, the image is displayed to the user. 
In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple images are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.\n- Input: `{ prompt: string, intermediateAsset?: boolean, imageModelOverride?: { model: string, config?: object }, generateVariants?: boolean, numVariants?: number, addWatermark?: boolean }`\n- Output: `{ imageUrl: string | string[] }`\n\n#### generateLipsync\nGenerate a lip sync video from provided audio and image.\n- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ intermediateAsset?: boolean, addWatermark?: boolean, lipsyncModelOverride?: { model: string, config?: object } }`\n- Output: `unknown`\n\n#### generateMusic\nGenerate an audio file from provided instructions (text) using a music model.\n- The text field contains the instructions (prompt) for the music generation.\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ text: string, intermediateAsset?: boolean, musicModelOverride?: { model: string, config?: object } }`\n- Output: `unknown`\n\n#### generateStaticVideoFromImage\nConvert a static image to an MP4\n- Can use to create slides/intertitles/slates for video composition\n- Input: `{ imageUrl: string, duration: string }`\n- Output: `{ videoUrl: string }`\n\n#### generateText\nSend a message to an AI model and return the response, or echo a system message.\n- Source "user" sends the message to an LLM and returns the model\'s response.\n- Source "system" echoes the message content directly (no AI call).\n- Mode "background" saves the result to a variable. 
Mode "foreground" streams it to the user (not available in direct execution).\n- Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.\n- When executed inside a v2 app method (managed sandbox or local dev tunnel),\nLLM token output can be streamed to the frontend in real time via an SSE\nside-channel. The frontend opts in by passing { stream: true } to the method\ninvocation via @mindstudio-ai/interface. Tokens are published to Redis\npub/sub as they arrive and forwarded as SSE events on the invoke response.\nThe method code itself is unchanged \u2014 streaming is transparent to the\ndeveloper. See V2ExecutionService.ts and the invoke handler in V2Apps for\nthe server-side plumbing.\n- Input: `{ message: string, source?: "user" | "system", modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, structuredOutputType?: "text" | "json" | "csv", structuredOutputExample?: string, chatHistoryMode?: "include" | "exclude" }`\n- Output: `{ content: string }`\n\n#### generateVideo\nGenerate a video from a text prompt using an AI model.\n- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Videos are automatically hosted on a CDN.\n- In foreground mode, the video is displayed to the user. 
In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.\n- Input: `{ prompt: string, intermediateAsset?: boolean, videoModelOverride?: { model: string, config?: object }, generateVariants?: boolean, numVariants?: number, addWatermark?: boolean }`\n- Output: `{ videoUrl: string | string[] }`\n\n#### getGmailAttachments\nDownload attachments from a Gmail email and re-host them on CDN.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Attachments are uploaded to CDN and returned as URLs.\n- Attachments larger than 25MB are skipped.\n- Use the message ID from Search Gmail Emails, List Recent Gmail Emails, or Get Gmail Email steps.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### getGmailUnreadCount\nGet the number of unread emails in the connected Gmail inbox.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the unread message count for the inbox label.\n- This is a lightweight call that does not fetch any email content.\n- Input: `{ connectionId?: string }`\n- Output: `unknown`\n\n#### getMediaMetadata\nGet info about a media file\n- Input: `{ mediaUrl: string }`\n- Output: `{ metadata: string }`\n\n#### httpRequest\nMake an HTTP request to an external endpoint and return the response.\n- Supports GET, POST, PATCH, DELETE, and PUT methods.\n- Body can be raw JSON/text, URL-encoded form data, or multipart form data.\n- Input: `{ url: string, method: string, headers: object, queryParams: object, body: string, bodyItems: object, contentType: "none" | "application/json" | "application/x-www-form-urlencoded" | "multipart/form-data" | "custom", customContentType: string, testData?: object }`\n- Output: `{ ok: boolean, status: number, statusText: string, response: string }`\n\n#### 
imageFaceSwap\nReplace a face in an image with a face from another image using AI.\n- Requires both a target image and a face source image.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, faceImageUrl: string, engine: string }`\n- Output: `{ imageUrl: string }`\n\n#### imageRemoveWatermark\nRemove watermarks from an image using AI.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ imageUrl: string }`\n\n#### insertVideoClips\nInsert b-roll clips into a base video at a timecode, optionally with an xfade transition.\n- Input: `{ baseVideoUrl: string, overlayVideos: { videoUrl: string, startTimeSec: number }[], transition?: string, transitionDuration?: number, useOverlayAudio?: boolean, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### listDataSources\nList all data sources for the current app.\n- Returns metadata for every data source associated with the current app version.\n- Each entry includes the data source ID, name, description, status, and document list.\n- Input: `object`\n- Output: `unknown`\n\n#### listGmailLabels\nList all labels in the connected Gmail account. Use these label IDs or names with the Update Gmail Labels step.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns both system labels (INBOX, SENT, TRASH, etc.) 
and user-created labels.\n- Label type is "system" for built-in labels or "user" for custom labels.\n- Input: `{ connectionId?: string }`\n- Output: `unknown`\n\n#### listRecentGmailEmails\nList recent emails from the connected Gmail inbox.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns up to 100 emails (default 5), ordered by most recent first.\n- Functionally equivalent to Search Gmail Emails with an "in:inbox" query.\n- Input: `{ connectionId?: string, exportType: "json" | "text", limit: string }`\n- Output: `unknown`\n\n#### logic\nRoute execution to different branches based on AI evaluation, comparison operators, or workflow jumps.\n- Supports two modes: "ai" (default) uses an AI model to pick the most accurate statement; "comparison" uses operator-based checks.\n- In AI mode, the model picks the most accurate statement from the list. All possible cases must be specified.\n- In comparison mode, the context is the left operand and each case\'s condition is the right operand. First matching case wins. 
Use operator "default" as a fallback.\n- Requires at least two cases.\n- Each case can transition to a step in the current workflow (destinationStepId) or jump to another workflow (destinationWorkflowId).\n- Input: `{ mode?: "ai" | "comparison", context: string, cases: ({ id: string, condition: string, operator?: "eq" | "neq" | "gt" | "lt" | "gte" | "lte" | "exists" | "not_exists" | "contains" | "not_contains" | "default", destinationStepId?: string, destinationWorkflowId?: string } | string)[], modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ selectedCase: number }`\n\n#### makeDotComRunScenario\nTrigger a Make.com (formerly Integromat) scenario via webhook and return the response.\n- The webhook URL must be configured in your Make.com scenario.\n- Input key-value pairs are sent as JSON in the POST body.\n- Response format depends on the Make.com scenario configuration.\n- Input: `{ webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### mergeAudio\nMerge one or more clips into a single audio file.\n- Input: `{ mp3Urls: string[], fileMetadata?: object, albumArtUrl?: string, intermediateAsset?: boolean }`\n- Output: `{ audioUrl: string }`\n\n#### mergeVideos\nMerge one or more clips into a single video.\n- Input: `{ videoUrls: string[], transition?: string, transitionDuration?: number, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### mixAudioIntoVideo\nMix an audio track into a video\n- Input: `{ videoUrl: string, audioUrl: string, options: { keepVideoAudio?: boolean, audioGainDb?: number, videoGainDb?: number, loopAudio?: boolean }, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### 
muteVideo\nMute a video file\n- Input: `{ videoUrl: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### n8nRunNode\nTrigger an n8n workflow node via webhook and return the response.\n- The webhook URL must be configured in your n8n workflow.\n- Supports GET and POST methods with optional Basic authentication.\n- For GET requests, input values are sent as query parameters. For POST, they are sent as JSON body.\n- Input: `{ method: string, authentication: "none" | "basic" | "string", user: string, password: string, webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### postToSlackChannel\nSend a message to a Slack channel via a connected bot.\n- The user is responsible for connecting their Slack workspace and selecting the channel\n- Supports both simple text messages and slack blocks messages\n- Text messages can use limited markdown (slack-only formatting\u2014e.g., headers are just rendered as bold)\n- Input: `{ channelId: string, messageType: "string" | "blocks", message: string, connectionId?: string }`\n- Output: `unknown`\n\n#### postToZapier\nSend data to a Zapier Zap via webhook and return the response.\n- The webhook URL must be configured in the Zapier Zap settings\n- Input keys and values are sent as the JSON body of the POST request\n- The webhook response (JSON or plain text) is returned as the output\n- Input: `{ webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### queryAppDatabase\nExecute a SQL query against the app managed database.\n- Executes raw SQL against a SQLite database managed by the app.\n- For SELECT queries, returns rows as JSON.\n- For INSERT/UPDATE/DELETE, returns the number of affected rows.\n- Use {{variables}} directly in your SQL. 
By default they are automatically extracted\nand passed as safe parameterized values (preventing SQL injection).\nExample: INSERT INTO contacts (name, comment) VALUES ({{name}}, {{comment}})\n- Full MindStudio handlebars syntax is supported, including helpers like {{json myVar}},\n{{get myVar "$.path"}}, {{global.orgName}}, etc.\n- Set parameterize to false for raw/dynamic SQL where variables are interpolated directly\ninto the query string. Use this when another step generates full or partial SQL, e.g.\na bulk INSERT with a precomputed VALUES list. The user is responsible for sanitization\nwhen parameterize is false.\n- Input: `{ databaseId: string, sql: string, parameterize?: boolean }`\n- Output: `{ rows: unknown[], changes: number }`\n\n#### queryDataSource\nSearch a vector data source (RAG) and return relevant document chunks.\n- Queries a vectorized data source and returns the most relevant chunks.\n- Useful for retrieval-augmented generation (RAG) workflows.\n- Input: `{ dataSourceId: string, query: string, maxResults: number }`\n- Output: `{ text: string, chunks: string[], query: string, citations: unknown[], latencyMs: number }`\n\n#### queryExternalDatabase\nExecute a SQL query against an external database connected to the workspace.\n- Requires a database connection configured in the workspace.\n- Supports PostgreSQL (including Supabase), MySQL, and MSSQL.\n- Results can be returned as JSON or CSV.\n- Input: `{ connectionId?: string, query: string, outputFormat: "json" | "csv" }`\n- Output: `{ data: unknown }`\n\n#### redactPII\nReplace personally identifiable information in text with placeholders using Microsoft Presidio.\n- PII is replaced with entity type placeholders (e.g. 
"Call me at <PHONE_NUMBER>").\n- If entities is empty, returns empty text immediately without processing.\n- Input: `{ input: string, language: string, entities: string[] }`\n- Output: `{ text: string }`\n\n#### removeBackgroundFromImage\nRemove the background from an image using AI, producing a transparent PNG.\n- Uses the Bria background removal model via fal.ai.\n- Output is re-hosted on the CDN as a PNG with transparency.\n- Input: `{ imageUrl: string }`\n- Output: `{ imageUrl: string }`\n\n#### resizeVideo\nResize a video file\n- Input: `{ videoUrl: string, mode: "fit" | "exact", maxWidth?: number, maxHeight?: number, width?: number, height?: number, strategy?: "pad" | "crop", intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### runFromConnectorRegistry\nRun a raw API connector to a third-party service\n- Use the /developer/v2/helpers/connectors endpoint to list available services and actions.\n- Use /developer/v2/helpers/connectors/{serviceId}/{actionId} to get the full input configuration for an action.\n- Use /developer/v2/helpers/connections to list your available OAuth connections.\n- The actionId format is "serviceId/actionId" (e.g., "slack/send-message").\n- Pass a __connectionId to authenticate the request with a specific OAuth connection, otherwise the default will be used (if configured).\n- Input: `{ actionId: string, displayName: string, icon: string, configurationValues: object, __connectionId?: string }`\n- Output: `{ data: object }`\n\n#### runPackagedWorkflow\nRun a packaged workflow ("custom block")\n- From the user\'s perspective, packaged workflows are just ordinary blocks. 
Behind the scenes, they operate like packages/libraries in a programming language, letting the user execute custom functionality.\n- Some of these packaged workflows are available as part of MindStudio\'s "Standard Library" and available to every user.\n- Available packaged workflows are documented here as individual blocks, but the runPackagedWorkflow block is how they need to be wrapped in order to be executed correctly.\n- Input: `{ appId: string, workflowId: string, inputVariables: object, outputVariables: object, name: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeLinkedInCompany\nScrape public company data from a LinkedIn company page.\n- Requires a LinkedIn company URL (e.g. https://www.linkedin.com/company/mindstudioai).\n- Returns structured company data including description, employees, updates, and similar companies.\n- Input: `{ url: string }`\n- Output: `{ company: unknown }`\n\n#### scrapeLinkedInProfile\nScrape public profile data from a LinkedIn profile page.\n- Requires a LinkedIn profile URL (e.g. 
https://www.linkedin.com/in/username).\n- Returns structured profile data including experience, education, articles, and activities.\n- Input: `{ url: string }`\n- Output: `{ profile: unknown }`\n\n#### scrapeUrl\nExtract text, HTML, or structured content from one or more web pages.\n- Accepts a single URL or multiple URLs (as a JSON array, comma-separated, or newline-separated).\n- Output format controls the result shape: "text" returns markdown, "html" returns raw HTML, "json" returns structured scraper data.\n- Can optionally capture a screenshot of each page.\n- Input: `{ url: string, service?: "default" | "firecrawl", autoEnhance?: boolean, pageOptions?: { onlyMainContent: boolean, screenshot: boolean, waitFor: number, replaceAllPathsWithAbsolutePaths: boolean, headers: object, removeTags: string[], mobile: boolean } }`\n- Output: `{ content: string | string[] | { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } | { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } }[], screenshot?: string }`\n\n#### scrapeXPost\nScrape data from a single X (Twitter) post by URL.\n- Returns structured post data (text, html, optional json/screenshot/metadata).\n- Optionally saves the text content to a variable.\n- Input: `{ url: string }`\n- Output: `{ post: { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } }`\n\n#### scrapeXProfile\nScrape public profile data from an X (Twitter) account by URL.\n- Returns structured profile data.\n- Optionally saves the result to a variable.\n- Input: `{ url: string }`\n- Output: `{ profile: { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } }`\n\n#### 
screenshotUrl\nCapture a screenshot of a web page as a PNG image.\n- Takes a viewport or full-page screenshot of the given URL.\n- Returns a CDN-hosted PNG image URL.\n- Viewport mode captures only the visible area; fullPage captures the entire scrollable page.\n- You can customize viewport width/height, add a delay, or wait for a CSS selector before capturing.\n- Input: `{ url: string, mode?: "viewport" | "fullPage", width?: number, height?: number, delay?: number, waitFor?: string }`\n- Output: `{ screenshotUrl: string }`\n\n#### searchGmailEmails\nSearch for emails in the connected Gmail account using a Gmail search query. To list recent inbox emails, pass an empty query string.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Uses Gmail search syntax (e.g. "from:user@example.com", "subject:invoice", "is:unread").\n- To list recent inbox emails, use an empty query string or "in:inbox".\n- Returns up to 100 emails (default 5). The variable receives text or JSON depending on exportType.\n- The direct execution output always returns structured email objects.\n- Input: `{ query: string, connectionId?: string, exportType: "json" | "text", limit: string }`\n- Output: `{ emails: { id: string, subject: string, from: string, to: string, date: string, plainBody: string, htmlBody: string, labels: string }[] }`\n\n#### searchGoogle\nSearch the web using Google and return structured results.\n- Defaults to us/english, but can optionally specify country and/or language.\n- Defaults to any time, but can optionally specify last hour, last day, week, month, or year.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, languageCode?: string, dateRange?: "hour" | "day" | "week" | "month" | "year" | "any", numResults?: number }`\n- Output: `{ results: { title: string, description: string, url: string }[] }`\n\n#### searchGoogleImages\nSearch Google Images and 
return image results with URLs and metadata.\n- Defaults to us/english, but can optionally specify country and/or language.\n- Defaults to any time, but can optionally specify last hour, last day, week, month, or year.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, languageCode?: string, dateRange?: "hour" | "day" | "week" | "month" | "year" | "any", numResults?: number }`\n- Output: `{ images: { title: string, imageUrl: string, imageWidth: number, imageHeight: number, thumbnailUrl: string, thumbnailWidth: number, thumbnailHeight: number, source: string, domain: string, link: string, googleUrl: string, position: number }[] }`\n\n#### searchGoogleNews\nSearch Google News for recent news articles matching a query.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ text: string, exportType: "text" | "json", numResults?: number }`\n- Output: `{ articles: { title: string, link: string, date: string, source: { name: string }, snippet?: string }[] }`\n\n#### searchGoogleTrends\nFetch Google Trends data for a search term.\n- date accepts shorthand ("now 1-H", "today 1-m", "today 5-y", etc.) or custom "yyyy-mm-dd yyyy-mm-dd" ranges.\n- data_type controls the shape of returned data: TIMESERIES, GEO_MAP, GEO_MAP_0, RELATED_TOPICS, or RELATED_QUERIES.\n- Input: `{ text: string, hl: string, geo: string, data_type: "TIMESERIES" | "GEO_MAP" | "GEO_MAP_0" | "RELATED_TOPICS" | "RELATED_QUERIES", cat: string, date: string, ts: string }`\n- Output: `{ trends: object }`\n\n#### searchPerplexity\nSearch the web using the Perplexity API and return structured results.\n- Defaults to US results. Use countryCode (ISO code) to filter by country.\n- Returns 10 results by default, configurable from 1 to 20.\n- The variable receives text or JSON depending on exportType. 
The direct execution output always returns structured results.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, numResults?: number }`\n- Output: `{ results: { title: string, description: string, url: string }[] }`\n\n#### sendEmail\nSend an email to one or more configured recipient addresses.\n- Recipient email addresses are resolved from OAuth connections configured by the app creator. The user running the workflow does not specify the recipient directly.\n- If the body is a URL to a hosted HTML file on the CDN, the HTML is fetched and used as the email body.\n- When generateHtml is enabled, the body text is converted to a styled HTML email using an AI model.\n- connectionId can be a comma-separated list to send to multiple recipients.\n- The special connectionId "trigger_email" uses the email address that triggered the workflow.\n- Input: `{ subject: string, body: string, connectionId?: string, generateHtml?: boolean, generateHtmlInstructions?: string, generateHtmlModelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, attachments?: string[] }`\n- Output: `{ recipients: string[] }`\n\n#### sendGmailDraft\nSend an existing draft from the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- The draft is sent and removed from the Drafts folder.\n- Use the draft ID returned by the Create Gmail Draft or List Gmail Drafts steps.\n- Input: `{ draftId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### sendGmailMessage\nSend an email from the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- messageType controls the body format: "plain" for plain text, "html" for 
raw HTML, "markdown" for auto-converted markdown.\n- Input: `{ to: string, subject: string, message: string, connectionId?: string, messageType: "plain" | "html" | "markdown" }`\n- Output: `{ messageId: string }`\n\n#### sendSMS\nSend an SMS or MMS message to a phone number configured via OAuth connection.\n- User is responsible for configuring the connection to the number (MindStudio requires double opt-in to prevent spam)\n- If mediaUrls are provided, the message is sent as MMS instead of SMS\n- MMS supports up to 10 media URLs (images, video, audio, PDF) with a 5MB limit per file\n- MMS is only supported on US and Canadian carriers; international numbers will receive SMS only (media silently dropped)\n- Input: `{ body: string, connectionId?: string, mediaUrls?: string[] }`\n- Output: `unknown`\n\n#### setGmailReadStatus\nMark one or more Gmail emails as read or unread.\n- Requires a Google OAuth connection with Gmail modify scope.\n- Accepts one or more message IDs as a comma-separated string or array.\n- Set markAsRead to true to mark as read, false to mark as unread.\n- Input: `{ messageIds: string, markAsRead: boolean, connectionId?: string }`\n- Output: `unknown`\n\n#### setRunTitle\nSet the title of the agent run for the user\'s history\n- Input: `{ title: string }`\n- Output: `unknown`\n\n#### setVariable\nExplicitly set a variable to a given value.\n- Useful for bootstrapping global variables or setting constants.\n- The variable name and value both support variable interpolation.\n- The type field is a UI hint only (controls input widget in the editor).\n- Input: `{ value: string | string[] }`\n- Output: `object`\n\n#### telegramEditMessage\nEdit a previously sent Telegram message. Use with the message ID returned by Send Telegram Message.\n- Only text messages sent by the bot can be edited.\n- The messageId is returned by the Send Telegram Message step.\n- Common pattern: send a "Processing..." 
message, do work, then edit it with the result.\n- Input: `{ botToken: string, chatId: string, messageId: string, text: string }`\n- Output: `unknown`\n\n#### telegramReplyToMessage\nSend a reply to a specific Telegram message. The reply will be visually threaded in the chat.\n- Use the rawMessage.message_id from the incoming trigger variables to reply to the user\'s message.\n- Especially useful in group chats where replies provide context.\n- Returns the sent message ID, which can be used with Edit Telegram Message.\n- Input: `{ botToken: string, chatId: string, replyToMessageId: string, text: string }`\n- Output: `{ messageId: number }`\n\n#### telegramSendAudio\nSend an audio file to a Telegram chat as music or a voice note via a bot.\n- "audio" mode sends as a standard audio file. "voice" mode sends as a voice message (re-uploads the file for large file support).\n- Input: `{ botToken: string, chatId: string, audioUrl: string, mode: "audio" | "voice", caption?: string }`\n- Output: `unknown`\n\n#### telegramSendFile\nSend a document/file to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, fileUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSendImage\nSend an image to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, imageUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSendMessage\nSend a text message to a Telegram chat via a bot.\n- Messages are sent using MarkdownV2 formatting. 
Special characters are auto-escaped.\n- botToken format is "botId:token" \u2014 both parts are required.\n- Returns the sent message ID, which can be used with Edit Telegram Message to update the message later.\n- Input: `{ botToken: string, chatId: string, text: string }`\n- Output: `{ messageId: number }`\n\n#### telegramSendVideo\nSend a video to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, videoUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSetTyping\nShow the "typing..." indicator in a Telegram chat via a bot.\n- The typing indicator automatically expires after a few seconds. Use this right before sending a message for a natural feel.\n- Input: `{ botToken: string, chatId: string }`\n- Output: `unknown`\n\n#### textToSpeech\nGenerate an audio file from provided text using a speech model.\n- The text field contains the exact words to be spoken (not instructions).\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ text: string, intermediateAsset?: boolean, speechModelOverride?: { model: string, config?: object } }`\n- Output: `{ audioUrl: string }`\n\n#### transcribeAudio\nConvert an audio file to text using a transcription model.\n- The prompt field provides optional context to improve transcription accuracy (e.g. 
language, speaker names, domain).\n- Input: `{ audioUrl: string, prompt: string, transcriptionModelOverride?: { model: string, config?: object } }`\n- Output: `{ text: string }`\n\n#### trimMedia\nTrim an audio or video clip\n- Input: `{ inputUrl: string, start?: number | string, duration?: string | number, intermediateAsset?: boolean }`\n- Output: `{ mediaUrl: string }`\n\n#### updateGmailLabels\nAdd or remove labels on Gmail messages, identified by message IDs or a search query.\n- Requires a Google OAuth connection with Gmail modify scope.\n- Provide either a query (Gmail search syntax) or explicit messageIds to target messages.\n- Label IDs can be label names or Gmail label IDs \u2014 names are resolved automatically.\n- Input: `{ query: string, connectionId?: string, messageIds: string, addLabelIds: string, removeLabelIds: string }`\n- Output: `{ updatedMessageIds: string[] }`\n\n#### uploadDataSourceDocument\nUpload a file into an existing data source from a URL or raw text content.\n- If "file" is a single URL, the file is downloaded from that URL and uploaded.\n- If "file" is any other string, a .txt document is created from that content and uploaded.\n- The block waits (polls) for processing to complete before transitioning, up to 5 minutes.\n- Once processing finishes, vectors are loaded into Milvus so the data source is immediately queryable.\n- Supported file types (when using a URL) are the same as the data source upload UI (PDF, DOCX, TXT, etc.).\n- Input: `{ dataSourceId: string, file: string, fileName: string }`\n- Output: `unknown`\n\n#### upscaleImage\nIncrease the resolution of an image using AI upscaling.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, targetResolution: "2k" | "4k" | "8k", engine: "standard" | "pro" }`\n- Output: `{ imageUrl: string }`\n\n#### upscaleVideo\nUpscale a video file\n- Input: `{ videoUrl: string, targetResolution: "720p" | "1080p" | "2K" | "4K", engine: "standard" | "pro" | "ultimate" | 
"flashvsr" | "seedance" | "seedvr2" | "runwayml/upscale-v1", intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoFaceSwap\nSwap faces in a video file\n- Input: `{ videoUrl: string, faceImageUrl: string, targetIndex: number, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoRemoveBackground\nRemove or replace background from a video\n- Input: `{ videoUrl: string, newBackground: "transparent" | "image", newBackgroundImageUrl?: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoRemoveWatermark\nRemove a watermark from a video\n- Input: `{ videoUrl: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### watermarkImage\nOverlay a watermark image onto another image.\n- The watermark is placed at the specified corner with configurable padding and width.\n- Input: `{ imageUrl: string, watermarkImageUrl: string, corner: "top-left" | "top-right" | "bottom-left" | "bottom-right", paddingPx: number, widthPx: number, intermediateAsset?: boolean }`\n- Output: `{ imageUrl: string }`\n\n#### watermarkVideo\nAdd an image watermark to a video\n- Input: `{ videoUrl: string, imageUrl: string, corner: "top-left" | "top-right" | "bottom-left" | "bottom-right", paddingPx: number, widthPx: number, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n### ActiveCampaign\n\n#### activeCampaignAddNote\nAdd a note to an existing contact in ActiveCampaign.\n- Requires an ActiveCampaign OAuth connection (connectionId).\n- The contact must already exist \u2014 use the contact ID from a previous create or search step.\n- Input: `{ contactId: string, note: string, connectionId?: string }`\n- Output: `unknown`\n\n#### activeCampaignCreateContact\nCreate or sync a contact in ActiveCampaign.\n- Requires an ActiveCampaign OAuth connection (connectionId).\n- If a contact with the email already exists, it may be updated depending 
on ActiveCampaign settings.\n- Custom fields are passed as a key-value map where keys are field IDs.\n- Input: `{ email: string, firstName: string, lastName: string, phone: string, accountId: string, customFields: object, connectionId?: string }`\n- Output: `{ contactId: string }`\n\n### Airtable\n\n#### airtableCreateUpdateRecord\nCreate a new record or update an existing record in an Airtable table.\n- If recordId is provided, updates that record. Otherwise, creates a new one.\n- When updating with updateMode "onlySpecified", unspecified fields are left as-is. With "all", unspecified fields are cleared.\n- Array fields (e.g. multipleAttachments) accept arrays of values.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId?: string, updateMode?: "onlySpecified" | "all", fields: unknown, recordData: object }`\n- Output: `{ recordId: string }`\n\n#### airtableDeleteRecord\nDelete a record from an Airtable table by its record ID.\n- Requires an active Airtable OAuth connection (connectionId).\n- Silently succeeds if the record does not exist.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId: string }`\n- Output: `{ deleted: boolean }`\n\n#### airtableGetRecord\nFetch a single record from an Airtable table by its record ID.\n- Requires an active Airtable OAuth connection (connectionId).\n- If the record is not found, returns a string message instead of a record object.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId: string }`\n- Output: `{ record: { id: string, createdTime: string, fields: object } | null }`\n\n#### airtableGetTableRecords\nFetch multiple records from an Airtable table with optional pagination.\n- Requires an active Airtable OAuth connection (connectionId).\n- Default limit is 100 records. Maximum is 1000.\n- When outputFormat is \'csv\', the variable receives CSV text. 
The direct execution output always returns parsed records.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, outputFormat?: "json" | "csv", limit?: number }`\n- Output: `{ records: { id: string, createdTime: string, fields: object }[] }`\n\n### Apollo\n\n#### enrichPerson\nLook up professional information about a person using Apollo.io. Search by ID, name, LinkedIn URL, email, or domain.\n- At least one search parameter must be provided.\n- Returns enriched data from Apollo including contact details, employment info, and social profiles.\n- Input: `{ params: { id: string, name: string, linkedinUrl: string, email: string, domain: string } }`\n- Output: `{ data: unknown }`\n\n#### peopleSearch\nSearch for people matching specific criteria using Apollo.io. Supports natural language queries and advanced filters.\n- Can use a natural language "smartQuery" which is converted to Apollo search parameters by an AI model.\n- Advanced params can override or supplement the smart query results.\n- Optionally enriches returned people and/or their organizations for additional detail.\n- Results are paginated. Use limit and page to control the result window.\n- Input: `{ smartQuery: string, enrichPeople: boolean, enrichOrganizations: boolean, limit: string, page: string, params: { personTitles: string, includeSimilarTitles: string, qKeywords: string, personLocations: string, personSeniorities: string, organizationLocations: string, qOrganizationDomainsList: string, contactEmailStatus: string, organizationNumEmployeesRanges: string, revenueRangeMin: string, revenueRangeMax: string, currentlyUsingAllOfTechnologyUids: string, currentlyUsingAnyOfTechnologyUids: string, currentlyNotUsingAnyOfTechnologyUids: string } }`\n- Output: `{ results: unknown }`\n\n### Coda\n\n#### codaCreateUpdatePage\nCreate a new page or update an existing page in a Coda document.\n- Requires a Coda OAuth connection (connectionId).\n- If pageData.pageId is provided, updates that page. 
Otherwise, creates a new one.\n- Page content is provided as markdown and converted to Coda\'s canvas format.\n- When updating, insertionMode controls how content is applied (default: \'append\').\n- Input: `{ connectionId?: string, pageData: { docId: string, pageId?: string, name: string, subtitle: string, iconName: string, imageUrl: string, parentPageId?: string, pageContent: string | unknown, contentUpdate?: unknown, insertionMode?: string } }`\n- Output: `{ pageId: string }`\n\n#### codaCreateUpdateRow\nCreate a new row or update an existing row in a Coda table.\n- Requires a Coda OAuth connection (connectionId).\n- If rowId is provided, updates that row. Otherwise, creates a new one.\n- Row data keys are column IDs. Empty values are excluded.\n- Input: `{ connectionId?: string, docId: string, tableId: string, rowId?: string, rowData: object }`\n- Output: `{ rowId: string }`\n\n#### codaFindRow\nSearch for a row in a Coda table by matching column values.\n- Requires a Coda OAuth connection (connectionId).\n- Returns the first row matching all specified column values, or null if no match.\n- Search criteria in rowData are ANDed together.\n- Input: `{ connectionId?: string, docId: string, tableId: string, rowData: object }`\n- Output: `{ row: { id: string, values: object } | null }`\n\n#### codaGetPage\nExport and read the contents of a page from a Coda document.\n- Requires a Coda OAuth connection (connectionId).\n- Page export is asynchronous on Coda\'s side \u2014 there may be a brief delay while it processes.\n- If a page was just created in a prior step, there is an automatic 20-second retry if the first export attempt fails.\n- Input: `{ connectionId?: string, docId: string, pageId: string, outputFormat?: "html" | "markdown" }`\n- Output: `{ content: string }`\n\n#### codaGetTableRows\nFetch rows from a Coda table with optional pagination.\n- Requires a Coda OAuth connection (connectionId).\n- Default limit is 10000 rows. 
Rows are fetched in pages of 500.\n- When outputFormat is \'csv\', the variable receives CSV text. The direct execution output always returns parsed rows.\n- Input: `{ connectionId?: string, docId: string, tableId: string, limit?: number | string, outputFormat?: "json" | "csv" }`\n- Output: `{ rows: { id: string, values: object }[] }`\n\n### Facebook\n\n#### scrapeFacebookPage\nScrape a Facebook page\n- Input: `{ pageUrl: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeFacebookPosts\nGet all the posts for a Facebook page\n- Input: `{ pageUrl: string }`\n- Output: `{ data: unknown }`\n\n### Gmail\n\n#### deleteGmailEmail\nMove an email to trash in the connected Gmail account (recoverable delete).\n- Requires a Google OAuth connection with Gmail modify scope.\n- Uses trash (recoverable) rather than permanent delete.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### getGmailDraft\nRetrieve a specific draft from Gmail by draft ID.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the draft content including subject, recipients, sender, and body.\n- Input: `{ draftId: string, connectionId?: string }`\n- Output: `{ draftId: string, messageId: string, subject: string, to: string, from: string, body: string }`\n\n#### getGmailEmail\nRetrieve a specific email from Gmail by message ID.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the email subject, sender, recipient, date, body (plain text preferred, falls back to HTML), and labels.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `{ messageId: string, subject: string, from: string, to: string, date: string, body: string, labels: string }`\n\n#### listGmailDrafts\nList drafts in the connected Gmail account.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns up to 50 drafts (default 10).\n- The variable receives text or JSON depending on exportType.\n- Input: `{ connectionId?: string, 
limit?: string, exportType: "json" | "text" }`\n- Output: `{ drafts: { draftId: string, messageId: string, subject: string, to: string, snippet: string }[] }`\n\n#### replyToGmailEmail\nReply to an existing email in Gmail. The reply is threaded under the original message.\n- Requires a Google OAuth connection with Gmail compose and readonly scopes.\n- The reply is sent to the original sender and threaded under the original message.\n- messageType controls the body format: "plain", "html", or "markdown".\n- Input: `{ messageId: string, message: string, messageType: "plain" | "html" | "markdown", connectionId?: string }`\n- Output: `{ messageId: string }`\n\n### Google\n\n#### createGoogleDoc\nCreate a new Google Document and optionally populate it with content.\n- textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.\n- Input: `{ title: string, text: string, connectionId?: string, textType: "plain" | "html" | "markdown" }`\n- Output: `{ documentUrl: string }`\n\n#### createGoogleSheet\nCreate a new Google Spreadsheet and populate it with CSV data.\n- Input: `{ title: string, text: string, connectionId?: string }`\n- Output: `{ spreadsheetUrl: string }`\n\n#### deleteGoogleSheetRows\nDelete a range of rows from a Google Spreadsheet.\n- Requires a Google OAuth connection with Drive scope.\n- startRow and endRow are 1-based row numbers (inclusive).\n- If sheetName is omitted, operates on the first sheet.\n- Input: `{ documentId: string, sheetName?: string, startRow: string, endRow: string, connectionId?: string }`\n- Output: `unknown`\n\n#### fetchGoogleDoc\nFetch the contents of an existing Google Document.\n- exportType controls the output format: "html" for HTML markup, "markdown" for Markdown, "json" for structured JSON, "plain" for plain text.\n- Input: `{ documentId: string, connectionId?: string, exportType: "html" | "markdown" | "json" | "plain" }`\n- Output: `{ content: string }`\n\n#### 
fetchGoogleSheet\nFetch contents of a Google Spreadsheet range.\n- range uses A1 notation (e.g. "Sheet1!A1:C10"). Omit to fetch the entire first sheet.\n- exportType controls the output format: "csv" for comma-separated values, "json" for structured JSON.\n- Input: `{ spreadsheetId: string, range: string, connectionId?: string, exportType: "csv" | "json" }`\n- Output: `{ content: string }`\n\n#### getGoogleSheetInfo\nGet metadata about a Google Spreadsheet including sheet names, row counts, and column counts.\n- Requires a Google OAuth connection with Drive scope.\n- Returns the spreadsheet title and a list of all sheets with their dimensions.\n- Input: `{ documentId: string, connectionId?: string }`\n- Output: `{ title: string, sheets: { sheetId: number, title: string, rowCount: number, columnCount: number }[] }`\n\n#### updateGoogleDoc\nUpdate the contents of an existing Google Document.\n- operationType controls how content is applied: "addToTop" prepends, "addToBottom" appends, "overwrite" replaces all content.\n- textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.\n- Input: `{ documentId: string, connectionId?: string, text: string, textType: "plain" | "html" | "markdown", operationType: "addToTop" | "addToBottom" | "overwrite" }`\n- Output: `{ documentUrl: string }`\n\n#### updateGoogleSheet\nUpdate a Google Spreadsheet with new data.\n- operationType controls how data is written: "addToBottom" appends rows, "overwrite" replaces all data, "range" writes to a specific cell range.\n- Data should be provided as CSV in the text field.\n- Input: `{ text: string, connectionId?: string, spreadsheetId: string, range: string, operationType: "addToBottom" | "overwrite" | "range" }`\n- Output: `{ spreadsheetUrl: string }`\n\n### Google Calendar\n\n#### createGoogleCalendarEvent\nCreate a new event on a Google Calendar.\n- Requires a Google OAuth connection with Calendar events scope.\n- 
Date/time values must be ISO 8601 format (e.g. "2025-07-02T10:00:00-07:00").\n- Attendees are specified as one email address per line in a single string.\n- Set addMeetLink to true to automatically attach a Google Meet video call.\n- Input: `{ connectionId?: string, summary: string, description?: string, location?: string, startDateTime: string, endDateTime: string, attendees?: string, addMeetLink?: boolean, calendarId?: string }`\n- Output: `{ eventId: string, htmlLink: string }`\n\n#### deleteGoogleCalendarEvent\nRetrieve a specific event from a Google Calendar by event ID.\n- Requires a Google OAuth connection with Calendar events scope.\n- The variable receives JSON or XML-like text depending on exportType. The direct execution output always returns the structured event.\n- Input: `{ connectionId?: string, eventId: string, calendarId?: string }`\n- Output: `unknown`\n\n#### getGoogleCalendarEvent\nRetrieve a specific event from a Google Calendar by event ID.\n- Requires a Google OAuth connection with Calendar events scope.\n- The variable receives JSON or XML-like text depending on exportType. 
The direct execution output always returns the structured event.\n- Input: `{ connectionId?: string, eventId: string, exportType: "json" | "text", calendarId?: string }`\n- Output: `{ event: { id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null } }`\n\n#### listGoogleCalendarEvents\nList upcoming events from a Google Calendar, ordered by start time.\n- Requires a Google OAuth connection with Calendar events scope.\n- Only returns future events (timeMin = now).\n- The variable receives JSON or XML-like text depending on exportType. 
The direct execution output always returns structured events.\n- Input: `{ connectionId?: string, limit: number, exportType: "json" | "text", calendarId?: string }`\n- Output: `{ events: ({ id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null })[] }`\n\n#### searchGoogleCalendarEvents\nSearch for events in a Google Calendar by keyword, date range, or both.\n- Requires a Google OAuth connection with Calendar events scope.\n- Supports keyword search via "query" and date filtering via "timeMin"/"timeMax" (ISO 8601 format).\n- Unlike "List Events" which only shows future events, this allows searching past events too.\n- Input: `{ query?: string, timeMin?: string, timeMax?: string, calendarId?: string, limit?: number, exportType: "json" | "text", connectionId?: string }`\n- Output: `{ events: ({ id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null })[] }`\n\n#### updateGoogleCalendarEvent\nUpdate an existing event on a Google Calendar. 
Only specified fields are changed.\n- Requires a Google OAuth connection with Calendar events scope.\n- Fetches the existing event first, then applies only the provided updates. Omitted fields are left unchanged.\n- Attendees are specified as one email address per line, and replace the entire attendee list.\n- Input: `{ connectionId?: string, eventId: string, summary?: string, description?: string, location?: string, startDateTime?: string, endDateTime?: string, attendees?: string, calendarId?: string }`\n- Output: `{ eventId: string, htmlLink: string }`\n\n### Google Drive\n\n#### getGoogleDriveFile\nDownload a file from Google Drive and rehost it on the CDN. Returns a public CDN URL.\n- Requires a Google OAuth connection with Drive scope.\n- Google-native files (Docs, Sheets, Slides) cannot be downloaded \u2014 use dedicated steps instead.\n- Maximum file size: 200MB.\n- The file is downloaded and re-uploaded to the CDN; the returned URL is publicly accessible.\n- Input: `{ fileId: string, connectionId?: string }`\n- Output: `{ url: string, name: string, mimeType: string, size: number }`\n\n#### listGoogleDriveFiles\nList files in a Google Drive folder.\n- Requires a Google OAuth connection with Drive scope.\n- If folderId is omitted, lists files in the root folder.\n- Returns file metadata including name, type, size, and links.\n- Input: `{ folderId?: string, limit?: number, connectionId?: string, exportType: "json" | "text" }`\n- Output: `{ files: { id: string, name: string, mimeType: string, size: string, webViewLink: string, createdTime: string, modifiedTime: string }[] }`\n\n#### searchGoogleDrive\nSearch for files in Google Drive by keyword.\n- Requires a Google OAuth connection with Drive scope.\n- Searches file content and names using Google Drive\'s fullText search.\n- Input: `{ query: string, limit?: number, connectionId?: string, exportType: "json" | "text" }`\n- Output: `{ files: { id: string, name: string, mimeType: string, size: string, webViewLink: 
string, createdTime: string, modifiedTime: string }[] }`\n\n### HubSpot\n\n#### hubspotCreateCompany\nCreate a new company or update an existing one in HubSpot. Matches by domain.\n- Requires a HubSpot OAuth connection (connectionId).\n- If a company with the given domain already exists, it is updated. Otherwise, a new one is created.\n- Property values are type-checked against enabledProperties before being sent to HubSpot.\n- Input: `{ connectionId?: string, company: { domain: string, name: string }, enabledProperties: ({ label: string, value: string, type: "string" | "number" | "bool" })[] }`\n- Output: `{ companyId: string }`\n\n#### hubspotCreateContact\nCreate a new contact or update an existing one in HubSpot. Matches by email address.\n- Requires a HubSpot OAuth connection (connectionId).\n- If a contact with the given email already exists, it is updated. Otherwise, a new one is created.\n- If companyDomain is provided, the contact is associated with that company (creating the company if needed).\n- Property values are type-checked against enabledProperties before being sent to HubSpot.\n- Input: `{ connectionId?: string, contact: { email: string, firstname: string, lastname: string }, enabledProperties: ({ label: string, value: string, type: "string" | "number" | "bool" })[], companyDomain: string }`\n- Output: `{ contactId: string }`\n\n#### hubspotGetCompany\nLook up a HubSpot company by domain name or company ID.\n- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the company is not found.\n- When searching by domain, performs a search query then fetches the full company record.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.\n- Input: `{ connectionId?: string, searchBy: "domain" | "id", companyDomain: string, companyId: string, additionalProperties: string[] }`\n- Output: `{ company: { id: string, properties: object, createdAt: string, updatedAt: string, archived: boolean } | null }`\n\n#### 
hubspotGetContact\nLook up a HubSpot contact by email address or contact ID.\n- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the contact is not found.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.\n- Input: `{ connectionId?: string, searchBy: "email" | "id", contactEmail: string, contactId: string, additionalProperties: string[] }`\n- Output: `{ contact: { id: string, properties: object, createdAt: string, updatedAt: string, archived: boolean } | null }`\n\n### Hunter.io\n\n#### hunterApiCompanyEnrichment\nLook up company information by domain using Hunter.io.\n- Returns company name, description, location, industry, size, technologies, and more.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns null if the company is not found.\n- Input: `{ domain: string }`\n- Output: `{ data: { name: string, domain: string, description: string | null, country: string | null, state: string | null, city: string | null, industry: string | null, employees_range: string | null, logo_url: string | null, technologies: string[] } | null }`\n\n#### hunterApiDomainSearch\nSearch for email addresses associated with a domain using Hunter.io.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns a list of email addresses found for the domain along with organization info.\n- Input: `{ domain: string }`\n- Output: `{ data: { domain: string, disposable: boolean, webmail: boolean, accept_all: boolean, pattern: string, organization: string, country: string | null, state: string | null, emails: ({ value: string, type: string, confidence: number, first_name: string | null, last_name: string | null, position: string | null, seniority: string | null, department: string | null, linkedin: string | null, twitter: string | null, phone_number: string | null })[], linked_domains: string[] } }`\n\n#### hunterApiEmailFinder\nFind an email address for a specific 
person at a domain using Hunter.io.\n- Requires a first name, last name, and domain.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns the most likely email address with a confidence score.\n- Input: `{ domain: string, firstName: string, lastName: string }`\n- Output: `{ data: { first_name: string, last_name: string, email: string, score: number, domain: string, accept_all: boolean, position: string | null, twitter: string | null, linkedin_url: string | null, phone_number: string | null, company: string | null, sources: { domain: string, uri: string, extracted_on: string }[] } }`\n\n#### hunterApiEmailVerification\nVerify whether an email address is valid and deliverable using Hunter.io.\n- Checks email format, MX records, SMTP server, and mailbox deliverability.\n- Returns a status ("valid", "invalid", "accept_all", "webmail", "disposable", "unknown") and a score.\n- Input: `{ email: string }`\n- Output: `{ data: { status: string, result: string, score: number, email: string, regexp: boolean, gibberish: boolean, disposable: boolean, webmail: boolean, mx_records: boolean, smtp_server: boolean, smtp_check: boolean, accept_all: boolean, block: boolean, sources: { domain: string, uri: string, extracted_on: string }[] } }`\n\n#### hunterApiPersonEnrichment\nLook up professional information about a person by their email address using Hunter.io.\n- Returns name, job title, social profiles, and company information.\n- If the person is not found, returns an object with an error message instead of throwing.\n- Input: `{ email: string }`\n- Output: `{ data: { first_name: string, last_name: string, email: string, position: string | null, seniority: string | null, department: string | null, linkedin_url: string | null, twitter: string | null, phone_number: string | null, company: { name: string, domain: string, industry: string | null } | null } | { error: string } }`\n\n### Instagram\n\n#### scrapeInstagramComments\nGet all the comments 
for an Instagram post\n- Input: `{ postUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramMentions\nScrape an Instagram profile\'s mentions\n- Input: `{ profileUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramPosts\nGet all the posts for an Instagram profile\n- Input: `{ profileUrl: string, resultsLimit: string, onlyPostsNewerThan: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramProfile\nScrape an Instagram profile\n- Input: `{ profileUrl: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramReels\nGet all the reels for an Instagram profile\n- Input: `{ profileUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n### LinkedIn\n\n#### postToLinkedIn\nCreate a post on LinkedIn from the connected account.\n- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, video posts, document posts, and article posts.\n- Attach one media type per post: image, video, document, or article.\n- Documents support PDF, PPT, PPTX, DOC, DOCX (max 100MB, 300 pages). 
Displays as a slideshow carousel.\n- Articles create a link preview with optional custom title, description, and thumbnail.\n- Visibility controls who can see the post.\n- Input: `{ message: string, visibility: "PUBLIC" | "CONNECTIONS", imageUrl?: string, videoUrl?: string, documentUrl?: string, articleUrl?: string, titleText?: string, descriptionText?: string, connectionId?: string }`\n- Output: `unknown`\n\n### Meta Threads\n\n#### scrapeMetaThreadsProfile\nScrape a Meta Threads profile\n- Input: `{ profileUrl: string }`\n- Output: `{ data: unknown }`\n\n### Notion\n\n#### notionCreatePage\nCreate a new page in Notion as a child of an existing page.\n- Requires a Notion OAuth connection (connectionId).\n- Content is provided as markdown and converted to Notion blocks (headings, paragraphs, lists, code, quotes).\n- The page is created as a child of the specified parent page (pageId).\n- Input: `{ pageId: string, content: string, title: string, connectionId?: string }`\n- Output: `{ pageId: string, pageUrl: string }`\n\n#### notionUpdatePage\nUpdate the content of an existing Notion page.\n- Requires a Notion OAuth connection (connectionId).\n- Content is provided as markdown and converted to Notion blocks.\n- "append" mode adds content to the end of the page. "overwrite" mode deletes all existing blocks first.\n- Input: `{ pageId: string, content: string, mode: "append" | "overwrite", connectionId?: string }`\n- Output: `{ pageId: string, pageUrl: string }`\n\n### X\n\n#### postToX\nCreate a post on X (Twitter) from the connected account.\n- Requires an X OAuth connection (connectionId).\n- Maximum 280 characters of text.\n- Optionally attach up to 4 media items (images, GIFs, or videos) via mediaUrls.\n- Media URLs must be publicly accessible. The service fetches and uploads them to X.\n- Supported formats: JPEG, PNG, GIF, WEBP, MP4. 
Images up to 5MB, videos up to 512MB.\n- Input: `{ text: string, connectionId?: string, mediaUrls?: string[] }`\n- Output: `unknown`\n\n#### searchXPosts\nSearch recent X (Twitter) posts matching a query.\n- Searches only the past 7 days of posts.\n- Query supports X API v2 search operators (up to 512 characters).\nAvailable search operators in query:\n| Operator | Description |\n| -----------------| -------------------------------------------------|\n| from: | Posts from a specific user (e.g., from:elonmusk) |\n| to: | Posts sent to a specific user (e.g., to:NASA) |\n| @ | Mentions a user (e.g., @openai) |\n| # | Hashtag search (e.g., #AI) |\n| is:retweet | Filters retweets |\n| is:reply | Filters replies |\n| has:media | Posts containing media (images, videos, or GIFs) |\n| has:links | Posts containing URLs |\n| lang: | Filters by language (e.g., lang:en) |\n| - | Excludes specific terms (e.g., -spam) |\n| () | Groups terms or operators (e.g., (AI OR ML)) |\n| AND, OR, NOT | Boolean logic for combining or excluding terms |\nConjunction-Required Operators (must be combined with a standalone operator):\n| Operator | Description |\n| ------------ | -----------------------------------------------|\n| has:media | Posts containing media (images, videos, or GIFs) |\n| has:links | Posts containing URLs |\n| is:retweet | Filters retweets |\n| is:reply | Filters replies |\nFor example, has:media alone is invalid, but #AI has:media is valid.\n- Input: `{ query: string, scope: "recent" | "all", options: { startTime?: string, endTime?: string, maxResults?: number } }`\n- Output: `{ posts: { id: string, authorId: string, dateCreated: string, text: string, stats: { retweets: number, replies: number, likes: number } }[] }`\n\n### YouTube\n\n#### fetchYoutubeCaptions\nRetrieve the captions/transcript for a YouTube video.\n- Supports multiple languages via the language parameter.\n- "text" export produces timestamped plain text; "json" export produces structured transcript 
data.\n- Input: `{ videoUrl: string, exportType: "text" | "json", language: string }`\n- Output: `{ transcripts: { text: string, start: number }[] }`\n\n#### fetchYoutubeChannel\nRetrieve metadata and recent videos for a YouTube channel.\n- Accepts a YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID).\n- Returns channel info and video listings as a JSON object.\n- Input: `{ channelUrl: string }`\n- Output: `object`\n\n#### fetchYoutubeComments\nRetrieve comments for a YouTube video.\n- Paginates through comments (up to 5 pages).\n- "text" export produces markdown-formatted text; "json" export produces structured comment data.\n- Input: `{ videoUrl: string, exportType: "text" | "json", limitPages: string }`\n- Output: `{ comments: { id: string, link: string, publishedDate: string, text: string, likes: number, replies: number, author: string, authorLink: string, authorImg: string }[] }`\n\n#### fetchYoutubeVideo\nRetrieve metadata for a YouTube video (title, description, stats, channel info).\n- Returns video metadata, channel info, and engagement stats.\n- Video format data is excluded from the response.\n- Input: `{ videoUrl: string }`\n- Output: `object`\n\n#### searchYoutube\nSearch for YouTube videos by keyword.\n- Supports pagination (up to 5 pages) and country/language filters.\n- Use the filter/filterType fields for YouTube search parameter (sp) filters.\n- Input: `{ query: string, limitPages: string, filter: string, filterType: string, countryCode?: string, languageCode?: string }`\n- Output: `{ results: object }`\n\n#### searchYoutubeTrends\nRetrieve trending videos on YouTube by category and region.\n- Categories: "now" (trending now), "music", "gaming", "films".\n- Supports country and language filtering.\n- Input: `{ bp: "now" | "music" | "gaming" | "films", hl: string, gl: string }`\n- Output: `object`\n\n### Helpers\n\n#### `listModels()`\nList all available AI models across all categories.\n\nOutput:\n```typescript\n{\n 
models: {\n id: string;\n name: string; // Display name\n type: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";\n maxTemperature: number;\n maxResponseSize: number;\n inputs: object[]; // Accepted input types\n }[]\n}\n```\n\n#### `listModelsByType(modelType)`\nList AI models filtered by type.\n- `modelType`: `"llm_chat"` | `"image_generation"` | `"video_generation"` | `"video_analysis"` | `"text_to_speech"` | `"vision"` | `"transcription"`\n- Output: same as `listModels()`\n\n#### `listModelsSummary()`\nList all available AI models (summary). Returns only id, name, type, and tags. Suitable for display or consumption inside a model context window.\n\nOutput:\n```typescript\n{\n models: {\n id: string;\n name: string;\n type: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";\n tags: string; // Comma-separated tags\n }[]\n}\n```\n\n#### `listModelsSummaryByType(modelType)`\nList AI models (summary) filtered by type.\n- `modelType`: `"llm_chat"` | `"image_generation"` | `"video_generation"` | `"video_analysis"` | `"text_to_speech"` | `"vision"` | `"transcription"`\n- Output: same as `listModelsSummary()`\n\n#### `listConnectors()`\nList available OAuth connector services (Slack, Google, HubSpot, etc.) and their actions. 
These are third-party integrations \u2014 for most tasks, use actions directly instead.\n\nOutput:\n```typescript\n{\n services: {\n id: string;\n name: string;\n icon: string;\n actions: { id: string; name: string }[];\n }[]\n}\n```\n\n#### `getConnector(serviceId)`\nGet details for a single OAuth connector service by ID.\n\nOutput:\n```typescript\n{\n service: {\n id: string;\n name: string;\n icon: string;\n actions: { id: string; name: string }[];\n }\n}\n```\n\n#### `getConnectorAction(serviceId, actionId)`\nGet the full configuration for an OAuth connector action, including all input fields needed to call it via `runFromConnectorRegistry`. OAuth connectors are sourced from the open-source MindStudio Connector Registry (MSCR) with 850+ actions across third-party services.\n\nOutput:\n```typescript\n{\n action: {\n id: string;\n name: string;\n description: string;\n quickHelp: string;\n configuration: { title: string; items: { label: string; helpText: string; variable: string; type: string; defaultValue: string; placeholder: string; selectOptions?: object }[] }[];\n }\n}\n```\n\n#### `listConnections()`\nList OAuth connections for the organization (authenticated third-party service links). Use the returned connection IDs when calling OAuth connector actions. Connectors require the user to connect to the third-party service in MindStudio before they can be used.\n\nOutput:\n```typescript\n{\n connections: {\n id: string; // Connection ID to pass to connector actions\n provider: string; // Integration provider (e.g. slack, google)\n name: string; // Display name or account identifier\n }[]\n}\n```\n\n#### `estimateStepCost(stepType, step?, options?)`\nEstimate the cost of executing a step before running it. Pass the same step config you would use for execution.\n\n```typescript\nconst estimate = await agent.estimateStepCost(\'generateText\', { message: \'Hello\' });\n```\n\n- `stepType`: string \u2014 The action name (e.g. 
`"generateText"`).\n- `step`: object \u2014 Optional action input parameters for more accurate estimates.\n- `options`: `{ appId?: string, workflowId?: string }` \u2014 Optional context for pricing.\n\nOutput:\n```typescript\n{\n costType?: string; // "free" when the step has no cost\n estimates?: {\n eventType: string; // Billing event type\n label: string; // Human-readable cost label\n unitPrice: number; // Price per unit in billing units\n unitType: string; // What constitutes a unit (e.g. "token", "request")\n estimatedCost?: number; // Estimated total cost, or null if not estimable\n quantity: number; // Number of billable units\n }[]\n}\n```\n\n#### `changeName(displayName)`\nUpdate the display name of the authenticated agent. Useful for agents to set their own name after connecting.\n\n```typescript\nawait agent.changeName(\'My Agent\');\n```\n\n#### `changeProfilePicture(profilePictureUrl)`\nUpdate the profile picture of the authenticated agent. Useful for agents to set their own avatar after connecting.\n\n```typescript\nawait agent.changeProfilePicture(\'https://example.com/avatar.png\');\n```\n\n#### `uploadFile(content, options)`\nUpload a file to the MindStudio CDN. Gets a signed upload URL, PUTs the file content, and returns the permanent public URL.\n\n```typescript\nimport { readFileSync } from \'fs\';\nconst { url } = await agent.uploadFile(readFileSync(\'photo.png\'), { extension: \'png\', type: \'image/png\' });\n```\n\n- `content`: `Buffer | Uint8Array` \u2014 The file content.\n- `options.extension`: string \u2014 File extension without the dot (e.g. `"png"`, `"jpg"`, `"mp4"`).\n- `options.type`: string (optional) \u2014 MIME type (e.g. `"image/png"`). Determines which CDN subdomain is used.\n\nOutput: `{ url: string }` \u2014 The permanent public CDN URL.\n';
4620
+ }
4621
+ });
4622
+
4623
+ // src/ask/prompt/index.ts
4624
+ async function buildSystemPrompt(agent) {
4625
+ const [modelsResult, connectionsResult, connectorsResult, llmsResult] = await Promise.allSettled([
4626
+ agent.listModelsSummary(),
4627
+ agent.listConnections(),
4628
+ agent.listConnectors(),
4629
+ Promise.resolve().then(() => (init_llms_content(), llms_content_exports))
4630
+ ]);
4631
+ const modelsSummary = modelsResult.status === "fulfilled" ? modelsResult.value.models.map(
4632
+ (m) => `- ${m.id} (${m.name}, type: ${m.type}${m.popularity != null ? ", popularity: " + m.popularity : ""}${m.tags ? ", tags: " + m.tags : ""})`
4633
+ ).join("\n") : "(Could not load models \u2014 use the listModels tool to look them up)";
4634
+ const connections = connectionsResult.status === "fulfilled" && connectionsResult.value.connections.length > 0 ? connectionsResult.value.connections.map((c) => `- ${c.provider}: ${c.name} (id: ${c.id})`).join("\n") : "No OAuth connections configured.";
4635
+ const connectorServices = connectorsResult.status === "fulfilled" ? connectorsResult.value.services.map(
4636
+ (s) => `- ${s.id}: ${s.name} (${s.actions?.length ?? 0} actions)`
4637
+ ).join("\n") : "(Could not load connectors \u2014 use the getConnectorDetails tool)";
4638
+ const llmsContent2 = llmsResult.status === "fulfilled" ? llmsResult.value.llmsContent : "(Could not load action reference \u2014 use getActionDetails tool)";
4639
+ const referenceDocs = buildReferenceDocs({
4640
+ modelsSummary,
4641
+ connections,
4642
+ connectorServices,
4643
+ llmsContent: llmsContent2
4644
+ });
4600
4645
  return `${identity}
4601
4646
 
4602
4647
  ${referenceDocs}
@@ -4604,8 +4649,19 @@ ${referenceDocs}
4604
4649
  ${instructions}`;
4605
4650
  }
4606
4651
  var init_prompt = __esm({
4652
+ "src/ask/prompt/index.ts"() {
4653
+ "use strict";
4654
+ init_identity();
4655
+ init_reference();
4656
+ init_instructions();
4657
+ }
4658
+ });
4659
+
4660
+ // src/ask/prompt.ts
4661
+ var init_prompt2 = __esm({
4607
4662
  "src/ask/prompt.ts"() {
4608
4663
  "use strict";
4664
+ init_prompt();
4609
4665
  }
4610
4666
  });
4611
4667
 
@@ -4748,7 +4804,7 @@ var init_ask = __esm({
4748
4804
  init_client();
4749
4805
  init_sse();
4750
4806
  init_tools();
4751
- init_prompt();
4807
+ init_prompt2();
4752
4808
  DEFAULT_BASE_URL2 = "https://v1.mindstudio-api.com";
4753
4809
  ansi = {
4754
4810
  dim: (s) => `\x1B[2m${s}\x1B[0m`,
@@ -4820,7 +4876,7 @@ async function startMcpServer(options) {
4820
4876
  capabilities: { tools: {} },
4821
4877
  serverInfo: {
4822
4878
  name: "mindstudio-agent",
4823
- version: "0.1.29"
4879
+ version: "0.1.31"
4824
4880
  },
4825
4881
  instructions: 'Welcome to MindStudio \u2014 a platform with 200+ AI models, 850+ third-party integrations, and pre-built agents.\n\nGetting started:\n1. Call `ask` with any question about the SDK \u2014 it knows every action, model, and connector and returns working code with real model IDs and config options. Examples: ask("generate an image with FLUX"), ask("what models support vision?"), ask("how do I send a Slack message?").\n2. Call `changeName` to set your display name \u2014 use your name or whatever your user calls you. This is how you\'ll appear in MindStudio request logs.\n3. If you have a profile picture or icon, call `uploadFile` to upload it, then `changeProfilePicture` with the returned URL.\n4. For manual browsing, call `listActions` to discover all available actions.\n\nThen use the tools to generate text, images, video, audio, search the web, work with data sources, run agents, and more.\n\nImportant:\n- AI-powered actions (text generation, image generation, video, audio, etc.) cost money. Before running these, call `estimateActionCost` and confirm with the user before proceeding \u2014 unless they\'ve explicitly told you to go ahead.\n- Not all agents from `listAgents` are configured for API use. Do not try to run an agent just because it appears in the list \u2014 it will likely fail. Only run agents the user specifically asks you to run.'
4826
4882
  });
@@ -5776,7 +5832,7 @@ function isNewerVersion(current, latest) {
5776
5832
  return false;
5777
5833
  }
5778
5834
  async function checkForUpdate() {
5779
- const currentVersion = "0.1.29";
5835
+ const currentVersion = "0.1.31";
5780
5836
  if (!currentVersion) return null;
5781
5837
  try {
5782
5838
  const { loadConfig: loadConfig2, saveConfig: saveConfig2 } = await Promise.resolve().then(() => (init_config(), config_exports));
@@ -5805,7 +5861,7 @@ async function checkForUpdate() {
5805
5861
  }
5806
5862
  }
5807
5863
  function printUpdateNotice(latestVersion) {
5808
- const currentVersion = "0.1.29";
5864
+ const currentVersion = "0.1.31";
5809
5865
  process.stderr.write(
5810
5866
  `
5811
5867
  ${ansi2.cyanBright("Update available")} ${ansi2.gray(currentVersion + " \u2192")} ${ansi2.cyanBold(latestVersion)}
@@ -5818,7 +5874,7 @@ function isStandaloneBinary() {
5818
5874
  return !argv1.includes("node_modules");
5819
5875
  }
5820
5876
  async function cmdUpdate() {
5821
- const currentVersion = "0.1.29";
5877
+ const currentVersion = "0.1.31";
5822
5878
  process.stderr.write(
5823
5879
  ` ${ansi2.gray("Current version:")} ${currentVersion}
5824
5880
  `
@@ -5953,7 +6009,7 @@ async function cmdLogin(options) {
5953
6009
  process.stderr.write("\n");
5954
6010
  printLogo();
5955
6011
  process.stderr.write("\n");
5956
- const ver = "0.1.29";
6012
+ const ver = "0.1.31";
5957
6013
  process.stderr.write(
5958
6014
  ` ${ansi2.bold("MindStudio Agent")} ${ver ? " " + ansi2.gray("v" + ver) : ""}
5959
6015
  `
@@ -6280,7 +6336,7 @@ async function main() {
6280
6336
  try {
6281
6337
  if (command === "version" || command === "-v") {
6282
6338
  process.stdout.write(
6283
- "0.1.29\n"
6339
+ "0.1.31\n"
6284
6340
  );
6285
6341
  return;
6286
6342
  }