@mindstudio-ai/agent 0.1.15 → 0.1.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -398,7 +398,15 @@ var init_metadata = __esm({
398
398
  usageNotes: `- Source "user" sends the message to an LLM and returns the model's response.
399
399
  - Source "system" echoes the message content directly (no AI call).
400
400
  - Mode "background" saves the result to a variable. Mode "foreground" streams it to the user (not available in direct execution).
401
- - Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.`,
401
+ - Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.
402
+ - When executed inside a v2 app method (managed sandbox or local dev tunnel),
403
+ LLM token output can be streamed to the frontend in real time via an SSE
404
+ side-channel. The frontend opts in by passing { stream: true } to the method
405
+ invocation via @mindstudio-ai/interface. Tokens are published to Redis
406
+ pub/sub as they arrive and forwarded as SSE events on the invoke response.
407
+ The method code itself is unchanged \u2014 streaming is transparent to the
408
+ developer. See V2ExecutionService.ts and the invoke handler in V2Apps for
409
+ the server-side plumbing.`,
402
410
  inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The message to send (prompt for AI, or text for system echo)" }, "source": { "enum": ["user", "system"], "type": "string", "description": 'Message source: "user" sends to AI model, "system" echoes message content directly. Defaults to "user"' }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": 
"Model configuration override. Optional; uses the workflow's default model if not specified" }, "structuredOutputType": { "enum": ["text", "json", "csv"], "type": "string", "description": "Output format constraint for structured responses" }, "structuredOutputExample": { "type": "string", "description": "Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types." }, "chatHistoryMode": { "enum": ["include", "exclude"], "type": "string", "description": "Whether to include or exclude prior chat history in the AI context" } }, "required": ["message"], "description": "Configuration for the user message step" },
403
411
  outputSchema: { "type": "object", "properties": { "content": { "type": "string", "description": "The AI model's response or echoed system message content" } }, "required": ["content"] }
404
412
  },
@@ -1114,7 +1122,15 @@ var init_metadata = __esm({
1114
1122
  usageNotes: `- Source "user" sends the message to an LLM and returns the model's response.
1115
1123
  - Source "system" echoes the message content directly (no AI call).
1116
1124
  - Mode "background" saves the result to a variable. Mode "foreground" streams it to the user (not available in direct execution).
1117
- - Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.`,
1125
+ - Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.
1126
+ - When executed inside a v2 app method (managed sandbox or local dev tunnel),
1127
+ LLM token output can be streamed to the frontend in real time via an SSE
1128
+ side-channel. The frontend opts in by passing { stream: true } to the method
1129
+ invocation via @mindstudio-ai/interface. Tokens are published to Redis
1130
+ pub/sub as they arrive and forwarded as SSE events on the invoke response.
1131
+ The method code itself is unchanged \u2014 streaming is transparent to the
1132
+ developer. See V2ExecutionService.ts and the invoke handler in V2Apps for
1133
+ the server-side plumbing.`,
1118
1134
  inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The message to send (prompt for AI, or text for system echo)" }, "source": { "enum": ["user", "system"], "type": "string", "description": 'Message source: "user" sends to AI model, "system" echoes message content directly. Defaults to "user"' }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": 
"Model configuration override. Optional; uses the workflow's default model if not specified" }, "structuredOutputType": { "enum": ["text", "json", "csv"], "type": "string", "description": "Output format constraint for structured responses" }, "structuredOutputExample": { "type": "string", "description": "Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types." }, "chatHistoryMode": { "enum": ["include", "exclude"], "type": "string", "description": "Whether to include or exclude prior chat history in the AI context" } }, "required": ["message"], "description": "Configuration for the user message step" },
1119
1135
  outputSchema: { "type": "object", "properties": { "content": { "type": "string", "description": "The AI model's response or echoed system message content" } }, "required": ["content"] }
1120
1136
  },
@@ -3469,6 +3485,46 @@ var init_client = __esm({
3469
3485
  return data;
3470
3486
  }
3471
3487
  // -------------------------------------------------------------------------
3488
+ // Streaming
3489
+ // -------------------------------------------------------------------------
3490
+ /**
3491
+ * Send a stream chunk to the caller via SSE.
3492
+ *
3493
+ * When invoked from a method that was called with `stream: true`, chunks
3494
+ * are delivered in real-time as Server-Sent Events. When there is no active
3495
+ * stream (no `STREAM_ID`), calls are silently ignored — so it's safe to
3496
+ * call unconditionally.
3497
+ *
3498
+ * Accepts strings (sent as `type: 'token'`) or structured data (sent as
3499
+ * `type: 'data'`). The caller receives each chunk as an SSE event.
3500
+ *
3501
+ * @example
3502
+ * ```ts
3503
+ * // Stream text tokens
3504
+ * await agent.stream('Processing item 1...');
3505
+ *
3506
+ * // Stream structured data
3507
+ * await agent.stream({ progress: 50, currentItem: 'abc' });
3508
+ * ```
3509
+ */
3510
+ stream = async (data) => {
3511
+ if (!this._streamId) return;
3512
+ const url = `${this._httpConfig.baseUrl}/_internal/v2/stream-chunk`;
3513
+ const body = typeof data === "string" ? { streamId: this._streamId, type: "token", text: data } : { streamId: this._streamId, type: "data", data };
3514
+ const res = await fetch(url, {
3515
+ method: "POST",
3516
+ headers: {
3517
+ "Content-Type": "application/json",
3518
+ Authorization: this._httpConfig.token
3519
+ },
3520
+ body: JSON.stringify(body)
3521
+ });
3522
+ if (!res.ok) {
3523
+ const text = await res.text().catch(() => "");
3524
+ console.warn(`[mindstudio] stream chunk failed: ${res.status} ${text}`);
3525
+ }
3526
+ };
3527
+ // -------------------------------------------------------------------------
3472
3528
  // db + auth namespaces
3473
3529
  // -------------------------------------------------------------------------
3474
3530
  /**
@@ -3836,6 +3892,443 @@ var init_client = __esm({
3836
3892
  }
3837
3893
  });
3838
3894
 
3895
+ // src/ask/sse.ts
3896
+ async function* streamChat(params) {
3897
+ const { baseUrl, apiKey, ...body } = params;
3898
+ const url = `${baseUrl}/_internal/v2/agent/sdk/chat`;
3899
+ let res;
3900
+ try {
3901
+ res = await fetch(url, {
3902
+ method: "POST",
3903
+ headers: {
3904
+ "Content-Type": "application/json",
3905
+ Authorization: `Bearer ${apiKey}`
3906
+ },
3907
+ body: JSON.stringify(body)
3908
+ });
3909
+ } catch (err) {
3910
+ yield { type: "error", error: `Network error: ${err.message}` };
3911
+ return;
3912
+ }
3913
+ if (!res.ok) {
3914
+ let errorMessage = `HTTP ${res.status}`;
3915
+ try {
3916
+ const body2 = await res.json();
3917
+ if (body2.error) errorMessage = body2.error;
3918
+ if (body2.errorMessage) errorMessage = body2.errorMessage;
3919
+ } catch {
3920
+ }
3921
+ yield { type: "error", error: errorMessage };
3922
+ return;
3923
+ }
3924
+ const reader = res.body.getReader();
3925
+ const decoder = new TextDecoder();
3926
+ let buffer = "";
3927
+ while (true) {
3928
+ const { done, value } = await reader.read();
3929
+ if (done) break;
3930
+ buffer += decoder.decode(value, { stream: true });
3931
+ const lines = buffer.split("\n");
3932
+ buffer = lines.pop() ?? "";
3933
+ for (const line of lines) {
3934
+ if (!line.startsWith("data: ")) continue;
3935
+ try {
3936
+ const event = JSON.parse(line.slice(6));
3937
+ if (event.type === "text" || event.type === "tool_use" || event.type === "done" || event.type === "error") {
3938
+ yield event;
3939
+ }
3940
+ } catch {
3941
+ }
3942
+ }
3943
+ }
3944
+ if (buffer.startsWith("data: ")) {
3945
+ try {
3946
+ const event = JSON.parse(buffer.slice(6));
3947
+ if (event.type === "text" || event.type === "tool_use" || event.type === "done" || event.type === "error") {
3948
+ yield event;
3949
+ }
3950
+ } catch {
3951
+ }
3952
+ }
3953
+ }
3954
+ var init_sse = __esm({
3955
+ "src/ask/sse.ts"() {
3956
+ "use strict";
3957
+ }
3958
+ });
3959
+
3960
+ // src/ask/tools.ts
3961
+ function kebabToCamel(s) {
3962
+ return s.replace(/-([a-z])/g, (_, c) => c.toUpperCase());
3963
+ }
3964
+ async function toolGetActionDetails(input) {
3965
+ const { stepMetadata: stepMetadata2 } = await Promise.resolve().then(() => (init_metadata(), metadata_exports));
3966
+ const raw = input.actionName;
3967
+ const camel = raw.includes("-") ? kebabToCamel(raw) : raw;
3968
+ const meta = stepMetadata2[camel] ?? stepMetadata2[raw];
3969
+ if (!meta) {
3970
+ const keys = Object.keys(stepMetadata2);
3971
+ const lower = camel.toLowerCase();
3972
+ const match = keys.find((k) => k.toLowerCase() === lower);
3973
+ if (match) {
3974
+ return JSON.stringify(stepMetadata2[match], null, 2);
3975
+ }
3976
+ return JSON.stringify({
3977
+ error: `Unknown action: ${raw}. Available actions: ${keys.slice(0, 20).join(", ")}...`
3978
+ });
3979
+ }
3980
+ return JSON.stringify(meta, null, 2);
3981
+ }
3982
+ async function toolListModels(agent, input) {
3983
+ const type = input.type;
3984
+ const details = input.details;
3985
+ if (details) {
3986
+ const result2 = type ? await agent.listModelsByType(type) : await agent.listModels();
3987
+ return JSON.stringify(result2, null, 2);
3988
+ }
3989
+ const result = type ? await agent.listModelsSummaryByType(type) : await agent.listModelsSummary();
3990
+ return JSON.stringify(result, null, 2);
3991
+ }
3992
+ async function toolGetConnectorDetails(agent, input) {
3993
+ const serviceId = input.serviceId;
3994
+ const actionId = input.actionId;
3995
+ if (actionId) {
3996
+ const result2 = await agent.getConnectorAction(serviceId, actionId);
3997
+ return JSON.stringify(result2, null, 2);
3998
+ }
3999
+ const result = await agent.getConnector(serviceId);
4000
+ return JSON.stringify(result, null, 2);
4001
+ }
4002
+ async function executeTool(agent, name, input) {
4003
+ try {
4004
+ let result;
4005
+ switch (name) {
4006
+ case "getActionDetails":
4007
+ result = await toolGetActionDetails(input);
4008
+ break;
4009
+ case "listModels":
4010
+ result = await toolListModels(agent, input);
4011
+ break;
4012
+ case "getConnectorDetails":
4013
+ result = await toolGetConnectorDetails(agent, input);
4014
+ break;
4015
+ default:
4016
+ result = JSON.stringify({ error: `Unknown tool: ${name}` });
4017
+ }
4018
+ return { result, isError: result.includes('"error"') };
4019
+ } catch (err) {
4020
+ return { result: `Error: ${err.message}`, isError: true };
4021
+ }
4022
+ }
4023
+ var ASK_TOOLS;
4024
+ var init_tools = __esm({
4025
+ "src/ask/tools.ts"() {
4026
+ "use strict";
4027
+ ASK_TOOLS = [
4028
+ {
4029
+ name: "getActionDetails",
4030
+ description: "Get the full definition for a specific SDK action \u2014 JSON schema for inputs and outputs, usage notes, and description. Use this when you need exact parameter types, enum values, or optional fields to build correct code.",
4031
+ inputSchema: {
4032
+ type: "object",
4033
+ properties: {
4034
+ actionName: {
4035
+ type: "string",
4036
+ description: 'The action name in camelCase or kebab-case (e.g. "generateImage" or "generate-image")'
4037
+ }
4038
+ },
4039
+ required: ["actionName"]
4040
+ }
4041
+ },
4042
+ {
4043
+ name: "listModels",
4044
+ description: "List available AI models, optionally filtered by type. By default returns a compact summary (id, name, type, tags). With details=true, returns full model objects including the `inputs` array that defines config options (width, height, seed, etc.) \u2014 use this when you need to check model capabilities or build code with config options. You can filter the full list yourself instead of calling this multiple times.",
4045
+ inputSchema: {
4046
+ type: "object",
4047
+ properties: {
4048
+ type: {
4049
+ type: "string",
4050
+ description: 'Filter by model type: "llm_chat", "image_generation", "video_generation", "video_analysis", "text_to_speech", "vision", "transcription"'
4051
+ },
4052
+ details: {
4053
+ type: "boolean",
4054
+ description: "If true, returns full model objects with inputs/config arrays. Use this when you need to check supported inputs, config options, or model capabilities."
4055
+ }
4056
+ }
4057
+ }
4058
+ },
4059
+ {
4060
+ name: "getConnectorDetails",
4061
+ description: "Drill into an OAuth connector service. With just serviceId, returns the list of available actions. With serviceId + actionId, returns the full action config including all input fields needed to call it via runFromConnectorRegistry.",
4062
+ inputSchema: {
4063
+ type: "object",
4064
+ properties: {
4065
+ serviceId: {
4066
+ type: "string",
4067
+ description: 'The connector service ID (e.g. "hubspot", "slack", "google-drive")'
4068
+ },
4069
+ actionId: {
4070
+ type: "string",
4071
+ description: "Optional action ID within the service. If provided, returns full action config with input fields."
4072
+ }
4073
+ },
4074
+ required: ["serviceId"]
4075
+ }
4076
+ }
4077
+ ];
4078
+ }
4079
+ });
4080
+
4081
+ // src/generated/llms-content.ts
4082
+ var llms_content_exports = {};
4083
+ __export(llms_content_exports, {
4084
+ llmsContent: () => llmsContent
4085
+ });
4086
+ var llmsContent;
4087
+ var init_llms_content = __esm({
4088
+ "src/generated/llms-content.ts"() {
4089
+ "use strict";
4090
+ llmsContent = '# @mindstudio-ai/agent\n\nTypeScript SDK, CLI, and MCP server for MindStudio. One API key gives you access to 200+ AI models (OpenAI, Anthropic, Google, Meta, xAI, DeepSeek, etc.) and 1,000+ actions including 850+ connector actions across third-party services from the open-source MindStudio Connector Registry (https://github.com/mindstudio-ai/mscr). No separate provider API keys required.\n\nThis file is the complete API reference. No other documentation is needed to use the SDK.\n\n## Recommended workflow\n\nThere are 150+ actions available. Do NOT try to read or load them all at once. Follow this discovery flow:\n\n1. **Identify yourself** \u2014 Call `changeName` to set your display name (use your name or whatever your user calls you). If you have a profile picture or icon, call `uploadFile` to upload it, then `changeProfilePicture` with the returned URL. This helps users identify your requests in their logs.\n2. **Ask** \u2014 Use `mindstudio ask "your question"` (CLI) or the `ask` MCP tool for SDK guidance. It knows every action, model, and connector and returns working TypeScript code with real model IDs and config options. Examples: `mindstudio ask "generate an image with FLUX"`, `mindstudio ask "what models support vision?"`, `mindstudio ask "how do I send a Slack message?"`.\n3. **Browse** \u2014 For manual discovery, call `listActions` (MCP tool) or `mindstudio list-actions --summary` (CLI) to get a compact `{ action: description }` map of everything available (~3k tokens). Call `mindstudio info <action>` (CLI) for parameter details.\n4. **Call it** \u2014 Invoke the action with the required parameters. All actions share the same calling convention (see below).\n\nFor specific use cases:\n\n- **OAuth third-party integrations** (Slack, Google, HubSpot, etc.): These are optional OAuth connectors from the MindStudio Connector Registry \u2014 for most tasks, use actions directly instead. 
If you need a third-party integration: call `listConnectors()` to browse services \u2192 `getConnectorAction(serviceId, actionId)` for input fields \u2192 execute via `runFromConnectorRegistry`. Requires an OAuth connection set up in MindStudio first \u2014 call `listConnections()` to check available connections.\n- **Pre-built agents**: Call `listAgents()` to see what\'s available \u2192 `runAgent({ appId })` to execute one. **Important:** Not all agents are configured for API use. Do not try to run an agent just because it appears in the list \u2014 only run agents the user specifically asks you to run.\n- **Model selection**: Call `listModelsSummary()` or `listModelsSummaryByType("llm_chat")` to browse models, then pass the model ID as `modelOverride.model` to actions like `generateText`. Use the summary endpoints (not `listModels`) to keep token usage low.\n- **Cost estimation**: AI-powered actions (text generation, image generation, video, audio, etc.) cost money. Call `estimateStepCost(stepType, stepInput)` before running these and confirm with the user before proceeding \u2014 unless they\'ve explicitly given permission to go ahead. Non-AI actions (data lookups, OAuth connectors, etc.) 
are generally free.\n\n## Install\n\nStandalone binary (CLI/MCP, no dependencies):\n```bash\ncurl -fsSL https://msagent.ai/install.sh | bash\n```\n\nnpm (SDK + CLI):\n```bash\nnpm install @mindstudio-ai/agent\n```\n\nRequires Node.js >= 18.\n\n## CLI\n\nThe package includes a CLI for executing steps from the command line or scripts:\n\n```bash\n# Execute with named flags (kebab-case)\nmindstudio generate-image --prompt "A mountain landscape"\n\n# Execute with JSON input (JSON5-tolerant)\nmindstudio generate-image \'{prompt: "A mountain landscape"}\'\n\n# Extract a single output field\nmindstudio generate-image --prompt "A sunset" --output-key imageUrl\n\n# List all methods (compact JSON \u2014 best for LLM discovery)\nmindstudio list --summary\n\n# List all methods (human-readable table)\nmindstudio list\n\n# Show method details (params, types, output)\nmindstudio info generate-image\n\n# Run via npx without installing\nnpx @mindstudio-ai/agent generate-text --message "Hello"\n```\n\nAuth: run `mindstudio login`, set `MINDSTUDIO_API_KEY` env var, or pass `--api-key <key>`.\nMethod names are kebab-case on the CLI (camelCase also accepted). Flags are kebab-case (`--video-url` for `videoUrl`).\nUse `--output-key <key>` to extract a single field, `--no-meta` to strip $-prefixed metadata.\n\n### Authentication\n\n```bash\n# Interactive login (opens browser, saves key to ~/.mindstudio/config.json)\nmindstudio login\n\n# Check current auth status\nmindstudio whoami\n\n# Clear stored credentials\nmindstudio logout\n```\n\nAuth resolution order: `--api-key` flag > `MINDSTUDIO_API_KEY` env > `~/.mindstudio/config.json` > `CALLBACK_TOKEN` env.\n\n## MCP server\n\nThe package includes an MCP server exposing all methods as tools. 
Start by calling the `listSteps` tool to discover available methods.\n\n```bash\nmindstudio mcp\n```\n\nMCP client config (standalone binary \u2014 recommended):\n```json\n{\n "mcpServers": {\n "mindstudio": {\n "command": "mindstudio",\n "args": ["mcp"],\n "env": { "MINDSTUDIO_API_KEY": "your-api-key" }\n }\n }\n}\n```\n\n## Setup\n\n```typescript\nimport { MindStudioAgent } from \'@mindstudio-ai/agent\';\n\n// With API key (or set MINDSTUDIO_API_KEY env var)\nconst agent = new MindStudioAgent({ apiKey: \'your-key\' });\n```\n\nYour MindStudio API key authenticates all requests. MindStudio routes to the correct AI provider (OpenAI, Google, Anthropic, etc.) server-side \u2014 you do NOT need separate provider API keys.\n\nConstructor options:\n```typescript\nnew MindStudioAgent({\n apiKey?: string, // Auth token. Falls back to MINDSTUDIO_API_KEY env var.\n baseUrl?: string, // API base URL. Defaults to "https://v1.mindstudio-api.com".\n maxRetries?: number, // Retries on 429 rate limit (default: 3). Uses Retry-After header for delay.\n})\n```\n\n## Models\n\nDirect access to 200+ AI models from every major provider \u2014 all through a single API key, billed at cost with no markups.\n\nUse `listModels()` or `listModelsByType()` for full model details, or `listModelsSummary()` / `listModelsSummaryByType()` for a lightweight list (id, name, type, tags) suitable for LLM context windows. 
Pass a model ID to `modelOverride.model` in methods like `generateText` to select a specific model:\n\n```typescript\nconst { models } = await agent.listModelsByType(\'llm_chat\');\nconst model = models.find(m => m.name.includes("Gemini"));\n\nconst { content } = await agent.generateText({\n message: \'Hello\',\n modelOverride: {\n model: model.id,\n temperature: 0.7,\n maxResponseTokens: 1024,\n },\n});\n```\n\n## Calling convention\n\nEvery method has the signature:\n```typescript\nagent.methodName(input: InputType, options?: { appId?: string, threadId?: string }): Promise<OutputType & StepExecutionMeta>\n```\n\nThe first argument is the step-specific input object. The optional second argument controls thread/app context.\n\n**Results are returned flat** \u2014 output fields are spread at the top level alongside metadata:\n\n```typescript\nconst { content } = await agent.generateText({ message: \'Hello\' });\n\n// Full result shape for any method:\nconst result = await agent.generateText({ message: `Hello` });\nresult.content; // step-specific output field\nresult.$appId; // string \u2014 app ID for this execution\nresult.$threadId; // string \u2014 thread ID for this execution\nresult.$rateLimitRemaining; // number | undefined \u2014 API calls remaining in rate limit window\nresult.$billingCost; // number | undefined \u2014 cost in credits for this call\nresult.$billingEvents; // object[] | undefined \u2014 itemized billing events\n```\n\n## Thread persistence\n\nPass `$appId`/`$threadId` from a previous result to maintain conversation state, variable state, or other context across calls:\n\n```typescript\nconst r1 = await agent.generateText({ message: \'My name is Alice\' });\nconst r2 = await agent.generateText(\n { message: \'What is my name?\' },\n { threadId: r1.$threadId, appId: r1.$appId },\n);\n// r2.content => "Your name is Alice"\n```\n\n## Error handling\n\nAll errors throw `MindStudioError`:\n```typescript\nimport { MindStudioError } from 
\'@mindstudio-ai/agent\';\n\ntry {\n await agent.generateImage({ prompt: \'...\' });\n} catch (err) {\n if (err instanceof MindStudioError) {\n err.message; // Human-readable error message\n err.code; // Machine-readable code: "invalid_step_config", "api_error", "call_cap_exceeded", "output_fetch_error"\n err.status; // HTTP status code (400, 401, 429, etc.)\n err.details; // Raw error body from the API\n }\n}\n```\n\n429 rate limit errors are retried automatically (configurable via `maxRetries`).\n\n## Low-level access\n\nFor action types not covered by generated methods:\n```typescript\nconst result = await agent.executeStep(\'stepType\', { ...params });\n```\n\n## Batch execution\n\nExecute multiple steps in parallel in a single request. Maximum 50 steps per batch.\nIndividual step failures do not affect other steps \u2014 partial success is possible.\n\n```typescript\nconst result = await agent.executeStepBatch([\n { stepType: \'generateImage\', step: { prompt: \'a sunset\' } },\n { stepType: \'textToSpeech\', step: { text: \'hello world\' } },\n], { appId?, threadId? });\n\n// Result:\nresult.results; // BatchStepResult[] \u2014 same order as input\nresult.results[0].stepType; // string\nresult.results[0].output; // object | undefined (step output on success)\nresult.results[0].error; // string | undefined (error message on failure)\nresult.results[0].billingCost; // number | undefined (cost on success)\nresult.totalBillingCost; // number | undefined\nresult.appId; // string\nresult.threadId; // string\n```\n\nCLI:\n```bash\nmindstudio batch \'[{"stepType":"generateImage","step":{"prompt":"a cat"}}]\'\ncat steps.json | mindstudio batch\n```\n\n## Methods\n\nAll methods below are called on a `MindStudioAgent` instance (`agent.methodName(...)`).\nInput shows the first argument object. 
Output shows the fields available on the returned result.\n\n### General\n\n#### addSubtitlesToVideo\nAutomatically add subtitles to a video\n- Can control style of text and animation\n- Input: `{ videoUrl: string, language: string, fontName: string, fontSize: number, fontWeight: "normal" | "bold" | "black", fontColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", highlightColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", strokeWidth: number, strokeColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta", backgroundColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta" | "none", backgroundOpacity: number, position: "top" | "center" | "bottom", yOffset: number, wordsPerSubtitle: number, enableAnimation: boolean, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### analyzeImage\nAnalyze an image using a vision model based on a text prompt.\n- Uses the configured vision model to generate a text analysis of the image.\n- The prompt should describe what to look for or extract from the image.\n- Input: `{ prompt: string, imageUrl: string, visionModelOverride?: { model: string, config?: object } | { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ analysis: string }`\n\n#### analyzeVideo\nAnalyze a video using a video analysis model based on a text prompt.\n- Uses the configured video analysis model to generate a text analysis of the video.\n- 
The prompt should describe what to look for or extract from the video.\n- Input: `{ prompt: string, videoUrl: string, videoAnalysisModelOverride?: { model: string, config?: object } | { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ analysis: string }`\n\n#### captureThumbnail\nCapture a thumbnail from a video at a specified timestamp\n- Input: `{ videoUrl: string, at: number | string }`\n- Output: `{ thumbnailUrl: string }`\n\n#### checkAppRole\nCheck whether the current user has a specific app role and branch accordingly.\n- Checks if the current user has been assigned a specific role in this app.\n- If the user has the role, transitions to the "has role" path.\n- If the user does not have the role, transitions to the "no role" path, or errors if no path is configured.\n- Role names are defined by the app creator and assigned to users via the app roles system.\n- The roleName field supports {{variables}} for dynamic role checks.\n- Input: `{ roleName: string, hasRoleStepId?: string, hasRoleWorkflowId?: string, noRoleStepId?: string, noRoleWorkflowId?: string }`\n- Output: `{ hasRole: boolean, userRoles: string[] }`\n\n#### convertPdfToImages\nConvert each page of a PDF document into a PNG image.\n- Each page is converted to a separate PNG and re-hosted on the CDN.\n- Returns an array of image URLs, one per page.\n- Input: `{ pdfUrl: string }`\n- Output: `{ imageUrls: string[] }`\n\n#### createDataSource\nCreate a new empty vector data source for the current app.\n- Creates a new data source (vector database) associated with the current app version.\n- The data source is created empty \u2014 use the "Upload Data Source Document" block to add documents.\n- Returns the new 
data source ID which can be used in subsequent blocks.\n- Input: `{ name: string }`\n- Output: `unknown`\n\n#### createGmailDraft\nCreate a draft email in the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- The draft appears in the user\'s Gmail Drafts folder but is not sent.\n- messageType controls the body format: "plain" for plain text, "html" for raw HTML, "markdown" for auto-converted markdown.\n- Input: `{ to: string, subject: string, message: string, connectionId?: string, messageType: "plain" | "html" | "markdown" }`\n- Output: `{ draftId: string }`\n\n#### deleteDataSource\nDelete a vector data source from the current app.\n- Soft-deletes a data source (vector database) by marking it as deleted.\n- The Milvus partition is cleaned up asynchronously by a background cron job.\n- The data source must belong to the current app version.\n- Input: `{ dataSourceId: string }`\n- Output: `unknown`\n\n#### deleteDataSourceDocument\nDelete a single document from a data source.\n- Soft-deletes a document by marking it as deleted.\n- Requires both the data source ID and document ID.\n- After deletion, reloads vectors into Milvus so the data source reflects the change immediately.\n- Input: `{ dataSourceId: string, documentId: string }`\n- Output: `unknown`\n\n#### detectChanges\nDetect changes between runs by comparing current input against previously stored state. 
Routes execution based on whether a change occurred.\n- Persists state across runs using a global variable keyed to the step ID.\n- Two modes: "comparison" (default) uses strict string inequality; "ai" uses an LLM to determine if a meaningful change occurred.\n- First run always treats the value as "changed" since there is no previous state.\n- Each mode supports transitions to different steps/workflows for the "changed" and "unchanged" paths.\n- AI mode bills normally for the LLM call.\n- Input: `{ mode: "ai" | "comparison", input: string, prompt?: string, modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, previousValueVariable?: string, changedStepId?: string, changedWorkflowId?: string, unchangedStepId?: string, unchangedWorkflowId?: string }`\n- Output: `{ hasChanged: boolean, currentValue: string, previousValue: string, isFirstRun: boolean }`\n\n#### detectPII\nScan text for personally identifiable information using Microsoft Presidio.\n- In workflow mode, transitions to detectedStepId if PII is found, notDetectedStepId otherwise.\n- In direct execution, returns the detection results without transitioning.\n- If entities is empty, returns immediately with no detections.\n- Input: `{ input: string, language: string, entities: string[], detectedStepId?: string, notDetectedStepId?: string, outputLogVariable?: string | null }`\n- Output: `{ detected: boolean, detections: { entity_type: string, start: number, end: number, score: number }[] }`\n\n#### discordEditMessage\nEdit a previously sent Discord channel message. 
Use with the message ID returned by Send Discord Message.\n- Only messages sent by the bot can be edited.\n- The messageId is returned by the Send Discord Message step.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- When editing with an attachment, the new attachment replaces any previous attachments on the message.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Input: `{ botToken: string, channelId: string, messageId: string, text: string, attachmentUrl?: string }`\n- Output: `unknown`\n\n#### discordSendFollowUp\nSend a follow-up message to a Discord slash command interaction.\n- Requires the applicationId and interactionToken from the Discord trigger variables.\n- Follow-up messages appear as new messages in the channel after the initial response.\n- Returns the sent message ID.\n- Interaction tokens expire after 15 minutes.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Input: `{ applicationId: string, interactionToken: string, text: string, attachmentUrl?: string }`\n- Output: `{ messageId: string }`\n\n#### discordSendMessage\nSend a message to Discord \u2014 either edit the loading message or send a new channel message.\n- mode "edit" replaces the loading message (interaction response) with the final result. Uses applicationId and interactionToken from trigger variables. No bot permissions required.\n- mode "send" sends a new message to a channel. Uses botToken and channelId from trigger variables. Returns a messageId that can be used with Edit Discord Message.\n- Optionally attach a file by providing a URL to attachmentUrl. 
The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Interaction tokens expire after 15 minutes.\n- Input: `{ mode: "edit" | "send", text: string, applicationId?: string, interactionToken?: string, botToken?: string, channelId?: string, attachmentUrl?: string }`\n- Output: `{ messageId?: string }`\n\n#### downloadVideo\nDownload a video file\n- Works with YouTube, TikTok, etc., by using ytdlp behind the scenes\n- Can save as mp4 or mp3\n- Input: `{ videoUrl: string, format: "mp4" | "mp3" }`\n- Output: `{ videoUrl: string }`\n\n#### enhanceImageGenerationPrompt\nGenerate or enhance an image generation prompt using a language model. Optionally generates a negative prompt.\n- Rewrites the user\'s prompt with added detail about style, lighting, colors, and composition.\n- Also useful for initial generation, it doesn\'t always need to be enhancing an existing prompt\n- When includeNegativePrompt is true, a second model call generates a negative prompt.\n- Input: `{ initialPrompt: string, includeNegativePrompt: boolean, negativePromptDestinationVariableName?: string, systemPrompt: string, modelOverride?: unknown }`\n- Output: `{ prompt: string, negativePrompt?: string }`\n\n#### enhanceVideoGenerationPrompt\nGenerate or enhance a video generation prompt using a language model. 
Optionally generates a negative prompt.\n- Rewrites the user\'s prompt with added detail about style, camera movement, lighting, and composition.\n- Also useful for initial generation, it doesn\'t always need to be enhancing an existing prompt\n- When includeNegativePrompt is true, a second model call generates a negative prompt.\n- Input: `{ initialPrompt: string, includeNegativePrompt: boolean, negativePromptDestinationVariableName?: string, systemPrompt: string, modelOverride?: unknown }`\n- Output: `{ prompt: string, negativePrompt?: string }`\n\n#### extractAudioFromVideo\nExtract audio MP3 from a video file\n- Input: `{ videoUrl: string }`\n- Output: `{ audioUrl: string }`\n\n#### extractText\nDownload a file from a URL and extract its text content. Supports PDFs, plain text files, and other document formats.\n- Best suited for PDFs and raw text/document files. For web pages, use the scrapeUrl step instead.\n- Accepts a single URL, a comma-separated list of URLs, or a JSON array of URLs.\n- Files are rehosted on the MindStudio CDN before extraction.\n- Maximum file size is 50MB per URL.\n- Input: `{ url: string | string[] }`\n- Output: `{ text: string | string[] }`\n\n#### fetchDataSourceDocument\nFetch the full extracted text contents of a document in a data source.\n- Loads a document by ID and returns its full extracted text content.\n- The document must have been successfully processed (status "done").\n- Also returns document metadata (name, summary, word count).\n- Input: `{ dataSourceId: string, documentId: string }`\n- Output: `unknown`\n\n#### fetchSlackChannelHistory\nFetch recent message history from a Slack channel.\n- The user is responsible for connecting their Slack workspace and selecting the channel\n- Input: `{ connectionId?: string, channelId: string, limit?: number, startDate?: string, endDate?: string, includeImages?: boolean, includeRawMessage?: boolean }`\n- Output: `{ messages: { from: string, content: string, timestamp?: string, 
images?: string[], rawMessage?: { app_id?: string, assistant_app_thread?: { first_user_thread_reply?: string, title?: string, title_blocks?: unknown[] }, attachments?: { actions?: unknown[], app_id?: string, app_unfurl_url?: string, author_icon?: string, author_id?: string, author_link?: string, author_name?: string, author_subname?: string, blocks?: unknown[], bot_id?: string, bot_team_id?: string, callback_id?: string, channel_id?: string, channel_name?: string, channel_team?: string, color?: string, fallback?: string, fields?: unknown[], file_id?: string, filename?: string, files?: unknown[], footer?: string, footer_icon?: string, from_url?: string, hide_border?: boolean, hide_color?: boolean, id?: number, image_bytes?: number, image_height?: number, image_url?: string, image_width?: number, indent?: boolean, is_app_unfurl?: boolean, is_file_attachment?: boolean, is_msg_unfurl?: boolean, is_reply_unfurl?: boolean, is_thread_root_unfurl?: boolean, list?: unknown, list_record?: unknown, list_record_id?: string, list_records?: unknown[], list_schema?: unknown[], list_view?: unknown, list_view_id?: string, message_blocks?: unknown[], metadata?: unknown, mimetype?: string, mrkdwn_in?: string[], msg_subtype?: string, original_url?: string, pretext?: string, preview?: unknown, service_icon?: string, service_name?: string, service_url?: string, size?: number, text?: string, thumb_height?: number, thumb_url?: string, thumb_width?: number, title?: string, title_link?: string, ts?: string, url?: string, video_html?: string, video_html_height?: number, video_html_width?: number, video_url?: string }[], blocks?: { accessory?: unknown, alt_text?: string, api_decoration_available?: boolean, app_collaborators?: string[], app_id?: string, author_name?: string, block_id?: string, bot_user_id?: string, button_label?: string, call?: unknown, call_id?: string, description?: unknown, developer_trace_id?: string, dispatch_action?: boolean, element?: unknown, elements?: unknown[], 
expand?: boolean, external_id?: string, fallback?: string, fields?: unknown[], file?: unknown, file_id?: string, function_trigger_id?: string, hint?: unknown, image_bytes?: number, image_height?: number, image_url?: string, image_width?: number, is_animated?: boolean, is_workflow_app?: boolean, label?: unknown, optional?: boolean, owning_team_id?: string, provider_icon_url?: string, provider_name?: string, sales_home_workflow_app_type?: number, share_url?: string, slack_file?: unknown, source?: string, text?: unknown, thumbnail_url?: string, title?: unknown, title_url?: string, trigger_subtype?: string, trigger_type?: string, type?: unknown, url?: string, video_url?: string, workflow_id?: string }[], bot_id?: string, bot_profile?: { app_id?: string, deleted?: boolean, icons?: unknown, id?: string, name?: string, team_id?: string, updated?: number }, client_msg_id?: string, display_as_bot?: boolean, edited?: { ts?: string, user?: string }, files?: { access?: string, alt_txt?: string, app_id?: string, app_name?: string, attachments?: unknown[], blocks?: unknown[], bot_id?: string, can_toggle_canvas_lock?: boolean, canvas_printing_enabled?: boolean, canvas_template_mode?: string, cc?: unknown[], channel_actions_count?: number, channel_actions_ts?: string, channels?: string[], comments_count?: number, converted_pdf?: string, created?: number, deanimate?: string, deanimate_gif?: string, display_as_bot?: boolean, dm_mpdm_users_with_file_access?: unknown[], duration_ms?: number, edit_link?: string, edit_timestamp?: number, editable?: boolean, editor?: string, editors?: string[], external_id?: string, external_type?: string, external_url?: string, favorites?: unknown[], file_access?: string, filetype?: string, from?: unknown[], groups?: string[], has_more?: boolean, has_more_shares?: boolean, has_rich_preview?: boolean, headers?: unknown, hls?: string, hls_embed?: string, id?: string, image_exif_rotation?: number, ims?: string[], initial_comment?: unknown, 
is_channel_space?: boolean, is_external?: boolean, is_public?: boolean, is_restricted_sharing_enabled?: boolean, is_starred?: boolean, last_editor?: string, last_read?: number, lines?: number, lines_more?: number, linked_channel_id?: string, list_csv_download_url?: string, list_limits?: unknown, list_metadata?: unknown, media_display_type?: string, media_progress?: unknown, mimetype?: string, mode?: string, mp4?: string, mp4_low?: string, name?: string, non_owner_editable?: boolean, num_stars?: number, org_or_workspace_access?: string, original_attachment_count?: number, original_h?: string, original_w?: string, permalink?: string, permalink_public?: string, pinned_to?: string[], pjpeg?: string, plain_text?: string, pretty_type?: string, preview?: string, preview_highlight?: string, preview_is_truncated?: boolean, preview_plain_text?: string, private_channels_with_file_access_count?: number, private_file_with_access_count?: number, public_url_shared?: boolean, quip_thread_id?: string, reactions?: unknown[], saved?: unknown, sent_to_self?: boolean, shares?: unknown, show_badge?: boolean, simplified_html?: string, size?: number, source_team?: string, subject?: string, subtype?: string, team_pref_version_history_enabled?: boolean, teams_shared_with?: unknown[], template_conversion_ts?: number, template_description?: string, template_icon?: string, template_name?: string, template_title?: string, thumb_1024?: string, thumb_1024_gif?: string, thumb_1024_h?: string, thumb_1024_w?: string, thumb_160?: string, thumb_160_gif?: string, thumb_160_h?: string, thumb_160_w?: string, thumb_360?: string, thumb_360_gif?: string, thumb_360_h?: string, thumb_360_w?: string, thumb_480?: string, thumb_480_gif?: string, thumb_480_h?: string, thumb_480_w?: string, thumb_64?: string, thumb_64_gif?: string, thumb_64_h?: string, thumb_64_w?: string, thumb_720?: string, thumb_720_gif?: string, thumb_720_h?: string, thumb_720_w?: string, thumb_80?: string, thumb_800?: string, thumb_800_gif?: 
string, thumb_800_h?: string, thumb_800_w?: string, thumb_80_gif?: string, thumb_80_h?: string, thumb_80_w?: string, thumb_960?: string, thumb_960_gif?: string, thumb_960_h?: string, thumb_960_w?: string, thumb_gif?: string, thumb_pdf?: string, thumb_pdf_h?: string, thumb_pdf_w?: string, thumb_tiny?: string, thumb_video?: string, thumb_video_h?: number, thumb_video_w?: number, timestamp?: number, title?: string, title_blocks?: unknown[], to?: unknown[], transcription?: unknown, update_notification?: number, updated?: number, url_private?: string, url_private_download?: string, url_static_preview?: string, user?: string, user_team?: string, username?: string, vtt?: string }[], icons?: { emoji?: string, image_36?: string, image_48?: string, image_64?: string, image_72?: string }, inviter?: string, is_locked?: boolean, latest_reply?: string, metadata?: { event_payload?: unknown, event_type?: string }, parent_user_id?: string, purpose?: string, reactions?: { count?: number, name?: string, url?: string, users?: string[] }[], reply_count?: number, reply_users?: string[], reply_users_count?: number, root?: { bot_id?: string, icons?: unknown, latest_reply?: string, parent_user_id?: string, reply_count?: number, reply_users?: string[], reply_users_count?: number, subscribed?: boolean, subtype?: string, text?: string, thread_ts?: string, ts?: string, type?: string, username?: string }, subscribed?: boolean, subtype?: string, team?: string, text?: string, thread_ts?: string, topic?: string, ts?: string, type?: string, upload?: boolean, user?: string, username?: string, x_files?: string[] } }[] }`\n\n#### generateAsset\nGenerate an HTML asset and export it as a webpage, PDF, or image\n- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. 
They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation is part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). 
In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)\n- Input: `{ source: string, sourceType: "html" | "markdown" | "spa" | "raw" | "dynamic" | "customInterface", outputFormat: "pdf" | "png" | "html" | "mp4" | "openGraph", pageSize: "full" | "letter" | "A4" | "custom", testData: object, options?: { pageWidthPx?: number, pageHeightPx?: number, pageOrientation?: "portrait" | "landscape", rehostMedia?: boolean, videoDurationSeconds?: number }, spaSource?: { source?: string, lastCompiledSource?: string, files?: object, paths: string[], root: string, zipUrl: string }, rawSource?: string, dynamicPrompt?: string, dynamicSourceModelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, transitionControl?: "default" | "native", shareControl?: "default" | "hidden", shareImageUrl?: string, intermediateAsset?: boolean }`\n- Output: `{ url: string }`\n\n#### generateChart\nCreate a chart image using QuickChart (Chart.js) and return the URL.\n- The data field must be a Chart.js-compatible JSON object serialized as a string.\n- Supported chart types: bar, line, pie.\n- Input: `{ chart: { chartType: "bar" | "line" | "pie", data: string, options: { width: string, height: string } } }`\n- Output: `{ chartUrl: string }`\n\n#### generateImage\nGenerate an image from a text prompt using an AI model.\n- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Images are automatically hosted on a CDN.\n- In foreground mode, the image is displayed to the user. 
In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple images are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.\n- Input: `{ prompt: string, intermediateAsset?: boolean, imageModelOverride?: { model: string, config?: object }, generateVariants?: boolean, numVariants?: number, addWatermark?: boolean }`\n- Output: `{ imageUrl: string | string[] }`\n\n#### generateLipsync\nGenerate a lip sync video from provided audio and image.\n- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ intermediateAsset?: boolean, addWatermark?: boolean, lipsyncModelOverride?: { model: string, config?: object } }`\n- Output: `unknown`\n\n#### generateMusic\nGenerate an audio file from provided instructions (text) using a music model.\n- The text field contains the instructions (prompt) for the music generation.\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ text: string, intermediateAsset?: boolean, musicModelOverride?: { model: string, config?: object } }`\n- Output: `unknown`\n\n#### generateStaticVideoFromImage\nConvert a static image to an MP4\n- Can use to create slides/intertitles/slates for video composition\n- Input: `{ imageUrl: string, duration: string }`\n- Output: `{ videoUrl: string }`\n\n#### generateText\nSend a message to an AI model and return the response, or echo a system message.\n- Source "user" sends the message to an LLM and returns the model\'s response.\n- Source "system" echoes the message content directly (no AI call).\n- Mode "background" saves the result to a variable. 
Mode "foreground" streams it to the user (not available in direct execution).\n- Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.\n- When executed inside a v2 app method (managed sandbox or local dev tunnel),\nLLM token output can be streamed to the frontend in real time via an SSE\nside-channel. The frontend opts in by passing { stream: true } to the method\ninvocation via @mindstudio-ai/interface. Tokens are published to Redis\npub/sub as they arrive and forwarded as SSE events on the invoke response.\nThe method code itself is unchanged \u2014 streaming is transparent to the\ndeveloper. See V2ExecutionService.ts and the invoke handler in V2Apps for\nthe server-side plumbing.\n- Input: `{ message: string, source?: "user" | "system", modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, structuredOutputType?: "text" | "json" | "csv", structuredOutputExample?: string, chatHistoryMode?: "include" | "exclude" }`\n- Output: `{ content: string }`\n\n#### generateVideo\nGenerate a video from a text prompt using an AI model.\n- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Videos are automatically hosted on a CDN.\n- In foreground mode, the video is displayed to the user. 
In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.\n- Input: `{ prompt: string, intermediateAsset?: boolean, videoModelOverride?: { model: string, config?: object }, generateVariants?: boolean, numVariants?: number, addWatermark?: boolean }`\n- Output: `{ videoUrl: string | string[] }`\n\n#### getGmailAttachments\nDownload attachments from a Gmail email and re-host them on CDN.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Attachments are uploaded to CDN and returned as URLs.\n- Attachments larger than 25MB are skipped.\n- Use the message ID from Search Gmail Emails, List Recent Gmail Emails, or Get Gmail Email steps.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### getGmailUnreadCount\nGet the number of unread emails in the connected Gmail inbox.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the unread message count for the inbox label.\n- This is a lightweight call that does not fetch any email content.\n- Input: `{ connectionId?: string }`\n- Output: `unknown`\n\n#### getMediaMetadata\nGet info about a media file\n- Input: `{ mediaUrl: string }`\n- Output: `{ metadata: string }`\n\n#### httpRequest\nMake an HTTP request to an external endpoint and return the response.\n- Supports GET, POST, PATCH, DELETE, and PUT methods.\n- Body can be raw JSON/text, URL-encoded form data, or multipart form data.\n- Input: `{ url: string, method: string, headers: object, queryParams: object, body: string, bodyItems: object, contentType: "none" | "application/json" | "application/x-www-form-urlencoded" | "multipart/form-data" | "custom", customContentType: string, testData?: object }`\n- Output: `{ ok: boolean, status: number, statusText: string, response: string }`\n\n#### 
imageFaceSwap\nReplace a face in an image with a face from another image using AI.\n- Requires both a target image and a face source image.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, faceImageUrl: string, engine: string }`\n- Output: `{ imageUrl: string }`\n\n#### imageRemoveWatermark\nRemove watermarks from an image using AI.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ imageUrl: string }`\n\n#### insertVideoClips\nInsert b-roll clips into a base video at a timecode, optionally with an xfade transition.\n- Input: `{ baseVideoUrl: string, overlayVideos: { videoUrl: string, startTimeSec: number }[], transition?: string, transitionDuration?: number, useOverlayAudio?: boolean, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### listDataSources\nList all data sources for the current app.\n- Returns metadata for every data source associated with the current app version.\n- Each entry includes the data source ID, name, description, status, and document list.\n- Input: `object`\n- Output: `unknown`\n\n#### listGmailLabels\nList all labels in the connected Gmail account. Use these label IDs or names with the Update Gmail Labels step.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns both system labels (INBOX, SENT, TRASH, etc.) 
and user-created labels.\n- Label type is "system" for built-in labels or "user" for custom labels.\n- Input: `{ connectionId?: string }`\n- Output: `unknown`\n\n#### listRecentGmailEmails\nList recent emails from the connected Gmail inbox.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns up to 100 emails (default 5), ordered by most recent first.\n- Functionally equivalent to Search Gmail Emails with an "in:inbox" query.\n- Input: `{ connectionId?: string, exportType: "json" | "text", limit: string }`\n- Output: `unknown`\n\n#### logic\nRoute execution to different branches based on AI evaluation, comparison operators, or workflow jumps.\n- Supports two modes: "ai" (default) uses an AI model to pick the most accurate statement; "comparison" uses operator-based checks.\n- In AI mode, the model picks the most accurate statement from the list. All possible cases must be specified.\n- In comparison mode, the context is the left operand and each case\'s condition is the right operand. First matching case wins. 
Use operator "default" as a fallback.\n- Requires at least two cases.\n- Each case can transition to a step in the current workflow (destinationStepId) or jump to another workflow (destinationWorkflowId).\n- Input: `{ mode?: "ai" | "comparison", context: string, cases: ({ id: string, condition: string, operator?: "eq" | "neq" | "gt" | "lt" | "gte" | "lte" | "exists" | "not_exists" | "contains" | "not_contains" | "default", destinationStepId?: string, destinationWorkflowId?: string } | string)[], modelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object } }`\n- Output: `{ selectedCase: number }`\n\n#### makeDotComRunScenario\nTrigger a Make.com (formerly Integromat) scenario via webhook and return the response.\n- The webhook URL must be configured in your Make.com scenario.\n- Input key-value pairs are sent as JSON in the POST body.\n- Response format depends on the Make.com scenario configuration.\n- Input: `{ webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### mergeAudio\nMerge one or more clips into a single audio file.\n- Input: `{ mp3Urls: string[], fileMetadata?: object, albumArtUrl?: string, intermediateAsset?: boolean }`\n- Output: `{ audioUrl: string }`\n\n#### mergeVideos\nMerge one or more clips into a single video.\n- Input: `{ videoUrls: string[], transition?: string, transitionDuration?: number, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### mixAudioIntoVideo\nMix an audio track into a video\n- Input: `{ videoUrl: string, audioUrl: string, options: { keepVideoAudio?: boolean, audioGainDb?: number, videoGainDb?: number, loopAudio?: boolean }, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### 
muteVideo\nMute a video file\n- Input: `{ videoUrl: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### n8nRunNode\nTrigger an n8n workflow node via webhook and return the response.\n- The webhook URL must be configured in your n8n workflow.\n- Supports GET and POST methods with optional Basic authentication.\n- For GET requests, input values are sent as query parameters. For POST, they are sent as JSON body.\n- Input: `{ method: string, authentication: "none" | "basic" | "string", user: string, password: string, webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### postToSlackChannel\nSend a message to a Slack channel via a connected bot.\n- The user is responsible for connecting their Slack workspace and selecting the channel\n- Supports both simple text messages and slack blocks messages\n- Text messages can use limited markdown (slack-only formatting\u2014e.g., headers are just rendered as bold)\n- Input: `{ channelId: string, messageType: "string" | "blocks", message: string, connectionId?: string }`\n- Output: `unknown`\n\n#### postToZapier\nSend data to a Zapier Zap via webhook and return the response.\n- The webhook URL must be configured in the Zapier Zap settings\n- Input keys and values are sent as the JSON body of the POST request\n- The webhook response (JSON or plain text) is returned as the output\n- Input: `{ webhookUrl: string, input: object }`\n- Output: `{ data: unknown }`\n\n#### queryAppDatabase\nExecute a SQL query against the app managed database.\n- Executes raw SQL against a SQLite database managed by the app.\n- For SELECT queries, returns rows as JSON.\n- For INSERT/UPDATE/DELETE, returns the number of affected rows.\n- Use {{variables}} directly in your SQL. 
By default they are automatically extracted\nand passed as safe parameterized values (preventing SQL injection).\nExample: INSERT INTO contacts (name, comment) VALUES ({{name}}, {{comment}})\n- Full MindStudio handlebars syntax is supported, including helpers like {{json myVar}},\n{{get myVar "$.path"}}, {{global.orgName}}, etc.\n- Set parameterize to false for raw/dynamic SQL where variables are interpolated directly\ninto the query string. Use this when another step generates full or partial SQL, e.g.\na bulk INSERT with a precomputed VALUES list. The user is responsible for sanitization\nwhen parameterize is false.\n- Input: `{ databaseId: string, sql: string, parameterize?: boolean }`\n- Output: `{ rows: unknown[], changes: number }`\n\n#### queryDataSource\nSearch a vector data source (RAG) and return relevant document chunks.\n- Queries a vectorized data source and returns the most relevant chunks.\n- Useful for retrieval-augmented generation (RAG) workflows.\n- Input: `{ dataSourceId: string, query: string, maxResults: number }`\n- Output: `{ text: string, chunks: string[], query: string, citations: unknown[], latencyMs: number }`\n\n#### queryExternalDatabase\nExecute a SQL query against an external database connected to the workspace.\n- Requires a database connection configured in the workspace.\n- Supports PostgreSQL (including Supabase), MySQL, and MSSQL.\n- Results can be returned as JSON or CSV.\n- Input: `{ connectionId?: string, query: string, outputFormat: "json" | "csv" }`\n- Output: `{ data: unknown }`\n\n#### redactPII\nReplace personally identifiable information in text with placeholders using Microsoft Presidio.\n- PII is replaced with entity type placeholders (e.g. 
"Call me at <PHONE_NUMBER>").\n- If entities is empty, returns empty text immediately without processing.\n- Input: `{ input: string, language: string, entities: string[] }`\n- Output: `{ text: string }`\n\n#### removeBackgroundFromImage\nRemove the background from an image using AI, producing a transparent PNG.\n- Uses the Bria background removal model via fal.ai.\n- Output is re-hosted on the CDN as a PNG with transparency.\n- Input: `{ imageUrl: string }`\n- Output: `{ imageUrl: string }`\n\n#### resizeVideo\nResize a video file\n- Input: `{ videoUrl: string, mode: "fit" | "exact", maxWidth?: number, maxHeight?: number, width?: number, height?: number, strategy?: "pad" | "crop", intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### runFromConnectorRegistry\nRun a raw API connector to a third-party service\n- Use the /developer/v2/helpers/connectors endpoint to list available services and actions.\n- Use /developer/v2/helpers/connectors/{serviceId}/{actionId} to get the full input configuration for an action.\n- Use /developer/v2/helpers/connections to list your available OAuth connections.\n- The actionId format is "serviceId/actionId" (e.g., "slack/send-message").\n- Pass a __connectionId to authenticate the request with a specific OAuth connection, otherwise the default will be used (if configured).\n- Input: `{ actionId: string, displayName: string, icon: string, configurationValues: object, __connectionId?: string }`\n- Output: `{ data: object }`\n\n#### runPackagedWorkflow\nRun a packaged workflow ("custom block")\n- From the user\'s perspective, packaged workflows are just ordinary blocks. 
Behind the scenes, they operate like packages/libraries in a programming language, letting the user execute custom functionality.\n- Some of these packaged workflows are available as part of MindStudio\'s "Standard Library" and are available to every user.\n- Available packaged workflows are documented here as individual blocks, but the runPackagedWorkflow block is how they need to be wrapped in order to be executed correctly.\n- Input: `{ appId: string, workflowId: string, inputVariables: object, outputVariables: object, name: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeLinkedInCompany\nScrape public company data from a LinkedIn company page.\n- Requires a LinkedIn company URL (e.g. https://www.linkedin.com/company/mindstudioai).\n- Returns structured company data including description, employees, updates, and similar companies.\n- Input: `{ url: string }`\n- Output: `{ company: unknown }`\n\n#### scrapeLinkedInProfile\nScrape public profile data from a LinkedIn profile page.\n- Requires a LinkedIn profile URL (e.g. 
https://www.linkedin.com/in/username).\n- Returns structured profile data including experience, education, articles, and activities.\n- Input: `{ url: string }`\n- Output: `{ profile: unknown }`\n\n#### scrapeUrl\nExtract text, HTML, or structured content from one or more web pages.\n- Accepts a single URL or multiple URLs (as a JSON array, comma-separated, or newline-separated).\n- Output format controls the result shape: "text" returns markdown, "html" returns raw HTML, "json" returns structured scraper data.\n- Can optionally capture a screenshot of each page.\n- Input: `{ url: string, service?: "default" | "firecrawl", autoEnhance?: boolean, pageOptions?: { onlyMainContent: boolean, screenshot: boolean, waitFor: number, replaceAllPathsWithAbsolutePaths: boolean, headers: object, removeTags: string[], mobile: boolean } }`\n- Output: `{ content: string | string[] | { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } | { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } }[], screenshot?: string }`\n\n#### scrapeXPost\nScrape data from a single X (Twitter) post by URL.\n- Returns structured post data (text, html, optional json/screenshot/metadata).\n- Optionally saves the text content to a variable.\n- Input: `{ url: string }`\n- Output: `{ post: { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } }`\n\n#### scrapeXProfile\nScrape public profile data from an X (Twitter) account by URL.\n- Returns structured profile data.\n- Optionally saves the result to a variable.\n- Input: `{ url: string }`\n- Output: `{ profile: { text: string, html: string, json?: object, screenshotUrl?: string, metadata?: { title: string, description: string, url: string, image: string } } }`\n\n#### 
searchGmailEmails\nSearch for emails in the connected Gmail account using a Gmail search query. To list recent inbox emails, pass an empty query string.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Uses Gmail search syntax (e.g. "from:user@example.com", "subject:invoice", "is:unread").\n- To list recent inbox emails, use an empty query string or "in:inbox".\n- Returns up to 100 emails (default 5). The variable receives text or JSON depending on exportType.\n- The direct execution output always returns structured email objects.\n- Input: `{ query: string, connectionId?: string, exportType: "json" | "text", limit: string }`\n- Output: `{ emails: { id: string, subject: string, from: string, to: string, date: string, plainBody: string, htmlBody: string, labels: string }[] }`\n\n#### searchGoogle\nSearch the web using Google and return structured results.\n- Defaults to us/english, but can optionally specify country and/or language.\n- Defaults to any time, but can optionally specify last hour, last day, week, month, or year.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, languageCode?: string, dateRange?: "hour" | "day" | "week" | "month" | "year" | "any", numResults?: number }`\n- Output: `{ results: { title: string, description: string, url: string }[] }`\n\n#### searchGoogleImages\nSearch Google Images and return image results with URLs and metadata.\n- Defaults to us/english, but can optionally specify country and/or language.\n- Defaults to any time, but can optionally specify last hour, last day, week, month, or year.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, languageCode?: string, dateRange?: "hour" | "day" | "week" | "month" | "year" | "any", numResults?: number }`\n- Output: `{ images: { title: string, imageUrl: string, 
imageWidth: number, imageHeight: number, thumbnailUrl: string, thumbnailWidth: number, thumbnailHeight: number, source: string, domain: string, link: string, googleUrl: string, position: number }[] }`\n\n#### searchGoogleNews\nSearch Google News for recent news articles matching a query.\n- Defaults to top 30 results, but can specify 1 to 100 results to return.\n- Input: `{ text: string, exportType: "text" | "json", numResults?: number }`\n- Output: `{ articles: { title: string, link: string, date: string, source: { name: string }, snippet?: string }[] }`\n\n#### searchGoogleTrends\nFetch Google Trends data for a search term.\n- date accepts shorthand ("now 1-H", "today 1-m", "today 5-y", etc.) or custom "yyyy-mm-dd yyyy-mm-dd" ranges.\n- data_type controls the shape of returned data: TIMESERIES, GEO_MAP, GEO_MAP_0, RELATED_TOPICS, or RELATED_QUERIES.\n- Input: `{ text: string, hl: string, geo: string, data_type: "TIMESERIES" | "GEO_MAP" | "GEO_MAP_0" | "RELATED_TOPICS" | "RELATED_QUERIES", cat: string, date: string, ts: string }`\n- Output: `{ trends: object }`\n\n#### searchPerplexity\nSearch the web using the Perplexity API and return structured results.\n- Defaults to US results. Use countryCode (ISO code) to filter by country.\n- Returns 10 results by default, configurable from 1 to 20.\n- The variable receives text or JSON depending on exportType. The direct execution output always returns structured results.\n- Input: `{ query: string, exportType: "text" | "json", countryCode?: string, numResults?: number }`\n- Output: `{ results: { title: string, description: string, url: string }[] }`\n\n#### sendEmail\nSend an email to one or more configured recipient addresses.\n- Recipient email addresses are resolved from OAuth connections configured by the app creator. 
The user running the workflow does not specify the recipient directly.\n- If the body is a URL to a hosted HTML file on the CDN, the HTML is fetched and used as the email body.\n- When generateHtml is enabled, the body text is converted to a styled HTML email using an AI model.\n- connectionId can be a comma-separated list to send to multiple recipients.\n- The special connectionId "trigger_email" uses the email address that triggered the workflow.\n- Input: `{ subject: string, body: string, connectionId?: string, generateHtml?: boolean, generateHtmlInstructions?: string, generateHtmlModelOverride?: { model: string, temperature: number, maxResponseTokens: number, ignorePreamble?: boolean, userMessagePreprocessor?: { dataSource?: string, messageTemplate?: string, maxResults?: number, enabled?: boolean, shouldInherit?: boolean }, preamble?: string, multiModelEnabled?: boolean, editResponseEnabled?: boolean, config?: object }, attachments?: string[] }`\n- Output: `{ recipients: string[] }`\n\n#### sendGmailDraft\nSend an existing draft from the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- The draft is sent and removed from the Drafts folder.\n- Use the draft ID returned by the Create Gmail Draft or List Gmail Drafts steps.\n- Input: `{ draftId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### sendGmailMessage\nSend an email from the connected Gmail account.\n- Requires a Google OAuth connection with Gmail compose scope.\n- messageType controls the body format: "plain" for plain text, "html" for raw HTML, "markdown" for auto-converted markdown.\n- Input: `{ to: string, subject: string, message: string, connectionId?: string, messageType: "plain" | "html" | "markdown" }`\n- Output: `{ messageId: string }`\n\n#### sendSMS\nSend an SMS or MMS message to a phone number configured via OAuth connection.\n- User is responsible for configuring the connection to the number (MindStudio requires double opt-in to prevent 
spam)\n- If mediaUrls are provided, the message is sent as MMS instead of SMS\n- MMS supports up to 10 media URLs (images, video, audio, PDF) with a 5MB limit per file\n- MMS is only supported on US and Canadian carriers; international numbers will receive SMS only (media silently dropped)\n- Input: `{ body: string, connectionId?: string, mediaUrls?: string[] }`\n- Output: `unknown`\n\n#### setGmailReadStatus\nMark one or more Gmail emails as read or unread.\n- Requires a Google OAuth connection with Gmail modify scope.\n- Accepts one or more message IDs as a comma-separated string or array.\n- Set markAsRead to true to mark as read, false to mark as unread.\n- Input: `{ messageIds: string, markAsRead: boolean, connectionId?: string }`\n- Output: `unknown`\n\n#### setRunTitle\nSet the title of the agent run for the user\'s history\n- Input: `{ title: string }`\n- Output: `unknown`\n\n#### setVariable\nExplicitly set a variable to a given value.\n- Useful for bootstrapping global variables or setting constants.\n- The variable name and value both support variable interpolation.\n- The type field is a UI hint only (controls input widget in the editor).\n- Input: `{ value: string | string[] }`\n- Output: `object`\n\n#### telegramEditMessage\nEdit a previously sent Telegram message. Use with the message ID returned by Send Telegram Message.\n- Only text messages sent by the bot can be edited.\n- The messageId is returned by the Send Telegram Message step.\n- Common pattern: send a "Processing..." message, do work, then edit it with the result.\n- Input: `{ botToken: string, chatId: string, messageId: string, text: string }`\n- Output: `unknown`\n\n#### telegramReplyToMessage\nSend a reply to a specific Telegram message. 
The reply will be visually threaded in the chat.\n- Use the rawMessage.message_id from the incoming trigger variables to reply to the user\'s message.\n- Especially useful in group chats where replies provide context.\n- Returns the sent message ID, which can be used with Edit Telegram Message.\n- Input: `{ botToken: string, chatId: string, replyToMessageId: string, text: string }`\n- Output: `{ messageId: number }`\n\n#### telegramSendAudio\nSend an audio file to a Telegram chat as music or a voice note via a bot.\n- "audio" mode sends as a standard audio file. "voice" mode sends as a voice message (re-uploads the file for large file support).\n- Input: `{ botToken: string, chatId: string, audioUrl: string, mode: "audio" | "voice", caption?: string }`\n- Output: `unknown`\n\n#### telegramSendFile\nSend a document/file to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, fileUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSendImage\nSend an image to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, imageUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSendMessage\nSend a text message to a Telegram chat via a bot.\n- Messages are sent using MarkdownV2 formatting. Special characters are auto-escaped.\n- botToken format is "botId:token" \u2014 both parts are required.\n- Returns the sent message ID, which can be used with Edit Telegram Message to update the message later.\n- Input: `{ botToken: string, chatId: string, text: string }`\n- Output: `{ messageId: number }`\n\n#### telegramSendVideo\nSend a video to a Telegram chat via a bot.\n- Input: `{ botToken: string, chatId: string, videoUrl: string, caption?: string }`\n- Output: `unknown`\n\n#### telegramSetTyping\nShow the "typing..." indicator in a Telegram chat via a bot.\n- The typing indicator automatically expires after a few seconds. 
Use this right before sending a message for a natural feel.\n- Input: `{ botToken: string, chatId: string }`\n- Output: `unknown`\n\n#### textToSpeech\nGenerate an audio file from provided text using a speech model.\n- The text field contains the exact words to be spoken (not instructions).\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.\n- Input: `{ text: string, intermediateAsset?: boolean, speechModelOverride?: { model: string, config?: object } }`\n- Output: `{ audioUrl: string }`\n\n#### transcribeAudio\nConvert an audio file to text using a transcription model.\n- The prompt field provides optional context to improve transcription accuracy (e.g. language, speaker names, domain).\n- Input: `{ audioUrl: string, prompt: string, transcriptionModelOverride?: { model: string, config?: object } }`\n- Output: `{ text: string }`\n\n#### trimMedia\nTrim an audio or video clip\n- Input: `{ inputUrl: string, start?: number | string, duration?: string | number, intermediateAsset?: boolean }`\n- Output: `{ mediaUrl: string }`\n\n#### updateGmailLabels\nAdd or remove labels on Gmail messages, identified by message IDs or a search query.\n- Requires a Google OAuth connection with Gmail modify scope.\n- Provide either a query (Gmail search syntax) or explicit messageIds to target messages.\n- Label IDs can be label names or Gmail label IDs \u2014 names are resolved automatically.\n- Input: `{ query: string, connectionId?: string, messageIds: string, addLabelIds: string, removeLabelIds: string }`\n- Output: `{ updatedMessageIds: string[] }`\n\n#### uploadDataSourceDocument\nUpload a file into an existing data source from a URL or raw text content.\n- If "file" is a single URL, the file is downloaded from that URL and uploaded.\n- If "file" is any other string, a .txt document is created from that content and uploaded.\n- The block waits (polls) for processing to complete before transitioning, up to 5 minutes.\n- 
Once processing finishes, vectors are loaded into Milvus so the data source is immediately queryable.\n- Supported file types (when using a URL) are the same as the data source upload UI (PDF, DOCX, TXT, etc.).\n- Input: `{ dataSourceId: string, file: string, fileName: string }`\n- Output: `unknown`\n\n#### upscaleImage\nIncrease the resolution of an image using AI upscaling.\n- Output is re-hosted on the CDN as a PNG.\n- Input: `{ imageUrl: string, targetResolution: "2k" | "4k" | "8k", engine: "standard" | "pro" }`\n- Output: `{ imageUrl: string }`\n\n#### upscaleVideo\nUpscale a video file\n- Input: `{ videoUrl: string, targetResolution: "720p" | "1080p" | "2K" | "4K", engine: "standard" | "pro" | "ultimate" | "flashvsr" | "seedance" | "seedvr2" | "runwayml/upscale-v1", intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoFaceSwap\nSwap faces in a video file\n- Input: `{ videoUrl: string, faceImageUrl: string, targetIndex: number, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoRemoveBackground\nRemove or replace background from a video\n- Input: `{ videoUrl: string, newBackground: "transparent" | "image", newBackgroundImageUrl?: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### videoRemoveWatermark\nRemove a watermark from a video\n- Input: `{ videoUrl: string, engine: string, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n#### watermarkImage\nOverlay a watermark image onto another image.\n- The watermark is placed at the specified corner with configurable padding and width.\n- Input: `{ imageUrl: string, watermarkImageUrl: string, corner: "top-left" | "top-right" | "bottom-left" | "bottom-right", paddingPx: number, widthPx: number, intermediateAsset?: boolean }`\n- Output: `{ imageUrl: string }`\n\n#### watermarkVideo\nAdd an image watermark to a video\n- Input: `{ videoUrl: string, imageUrl: string, corner: "top-left" | 
"top-right" | "bottom-left" | "bottom-right", paddingPx: number, widthPx: number, intermediateAsset?: boolean }`\n- Output: `{ videoUrl: string }`\n\n### ActiveCampaign\n\n#### activeCampaignAddNote\nAdd a note to an existing contact in ActiveCampaign.\n- Requires an ActiveCampaign OAuth connection (connectionId).\n- The contact must already exist \u2014 use the contact ID from a previous create or search step.\n- Input: `{ contactId: string, note: string, connectionId?: string }`\n- Output: `unknown`\n\n#### activeCampaignCreateContact\nCreate or sync a contact in ActiveCampaign.\n- Requires an ActiveCampaign OAuth connection (connectionId).\n- If a contact with the email already exists, it may be updated depending on ActiveCampaign settings.\n- Custom fields are passed as a key-value map where keys are field IDs.\n- Input: `{ email: string, firstName: string, lastName: string, phone: string, accountId: string, customFields: object, connectionId?: string }`\n- Output: `{ contactId: string }`\n\n### Airtable\n\n#### airtableCreateUpdateRecord\nCreate a new record or update an existing record in an Airtable table.\n- If recordId is provided, updates that record. Otherwise, creates a new one.\n- When updating with updateMode "onlySpecified", unspecified fields are left as-is. With "all", unspecified fields are cleared.\n- Array fields (e.g. 
multipleAttachments) accept arrays of values.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId?: string, updateMode?: "onlySpecified" | "all", fields: unknown, recordData: object }`\n- Output: `{ recordId: string }`\n\n#### airtableDeleteRecord\nDelete a record from an Airtable table by its record ID.\n- Requires an active Airtable OAuth connection (connectionId).\n- Silently succeeds if the record does not exist.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId: string }`\n- Output: `{ deleted: boolean }`\n\n#### airtableGetRecord\nFetch a single record from an Airtable table by its record ID.\n- Requires an active Airtable OAuth connection (connectionId).\n- If the record is not found, returns a string message instead of a record object.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, recordId: string }`\n- Output: `{ record: { id: string, createdTime: string, fields: object } | null }`\n\n#### airtableGetTableRecords\nFetch multiple records from an Airtable table with optional pagination.\n- Requires an active Airtable OAuth connection (connectionId).\n- Default limit is 100 records. Maximum is 1000.\n- When outputFormat is \'csv\', the variable receives CSV text. The direct execution output always returns parsed records.\n- Input: `{ connectionId?: string, baseId: string, tableId: string, outputFormat?: "json" | "csv", limit?: number }`\n- Output: `{ records: { id: string, createdTime: string, fields: object }[] }`\n\n### Apollo\n\n#### enrichPerson\nLook up professional information about a person using Apollo.io. 
Search by ID, name, LinkedIn URL, email, or domain.\n- At least one search parameter must be provided.\n- Returns enriched data from Apollo including contact details, employment info, and social profiles.\n- Input: `{ params: { id: string, name: string, linkedinUrl: string, email: string, domain: string } }`\n- Output: `{ data: unknown }`\n\n#### peopleSearch\nSearch for people matching specific criteria using Apollo.io. Supports natural language queries and advanced filters.\n- Can use a natural language "smartQuery" which is converted to Apollo search parameters by an AI model.\n- Advanced params can override or supplement the smart query results.\n- Optionally enriches returned people and/or their organizations for additional detail.\n- Results are paginated. Use limit and page to control the result window.\n- Input: `{ smartQuery: string, enrichPeople: boolean, enrichOrganizations: boolean, limit: string, page: string, params: { personTitles: string, includeSimilarTitles: string, qKeywords: string, personLocations: string, personSeniorities: string, organizationLocations: string, qOrganizationDomainsList: string, contactEmailStatus: string, organizationNumEmployeesRanges: string, revenueRangeMin: string, revenueRangeMax: string, currentlyUsingAllOfTechnologyUids: string, currentlyUsingAnyOfTechnologyUids: string, currentlyNotUsingAnyOfTechnologyUids: string } }`\n- Output: `{ results: unknown }`\n\n### Coda\n\n#### codaCreateUpdatePage\nCreate a new page or update an existing page in a Coda document.\n- Requires a Coda OAuth connection (connectionId).\n- If pageData.pageId is provided, updates that page. 
Otherwise, creates a new one.\n- Page content is provided as markdown and converted to Coda\'s canvas format.\n- When updating, insertionMode controls how content is applied (default: \'append\').\n- Input: `{ connectionId?: string, pageData: { docId: string, pageId?: string, name: string, subtitle: string, iconName: string, imageUrl: string, parentPageId?: string, pageContent: string | unknown, contentUpdate?: unknown, insertionMode?: string } }`\n- Output: `{ pageId: string }`\n\n#### codaCreateUpdateRow\nCreate a new row or update an existing row in a Coda table.\n- Requires a Coda OAuth connection (connectionId).\n- If rowId is provided, updates that row. Otherwise, creates a new one.\n- Row data keys are column IDs. Empty values are excluded.\n- Input: `{ connectionId?: string, docId: string, tableId: string, rowId?: string, rowData: object }`\n- Output: `{ rowId: string }`\n\n#### codaFindRow\nSearch for a row in a Coda table by matching column values.\n- Requires a Coda OAuth connection (connectionId).\n- Returns the first row matching all specified column values, or null if no match.\n- Search criteria in rowData are ANDed together.\n- Input: `{ connectionId?: string, docId: string, tableId: string, rowData: object }`\n- Output: `{ row: { id: string, values: object } | null }`\n\n#### codaGetPage\nExport and read the contents of a page from a Coda document.\n- Requires a Coda OAuth connection (connectionId).\n- Page export is asynchronous on Coda\'s side \u2014 there may be a brief delay while it processes.\n- If a page was just created in a prior step, there is an automatic 20-second retry if the first export attempt fails.\n- Input: `{ connectionId?: string, docId: string, pageId: string, outputFormat?: "html" | "markdown" }`\n- Output: `{ content: string }`\n\n#### codaGetTableRows\nFetch rows from a Coda table with optional pagination.\n- Requires a Coda OAuth connection (connectionId).\n- Default limit is 10000 rows. 
Rows are fetched in pages of 500.\n- When outputFormat is \'csv\', the variable receives CSV text. The direct execution output always returns parsed rows.\n- Input: `{ connectionId?: string, docId: string, tableId: string, limit?: number | string, outputFormat?: "json" | "csv" }`\n- Output: `{ rows: { id: string, values: object }[] }`\n\n### Facebook\n\n#### scrapeFacebookPage\nScrape a Facebook page\n- Input: `{ pageUrl: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeFacebookPosts\nGet all the posts for a Facebook page\n- Input: `{ pageUrl: string }`\n- Output: `{ data: unknown }`\n\n### Gmail\n\n#### deleteGmailEmail\nMove an email to trash in the connected Gmail account (recoverable delete).\n- Requires a Google OAuth connection with Gmail modify scope.\n- Uses trash (recoverable) rather than permanent delete.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `unknown`\n\n#### getGmailDraft\nRetrieve a specific draft from Gmail by draft ID.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the draft content including subject, recipients, sender, and body.\n- Input: `{ draftId: string, connectionId?: string }`\n- Output: `{ draftId: string, messageId: string, subject: string, to: string, from: string, body: string }`\n\n#### getGmailEmail\nRetrieve a specific email from Gmail by message ID.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns the email subject, sender, recipient, date, body (plain text preferred, falls back to HTML), and labels.\n- Input: `{ messageId: string, connectionId?: string }`\n- Output: `{ messageId: string, subject: string, from: string, to: string, date: string, body: string, labels: string }`\n\n#### listGmailDrafts\nList drafts in the connected Gmail account.\n- Requires a Google OAuth connection with Gmail readonly scope.\n- Returns up to 50 drafts (default 10).\n- The variable receives text or JSON depending on exportType.\n- Input: `{ connectionId?: string, 
limit?: string, exportType: "json" | "text" }`\n- Output: `{ drafts: { draftId: string, messageId: string, subject: string, to: string, snippet: string }[] }`\n\n#### replyToGmailEmail\nReply to an existing email in Gmail. The reply is threaded under the original message.\n- Requires a Google OAuth connection with Gmail compose and readonly scopes.\n- The reply is sent to the original sender and threaded under the original message.\n- messageType controls the body format: "plain", "html", or "markdown".\n- Input: `{ messageId: string, message: string, messageType: "plain" | "html" | "markdown", connectionId?: string }`\n- Output: `{ messageId: string }`\n\n### Google\n\n#### createGoogleDoc\nCreate a new Google Document and optionally populate it with content.\n- textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.\n- Input: `{ title: string, text: string, connectionId?: string, textType: "plain" | "html" | "markdown" }`\n- Output: `{ documentUrl: string }`\n\n#### createGoogleSheet\nCreate a new Google Spreadsheet and populate it with CSV data.\n- Input: `{ title: string, text: string, connectionId?: string }`\n- Output: `{ spreadsheetUrl: string }`\n\n#### deleteGoogleSheetRows\nDelete a range of rows from a Google Spreadsheet.\n- Requires a Google OAuth connection with Drive scope.\n- startRow and endRow are 1-based row numbers (inclusive).\n- If sheetName is omitted, operates on the first sheet.\n- Input: `{ documentId: string, sheetName?: string, startRow: string, endRow: string, connectionId?: string }`\n- Output: `unknown`\n\n#### fetchGoogleDoc\nFetch the contents of an existing Google Document.\n- exportType controls the output format: "html" for HTML markup, "markdown" for Markdown, "json" for structured JSON, "plain" for plain text.\n- Input: `{ documentId: string, connectionId?: string, exportType: "html" | "markdown" | "json" | "plain" }`\n- Output: `{ content: string }`\n\n#### 
fetchGoogleSheet\nFetch contents of a Google Spreadsheet range.\n- range uses A1 notation (e.g. "Sheet1!A1:C10"). Omit to fetch the entire first sheet.\n- exportType controls the output format: "csv" for comma-separated values, "json" for structured JSON.\n- Input: `{ spreadsheetId: string, range: string, connectionId?: string, exportType: "csv" | "json" }`\n- Output: `{ content: string }`\n\n#### getGoogleSheetInfo\nGet metadata about a Google Spreadsheet including sheet names, row counts, and column counts.\n- Requires a Google OAuth connection with Drive scope.\n- Returns the spreadsheet title and a list of all sheets with their dimensions.\n- Input: `{ documentId: string, connectionId?: string }`\n- Output: `{ title: string, sheets: { sheetId: number, title: string, rowCount: number, columnCount: number }[] }`\n\n#### updateGoogleDoc\nUpdate the contents of an existing Google Document.\n- operationType controls how content is applied: "addToTop" prepends, "addToBottom" appends, "overwrite" replaces all content.\n- textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.\n- Input: `{ documentId: string, connectionId?: string, text: string, textType: "plain" | "html" | "markdown", operationType: "addToTop" | "addToBottom" | "overwrite" }`\n- Output: `{ documentUrl: string }`\n\n#### updateGoogleSheet\nUpdate a Google Spreadsheet with new data.\n- operationType controls how data is written: "addToBottom" appends rows, "overwrite" replaces all data, "range" writes to a specific cell range.\n- Data should be provided as CSV in the text field.\n- Input: `{ text: string, connectionId?: string, spreadsheetId: string, range: string, operationType: "addToBottom" | "overwrite" | "range" }`\n- Output: `{ spreadsheetUrl: string }`\n\n### Google Calendar\n\n#### createGoogleCalendarEvent\nCreate a new event on a Google Calendar.\n- Requires a Google OAuth connection with Calendar events scope.\n- 
Date/time values must be ISO 8601 format (e.g. "2025-07-02T10:00:00-07:00").\n- Attendees are specified as one email address per line in a single string.\n- Set addMeetLink to true to automatically attach a Google Meet video call.\n- Input: `{ connectionId?: string, summary: string, description?: string, location?: string, startDateTime: string, endDateTime: string, attendees?: string, addMeetLink?: boolean, calendarId?: string }`\n- Output: `{ eventId: string, htmlLink: string }`\n\n#### deleteGoogleCalendarEvent\nDelete a specific event from a Google Calendar by event ID.\n- Requires a Google OAuth connection with Calendar events scope.\n- The event is removed from the calendar; no event data is returned.\n- Input: `{ connectionId?: string, eventId: string, calendarId?: string }`\n- Output: `unknown`\n\n#### getGoogleCalendarEvent\nRetrieve a specific event from a Google Calendar by event ID.\n- Requires a Google OAuth connection with Calendar events scope.\n- The variable receives JSON or XML-like text depending on exportType. 
The direct execution output always returns the structured event.\n- Input: `{ connectionId?: string, eventId: string, exportType: "json" | "text", calendarId?: string }`\n- Output: `{ event: { id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null } }`\n\n#### listGoogleCalendarEvents\nList upcoming events from a Google Calendar, ordered by start time.\n- Requires a Google OAuth connection with Calendar events scope.\n- Only returns future events (timeMin = now).\n- The variable receives JSON or XML-like text depending on exportType. 
The direct execution output always returns structured events.\n- Input: `{ connectionId?: string, limit: number, exportType: "json" | "text", calendarId?: string }`\n- Output: `{ events: ({ id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null })[] }`\n\n#### searchGoogleCalendarEvents\nSearch for events in a Google Calendar by keyword, date range, or both.\n- Requires a Google OAuth connection with Calendar events scope.\n- Supports keyword search via "query" and date filtering via "timeMin"/"timeMax" (ISO 8601 format).\n- Unlike "List Events" which only shows future events, this allows searching past events too.\n- Input: `{ query?: string, timeMin?: string, timeMax?: string, calendarId?: string, limit?: number, exportType: "json" | "text", connectionId?: string }`\n- Output: `{ events: ({ id?: string | null, status?: string | null, htmlLink?: string | null, created?: string | null, updated?: string | null, summary?: string | null, description?: string | null, location?: string | null, organizer?: { displayName?: string | null, email?: string | null } | null, start?: { dateTime?: string | null, timeZone?: string | null } | null, end?: { dateTime?: string | null, timeZone?: string | null } | null, attendees?: ({ displayName?: string | null, email?: string | null, responseStatus?: string | null })[] | null })[] }`\n\n#### updateGoogleCalendarEvent\nUpdate an existing event on a Google Calendar. 
Only specified fields are changed.\n- Requires a Google OAuth connection with Calendar events scope.\n- Fetches the existing event first, then applies only the provided updates. Omitted fields are left unchanged.\n- Attendees are specified as one email address per line, and replace the entire attendee list.\n- Input: `{ connectionId?: string, eventId: string, summary?: string, description?: string, location?: string, startDateTime?: string, endDateTime?: string, attendees?: string, calendarId?: string }`\n- Output: `{ eventId: string, htmlLink: string }`\n\n### Google Drive\n\n#### getGoogleDriveFile\nDownload a file from Google Drive and rehost it on the CDN. Returns a public CDN URL.\n- Requires a Google OAuth connection with Drive scope.\n- Google-native files (Docs, Sheets, Slides) cannot be downloaded \u2014 use dedicated steps instead.\n- Maximum file size: 200MB.\n- The file is downloaded and re-uploaded to the CDN; the returned URL is publicly accessible.\n- Input: `{ fileId: string, connectionId?: string }`\n- Output: `{ url: string, name: string, mimeType: string, size: number }`\n\n#### listGoogleDriveFiles\nList files in a Google Drive folder.\n- Requires a Google OAuth connection with Drive scope.\n- If folderId is omitted, lists files in the root folder.\n- Returns file metadata including name, type, size, and links.\n- Input: `{ folderId?: string, limit?: number, connectionId?: string, exportType: "json" | "text" }`\n- Output: `{ files: { id: string, name: string, mimeType: string, size: string, webViewLink: string, createdTime: string, modifiedTime: string }[] }`\n\n#### searchGoogleDrive\nSearch for files in Google Drive by keyword.\n- Requires a Google OAuth connection with Drive scope.\n- Searches file content and names using Google Drive\'s fullText search.\n- Input: `{ query: string, limit?: number, connectionId?: string, exportType: "json" | "text" }`\n- Output: `{ files: { id: string, name: string, mimeType: string, size: string, webViewLink: 
string, createdTime: string, modifiedTime: string }[] }`\n\n### HubSpot\n\n#### hubspotCreateCompany\nCreate a new company or update an existing one in HubSpot. Matches by domain.\n- Requires a HubSpot OAuth connection (connectionId).\n- If a company with the given domain already exists, it is updated. Otherwise, a new one is created.\n- Property values are type-checked against enabledProperties before being sent to HubSpot.\n- Input: `{ connectionId?: string, company: { domain: string, name: string }, enabledProperties: ({ label: string, value: string, type: "string" | "number" | "bool" })[] }`\n- Output: `{ companyId: string }`\n\n#### hubspotCreateContact\nCreate a new contact or update an existing one in HubSpot. Matches by email address.\n- Requires a HubSpot OAuth connection (connectionId).\n- If a contact with the given email already exists, it is updated. Otherwise, a new one is created.\n- If companyDomain is provided, the contact is associated with that company (creating the company if needed).\n- Property values are type-checked against enabledProperties before being sent to HubSpot.\n- Input: `{ connectionId?: string, contact: { email: string, firstname: string, lastname: string }, enabledProperties: ({ label: string, value: string, type: "string" | "number" | "bool" })[], companyDomain: string }`\n- Output: `{ contactId: string }`\n\n#### hubspotGetCompany\nLook up a HubSpot company by domain name or company ID.\n- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the company is not found.\n- When searching by domain, performs a search query then fetches the full company record.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.\n- Input: `{ connectionId?: string, searchBy: "domain" | "id", companyDomain: string, companyId: string, additionalProperties: string[] }`\n- Output: `{ company: { id: string, properties: object, createdAt: string, updatedAt: string, archived: boolean } | null }`\n\n#### 
hubspotGetContact\nLook up a HubSpot contact by email address or contact ID.\n- Requires a HubSpot OAuth connection (connectionId).\n- Returns null if the contact is not found.\n- Use additionalProperties to request specific HubSpot properties beyond the defaults.\n- Input: `{ connectionId?: string, searchBy: "email" | "id", contactEmail: string, contactId: string, additionalProperties: string[] }`\n- Output: `{ contact: { id: string, properties: object, createdAt: string, updatedAt: string, archived: boolean } | null }`\n\n### Hunter.io\n\n#### hunterApiCompanyEnrichment\nLook up company information by domain using Hunter.io.\n- Returns company name, description, location, industry, size, technologies, and more.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns null if the company is not found.\n- Input: `{ domain: string }`\n- Output: `{ data: { name: string, domain: string, description: string | null, country: string | null, state: string | null, city: string | null, industry: string | null, employees_range: string | null, logo_url: string | null, technologies: string[] } | null }`\n\n#### hunterApiDomainSearch\nSearch for email addresses associated with a domain using Hunter.io.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns a list of email addresses found for the domain along with organization info.\n- Input: `{ domain: string }`\n- Output: `{ data: { domain: string, disposable: boolean, webmail: boolean, accept_all: boolean, pattern: string, organization: string, country: string | null, state: string | null, emails: ({ value: string, type: string, confidence: number, first_name: string | null, last_name: string | null, position: string | null, seniority: string | null, department: string | null, linkedin: string | null, twitter: string | null, phone_number: string | null })[], linked_domains: string[] } }`\n\n#### hunterApiEmailFinder\nFind an email address for a specific 
person at a domain using Hunter.io.\n- Requires a first name, last name, and domain.\n- If the domain input is a full URL, the hostname is automatically extracted.\n- Returns the most likely email address with a confidence score.\n- Input: `{ domain: string, firstName: string, lastName: string }`\n- Output: `{ data: { first_name: string, last_name: string, email: string, score: number, domain: string, accept_all: boolean, position: string | null, twitter: string | null, linkedin_url: string | null, phone_number: string | null, company: string | null, sources: { domain: string, uri: string, extracted_on: string }[] } }`\n\n#### hunterApiEmailVerification\nVerify whether an email address is valid and deliverable using Hunter.io.\n- Checks email format, MX records, SMTP server, and mailbox deliverability.\n- Returns a status ("valid", "invalid", "accept_all", "webmail", "disposable", "unknown") and a score.\n- Input: `{ email: string }`\n- Output: `{ data: { status: string, result: string, score: number, email: string, regexp: boolean, gibberish: boolean, disposable: boolean, webmail: boolean, mx_records: boolean, smtp_server: boolean, smtp_check: boolean, accept_all: boolean, block: boolean, sources: { domain: string, uri: string, extracted_on: string }[] } }`\n\n#### hunterApiPersonEnrichment\nLook up professional information about a person by their email address using Hunter.io.\n- Returns name, job title, social profiles, and company information.\n- If the person is not found, returns an object with an error message instead of throwing.\n- Input: `{ email: string }`\n- Output: `{ data: { first_name: string, last_name: string, email: string, position: string | null, seniority: string | null, department: string | null, linkedin_url: string | null, twitter: string | null, phone_number: string | null, company: { name: string, domain: string, industry: string | null } | null } | { error: string } }`\n\n### Instagram\n\n#### scrapeInstagramComments\nGet all the comments 
for an Instagram post\n- Input: `{ postUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramMentions\nScrape an Instagram profile\'s mentions\n- Input: `{ profileUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramPosts\nGet all the posts for an Instagram profile\n- Input: `{ profileUrl: string, resultsLimit: string, onlyPostsNewerThan: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramProfile\nScrape an Instagram profile\n- Input: `{ profileUrl: string }`\n- Output: `{ data: unknown }`\n\n#### scrapeInstagramReels\nGet all the reels for an Instagram profile\n- Input: `{ profileUrl: string, resultsLimit: string }`\n- Output: `{ data: unknown }`\n\n### LinkedIn\n\n#### postToLinkedIn\nCreate a post on LinkedIn from the connected account.\n- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, video posts, document posts, and article posts.\n- Attach one media type per post: image, video, document, or article.\n- Documents support PDF, PPT, PPTX, DOC, DOCX (max 100MB, 300 pages). 
Displays as a slideshow carousel.\n- Articles create a link preview with optional custom title, description, and thumbnail.\n- Visibility controls who can see the post.\n- Input: `{ message: string, visibility: "PUBLIC" | "CONNECTIONS", imageUrl?: string, videoUrl?: string, documentUrl?: string, articleUrl?: string, titleText?: string, descriptionText?: string, connectionId?: string }`\n- Output: `unknown`\n\n### Meta Threads\n\n#### scrapeMetaThreadsProfile\nScrape a Meta Threads profile\n- Input: `{ profileUrl: string }`\n- Output: `{ data: unknown }`\n\n### Notion\n\n#### notionCreatePage\nCreate a new page in Notion as a child of an existing page.\n- Requires a Notion OAuth connection (connectionId).\n- Content is provided as markdown and converted to Notion blocks (headings, paragraphs, lists, code, quotes).\n- The page is created as a child of the specified parent page (pageId).\n- Input: `{ pageId: string, content: string, title: string, connectionId?: string }`\n- Output: `{ pageId: string, pageUrl: string }`\n\n#### notionUpdatePage\nUpdate the content of an existing Notion page.\n- Requires a Notion OAuth connection (connectionId).\n- Content is provided as markdown and converted to Notion blocks.\n- "append" mode adds content to the end of the page. "overwrite" mode deletes all existing blocks first.\n- Input: `{ pageId: string, content: string, mode: "append" | "overwrite", connectionId?: string }`\n- Output: `{ pageId: string, pageUrl: string }`\n\n### X\n\n#### postToX\nCreate a post on X (Twitter) from the connected account.\n- Requires an X OAuth connection (connectionId).\n- Maximum 280 characters of text.\n- Optionally attach up to 4 media items (images, GIFs, or videos) via mediaUrls.\n- Media URLs must be publicly accessible. The service fetches and uploads them to X.\n- Supported formats: JPEG, PNG, GIF, WEBP, MP4. 
Images up to 5MB, videos up to 512MB.\n- Input: `{ text: string, connectionId?: string, mediaUrls?: string[] }`\n- Output: `unknown`\n\n#### searchXPosts\nSearch recent X (Twitter) posts matching a query.\n- Searches only the past 7 days of posts.\n- Query supports X API v2 search operators (up to 512 characters).\nAvailable search operators in query:\n| Operator | Description |\n| -----------------| -------------------------------------------------|\n| from: | Posts from a specific user (e.g., from:elonmusk) |\n| to: | Posts sent to a specific user (e.g., to:NASA) |\n| @ | Mentions a user (e.g., @openai) |\n| # | Hashtag search (e.g., #AI) |\n| is:retweet | Filters retweets |\n| is:reply | Filters replies |\n| has:media | Posts containing media (images, videos, or GIFs) |\n| has:links | Posts containing URLs |\n| lang: | Filters by language (e.g., lang:en) |\n| - | Excludes specific terms (e.g., -spam) |\n| () | Groups terms or operators (e.g., (AI OR ML)) |\n| AND, OR, NOT | Boolean logic for combining or excluding terms |\nConjunction-Required Operators (must be combined with a standalone operator):\n| Operator | Description |\n| ------------ | -----------------------------------------------|\n| has:media | Posts containing media (images, videos, or GIFs) |\n| has:links | Posts containing URLs |\n| is:retweet | Filters retweets |\n| is:reply | Filters replies |\nFor example, has:media alone is invalid, but #AI has:media is valid.\n- Input: `{ query: string, scope: "recent" | "all", options: { startTime?: string, endTime?: string, maxResults?: number } }`\n- Output: `{ posts: { id: string, authorId: string, dateCreated: string, text: string, stats: { retweets: number, replies: number, likes: number } }[] }`\n\n### YouTube\n\n#### fetchYoutubeCaptions\nRetrieve the captions/transcript for a YouTube video.\n- Supports multiple languages via the language parameter.\n- "text" export produces timestamped plain text; "json" export produces structured transcript 
data.\n- Input: `{ videoUrl: string, exportType: "text" | "json", language: string }`\n- Output: `{ transcripts: { text: string, start: number }[] }`\n\n#### fetchYoutubeChannel\nRetrieve metadata and recent videos for a YouTube channel.\n- Accepts a YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID).\n- Returns channel info and video listings as a JSON object.\n- Input: `{ channelUrl: string }`\n- Output: `{ channel: object }`\n\n#### fetchYoutubeComments\nRetrieve comments for a YouTube video.\n- Paginates through comments (up to 5 pages).\n- "text" export produces markdown-formatted text; "json" export produces structured comment data.\n- Input: `{ videoUrl: string, exportType: "text" | "json", limitPages: string }`\n- Output: `{ comments: { id: string, link: string, publishedDate: string, text: string, likes: number, replies: number, author: string, authorLink: string, authorImg: string }[] }`\n\n#### fetchYoutubeVideo\nRetrieve metadata for a YouTube video (title, description, stats, channel info).\n- Returns video metadata, channel info, and engagement stats.\n- Video format data is excluded from the response.\n- Input: `{ videoUrl: string }`\n- Output: `{ video: object }`\n\n#### searchYoutube\nSearch for YouTube videos by keyword.\n- Supports pagination (up to 5 pages) and country/language filters.\n- Use the filter/filterType fields for YouTube search parameter (sp) filters.\n- Input: `{ query: string, limitPages: string, filter: string, filterType: string, countryCode?: string, languageCode?: string }`\n- Output: `{ results: object }`\n\n#### searchYoutubeTrends\nRetrieve trending videos on YouTube by category and region.\n- Categories: "now" (trending now), "music", "gaming", "films".\n- Supports country and language filtering.\n- Input: `{ bp: "now" | "music" | "gaming" | "films", hl: string, gl: string }`\n- Output: `{ trends: object }`\n\n### Helpers\n\n#### `listModels()`\nList all available AI models across all 
categories.\n\nOutput:\n```typescript\n{\n models: {\n id: string;\n name: string; // Display name\n type: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";\n maxTemperature: number;\n maxResponseSize: number;\n inputs: object[]; // Accepted input types\n }[]\n}\n```\n\n#### `listModelsByType(modelType)`\nList AI models filtered by type.\n- `modelType`: `"llm_chat"` | `"image_generation"` | `"video_generation"` | `"video_analysis"` | `"text_to_speech"` | `"vision"` | `"transcription"`\n- Output: same as `listModels()`\n\n#### `listModelsSummary()`\nList all available AI models (summary). Returns only id, name, type, and tags. Suitable for display or consumption inside a model context window.\n\nOutput:\n```typescript\n{\n models: {\n id: string;\n name: string;\n type: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";\n tags: string; // Comma-separated tags\n }[]\n}\n```\n\n#### `listModelsSummaryByType(modelType)`\nList AI models (summary) filtered by type.\n- `modelType`: `"llm_chat"` | `"image_generation"` | `"video_generation"` | `"video_analysis"` | `"text_to_speech"` | `"vision"` | `"transcription"`\n- Output: same as `listModelsSummary()`\n\n#### `listConnectors()`\nList available OAuth connector services (Slack, Google, HubSpot, etc.) and their actions. 
These are third-party integrations \u2014 for most tasks, use actions directly instead.\n\nOutput:\n```typescript\n{\n services: {\n id: string;\n name: string;\n icon: string;\n actions: { id: string; name: string }[];\n }[]\n}\n```\n\n#### `getConnector(serviceId)`\nGet details for a single OAuth connector service by ID.\n\nOutput:\n```typescript\n{\n service: {\n id: string;\n name: string;\n icon: string;\n actions: { id: string; name: string }[];\n }\n}\n```\n\n#### `getConnectorAction(serviceId, actionId)`\nGet the full configuration for an OAuth connector action, including all input fields needed to call it via `runFromConnectorRegistry`. OAuth connectors are sourced from the open-source MindStudio Connector Registry (MSCR) with 850+ actions across third-party services.\n\nOutput:\n```typescript\n{\n action: {\n id: string;\n name: string;\n description: string;\n quickHelp: string;\n configuration: { title: string; items: { label: string; helpText: string; variable: string; type: string; defaultValue: string; placeholder: string; selectOptions?: object }[] }[];\n }\n}\n```\n\n#### `listConnections()`\nList OAuth connections for the organization (authenticated third-party service links). Use the returned connection IDs when calling OAuth connector actions. Connectors require the user to connect to the third-party service in MindStudio before they can be used.\n\nOutput:\n```typescript\n{\n connections: {\n id: string; // Connection ID to pass to connector actions\n provider: string; // Integration provider (e.g. slack, google)\n name: string; // Display name or account identifier\n }[]\n}\n```\n\n#### `estimateStepCost(stepType, step?, options?)`\nEstimate the cost of executing a step before running it. Pass the same step config you would use for execution.\n\n```typescript\nconst estimate = await agent.estimateStepCost(\'generateText\', { message: \'Hello\' });\n```\n\n- `stepType`: string \u2014 The action name (e.g. 
`"generateText"`).\n- `step`: object \u2014 Optional action input parameters for more accurate estimates.\n- `options`: `{ appId?: string, workflowId?: string }` \u2014 Optional context for pricing.\n\nOutput:\n```typescript\n{\n costType?: string; // "free" when the step has no cost\n estimates?: {\n eventType: string; // Billing event type\n label: string; // Human-readable cost label\n unitPrice: number; // Price per unit in billing units\n unitType: string; // What constitutes a unit (e.g. "token", "request")\n estimatedCost?: number; // Estimated total cost, or null if not estimable\n quantity: number; // Number of billable units\n }[]\n}\n```\n\n#### `changeName(displayName)`\nUpdate the display name of the authenticated agent. Useful for agents to set their own name after connecting.\n\n```typescript\nawait agent.changeName(\'My Agent\');\n```\n\n#### `changeProfilePicture(profilePictureUrl)`\nUpdate the profile picture of the authenticated agent. Useful for agents to set their own avatar after connecting.\n\n```typescript\nawait agent.changeProfilePicture(\'https://example.com/avatar.png\');\n```\n\n#### `uploadFile(content, options)`\nUpload a file to the MindStudio CDN. Gets a signed upload URL, PUTs the file content, and returns the permanent public URL.\n\n```typescript\nimport { readFileSync } from \'fs\';\nconst { url } = await agent.uploadFile(readFileSync(\'photo.png\'), { extension: \'png\', type: \'image/png\' });\n```\n\n- `content`: `Buffer | Uint8Array` \u2014 The file content.\n- `options.extension`: string \u2014 File extension without the dot (e.g. `"png"`, `"jpg"`, `"mp4"`).\n- `options.type`: string (optional) \u2014 MIME type (e.g. `"image/png"`). Determines which CDN subdomain is used.\n\nOutput: `{ url: string }` \u2014 The permanent public CDN URL.\n';
4091
+ }
4092
+ });
4093
+
// src/ask/prompt.ts
/**
 * Assemble the system prompt for the `ask` assistant.
 *
 * Fetches the model summary, OAuth connections, connector services, and the
 * bundled action-reference text in parallel. Every fetch is independent:
 * Promise.allSettled is used so a single failure degrades that one section to
 * a fallback string instead of failing the whole prompt build.
 *
 * @param agent - MindStudio agent client used for the catalog lookups.
 * @returns The full system prompt string (identity + reference docs + instructions).
 */
async function buildSystemPrompt(agent) {
  const [modelsResult, connectionsResult, connectorsResult, llmsResult] =
    await Promise.allSettled([
      agent.listModelsSummary(),
      agent.listConnections(),
      agent.listConnectors(),
      // Lazily initialize the bundled llms.txt module and grab its exports.
      Promise.resolve().then(() => (init_llms_content(), llms_content_exports))
    ]);

  // One bullet per model; tags are appended only when present.
  let modelsSummary = "(Could not load models \u2014 use the listModels tool to look them up)";
  if (modelsResult.status === "fulfilled") {
    const bullets = [];
    for (const m of modelsResult.value.models) {
      bullets.push(`- ${m.id} (${m.name}, type: ${m.type}${m.tags ? ", tags: " + m.tags : ""})`);
    }
    modelsSummary = bullets.join("\n");
  }

  // OAuth connections; an empty list is reported explicitly.
  let connections = "No OAuth connections configured.";
  if (connectionsResult.status === "fulfilled" && connectionsResult.value.connections.length > 0) {
    connections = connectionsResult.value.connections
      .map((c) => `- ${c.provider}: ${c.name} (id: ${c.id})`)
      .join("\n");
  }

  // Connector registry services with their action counts.
  let connectorServices = "(Could not load connectors \u2014 use the getConnectorDetails tool)";
  if (connectorsResult.status === "fulfilled") {
    connectorServices = connectorsResult.value.services
      .map((s) => `- ${s.id}: ${s.name} (${s.actions?.length ?? 0} actions)`)
      .join("\n");
  }

  // Full SDK action reference text bundled at build time.
  const actionReference = llmsResult.status === "fulfilled"
    ? llmsResult.value.llmsContent
    : "(Could not load action reference \u2014 use getActionDetails tool)";

  const identity = `You are the MindStudio SDK assistant. You answer questions about the @mindstudio-ai/agent TypeScript SDK \u2014 actions, AI models, OAuth connectors, and integrations. Your consumers are AI agents that read your full output in one pass.`;

  const referenceDocs = `<sdk_reference>
<quick_reference>
Auth is always pre-configured. Use \`new MindStudioAgent()\` with no arguments in code examples.
Calling convention: \`const result = await agent.methodName({ ...input })\`
Results are flat: output fields + \`$appId\`, \`$threadId\`, \`$billingCost\` metadata.
Thread persistence: pass \`{ threadId: result.$threadId, appId: result.$appId }\` as second arg.
All 200+ models accessed through one API key \u2014 MindStudio routes to the provider server-side.
</quick_reference>

<model_overrides>
Actions that use AI models accept a model override object. Each model has its own config options (dimensions, seed, etc.) defined in its \`inputs\` array. The \`inputs[].variable\` values are the keys for the \`config\` object:

\`\`\`typescript
const agent = new MindStudioAgent();

await agent.generateImage({
  prompt: 'a sunset',
  imageModelOverride: {
    model: 'flux-pro-2',
    config: {
      width: 1024,
      height: 768,
      seed: 42,
    }
  }
});
\`\`\`

Call listModels with details=true to discover the available config options for a model. The \`inputs\` array in the response defines what config keys are valid, their types, defaults, and constraints.
</model_overrides>

<actions>
${actionReference}
</actions>

<models>
${modelsSummary}
</models>

<oauth_connections>
${connections}
</oauth_connections>

<connector_services>
OAuth connector services from the MindStudio Connector Registry. Each service has multiple actions (850+ total). Use the getConnectorDetails tool to drill into a service's actions and get input fields. Connector actions are executed via the \`runFromConnectorRegistry\` SDK action and require the user to have an OAuth connection set up for that service.

${connectorServices}
</connector_services>
</sdk_reference>`;

  const instructions = `<instructions>
<tools>
You have 3 tools for detailed lookups. Most questions can be answered from the reference above without tools.

- **getActionDetails(actionName)** \u2014 Full JSON schema for a specific action. Use when you need exact param types/enums to write correct code.
- **listModels(type?, details?)** \u2014 Model catalog. By default returns compact summaries. With \`details: true\`, returns full model objects including the \`inputs\` array that defines config options (width, height, seed, etc.). Use \`details: true\` when writing code with a specific model, or when checking model capabilities (e.g. which models support source images). You can filter the full response yourself \u2014 one call with details is better than many individual lookups.
- **getConnectorDetails(serviceId, actionId?)** \u2014 Drill into a connector service. With just serviceId, lists available actions. With actionId, returns the full action config with input fields for use with \`runFromConnectorRegistry\`.
</tools>

<response_format>
- Be terse. Lead with code \u2014 if the question implies code, the code block is the first thing in your response.
- Return complete, copy-paste-ready TypeScript code with correct model IDs, config options, and types.
- When writing code that uses a specific model, call listModels with details=true to get the model's config options and include them.
- When building code examples, use getActionDetails to get the exact input schema first.
- After the code block, optionally list config constraints (ranges, defaults) in a compact format.
- For discovery questions ("what can I do?"), return a compact list from the reference docs.
- Assume the caller already knows what the SDK is, how to install it, and how auth works.
- Only state facts from the data you have. Do not editorialize, recommend, or compare models/actions beyond what their metadata says. If the data does not say a model is "strong" or "best" at something, do not claim it is.
- Model tags in the summary are editorial labels, not technical specs. When answering questions about model capabilities (supported inputs, config options, dimensions, etc.), call listModels with details=true to check the \`inputs\` array \u2014 that is the source of truth. For example, a model supports start frame images if it has an input with type "imageUrl" or "imageUrlArray", not because its tags say "Source Image".
</response_format>
</instructions>`;

  return `${identity}

${referenceDocs}

${instructions}`;
}
// Bundler registration for src/ask/prompt.ts. The module has no top-level
// side effects beyond strict mode, so the initializer body is empty.
var init_prompt = __esm({
  "src/ask/prompt.ts"() {
    "use strict";
  }
});
4192
+
// src/ask/index.ts
// Lazy export shim for the ask module: getters defer resolution of cmdAsk and
// runAsk until init_ask has populated them.
var ask_exports = {};
__export(ask_exports, {
  cmdAsk: () => cmdAsk,
  runAsk: () => runAsk
});
/**
 * Resolve the API key and base URL for the ask command.
 *
 * Precedence for the API key: CALLBACK_TOKEN env (sandbox execution) >
 * explicit option > MINDSTUDIO_API_KEY env > saved config file.
 * Precedence for the base URL: explicit option > MINDSTUDIO_BASE_URL env >
 * REMOTE_HOSTNAME env > saved config > built-in default.
 *
 * @param options - `{ apiKey?, baseUrl? }` from the CLI invocation.
 * @returns `{ apiKey, baseUrl }` with both values resolved.
 * @throws Error when no API key can be found from any source.
 */
function resolveCredentials(options) {
  const stored = loadConfig();
  const env = process.env;
  // Mirrors `a ?? b ?? ...`: first candidate that is neither null nor undefined.
  const pick = (...candidates) => candidates.find((v) => v != null);
  const apiKey = pick(env.CALLBACK_TOKEN, options.apiKey, env.MINDSTUDIO_API_KEY, stored.apiKey);
  if (!apiKey) {
    throw new Error(
      "Not authenticated. Run `mindstudio login` or set MINDSTUDIO_API_KEY."
    );
  }
  const baseUrl = pick(
    options.baseUrl,
    env.MINDSTUDIO_BASE_URL,
    env.REMOTE_HOSTNAME,
    stored.baseUrl,
    DEFAULT_BASE_URL2
  );
  return { apiKey, baseUrl };
}
/**
 * Run the `ask` agent loop: stream a chat completion, execute any requested
 * tools, feed results back, and repeat until the model stops asking for tools.
 *
 * @param question - Natural-language question about the SDK.
 * @param options - `{ apiKey?, baseUrl? }` credential overrides.
 * @param onEvent - Optional callback receiving streaming events
 *   (`text`, `tool_start`, `tool_done`) for progress rendering.
 * @returns The assistant's final text response.
 * @throws Error on stream errors or when not authenticated.
 */
async function runAsk(question, options = {}, onEvent) {
  const { apiKey, baseUrl } = resolveCredentials(options);
  // Fix: construct the agent from the RESOLVED credentials, not the raw
  // options. Previously env vars (CALLBACK_TOKEN, MINDSTUDIO_API_KEY) and the
  // saved config were applied to streamChat but ignored for tool execution,
  // so tools could run unauthenticated or against the wrong host.
  const agent = new MindStudioAgent({
    apiKey,
    baseUrl
  });
  const system = await buildSystemPrompt(agent);
  const messages = [{ role: "user", content: question }];
  // Agent loop: each iteration is one model turn; tool turns append results
  // and continue, a non-tool turn returns the accumulated text.
  while (true) {
    let assistantText = "";
    const toolCalls = [];
    let stopReason = "end_turn";
    for await (const event of streamChat({
      baseUrl,
      apiKey,
      system,
      messages,
      tools: ASK_TOOLS
    })) {
      switch (event.type) {
        case "text":
          assistantText += event.text;
          onEvent?.({ type: "text", text: event.text });
          break;
        case "tool_use":
          toolCalls.push({
            id: event.id,
            name: event.name,
            input: event.input
          });
          onEvent?.({
            type: "tool_start",
            name: event.name,
            input: event.input
          });
          break;
        case "done":
          stopReason = event.stopReason;
          break;
        case "error":
          throw new Error(event.error);
      }
    }
    // Record the assistant turn (with tool calls, if any) in the transcript.
    messages.push({
      role: "assistant",
      content: assistantText,
      toolCalls: toolCalls.length > 0 ? toolCalls : void 0
    });
    if (stopReason !== "tool_use" || toolCalls.length === 0) {
      return assistantText;
    }
    // Execute all requested tools in parallel; executeTool reports failures
    // via `isError` rather than throwing, so Promise.all is safe here.
    const results = await Promise.all(
      toolCalls.map(async (tc) => {
        const { result, isError } = await executeTool(agent, tc.name, tc.input);
        onEvent?.({ type: "tool_done", name: tc.name, isError });
        return { id: tc.id, result, isError };
      })
    );
    for (const r of results) {
      messages.push({
        role: "user",
        content: r.result,
        toolCallId: r.id,
        isToolError: r.isError
      });
    }
  }
}
/**
 * Build a short one-line preview of a tool-call input for terminal display.
 * Only string-valued fields are shown, joined with ", ", and the result is
 * truncated to 60 characters (57 chars + "...").
 *
 * @param input - Tool input object (arbitrary key/value pairs).
 * @returns Comma-joined string values, possibly truncated.
 */
function summarizeInput(input) {
  const parts = [];
  for (const value of Object.values(input)) {
    if (typeof value === "string") {
      parts.push(value);
    }
  }
  const joined = parts.join(", ");
  if (joined.length <= 60) {
    return joined;
  }
  return `${joined.slice(0, 57)}...`;
}
/**
 * CLI entry point for `mindstudio ask`.
 *
 * Streams the assistant's progress to stderr as it arrives. On completion, a
 * TTY stdout gets only a trailing newline on stderr (the user already saw the
 * streamed text); a piped stdout receives the full response so the command
 * composes in shell pipelines. Exits with code 1 on any error.
 *
 * @param question - The user's question.
 * @param options - `{ apiKey?, baseUrl? }` credential overrides.
 */
async function cmdAsk(question, options) {
  try {
    const render = (event) => {
      if (event.type === "text") {
        process.stderr.write(event.text);
      } else if (event.type === "tool_start") {
        // Blank line, cyan marker, bold tool name, dimmed input preview.
        process.stderr.write(
          "\n" + ansi.cyan("\u27E1") + " " + ansi.bold(event.name) + " " + ansi.dim(summarizeInput(event.input)) + "\n"
        );
      }
      // "tool_done" events need no terminal output.
    };
    const response = await runAsk(question, options, render);
    if (process.stdout.isTTY) {
      process.stderr.write("\n");
    } else {
      process.stdout.write(response + "\n");
    }
  } catch (err) {
    process.stderr.write("Error: " + err.message + "\n");
    process.exit(1);
  }
}
var DEFAULT_BASE_URL2, ansi;
// Bundler registration for src/ask/index.ts: pulls in its dependency modules
// and initializes the module-level constants used by the ask command.
var init_ask = __esm({
  "src/ask/index.ts"() {
    "use strict";
    init_config();
    init_client();
    init_sse();
    init_tools();
    init_prompt();
    DEFAULT_BASE_URL2 = "https://v1.mindstudio-api.com";
    // SGR escape-code helpers: wrap(code) paints a string with the given
    // attribute and resets afterwards.
    const wrap = (code) => (s) => `\x1B[${code}m${s}\x1B[0m`;
    ansi = {
      dim: wrap("2"),
      green: wrap("32"),
      red: wrap("31"),
      cyan: wrap("36"),
      bold: wrap("1")
    };
  }
});
4331
+
3839
4332
  // src/mcp.ts
3840
4333
  var mcp_exports = {};
3841
4334
  __export(mcp_exports, {
@@ -3884,7 +4377,7 @@ async function startMcpServer(options) {
3884
4377
  inputSchema: step.inputSchema
3885
4378
  })
3886
4379
  );
3887
- tools = [...stepTools, ...HELPER_TOOLS];
4380
+ tools = [ASK_TOOL, ...stepTools, ...HELPER_TOOLS];
3888
4381
  return tools;
3889
4382
  }
3890
4383
  async function handleMessage(msg) {
@@ -3896,9 +4389,9 @@ async function startMcpServer(options) {
3896
4389
  capabilities: { tools: {} },
3897
4390
  serverInfo: {
3898
4391
  name: "mindstudio-agent",
3899
- version: "0.1.15"
4392
+ version: "0.1.17"
3900
4393
  },
3901
- instructions: "Welcome to MindStudio \u2014 a platform with 200+ AI models, 850+ third-party integrations, and pre-built agents.\n\nGetting started:\n1. Call `listAgents` to verify your connection and see available agents.\n2. Call `changeName` to set your display name \u2014 use your name or whatever your user calls you. This is how you'll appear in MindStudio request logs.\n3. If you have a profile picture or icon, call `uploadFile` to upload it, then `changeProfilePicture` with the returned URL. This helps users identify your requests in their logs.\n4. Call `listActions` to discover all available actions.\n\nThen use the tools to generate text, images, video, audio, search the web, work with data sources, run agents, and more.\n\nImportant:\n- AI-powered actions (text generation, image generation, video, audio, etc.) cost money. Before running these, call `estimateActionCost` and confirm with the user before proceeding \u2014 unless they've explicitly told you to go ahead.\n- Not all agents from `listAgents` are configured for API use. Do not try to run an agent just because it appears in the list \u2014 it will likely fail. Only run agents the user specifically asks you to run."
4394
+ instructions: 'Welcome to MindStudio \u2014 a platform with 200+ AI models, 850+ third-party integrations, and pre-built agents.\n\nGetting started:\n1. Call `ask` with any question about the SDK \u2014 it knows every action, model, and connector and returns working code with real model IDs and config options. Examples: ask("generate an image with FLUX"), ask("what models support vision?"), ask("how do I send a Slack message?").\n2. Call `changeName` to set your display name \u2014 use your name or whatever your user calls you. This is how you\'ll appear in MindStudio request logs.\n3. If you have a profile picture or icon, call `uploadFile` to upload it, then `changeProfilePicture` with the returned URL.\n4. For manual browsing, call `listActions` to discover all available actions.\n\nThen use the tools to generate text, images, video, audio, search the web, work with data sources, run agents, and more.\n\nImportant:\n- AI-powered actions (text generation, image generation, video, audio, etc.) cost money. Before running these, call `estimateActionCost` and confirm with the user before proceeding \u2014 unless they\'ve explicitly told you to go ahead.\n- Not all agents from `listAgents` are configured for API use. Do not try to run an agent just because it appears in the list \u2014 it will likely fail. Only run agents the user specifically asks you to run.'
3902
4395
  });
3903
4396
  break;
3904
4397
  case "notifications/initialized":
@@ -3911,7 +4404,13 @@ async function startMcpServer(options) {
3911
4404
  const args = params.arguments ?? {};
3912
4405
  try {
3913
4406
  let result;
3914
- if (toolName === "listActions") {
4407
+ if (toolName === "ask") {
4408
+ const { runAsk: runAsk2 } = await Promise.resolve().then(() => (init_ask(), ask_exports));
4409
+ result = await runAsk2(
4410
+ args.question,
4411
+ options
4412
+ );
4413
+ } else if (toolName === "listActions") {
3915
4414
  const meta = await getMetadata();
3916
4415
  const summary = {};
3917
4416
  for (const [name, step] of Object.entries(meta)) {
@@ -4038,7 +4537,7 @@ async function startMcpServer(options) {
4038
4537
  }
4039
4538
  }
4040
4539
  }
4041
- var MCP_PROTOCOL_VERSION, MIME_TYPES, HELPER_DESCRIPTIONS, HELPER_TOOLS;
4540
+ var MCP_PROTOCOL_VERSION, MIME_TYPES, ASK_TOOL, HELPER_DESCRIPTIONS, HELPER_TOOLS;
4042
4541
  var init_mcp = __esm({
4043
4542
  "src/mcp.ts"() {
4044
4543
  "use strict";
@@ -4060,6 +4559,20 @@ var init_mcp = __esm({
4060
4559
  txt: "text/plain",
4061
4560
  csv: "text/csv"
4062
4561
  };
4562
+ ASK_TOOL = {
4563
+ name: "ask",
4564
+ description: 'Ask a question about the MindStudio SDK \u2014 available actions, AI models, OAuth connectors, integrations, and how to use them. Returns complete TypeScript code with real model IDs, config options, and correct types. Use this when you need to discover actions, find model IDs, look up connector details, or get working code examples.\n\nExample questions:\n- "generate an image with FLUX"\n- "what models support vision?"\n- "how do I send a Slack message with an attachment?"\n- "what connectors could I configure?"\n- "what are the config options for flux-max-2?"',
4565
+ inputSchema: {
4566
+ type: "object",
4567
+ properties: {
4568
+ question: {
4569
+ type: "string",
4570
+ description: "Natural language question about the MindStudio SDK"
4571
+ }
4572
+ },
4573
+ required: ["question"]
4574
+ }
4575
+ };
4063
4576
  HELPER_DESCRIPTIONS = {
4064
4577
  listModels: "List all available AI models.",
4065
4578
  listModelsByType: "List AI models filtered by type.",
@@ -4319,7 +4832,7 @@ import { extname as extname2 } from "path";
4319
4832
  function camelToKebab(s) {
4320
4833
  return s.replace(/[A-Z]/g, (m) => "-" + m.toLowerCase());
4321
4834
  }
4322
- function kebabToCamel(s) {
4835
+ function kebabToCamel2(s) {
4323
4836
  return s.replace(/-([a-z])/g, (_, c) => c.toUpperCase());
4324
4837
  }
4325
4838
  function parseJson5(input) {
@@ -4380,7 +4893,7 @@ async function readStdin() {
4380
4893
  }
4381
4894
  function resolveMethodOrFail(name, metadataKeys) {
4382
4895
  if (metadataKeys.has(name)) return name;
4383
- const camel = kebabToCamel(name);
4896
+ const camel = kebabToCamel2(name);
4384
4897
  if (metadataKeys.has(camel)) return camel;
4385
4898
  const kebab = name.includes("-") ? name : camelToKebab(name);
4386
4899
  let bestDist = Infinity;
@@ -4716,7 +5229,7 @@ function isNewerVersion(current, latest) {
4716
5229
  return false;
4717
5230
  }
4718
5231
  async function checkForUpdate() {
4719
- const currentVersion = "0.1.15";
5232
+ const currentVersion = "0.1.17";
4720
5233
  if (!currentVersion) return null;
4721
5234
  try {
4722
5235
  const { loadConfig: loadConfig2, saveConfig: saveConfig2 } = await Promise.resolve().then(() => (init_config(), config_exports));
@@ -4745,11 +5258,11 @@ async function checkForUpdate() {
4745
5258
  }
4746
5259
  }
4747
5260
  function printUpdateNotice(latestVersion) {
4748
- const currentVersion = "0.1.15";
5261
+ const currentVersion = "0.1.17";
4749
5262
  process.stderr.write(
4750
5263
  `
4751
- ${ansi.cyanBright("Update available")} ${ansi.gray(currentVersion + " \u2192")} ${ansi.cyanBold(latestVersion)}
4752
- ${ansi.gray("Run")} npm install -g @mindstudio-ai/agent ${ansi.gray("to update")}
5264
+ ${ansi2.cyanBright("Update available")} ${ansi2.gray(currentVersion + " \u2192")} ${ansi2.cyanBold(latestVersion)}
5265
+ ${ansi2.gray("Run")} npm install -g @mindstudio-ai/agent ${ansi2.gray("to update")}
4753
5266
  `
4754
5267
  );
4755
5268
  }
@@ -4795,23 +5308,23 @@ function maskKey(key) {
4795
5308
  return key.slice(0, 4) + "..." + key.slice(-4);
4796
5309
  }
4797
5310
  async function cmdLogin(options) {
4798
- const baseUrl = options.baseUrl ?? process.env.MINDSTUDIO_BASE_URL ?? process.env.REMOTE_HOSTNAME ?? DEFAULT_BASE_URL2;
5311
+ const baseUrl = options.baseUrl ?? process.env.MINDSTUDIO_BASE_URL ?? process.env.REMOTE_HOSTNAME ?? DEFAULT_BASE_URL3;
4799
5312
  process.stderr.write("\x1B[2J\x1B[H");
4800
5313
  process.stderr.write("\n");
4801
5314
  printLogo();
4802
5315
  process.stderr.write("\n");
4803
- const ver = "0.1.15";
5316
+ const ver = "0.1.17";
4804
5317
  process.stderr.write(
4805
- ` ${ansi.bold("MindStudio Agent")} ${ver ? " " + ansi.gray("v" + ver) : ""}
5318
+ ` ${ansi2.bold("MindStudio Agent")} ${ver ? " " + ansi2.gray("v" + ver) : ""}
4806
5319
  `
4807
5320
  );
4808
5321
  process.stderr.write(
4809
- ` ${ansi.gray("Connect your MindStudio account to get started.")}
5322
+ ` ${ansi2.gray("Connect your MindStudio account to get started.")}
4810
5323
 
4811
5324
  `
4812
5325
  );
4813
5326
  process.stderr.write(
4814
- ` ${ansi.cyanBright("Press any key to open the browser...")}
5327
+ ` ${ansi2.cyanBright("Press any key to open the browser...")}
4815
5328
 
4816
5329
 
4817
5330
 
@@ -4819,7 +5332,7 @@ async function cmdLogin(options) {
4819
5332
  );
4820
5333
  await waitForKeypress();
4821
5334
  process.stderr.write("\x1B[4A\r\x1B[J");
4822
- process.stderr.write(` ${ansi.gray("Requesting authorization...")}
5335
+ process.stderr.write(` ${ansi2.gray("Requesting authorization...")}
4823
5336
  `);
4824
5337
  const authRes = await fetch(
4825
5338
  `${baseUrl}/developer/v2/request-auth-url?agent=true`,
@@ -4838,10 +5351,10 @@ async function cmdLogin(options) {
4838
5351
  const { url, token } = await authRes.json();
4839
5352
  openBrowser(url);
4840
5353
  process.stderr.write(
4841
- ` ${ansi.cyanBright("Opening browser to authenticate...")}
5354
+ ` ${ansi2.cyanBright("Opening browser to authenticate...")}
4842
5355
 
4843
- ${ansi.gray("If the browser didn't open, visit:")}
4844
- ${ansi.cyan(url)}
5356
+ ${ansi2.gray("If the browser didn't open, visit:")}
5357
+ ${ansi2.cyan(url)}
4845
5358
 
4846
5359
  `
4847
5360
  );
@@ -4854,7 +5367,7 @@ async function cmdLogin(options) {
4854
5367
  MAX_ATTEMPTS * POLL_INTERVAL / 1e3 - (attempt + 1) * POLL_INTERVAL / 1e3
4855
5368
  );
4856
5369
  process.stderr.write(
4857
- `\r ${ansi.cyan(frame)} Waiting for browser authorization... ${ansi.gray(`(${remaining}s)`)}`
5370
+ `\r ${ansi2.cyan(frame)} Waiting for browser authorization... ${ansi2.gray(`(${remaining}s)`)}`
4858
5371
  );
4859
5372
  const pollRes = await fetch(`${baseUrl}/developer/v2/poll-auth-url`, {
4860
5373
  method: "POST",
@@ -4875,16 +5388,16 @@ async function cmdLogin(options) {
4875
5388
  const config = {
4876
5389
  apiKey: result.apiKey
4877
5390
  };
4878
- if (baseUrl !== DEFAULT_BASE_URL2) {
5391
+ if (baseUrl !== DEFAULT_BASE_URL3) {
4879
5392
  config.baseUrl = baseUrl;
4880
5393
  }
4881
5394
  saveConfig2(config);
4882
5395
  process.stderr.write(
4883
- ` ${ansi.greenBold("\u2714")} Authenticated successfully!
4884
- ${ansi.gray("Credentials saved to")} ${getConfigPath2()}
5396
+ ` ${ansi2.greenBold("\u2714")} Authenticated successfully!
5397
+ ${ansi2.gray("Credentials saved to")} ${getConfigPath2()}
4885
5398
 
4886
- ${ansi.bold("Using with Claude Code?")} Run once to enable the MCP server:
4887
- ${ansi.cyan("claude mcp add mindstudio -- mindstudio mcp")}
5399
+ ${ansi2.bold("Using with Claude Code?")} Run once to enable the MCP server:
5400
+ ${ansi2.cyan("claude mcp add mindstudio -- mindstudio mcp")}
4888
5401
 
4889
5402
  `
4890
5403
  );
@@ -4902,13 +5415,13 @@ async function cmdLogout() {
4902
5415
  const { loadConfig: loadConfig2, clearConfig: clearConfig2, getConfigPath: getConfigPath2 } = await Promise.resolve().then(() => (init_config(), config_exports));
4903
5416
  const config = loadConfig2();
4904
5417
  if (!config.apiKey) {
4905
- process.stderr.write(` ${ansi.gray("Not currently logged in.")}
5418
+ process.stderr.write(` ${ansi2.gray("Not currently logged in.")}
4906
5419
  `);
4907
5420
  return;
4908
5421
  }
4909
5422
  clearConfig2();
4910
5423
  process.stderr.write(
4911
- ` ${ansi.greenBold("\u2714")} Logged out. Credentials removed from ${ansi.gray(getConfigPath2())}
5424
+ ` ${ansi2.greenBold("\u2714")} Logged out. Credentials removed from ${ansi2.gray(getConfigPath2())}
4912
5425
  `
4913
5426
  );
4914
5427
  }
@@ -4916,36 +5429,36 @@ async function cmdWhoami(options) {
4916
5429
  let source;
4917
5430
  let detail = [];
4918
5431
  if (options.apiKey) {
4919
- source = `${ansi.bold("--api-key flag")} ${ansi.gray("(CLI argument)")}`;
5432
+ source = `${ansi2.bold("--api-key flag")} ${ansi2.gray("(CLI argument)")}`;
4920
5433
  } else if (process.env.MINDSTUDIO_API_KEY) {
4921
- source = `${ansi.bold("MINDSTUDIO_API_KEY")} ${ansi.gray("(environment variable)")}`;
5434
+ source = `${ansi2.bold("MINDSTUDIO_API_KEY")} ${ansi2.gray("(environment variable)")}`;
4922
5435
  detail.push(
4923
- ` ${ansi.gray("Key:")} ${maskKey(process.env.MINDSTUDIO_API_KEY)}`
5436
+ ` ${ansi2.gray("Key:")} ${maskKey(process.env.MINDSTUDIO_API_KEY)}`
4924
5437
  );
4925
5438
  } else {
4926
5439
  const { loadConfig: loadConfig2, getConfigPath: getConfigPath2 } = await Promise.resolve().then(() => (init_config(), config_exports));
4927
5440
  const config = loadConfig2();
4928
5441
  if (config.apiKey) {
4929
- source = `${ansi.bold("config file")} ${ansi.gray("(mindstudio login)")}`;
4930
- detail.push(` ${ansi.gray("File:")} ${getConfigPath2()}`);
4931
- detail.push(` ${ansi.gray("Key:")} ${maskKey(config.apiKey)}`);
5442
+ source = `${ansi2.bold("config file")} ${ansi2.gray("(mindstudio login)")}`;
5443
+ detail.push(` ${ansi2.gray("File:")} ${getConfigPath2()}`);
5444
+ detail.push(` ${ansi2.gray("Key:")} ${maskKey(config.apiKey)}`);
4932
5445
  if (config.baseUrl) {
4933
- detail.push(` ${ansi.gray("URL:")} ${config.baseUrl}`);
5446
+ detail.push(` ${ansi2.gray("URL:")} ${config.baseUrl}`);
4934
5447
  }
4935
5448
  } else if (process.env.CALLBACK_TOKEN) {
4936
- source = `${ansi.bold("CALLBACK_TOKEN")} ${ansi.gray("(managed/internal mode)")}`;
5449
+ source = `${ansi2.bold("CALLBACK_TOKEN")} ${ansi2.gray("(managed/internal mode)")}`;
4937
5450
  } else {
4938
5451
  process.stderr.write(
4939
- ` ${ansi.gray("\u25CB")} Not authenticated. Run ${ansi.cyan("mindstudio login")} to get started.
5452
+ ` ${ansi2.gray("\u25CB")} Not authenticated. Run ${ansi2.cyan("mindstudio login")} to get started.
4940
5453
  `
4941
5454
  );
4942
5455
  return;
4943
5456
  }
4944
5457
  }
4945
- process.stderr.write(` ${ansi.gray("Auth:")} ${source}
5458
+ process.stderr.write(` ${ansi2.gray("Auth:")} ${source}
4946
5459
  `);
4947
5460
  for (const line of detail) process.stderr.write(line + "\n");
4948
- process.stderr.write(` ${ansi.gray("Verifying...")} `);
5461
+ process.stderr.write(` ${ansi2.gray("Verifying...")} `);
4949
5462
  try {
4950
5463
  const { MindStudioAgent: MindStudioAgent2 } = await Promise.resolve().then(() => (init_client(), client_exports));
4951
5464
  const agent = new MindStudioAgent2({
@@ -4954,34 +5467,34 @@ async function cmdWhoami(options) {
4954
5467
  });
4955
5468
  const info = await agent.getUserInfo();
4956
5469
  process.stderr.write(
4957
- `\r\x1B[K ${ansi.greenBold("\u25CF")} ${ansi.green("Connected")}
5470
+ `\r\x1B[K ${ansi2.greenBold("\u25CF")} ${ansi2.green("Connected")}
4958
5471
 
4959
5472
  `
4960
5473
  );
4961
- process.stderr.write(` ${ansi.bold("User")}
5474
+ process.stderr.write(` ${ansi2.bold("User")}
4962
5475
  `);
4963
5476
  process.stderr.write(
4964
- ` ${ansi.gray("Name:")} ${info.displayName}
5477
+ ` ${ansi2.gray("Name:")} ${info.displayName}
4965
5478
  `
4966
5479
  );
4967
5480
  process.stderr.write(
4968
- ` ${ansi.gray("ID:")} ${ansi.gray(info.userId)}
5481
+ ` ${ansi2.gray("ID:")} ${ansi2.gray(info.userId)}
4969
5482
  `
4970
5483
  );
4971
5484
  process.stderr.write(`
4972
- ${ansi.bold("Organization")}
5485
+ ${ansi2.bold("Organization")}
4973
5486
  `);
4974
5487
  process.stderr.write(
4975
- ` ${ansi.gray("Name:")} ${info.organizationName}
5488
+ ` ${ansi2.gray("Name:")} ${info.organizationName}
4976
5489
  `
4977
5490
  );
4978
5491
  process.stderr.write(
4979
- ` ${ansi.gray("ID:")} ${ansi.gray(info.organizationId)}
5492
+ ` ${ansi2.gray("ID:")} ${ansi2.gray(info.organizationId)}
4980
5493
  `
4981
5494
  );
4982
5495
  if (info.members && info.members.length > 0) {
4983
5496
  process.stderr.write(`
4984
- ${ansi.bold("Members")}
5497
+ ${ansi2.bold("Members")}
4985
5498
  `);
4986
5499
  const nameWidth = Math.max(
4987
5500
  4,
@@ -4992,17 +5505,17 @@ async function cmdWhoami(options) {
4992
5505
  ...info.members.map((m) => m.role.length)
4993
5506
  );
4994
5507
  process.stderr.write(
4995
- ` ${ansi.gray("Name".padEnd(nameWidth))} ${ansi.gray("Role".padEnd(roleWidth))} ${ansi.gray("Type")}
5508
+ ` ${ansi2.gray("Name".padEnd(nameWidth))} ${ansi2.gray("Role".padEnd(roleWidth))} ${ansi2.gray("Type")}
4996
5509
  `
4997
5510
  );
4998
5511
  process.stderr.write(
4999
- ` ${ansi.gray("\u2500".repeat(nameWidth))} ${ansi.gray("\u2500".repeat(roleWidth))} ${ansi.gray("\u2500".repeat(5))}
5512
+ ` ${ansi2.gray("\u2500".repeat(nameWidth))} ${ansi2.gray("\u2500".repeat(roleWidth))} ${ansi2.gray("\u2500".repeat(5))}
5000
5513
  `
5001
5514
  );
5002
5515
  for (const member of info.members) {
5003
- const type = member.isAgent ? ansi.cyan("agent") : "user";
5516
+ const type = member.isAgent ? ansi2.cyan("agent") : "user";
5004
5517
  process.stderr.write(
5005
- ` ${member.displayName.padEnd(nameWidth)} ${ansi.gray(member.role.padEnd(roleWidth))} ${type}
5518
+ ` ${member.displayName.padEnd(nameWidth)} ${ansi2.gray(member.role.padEnd(roleWidth))} ${type}
5006
5519
  `
5007
5520
  );
5008
5521
  }
@@ -5011,7 +5524,7 @@ async function cmdWhoami(options) {
5011
5524
  } catch (err) {
5012
5525
  const message = err instanceof Error ? err.message : String(err);
5013
5526
  process.stderr.write(
5014
- `\r\x1B[K ${ansi.dim("\u25CF")} ${ansi.dim("Not connected")} ${ansi.gray("\u2014")} ${message}
5527
+ `\r\x1B[K ${ansi2.dim("\u25CF")} ${ansi2.dim("Not connected")} ${ansi2.gray("\u2014")} ${message}
5015
5528
  `
5016
5529
  );
5017
5530
  }
@@ -5022,7 +5535,7 @@ function parseStepFlags(argv) {
5022
5535
  const arg = argv[i];
5023
5536
  if (arg.startsWith("--") && i + 1 < argv.length) {
5024
5537
  const key = arg.slice(2);
5025
- result[kebabToCamel(key)] = coerce(argv[++i]);
5538
+ result[kebabToCamel2(key)] = coerce(argv[++i]);
5026
5539
  }
5027
5540
  }
5028
5541
  return result;
@@ -5116,6 +5629,39 @@ async function main() {
5116
5629
  });
5117
5630
  return;
5118
5631
  }
5632
+ if (command === "ask") {
5633
+ let question = positionals.slice(1).join(" ");
5634
+ if (!question && !process.stdin.isTTY) {
5635
+ question = (await readStdin()).trim();
5636
+ }
5637
+ if (!question) {
5638
+ usageBlock([
5639
+ "ask \u2014 Built-in SDK assistant",
5640
+ "",
5641
+ "Returns working code with real model IDs, config options,",
5642
+ "and correct types. Knows every action, model, and connector.",
5643
+ "",
5644
+ "Usage:",
5645
+ ' mindstudio ask "your question here"',
5646
+ ' echo "your question" | mindstudio ask',
5647
+ "",
5648
+ "Examples:",
5649
+ ' mindstudio ask "generate an image with FLUX"',
5650
+ ' mindstudio ask "what models support vision?"',
5651
+ ' mindstudio ask "how do I send a Slack message with an attachment?"',
5652
+ ' mindstudio ask "what connectors could I configure?"',
5653
+ ' mindstudio ask "what are the config options for flux-max-2?"',
5654
+ ' mindstudio ask "give me code to transcribe an audio file"',
5655
+ ` mindstudio ask "what's the difference between generateText and userMessage?"`
5656
+ ]);
5657
+ }
5658
+ const { cmdAsk: cmdAsk2 } = await Promise.resolve().then(() => (init_ask(), ask_exports));
5659
+ await cmdAsk2(question, {
5660
+ apiKey: values["api-key"],
5661
+ baseUrl: values["base-url"]
5662
+ });
5663
+ return;
5664
+ }
5119
5665
  if (command === "list-actions") {
5120
5666
  await cmdList(values.json, values.summary);
5121
5667
  return;
@@ -5489,12 +6035,16 @@ async function main() {
5489
6035
  if (latestVersion) printUpdateNotice(latestVersion);
5490
6036
  }
5491
6037
  }
5492
- var HELP, MIME_TYPES2, ansi, UPDATE_CHECK_INTERVAL, LOGO, DEFAULT_BASE_URL2, SPINNER_FRAMES, GLOBAL_STRING_FLAGS;
6038
+ var HELP, MIME_TYPES2, ansi2, UPDATE_CHECK_INTERVAL, LOGO, DEFAULT_BASE_URL3, SPINNER_FRAMES, GLOBAL_STRING_FLAGS;
5493
6039
  var init_cli = __esm({
5494
6040
  "src/cli.ts"() {
5495
6041
  "use strict";
5496
6042
  HELP = `Usage: mindstudio <command> [options]
5497
6043
 
6044
+ Ask:
6045
+ ask "<question>" Ask about actions, models, connectors
6046
+ Returns working code with real model IDs
6047
+
5498
6048
  Run actions:
5499
6049
  <action> [json | --flags] Run an action directly
5500
6050
  run <action> [json | --flags] Run an action (explicit form)
@@ -5542,6 +6092,8 @@ Options:
5542
6092
  --help Show this help
5543
6093
 
5544
6094
  Examples:
6095
+ mindstudio ask "generate an image with FLUX"
6096
+ mindstudio ask "what models support vision?"
5545
6097
  mindstudio generate-image --prompt "a sunset"
5546
6098
  mindstudio generate-text --message "hello" --no-meta
5547
6099
  mindstudio generate-image '{"prompt":"a sunset"}' --output-key imageUrl
@@ -5571,7 +6123,7 @@ Examples:
5571
6123
  txt: "text/plain",
5572
6124
  csv: "text/csv"
5573
6125
  };
5574
- ansi = {
6126
+ ansi2 = {
5575
6127
  cyan: (s) => `\x1B[36m${s}\x1B[0m`,
5576
6128
  cyanBright: (s) => `\x1B[96m${s}\x1B[0m`,
5577
6129
  cyanBold: (s) => `\x1B[96;1m${s}\x1B[0m`,
@@ -5591,7 +6143,7 @@ Examples:
5591
6143
  =@@@@@@@-.@@@@@@@#.-@@@@@@+
5592
6144
  :@@@@@@: +@@@@@#. .@@@@@@:
5593
6145
  .++: .-*-. .++:`;
5594
- DEFAULT_BASE_URL2 = "https://v1.mindstudio-api.com";
6146
+ DEFAULT_BASE_URL3 = "https://v1.mindstudio-api.com";
5595
6147
  SPINNER_FRAMES = [
5596
6148
  "\u28FE",
5597
6149
  "\u28FD",