@mindstudio-ai/agent 0.0.19 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -194,6 +194,13 @@ var init_metadata = __esm({
194
194
  inputSchema: { "type": "object", "properties": { "documentId": { "type": "string", "description": "Google Spreadsheet ID or URL" }, "sheetName": { "type": "string", "description": "Sheet/tab name (defaults to first sheet)" }, "startRow": { "type": "string", "description": "First row to delete (1-based, inclusive)" }, "endRow": { "type": "string", "description": "Last row to delete (1-based, inclusive)" }, "connectionId": { "type": "string", "description": "Google OAuth connection ID" } }, "required": ["documentId", "startRow", "endRow"] },
195
195
  outputSchema: { "description": "This step does not produce output data." }
196
196
  },
197
+ "detectChanges": {
198
+ stepType: "detectChanges",
199
+ description: "Detect changes between runs by comparing current input against previously stored state. Routes execution based on whether a change occurred.",
200
+ usageNotes: '- Persists state across runs using a global variable keyed to the step ID.\n- Two modes: "comparison" (default) uses strict string inequality; "ai" uses an LLM to determine if a meaningful change occurred.\n- First run always treats the value as "changed" since there is no previous state.\n- Each mode supports transitions to different steps/workflows for the "changed" and "unchanged" paths.\n- AI mode bills normally for the LLM call.',
201
+ inputSchema: { "type": "object", "properties": { "mode": { "enum": ["ai", "comparison"], "type": "string", "description": "Detection mode: 'comparison' for strict string inequality, 'ai' for LLM-based. Default: 'comparison'" }, "input": { "type": "string", "description": "Current value to check (variable template)" }, "prompt": { "type": "string", "description": "AI mode: what constitutes a meaningful change" }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, 
"required": ["model", "temperature", "maxResponseTokens"], "description": "AI mode: model settings override" }, "previousValueVariable": { "type": "string", "description": "Optional variable name to store the previous value into for downstream access" }, "changedStepId": { "type": "string", "description": "Step to transition to if changed (same workflow)" }, "changedWorkflowId": { "type": "string", "description": "Workflow to jump to if changed (cross workflow)" }, "unchangedStepId": { "type": "string", "description": "Step to transition to if unchanged (same workflow)" }, "unchangedWorkflowId": { "type": "string", "description": "Workflow to jump to if unchanged (cross workflow)" } }, "required": ["mode", "input"], "description": "Configuration for the detect changes step" },
202
+ outputSchema: { "type": "object", "properties": { "hasChanged": { "type": "boolean", "description": "Whether a change was detected" }, "currentValue": { "type": "string", "description": "The resolved input value" }, "previousValue": { "type": "string", "description": "The stored value from last run (empty string on first run)" }, "isFirstRun": { "type": "boolean", "description": "True when no previous state exists" } }, "required": ["hasChanged", "currentValue", "previousValue", "isFirstRun"] }
203
+ },
197
204
  "detectPII": {
198
205
  stepType: "detectPII",
199
206
  description: "Scan text for personally identifiable information using Microsoft Presidio.",
@@ -201,6 +208,27 @@ var init_metadata = __esm({
201
208
  inputSchema: { "type": "object", "properties": { "input": { "type": "string", "description": "Text to scan for personally identifiable information" }, "language": { "type": "string", "description": 'Language code of the input text (e.g. "en")' }, "entities": { "type": "array", "items": { "type": "string" }, "description": 'PII entity types to scan for (e.g. ["PHONE_NUMBER", "EMAIL_ADDRESS"]). Empty array means nothing is scanned.' }, "detectedStepId": { "type": "string", "description": "Step to transition to if PII is detected (workflow mode)" }, "notDetectedStepId": { "type": "string", "description": "Step to transition to if no PII is detected (workflow mode)" }, "outputLogVariable": { "type": "string", "description": "Variable name to store the raw detection results" } }, "required": ["input", "language", "entities"] },
202
209
  outputSchema: { "type": "object", "properties": { "detected": { "type": "boolean", "description": "Whether any PII was found in the input text" }, "detections": { "type": "array", "items": { "type": "object", "properties": { "entity_type": { "type": "string", "description": 'PII entity type (e.g. "PHONE_NUMBER", "EMAIL_ADDRESS", "PERSON")' }, "start": { "type": "number", "description": "Start character index in the input text" }, "end": { "type": "number", "description": "End character index in the input text" }, "score": { "type": "number", "description": "Confidence score between 0 and 1" } }, "required": ["entity_type", "start", "end", "score"] }, "description": "List of detected PII entities with type, location, and confidence" } }, "required": ["detected", "detections"] }
203
210
  },
211
+ "discordEditMessage": {
212
+ stepType: "discordEditMessage",
213
+ description: "Edit a previously sent Discord channel message. Use with the message ID returned by Send Discord Message.",
214
+ usageNotes: "- Only messages sent by the bot can be edited.\n- The messageId is returned by the Send Discord Message step.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- When editing with an attachment, the new attachment replaces any previous attachments on the message.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).",
215
+ inputSchema: { "type": "object", "properties": { "botToken": { "type": "string", "description": "Discord bot token for authentication" }, "channelId": { "type": "string", "description": "Discord channel ID containing the message" }, "messageId": { "type": "string", "description": "ID of the message to edit (returned by Send Discord Message)" }, "text": { "type": "string", "description": "New message text to replace the existing content" }, "attachmentUrl": { "type": "string", "description": "URL of a file to download and attach to the message (replaces any previous attachments)" } }, "required": ["botToken", "channelId", "messageId", "text"] },
216
+ outputSchema: { "description": "This step does not produce output data." }
217
+ },
218
+ "discordSendFollowUp": {
219
+ stepType: "discordSendFollowUp",
220
+ description: "Send a follow-up message to a Discord slash command interaction.",
221
+ usageNotes: "- Requires the applicationId and interactionToken from the Discord trigger variables.\n- Follow-up messages appear as new messages in the channel after the initial response.\n- Returns the sent message ID.\n- Interaction tokens expire after 15 minutes.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).",
222
+ inputSchema: { "type": "object", "properties": { "applicationId": { "type": "string", "description": "Discord application ID from the bot registration" }, "interactionToken": { "type": "string", "description": "Interaction token provided by the Discord trigger \u2014 expires after 15 minutes" }, "text": { "type": "string", "description": "Message text to send as a follow-up" }, "attachmentUrl": { "type": "string", "description": "URL of a file to download and attach to the message" } }, "required": ["applicationId", "interactionToken", "text"] },
223
+ outputSchema: { "type": "object", "properties": { "messageId": { "type": "string", "description": "ID of the sent follow-up message" } }, "required": ["messageId"] }
224
+ },
225
+ "discordSendMessage": {
226
+ stepType: "discordSendMessage",
227
+ description: "Send a message to Discord \u2014 either edit the loading message or send a new channel message.",
228
+ usageNotes: '- mode "edit" replaces the loading message (interaction response) with the final result. Uses applicationId and interactionToken from trigger variables. No bot permissions required.\n- mode "send" sends a new message to a channel. Uses botToken and channelId from trigger variables. Returns a messageId that can be used with Edit Discord Message.\n- Optionally attach a file by providing a URL to attachmentUrl. The file is downloaded and uploaded to Discord.\n- URLs in the text are automatically embedded by Discord (link previews for images, videos, etc.).\n- Interaction tokens expire after 15 minutes.',
229
+ inputSchema: { "type": "object", "properties": { "mode": { "enum": ["edit", "send"], "type": "string", "description": '"edit" replaces the loading message, "send" sends a new channel message' }, "text": { "type": "string", "description": "Message text to send" }, "applicationId": { "type": "string", "description": 'Discord application ID from the bot registration (required for "edit" mode)' }, "interactionToken": { "type": "string", "description": 'Interaction token provided by the Discord trigger \u2014 expires after 15 minutes (required for "edit" mode)' }, "botToken": { "type": "string", "description": 'Discord bot token for authentication (required for "send" mode)' }, "channelId": { "type": "string", "description": 'Discord channel ID to send the message to (required for "send" mode)' }, "attachmentUrl": { "type": "string", "description": "URL of a file to download and attach to the message" } }, "required": ["mode", "text"] },
230
+ outputSchema: { "type": "object", "properties": { "messageId": { "type": "string", "description": 'ID of the sent Discord message, only present in "send" mode (use with Edit Discord Message)' } } }
231
+ },
204
232
  "downloadVideo": {
205
233
  stepType: "downloadVideo",
206
234
  description: "Download a video file",
@@ -528,9 +556,13 @@ var init_metadata = __esm({
528
556
  },
529
557
  "logic": {
530
558
  stepType: "logic",
531
- description: "Use an AI model to evaluate which condition from a list is most true, given a context prompt.",
532
- usageNotes: '- This is "fuzzy" logic evaluated by an AI model, not computational logic. The model picks the most accurate statement.\n- All possible cases must be specified \u2014 there is no default/fallback case.\n- Requires at least two cases.\n- In workflow mode, transitions to the destinationStepId of the winning case. In direct execution, returns the winning case ID and condition.',
533
- inputSchema: { "type": "object", "properties": { "context": { "type": "string", "description": "Prompt text providing context for the AI evaluation" }, "cases": { "type": "array", "items": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Unique case identifier" }, "condition": { "type": "string", "description": 'The statement to evaluate (e.g., "User selected a dog")' }, "destinationStepId": { "type": "string", "description": "Step to transition to if this case wins (workflow mode only)" } }, "required": ["id", "condition"] }, { "type": "string" }] }, "description": "List of conditions to evaluate (objects for managed UIs, strings for code)" } }, "required": ["context", "cases"], "description": "Configuration for the logic evaluation step" },
559
+ description: "Route execution to different branches based on AI evaluation, comparison operators, or workflow jumps.",
560
+ usageNotes: `- Supports two modes: "ai" (default) uses an AI model to pick the most accurate statement; "comparison" uses operator-based checks.
561
+ - In AI mode, the model picks the most accurate statement from the list. All possible cases must be specified.
562
+ - In comparison mode, the context is the left operand and each case's condition is the right operand. First matching case wins. Use operator "default" as a fallback.
563
+ - Requires at least two cases.
564
+ - Each case can transition to a step in the current workflow (destinationStepId) or jump to another workflow (destinationWorkflowId).`,
565
+ inputSchema: { "type": "object", "properties": { "mode": { "enum": ["ai", "comparison"], "type": "string", "description": "Evaluation mode: 'ai' for LLM-based, 'comparison' for operator-based. Default: 'ai'" }, "context": { "type": "string", "description": "AI mode: prompt context. Comparison mode: left operand (resolved via variables)." }, "cases": { "type": "array", "items": { "anyOf": [{ "type": "object", "properties": { "id": { "type": "string", "description": "Unique case identifier" }, "condition": { "type": "string", "description": "AI mode: statement to evaluate. Comparison mode: right operand value." }, "operator": { "enum": ["eq", "neq", "gt", "lt", "gte", "lte", "exists", "not_exists", "contains", "not_contains", "default"], "type": "string", "description": "Comparison operator (comparison mode only)" }, "destinationStepId": { "type": "string", "description": "Step to transition to if this case wins (workflow mode only)" }, "destinationWorkflowId": { "type": "string", "description": "Workflow to jump to if this case wins (uses that workflow's initial step)" } }, "required": ["id", "condition"] }, { "type": "string" }] }, "description": "List of conditions to evaluate (objects for managed UIs, strings for code)" }, "modelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. 
"gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Optional model settings override; uses the organization default if not specified (AI mode only)" } }, "required": ["context", "cases"], "description": "Configuration for the router step" },
534
566
  outputSchema: { "type": "object", "properties": { "selectedCase": { "type": "number", "description": "The index of the winning case" } }, "required": ["selectedCase"] }
535
567
  },
536
568
  "makeDotComRunScenario": {
@@ -666,6 +698,13 @@ var init_metadata = __esm({
666
698
  inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to resize" }, "mode": { "enum": ["fit", "exact"], "type": "string", "description": "Resize mode: 'fit' scales within max dimensions, 'exact' forces exact dimensions" }, "maxWidth": { "type": "number", "description": "Maximum width in pixels (used with 'fit' mode)" }, "maxHeight": { "type": "number", "description": "Maximum height in pixels (used with 'fit' mode)" }, "width": { "type": "number", "description": "Exact width in pixels (used with 'exact' mode)" }, "height": { "type": "number", "description": "Exact height in pixels (used with 'exact' mode)" }, "strategy": { "enum": ["pad", "crop"], "type": "string", "description": "Strategy for handling aspect ratio mismatch in 'exact' mode" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "mode"] },
667
699
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the resized video" } }, "required": ["videoUrl"] }
668
700
  },
701
+ "runFromConnectorRegistry": {
702
+ stepType: "runFromConnectorRegistry",
703
+ description: "Run a raw API connector to a third-party service",
704
+ usageNotes: '- Use the /developer/v2/helpers/connectors endpoint to list available services and actions.\n- Use /developer/v2/helpers/connectors/{serviceId}/{actionId} to get the full input configuration for an action.\n- Use /developer/v2/helpers/connections to list your available OAuth connections.\n- The actionId format is "serviceId/actionId" (e.g., "slack/send-message").\n- Pass a __connectionId to authenticate the request with a specific OAuth connection, otherwise the default will be used (if configured).',
705
+ inputSchema: { "type": "object", "properties": { "actionId": { "type": "string", "description": "The connector action identifier in the format serviceId/actionId" }, "displayName": { "type": "string", "description": "Human-readable name of the connector action" }, "icon": { "type": "string", "description": "Icon URL for the connector" }, "configurationValues": { "type": "object", "description": "Key-value configuration parameters for the connector action" }, "__connectionId": { "type": "string", "description": "OAuth connection ID used to authenticate the connector request" } }, "required": ["actionId", "displayName", "icon", "configurationValues"], "description": "Configuration for the connector registry step" },
706
+ outputSchema: { "type": "object", "properties": { "data": { "type": "object", "description": "Key-value map of output variables set by the connector" } }, "required": ["data"] }
707
+ },
669
708
  "runPackagedWorkflow": {
670
709
  stepType: "runPackagedWorkflow",
671
710
  description: 'Run a packaged workflow ("custom block")',
@@ -1295,9 +1334,21 @@ function applyStepMethods(AgentClass) {
1295
1334
  proto.deleteGoogleSheetRows = function(step, options) {
1296
1335
  return this.executeStep("deleteGoogleSheetRows", step, options);
1297
1336
  };
1337
+ proto.detectChanges = function(step, options) {
1338
+ return this.executeStep("detectChanges", step, options);
1339
+ };
1298
1340
  proto.detectPII = function(step, options) {
1299
1341
  return this.executeStep("detectPII", step, options);
1300
1342
  };
1343
+ proto.discordEditMessage = function(step, options) {
1344
+ return this.executeStep("discordEditMessage", step, options);
1345
+ };
1346
+ proto.discordSendFollowUp = function(step, options) {
1347
+ return this.executeStep("discordSendFollowUp", step, options);
1348
+ };
1349
+ proto.discordSendMessage = function(step, options) {
1350
+ return this.executeStep("discordSendMessage", step, options);
1351
+ };
1301
1352
  proto.downloadVideo = function(step, options) {
1302
1353
  return this.executeStep("downloadVideo", step, options);
1303
1354
  };
@@ -1490,6 +1541,9 @@ function applyStepMethods(AgentClass) {
1490
1541
  proto.resizeVideo = function(step, options) {
1491
1542
  return this.executeStep("resizeVideo", step, options);
1492
1543
  };
1544
+ proto.runFromConnectorRegistry = function(step, options) {
1545
+ return this.executeStep("runFromConnectorRegistry", step, options);
1546
+ };
1493
1547
  proto.runPackagedWorkflow = function(step, options) {
1494
1548
  return this.executeStep("runPackagedWorkflow", step, options);
1495
1549
  };
@@ -1663,12 +1717,24 @@ function applyHelperMethods(AgentClass) {
1663
1717
  proto.listModelsByType = function(modelType) {
1664
1718
  return this._request("GET", `/helpers/models/${modelType}`).then((r) => r.data);
1665
1719
  };
1720
+ proto.listModelsSummary = function() {
1721
+ return this._request("GET", "/helpers/models-summary").then((r) => r.data);
1722
+ };
1723
+ proto.listModelsSummaryByType = function(modelType) {
1724
+ return this._request("GET", `/helpers/models-summary/${modelType}`).then((r) => r.data);
1725
+ };
1666
1726
  proto.listConnectors = function() {
1667
1727
  return this._request("GET", "/helpers/connectors").then((r) => r.data);
1668
1728
  };
1669
1729
  proto.getConnector = function(serviceId) {
1670
1730
  return this._request("GET", `/helpers/connectors/${serviceId}`).then((r) => r.data);
1671
1731
  };
1732
+ proto.getConnectorAction = function(serviceId, actionId) {
1733
+ return this._request("GET", `/helpers/connectors/${serviceId}/${actionId}`).then((r) => r.data);
1734
+ };
1735
+ proto.listConnections = function() {
1736
+ return this._request("GET", "/helpers/connections").then((r) => r.data);
1737
+ };
1672
1738
  }
1673
1739
  var init_helpers = __esm({
1674
1740
  "src/generated/helpers.ts"() {
@@ -1922,7 +1988,7 @@ async function startMcpServer(options) {
1922
1988
  capabilities: { tools: {} },
1923
1989
  serverInfo: {
1924
1990
  name: "mindstudio-agent",
1925
- version: "0.0.19"
1991
+ version: "0.1.0"
1926
1992
  }
1927
1993
  });
1928
1994
  break;
@@ -1942,12 +2008,25 @@ async function startMcpServer(options) {
1942
2008
  result = await getAgent().listModelsByType(
1943
2009
  args.modelType
1944
2010
  );
2011
+ } else if (toolName === "listModelsSummary") {
2012
+ result = await getAgent().listModelsSummary();
2013
+ } else if (toolName === "listModelsSummaryByType") {
2014
+ result = await getAgent().listModelsSummaryByType(
2015
+ args.modelType
2016
+ );
1945
2017
  } else if (toolName === "listConnectors") {
1946
2018
  result = await getAgent().listConnectors();
1947
2019
  } else if (toolName === "getConnector") {
1948
2020
  result = await getAgent().getConnector(
1949
2021
  args.serviceId
1950
2022
  );
2023
+ } else if (toolName === "getConnectorAction") {
2024
+ result = await getAgent().getConnectorAction(
2025
+ args.serviceId,
2026
+ args.actionId
2027
+ );
2028
+ } else if (toolName === "listConnections") {
2029
+ result = await getAgent().listConnections();
1951
2030
  } else if (toolName === "listAgents") {
1952
2031
  result = await getAgent().listAgents();
1953
2032
  } else if (toolName === "runAgent") {
@@ -2036,9 +2115,36 @@ var init_mcp = __esm({
2036
2115
  required: ["modelType"]
2037
2116
  }
2038
2117
  },
2118
+ {
2119
+ name: "listModelsSummary",
2120
+ description: "List all available AI models (summary) with only id, name, type, and tags. Suitable for display or consumption inside a model context window.",
2121
+ inputSchema: { type: "object", properties: {} }
2122
+ },
2123
+ {
2124
+ name: "listModelsSummaryByType",
2125
+ description: "List AI models (summary) filtered by type.",
2126
+ inputSchema: {
2127
+ type: "object",
2128
+ properties: {
2129
+ modelType: {
2130
+ type: "string",
2131
+ enum: [
2132
+ "llm_chat",
2133
+ "image_generation",
2134
+ "video_generation",
2135
+ "video_analysis",
2136
+ "text_to_speech",
2137
+ "vision",
2138
+ "transcription"
2139
+ ]
2140
+ }
2141
+ },
2142
+ required: ["modelType"]
2143
+ }
2144
+ },
2039
2145
  {
2040
2146
  name: "listConnectors",
2041
- description: "List available connector services (Slack, Google, HubSpot, etc.).",
2147
+ description: "List available connector services (Slack, Google, HubSpot, etc.) and their actions.",
2042
2148
  inputSchema: { type: "object", properties: {} }
2043
2149
  },
2044
2150
  {
@@ -2050,6 +2156,29 @@ var init_mcp = __esm({
2050
2156
  required: ["serviceId"]
2051
2157
  }
2052
2158
  },
2159
+ {
2160
+ name: "getConnectorAction",
2161
+ description: "Get the full configuration for a connector action, including all input fields needed to call it via runFromConnectorRegistry.",
2162
+ inputSchema: {
2163
+ type: "object",
2164
+ properties: {
2165
+ serviceId: {
2166
+ type: "string",
2167
+ description: "The connector service ID."
2168
+ },
2169
+ actionId: {
2170
+ type: "string",
2171
+ description: 'The full action ID including service prefix (e.g. "slack/send-message").'
2172
+ }
2173
+ },
2174
+ required: ["serviceId", "actionId"]
2175
+ }
2176
+ },
2177
+ {
2178
+ name: "listConnections",
2179
+ description: "List OAuth connections for the organization. Use the returned connection IDs when calling connector actions.",
2180
+ inputSchema: { type: "object", properties: {} }
2181
+ },
2053
2182
  {
2054
2183
  name: "listAgents",
2055
2184
  description: "List all pre-built agents in the organization along with org metadata.",
@@ -2189,8 +2318,12 @@ async function readStdin() {
2189
2318
  var HELPER_NAMES = /* @__PURE__ */ new Set([
2190
2319
  "listModels",
2191
2320
  "listModelsByType",
2321
+ "listModelsSummary",
2322
+ "listModelsSummaryByType",
2192
2323
  "listConnectors",
2193
- "getConnector"
2324
+ "getConnector",
2325
+ "getConnectorAction",
2326
+ "listConnections"
2194
2327
  ]);
2195
2328
  function resolveMethodOrFail(name, metadataKeys) {
2196
2329
  if (metadataKeys.has(name)) return name;
@@ -2249,8 +2382,12 @@ async function cmdInfo(rawMethod) {
2249
2382
  const helpers = {
2250
2383
  listModels: { desc: "List all available AI models.", input: "(none)", output: "{ models: MindStudioModel[] }" },
2251
2384
  listModelsByType: { desc: "List AI models filtered by type.", input: "modelType: string (required)", output: "{ models: MindStudioModel[] }" },
2252
- listConnectors: { desc: "List available connector services.", input: "(none)", output: "{ services: Array }" },
2253
- getConnector: { desc: "Get details for a connector service.", input: "serviceId: string (required)", output: "{ service: object }" }
2385
+ listModelsSummary: { desc: "List all AI models (summary: id, name, type, tags).", input: "(none)", output: "{ models: MindStudioModelSummary[] }" },
2386
+ listModelsSummaryByType: { desc: "List AI models (summary) filtered by type.", input: "modelType: string (required)", output: "{ models: MindStudioModelSummary[] }" },
2387
+ listConnectors: { desc: "List available connector services and their actions.", input: "(none)", output: "{ services: ConnectorService[] }" },
2388
+ getConnector: { desc: "Get details for a connector service.", input: "serviceId: string (required)", output: "{ service: ConnectorService }" },
2389
+ getConnectorAction: { desc: "Get full configuration for a connector action.", input: "serviceId: string, actionId: string (both required)", output: "{ action: ConnectorActionDetail }" },
2390
+ listConnections: { desc: "List OAuth connections for the organization.", input: "(none)", output: "{ connections: Connection[] }" }
2254
2391
  };
2255
2392
  const h = helpers[method];
2256
2393
  process.stderr.write(`
@@ -2332,10 +2469,23 @@ async function cmdExec(method, input, options) {
2332
2469
  result = await agent.listModels();
2333
2470
  } else if (method === "listModelsByType") {
2334
2471
  result = await agent.listModelsByType(input.modelType);
2472
+ } else if (method === "listModelsSummary") {
2473
+ result = await agent.listModelsSummary();
2474
+ } else if (method === "listModelsSummaryByType") {
2475
+ result = await agent.listModelsSummaryByType(
2476
+ input.modelType
2477
+ );
2335
2478
  } else if (method === "listConnectors") {
2336
2479
  result = await agent.listConnectors();
2337
2480
  } else if (method === "getConnector") {
2338
2481
  result = await agent.getConnector(input.serviceId);
2482
+ } else if (method === "getConnectorAction") {
2483
+ result = await agent.getConnectorAction(
2484
+ input.serviceId,
2485
+ input.actionId
2486
+ );
2487
+ } else if (method === "listConnections") {
2488
+ result = await agent.listConnections();
2339
2489
  } else {
2340
2490
  const { stepMetadata: stepMetadata2 } = await Promise.resolve().then(() => (init_metadata(), metadata_exports));
2341
2491
  const meta = stepMetadata2[method];
@@ -2450,7 +2600,7 @@ function isNewerVersion(current, latest) {
2450
2600
  return false;
2451
2601
  }
2452
2602
  async function checkForUpdate() {
2453
- const currentVersion = "0.0.19";
2603
+ const currentVersion = "0.1.0";
2454
2604
  if (!currentVersion) return null;
2455
2605
  try {
2456
2606
  const { loadConfig: loadConfig2, saveConfig: saveConfig2 } = await Promise.resolve().then(() => (init_config(), config_exports));
@@ -2479,7 +2629,7 @@ async function checkForUpdate() {
2479
2629
  }
2480
2630
  }
2481
2631
  function printUpdateNotice(latestVersion) {
2482
- const currentVersion = "0.0.19";
2632
+ const currentVersion = "0.1.0";
2483
2633
  process.stderr.write(
2484
2634
  `
2485
2635
  ${ansi.cyanBright("Update available")} ${ansi.gray(currentVersion + " \u2192")} ${ansi.cyanBold(latestVersion)}
@@ -2553,7 +2703,7 @@ async function cmdLogin(options) {
2553
2703
  process.stderr.write("\n");
2554
2704
  printLogo();
2555
2705
  process.stderr.write("\n");
2556
- const ver = "0.0.19";
2706
+ const ver = "0.1.0";
2557
2707
  process.stderr.write(
2558
2708
  ` ${ansi.bold("MindStudio")} ${ansi.gray("CLI")}${ver ? " " + ansi.gray("v" + ver) : ""}
2559
2709
  `