@mindstudio-ai/agent 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,4727 @@
1
+ interface HttpClientConfig {
2
+ baseUrl: string;
3
+ token: string;
4
+ }
5
+
6
+ /** Configuration options for creating a {@link MindStudioAgent}. */
7
+ interface AgentOptions {
8
+ /**
9
+ * MindStudio API key. Used as a Bearer token for authentication.
10
+ *
11
+ * If omitted, the SDK looks for `MINDSTUDIO_API_KEY` in the environment,
12
+ * then falls back to `CALLBACK_TOKEN` (available automatically
13
+ * inside MindStudio custom functions).
14
+ */
15
+ apiKey?: string;
16
+ /**
17
+ * Base URL of the MindStudio API.
18
+ *
19
+ * If omitted, the SDK looks for `MINDSTUDIO_BASE_URL` in the environment,
20
+ * then `REMOTE_HOSTNAME` (available automatically inside MindStudio
21
+ * custom functions), then falls back to `https://v1.mindstudio-api.com`.
22
+ */
23
+ baseUrl?: string;
24
+ }
25
+ /** Options for a single step execution call. */
26
+ interface StepExecutionOptions {
27
+ /**
28
+ * App ID to execute within. When using an API key, omit this to let the
29
+ * API create a service account app automatically. Pass a previously returned
30
+ * `appId` to reuse an existing app context.
31
+ */
32
+ appId?: string;
33
+ /**
34
+ * Thread ID for state persistence across calls. Omit to create an ephemeral
35
+ * thread. Pass a previously returned `threadId` to maintain conversation
36
+ * history or variable state.
37
+ */
38
+ threadId?: string;
39
+ }
40
+ /** Result of a step execution call. */
41
+ interface StepExecutionResult<TOutput = unknown> {
42
+ /** The step's output data. */
43
+ output: TOutput;
44
+ /**
45
+ * Signed URL to fetch the output from S3.
46
+ * Present only when the output was too large to inline in the response body.
47
+ */
48
+ outputUrl?: string;
49
+ /** The app ID used for this execution. Pass to subsequent calls to reuse. */
50
+ appId: string;
51
+ /** The thread ID used for this execution. Pass to subsequent calls to maintain state. */
52
+ threadId: string;
53
+ }
54
+
55
+ interface ActiveCampaignAddNoteStepInput {
56
+ /** ActiveCampaign contact ID to add the note to */
57
+ contactId: string;
58
+ /** Note text content */
59
+ note: string;
60
+ /** ActiveCampaign OAuth connection ID */
61
+ connectionId: string;
62
+ }
63
+ type ActiveCampaignAddNoteStepOutput = unknown;
64
+ interface ActiveCampaignCreateContactStepInput {
65
+ /** Contact email address */
66
+ email: string;
67
+ /** Contact first name */
68
+ firstName: string;
69
+ /** Contact last name */
70
+ lastName: string;
71
+ /** Contact phone number */
72
+ phone: string;
73
+ /** ActiveCampaign account ID to associate the contact with */
74
+ accountId: string;
75
+ /** Custom field values keyed by field ID */
76
+ customFields: Record<string, unknown>;
77
+ /** ActiveCampaign OAuth connection ID */
78
+ connectionId: string;
79
+ /** Variable name to store the created contact ID */
80
+ destinationVar?: string;
81
+ }
82
+ interface ActiveCampaignCreateContactStepOutput {
83
+ /** ActiveCampaign contact ID of the created contact */
84
+ contactId: string;
85
+ }
86
+ interface AddSubtitlesToVideoStepInput {
87
+ /** URL of the source video */
88
+ videoUrl: string;
89
+ /** Variable name to store the output URL */
90
+ destinationVar: string;
91
+ /** ISO language code for subtitle transcription */
92
+ language: string;
93
+ /** Google Font name for subtitle text */
94
+ fontName: string;
95
+ /** Font size in pixels. Default: 100. */
96
+ fontSize: number;
97
+ /** Font weight for subtitle text */
98
+ fontWeight: "normal" | "bold" | "black";
99
+ /** Color of the subtitle text */
100
+ fontColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta";
101
+ /** Color used to highlight the currently spoken word */
102
+ highlightColor: "white" | "black" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta";
103
+ /** Width of the text stroke outline in pixels */
104
+ strokeWidth: number;
105
+ /** Color of the text stroke outline */
106
+ strokeColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta";
107
+ /** Background color behind subtitle text. Use 'none' for transparent. */
108
+ backgroundColor: "black" | "white" | "red" | "green" | "blue" | "yellow" | "orange" | "purple" | "pink" | "brown" | "gray" | "cyan" | "magenta" | "none";
109
+ /** Opacity of the subtitle background. 0.0 = fully transparent, 1.0 = fully opaque. */
110
+ backgroundOpacity: number;
111
+ /** Vertical position of subtitle text on screen */
112
+ position: "top" | "center" | "bottom";
113
+ /** Vertical offset in pixels from the position. Positive moves down, negative moves up. Default: 75. */
114
+ yOffset: number;
115
+ /** Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences. Default: 3. */
116
+ wordsPerSubtitle: number;
117
+ /** When true, enables bounce-style entrance animation for subtitles. Default: true. */
118
+ enableAnimation: boolean;
119
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
120
+ skipAssetCreation?: boolean;
121
+ }
122
+ interface AddSubtitlesToVideoStepOutput {
123
+ /** URL of the video with subtitles added */
124
+ videoUrl: string;
125
+ }
126
+ interface AirtableCreateUpdateRecordStepInput {
127
+ /** Variable name to store the created/updated record ID */
128
+ destinationVar?: string;
129
+ /** Airtable OAuth connection ID */
130
+ connectionId: string;
131
+ /** Airtable base ID */
132
+ baseId: string;
133
+ /** Airtable table ID */
134
+ tableId: string;
135
+ /** Record ID to update. Omit to create a new record */
136
+ recordId?: string;
137
+ /** How to handle unspecified fields on update. 'onlySpecified' leaves them as-is, 'all' clears them */
138
+ updateMode?: "onlySpecified" | "all";
139
+ /** Field schema metadata used for type resolution */
140
+ fields: unknown;
141
+ /** Field values to set, keyed by field ID */
142
+ recordData: Record<string, unknown>;
143
+ }
144
+ interface AirtableCreateUpdateRecordStepOutput {
145
+ /** The Airtable record ID of the created or updated record */
146
+ recordId: string;
147
+ }
148
+ interface AirtableDeleteRecordStepInput {
149
+ /** Airtable OAuth connection ID */
150
+ connectionId: string;
151
+ /** Variable name to store the result */
152
+ destinationVar?: string;
153
+ /** Airtable base ID */
154
+ baseId: string;
155
+ /** Airtable table ID */
156
+ tableId: string;
157
+ /** Record ID to delete */
158
+ recordId: string;
159
+ }
160
+ interface AirtableDeleteRecordStepOutput {
161
+ /** Whether the record was successfully deleted */
162
+ deleted: boolean;
163
+ }
164
+ interface AirtableGetRecordStepInput {
165
+ /** Airtable OAuth connection ID */
166
+ connectionId: string;
167
+ /** Variable name to store the JSON result */
168
+ destinationVar?: string;
169
+ /** Airtable base ID (e.g. "appXXXXXX") */
170
+ baseId: string;
171
+ /** Airtable table ID (e.g. "tblXXXXXX") */
172
+ tableId: string;
173
+ /** Record ID to fetch (e.g. "recXXXXXX") */
174
+ recordId: string;
175
+ }
176
+ interface AirtableGetRecordStepOutput {
177
+ /** The retrieved Airtable record, or null if not found */
178
+ record: {
179
+ /** Airtable record ID */
180
+ id: string;
181
+ /** ISO 8601 timestamp when the record was created */
182
+ createdTime: string;
183
+ /** Field values keyed by field name */
184
+ fields: Record<string, unknown>;
185
+ } | null;
186
+ }
187
+ interface AirtableGetTableRecordsStepInput {
188
+ /** Airtable OAuth connection ID */
189
+ connectionId: string;
190
+ /** Variable name to store the result */
191
+ destinationVar?: string;
192
+ /** Airtable base ID (e.g. "appXXXXXX") */
193
+ baseId: string;
194
+ /** Airtable table ID (e.g. "tblXXXXXX") */
195
+ tableId: string;
196
+ /** Output format for the result. Defaults to 'json' */
197
+ outputFormat?: "json" | "csv";
198
+ /** Maximum number of records to return. Defaults to 100, max 1000 */
199
+ limit?: number;
200
+ }
201
+ interface AirtableGetTableRecordsStepOutput {
202
+ /** The list of records retrieved from the Airtable table */
203
+ records: {
204
+ /** Airtable record ID */
205
+ id: string;
206
+ /** ISO 8601 timestamp when the record was created */
207
+ createdTime: string;
208
+ /** Field values keyed by field name */
209
+ fields: Record<string, unknown>;
210
+ }[];
211
+ }
212
+ interface AnalyzeImageStepInput {
213
+ /** Instructions describing what to look for or extract from the image */
214
+ prompt: string;
215
+ /** URL of the image to analyze */
216
+ imageUrl: string;
217
+ /** Variable name to save the analysis text into */
218
+ destinationVar?: string;
219
+ /** Optional model configuration override. Uses the workflow's default vision model if not specified */
220
+ visionModelOverride?: {
221
+ model: string;
222
+ config?: Record<string, unknown>;
223
+ } | {
224
+ /** Model identifier (e.g. "gpt-4", "claude-3-opus") */
225
+ model: string;
226
+ /** Sampling temperature for the model (0-2) */
227
+ temperature: number;
228
+ /** Maximum number of tokens in the model's response */
229
+ maxResponseTokens: number;
230
+ /** Whether to skip the system preamble/instructions */
231
+ ignorePreamble?: boolean;
232
+ /** Preprocessor applied to user messages before sending to the model */
233
+ userMessagePreprocessor?: {
234
+ /** Data source identifier for the preprocessor */
235
+ dataSource?: string;
236
+ /** Template string applied to user messages before sending to the model */
237
+ messageTemplate?: string;
238
+ /** Maximum number of results to include from the data source */
239
+ maxResults?: number;
240
+ /** Whether the preprocessor is active */
241
+ enabled?: boolean;
242
+ /** Whether child steps should inherit this preprocessor configuration */
243
+ shouldInherit?: boolean;
244
+ };
245
+ /** System preamble/instructions for the model */
246
+ preamble?: string;
247
+ /** Whether multi-model candidate generation is enabled */
248
+ multiModelEnabled?: boolean;
249
+ /** Whether the user can edit the model's response */
250
+ editResponseEnabled?: boolean;
251
+ /** Additional model-specific configuration */
252
+ config?: Record<string, unknown>;
253
+ };
254
+ }
255
+ interface AnalyzeImageStepOutput {
256
+ /** Text analysis of the image generated by the vision model */
257
+ analysis: string;
258
+ }
259
+ interface AnalyzeVideoStepInput {
260
+ /** Instructions describing what to look for or extract from the video */
261
+ prompt: string;
262
+ /** URL of the video to analyze */
263
+ videoUrl: string;
264
+ /** Variable name to save the analysis text into */
265
+ destinationVar?: string;
266
+ /** Optional model configuration override. Uses the workflow's default video analysis model if not specified */
267
+ videoAnalysisModelOverride?: {
268
+ model: string;
269
+ config?: Record<string, unknown>;
270
+ } | {
271
+ /** Model identifier (e.g. "gpt-4", "claude-3-opus") */
272
+ model: string;
273
+ /** Sampling temperature for the model (0-2) */
274
+ temperature: number;
275
+ /** Maximum number of tokens in the model's response */
276
+ maxResponseTokens: number;
277
+ /** Whether to skip the system preamble/instructions */
278
+ ignorePreamble?: boolean;
279
+ /** Preprocessor applied to user messages before sending to the model */
280
+ userMessagePreprocessor?: {
281
+ /** Data source identifier for the preprocessor */
282
+ dataSource?: string;
283
+ /** Template string applied to user messages before sending to the model */
284
+ messageTemplate?: string;
285
+ /** Maximum number of results to include from the data source */
286
+ maxResults?: number;
287
+ /** Whether the preprocessor is active */
288
+ enabled?: boolean;
289
+ /** Whether child steps should inherit this preprocessor configuration */
290
+ shouldInherit?: boolean;
291
+ };
292
+ /** System preamble/instructions for the model */
293
+ preamble?: string;
294
+ /** Whether multi-model candidate generation is enabled */
295
+ multiModelEnabled?: boolean;
296
+ /** Whether the user can edit the model's response */
297
+ editResponseEnabled?: boolean;
298
+ /** Additional model-specific configuration */
299
+ config?: Record<string, unknown>;
300
+ };
301
+ }
302
+ interface AnalyzeVideoStepOutput {
303
+ /** Text analysis of the video generated by the video analysis model */
304
+ analysis: string;
305
+ }
306
+ interface CaptureThumbnailStepInput {
307
+ /** URL of the source video to capture a frame from */
308
+ videoUrl: string;
309
+ /** Variable name to store the output URL */
310
+ destinationVar: string;
311
+ /** Timestamp in seconds to capture the frame, or 'last' for the final frame */
312
+ at: number | string;
313
+ }
314
+ interface CaptureThumbnailStepOutput {
315
+ /** URL of the captured thumbnail image */
316
+ thumbnailUrl: string;
317
+ }
318
+ interface CodaCreateUpdatePageStepInput {
319
+ /** Variable name to store the created/updated page ID */
320
+ destinationVar?: string;
321
+ /** Coda OAuth connection ID */
322
+ connectionId: string;
323
+ /** Page configuration including document ID, title, content, and optional parent page */
324
+ pageData: {
325
+ /** Coda document ID */
326
+ docId: string;
327
+ /** Page ID to update. Omit to create a new page */
328
+ pageId?: string;
329
+ /** Page title */
330
+ name: string;
331
+ /** Page subtitle */
332
+ subtitle: string;
333
+ /** Page icon name */
334
+ iconName: string;
335
+ /** Page cover image URL */
336
+ imageUrl: string;
337
+ /** Parent page ID for nesting under another page */
338
+ parentPageId?: string;
339
+ /** Page content (markdown string or canvas content object) */
340
+ pageContent: string | unknown;
341
+ /** Content update payload for partial updates */
342
+ contentUpdate?: unknown;
343
+ /** How to insert content on update: "append" or "replace" */
344
+ insertionMode?: string;
345
+ };
346
+ }
347
+ interface CodaCreateUpdatePageStepOutput {
348
+ /** The Coda page ID of the created or updated page */
349
+ pageId: string;
350
+ }
351
+ interface CodaCreateUpdateRowStepInput {
352
+ /** Variable name to store the created/updated row ID */
353
+ destinationVar?: string;
354
+ /** Coda OAuth connection ID */
355
+ connectionId: string;
356
+ /** Coda document ID */
357
+ docId: string;
358
+ /** Table ID within the document */
359
+ tableId: string;
360
+ /** Row ID to update. Omit to create a new row */
361
+ rowId?: string;
362
+ /** Column values to set, keyed by column ID */
363
+ rowData: Record<string, unknown>;
364
+ }
365
+ interface CodaCreateUpdateRowStepOutput {
366
+ /** The Coda row ID of the created or updated row */
367
+ rowId: string;
368
+ }
369
+ interface CodaFindRowStepInput {
370
+ /** Variable name to store the found row as JSON */
371
+ destinationVar?: string;
372
+ /** Coda OAuth connection ID */
373
+ connectionId: string;
374
+ /** Coda document ID */
375
+ docId: string;
376
+ /** Table ID to search within */
377
+ tableId: string;
378
+ /** Column values to match against, keyed by column ID. All criteria are ANDed together */
379
+ rowData: Record<string, unknown>;
380
+ }
381
+ interface CodaFindRowStepOutput {
382
+ /** The first matching row, or null if no match was found */
383
+ row: {
384
+ /** Coda row ID */
385
+ id: string;
386
+ /** Column values keyed by column name */
387
+ values: Record<string, unknown>;
388
+ } | null;
389
+ }
390
+ interface CodaGetPageStepInput {
391
+ /** Variable name to store the page content */
392
+ destinationVar?: string;
393
+ /** Coda OAuth connection ID */
394
+ connectionId: string;
395
+ /** Coda document ID */
396
+ docId: string;
397
+ /** Page ID within the document */
398
+ pageId: string;
399
+ /** Export format for the page content. Defaults to 'html' */
400
+ outputFormat?: "html" | "markdown";
401
+ }
402
+ interface CodaGetPageStepOutput {
403
+ /** Page content in the requested format (HTML or Markdown) */
404
+ content: string;
405
+ }
406
+ interface CodaGetTableRowsStepInput {
407
+ /** Variable name to store the result */
408
+ destinationVar?: string;
409
+ /** Coda OAuth connection ID */
410
+ connectionId: string;
411
+ /** Coda document ID */
412
+ docId: string;
413
+ /** Table ID within the document */
414
+ tableId: string;
415
+ /** Maximum number of rows to return. Defaults to 10000 */
416
+ limit?: number | string;
417
+ /** Output format for the result. Defaults to 'json' */
418
+ outputFormat?: "json" | "csv";
419
+ }
420
+ interface CodaGetTableRowsStepOutput {
421
+ /** The list of rows retrieved from the Coda table */
422
+ rows: {
423
+ /** Coda row ID */
424
+ id: string;
425
+ /** Column values keyed by column name */
426
+ values: Record<string, unknown>;
427
+ }[];
428
+ }
429
+ interface ConvertPdfToImagesStepInput {
430
+ /** URL of the PDF document to convert */
431
+ pdfUrl: string;
432
+ /** Variable name to save the array of image URLs into */
433
+ destinationVar?: string;
434
+ }
435
+ interface ConvertPdfToImagesStepOutput {
436
+ /** CDN URLs of the generated page images, one per page of the PDF */
437
+ imageUrls: string[];
438
+ }
439
+ interface CreateGoogleCalendarEventStepInput {
440
+ /** Google OAuth connection ID */
441
+ connectionId: string;
442
+ /** Event title */
443
+ summary: string;
444
+ /** Event description */
445
+ description?: string;
446
+ /** Event location */
447
+ location?: string;
448
+ /** Start time in ISO 8601 format */
449
+ startDateTime: string;
450
+ /** End time in ISO 8601 format */
451
+ endDateTime: string;
452
+ /** Attendee email addresses, one per line */
453
+ attendees?: string;
454
+ /** Variable to store the result (JSON with eventId and htmlLink) */
455
+ destinationVar?: string;
456
+ /** Whether to attach a Google Meet video call link */
457
+ addMeetLink?: boolean;
458
+ /** Calendar ID (defaults to "primary" if omitted) */
459
+ calendarId?: string;
460
+ }
461
+ interface CreateGoogleCalendarEventStepOutput {
462
+ /** Google Calendar event ID */
463
+ eventId: string;
464
+ /** URL to view the event in Google Calendar */
465
+ htmlLink: string;
466
+ }
467
+ interface CreateGoogleDocStepInput {
468
+ /** Title for the new document */
469
+ title: string;
470
+ /** Document body content */
471
+ text: string;
472
+ /** Variable to store the resulting document URL */
473
+ destinationVar?: string;
474
+ /** Google OAuth connection ID */
475
+ connectionId: string;
476
+ /** Format of the text field: "plain", "html", or "markdown" */
477
+ textType: "plain" | "html" | "markdown";
478
+ }
479
+ interface CreateGoogleDocStepOutput {
480
+ /** URL of the newly created Google Document */
481
+ documentUrl: string;
482
+ }
483
+ interface CreateGoogleSheetStepInput {
484
+ /** Title for the new spreadsheet */
485
+ title: string;
486
+ /** CSV data to populate the sheet with */
487
+ text: string;
488
+ /** Variable to store the resulting spreadsheet URL */
489
+ destinationVar?: string;
490
+ /** Google OAuth connection ID */
491
+ connectionId: string;
492
+ }
493
+ interface CreateGoogleSheetStepOutput {
494
+ /** URL of the newly created Google Spreadsheet */
495
+ spreadsheetUrl: string;
496
+ }
497
+ interface DeleteGoogleCalendarEventStepInput {
498
+ /** Google OAuth connection ID */
499
+ connectionId: string;
500
+ /** Google Calendar event ID to delete */
501
+ eventId: string;
502
+ /** Calendar ID (defaults to "primary" if omitted) */
503
+ calendarId?: string;
504
+ }
505
+ type DeleteGoogleCalendarEventStepOutput = unknown;
506
+ interface DetectPIIStepInput {
507
+ /** Text to scan for personally identifiable information */
508
+ input: string;
509
+ /** Language code of the input text (e.g. "en") */
510
+ language: string;
511
+ /** PII entity types to scan for (e.g. ["PHONE_NUMBER", "EMAIL_ADDRESS"]). Empty array means nothing is scanned. */
512
+ entities: string[];
513
+ /** Step to transition to if PII is detected (workflow mode) */
514
+ detectedStepId?: string;
515
+ /** Step to transition to if no PII is detected (workflow mode) */
516
+ notDetectedStepId?: string;
517
+ /** Variable name to store the raw detection results */
518
+ outputLogVariable?: string | null;
519
+ }
520
+ interface DetectPIIStepOutput {
521
+ /** Whether any PII was found in the input text */
522
+ detected: boolean;
523
+ /** List of detected PII entities with type, location, and confidence */
524
+ detections: {
525
+ /** PII entity type (e.g. "PHONE_NUMBER", "EMAIL_ADDRESS", "PERSON") */
526
+ entity_type: string;
527
+ /** Start character index in the input text */
528
+ start: number;
529
+ /** End character index in the input text */
530
+ end: number;
531
+ /** Confidence score between 0 and 1 */
532
+ score: number;
533
+ }[];
534
+ }
535
+ interface DownloadVideoStepInput {
536
+ /** URL of the video to download (supports YouTube, TikTok, etc. via yt-dlp) */
537
+ videoUrl: string;
538
+ /** Output format for the downloaded file */
539
+ format: "mp4" | "mp3";
540
+ /** Variable name to store the output URL */
541
+ destinationVar: string;
542
+ }
543
+ interface DownloadVideoStepOutput {
544
+ /** URL of the downloaded and re-hosted video file */
545
+ videoUrl: string;
546
+ }
547
+ interface EnhanceImageGenerationPromptStepInput {
548
+ /** The raw prompt to enhance */
549
+ initialPrompt: string;
550
+ /** Whether to also generate a negative prompt */
551
+ includeNegativePrompt: boolean;
552
+ /** Variable name to save the enhanced prompt into */
553
+ destinationVariableName?: string;
554
+ /** Variable name to save the negative prompt into */
555
+ negativePromptDestinationVariableName?: string;
556
+ /** Custom system prompt for the enhancement model. Uses a default prompt if not provided */
557
+ systemPrompt: string;
558
+ /** Model override settings. Leave undefined to use the default model */
559
+ modelOverride?: unknown;
560
+ }
561
+ interface EnhanceImageGenerationPromptStepOutput {
562
+ /** The enhanced image generation prompt */
563
+ prompt: string;
564
+ /** The negative prompt, only present when includeNegativePrompt was true */
565
+ negativePrompt?: string;
566
+ }
567
+ interface EnhanceVideoGenerationPromptStepInput {
568
+ /** The raw prompt to enhance */
569
+ initialPrompt: string;
570
+ /** Whether to also generate a negative prompt */
571
+ includeNegativePrompt: boolean;
572
+ /** Variable name to save the enhanced prompt into */
573
+ destinationVariableName?: string;
574
+ /** Variable name to save the negative prompt into */
575
+ negativePromptDestinationVariableName?: string;
576
+ /** Custom system prompt for the enhancement model. Uses a default prompt if not provided */
577
+ systemPrompt: string;
578
+ /** Model override settings. Leave undefined to use the default model */
579
+ modelOverride?: unknown;
580
+ }
581
+ interface EnhanceVideoGenerationPromptStepOutput {
582
+ /** The enhanced video generation prompt */
583
+ prompt: string;
584
+ /** The negative prompt, only present when includeNegativePrompt was true */
585
+ negativePrompt?: string;
586
+ }
587
+ interface EnrichPersonStepInput {
588
+ /** Variable name to store the result as JSON */
589
+ destinationVar?: string;
590
+ /** Search parameters to identify the person (ID, name, LinkedIn URL, email, or domain) */
591
+ params: {
592
+ /** Apollo person ID */
593
+ id: string;
594
+ /** Person's full name */
595
+ name: string;
596
+ /** LinkedIn profile URL */
597
+ linkedinUrl: string;
598
+ /** Email address */
599
+ email: string;
600
+ /** Company domain */
601
+ domain: string;
602
+ };
603
+ }
604
+ interface EnrichPersonStepOutput {
605
+ /** Apollo enrichment result with contact details, employment history, and social profiles */
606
+ data: unknown;
607
+ }
608
+ interface ExtractAudioFromVideoStepInput {
609
+ /** URL of the source video to extract audio from */
610
+ videoUrl: string;
611
+ /** Variable name to store the output URL */
612
+ destinationVar: string;
613
+ }
614
+ interface ExtractAudioFromVideoStepOutput {
615
+ /** URL of the extracted audio MP3 file */
616
+ audioUrl: string;
617
+ }
618
+ interface ExtractTextStepInput {
619
+ /** URL or array of URLs to extract text from. Accepts a single URL, comma-separated list, or JSON array */
620
+ url: string | string[];
621
+ /** Variable name to save the extracted text into */
622
+ destinationVar?: string;
623
+ }
624
+ interface ExtractTextStepOutput {
625
+ /** Extracted text content. A single string for one URL, or an array for multiple URLs */
626
+ text: string | string[];
627
+ }
628
+ interface FetchGoogleDocStepInput {
629
+ /** Google Document ID (from the document URL) */
630
+ documentId: string;
631
+ /** Variable to store the fetched content */
632
+ destinationVar?: string;
633
+ /** Google OAuth connection ID */
634
+ connectionId: string;
635
+ /** Output format: "html", "markdown", "json", or "plain" */
636
+ exportType: "html" | "markdown" | "json" | "plain";
637
+ }
638
+ interface FetchGoogleDocStepOutput {
639
+ /** Document contents in the requested export format */
640
+ content: string;
641
+ }
642
+ interface FetchGoogleSheetStepInput {
643
+ /** Google Spreadsheet ID (from the spreadsheet URL) */
644
+ spreadsheetId: string;
645
+ /** Cell range in A1 notation (e.g. "Sheet1!A1:C10") */
646
+ range: string;
647
+ /** Variable to store the fetched content */
648
+ destinationVar?: string;
649
+ /** Google OAuth connection ID */
650
+ connectionId: string;
651
+ /** Output format: "csv" or "json" */
652
+ exportType: "csv" | "json";
653
+ }
654
+ interface FetchGoogleSheetStepOutput {
655
+ /** Spreadsheet data in the requested export format */
656
+ content: string;
657
+ }
658
+ interface FetchSlackChannelHistoryStepInput {
659
+ /** Slack OAuth connection ID (leave empty to allow user to select) */
660
+ connectionId: string;
661
+ /** Slack channel ID (leave empty to allow user to select a channel) */
662
+ channelId: string;
663
+ /** Variable to store the output as JSON */
664
+ destinationVar?: string;
665
+ /** Maximum number of messages to return (1-15) */
666
+ limit?: number;
667
+ /** Earliest date to include messages from */
668
+ startDate?: string;
669
+ /** Latest date to include messages up to */
670
+ endDate?: string;
671
+ /** Whether to include images in the output */
672
+ includeImages?: boolean;
673
+ /** Whether to include the raw Slack message object (useful for bot messages with complex attachments) */
674
+ includeRawMessage?: boolean;
675
+ }
676
+ interface FetchSlackChannelHistoryStepOutput {
677
+ /** List of messages from the channel history */
678
+ messages: {
679
+ from: string;
680
+ content: string;
681
+ timestamp?: string;
682
+ images?: string[];
683
+ rawMessage?: {
684
+ app_id?: string;
685
+ assistant_app_thread?: {
686
+ first_user_thread_reply?: string;
687
+ title?: string;
688
+ title_blocks?: unknown[];
689
+ };
690
+ attachments?: {
691
+ actions?: unknown[];
692
+ app_id?: string;
693
+ app_unfurl_url?: string;
694
+ author_icon?: string;
695
+ author_id?: string;
696
+ author_link?: string;
697
+ author_name?: string;
698
+ author_subname?: string;
699
+ blocks?: unknown[];
700
+ bot_id?: string;
701
+ bot_team_id?: string;
702
+ callback_id?: string;
703
+ channel_id?: string;
704
+ channel_name?: string;
705
+ channel_team?: string;
706
+ color?: string;
707
+ fallback?: string;
708
+ fields?: unknown[];
709
+ file_id?: string;
710
+ filename?: string;
711
+ files?: unknown[];
712
+ footer?: string;
713
+ footer_icon?: string;
714
+ from_url?: string;
715
+ hide_border?: boolean;
716
+ hide_color?: boolean;
717
+ id?: number;
718
+ image_bytes?: number;
719
+ image_height?: number;
720
+ image_url?: string;
721
+ image_width?: number;
722
+ indent?: boolean;
723
+ is_app_unfurl?: boolean;
724
+ is_file_attachment?: boolean;
725
+ is_msg_unfurl?: boolean;
726
+ is_reply_unfurl?: boolean;
727
+ is_thread_root_unfurl?: boolean;
728
+ list?: unknown;
729
+ list_record?: unknown;
730
+ list_record_id?: string;
731
+ list_records?: unknown[];
732
+ list_schema?: unknown[];
733
+ list_view?: unknown;
734
+ list_view_id?: string;
735
+ message_blocks?: unknown[];
736
+ metadata?: unknown;
737
+ mimetype?: string;
738
+ mrkdwn_in?: string[];
739
+ msg_subtype?: string;
740
+ original_url?: string;
741
+ pretext?: string;
742
+ preview?: unknown;
743
+ service_icon?: string;
744
+ service_name?: string;
745
+ service_url?: string;
746
+ size?: number;
747
+ text?: string;
748
+ thumb_height?: number;
749
+ thumb_url?: string;
750
+ thumb_width?: number;
751
+ title?: string;
752
+ title_link?: string;
753
+ ts?: string;
754
+ url?: string;
755
+ video_html?: string;
756
+ video_html_height?: number;
757
+ video_html_width?: number;
758
+ video_url?: string;
759
+ }[];
760
+ blocks?: {
761
+ accessory?: unknown;
762
+ alt_text?: string;
763
+ api_decoration_available?: boolean;
764
+ app_collaborators?: string[];
765
+ app_id?: string;
766
+ author_name?: string;
767
+ block_id?: string;
768
+ bot_user_id?: string;
769
+ button_label?: string;
770
+ call?: unknown;
771
+ call_id?: string;
772
+ description?: unknown;
773
+ developer_trace_id?: string;
774
+ dispatch_action?: boolean;
775
+ element?: unknown;
776
+ elements?: unknown[];
777
+ expand?: boolean;
778
+ external_id?: string;
779
+ fallback?: string;
780
+ fields?: unknown[];
781
+ file?: unknown;
782
+ file_id?: string;
783
+ function_trigger_id?: string;
784
+ hint?: unknown;
785
+ image_bytes?: number;
786
+ image_height?: number;
787
+ image_url?: string;
788
+ image_width?: number;
789
+ is_animated?: boolean;
790
+ is_workflow_app?: boolean;
791
+ label?: unknown;
792
+ optional?: boolean;
793
+ owning_team_id?: string;
794
+ provider_icon_url?: string;
795
+ provider_name?: string;
796
+ sales_home_workflow_app_type?: number;
797
+ share_url?: string;
798
+ slack_file?: unknown;
799
+ source?: string;
800
+ text?: unknown;
801
+ thumbnail_url?: string;
802
+ title?: unknown;
803
+ title_url?: string;
804
+ trigger_subtype?: string;
805
+ trigger_type?: string;
806
+ type?: unknown;
807
+ url?: string;
808
+ video_url?: string;
809
+ workflow_id?: string;
810
+ }[];
811
+ bot_id?: string;
812
+ bot_profile?: {
813
+ app_id?: string;
814
+ deleted?: boolean;
815
+ icons?: unknown;
816
+ id?: string;
817
+ name?: string;
818
+ team_id?: string;
819
+ updated?: number;
820
+ };
821
+ client_msg_id?: string;
822
+ display_as_bot?: boolean;
823
+ edited?: {
824
+ ts?: string;
825
+ user?: string;
826
+ };
827
+ files?: {
828
+ access?: string;
829
+ alt_txt?: string;
830
+ app_id?: string;
831
+ app_name?: string;
832
+ attachments?: unknown[];
833
+ blocks?: unknown[];
834
+ bot_id?: string;
835
+ can_toggle_canvas_lock?: boolean;
836
+ canvas_printing_enabled?: boolean;
837
+ canvas_template_mode?: string;
838
+ cc?: unknown[];
839
+ channel_actions_count?: number;
840
+ channel_actions_ts?: string;
841
+ channels?: string[];
842
+ comments_count?: number;
843
+ converted_pdf?: string;
844
+ created?: number;
845
+ deanimate?: string;
846
+ deanimate_gif?: string;
847
+ display_as_bot?: boolean;
848
+ dm_mpdm_users_with_file_access?: unknown[];
849
+ duration_ms?: number;
850
+ edit_link?: string;
851
+ edit_timestamp?: number;
852
+ editable?: boolean;
853
+ editor?: string;
854
+ editors?: string[];
855
+ external_id?: string;
856
+ external_type?: string;
857
+ external_url?: string;
858
+ favorites?: unknown[];
859
+ file_access?: string;
860
+ filetype?: string;
861
+ from?: unknown[];
862
+ groups?: string[];
863
+ has_more?: boolean;
864
+ has_more_shares?: boolean;
865
+ has_rich_preview?: boolean;
866
+ headers?: unknown;
867
+ hls?: string;
868
+ hls_embed?: string;
869
+ id?: string;
870
+ image_exif_rotation?: number;
871
+ ims?: string[];
872
+ initial_comment?: unknown;
873
+ is_channel_space?: boolean;
874
+ is_external?: boolean;
875
+ is_public?: boolean;
876
+ is_restricted_sharing_enabled?: boolean;
877
+ is_starred?: boolean;
878
+ last_editor?: string;
879
+ last_read?: number;
880
+ lines?: number;
881
+ lines_more?: number;
882
+ linked_channel_id?: string;
883
+ list_csv_download_url?: string;
884
+ list_limits?: unknown;
885
+ list_metadata?: unknown;
886
+ media_display_type?: string;
887
+ media_progress?: unknown;
888
+ mimetype?: string;
889
+ mode?: string;
890
+ mp4?: string;
891
+ mp4_low?: string;
892
+ name?: string;
893
+ non_owner_editable?: boolean;
894
+ num_stars?: number;
895
+ org_or_workspace_access?: string;
896
+ original_attachment_count?: number;
897
+ original_h?: string;
898
+ original_w?: string;
899
+ permalink?: string;
900
+ permalink_public?: string;
901
+ pinned_to?: string[];
902
+ pjpeg?: string;
903
+ plain_text?: string;
904
+ pretty_type?: string;
905
+ preview?: string;
906
+ preview_highlight?: string;
907
+ preview_is_truncated?: boolean;
908
+ preview_plain_text?: string;
909
+ private_channels_with_file_access_count?: number;
910
+ private_file_with_access_count?: number;
911
+ public_url_shared?: boolean;
912
+ quip_thread_id?: string;
913
+ reactions?: unknown[];
914
+ saved?: unknown;
915
+ sent_to_self?: boolean;
916
+ shares?: unknown;
917
+ show_badge?: boolean;
918
+ simplified_html?: string;
919
+ size?: number;
920
+ source_team?: string;
921
+ subject?: string;
922
+ subtype?: string;
923
+ team_pref_version_history_enabled?: boolean;
924
+ teams_shared_with?: unknown[];
925
+ template_conversion_ts?: number;
926
+ template_description?: string;
927
+ template_icon?: string;
928
+ template_name?: string;
929
+ template_title?: string;
930
+ thumb_1024?: string;
931
+ thumb_1024_gif?: string;
932
+ thumb_1024_h?: string;
933
+ thumb_1024_w?: string;
934
+ thumb_160?: string;
935
+ thumb_160_gif?: string;
936
+ thumb_160_h?: string;
937
+ thumb_160_w?: string;
938
+ thumb_360?: string;
939
+ thumb_360_gif?: string;
940
+ thumb_360_h?: string;
941
+ thumb_360_w?: string;
942
+ thumb_480?: string;
943
+ thumb_480_gif?: string;
944
+ thumb_480_h?: string;
945
+ thumb_480_w?: string;
946
+ thumb_64?: string;
947
+ thumb_64_gif?: string;
948
+ thumb_64_h?: string;
949
+ thumb_64_w?: string;
950
+ thumb_720?: string;
951
+ thumb_720_gif?: string;
952
+ thumb_720_h?: string;
953
+ thumb_720_w?: string;
954
+ thumb_80?: string;
955
+ thumb_800?: string;
956
+ thumb_800_gif?: string;
957
+ thumb_800_h?: string;
958
+ thumb_800_w?: string;
959
+ thumb_80_gif?: string;
960
+ thumb_80_h?: string;
961
+ thumb_80_w?: string;
962
+ thumb_960?: string;
963
+ thumb_960_gif?: string;
964
+ thumb_960_h?: string;
965
+ thumb_960_w?: string;
966
+ thumb_gif?: string;
967
+ thumb_pdf?: string;
968
+ thumb_pdf_h?: string;
969
+ thumb_pdf_w?: string;
970
+ thumb_tiny?: string;
971
+ thumb_video?: string;
972
+ thumb_video_h?: number;
973
+ thumb_video_w?: number;
974
+ timestamp?: number;
975
+ title?: string;
976
+ title_blocks?: unknown[];
977
+ to?: unknown[];
978
+ transcription?: unknown;
979
+ update_notification?: number;
980
+ updated?: number;
981
+ url_private?: string;
982
+ url_private_download?: string;
983
+ url_static_preview?: string;
984
+ user?: string;
985
+ user_team?: string;
986
+ username?: string;
987
+ vtt?: string;
988
+ }[];
989
+ icons?: {
990
+ emoji?: string;
991
+ image_36?: string;
992
+ image_48?: string;
993
+ image_64?: string;
994
+ image_72?: string;
995
+ };
996
+ inviter?: string;
997
+ is_locked?: boolean;
998
+ latest_reply?: string;
999
+ metadata?: {
1000
+ event_payload?: unknown;
1001
+ event_type?: string;
1002
+ };
1003
+ parent_user_id?: string;
1004
+ purpose?: string;
1005
+ reactions?: {
1006
+ count?: number;
1007
+ name?: string;
1008
+ url?: string;
1009
+ users?: string[];
1010
+ }[];
1011
+ reply_count?: number;
1012
+ reply_users?: string[];
1013
+ reply_users_count?: number;
1014
+ root?: {
1015
+ bot_id?: string;
1016
+ icons?: unknown;
1017
+ latest_reply?: string;
1018
+ parent_user_id?: string;
1019
+ reply_count?: number;
1020
+ reply_users?: string[];
1021
+ reply_users_count?: number;
1022
+ subscribed?: boolean;
1023
+ subtype?: string;
1024
+ text?: string;
1025
+ thread_ts?: string;
1026
+ ts?: string;
1027
+ type?: string;
1028
+ username?: string;
1029
+ };
1030
+ subscribed?: boolean;
1031
+ subtype?: string;
1032
+ team?: string;
1033
+ text?: string;
1034
+ thread_ts?: string;
1035
+ topic?: string;
1036
+ ts?: string;
1037
+ type?: string;
1038
+ upload?: boolean;
1039
+ user?: string;
1040
+ username?: string;
1041
+ x_files?: string[];
1042
+ };
1043
+ }[];
1044
+ }
1045
/** Input for the Fetch YouTube Captions step. */
interface FetchYoutubeCaptionsStepInput {
  /** YouTube video URL to fetch captions for */
  videoUrl: string;
  /** Variable name to save the formatted captions */
  destinationVar?: string;
  /** Output format: "text" for timestamped plain text, "json" for structured transcript data */
  exportType: "text" | "json";
  /** Language code for the captions (e.g. "en") */
  language: string;
}
/** Output of the Fetch YouTube Captions step. */
interface FetchYoutubeCaptionsStepOutput {
  /** Parsed transcript segments with text and start timestamps */
  transcripts: {
    /** Transcript text segment */
    text: string;
    /** Start time of the segment in seconds */
    start: number;
  }[];
}
/** Input for the Fetch YouTube Channel step. */
interface FetchYoutubeChannelStepInput {
  /** YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID) */
  channelUrl: string;
  /** Variable name to save the channel data */
  destinationVar?: string;
}
/** Output of the Fetch YouTube Channel step. */
interface FetchYoutubeChannelStepOutput {
  /** Channel metadata and video listings */
  channel: Record<string, unknown>;
}
/** Input for the Fetch YouTube Comments step. */
interface FetchYoutubeCommentsStepInput {
  /** YouTube video URL to fetch comments for */
  videoUrl: string;
  /** Variable name to save the formatted comments */
  destinationVar?: string;
  /** Output format: "text" for markdown-formatted text, "json" for structured comment data */
  exportType: "text" | "json";
  /** Maximum number of comment pages to fetch (1-5); note this is a string, not a number */
  limitPages: string;
}
/** Output of the Fetch YouTube Comments step. */
interface FetchYoutubeCommentsStepOutput {
  /** List of comments retrieved from the video */
  comments: {
    /** Unique comment identifier */
    id: string;
    /** Direct URL to the comment */
    link: string;
    /** Date the comment was published */
    publishedDate: string;
    /** Text content of the comment */
    text: string;
    /** Number of likes on the comment */
    likes: number;
    /** Number of replies to the comment */
    replies: number;
    /** Display name of the comment author */
    author: string;
    /** URL to the author's YouTube channel */
    authorLink: string;
    /** URL of the author's profile image */
    authorImg: string;
  }[];
}
/** Input for the Fetch YouTube Video step. */
interface FetchYoutubeVideoStepInput {
  /** YouTube video URL to fetch metadata for */
  videoUrl: string;
  /** Variable name to save the video data */
  destinationVar?: string;
}
/** Output of the Fetch YouTube Video step. */
interface FetchYoutubeVideoStepOutput {
  /** Video metadata including title, description, stats, and channel info */
  video: Record<string, unknown>;
}
1117
/** Input for the Generate Chart step. */
interface GenerateChartStepInput {
  /** Chart configuration including type, data, and rendering options */
  chart: {
    /** The type of chart to generate */
    chartType: "bar" | "line" | "pie";
    /** Chart.js-compatible JSON data serialized as a string */
    data: string;
    /** Image rendering options */
    options: {
      /** Image width in pixels (e.g. "500") */
      width: string;
      /** Image height in pixels (e.g. "300") */
      height: string;
    };
  };
  /** Variable name to save the chart image URL into */
  destinationVar?: string;
}
/** Output of the Generate Chart step. */
interface GenerateChartStepOutput {
  /** URL of the generated chart image */
  chartUrl: string;
}
/** Input for the Generate Image step. */
interface GenerateImageStepInput {
  /** Text prompt describing the image to generate */
  prompt: string;
  /** foreground = display to user, background = save URL to variable */
  mode: "foreground" | "background";
  /** Variable name to save the generated image URL(s) into */
  destinationVar?: string;
  /** If true, the image will not appear in the user's asset history */
  skipAssetCreation?: boolean;
  /** Optional model configuration override. Uses the workflow's default image model if not specified */
  imageModelOverride?: {
    /** Image generation model identifier */
    model: string;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
  /** Whether to generate multiple image variants in parallel */
  generateVariants?: boolean;
  /** Number of variants to generate (max 10) */
  numVariants?: number;
  /** How to handle multiple variants: userSelect prompts the user, saveAll saves all URLs */
  variantBehavior?: "userSelect" | "saveAll";
  /** Whether to add a MindStudio watermark to the generated image */
  addWatermark?: boolean;
}
/** Output of the Generate Image step. */
interface GenerateImageStepOutput {
  /** CDN URL of the generated image, or array of URLs when generating multiple variants */
  imageUrl: string | string[];
}
/** Input for the Generate Lipsync step. */
interface GenerateLipsyncStepInput {
  /** foreground = display video to user, background = save URL to variable */
  mode: "foreground" | "background";
  /** Variable name to save the video URL into (used in background mode) */
  destinationVar?: string;
  /** If true, the generated video will not appear in the user's asset history */
  skipAssetCreation?: boolean;
  /** Whether to add a MindStudio watermark to the generated video */
  addWatermark?: boolean;
  /** Optional model configuration override. Uses the workflow's default lipsync model if not specified */
  lipsyncModelOverride?: {
    /** Lipsync model identifier */
    model: string;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
}
/** Output of the Generate Lipsync step (shape not specified by this declaration). */
type GenerateLipsyncStepOutput = unknown;
/** Input for the Generate Music step. */
interface GenerateMusicStepInput {
  /** The instructions (prompt) for the music generation */
  text: string;
  /** foreground = display audio to user, background = save URL to variable */
  mode: "foreground" | "background";
  /** Variable name to save the audio URL into (used in background mode) */
  destinationVar?: string;
  /** If true, the generated audio will not appear in the user's asset history */
  skipAssetCreation?: boolean;
  /** Optional model configuration override. Uses the workflow's default music model if not specified */
  musicModelOverride?: {
    /** Music generation model identifier */
    model: string;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
}
/** Output of the Generate Music step (shape not specified by this declaration). */
type GenerateMusicStepOutput = unknown;
1200
/** Input for the Generate PDF / asset-generation step. */
interface GeneratePdfStepInput {
  /** The HTML or Markdown source template for the asset */
  source: string;
  /** Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface */
  sourceType: "html" | "markdown" | "spa" | "raw" | "dynamic" | "customInterface";
  /** foreground = display the asset to the user, background = save the URL to a variable */
  mode: "foreground" | "background";
  /** Variable name to save the asset URL into (used in background mode) */
  destinationVar?: string;
  /** The output format for the generated asset */
  outputFormat: "pdf" | "png" | "html" | "mp4" | "openGraph";
  /** Page size for PDF, PNG, or MP4 output */
  pageSize: "full" | "letter" | "A4" | "custom";
  /** Test data used for previewing the template with sample variable values */
  testData: Record<string, unknown>;
  /** Additional rendering options */
  options?: {
    /** Custom page width in pixels (for custom pageSize) */
    pageWidthPx?: number;
    /** Custom page height in pixels (for custom pageSize) */
    pageHeightPx?: number;
    /** Page orientation for the rendered output */
    pageOrientation?: "portrait" | "landscape";
    /** Whether to re-host third-party images on the MindStudio CDN */
    rehostMedia?: boolean;
    /** Duration in seconds for MP4 video output */
    videoDurationSeconds?: number;
  };
  /** Single page app source configuration (advanced) */
  spaSource?: {
    /** Available route paths in the SPA */
    paths: string[];
    /** Root URL of the SPA bundle */
    root: string;
    /** URL of the zipped SPA bundle */
    zipUrl: string;
  };
  /** Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}}) */
  rawSource?: string;
  /** Prompt to generate the HTML dynamically when sourceType is "dynamic" */
  dynamicPrompt?: string;
  /** Model override for dynamic HTML generation. Leave undefined to use the default model */
  dynamicSourceModelOverride?: {
    /** Model identifier (e.g. "gpt-4", "claude-3-opus") */
    model: string;
    /** Sampling temperature for the model (0-2) */
    temperature: number;
    /** Maximum number of tokens in the model's response */
    maxResponseTokens: number;
    /** Whether to skip the system preamble/instructions */
    ignorePreamble?: boolean;
    /** Preprocessor applied to user messages before sending to the model */
    userMessagePreprocessor?: {
      /** Data source identifier for the preprocessor */
      dataSource?: string;
      /** Template string applied to user messages before sending to the model */
      messageTemplate?: string;
      /** Maximum number of results to include from the data source */
      maxResults?: number;
      /** Whether the preprocessor is active */
      enabled?: boolean;
      /** Whether child steps should inherit this preprocessor configuration */
      shouldInherit?: boolean;
    };
    /** System preamble/instructions for the model */
    preamble?: string;
    /** Whether multi-model candidate generation is enabled */
    multiModelEnabled?: boolean;
    /** Whether the user can edit the model's response */
    editResponseEnabled?: boolean;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
  /** Controls how the step transitions after displaying in foreground mode */
  transitionControl?: "default" | "native";
  /** Controls visibility of the share button on displayed assets */
  shareControl?: "default" | "hidden";
  /** URL of a custom Open Graph share image */
  shareImageUrl?: string;
  /** If true, the asset will not appear in the user's asset history */
  skipAssetCreation?: boolean;
}
/** Output of the Generate PDF / asset-generation step. */
interface GeneratePdfStepOutput {
  /** CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat) */
  url: string;
}
1286
/** Input for the Generate Static Video From Image step. */
interface GenerateStaticVideoFromImageStepInput {
  /** URL of the source image to convert to video */
  imageUrl: string;
  /** Duration of the output video in seconds; note this is a string, not a number */
  duration: string;
  /** Variable name to store the output URL */
  destinationVar: string;
}
/** Output of the Generate Static Video From Image step. */
interface GenerateStaticVideoFromImageStepOutput {
  /** URL of the generated static video */
  videoUrl: string;
}
/** Input for the Generate Video step. */
interface GenerateVideoStepInput {
  /** Text prompt describing the video to generate */
  prompt: string;
  /** foreground = display to user, background = save URL to variable */
  mode: "foreground" | "background";
  /** Variable name to save the generated video URL(s) into */
  destinationVar?: string;
  /** If true, the video will not appear in the user's asset history */
  skipAssetCreation?: boolean;
  /** Optional model configuration override. Uses the workflow's default video model if not specified */
  videoModelOverride?: {
    /** Video generation model identifier */
    model: string;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
  /** Whether to generate multiple video variants in parallel */
  generateVariants?: boolean;
  /** Number of variants to generate (max 10) */
  numVariants?: number;
  /** How to handle multiple variants: userSelect prompts the user, saveAll saves all URLs */
  variantBehavior?: "userSelect" | "saveAll";
  /** Whether to add a MindStudio watermark to the generated video */
  addWatermark?: boolean;
}
/** Output of the Generate Video step. */
interface GenerateVideoStepOutput {
  /** CDN URL of the generated video, or array of URLs when generating multiple variants */
  videoUrl: string | string[];
}
1327
/** Input for the Get Google Calendar Event step. */
interface GetGoogleCalendarEventStepInput {
  /** Google OAuth connection ID */
  connectionId: string;
  /** Google Calendar event ID to retrieve */
  eventId: string;
  /** Format for the variable output: "json" or "text" */
  exportType: "json" | "text";
  /** Variable to store the result */
  destinationVar?: string;
  /** Calendar ID (defaults to "primary" if omitted) */
  calendarId?: string;
}
/** Output of the Get Google Calendar Event step. */
interface GetGoogleCalendarEventStepOutput {
  /** The retrieved calendar event */
  event: {
    /** Google Calendar event ID */
    id?: string | null;
    /** Event status (e.g. "confirmed", "tentative", "cancelled") */
    status?: string | null;
    /** URL to view the event in Google Calendar */
    htmlLink?: string | null;
    /** Timestamp when the event was created */
    created?: string | null;
    /** Timestamp when the event was last updated */
    updated?: string | null;
    /** Event title */
    summary?: string | null;
    /** Event description */
    description?: string | null;
    /** Event location */
    location?: string | null;
    /** Event organizer */
    organizer?: {
      /** Organizer display name */
      displayName?: string | null;
      /** Organizer email address */
      email?: string | null;
    } | null;
    /** Event start time and timezone */
    start?: {
      /** Start date-time of the event */
      dateTime?: string | null;
      /** Timezone of the start time */
      timeZone?: string | null;
    } | null;
    /** Event end time and timezone */
    end?: {
      /** End date-time of the event */
      dateTime?: string | null;
      /** Timezone of the end time */
      timeZone?: string | null;
    } | null;
    /** List of event attendees */
    attendees?: ({
      /** Attendee display name */
      displayName?: string | null;
      /** Attendee email address */
      email?: string | null;
      /** Attendee response status */
      responseStatus?: string | null;
    })[] | null;
  };
}
/** Input for the Get Media Metadata step. */
interface GetMediaMetadataStepInput {
  /** URL of the audio or video file to analyze */
  mediaUrl: string;
  /** Variable name to store the metadata result */
  destinationVar: string;
}
/** Output of the Get Media Metadata step. */
interface GetMediaMetadataStepOutput {
  /** JSON string containing the media file metadata */
  metadata: string;
}
1391
/** Input for the HTTP Request step. */
interface HttpRequestStepInput {
  /** The request URL */
  url: string;
  /** HTTP method (GET, POST, PATCH, DELETE, or PUT) */
  method: string;
  /** Custom request headers as key-value pairs */
  headers: Record<string, unknown>;
  /** Query string parameters as key-value pairs */
  queryParams: Record<string, unknown>;
  /** Raw request body (used for JSON or custom content types) */
  body: string;
  /** Key-value body items (used for form data or URL-encoded content types) */
  bodyItems: Record<string, unknown>;
  /** The content type for the request body */
  contentType: "none" | "application/json" | "application/x-www-form-urlencoded" | "multipart/form-data" | "custom";
  /** Custom Content-Type header value (used when contentType is "custom") */
  customContentType: string;
  /** Variable name to save the response body into */
  destinationVar?: string;
  /** Test data for debug/preview mode */
  testData?: Record<string, unknown>;
}
/** Output of the HTTP Request step. */
interface HttpRequestStepOutput {
  /** Whether the HTTP response status code is in the 2xx range */
  ok: boolean;
  /** HTTP response status code */
  status: number;
  /** HTTP response status text */
  statusText: string;
  /** Response body as a string */
  response: string;
}
1423
/** Input for the HubSpot Create Company step. */
interface HubspotCreateCompanyStepInput {
  /** HubSpot OAuth connection ID */
  connectionId: string;
  /** Variable name to store the company ID */
  destinationVar?: string;
  /** Company data including domain, name, and additional properties */
  company: {
    /** Company domain, used for matching existing companies */
    domain: string;
    /** Company name */
    name: string;
  };
  /** HubSpot properties enabled for this step, used for type validation */
  enabledProperties: ({
    /** Display label for the HubSpot property */
    label: string;
    /** HubSpot property internal name */
    value: string;
    /** Data type of the property value */
    type: "string" | "number" | "bool";
  })[];
}
/** Output of the HubSpot Create Company step. */
interface HubspotCreateCompanyStepOutput {
  /** HubSpot company ID of the created or updated company */
  companyId: string;
}
/** Input for the HubSpot Create Contact step. */
interface HubspotCreateContactStepInput {
  /** HubSpot OAuth connection ID */
  connectionId: string;
  /** Variable name to store the contact ID */
  destinationVar?: string;
  /** Contact data including email, first name, last name, and additional properties */
  contact: {
    /** Contact email address, used for matching existing contacts */
    email: string;
    /** Contact first name */
    firstname: string;
    /** Contact last name */
    lastname: string;
  };
  /** HubSpot properties enabled for this step, used for type validation */
  enabledProperties: ({
    /** Display label for the HubSpot property */
    label: string;
    /** HubSpot property internal name */
    value: string;
    /** Data type of the property value */
    type: "string" | "number" | "bool";
  })[];
  /** Company domain to associate the contact with. Creates the company if it does not exist */
  companyDomain: string;
}
/** Output of the HubSpot Create Contact step. */
interface HubspotCreateContactStepOutput {
  /** HubSpot contact ID of the created or updated contact */
  contactId: string;
}
/** Input for the HubSpot Get Company step. */
interface HubspotGetCompanyStepInput {
  /** HubSpot OAuth connection ID */
  connectionId: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
  /** How to look up the company: by domain name or HubSpot company ID */
  searchBy: "domain" | "id";
  /** Domain to search by (used when searchBy is 'domain') */
  companyDomain: string;
  /** HubSpot company ID (used when searchBy is 'id') */
  companyId: string;
  /** Extra HubSpot property names to include in the response beyond the defaults */
  additionalProperties: string[];
}
/** Output of the HubSpot Get Company step. */
interface HubspotGetCompanyStepOutput {
  /** The retrieved HubSpot company, or null if not found */
  company: {
    /** HubSpot company ID */
    id: string;
    /** Property values keyed by internal property name */
    properties: Record<string, unknown>;
    /** Timestamp when the record was created */
    createdAt: string;
    /** Timestamp when the record was last updated */
    updatedAt: string;
    /** Whether the record is archived */
    archived: boolean;
  } | null;
}
/** Input for the HubSpot Get Contact step. */
interface HubspotGetContactStepInput {
  /** HubSpot OAuth connection ID */
  connectionId: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
  /** How to look up the contact: by email address or HubSpot contact ID */
  searchBy: "email" | "id";
  /** Email address to search by (used when searchBy is 'email') */
  contactEmail: string;
  /** HubSpot contact ID (used when searchBy is 'id') */
  contactId: string;
  /** Extra HubSpot property names to include in the response beyond the defaults */
  additionalProperties: string[];
}
/** Output of the HubSpot Get Contact step. */
interface HubspotGetContactStepOutput {
  /** The retrieved HubSpot contact, or null if not found */
  contact: {
    /** HubSpot contact ID */
    id: string;
    /** Property values keyed by internal property name */
    properties: Record<string, unknown>;
    /** Timestamp when the record was created */
    createdAt: string;
    /** Timestamp when the record was last updated */
    updatedAt: string;
    /** Whether the record is archived */
    archived: boolean;
  } | null;
}
1527
/** Input for the Hunter.io Company Enrichment step. */
interface HunterApiCompanyEnrichmentStepInput {
  /** Domain or URL to look up (e.g. "example.com") */
  domain: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
}
/** Output of the Hunter.io Company Enrichment step. */
interface HunterApiCompanyEnrichmentStepOutput {
  /** Enriched company data, or null if the company was not found */
  data: {
    /** Company name */
    name: string;
    /** Company domain */
    domain: string;
    /** Company description */
    description: string | null;
    /** Country where the company is located */
    country: string | null;
    /** State or region where the company is located */
    state: string | null;
    /** City where the company is located */
    city: string | null;
    /** Industry the company operates in */
    industry: string | null;
    /** Employee count range */
    employees_range: string | null;
    /** URL of the company logo */
    logo_url: string | null;
    /** Technologies the company is known to use */
    technologies: string[];
  } | null;
}
/** Input for the Hunter.io Domain Search step. */
interface HunterApiDomainSearchStepInput {
  /** Domain or URL to search for email addresses (e.g. "example.com") */
  domain: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
}
/** Output of the Hunter.io Domain Search step. */
interface HunterApiDomainSearchStepOutput {
  /** Domain search results including emails and organization info */
  data: {
    /** The searched domain */
    domain: string;
    /** Whether the domain uses disposable email addresses */
    disposable: boolean;
    /** Whether the domain is a webmail provider */
    webmail: boolean;
    /** Whether the domain accepts all email addresses */
    accept_all: boolean;
    /** Common email pattern for the domain (e.g. "{first}.{last}") */
    pattern: string;
    /** Organization name associated with the domain */
    organization: string;
    /** Country of the organization */
    country: string | null;
    /** State or region of the organization */
    state: string | null;
    /** List of email addresses found for the domain */
    emails: ({
      /** Email address */
      value: string;
      /** Email type (e.g. "personal", "generic") */
      type: string;
      /** Confidence score (0-100) */
      confidence: number;
      /** Contact first name */
      first_name: string | null;
      /** Contact last name */
      last_name: string | null;
      /** Job title or position */
      position: string | null;
      /** Seniority level */
      seniority: string | null;
      /** Department within the organization */
      department: string | null;
      /** LinkedIn profile URL */
      linkedin: string | null;
      /** Twitter handle */
      twitter: string | null;
      /** Phone number */
      phone_number: string | null;
    })[];
    /** Other domains linked to this organization */
    linked_domains: string[];
  };
}
/** Input for the Hunter.io Email Finder step. */
interface HunterApiEmailFinderStepInput {
  /** Domain to search (e.g. "example.com"). Full URLs are also accepted */
  domain: string;
  /** Person's first name */
  firstName: string;
  /** Person's last name */
  lastName: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
}
/** Output of the Hunter.io Email Finder step. */
interface HunterApiEmailFinderStepOutput {
  /** Email finder results including the found email and confidence score */
  data: {
    /** Person's first name */
    first_name: string;
    /** Person's last name */
    last_name: string;
    /** The found email address */
    email: string;
    /** Confidence score (0-100) */
    score: number;
    /** Domain searched */
    domain: string;
    /** Whether the domain accepts all email addresses */
    accept_all: boolean;
    /** Job title or position */
    position: string | null;
    /** Twitter handle */
    twitter: string | null;
    /** LinkedIn profile URL */
    linkedin_url: string | null;
    /** Phone number */
    phone_number: string | null;
    /** Company name */
    company: string | null;
    /** Sources where the email was found */
    sources: {
      /** Domain where the email was found */
      domain: string;
      /** URI of the page where the email was found */
      uri: string;
      /** Date when the email was extracted */
      extracted_on: string;
    }[];
  };
}
/** Input for the Hunter.io Email Verification step. */
interface HunterApiEmailVerificationStepInput {
  /** Email address to verify */
  email: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
}
/** Output of the Hunter.io Email Verification step. */
interface HunterApiEmailVerificationStepOutput {
  /** Email verification results including status, deliverability, and confidence score */
  data: {
    /** Verification status (e.g. "valid", "invalid", "accept_all", "webmail", "disposable", "unknown") */
    status: string;
    /** Deliverability result */
    result: string;
    /** Confidence score (0-100) */
    score: number;
    /** The verified email address */
    email: string;
    /** Whether the email matches a valid format */
    regexp: boolean;
    /** Whether the email appears to be gibberish */
    gibberish: boolean;
    /** Whether the email uses a disposable email service */
    disposable: boolean;
    /** Whether the email is from a webmail provider */
    webmail: boolean;
    /** Whether MX records exist for the domain */
    mx_records: boolean;
    /** Whether the SMTP server is reachable */
    smtp_server: boolean;
    /** Whether the SMTP mailbox check passed */
    smtp_check: boolean;
    /** Whether the domain accepts all email addresses */
    accept_all: boolean;
    /** Whether the email is blocked */
    block: boolean;
    /** Sources where the email was found */
    sources: {
      /** Domain where the email was found */
      domain: string;
      /** URI of the page where the email was found */
      uri: string;
      /** Date when the email was extracted */
      extracted_on: string;
    }[];
  };
}
/** Input for the Hunter.io Person Enrichment step. */
interface HunterApiPersonEnrichmentStepInput {
  /** Email address to look up */
  email: string;
  /** Variable name to store the result as JSON */
  destinationVar?: string;
}
/** Output of the Hunter.io Person Enrichment step. */
interface HunterApiPersonEnrichmentStepOutput {
  /** Enriched person data, or an error object if the person was not found */
  data: {
    /** Person's first name */
    first_name: string;
    /** Person's last name */
    last_name: string;
    /** Email address that was looked up */
    email: string;
    /** Job title or position */
    position: string | null;
    /** Seniority level */
    seniority: string | null;
    /** Department within the organization */
    department: string | null;
    /** LinkedIn profile URL */
    linkedin_url: string | null;
    /** Twitter handle */
    twitter: string | null;
    /** Phone number */
    phone_number: string | null;
    /** Company the person is associated with */
    company: {
      /** Company name */
      name: string;
      /** Company domain */
      domain: string;
      /** Industry the company operates in */
      industry: string | null;
    } | null;
  } | {
    /** Error message describing why enrichment failed */
    error: string;
  };
}
1721
+ interface ImageFaceSwapStepInput {
1722
+ /** URL of the target image containing the face to replace */
1723
+ imageUrl: string;
1724
+ /** Variable name to store the output URL */
1725
+ destinationVar?: string;
1726
+ /** URL of the image containing the replacement face */
1727
+ faceImageUrl: string;
1728
+ /** Face swap engine to use */
1729
+ engine: string;
1730
+ }
1731
+ interface ImageFaceSwapStepOutput {
1732
+ /** CDN URL of the face-swapped image (PNG) */
1733
+ imageUrl: string;
1734
+ }
1735
+ interface ImageRemoveWatermarkStepInput {
1736
+ /** URL of the image to remove the watermark from */
1737
+ imageUrl: string;
1738
+ /** Variable name to store the output URL */
1739
+ destinationVar?: string;
1740
+ /** Watermark removal engine to use */
1741
+ engine: string;
1742
+ /** When true, the result will not appear in the user's asset history */
1743
+ skipAssetCreation?: boolean;
1744
+ }
1745
+ interface ImageRemoveWatermarkStepOutput {
1746
+ /** CDN URL of the processed image with watermark removed (PNG) */
1747
+ imageUrl: string;
1748
+ }
1749
+ interface InsertVideoClipsStepInput {
1750
+ /** URL of the base video to insert clips into */
1751
+ baseVideoUrl: string;
1752
+ /** Array of overlay clips to insert at specified timecodes */
1753
+ overlayVideos: {
1754
+ /** URL of the overlay video clip */
1755
+ videoUrl: string;
1756
+ /** Timecode in seconds at which to insert this clip */
1757
+ startTimeSec: number;
1758
+ }[];
1759
+ /** Optional xfade transition effect name between clips */
1760
+ transition?: string;
1761
+ /** Duration of the transition in seconds */
1762
+ transitionDuration?: number;
1763
+ /** When true, uses audio from the overlay clips instead of the base video audio during inserts */
1764
+ useOverlayAudio?: boolean;
1765
+ /** Variable name to store the output URL */
1766
+ destinationVar: string;
1767
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
1768
+ skipAssetCreation?: boolean;
1769
+ }
1770
+ interface InsertVideoClipsStepOutput {
1771
+ /** URL of the video with clips inserted */
1772
+ videoUrl: string;
1773
+ }
1774
+ interface ListGoogleCalendarEventsStepInput {
1775
+ /** Google OAuth connection ID */
1776
+ connectionId: string;
1777
+ /** Maximum number of events to return (default: 10) */
1778
+ limit: number;
1779
+ /** Format for the variable output: "json" or "text" */
1780
+ exportType: "json" | "text";
1781
+ /** Calendar ID (defaults to "primary" if omitted) */
1782
+ calendarId?: string;
1783
+ /** Variable to store the result */
1784
+ destinationVar?: string;
1785
+ }
1786
+ interface ListGoogleCalendarEventsStepOutput {
1787
+ /** List of upcoming calendar events ordered by start time */
1788
+ events: ({
1789
+ /** Google Calendar event ID */
1790
+ id?: string | null;
1791
+ /** Event status (e.g. "confirmed", "tentative", "cancelled") */
1792
+ status?: string | null;
1793
+ /** URL to view the event in Google Calendar */
1794
+ htmlLink?: string | null;
1795
+ /** Timestamp when the event was created */
1796
+ created?: string | null;
1797
+ /** Timestamp when the event was last updated */
1798
+ updated?: string | null;
1799
+ /** Event title */
1800
+ summary?: string | null;
1801
+ /** Event description */
1802
+ description?: string | null;
1803
+ /** Event location */
1804
+ location?: string | null;
1805
+ /** Event organizer */
1806
+ organizer?: {
1807
+ displayName?: string | null;
1808
+ email?: string | null;
1809
+ } | null;
1810
+ /** Event start time and timezone */
1811
+ start?: {
1812
+ dateTime?: string | null;
1813
+ timeZone?: string | null;
1814
+ } | null;
1815
+ /** Event end time and timezone */
1816
+ end?: {
1817
+ dateTime?: string | null;
1818
+ timeZone?: string | null;
1819
+ } | null;
1820
+ /** List of event attendees */
1821
+ attendees?: ({
1822
+ displayName?: string | null;
1823
+ email?: string | null;
1824
+ responseStatus?: string | null;
1825
+ })[] | null;
1826
+ })[];
1827
+ }
1828
+ interface LogicStepInput {
1829
+ /** Prompt text providing context for the AI evaluation */
1830
+ context: string;
1831
+ /** List of conditions to evaluate (objects for managed UIs, strings for code) */
1832
+ cases: ({
1833
+ /** Unique case identifier */
1834
+ id: string;
1835
+ /** The statement to evaluate (e.g., "User selected a dog") */
1836
+ condition: string;
1837
+ /** Step to transition to if this case wins (workflow mode only) */
1838
+ destinationStepId?: string;
1839
+ } | string)[];
1840
+ }
1841
+ interface LogicStepOutput {
1842
+ /** The index of the winning case */
1843
+ selectedCase: number;
1844
+ }
1845
+ interface MakeDotComRunScenarioStepInput {
1846
+ /** Make.com webhook URL for the scenario */
1847
+ webhookUrl: string;
1848
+ /** Key-value pairs to send as the JSON POST body */
1849
+ input: Record<string, unknown>;
1850
+ /** Variable name to store the scenario response */
1851
+ destinationVar?: string;
1852
+ }
1853
+ interface MakeDotComRunScenarioStepOutput {
1854
+ /** Response from the Make.com scenario (JSON or string depending on scenario configuration) */
1855
+ data: unknown;
1856
+ }
1857
+ interface MergeAudioStepInput {
1858
+ /** URLs of the MP3 audio clips to merge in order */
1859
+ mp3Urls: string[];
1860
+ /** Variable name to store the output URL */
1861
+ destinationVar: string;
1862
+ /** FFmpeg MP3 metadata key-value pairs to embed in the output file */
1863
+ fileMetadata?: Record<string, unknown>;
1864
+ /** URL of an image to embed as album art in the output file */
1865
+ albumArtUrl?: string;
1866
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
1867
+ skipAssetCreation?: boolean;
1868
+ }
1869
+ interface MergeAudioStepOutput {
1870
+ /** URL of the merged audio file */
1871
+ audioUrl: string;
1872
+ }
1873
+ interface MergeVideosStepInput {
1874
+ /** URLs of the video clips to merge in order */
1875
+ videoUrls: string[];
1876
+ /** Optional xfade transition effect name */
1877
+ transition?: string;
1878
+ /** Duration of the transition in seconds */
1879
+ transitionDuration?: number;
1880
+ /** Variable name to store the output URL */
1881
+ destinationVar: string;
1882
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
1883
+ skipAssetCreation?: boolean;
1884
+ }
1885
+ interface MergeVideosStepOutput {
1886
+ /** URL of the merged video */
1887
+ videoUrl: string;
1888
+ }
1889
+ interface MixAudioIntoVideoStepInput {
1890
+ /** URL of the source video */
1891
+ videoUrl: string;
1892
+ /** URL of the audio track to mix into the video */
1893
+ audioUrl: string;
1894
+ /** Variable name to store the output URL */
1895
+ destinationVar: string;
1896
+ /** Audio mixing options */
1897
+ options: {
1898
+ /** When true, preserves the original video audio alongside the new track. Defaults to false. */
1899
+ keepVideoAudio?: boolean;
1900
+ /** Volume adjustment for the new audio track in decibels. Defaults to 0. */
1901
+ audioGainDb?: number;
1902
+ /** Volume adjustment for the existing video audio in decibels. Defaults to 0. */
1903
+ videoGainDb?: number;
1904
+ /** When true, loops the audio track to match the video duration. Defaults to false. */
1905
+ loopAudio?: boolean;
1906
+ };
1907
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
1908
+ skipAssetCreation?: boolean;
1909
+ }
1910
+ interface MixAudioIntoVideoStepOutput {
1911
+ /** URL of the video with the mixed audio track */
1912
+ videoUrl: string;
1913
+ }
1914
+ interface MuteVideoStepInput {
1915
+ /** URL of the source video to mute */
1916
+ videoUrl: string;
1917
+ /** Variable name to store the output URL */
1918
+ destinationVar: string;
1919
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
1920
+ skipAssetCreation?: boolean;
1921
+ }
1922
+ interface MuteVideoStepOutput {
1923
+ /** URL of the muted video */
1924
+ videoUrl: string;
1925
+ }
1926
+ interface N8nRunNodeStepInput {
1927
+ /** Variable name to store the response */
1928
+ destinationVar?: string;
1929
+ /** HTTP method to use (GET or POST) */
1930
+ method: string;
1931
+ /** Authentication type for the webhook request */
1932
+ authentication: "none" | "basic" | "string";
1933
+ /** Username for Basic authentication */
1934
+ user: string;
1935
+ /** Password for Basic authentication */
1936
+ password: string;
1937
+ /** n8n webhook URL for the workflow node */
1938
+ webhookUrl: string;
1939
+ /** Key-value pairs sent as query params (GET) or JSON body (POST) */
1940
+ input: Record<string, unknown>;
1941
+ }
1942
+ interface N8nRunNodeStepOutput {
1943
+ /** Response from the n8n node (JSON or string depending on node configuration) */
1944
+ data: unknown;
1945
+ }
1946
+ interface NotionCreatePageStepInput {
1947
+ /** Parent page ID to create the new page under */
1948
+ pageId: string;
1949
+ /** Page content in markdown format */
1950
+ content: string;
1951
+ /** Page title */
1952
+ title: string;
1953
+ /** Notion OAuth connection ID */
1954
+ connectionId: string;
1955
+ }
1956
+ interface NotionCreatePageStepOutput {
1957
+ /** Notion page ID of the created page */
1958
+ pageId: string;
1959
+ /** URL to view the page in Notion */
1960
+ pageUrl: string;
1961
+ }
1962
+ interface NotionUpdatePageStepInput {
1963
+ /** Notion page ID to update */
1964
+ pageId: string;
1965
+ /** New content in markdown format */
1966
+ content: string;
1967
+ /** How to apply the content: 'append' adds to end, 'overwrite' replaces all existing content */
1968
+ mode: "append" | "overwrite";
1969
+ /** Notion OAuth connection ID */
1970
+ connectionId: string;
1971
+ }
1972
+ interface NotionUpdatePageStepOutput {
1973
+ /** Notion page ID of the updated page */
1974
+ pageId: string;
1975
+ /** URL to view the page in Notion */
1976
+ pageUrl: string;
1977
+ }
1978
+ interface PeopleSearchStepInput {
1979
+ /** Variable name to store the search results as JSON */
1980
+ destinationVar: string;
1981
+ /** Natural language search query (e.g. "marketing directors at SaaS companies in NYC") */
1982
+ smartQuery: string;
1983
+ /** Whether to enrich each result with full contact details */
1984
+ enrichPeople: boolean;
1985
+ /** Whether to enrich each result with full company details */
1986
+ enrichOrganizations: boolean;
1987
+ /** Maximum number of results to return */
1988
+ limit: string;
1989
+ /** Page number for pagination */
1990
+ page: string;
1991
+ /** Advanced search filter parameters */
1992
+ params: {
1993
+ /** Job titles to search for (comma-separated) */
1994
+ personTitles: string;
1995
+ /** Whether to include similar/related job titles */
1996
+ includeSimilarTitles: string;
1997
+ /** Keywords to search for in person profiles */
1998
+ qKeywords: string;
1999
+ /** Geographic locations of people (comma-separated) */
2000
+ personLocations: string;
2001
+ /** Seniority levels to filter by (comma-separated) */
2002
+ personSeniorities: string;
2003
+ /** Geographic locations of organizations (comma-separated) */
2004
+ organizationLocations: string;
2005
+ /** Organization domains to filter by (comma-separated) */
2006
+ qOrganizationDomainsList: string;
2007
+ /** Email verification status filter */
2008
+ contactEmailStatus: string;
2009
+ /** Employee count ranges as semicolon-separated pairs (e.g. "1,10; 250,500") */
2010
+ organizationNumEmployeesRanges: string;
2011
+ /** Minimum annual revenue filter */
2012
+ revenueRangeMin: string;
2013
+ /** Maximum annual revenue filter */
2014
+ revenueRangeMax: string;
2015
+ /** Technology UIDs the organization must use (all required) */
2016
+ currentlyUsingAllOfTechnologyUids: string;
2017
+ /** Technology UIDs the organization uses (any match) */
2018
+ currentlyUsingAnyOfTechnologyUids: string;
2019
+ /** Technology UIDs the organization must not use */
2020
+ currentlyNotUsingAnyOfTechnologyUids: string;
2021
+ };
2022
+ }
2023
+ interface PeopleSearchStepOutput {
2024
+ /** Apollo search results with matched people and optionally enriched data */
2025
+ results: unknown;
2026
+ }
2027
+ interface PostToLinkedInStepInput {
2028
+ /** The text content of the LinkedIn post */
2029
+ message: string;
2030
+ /** Who can see the post: "PUBLIC" or "CONNECTIONS" */
2031
+ visibility: "PUBLIC" | "CONNECTIONS";
2032
+ /** URL of a video to attach to the post */
2033
+ videoUrl?: string;
2034
+ /** Description text for link/media attachments */
2035
+ descriptionText?: string;
2036
+ /** Title text for link/media attachments */
2037
+ titleText?: string;
2038
+ /** URL of an image to attach to the post */
2039
+ imageUrl?: string;
2040
+ /** LinkedIn OAuth connection ID */
2041
+ connectionId: string;
2042
+ }
2043
+ type PostToLinkedInStepOutput = unknown;
2044
+ interface PostToSlackChannelStepInput {
2045
+ /** Slack channel ID (leave empty to allow user to select a channel) */
2046
+ channelId: string;
2047
+ /** Message format: "string" for plain text/markdown, "blocks" for Slack Block Kit JSON */
2048
+ messageType: "string" | "blocks";
2049
+ /** Message content (plain text/markdown for "string" type, or JSON for "blocks" type) */
2050
+ message: string;
2051
+ /** Slack OAuth connection ID (leave empty to allow user to select) */
2052
+ connectionId: string;
2053
+ }
2054
+ type PostToSlackChannelStepOutput = unknown;
2055
+ interface PostToXStepInput {
2056
+ /** The text content of the post (max 280 characters) */
2057
+ text: string;
2058
+ /** X (Twitter) OAuth connection ID */
2059
+ connectionId: string;
2060
+ }
2061
+ type PostToXStepOutput = unknown;
2062
+ interface PostToZapierStepInput {
2063
+ /** Variable name to store the webhook response */
2064
+ destinationVar?: string;
2065
+ /** Zapier webhook URL to send data to */
2066
+ webhookUrl: string;
2067
+ /** Key-value pairs to send as the JSON POST body */
2068
+ input: Record<string, unknown>;
2069
+ }
2070
+ interface PostToZapierStepOutput {
2071
+ /** Parsed webhook response from Zapier (JSON object, array, or string) */
2072
+ data: unknown;
2073
+ }
2074
+ interface QueryDataSourceStepInput {
2075
+ /** ID of the vector data source to query */
2076
+ dataSourceId: string;
2077
+ /** The search query to run against the data source */
2078
+ query: string;
2079
+ /** Variable name to save the joined result text into */
2080
+ destinationVar?: string;
2081
+ /** Maximum number of chunks to return (recommended 1-3) */
2082
+ maxResults: number;
2083
+ }
2084
+ interface QueryDataSourceStepOutput {
2085
+ /** All matching chunks joined with newlines */
2086
+ text: string;
2087
+ /** Individual matching text chunks from the data source */
2088
+ chunks: string[];
2089
+ /** The resolved search query that was executed */
2090
+ query: string;
2091
+ /** Source citations for the matched chunks */
2092
+ citations: unknown[];
2093
+ /** Query execution time in milliseconds */
2094
+ latencyMs: number;
2095
+ }
2096
+ interface QueryExternalDatabaseStepInput {
2097
+ /** Database connection ID configured in the workspace */
2098
+ connectionId: string;
2099
+ /** SQL query to execute (supports variable interpolation) */
2100
+ query: string;
2101
+ /** Variable name to save the formatted query result to */
2102
+ destinationVar?: string;
2103
+ /** Output format for the result variable */
2104
+ outputFormat: "json" | "csv";
2105
+ }
2106
+ interface QueryExternalDatabaseStepOutput {
2107
+ /** Query result rows (array of objects for JSON, CSV string for CSV format) */
2108
+ data: unknown;
2109
+ }
2110
+ interface RedactPIIStepInput {
2111
+ /** Text to redact PII from */
2112
+ input: string;
2113
+ /** Language code of the input text (e.g. "en") */
2114
+ language: string;
2115
+ /** PII entity types to redact (e.g. ["PHONE_NUMBER", "EMAIL_ADDRESS"]). Empty array means nothing is redacted. */
2116
+ entities: string[];
2117
+ /** Variable name to store the redacted text */
2118
+ destinationVar?: string;
2119
+ }
2120
+ interface RedactPIIStepOutput {
2121
+ /** The input text with detected PII replaced by entity type placeholders (e.g. "<PHONE_NUMBER>") */
2122
+ text: string;
2123
+ }
2124
+ interface RemoveBackgroundFromImageStepInput {
2125
+ /** URL of the source image to remove the background from */
2126
+ imageUrl: string;
2127
+ /** Variable name to store the output URL */
2128
+ destinationVar?: string;
2129
+ }
2130
+ interface RemoveBackgroundFromImageStepOutput {
2131
+ /** CDN URL of the image with background removed (transparent PNG) */
2132
+ imageUrl: string;
2133
+ }
2134
+ interface ResizeVideoStepInput {
2135
+ /** URL of the source video to resize */
2136
+ videoUrl: string;
2137
+ /** Variable name to store the output URL */
2138
+ destinationVar: string;
2139
+ /** Resize mode: 'fit' scales within max dimensions, 'exact' forces exact dimensions */
2140
+ mode: "fit" | "exact";
2141
+ /** Maximum width in pixels (used with 'fit' mode) */
2142
+ maxWidth?: number;
2143
+ /** Maximum height in pixels (used with 'fit' mode) */
2144
+ maxHeight?: number;
2145
+ /** Exact width in pixels (used with 'exact' mode) */
2146
+ width?: number;
2147
+ /** Exact height in pixels (used with 'exact' mode) */
2148
+ height?: number;
2149
+ /** Strategy for handling aspect ratio mismatch in 'exact' mode */
2150
+ strategy?: "pad" | "crop";
2151
+ /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
2152
+ skipAssetCreation?: boolean;
2153
+ }
2154
+ interface ResizeVideoStepOutput {
2155
+ /** URL of the resized video */
2156
+ videoUrl: string;
2157
+ }
2158
+ interface RunPackagedWorkflowStepInput {
2159
+ /** The app ID of the packaged workflow source */
2160
+ appId: string;
2161
+ /** The source workflow ID to execute */
2162
+ workflowId: string;
2163
+ /** Variables to pass as input to the packaged workflow */
2164
+ inputVariables: Record<string, unknown>;
2165
+ /** Variables to capture from the packaged workflow output */
2166
+ outputVariables: Record<string, unknown>;
2167
+ /** Display name of the packaged workflow */
2168
+ name: string;
2169
+ }
2170
+ interface RunPackagedWorkflowStepOutput {
2171
+ /** The result data returned from the packaged workflow */
2172
+ data: unknown;
2173
+ }
2174
+ interface RunWorkflowStepInput {
2175
+ /** The app ID containing the workflow to run (empty string for same app) */
2176
+ appId: string;
2177
+ /** The name of the workflow to execute */
2178
+ workflowName: string;
2179
+ /** Execution mode: run once, map over an array, or reduce over an array */
2180
+ mode?: "once" | "map" | "reduce";
2181
+ /** Configuration for map/reduce mode execution */
2182
+ mapSettings?: {
2183
+ /** How to interpret the input: default uses AI extraction, stringArray splits by delimiter, array expects JSON */
2184
+ inputMode: "default" | "array" | "stringArray";
2185
+ /** The input string or variable reference to map over */
2186
+ input: string;
2187
+ /** Delimiter for splitting string input in stringArray mode (e.g., comma or newline) */
2188
+ stringArrayDelimiter?: string;
2189
+ /** AI prompt for extracting an array from unstructured input (default inputMode only) */
2190
+ extractPrompt: string;
2191
+ /** Variable name to save the aggregated output results to */
2192
+ outputVariableName: string;
2193
+ /** Whether to run mapped workflows in parallel or sequentially */
2194
+ executionMode: "parallel" | "sequential";
2195
+ /** Behavior when a mapped workflow fails: fail aborts all, ignore continues */
2196
+ errorBehavior: "fail" | "ignore";
2197
+ /** Number of retry attempts for failed workflow executions */
2198
+ numRetries?: number;
2199
+ };
2200
+ /** Variables to pass as input to the child workflow, keyed by variable name */
2201
+ inputVariables?: Record<string, unknown>;
2202
+ /** Variables to capture from the child workflow output, keyed by variable name */
2203
+ outputVariables: Record<string, unknown>;
2204
+ }
2205
+ interface RunWorkflowStepOutput {
2206
+ /** The result data returned from the spawned workflow(s) */
2207
+ data: unknown;
2208
+ }
2209
+ interface ScrapeFacebookPageStepInput {
2210
+ /** Full URL to the Facebook page to scrape */
2211
+ pageUrl: string;
2212
+ /** Variable name to store the scraped page data */
2213
+ destinationVar: string;
2214
+ }
2215
+ interface ScrapeFacebookPageStepOutput {
2216
+ /** The result data returned from the Apify actor run */
2217
+ data: unknown;
2218
+ }
2219
+ interface ScrapeFacebookPostsStepInput {
2220
+ /** Full URL to the Facebook page to scrape posts from */
2221
+ pageUrl: string;
2222
+ /** Variable name to store the scraped posts data */
2223
+ destinationVar: string;
2224
+ }
2225
+ interface ScrapeFacebookPostsStepOutput {
2226
+ /** The result data returned from the Apify actor run */
2227
+ data: unknown;
2228
+ }
2229
+ interface ScrapeInstagramCommentsStepInput {
2230
+ /** Full URL to the Instagram post to scrape comments from */
2231
+ postUrl: string;
2232
+ /** Variable name to store the scraped comments data */
2233
+ destinationVar: string;
2234
+ /** Maximum number of comments to return */
2235
+ resultsLimit: string;
2236
+ }
2237
+ interface ScrapeInstagramCommentsStepOutput {
2238
+ /** The result data returned from the Apify actor run */
2239
+ data: unknown;
2240
+ }
2241
+ interface ScrapeInstagramMentionsStepInput {
2242
+ /** Instagram profile URL or username to scrape mentions for */
2243
+ profileUrl: string;
2244
+ /** Variable name to store the scraped mentions data */
2245
+ destinationVar: string;
2246
+ /** Maximum number of mentions to return */
2247
+ resultsLimit: string;
2248
+ }
2249
+ interface ScrapeInstagramMentionsStepOutput {
2250
+ /** The result data returned from the Apify actor run */
2251
+ data: unknown;
2252
+ }
2253
+ interface ScrapeInstagramPostsStepInput {
2254
+ /** Instagram profile URL or username to scrape posts from */
2255
+ profileUrl: string;
2256
+ /** Variable name to store the scraped posts data */
2257
+ destinationVar: string;
2258
+ /** Maximum number of posts to return */
2259
+ resultsLimit: string;
2260
+ /** Only return posts newer than this date (ISO 8601 format) */
2261
+ onlyPostsNewerThan: string;
2262
+ }
2263
+ interface ScrapeInstagramPostsStepOutput {
2264
+ /** The result data returned from the Apify actor run */
2265
+ data: unknown;
2266
+ }
2267
+ interface ScrapeInstagramProfileStepInput {
2268
+ /** Instagram profile URL or username to scrape */
2269
+ profileUrl: string;
2270
+ /** Variable name to store the scraped profile data */
2271
+ destinationVar: string;
2272
+ }
2273
+ interface ScrapeInstagramProfileStepOutput {
2274
+ /** The result data returned from the Apify actor run */
2275
+ data: unknown;
2276
+ }
2277
+ interface ScrapeInstagramReelsStepInput {
2278
+ /** Instagram profile URL or username to scrape reels from */
2279
+ profileUrl: string;
2280
+ /** Variable name to store the scraped reels data */
2281
+ destinationVar: string;
2282
+ /** Maximum number of reels to return */
2283
+ resultsLimit: string;
2284
+ }
2285
+ interface ScrapeInstagramReelsStepOutput {
2286
+ /** The result data returned from the Apify actor run */
2287
+ data: unknown;
2288
+ }
2289
+ interface ScrapeLinkedInCompanyStepInput {
2290
+ /** LinkedIn company page URL (e.g. https://www.linkedin.com/company/mindstudioai) */
2291
+ url: string;
2292
+ /** Variable to store the scraped company data */
2293
+ destinationVar?: string;
2294
+ }
2295
+ interface ScrapeLinkedInCompanyStepOutput {
2296
+ /** Scraped LinkedIn company data */
2297
+ company: unknown;
2298
+ }
2299
+ interface ScrapeLinkedInProfileStepInput {
2300
+ /** LinkedIn profile URL (e.g. https://www.linkedin.com/in/username) */
2301
+ url: string;
2302
+ /** Variable to store the scraped profile data */
2303
+ destinationVar?: string;
2304
+ }
2305
+ interface ScrapeLinkedInProfileStepOutput {
2306
+ /** Scraped LinkedIn profile data */
2307
+ profile: unknown;
2308
+ }
2309
+ interface ScrapeMetaThreadsProfileStepInput {
2310
+ /** Meta Threads profile URL or username to scrape */
2311
+ profileUrl: string;
2312
+ /** Variable name to store the scraped profile data */
2313
+ destinationVar: string;
2314
+ }
2315
+ interface ScrapeMetaThreadsProfileStepOutput {
2316
+ /** The result data returned from the Apify actor run */
2317
+ data: unknown;
2318
+ }
2319
+ interface ScrapeUrlStepInput {
2320
+ /** URL(s) to scrape. Accepts a single URL, JSON array, or comma/newline-separated list */
2321
+ url: string;
2322
+ /** Variable name to save the scraped content into */
2323
+ destinationVar?: string;
2324
+ /** Variable name to save the screenshot URL(s) into */
2325
+ screenshotVar?: string;
2326
+ /** Scraping service to use */
2327
+ service: "default" | "firecrawl";
2328
+ /** Whether to enable enhanced scraping for social media URLs (e.g. Twitter, LinkedIn) */
2329
+ autoEnhance: boolean;
2330
+ /** Output format: text returns markdown, html returns raw HTML, json returns structured scraper data */
2331
+ outputFormat: "text" | "json" | "html";
2332
+ /** Page-level scraping options (content filtering, screenshots, headers, etc.) */
2333
+ pageOptions: {
2334
+ /** Whether to extract only the main content of the page, excluding navigation, footers, etc. */
2335
+ onlyMainContent: boolean;
2336
+ /** Whether to capture a screenshot of the page */
2337
+ screenshot: boolean;
2338
+ /** Milliseconds to wait before scraping (0 for immediate) */
2339
+ waitFor: number;
2340
+ /** Whether to convert relative URLs to absolute URLs in the result */
2341
+ replaceAllPathsWithAbsolutePaths: boolean;
2342
+ /** Custom HTTP request headers as key-value pairs */
2343
+ headers: Record<string, unknown>;
2344
+ /** HTML tags to remove from the scraped result */
2345
+ removeTags: string[];
2346
+ /** Whether to scrape using a mobile user-agent */
2347
+ mobile: boolean;
2348
+ };
2349
+ }
2350
+ interface ScrapeUrlStepOutput {
2351
+ /** Scraped content. Shape depends on outputFormat and number of URLs */
2352
+ content: string | string[] | {
2353
+ /** Markdown/plain-text content of the scraped page */
2354
+ text: string;
2355
+ /** Raw HTML content of the scraped page */
2356
+ html: string;
2357
+ /** Structured data extracted from the page */
2358
+ json?: Record<string, unknown>;
2359
+ /** Screenshot URL of the page (if requested) */
2360
+ screenshotUrl?: string;
2361
+ /** Page metadata (Open Graph / meta tags) */
2362
+ metadata?: {
2363
+ /** Page title */
2364
+ title: string;
2365
+ /** Page meta description */
2366
+ description: string;
2367
+ /** Canonical URL */
2368
+ url: string;
2369
+ /** Open Graph image URL */
2370
+ image: string;
2371
+ };
2372
+ } | {
2373
+ /** Markdown/plain-text content of the scraped page */
2374
+ text: string;
2375
+ /** Raw HTML content of the scraped page */
2376
+ html: string;
2377
+ /** Structured data extracted from the page */
2378
+ json?: Record<string, unknown>;
2379
+ /** Screenshot URL of the page (if requested) */
2380
+ screenshotUrl?: string;
2381
+ /** Page metadata (Open Graph / meta tags) */
2382
+ metadata?: {
2383
+ /** Page title */
2384
+ title: string;
2385
+ /** Page meta description */
2386
+ description: string;
2387
+ /** Canonical URL */
2388
+ url: string;
2389
+ /** Open Graph image URL */
2390
+ image: string;
2391
+ };
2392
+ }[];
2393
+ /** Screenshot URL, only present when screenshot was requested via pageOptions */
2394
+ screenshot?: string;
2395
+ }
2396
+ interface ScrapeXPostStepInput {
2397
+ /** Full URL to the X post (e.g. https://x.com/elonmusk/status/1655608985058267139) */
2398
+ url: string;
2399
+ /** Variable to store the scraped text content */
2400
+ destinationVar?: string;
2401
+ }
2402
+ interface ScrapeXPostStepOutput {
2403
+ /** Scraped post data including text, HTML, and optional structured JSON */
2404
+ post: {
2405
+ /** Markdown/plain-text content of the scraped page */
2406
+ text: string;
2407
+ /** Raw HTML content of the scraped page */
2408
+ html: string;
2409
+ /** Structured data extracted from the page */
2410
+ json?: Record<string, unknown>;
2411
+ /** Screenshot URL of the page (if requested) */
2412
+ screenshotUrl?: string;
2413
+ /** Page metadata (Open Graph / meta tags) */
2414
+ metadata?: {
2415
+ /** Page title */
2416
+ title: string;
2417
+ /** Page meta description */
2418
+ description: string;
2419
+ /** Canonical URL */
2420
+ url: string;
2421
+ /** Open Graph image URL */
2422
+ image: string;
2423
+ };
2424
+ };
2425
+ }
2426
+ interface ScrapeXProfileStepInput {
2427
+ /** Full URL or username for the X profile (e.g. https://x.com/elonmusk) */
2428
+ url: string;
2429
+ /** Variable to store the scraped profile data */
2430
+ destinationVar?: string;
2431
+ }
2432
+ interface ScrapeXProfileStepOutput {
2433
+ /** Scraped profile data including text, HTML, and optional structured JSON */
2434
+ profile: {
2435
+ /** Markdown/plain-text content of the scraped page */
2436
+ text: string;
2437
+ /** Raw HTML content of the scraped page */
2438
+ html: string;
2439
+ /** Structured data extracted from the page */
2440
+ json?: Record<string, unknown>;
2441
+ /** Screenshot URL of the page (if requested) */
2442
+ screenshotUrl?: string;
2443
+ /** Page metadata (Open Graph / meta tags) */
2444
+ metadata?: {
2445
+ /** Page title */
2446
+ title: string;
2447
+ /** Page meta description */
2448
+ description: string;
2449
+ /** Canonical URL */
2450
+ url: string;
2451
+ /** Open Graph image URL */
2452
+ image: string;
2453
+ };
2454
+ };
2455
+ }
2456
+ interface SearchGoogleStepInput {
2457
+ /** The search query to send to Google */
2458
+ query: string;
2459
+ /** Variable to store the results in */
2460
+ destinationVar?: string;
2461
+ /** Format for the variable value: "text" or "json" */
2462
+ exportType: "text" | "json";
2463
+ /** Google gl country code (defaults to US) */
2464
+ countryCode?: string;
2465
+ /** Google hl language code (defaults to "en") */
2466
+ languageCode?: string;
2467
+ /** Time range filter: "hour", "day", "week", "month", "year", or "any" */
2468
+ dateRange?: "hour" | "day" | "week" | "month" | "year" | "any";
2469
+ /** Number of results to return (1-100, default: 30) */
2470
+ numResults?: number;
2471
+ }
2472
+ interface SearchGoogleStepOutput {
2473
+ /** List of search result entries */
2474
+ results: {
2475
+ /** Title of the search result */
2476
+ title: string;
2477
+ /** Snippet/description of the search result */
2478
+ description: string;
2479
+ /** URL of the search result page */
2480
+ url: string;
2481
+ }[];
2482
+ }
2483
/** Input parameters for the `searchGoogleImages` step. */
interface SearchGoogleImagesStepInput {
  /** The image search query */
  query: string;
  /** Variable to store the results in */
  destinationVar?: string;
  /** Format for the variable value: "text" or "json" */
  exportType: "text" | "json";
  /** Google gl country code (defaults to US) */
  countryCode?: string;
  /** Google hl language code (defaults to "en") */
  languageCode?: string;
  /** Time range filter: "hour", "day", "week", "month", "year", or "any" */
  dateRange?: "hour" | "day" | "week" | "month" | "year" | "any";
  /** Number of results to return (1-100, default: 30) */
  numResults?: number;
}
/** Result payload for the `searchGoogleImages` step. */
interface SearchGoogleImagesStepOutput {
  /** List of image search results with URLs and metadata */
  images: {
    /** Title/alt text of the image */
    title: string;
    /** Direct URL of the full-size image */
    imageUrl: string;
    /** Width of the full-size image in pixels */
    imageWidth: number;
    /** Height of the full-size image in pixels */
    imageHeight: number;
    /** URL of the thumbnail image */
    thumbnailUrl: string;
    /** Width of the thumbnail in pixels */
    thumbnailWidth: number;
    /** Height of the thumbnail in pixels */
    thumbnailHeight: number;
    /** Source website name */
    source: string;
    /** Domain of the source website */
    domain: string;
    /** URL of the page containing the image */
    link: string;
    /** Google Images URL for this result */
    googleUrl: string;
    /** Position/rank of this result in the search results */
    position: number;
  }[];
}
2528
/**
 * Input parameters for the `searchGoogleNews` step.
 *
 * NOTE(review): unlike `searchGoogle`/`searchGoogleImages`, the query field
 * here is named `text` rather than `query` — this mirrors the step's wire
 * format and cannot be renamed without breaking the API contract.
 */
interface SearchGoogleNewsStepInput {
  /** The news search query */
  text: string;
  /** Variable to store the results in */
  destinationVar?: string;
  /** Format for the variable value: "text" or "json" */
  exportType: "text" | "json";
  /** Number of results to return (1-100, default: 30) */
  numResults?: number;
}
/** Result payload for the `searchGoogleNews` step. */
interface SearchGoogleNewsStepOutput {
  /** List of matching news articles */
  articles: {
    /** Headline of the news article */
    title: string;
    /** URL to the full article */
    link: string;
    /** Publication date of the article */
    date: string;
    /** Source publication */
    source: {
      /** Name of the news source */
      name: string;
    };
    /** Brief excerpt or summary of the article */
    snippet?: string;
  }[];
}
2556
/**
 * Input parameters for the `searchGoogleTrends` step.
 *
 * Field names (`hl`, `geo`, `data_type`, `cat`, `date`, `ts`) follow the
 * underlying Google Trends query parameters rather than camelCase —
 * they are part of the step's wire contract.
 */
interface SearchGoogleTrendsStepInput {
  /** The search term to look up on Google Trends */
  text: string;
  /** Variable to store the results in */
  destinationVar?: string;
  /** Language code (e.g. "en") */
  hl: string;
  /** Geographic region: empty string for worldwide, or a two-letter country code */
  geo: string;
  /** Type of trend data to return */
  data_type: "TIMESERIES" | "GEO_MAP" | "GEO_MAP_0" | "RELATED_TOPICS" | "RELATED_QUERIES";
  /** Category filter ("0" for all categories) */
  cat: string;
  /**
   * Date range for trend data. Available options:
   * - "now 1-H" — Past hour
   * - "now 4-H" — Past 4 hours
   * - "now 1-d" — Past day
   * - "now 7-d" — Past 7 days
   * - "today 1-m" — Past 30 days
   * - "today 3-m" — Past 90 days
   * - "today 12-m" — Past 12 months
   * - "today 5-y" — Past 5 years
   * - "all" — 2004 to present
   * You can also pass a custom range: "yyyy-mm-dd yyyy-mm-dd"
   */
  date: string;
  /** Timezone offset in minutes (-1439 to 1439, default: 420 for PDT) */
  ts: string;
}
/** Result payload for the `searchGoogleTrends` step. */
interface SearchGoogleTrendsStepOutput {
  /** Google Trends data for the searched term */
  trends: Record<string, unknown>;
}
2578
/** Input parameters for the `searchPerplexity` step. */
interface SearchPerplexityStepInput {
  /** Search query to send to Perplexity */
  query: string;
  /** Variable name to store the search results */
  destinationVar?: string;
  /** Output format for the variable: plain text or structured JSON */
  exportType: "text" | "json";
  /** ISO country code to filter results by region (e.g. "us", "gb") */
  countryCode?: string;
  /** Number of results to return (1-20, default: 10) */
  numResults?: number;
}
/** Result payload for the `searchPerplexity` step. */
interface SearchPerplexityStepOutput {
  /** List of structured search results */
  results: {
    /** Page title of the search result */
    title: string;
    /** Snippet or description of the search result */
    description: string;
    /** URL of the search result page */
    url: string;
  }[];
}
2601
/** Input parameters for the `searchXPosts` step. */
interface SearchXPostsStepInput {
  /** Search query (max 512 chars, supports X API v2 search operators) */
  query: string;
  /** Variable to store the JSON-stringified results */
  destinationVar?: string;
  /** Search scope: "recent" for past 7 days or "all" for full archive */
  scope: "recent" | "all";
  /** Additional search options (required, but every member is optional) */
  options: {
    /** ISO 8601 date; only return posts after this time */
    startTime?: string;
    /** ISO 8601 date; only return posts before this time */
    endTime?: string;
    /** Number of results to return (default: 50, max: 100) */
    maxResults?: number;
  };
}
/** Result payload for the `searchXPosts` step. */
interface SearchXPostsStepOutput {
  /** List of matching X posts */
  posts: {
    /** Unique post identifier */
    id: string;
    /** Author's X user ID */
    authorId: string;
    /** ISO 8601 timestamp when the post was created */
    dateCreated: string;
    /** Text content of the post */
    text: string;
    /** Engagement statistics for the post */
    stats: {
      /** Number of retweets/reposts */
      retweets: number;
      /** Number of replies */
      replies: number;
      /** Number of likes */
      likes: number;
    };
  }[];
}
2640
/** Input parameters for the `searchYoutube` step. */
interface SearchYoutubeStepInput {
  /** Search query for YouTube videos */
  query: string;
  /** Variable name to save the search results */
  destinationVar?: string;
  /** Maximum number of pages to fetch (1-5) — passed as a string, not a number */
  limitPages: string;
  /** YouTube search parameter (sp) filter value */
  filter: string;
  /** Filter type identifier */
  filterType: string;
  /** Google gl country code for regional results (default: "US") */
  countryCode?: string;
  /** Google hl language code for result language (default: "en") */
  languageCode?: string;
}
/** Result payload for the `searchYoutube` step. */
interface SearchYoutubeStepOutput {
  /** YouTube search results including video_results, channel_results, etc. */
  results: Record<string, unknown>;
}
2660
/**
 * Input parameters for the `searchYoutubeTrends` step.
 * Field names (`bp`, `hl`, `gl`) follow the underlying YouTube/Google
 * query parameters and are part of the step's wire contract.
 */
interface SearchYoutubeTrendsStepInput {
  /** Variable name to save the trends data */
  destinationVar?: string;
  /** Trending category: "now" (trending now), "music", "gaming", or "films" */
  bp: "now" | "music" | "gaming" | "films";
  /** Language code (e.g. "en") */
  hl: string;
  /** Country code (e.g. "US") */
  gl: string;
}
/** Result payload for the `searchYoutubeTrends` step. */
interface SearchYoutubeTrendsStepOutput {
  /** Trending video data for the selected category and region */
  trends: Record<string, unknown>;
}
2674
/** Input parameters for the `sendEmail` step. */
interface SendEmailStepInput {
  /** Email subject line */
  subject: string;
  /** Email body content (plain text, markdown, HTML, or a CDN URL to an HTML file) */
  body: string;
  /** OAuth connection ID(s) for the recipient(s), comma-separated for multiple */
  connectionId: string;
  /** When true, auto-convert the body text into a styled HTML email using AI */
  generateHtml?: boolean;
  /** Natural language instructions for the HTML generation style */
  generateHtmlInstructions?: string;
  /**
   * Model settings override for HTML generation.
   * Same shape as `UserMessageStepInput.modelOverride`.
   */
  generateHtmlModelOverride?: {
    /** Model identifier (e.g. "gpt-4", "claude-3-opus") */
    model: string;
    /** Sampling temperature for the model (0-2) */
    temperature: number;
    /** Maximum number of tokens in the model's response */
    maxResponseTokens: number;
    /** Whether to skip the system preamble/instructions */
    ignorePreamble?: boolean;
    /** Preprocessor applied to user messages before sending to the model */
    userMessagePreprocessor?: {
      /** Data source identifier for the preprocessor */
      dataSource?: string;
      /** Template string applied to user messages before sending to the model */
      messageTemplate?: string;
      /** Maximum number of results to include from the data source */
      maxResults?: number;
      /** Whether the preprocessor is active */
      enabled?: boolean;
      /** Whether child steps should inherit this preprocessor configuration */
      shouldInherit?: boolean;
    };
    /** System preamble/instructions for the model */
    preamble?: string;
    /** Whether multi-model candidate generation is enabled */
    multiModelEnabled?: boolean;
    /** Whether the user can edit the model's response */
    editResponseEnabled?: boolean;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
  /** URLs of files to attach to the email */
  attachments?: string[];
}
/** Result payload for the `sendEmail` step. */
interface SendEmailStepOutput {
  /** Email addresses the message was sent to */
  recipients: string[];
}
2724
/** Input parameters for the `sendSMS` step. */
interface SendSMSStepInput {
  /** SMS message body text */
  body: string;
  /** OAuth connection ID for the recipient phone number */
  connectionId: string;
}
/** The `sendSMS` step produces no structured output. */
type SendSMSStepOutput = unknown;
/** Input parameters for the `setRunTitle` step. */
interface SetRunTitleStepInput {
  /** The title to assign to the agent run (supports variable interpolation) */
  title: string;
}
/** The `setRunTitle` step produces no structured output. */
type SetRunTitleStepOutput = unknown;
/** Input parameters for the `setVariable` step. */
interface SetVariableStepInput {
  /** Variable name to set (supports dynamic names via variable interpolation) */
  destinationVariableName: string;
  /** Value to assign (string or array of strings, supports variable interpolation) */
  value: string | string[];
  /** UI input type hint controlling the editor widget */
  type: "imageUrl" | "videoUrl" | "fileUrl" | "plaintext" | "textArray" | "imageUrlArray" | "videoUrlArray";
}
/** Result payload for the `setVariable` step. */
interface SetVariableStepOutput {
  /** The resolved variable name that was set */
  variableName: string;
  /** The resolved value that was assigned */
  value: string | string[];
}
2750
/** Input parameters for the `telegramSendAudio` step. */
interface TelegramSendAudioStepInput {
  /** Telegram bot token in "botId:token" format */
  botToken: string;
  /** Telegram chat ID to send the audio to */
  chatId: string;
  /** URL of the audio file to send */
  audioUrl: string;
  /** Send as a standard audio track ("audio") or as a voice note ("voice") */
  mode: "audio" | "voice";
  /** Optional caption text for the audio */
  caption?: string;
}
/** The `telegramSendAudio` step produces no structured output. */
type TelegramSendAudioStepOutput = unknown;
/** Input parameters for the `telegramSendFile` step. */
interface TelegramSendFileStepInput {
  /** Telegram bot token in "botId:token" format */
  botToken: string;
  /** Telegram chat ID to send the file to */
  chatId: string;
  /** URL of the document/file to send */
  fileUrl: string;
  /** Optional caption text for the file */
  caption?: string;
}
/** The `telegramSendFile` step produces no structured output. */
type TelegramSendFileStepOutput = unknown;
/** Input parameters for the `telegramSendImage` step. */
interface TelegramSendImageStepInput {
  /** Telegram bot token in "botId:token" format */
  botToken: string;
  /** Telegram chat ID to send the image to */
  chatId: string;
  /** URL of the image to send */
  imageUrl: string;
  /** Optional caption text for the image */
  caption?: string;
}
/** The `telegramSendImage` step produces no structured output. */
type TelegramSendImageStepOutput = unknown;
/** Input parameters for the `telegramSendMessage` step. */
interface TelegramSendMessageStepInput {
  /** Telegram bot token in "botId:token" format */
  botToken: string;
  /** Telegram chat ID to send the message to */
  chatId: string;
  /** Message text to send (MarkdownV2 formatting supported) */
  text: string;
}
/** The `telegramSendMessage` step produces no structured output. */
type TelegramSendMessageStepOutput = unknown;
/** Input parameters for the `telegramSendVideo` step. */
interface TelegramSendVideoStepInput {
  /** Telegram bot token in "botId:token" format */
  botToken: string;
  /** Telegram chat ID to send the video to */
  chatId: string;
  /** URL of the video to send */
  videoUrl: string;
  /** Optional caption text for the video */
  caption?: string;
}
/** The `telegramSendVideo` step produces no structured output. */
type TelegramSendVideoStepOutput = unknown;
/** Input parameters for the `telegramSetTyping` step. */
interface TelegramSetTypingStepInput {
  /** Telegram bot token in "botId:token" format */
  botToken: string;
  /** Telegram chat ID to show the typing indicator in */
  chatId: string;
}
/** The `telegramSetTyping` step produces no structured output. */
type TelegramSetTypingStepOutput = unknown;
2812
/** Input parameters for the `textToSpeech` step. */
interface TextToSpeechStepInput {
  /** The text to convert to speech */
  text: string;
  /** foreground = display audio to user, background = save URL to variable */
  mode: "foreground" | "background";
  /** Variable name to save the audio URL into (used in background mode) */
  destinationVar?: string;
  /** NOTE(review): undocumented upstream — presumably hides the result from the user's asset history, as in sibling media steps; confirm */
  skipAssetCreation?: boolean;
  /** Optional model configuration override. Uses the workflow's default speech model if not specified */
  speechModelOverride?: {
    /** Speech synthesis model identifier */
    model: string;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
}
/** Result payload for the `textToSpeech` step. */
interface TextToSpeechStepOutput {
  /** URL of the generated audio file */
  audioUrl: string;
}
/** Input parameters for the `transcribeAudio` step. */
interface TranscribeAudioStepInput {
  /** URL of the audio file to transcribe */
  audioUrl: string;
  /** Optional context to improve transcription accuracy (e.g. language, speaker names, domain terms) */
  prompt: string;
  /** Variable name to save the transcribed text into */
  destinationVar?: string;
  /** Optional model configuration override. Uses the workflow's default transcription model if not specified */
  transcriptionModelOverride?: {
    /** Audio transcription model identifier */
    model: string;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
}
/** Result payload for the `transcribeAudio` step. */
interface TranscribeAudioStepOutput {
  /** The transcribed text from the audio file */
  text: string;
}
2851
/** Input parameters for the `trimMedia` step. */
interface TrimMediaStepInput {
  /** URL of the source audio or video file to trim */
  inputUrl: string;
  /** Variable name to store the output URL */
  destinationVar: string;
  /** Start position in seconds for the trim */
  start?: number | string;
  /** Duration of the trimmed segment in seconds. Omit to trim to the end of the clip. */
  duration?: string | number;
  /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
  skipAssetCreation?: boolean;
}
/** Result payload for the `trimMedia` step. */
interface TrimMediaStepOutput {
  /** URL of the trimmed media file */
  mediaUrl: string;
}
2867
/** Input parameters for the `updateGoogleCalendarEvent` step. */
interface UpdateGoogleCalendarEventStepInput {
  /** Google OAuth connection ID */
  connectionId: string;
  /** Google Calendar event ID to update */
  eventId: string;
  /** Updated event title */
  summary?: string;
  /** Updated event description */
  description?: string;
  /** Updated event location */
  location?: string;
  /** Updated start time in ISO 8601 format */
  startDateTime?: string;
  /** Updated end time in ISO 8601 format */
  endDateTime?: string;
  /** Updated attendee email addresses (one per line, replaces all existing attendees) */
  attendees?: string;
  /** Variable to store the result (JSON with eventId and htmlLink) */
  destinationVar?: string;
  /** Calendar ID (defaults to "primary" if omitted) */
  calendarId?: string;
}
/** Result payload for the `updateGoogleCalendarEvent` step. */
interface UpdateGoogleCalendarEventStepOutput {
  /** Google Calendar event ID */
  eventId: string;
  /** URL to view the updated event in Google Calendar */
  htmlLink: string;
}
/** Input parameters for the `updateGoogleDoc` step. */
interface UpdateGoogleDocStepInput {
  /** Google Document ID to update */
  documentId: string;
  /** Google OAuth connection ID */
  connectionId: string;
  /** New content to write to the document */
  text: string;
  /** Format of the text field: "plain", "html", or "markdown" */
  textType: "plain" | "html" | "markdown";
  /** How to apply the content: "addToTop", "addToBottom", or "overwrite" */
  operationType: "addToTop" | "addToBottom" | "overwrite";
}
/** Result payload for the `updateGoogleDoc` step. */
interface UpdateGoogleDocStepOutput {
  /** URL of the updated Google Document */
  documentUrl: string;
}
/** Input parameters for the `updateGoogleSheet` step. */
interface UpdateGoogleSheetStepInput {
  /** CSV data to write to the spreadsheet */
  text: string;
  /** Google OAuth connection ID */
  connectionId: string;
  /** Google Spreadsheet ID to update */
  spreadsheetId: string;
  /** Target cell range in A1 notation (used with "range" operationType) */
  range: string;
  /** How to apply the data: "addToBottom", "overwrite", or "range" */
  operationType: "addToBottom" | "overwrite" | "range";
}
/** Result payload for the `updateGoogleSheet` step. */
interface UpdateGoogleSheetStepOutput {
  /** URL of the updated Google Spreadsheet */
  spreadsheetUrl: string;
}
2927
/** Input parameters for the `upscaleImage` step. */
interface UpscaleImageStepInput {
  /** URL of the image to upscale */
  imageUrl: string;
  /** Variable name to store the output URL */
  destinationVar?: string;
  /** Target output resolution */
  targetResolution: "2k" | "4k" | "8k";
  /** Upscaling engine quality tier */
  engine: "standard" | "pro";
}
/** Result payload for the `upscaleImage` step. */
interface UpscaleImageStepOutput {
  /** CDN URL of the upscaled image (PNG) */
  imageUrl: string;
}
/**
 * Input parameters for the `upscaleVideo` step.
 *
 * NOTE(review): resolution literals here are uppercase ("2K"/"4K") while
 * `upscaleImage` uses lowercase ("2k"/"4k") — both match their respective
 * wire formats; do not normalize.
 */
interface UpscaleVideoStepInput {
  /** URL of the source video to upscale */
  videoUrl: string;
  /** Variable name to store the output URL */
  destinationVar: string;
  /** Target output resolution for the upscaled video */
  targetResolution: "720p" | "1080p" | "2K" | "4K";
  /** Upscaling engine to use. Higher tiers produce better quality at higher cost. */
  engine: "standard" | "pro" | "ultimate" | "flashvsr" | "seedance" | "seedvr2" | "runwayml/upscale-v1";
  /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
  skipAssetCreation?: boolean;
}
/** Result payload for the `upscaleVideo` step. */
interface UpscaleVideoStepOutput {
  /** URL of the upscaled video */
  videoUrl: string;
}
2957
/** Input parameters for the `userMessage` step. */
interface UserMessageStepInput {
  /** The message to send (prompt for AI, or text for system echo) */
  message: string;
  /** Output behavior: background saves to variable, foreground streams to user. In direct execution, this is ignored and all requests are "background" */
  mode?: "foreground" | "background";
  /** Message source: "user" sends to AI model, "system" echoes message content directly */
  source: "user" | "system";
  /** Variable name to save the response to */
  destinationVar?: string | null;
  /** Model configuration override. Optional; uses the workflow's default model if not specified */
  modelOverride?: {
    /** Model identifier (e.g. "gpt-4", "claude-3-opus") */
    model: string;
    /** Sampling temperature for the model (0-2) */
    temperature: number;
    /** Maximum number of tokens in the model's response */
    maxResponseTokens: number;
    /** Whether to skip the system preamble/instructions */
    ignorePreamble?: boolean;
    /** Preprocessor applied to user messages before sending to the model */
    userMessagePreprocessor?: {
      /** Data source identifier for the preprocessor */
      dataSource?: string;
      /** Template string applied to user messages before sending to the model */
      messageTemplate?: string;
      /** Maximum number of results to include from the data source */
      maxResults?: number;
      /** Whether the preprocessor is active */
      enabled?: boolean;
      /** Whether child steps should inherit this preprocessor configuration */
      shouldInherit?: boolean;
    };
    /** System preamble/instructions for the model */
    preamble?: string;
    /** Whether multi-model candidate generation is enabled */
    multiModelEnabled?: boolean;
    /** Whether the user can edit the model's response */
    editResponseEnabled?: boolean;
    /** Additional model-specific configuration */
    config?: Record<string, unknown>;
  };
  /** Output format constraint for structured responses */
  structuredOutputType?: "text" | "json" | "csv";
  /** Sample showing the desired output shape (for JSON/CSV formats). A TypeScript interface is also useful here for more complex types. */
  structuredOutputExample?: string;
  /** Media display type for foreground system messages. This must be used with mode=foreground and the content of the message must only be a URL to the media asset. */
  systemDisplayType?: "message" | "pdf" | "csv" | "image" | "html" | "video" | "xlsx" | "docx";
  /** Whether to include or exclude prior chat history in the AI context */
  chatHistoryMode?: "include" | "exclude";
  /** Transition control mode. Leave unset. */
  transitionControl?: "default" | "native";
  /** Share button visibility control */
  shareControl?: "default" | "hidden";
}
/** Result payload for the `userMessage` step. */
interface UserMessageStepOutput {
  /** The AI model's response or echoed system message content */
  content: string;
}
3015
/** Input parameters for the `videoFaceSwap` step. */
interface VideoFaceSwapStepInput {
  /** URL of the source video containing faces to swap */
  videoUrl: string;
  /** Variable name to store the output URL */
  destinationVar: string;
  /** URL of the image containing the replacement face */
  faceImageUrl: string;
  /** Zero-based index of the face to replace in the video */
  targetIndex: number;
  /** Face swap engine to use */
  engine: string;
  /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
  skipAssetCreation?: boolean;
}
/** Result payload for the `videoFaceSwap` step. */
interface VideoFaceSwapStepOutput {
  /** URL of the face-swapped video */
  videoUrl: string;
}
/** Input parameters for the `videoRemoveBackground` step. */
interface VideoRemoveBackgroundStepInput {
  /** URL of the source video */
  videoUrl: string;
  /** Variable name to store the output URL */
  destinationVar: string;
  /** Whether to make the background transparent or replace it with an image */
  newBackground: "transparent" | "image";
  /** URL of a replacement background image. Required when newBackground is 'image'. */
  newBackgroundImageUrl?: string;
  /** Background removal engine to use */
  engine: string;
  /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
  skipAssetCreation?: boolean;
}
/** Result payload for the `videoRemoveBackground` step. */
interface VideoRemoveBackgroundStepOutput {
  /** URL of the video with background removed or replaced */
  videoUrl: string;
}
/** Input parameters for the `videoRemoveWatermark` step. */
interface VideoRemoveWatermarkStepInput {
  /** URL of the source video containing a watermark */
  videoUrl: string;
  /** Variable name to store the output URL */
  destinationVar: string;
  /** Watermark removal engine to use */
  engine: string;
  /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
  skipAssetCreation?: boolean;
}
/** Result payload for the `videoRemoveWatermark` step. */
interface VideoRemoveWatermarkStepOutput {
  /** URL of the video with watermark removed */
  videoUrl: string;
}
3065
/** Input parameters for the `watermarkImage` step. */
interface WatermarkImageStepInput {
  /** URL of the base image */
  imageUrl: string;
  /** URL of the watermark image to overlay */
  watermarkImageUrl: string;
  /** Variable name to store the output URL */
  destinationVar?: string;
  /** Corner position for the watermark placement */
  corner: "top-left" | "top-right" | "bottom-left" | "bottom-right";
  /** Padding from the corner in pixels */
  paddingPx: number;
  /** Width of the watermark overlay in pixels */
  widthPx: number;
  /** When true, the result will not appear in the user's asset history */
  skipAssetCreation?: boolean;
}
/** Result payload for the `watermarkImage` step. */
interface WatermarkImageStepOutput {
  /** CDN URL of the watermarked image */
  imageUrl: string;
}
/**
 * Input parameters for the `watermarkVideo` step.
 *
 * NOTE(review): the watermark image here is `imageUrl`, whereas
 * `watermarkImage` calls it `watermarkImageUrl` — both names are part of the
 * published wire contract; do not normalize.
 */
interface WatermarkVideoStepInput {
  /** URL of the source video */
  videoUrl: string;
  /** URL of the watermark image to overlay */
  imageUrl: string;
  /** Variable name to store the output URL */
  destinationVar: string;
  /** Corner position for the watermark placement */
  corner: "top-left" | "top-right" | "bottom-left" | "bottom-right";
  /** Padding from the corner in pixels */
  paddingPx: number;
  /** Width of the watermark overlay in pixels */
  widthPx: number;
  /** When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps. */
  skipAssetCreation?: boolean;
}
/** Result payload for the `watermarkVideo` step. */
interface WatermarkVideoStepOutput {
  /** URL of the watermarked video */
  videoUrl: string;
}
3105
/**
 * Union of all available step type names.
 * Each member keys into {@link StepInputMap} for its input type.
 */
type StepName =
  | "activeCampaignAddNote" | "activeCampaignCreateContact" | "addSubtitlesToVideo"
  | "airtableCreateUpdateRecord" | "airtableDeleteRecord" | "airtableGetRecord"
  | "airtableGetTableRecords" | "analyzeImage" | "analyzeVideo" | "captureThumbnail"
  | "codaCreateUpdatePage" | "codaCreateUpdateRow" | "codaFindRow" | "codaGetPage"
  | "codaGetTableRows" | "convertPdfToImages" | "createGoogleCalendarEvent"
  | "createGoogleDoc" | "createGoogleSheet" | "deleteGoogleCalendarEvent"
  | "detectPII" | "downloadVideo" | "enhanceImageGenerationPrompt"
  | "enhanceVideoGenerationPrompt" | "enrichPerson" | "extractAudioFromVideo"
  | "extractText" | "fetchGoogleDoc" | "fetchGoogleSheet" | "fetchSlackChannelHistory"
  | "fetchYoutubeCaptions" | "fetchYoutubeChannel" | "fetchYoutubeComments"
  | "fetchYoutubeVideo" | "generateChart" | "generateImage" | "generateLipsync"
  | "generateMusic" | "generatePdf" | "generateStaticVideoFromImage" | "generateVideo"
  | "getGoogleCalendarEvent" | "getMediaMetadata" | "httpRequest"
  | "hubspotCreateCompany" | "hubspotCreateContact" | "hubspotGetCompany"
  | "hubspotGetContact" | "hunterApiCompanyEnrichment" | "hunterApiDomainSearch"
  | "hunterApiEmailFinder" | "hunterApiEmailVerification" | "hunterApiPersonEnrichment"
  | "imageFaceSwap" | "imageRemoveWatermark" | "insertVideoClips"
  | "listGoogleCalendarEvents" | "logic" | "makeDotComRunScenario" | "mergeAudio"
  | "mergeVideos" | "mixAudioIntoVideo" | "muteVideo" | "n8nRunNode"
  | "notionCreatePage" | "notionUpdatePage" | "peopleSearch" | "postToLinkedIn"
  | "postToSlackChannel" | "postToX" | "postToZapier" | "queryDataSource"
  | "queryExternalDatabase" | "redactPII" | "removeBackgroundFromImage"
  | "resizeVideo" | "runPackagedWorkflow" | "runWorkflow" | "scrapeFacebookPage"
  | "scrapeFacebookPosts" | "scrapeInstagramComments" | "scrapeInstagramMentions"
  | "scrapeInstagramPosts" | "scrapeInstagramProfile" | "scrapeInstagramReels"
  | "scrapeLinkedInCompany" | "scrapeLinkedInProfile" | "scrapeMetaThreadsProfile"
  | "scrapeUrl" | "scrapeXPost" | "scrapeXProfile" | "searchGoogle"
  | "searchGoogleImages" | "searchGoogleNews" | "searchGoogleTrends"
  | "searchPerplexity" | "searchXPosts" | "searchYoutube" | "searchYoutubeTrends"
  | "sendEmail" | "sendSMS" | "setRunTitle" | "setVariable" | "telegramSendAudio"
  | "telegramSendFile" | "telegramSendImage" | "telegramSendMessage"
  | "telegramSendVideo" | "telegramSetTyping" | "textToSpeech" | "transcribeAudio"
  | "trimMedia" | "updateGoogleCalendarEvent" | "updateGoogleDoc"
  | "updateGoogleSheet" | "upscaleImage" | "upscaleVideo" | "userMessage"
  | "videoFaceSwap" | "videoRemoveBackground" | "videoRemoveWatermark"
  | "watermarkImage" | "watermarkVideo";
3107
+ /** Maps step names to their input types. */
3108
+ interface StepInputMap {
3109
+ activeCampaignAddNote: ActiveCampaignAddNoteStepInput;
3110
+ activeCampaignCreateContact: ActiveCampaignCreateContactStepInput;
3111
+ addSubtitlesToVideo: AddSubtitlesToVideoStepInput;
3112
+ airtableCreateUpdateRecord: AirtableCreateUpdateRecordStepInput;
3113
+ airtableDeleteRecord: AirtableDeleteRecordStepInput;
3114
+ airtableGetRecord: AirtableGetRecordStepInput;
3115
+ airtableGetTableRecords: AirtableGetTableRecordsStepInput;
3116
+ analyzeImage: AnalyzeImageStepInput;
3117
+ analyzeVideo: AnalyzeVideoStepInput;
3118
+ captureThumbnail: CaptureThumbnailStepInput;
3119
+ codaCreateUpdatePage: CodaCreateUpdatePageStepInput;
3120
+ codaCreateUpdateRow: CodaCreateUpdateRowStepInput;
3121
+ codaFindRow: CodaFindRowStepInput;
3122
+ codaGetPage: CodaGetPageStepInput;
3123
+ codaGetTableRows: CodaGetTableRowsStepInput;
3124
+ convertPdfToImages: ConvertPdfToImagesStepInput;
3125
+ createGoogleCalendarEvent: CreateGoogleCalendarEventStepInput;
3126
+ createGoogleDoc: CreateGoogleDocStepInput;
3127
+ createGoogleSheet: CreateGoogleSheetStepInput;
3128
+ deleteGoogleCalendarEvent: DeleteGoogleCalendarEventStepInput;
3129
+ detectPII: DetectPIIStepInput;
3130
+ downloadVideo: DownloadVideoStepInput;
3131
+ enhanceImageGenerationPrompt: EnhanceImageGenerationPromptStepInput;
3132
+ enhanceVideoGenerationPrompt: EnhanceVideoGenerationPromptStepInput;
3133
+ enrichPerson: EnrichPersonStepInput;
3134
+ extractAudioFromVideo: ExtractAudioFromVideoStepInput;
3135
+ extractText: ExtractTextStepInput;
3136
+ fetchGoogleDoc: FetchGoogleDocStepInput;
3137
+ fetchGoogleSheet: FetchGoogleSheetStepInput;
3138
+ fetchSlackChannelHistory: FetchSlackChannelHistoryStepInput;
3139
+ fetchYoutubeCaptions: FetchYoutubeCaptionsStepInput;
3140
+ fetchYoutubeChannel: FetchYoutubeChannelStepInput;
3141
+ fetchYoutubeComments: FetchYoutubeCommentsStepInput;
3142
+ fetchYoutubeVideo: FetchYoutubeVideoStepInput;
3143
+ generateChart: GenerateChartStepInput;
3144
+ generateImage: GenerateImageStepInput;
3145
+ generateLipsync: GenerateLipsyncStepInput;
3146
+ generateMusic: GenerateMusicStepInput;
3147
+ generatePdf: GeneratePdfStepInput;
3148
+ generateStaticVideoFromImage: GenerateStaticVideoFromImageStepInput;
3149
+ generateVideo: GenerateVideoStepInput;
3150
+ getGoogleCalendarEvent: GetGoogleCalendarEventStepInput;
3151
+ getMediaMetadata: GetMediaMetadataStepInput;
3152
+ httpRequest: HttpRequestStepInput;
3153
+ hubspotCreateCompany: HubspotCreateCompanyStepInput;
3154
+ hubspotCreateContact: HubspotCreateContactStepInput;
3155
+ hubspotGetCompany: HubspotGetCompanyStepInput;
3156
+ hubspotGetContact: HubspotGetContactStepInput;
3157
+ hunterApiCompanyEnrichment: HunterApiCompanyEnrichmentStepInput;
3158
+ hunterApiDomainSearch: HunterApiDomainSearchStepInput;
3159
+ hunterApiEmailFinder: HunterApiEmailFinderStepInput;
3160
+ hunterApiEmailVerification: HunterApiEmailVerificationStepInput;
3161
+ hunterApiPersonEnrichment: HunterApiPersonEnrichmentStepInput;
3162
+ imageFaceSwap: ImageFaceSwapStepInput;
3163
+ imageRemoveWatermark: ImageRemoveWatermarkStepInput;
3164
+ insertVideoClips: InsertVideoClipsStepInput;
3165
+ listGoogleCalendarEvents: ListGoogleCalendarEventsStepInput;
3166
+ logic: LogicStepInput;
3167
+ makeDotComRunScenario: MakeDotComRunScenarioStepInput;
3168
+ mergeAudio: MergeAudioStepInput;
3169
+ mergeVideos: MergeVideosStepInput;
3170
+ mixAudioIntoVideo: MixAudioIntoVideoStepInput;
3171
+ muteVideo: MuteVideoStepInput;
3172
+ n8nRunNode: N8nRunNodeStepInput;
3173
+ notionCreatePage: NotionCreatePageStepInput;
3174
+ notionUpdatePage: NotionUpdatePageStepInput;
3175
+ peopleSearch: PeopleSearchStepInput;
3176
+ postToLinkedIn: PostToLinkedInStepInput;
3177
+ postToSlackChannel: PostToSlackChannelStepInput;
3178
+ postToX: PostToXStepInput;
3179
+ postToZapier: PostToZapierStepInput;
3180
+ queryDataSource: QueryDataSourceStepInput;
3181
+ queryExternalDatabase: QueryExternalDatabaseStepInput;
3182
+ redactPII: RedactPIIStepInput;
3183
+ removeBackgroundFromImage: RemoveBackgroundFromImageStepInput;
3184
+ resizeVideo: ResizeVideoStepInput;
3185
+ runPackagedWorkflow: RunPackagedWorkflowStepInput;
3186
+ runWorkflow: RunWorkflowStepInput;
3187
+ scrapeFacebookPage: ScrapeFacebookPageStepInput;
3188
+ scrapeFacebookPosts: ScrapeFacebookPostsStepInput;
3189
+ scrapeInstagramComments: ScrapeInstagramCommentsStepInput;
3190
+ scrapeInstagramMentions: ScrapeInstagramMentionsStepInput;
3191
+ scrapeInstagramPosts: ScrapeInstagramPostsStepInput;
3192
+ scrapeInstagramProfile: ScrapeInstagramProfileStepInput;
3193
+ scrapeInstagramReels: ScrapeInstagramReelsStepInput;
3194
+ scrapeLinkedInCompany: ScrapeLinkedInCompanyStepInput;
3195
+ scrapeLinkedInProfile: ScrapeLinkedInProfileStepInput;
3196
+ scrapeMetaThreadsProfile: ScrapeMetaThreadsProfileStepInput;
3197
+ scrapeUrl: ScrapeUrlStepInput;
3198
+ scrapeXPost: ScrapeXPostStepInput;
3199
+ scrapeXProfile: ScrapeXProfileStepInput;
3200
+ searchGoogle: SearchGoogleStepInput;
3201
+ searchGoogleImages: SearchGoogleImagesStepInput;
3202
+ searchGoogleNews: SearchGoogleNewsStepInput;
3203
+ searchGoogleTrends: SearchGoogleTrendsStepInput;
3204
+ searchPerplexity: SearchPerplexityStepInput;
3205
+ searchXPosts: SearchXPostsStepInput;
3206
+ searchYoutube: SearchYoutubeStepInput;
3207
+ searchYoutubeTrends: SearchYoutubeTrendsStepInput;
3208
+ sendEmail: SendEmailStepInput;
3209
+ sendSMS: SendSMSStepInput;
3210
+ setRunTitle: SetRunTitleStepInput;
3211
+ setVariable: SetVariableStepInput;
3212
+ telegramSendAudio: TelegramSendAudioStepInput;
3213
+ telegramSendFile: TelegramSendFileStepInput;
3214
+ telegramSendImage: TelegramSendImageStepInput;
3215
+ telegramSendMessage: TelegramSendMessageStepInput;
3216
+ telegramSendVideo: TelegramSendVideoStepInput;
3217
+ telegramSetTyping: TelegramSetTypingStepInput;
3218
+ textToSpeech: TextToSpeechStepInput;
3219
+ transcribeAudio: TranscribeAudioStepInput;
3220
+ trimMedia: TrimMediaStepInput;
3221
+ updateGoogleCalendarEvent: UpdateGoogleCalendarEventStepInput;
3222
+ updateGoogleDoc: UpdateGoogleDocStepInput;
3223
+ updateGoogleSheet: UpdateGoogleSheetStepInput;
3224
+ upscaleImage: UpscaleImageStepInput;
3225
+ upscaleVideo: UpscaleVideoStepInput;
3226
+ userMessage: UserMessageStepInput;
3227
+ videoFaceSwap: VideoFaceSwapStepInput;
3228
+ videoRemoveBackground: VideoRemoveBackgroundStepInput;
3229
+ videoRemoveWatermark: VideoRemoveWatermarkStepInput;
3230
+ watermarkImage: WatermarkImageStepInput;
3231
+ watermarkVideo: WatermarkVideoStepInput;
3232
+ }
3233
+ /** Maps step names to their output types. */
3234
+ interface StepOutputMap {
3235
+ activeCampaignAddNote: ActiveCampaignAddNoteStepOutput;
3236
+ activeCampaignCreateContact: ActiveCampaignCreateContactStepOutput;
3237
+ addSubtitlesToVideo: AddSubtitlesToVideoStepOutput;
3238
+ airtableCreateUpdateRecord: AirtableCreateUpdateRecordStepOutput;
3239
+ airtableDeleteRecord: AirtableDeleteRecordStepOutput;
3240
+ airtableGetRecord: AirtableGetRecordStepOutput;
3241
+ airtableGetTableRecords: AirtableGetTableRecordsStepOutput;
3242
+ analyzeImage: AnalyzeImageStepOutput;
3243
+ analyzeVideo: AnalyzeVideoStepOutput;
3244
+ captureThumbnail: CaptureThumbnailStepOutput;
3245
+ codaCreateUpdatePage: CodaCreateUpdatePageStepOutput;
3246
+ codaCreateUpdateRow: CodaCreateUpdateRowStepOutput;
3247
+ codaFindRow: CodaFindRowStepOutput;
3248
+ codaGetPage: CodaGetPageStepOutput;
3249
+ codaGetTableRows: CodaGetTableRowsStepOutput;
3250
+ convertPdfToImages: ConvertPdfToImagesStepOutput;
3251
+ createGoogleCalendarEvent: CreateGoogleCalendarEventStepOutput;
3252
+ createGoogleDoc: CreateGoogleDocStepOutput;
3253
+ createGoogleSheet: CreateGoogleSheetStepOutput;
3254
+ deleteGoogleCalendarEvent: DeleteGoogleCalendarEventStepOutput;
3255
+ detectPII: DetectPIIStepOutput;
3256
+ downloadVideo: DownloadVideoStepOutput;
3257
+ enhanceImageGenerationPrompt: EnhanceImageGenerationPromptStepOutput;
3258
+ enhanceVideoGenerationPrompt: EnhanceVideoGenerationPromptStepOutput;
3259
+ enrichPerson: EnrichPersonStepOutput;
3260
+ extractAudioFromVideo: ExtractAudioFromVideoStepOutput;
3261
+ extractText: ExtractTextStepOutput;
3262
+ fetchGoogleDoc: FetchGoogleDocStepOutput;
3263
+ fetchGoogleSheet: FetchGoogleSheetStepOutput;
3264
+ fetchSlackChannelHistory: FetchSlackChannelHistoryStepOutput;
3265
+ fetchYoutubeCaptions: FetchYoutubeCaptionsStepOutput;
3266
+ fetchYoutubeChannel: FetchYoutubeChannelStepOutput;
3267
+ fetchYoutubeComments: FetchYoutubeCommentsStepOutput;
3268
+ fetchYoutubeVideo: FetchYoutubeVideoStepOutput;
3269
+ generateChart: GenerateChartStepOutput;
3270
+ generateImage: GenerateImageStepOutput;
3271
+ generateLipsync: GenerateLipsyncStepOutput;
3272
+ generateMusic: GenerateMusicStepOutput;
3273
+ generatePdf: GeneratePdfStepOutput;
3274
+ generateStaticVideoFromImage: GenerateStaticVideoFromImageStepOutput;
3275
+ generateVideo: GenerateVideoStepOutput;
3276
+ getGoogleCalendarEvent: GetGoogleCalendarEventStepOutput;
3277
+ getMediaMetadata: GetMediaMetadataStepOutput;
3278
+ httpRequest: HttpRequestStepOutput;
3279
+ hubspotCreateCompany: HubspotCreateCompanyStepOutput;
3280
+ hubspotCreateContact: HubspotCreateContactStepOutput;
3281
+ hubspotGetCompany: HubspotGetCompanyStepOutput;
3282
+ hubspotGetContact: HubspotGetContactStepOutput;
3283
+ hunterApiCompanyEnrichment: HunterApiCompanyEnrichmentStepOutput;
3284
+ hunterApiDomainSearch: HunterApiDomainSearchStepOutput;
3285
+ hunterApiEmailFinder: HunterApiEmailFinderStepOutput;
3286
+ hunterApiEmailVerification: HunterApiEmailVerificationStepOutput;
3287
+ hunterApiPersonEnrichment: HunterApiPersonEnrichmentStepOutput;
3288
+ imageFaceSwap: ImageFaceSwapStepOutput;
3289
+ imageRemoveWatermark: ImageRemoveWatermarkStepOutput;
3290
+ insertVideoClips: InsertVideoClipsStepOutput;
3291
+ listGoogleCalendarEvents: ListGoogleCalendarEventsStepOutput;
3292
+ logic: LogicStepOutput;
3293
+ makeDotComRunScenario: MakeDotComRunScenarioStepOutput;
3294
+ mergeAudio: MergeAudioStepOutput;
3295
+ mergeVideos: MergeVideosStepOutput;
3296
+ mixAudioIntoVideo: MixAudioIntoVideoStepOutput;
3297
+ muteVideo: MuteVideoStepOutput;
3298
+ n8nRunNode: N8nRunNodeStepOutput;
3299
+ notionCreatePage: NotionCreatePageStepOutput;
3300
+ notionUpdatePage: NotionUpdatePageStepOutput;
3301
+ peopleSearch: PeopleSearchStepOutput;
3302
+ postToLinkedIn: PostToLinkedInStepOutput;
3303
+ postToSlackChannel: PostToSlackChannelStepOutput;
3304
+ postToX: PostToXStepOutput;
3305
+ postToZapier: PostToZapierStepOutput;
3306
+ queryDataSource: QueryDataSourceStepOutput;
3307
+ queryExternalDatabase: QueryExternalDatabaseStepOutput;
3308
+ redactPII: RedactPIIStepOutput;
3309
+ removeBackgroundFromImage: RemoveBackgroundFromImageStepOutput;
3310
+ resizeVideo: ResizeVideoStepOutput;
3311
+ runPackagedWorkflow: RunPackagedWorkflowStepOutput;
3312
+ runWorkflow: RunWorkflowStepOutput;
3313
+ scrapeFacebookPage: ScrapeFacebookPageStepOutput;
3314
+ scrapeFacebookPosts: ScrapeFacebookPostsStepOutput;
3315
+ scrapeInstagramComments: ScrapeInstagramCommentsStepOutput;
3316
+ scrapeInstagramMentions: ScrapeInstagramMentionsStepOutput;
3317
+ scrapeInstagramPosts: ScrapeInstagramPostsStepOutput;
3318
+ scrapeInstagramProfile: ScrapeInstagramProfileStepOutput;
3319
+ scrapeInstagramReels: ScrapeInstagramReelsStepOutput;
3320
+ scrapeLinkedInCompany: ScrapeLinkedInCompanyStepOutput;
3321
+ scrapeLinkedInProfile: ScrapeLinkedInProfileStepOutput;
3322
+ scrapeMetaThreadsProfile: ScrapeMetaThreadsProfileStepOutput;
3323
+ scrapeUrl: ScrapeUrlStepOutput;
3324
+ scrapeXPost: ScrapeXPostStepOutput;
3325
+ scrapeXProfile: ScrapeXProfileStepOutput;
3326
+ searchGoogle: SearchGoogleStepOutput;
3327
+ searchGoogleImages: SearchGoogleImagesStepOutput;
3328
+ searchGoogleNews: SearchGoogleNewsStepOutput;
3329
+ searchGoogleTrends: SearchGoogleTrendsStepOutput;
3330
+ searchPerplexity: SearchPerplexityStepOutput;
3331
+ searchXPosts: SearchXPostsStepOutput;
3332
+ searchYoutube: SearchYoutubeStepOutput;
3333
+ searchYoutubeTrends: SearchYoutubeTrendsStepOutput;
3334
+ sendEmail: SendEmailStepOutput;
3335
+ sendSMS: SendSMSStepOutput;
3336
+ setRunTitle: SetRunTitleStepOutput;
3337
+ setVariable: SetVariableStepOutput;
3338
+ telegramSendAudio: TelegramSendAudioStepOutput;
3339
+ telegramSendFile: TelegramSendFileStepOutput;
3340
+ telegramSendImage: TelegramSendImageStepOutput;
3341
+ telegramSendMessage: TelegramSendMessageStepOutput;
3342
+ telegramSendVideo: TelegramSendVideoStepOutput;
3343
+ telegramSetTyping: TelegramSetTypingStepOutput;
3344
+ textToSpeech: TextToSpeechStepOutput;
3345
+ transcribeAudio: TranscribeAudioStepOutput;
3346
+ trimMedia: TrimMediaStepOutput;
3347
+ updateGoogleCalendarEvent: UpdateGoogleCalendarEventStepOutput;
3348
+ updateGoogleDoc: UpdateGoogleDocStepOutput;
3349
+ updateGoogleSheet: UpdateGoogleSheetStepOutput;
3350
+ upscaleImage: UpscaleImageStepOutput;
3351
+ upscaleVideo: UpscaleVideoStepOutput;
3352
+ userMessage: UserMessageStepOutput;
3353
+ videoFaceSwap: VideoFaceSwapStepOutput;
3354
+ videoRemoveBackground: VideoRemoveBackgroundStepOutput;
3355
+ videoRemoveWatermark: VideoRemoveWatermarkStepOutput;
3356
+ watermarkImage: WatermarkImageStepOutput;
3357
+ watermarkVideo: WatermarkVideoStepOutput;
3358
+ }
3359
+
3360
+ declare module "../client.js" {
3361
+ interface MindStudioAgent {
3362
+ /**
3363
+ * [ActiveCampaign] Add Note
3364
+ *
3365
+ * Add a note to an existing contact in ActiveCampaign.
3366
+ *
3367
+ * ## Usage Notes
3368
+ * - Requires an ActiveCampaign OAuth connection (connectionId).
3369
+ * - The contact must already exist — use the contact ID from a previous create or search step.
3370
+ */
3371
+ activeCampaignAddNote(step: ActiveCampaignAddNoteStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ActiveCampaignAddNoteStepOutput>>;
3372
+ /**
3373
+ * [ActiveCampaign] Create Contact
3374
+ *
3375
+ * Create or sync a contact in ActiveCampaign.
3376
+ *
3377
+ * ## Usage Notes
3378
+ * - Requires an ActiveCampaign OAuth connection (connectionId).
3379
+ * - If a contact with the email already exists, it may be updated depending on ActiveCampaign settings.
3380
+ * - Custom fields are passed as a key-value map where keys are field IDs.
3381
+ */
3382
+ activeCampaignCreateContact(step: ActiveCampaignCreateContactStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ActiveCampaignCreateContactStepOutput>>;
3383
+ /**
3384
+ * Add Subtitles To Video
3385
+ *
3386
+ * Automatically add subtitles to a video
3387
+ *
3388
+ * ## Usage Notes
3389
+ * - Can control style of text and animation
3390
+ */
3391
+ addSubtitlesToVideo(step: AddSubtitlesToVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AddSubtitlesToVideoStepOutput>>;
3392
+ /**
3393
+ * [Airtable] Create/Update record
3394
+ *
3395
+ * Create a new record or update an existing record in an Airtable table.
3396
+ *
3397
+ * ## Usage Notes
3398
+ * - If recordId is provided, updates that record. Otherwise, creates a new one.
3399
+ * - When updating with updateMode "onlySpecified", unspecified fields are left as-is. With "all", unspecified fields are cleared.
3400
+ * - Array fields (e.g. multipleAttachments) accept arrays of values.
3401
+ */
3402
+ airtableCreateUpdateRecord(step: AirtableCreateUpdateRecordStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AirtableCreateUpdateRecordStepOutput>>;
3403
+ /**
3404
+ * [Airtable] Delete record
3405
+ *
3406
+ * Delete a record from an Airtable table by its record ID.
3407
+ *
3408
+ * ## Usage Notes
3409
+ * - Requires an active Airtable OAuth connection (connectionId).
3410
+ * - Silently succeeds if the record does not exist.
3411
+ */
3412
+ airtableDeleteRecord(step: AirtableDeleteRecordStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AirtableDeleteRecordStepOutput>>;
3413
+ /**
3414
+ * [Airtable] Get record
3415
+ *
3416
+ * Fetch a single record from an Airtable table by its record ID.
3417
+ *
3418
+ * ## Usage Notes
3419
+ * - Requires an active Airtable OAuth connection (connectionId).
3420
+ * - If the record is not found, returns a string message instead of a record object.
3421
+ */
3422
+ airtableGetRecord(step: AirtableGetRecordStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AirtableGetRecordStepOutput>>;
3423
+ /**
3424
+ * [Airtable] Get table records
3425
+ *
3426
+ * Fetch multiple records from an Airtable table with optional pagination.
3427
+ *
3428
+ * ## Usage Notes
3429
+ * - Requires an active Airtable OAuth connection (connectionId).
3430
+ * - Default limit is 100 records. Maximum is 1000.
3431
+ * - When outputFormat is 'csv', the variable receives CSV text. The direct execution output always returns parsed records.
3432
+ */
3433
+ airtableGetTableRecords(step: AirtableGetTableRecordsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AirtableGetTableRecordsStepOutput>>;
3434
+ /**
3435
+ * Analyze Image
3436
+ *
3437
+ * Analyze an image using a vision model based on a text prompt.
3438
+ *
3439
+ * ## Usage Notes
3440
+ * - Uses the configured vision model to generate a text analysis of the image.
3441
+ * - The prompt should describe what to look for or extract from the image.
3442
+ */
3443
+ analyzeImage(step: AnalyzeImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AnalyzeImageStepOutput>>;
3444
+ /**
3445
+ * Analyze Video
3446
+ *
3447
+ * Analyze a video using a video analysis model based on a text prompt.
3448
+ *
3449
+ * ## Usage Notes
3450
+ * - Uses the configured video analysis model to generate a text analysis of the video.
3451
+ * - The prompt should describe what to look for or extract from the video.
3452
+ */
3453
+ analyzeVideo(step: AnalyzeVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<AnalyzeVideoStepOutput>>;
3454
+ /**
3455
+ * Get Image from Video Frame
3456
+ *
3457
+ * Capture a thumbnail from a video at a specified timestamp
3458
+ *
3459
+ * ## Usage Notes
3460
+ *
3461
+ */
3462
+ captureThumbnail(step: CaptureThumbnailStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CaptureThumbnailStepOutput>>;
3463
+ /**
3464
+ * [Coda] Create/Update page
3465
+ *
3466
+ * Create a new page or update an existing page in a Coda document.
3467
+ *
3468
+ * ## Usage Notes
3469
+ * - Requires a Coda OAuth connection (connectionId).
3470
+ * - If pageData.pageId is provided, updates that page. Otherwise, creates a new one.
3471
+ * - Page content is provided as markdown and converted to Coda's canvas format.
3472
+ * - When updating, insertionMode controls how content is applied (default: 'append').
3473
+ */
3474
+ codaCreateUpdatePage(step: CodaCreateUpdatePageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CodaCreateUpdatePageStepOutput>>;
3475
+ /**
3476
+ * [Coda] Create/Update row
3477
+ *
3478
+ * Create a new row or update an existing row in a Coda table.
3479
+ *
3480
+ * ## Usage Notes
3481
+ * - Requires a Coda OAuth connection (connectionId).
3482
+ * - If rowId is provided, updates that row. Otherwise, creates a new one.
3483
+ * - Row data keys are column IDs. Empty values are excluded.
3484
+ */
3485
+ codaCreateUpdateRow(step: CodaCreateUpdateRowStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CodaCreateUpdateRowStepOutput>>;
3486
+ /**
3487
+ * [Coda] Find row
3488
+ *
3489
+ * Search for a row in a Coda table by matching column values.
3490
+ *
3491
+ * ## Usage Notes
3492
+ * - Requires a Coda OAuth connection (connectionId).
3493
+ * - Returns the first row matching all specified column values, or null if no match.
3494
+ * - Search criteria in rowData are ANDed together.
3495
+ */
3496
+ codaFindRow(step: CodaFindRowStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CodaFindRowStepOutput>>;
3497
+ /**
3498
+ * [Coda] Get page
3499
+ *
3500
+ * Export and read the contents of a page from a Coda document.
3501
+ *
3502
+ * ## Usage Notes
3503
+ * - Requires a Coda OAuth connection (connectionId).
3504
+ * - Page export is asynchronous on Coda's side — there may be a brief delay while it processes.
3505
+ * - If a page was just created in a prior step, there is an automatic 20-second retry if the first export attempt fails.
3506
+ */
3507
+ codaGetPage(step: CodaGetPageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CodaGetPageStepOutput>>;
3508
+ /**
3509
+ * [Coda] Get table rows
3510
+ *
3511
+ * Fetch rows from a Coda table with optional pagination.
3512
+ *
3513
+ * ## Usage Notes
3514
+ * - Requires a Coda OAuth connection (connectionId).
3515
+ * - Default limit is 10000 rows. Rows are fetched in pages of 500.
3516
+ * - When outputFormat is 'csv', the variable receives CSV text. The direct execution output always returns parsed rows.
3517
+ */
3518
+ codaGetTableRows(step: CodaGetTableRowsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CodaGetTableRowsStepOutput>>;
3519
+ /**
3520
+ * Convert PDF to Images
3521
+ *
3522
+ * Convert each page of a PDF document into a PNG image.
3523
+ *
3524
+ * ## Usage Notes
3525
+ * - Each page is converted to a separate PNG and re-hosted on the CDN.
3526
+ * - Returns an array of image URLs, one per page.
3527
+ */
3528
+ convertPdfToImages(step: ConvertPdfToImagesStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ConvertPdfToImagesStepOutput>>;
3529
+ /**
3530
+ * [Google Calendar] Create Event
3531
+ *
3532
+ * Create a new event on a Google Calendar.
3533
+ *
3534
+ * ## Usage Notes
3535
+ * - Requires a Google OAuth connection with Calendar events scope.
3536
+ * - Date/time values must be ISO 8601 format (e.g. "2025-07-02T10:00:00-07:00").
3537
+ * - Attendees are specified as one email address per line in a single string.
3538
+ * - Set addMeetLink to true to automatically attach a Google Meet video call.
3539
+ */
3540
+ createGoogleCalendarEvent(step: CreateGoogleCalendarEventStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CreateGoogleCalendarEventStepOutput>>;
3541
+ /**
3542
+ * [Google] Create Google Doc
3543
+ *
3544
+ * Create a new Google Document and optionally populate it with content.
3545
+ *
3546
+ * ## Usage Notes
3547
+ * - textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.
3548
+ */
3549
+ createGoogleDoc(step: CreateGoogleDocStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CreateGoogleDocStepOutput>>;
3550
+ /**
3551
+ * [Google] Create Google Sheet
3552
+ *
3553
+ * Create a new Google Spreadsheet and populate it with CSV data.
3554
+ */
3555
+ createGoogleSheet(step: CreateGoogleSheetStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<CreateGoogleSheetStepOutput>>;
3556
+ /**
3557
+ * [Google Calendar] Delete Event
3558
+ *
3559
+ * Delete a specific event from a Google Calendar by event ID.
3560
+ *
3561
+ * ## Usage Notes
3562
+ * - Requires a Google OAuth connection with Calendar events scope.
3563
+ * - The event is removed from the specified calendar; this doc previously duplicated the Get Event description by mistake.
3564
+ */
3565
+ deleteGoogleCalendarEvent(step: DeleteGoogleCalendarEventStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<DeleteGoogleCalendarEventStepOutput>>;
3566
+ /**
3567
+ * Detect PII
3568
+ *
3569
+ * Scan text for personally identifiable information using Microsoft Presidio.
3570
+ *
3571
+ * ## Usage Notes
3572
+ * - In workflow mode, transitions to detectedStepId if PII is found, notDetectedStepId otherwise.
3573
+ * - In direct execution, returns the detection results without transitioning.
3574
+ * - If entities is empty, returns immediately with no detections.
3575
+ */
3576
+ detectPII(step: DetectPIIStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<DetectPIIStepOutput>>;
3577
+ /**
3578
+ * Download Video
3579
+ *
3580
+ * Download a video file
3581
+ *
3582
+ * ## Usage Notes
3583
+ * - Works with YouTube, TikTok, etc., by using ytdlp behind the scenes
3584
+ * - Can save as mp4 or mp3
3585
+ */
3586
+ downloadVideo(step: DownloadVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<DownloadVideoStepOutput>>;
3587
+ /**
3588
+ * Enhance Image Prompt
3589
+ *
3590
+ * Auto-enhance an image generation prompt using a language model. Optionally generates a negative prompt.
3591
+ *
3592
+ * ## Usage Notes
3593
+ * - Rewrites the user's prompt with added detail about style, lighting, colors, and composition.
3594
+ * - When includeNegativePrompt is true, a second model call generates a negative prompt.
3595
+ */
3596
+ enhanceImageGenerationPrompt(step: EnhanceImageGenerationPromptStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<EnhanceImageGenerationPromptStepOutput>>;
3597
+ /**
3598
+ * Enhance Video Prompt
3599
+ *
3600
+ * Auto-enhance a video generation prompt using a language model. Optionally generates a negative prompt.
3601
+ *
3602
+ * ## Usage Notes
3603
+ * - Rewrites the user's prompt with added detail about style, camera movement, lighting, and composition.
3604
+ * - When includeNegativePrompt is true, a second model call generates a negative prompt.
3605
+ */
3606
+ enhanceVideoGenerationPrompt(step: EnhanceVideoGenerationPromptStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<EnhanceVideoGenerationPromptStepOutput>>;
3607
+ /**
3608
+ * [Apollo] Enrich Person
3609
+ *
3610
+ * Look up professional information about a person using Apollo.io. Search by ID, name, LinkedIn URL, email, or domain.
3611
+ *
3612
+ * ## Usage Notes
3613
+ * - At least one search parameter must be provided.
3614
+ * - Returns enriched data from Apollo including contact details, employment info, and social profiles.
3615
+ */
3616
+ enrichPerson(step: EnrichPersonStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<EnrichPersonStepOutput>>;
3617
+ /**
3618
+ * Extract Audio from Video
3619
+ *
3620
+ * Extract audio MP3 from a video file
3621
+ *
3622
+ * ## Usage Notes
3623
+ *
3624
+ */
3625
+ extractAudioFromVideo(step: ExtractAudioFromVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ExtractAudioFromVideoStepOutput>>;
3626
+ /**
3627
+ * Extract Text from URL
3628
+ *
3629
+ * Download a file from a URL and extract its text content. Supports PDFs, plain text files, and other document formats.
3630
+ *
3631
+ * ## Usage Notes
3632
+ * - Best suited for PDFs and raw text/document files. For web pages, use the scrapeUrl step instead.
3633
+ * - Accepts a single URL, a comma-separated list of URLs, or a JSON array of URLs.
3634
+ * - Files are rehosted on the MindStudio CDN before extraction.
3635
+ * - Maximum file size is 50MB per URL.
3636
+ */
3637
+ extractText(step: ExtractTextStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ExtractTextStepOutput>>;
3638
+ /**
3639
+ * [Google] Fetch Google Doc
3640
+ *
3641
+ * Fetch the contents of an existing Google Document.
3642
+ *
3643
+ * ## Usage Notes
3644
+ * - exportType controls the output format: "html" for HTML markup, "markdown" for Markdown, "json" for structured JSON, "plain" for plain text.
3645
+ */
3646
+ fetchGoogleDoc(step: FetchGoogleDocStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchGoogleDocStepOutput>>;
3647
+ /**
3648
+ * [Google] Fetch Google Sheet
3649
+ *
3650
+ * Fetch contents of a Google Spreadsheet range.
3651
+ *
3652
+ * ## Usage Notes
3653
+ * - range uses A1 notation (e.g. "Sheet1!A1:C10"). Omit to fetch the entire first sheet.
3654
+ * - exportType controls the output format: "csv" for comma-separated values, "json" for structured JSON.
3655
+ */
3656
+ fetchGoogleSheet(step: FetchGoogleSheetStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchGoogleSheetStepOutput>>;
3657
+ /**
3658
+ * Fetch Slack Channel History
3659
+ *
3660
+ * Fetch recent message history from a Slack channel.
3661
+ *
3662
+ * ## Usage Notes
3663
+ * - The user is responsible for connecting their Slack workspace and selecting the channel
3664
+ */
3665
+ fetchSlackChannelHistory(step: FetchSlackChannelHistoryStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchSlackChannelHistoryStepOutput>>;
3666
+ /**
3667
+ * [YouTube] Fetch Captions
3668
+ *
3669
+ * Retrieve the captions/transcript for a YouTube video.
3670
+ *
3671
+ * ## Usage Notes
3672
+ * - Supports multiple languages via the language parameter.
3673
+ * - "text" export produces timestamped plain text; "json" export produces structured transcript data.
3674
+ */
3675
+ fetchYoutubeCaptions(step: FetchYoutubeCaptionsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchYoutubeCaptionsStepOutput>>;
3676
+ /**
3677
+ * [YouTube] Fetch Channel
3678
+ *
3679
+ * Retrieve metadata and recent videos for a YouTube channel.
3680
+ *
3681
+ * ## Usage Notes
3682
+ * - Accepts a YouTube channel URL (e.g. https://www.youtube.com/@ChannelName or /channel/ID).
3683
+ * - Returns channel info and video listings as a JSON object.
3684
+ */
3685
+ fetchYoutubeChannel(step: FetchYoutubeChannelStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchYoutubeChannelStepOutput>>;
3686
+ /**
3687
+ * [YouTube] Fetch Comments
3688
+ *
3689
+ * Retrieve comments for a YouTube video.
3690
+ *
3691
+ * ## Usage Notes
3692
+ * - Paginates through comments (up to 5 pages).
3693
+ * - "text" export produces markdown-formatted text; "json" export produces structured comment data.
3694
+ */
3695
+ fetchYoutubeComments(step: FetchYoutubeCommentsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchYoutubeCommentsStepOutput>>;
3696
+ /**
3697
+ * [YouTube] Fetch Video
3698
+ *
3699
+ * Retrieve metadata for a YouTube video (title, description, stats, channel info).
3700
+ *
3701
+ * ## Usage Notes
3702
+ * - Returns video metadata, channel info, and engagement stats.
3703
+ * - Video format data is excluded from the response.
3704
+ */
3705
+ fetchYoutubeVideo(step: FetchYoutubeVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<FetchYoutubeVideoStepOutput>>;
3706
+ /**
3707
+ * Generate Chart
3708
+ *
3709
+ * Create a chart image using QuickChart (Chart.js) and return the URL.
3710
+ *
3711
+ * ## Usage Notes
3712
+ * - The data field must be a Chart.js-compatible JSON object serialized as a string.
3713
+ * - Supported chart types: bar, line, pie.
3714
+ */
3715
+ generateChart(step: GenerateChartStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GenerateChartStepOutput>>;
3716
+ /**
3717
+ * Generate Image
3718
+ *
3719
+ * Generate an image from a text prompt using an AI model.
3720
+ *
3721
+ * ## Usage Notes
3722
+ * - Prompts should be descriptive but concise (roughly 3–6 sentences).
3723
+ * - Images are automatically hosted on a CDN.
3724
+ * - In foreground mode, the image is displayed to the user. In background mode, the URL is saved to a variable.
3725
+ * - When generateVariants is true with numVariants > 1, multiple images are generated in parallel.
3726
+ * - In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.
3727
+ */
3728
+ generateImage(step: GenerateImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GenerateImageStepOutput>>;
3729
+ /**
3730
+ * Generate Lipsync
3731
+ *
3732
+ * Generate a lip sync video from provided audio and image.
3733
+ *
3734
+ * ## Usage Notes
3735
+ * - In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.
3736
+ */
3737
+ generateLipsync(step: GenerateLipsyncStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GenerateLipsyncStepOutput>>;
3738
+ /**
3739
+ * Generate Music
3740
+ *
3741
+ * Generate an audio file from provided instructions (text) using a music model.
3742
+ *
3743
+ * ## Usage Notes
3744
+ * - The text field contains the instructions (prompt) for the music generation.
3745
+ * - In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.
3746
+ */
3747
+ generateMusic(step: GenerateMusicStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GenerateMusicStepOutput>>;
3748
+ /**
3749
+ * Generate HTML Asset
3750
+ *
3751
+ * Generate an HTML asset and export it as a webpage, PDF, or image
3752
+ *
3753
+ * ## Usage Notes
3754
+ * - Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.
3755
+ * - The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.
3756
+ * - If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.
3757
+ * - Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate
3758
+ * - Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)
3759
+ */
3760
+ generatePdf(step: GeneratePdfStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GeneratePdfStepOutput>>;
3761
+ /**
3762
+ * Generate Static Video from Image
3763
+ *
3764
+ * Convert a static image to an MP4
3765
+ *
3766
+ * ## Usage Notes
3767
+ * - Can use to create slides/intertitles/slates for video composition
3768
+ */
3769
+ generateStaticVideoFromImage(step: GenerateStaticVideoFromImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GenerateStaticVideoFromImageStepOutput>>;
3770
+ /**
3771
+ * Generate Video
3772
+ *
3773
+ * Generate a video from a text prompt using an AI model.
3774
+ *
3775
+ * ## Usage Notes
3776
+ * - Prompts should be descriptive but concise (roughly 3–6 sentences).
3777
+ * - Videos are automatically hosted on a CDN.
3778
+ * - In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.
3779
+ * - When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.
3780
+ * - In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.
3781
+ */
3782
+ generateVideo(step: GenerateVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GenerateVideoStepOutput>>;
3783
+ /**
3784
+ * [Google Calendar] Get Event
3785
+ *
3786
+ * Retrieve a specific event from a Google Calendar by event ID.
3787
+ *
3788
+ * ## Usage Notes
3789
+ * - Requires a Google OAuth connection with Calendar events scope.
3790
+ * - The variable receives JSON or XML-like text depending on exportType. The direct execution output always returns the structured event.
3791
+ */
3792
+ getGoogleCalendarEvent(step: GetGoogleCalendarEventStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GetGoogleCalendarEventStepOutput>>;
3793
+ /**
3794
+ * Get Media Metadata
3795
+ *
3796
+ * Get info about a media file
3797
+ *
3798
+ * ## Usage Notes
3799
+ *
3800
+ */
3801
+ getMediaMetadata(step: GetMediaMetadataStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<GetMediaMetadataStepOutput>>;
3802
+ /**
3803
+ * HTTP Request
3804
+ *
3805
+ * Make an HTTP request to an external endpoint and return the response.
3806
+ *
3807
+ * ## Usage Notes
3808
+ * - Supports GET, POST, PATCH, DELETE, and PUT methods.
3809
+ * - Body can be raw JSON/text, URL-encoded form data, or multipart form data.
3810
+ */
3811
+ httpRequest(step: HttpRequestStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HttpRequestStepOutput>>;
3812
+ /**
3813
+ * [HubSpot] Create/Update Company
3814
+ *
3815
+ * Create a new company or update an existing one in HubSpot. Matches by domain.
3816
+ *
3817
+ * ## Usage Notes
3818
+ * - Requires a HubSpot OAuth connection (connectionId).
3819
+ * - If a company with the given domain already exists, it is updated. Otherwise, a new one is created.
3820
+ * - Property values are type-checked against enabledProperties before being sent to HubSpot.
3821
+ */
3822
+ hubspotCreateCompany(step: HubspotCreateCompanyStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HubspotCreateCompanyStepOutput>>;
3823
+ /**
3824
+ * [HubSpot] Create/Update Contact
3825
+ *
3826
+ * Create a new contact or update an existing one in HubSpot. Matches by email address.
3827
+ *
3828
+ * ## Usage Notes
3829
+ * - Requires a HubSpot OAuth connection (connectionId).
3830
+ * - If a contact with the given email already exists, it is updated. Otherwise, a new one is created.
3831
+ * - If companyDomain is provided, the contact is associated with that company (creating the company if needed).
3832
+ * - Property values are type-checked against enabledProperties before being sent to HubSpot.
3833
+ */
3834
+ hubspotCreateContact(step: HubspotCreateContactStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HubspotCreateContactStepOutput>>;
3835
+ /**
3836
+ * [HubSpot] Get Company
3837
+ *
3838
+ * Look up a HubSpot company by domain name or company ID.
3839
+ *
3840
+ * ## Usage Notes
3841
+ * - Requires a HubSpot OAuth connection (connectionId).
3842
+ * - Returns null if the company is not found.
3843
+ * - When searching by domain, performs a search query then fetches the full company record.
3844
+ * - Use additionalProperties to request specific HubSpot properties beyond the defaults.
3845
+ */
3846
+ hubspotGetCompany(step: HubspotGetCompanyStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HubspotGetCompanyStepOutput>>;
3847
+ /**
3848
+ * [HubSpot] Get Contact
3849
+ *
3850
+ * Look up a HubSpot contact by email address or contact ID.
3851
+ *
3852
+ * ## Usage Notes
3853
+ * - Requires a HubSpot OAuth connection (connectionId).
3854
+ * - Returns null if the contact is not found.
3855
+ * - Use additionalProperties to request specific HubSpot properties beyond the defaults.
3856
+ */
3857
+ hubspotGetContact(step: HubspotGetContactStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HubspotGetContactStepOutput>>;
3858
+ /**
3859
+ * [Hunter.io] Enrich Company
3860
+ *
3861
+ * Look up company information by domain using Hunter.io.
3862
+ *
3863
+ * ## Usage Notes
3864
+ * - Returns company name, description, location, industry, size, technologies, and more.
3865
+ * - If the domain input is a full URL, the hostname is automatically extracted.
3866
+ * - Returns null if the company is not found.
3867
+ */
3868
+ hunterApiCompanyEnrichment(step: HunterApiCompanyEnrichmentStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HunterApiCompanyEnrichmentStepOutput>>;
3869
+ /**
3870
+ * [Hunter.io] Domain Search
3871
+ *
3872
+ * Search for email addresses associated with a domain using Hunter.io.
3873
+ *
3874
+ * ## Usage Notes
3875
+ * - If the domain input is a full URL, the hostname is automatically extracted.
3876
+ * - Returns a list of email addresses found for the domain along with organization info.
3877
+ */
3878
+ hunterApiDomainSearch(step: HunterApiDomainSearchStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HunterApiDomainSearchStepOutput>>;
3879
+ /**
3880
+ * [Hunter.io] Find Email
3881
+ *
3882
+ * Find an email address for a specific person at a domain using Hunter.io.
3883
+ *
3884
+ * ## Usage Notes
3885
+ * - Requires a first name, last name, and domain.
3886
+ * - If the domain input is a full URL, the hostname is automatically extracted.
3887
+ * - Returns the most likely email address with a confidence score.
3888
+ */
3889
+ hunterApiEmailFinder(step: HunterApiEmailFinderStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HunterApiEmailFinderStepOutput>>;
3890
+ /**
3891
+ * [Hunter.io] Verify Email
3892
+ *
3893
+ * Verify whether an email address is valid and deliverable using Hunter.io.
3894
+ *
3895
+ * ## Usage Notes
3896
+ * - Checks email format, MX records, SMTP server, and mailbox deliverability.
3897
+ * - Returns a status ("valid", "invalid", "accept_all", "webmail", "disposable", "unknown") and a score.
3898
+ */
3899
+ hunterApiEmailVerification(step: HunterApiEmailVerificationStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HunterApiEmailVerificationStepOutput>>;
3900
+ /**
3901
+ * [Hunter.io] Enrich Person
3902
+ *
3903
+ * Look up professional information about a person by their email address using Hunter.io.
3904
+ *
3905
+ * ## Usage Notes
3906
+ * - Returns name, job title, social profiles, and company information.
3907
+ * - If the person is not found, returns an object with an error message instead of throwing.
3908
+ */
3909
+ hunterApiPersonEnrichment(step: HunterApiPersonEnrichmentStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<HunterApiPersonEnrichmentStepOutput>>;
3910
+ /**
3911
+ * Image Face Swap
3912
+ *
3913
+ * Replace a face in an image with a face from another image using AI.
3914
+ *
3915
+ * ## Usage Notes
3916
+ * - Requires both a target image and a face source image.
3917
+ * - Output is re-hosted on the CDN as a PNG.
3918
+ */
3919
+ imageFaceSwap(step: ImageFaceSwapStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ImageFaceSwapStepOutput>>;
3920
+ /**
3921
+ * Remove Image Watermark
3922
+ *
3923
+ * Remove watermarks from an image using AI.
3924
+ *
3925
+ * ## Usage Notes
3926
+ * - Output is re-hosted on the CDN as a PNG.
3927
+ */
3928
+ imageRemoveWatermark(step: ImageRemoveWatermarkStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ImageRemoveWatermarkStepOutput>>;
3929
+ /**
3930
+ * Insert Video Clips
3931
+ *
3932
+ * Insert b-roll clips into a base video at a timecode, optionally with an xfade transition.
3933
+ *
3934
+ * ## Usage Notes
3935
+ *
3936
+ */
3937
+ insertVideoClips(step: InsertVideoClipsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<InsertVideoClipsStepOutput>>;
3938
+ /**
3939
+ * [Google Calendar] List Events
3940
+ *
3941
+ * List upcoming events from a Google Calendar, ordered by start time.
3942
+ *
3943
+ * ## Usage Notes
3944
+ * - Requires a Google OAuth connection with Calendar events scope.
3945
+ * - Only returns future events (timeMin = now).
3946
+ * - The variable receives JSON or XML-like text depending on exportType. The direct execution output always returns structured events.
3947
+ */
3948
+ listGoogleCalendarEvents(step: ListGoogleCalendarEventsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ListGoogleCalendarEventsStepOutput>>;
3949
+ /**
3950
+ * Evaluate Logic
3951
+ *
3952
+ * Use an AI model to evaluate which condition from a list is most true, given a context prompt.
3953
+ *
3954
+ * ## Usage Notes
3955
+ * - This is "fuzzy" logic evaluated by an AI model, not computational logic. The model picks the most accurate statement.
3956
+ * - All possible cases must be specified — there is no default/fallback case.
3957
+ * - Requires at least two cases.
3958
+ * - In workflow mode, transitions to the destinationStepId of the winning case. In direct execution, returns the winning case ID and condition.
3959
+ */
3960
+ logic(step: LogicStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<LogicStepOutput>>;
3961
+ /**
3962
+ * Make.com Run Scenario
3963
+ *
3964
+ * Trigger a Make.com (formerly Integromat) scenario via webhook and return the response.
3965
+ *
3966
+ * ## Usage Notes
3967
+ * - The webhook URL must be configured in your Make.com scenario.
3968
+ * - Input key-value pairs are sent as JSON in the POST body.
3969
+ * - Response format depends on the Make.com scenario configuration.
3970
+ */
3971
+ makeDotComRunScenario(step: MakeDotComRunScenarioStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<MakeDotComRunScenarioStepOutput>>;
3972
+ /**
3973
+ * Merge Audio
3974
+ *
3975
+ * Merge one or more clips into a single audio file.
3976
+ *
3977
+ * ## Usage Notes
3978
+ *
3979
+ */
3980
+ mergeAudio(step: MergeAudioStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<MergeAudioStepOutput>>;
3981
+ /**
3982
+ * Merge Videos
3983
+ *
3984
+ * Merge one or more clips into a single video.
3985
+ *
3986
+ * ## Usage Notes
3987
+ *
3988
+ */
3989
+ mergeVideos(step: MergeVideosStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<MergeVideosStepOutput>>;
3990
+ /**
3991
+ * Mix Audio into Video
3992
+ *
3993
+ * Mix an audio track into a video
3994
+ *
3995
+ * ## Usage Notes
3996
+ *
3997
+ */
3998
+ mixAudioIntoVideo(step: MixAudioIntoVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<MixAudioIntoVideoStepOutput>>;
3999
+ /**
4000
+ * Mute Video
4001
+ *
4002
+ * Mute a video file
4003
+ *
4004
+ * ## Usage Notes
4005
+ *
4006
+ */
4007
+ muteVideo(step: MuteVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<MuteVideoStepOutput>>;
4008
+ /**
4009
+ * N8N Run Node
4010
+ *
4011
+ * Trigger an n8n workflow node via webhook and return the response.
4012
+ *
4013
+ * ## Usage Notes
4014
+ * - The webhook URL must be configured in your n8n workflow.
4015
+ * - Supports GET and POST methods with optional Basic authentication.
4016
+ * - For GET requests, input values are sent as query parameters. For POST, they are sent as JSON body.
4017
+ */
4018
+ n8nRunNode(step: N8nRunNodeStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<N8nRunNodeStepOutput>>;
4019
+ /**
4020
+ * [Notion] Create Page
4021
+ *
4022
+ * Create a new page in Notion as a child of an existing page.
4023
+ *
4024
+ * ## Usage Notes
4025
+ * - Requires a Notion OAuth connection (connectionId).
4026
+ * - Content is provided as markdown and converted to Notion blocks (headings, paragraphs, lists, code, quotes).
4027
+ * - The page is created as a child of the specified parent page (pageId).
4028
+ */
4029
+ notionCreatePage(step: NotionCreatePageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<NotionCreatePageStepOutput>>;
4030
+ /**
4031
+ * [Notion] Update Page
4032
+ *
4033
+ * Update the content of an existing Notion page.
4034
+ *
4035
+ * ## Usage Notes
4036
+ * - Requires a Notion OAuth connection (connectionId).
4037
+ * - Content is provided as markdown and converted to Notion blocks.
4038
+ * - "append" mode adds content to the end of the page. "overwrite" mode deletes all existing blocks first.
4039
+ */
4040
+ notionUpdatePage(step: NotionUpdatePageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<NotionUpdatePageStepOutput>>;
4041
+ /**
4042
+ * [Apollo] People Search
4043
+ *
4044
+ * Search for people matching specific criteria using Apollo.io. Supports natural language queries and advanced filters.
4045
+ *
4046
+ * ## Usage Notes
4047
+ * - Can use a natural language "smartQuery" which is converted to Apollo search parameters by an AI model.
4048
+ * - Advanced params can override or supplement the smart query results.
4049
+ * - Optionally enriches returned people and/or their organizations for additional detail.
4050
+ * - Results are paginated. Use limit and page to control the result window.
4051
+ */
4052
+ peopleSearch(step: PeopleSearchStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<PeopleSearchStepOutput>>;
4053
+ /**
4054
+ * [LinkedIn] Create post
4055
+ *
4056
+ * Create a post on LinkedIn from the connected account.
4057
+ *
4058
+ * ## Usage Notes
4059
+ * - Requires a LinkedIn OAuth connection (connectionId).
4060
+ * - Supports text posts, image posts, and video posts.
4061
+ * - Visibility controls who can see the post.
4062
+ */
4063
+ postToLinkedIn(step: PostToLinkedInStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<PostToLinkedInStepOutput>>;
4064
+ /**
4065
+ * Post to Slack Channel
4066
+ *
4067
+ * Send a message to a Slack channel via a connected bot.
4068
+ *
4069
+ * ## Usage Notes
4070
+ * - The user is responsible for connecting their Slack workspace and selecting the channel
4071
+ * - Supports both simple text messages and slack blocks messages
4072
+ * - Text messages can use limited markdown (slack-only formatting—e.g., headers are just rendered as bold)
4073
+ */
4074
+ postToSlackChannel(step: PostToSlackChannelStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<PostToSlackChannelStepOutput>>;
4075
+ /**
4076
+ * [X] Create post
4077
+ *
4078
+ * Create a post on X (Twitter) from the connected account.
4079
+ *
4080
+ * ## Usage Notes
4081
+ * - Requires an X OAuth connection (connectionId).
4082
+ * - Posts are plain text. Maximum 280 characters.
4083
+ */
4084
+ postToX(step: PostToXStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<PostToXStepOutput>>;
4085
+ /**
4086
+ * Post to Zapier
4087
+ *
4088
+ * Send data to a Zapier Zap via webhook and return the response.
4089
+ *
4090
+ * ## Usage Notes
4091
+ * - The webhook URL must be configured in the Zapier Zap settings
4092
+ * - Input keys and values are sent as the JSON body of the POST request
4093
+ * - The webhook response (JSON or plain text) is returned as the output
4094
+ */
4095
+ postToZapier(step: PostToZapierStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<PostToZapierStepOutput>>;
4096
+ /**
4097
+ * Query Data Source
4098
+ *
4099
+ * Search a vector data source (RAG) and return relevant document chunks.
4100
+ *
4101
+ * ## Usage Notes
4102
+ * - Queries a vectorized data source and returns the most relevant chunks.
4103
+ * - Useful for retrieval-augmented generation (RAG) workflows.
4104
+ */
4105
+ queryDataSource(step: QueryDataSourceStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<QueryDataSourceStepOutput>>;
4106
+ /**
4107
+ * Query External SQL Database
4108
+ *
4109
+ * Execute a SQL query against an external database connected to the workspace.
4110
+ *
4111
+ * ## Usage Notes
4112
+ * - Requires a database connection configured in the workspace.
4113
+ * - Supports PostgreSQL (including Supabase), MySQL, and MSSQL.
4114
+ * - Results can be returned as JSON or CSV.
4115
+ */
4116
+ queryExternalDatabase(step: QueryExternalDatabaseStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<QueryExternalDatabaseStepOutput>>;
4117
+ /**
4118
+ * Redact PII
4119
+ *
4120
+ * Replace personally identifiable information in text with placeholders using Microsoft Presidio.
4121
+ *
4122
+ * ## Usage Notes
4123
+ * - PII is replaced with entity type placeholders (e.g. "Call me at <PHONE_NUMBER>").
4124
+ * - If entities is empty, returns empty text immediately without processing.
4125
+ */
4126
+ redactPII(step: RedactPIIStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<RedactPIIStepOutput>>;
4127
+ /**
4128
+ * Remove Background From Image
4129
+ *
4130
+ * Remove the background from an image using AI, producing a transparent PNG.
4131
+ *
4132
+ * ## Usage Notes
4133
+ * - Uses the Bria background removal model via fal.ai.
4134
+ * - Output is re-hosted on the CDN as a PNG with transparency.
4135
+ */
4136
+ removeBackgroundFromImage(step: RemoveBackgroundFromImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<RemoveBackgroundFromImageStepOutput>>;
4137
+ /**
4138
+ * Resize Video
4139
+ *
4140
+ * Resize a video file
4141
+ *
4142
+ * ## Usage Notes
4143
+ *
4144
+ */
4145
+ resizeVideo(step: ResizeVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ResizeVideoStepOutput>>;
4146
+ /**
4147
+ * Run Packaged Workflow
4148
+ *
4149
+ * Run a packaged workflow ("custom block")
4150
+ *
4151
+ * ## Usage Notes
4152
+ * - From the user's perspective, packaged workflows are just ordinary blocks. Behind the scenes, they operate like packages/libraries in a programming language, letting the user execute custom functionality.
4153
+ * - Some of these packaged workflows are available as part of MindStudio's "Standard Library" and available to every user.
4154
+ * - Available packaged workflows are documented here as individual blocks, but the runPackagedWorkflow block is how they need to be wrapped in order to be executed correctly.
4155
+ */
4156
+ runPackagedWorkflow(step: RunPackagedWorkflowStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<RunPackagedWorkflowStepOutput>>;
4157
+ /**
4158
+ * Run Workflow
4159
+ *
4160
+ * Spawn one or more child workflows (advanced)
4161
+ *
4162
+ * ## Usage Notes
4163
+ * - Spawned workflows are executed in their own contexts and scopes; they do not have access to any variables, etc., from the parent workflow
4164
+ * - This is an ADVANCED feature and should not be used unless you really know what you are doing
4165
+ * - Child workflows must be set up to receive launch variables and return variables via an end block. The input and output variables properties map variables between scopes
4166
+ */
4167
+ runWorkflow(step: RunWorkflowStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<RunWorkflowStepOutput>>;
4168
+ /**
4169
+ * [Facebook] Scrape Page
4170
+ *
4171
+ * Scrape a Facebook page
4172
+ */
4173
+ scrapeFacebookPage(step: ScrapeFacebookPageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeFacebookPageStepOutput>>;
4174
+ /**
4175
+ * [Facebook] Scrape Posts for Page
4176
+ *
4177
+ * Get all the posts for a Facebook page
4178
+ */
4179
+ scrapeFacebookPosts(step: ScrapeFacebookPostsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeFacebookPostsStepOutput>>;
4180
+ /**
4181
+ * [Instagram] Scrape Comments
4182
+ *
4183
+ * Get all the comments for an Instagram post
4184
+ */
4185
+ scrapeInstagramComments(step: ScrapeInstagramCommentsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeInstagramCommentsStepOutput>>;
4186
+ /**
4187
+ * [Instagram] Scrape Mentions
4188
+ *
4189
+ * Scrape an Instagram profile's mentions
4190
+ */
4191
+ scrapeInstagramMentions(step: ScrapeInstagramMentionsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeInstagramMentionsStepOutput>>;
4192
+ /**
4193
+ * [Instagram] Scrape Posts
4194
+ *
4195
+ * Get all the posts for an Instagram profile
4196
+ */
4197
+ scrapeInstagramPosts(step: ScrapeInstagramPostsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeInstagramPostsStepOutput>>;
4198
+ /**
4199
+ * [Instagram] Scrape Profile
4200
+ *
4201
+ * Scrape an Instagram profile
4202
+ */
4203
+ scrapeInstagramProfile(step: ScrapeInstagramProfileStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeInstagramProfileStepOutput>>;
4204
+ /**
4205
+ * [Instagram] Scrape Reels
4206
+ *
4207
+ * Get all the reels for an Instagram profile
4208
+ */
4209
+ scrapeInstagramReels(step: ScrapeInstagramReelsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeInstagramReelsStepOutput>>;
4210
+ /**
4211
+ * Scrape LinkedIn Company
4212
+ *
4213
+ * Scrape public company data from a LinkedIn company page.
4214
+ *
4215
+ * ## Usage Notes
4216
+ * - Requires a LinkedIn company URL (e.g. https://www.linkedin.com/company/mindstudioai).
4217
+ * - Returns structured company data including description, employees, updates, and similar companies.
4218
+ */
4219
+ scrapeLinkedInCompany(step: ScrapeLinkedInCompanyStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeLinkedInCompanyStepOutput>>;
4220
+ /**
4221
+ * Scrape LinkedIn Profile
4222
+ *
4223
+ * Scrape public profile data from a LinkedIn profile page.
4224
+ *
4225
+ * ## Usage Notes
4226
+ * - Requires a LinkedIn profile URL (e.g. https://www.linkedin.com/in/username).
4227
+ * - Returns structured profile data including experience, education, articles, and activities.
4228
+ */
4229
+ scrapeLinkedInProfile(step: ScrapeLinkedInProfileStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeLinkedInProfileStepOutput>>;
4230
+ /**
4231
+ * [Meta Threads] Scrape Profile
4232
+ *
4233
+ * Scrape a Meta Threads profile
4234
+ */
4235
+ scrapeMetaThreadsProfile(step: ScrapeMetaThreadsProfileStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeMetaThreadsProfileStepOutput>>;
4236
+ /**
4237
+ * Scrape URL
4238
+ *
4239
+ * Extract text, HTML, or structured content from one or more web pages.
4240
+ *
4241
+ * ## Usage Notes
4242
+ * - Accepts a single URL or multiple URLs (as a JSON array, comma-separated, or newline-separated).
4243
+ * - Output format controls the result shape: "text" returns markdown, "html" returns raw HTML, "json" returns structured scraper data.
4244
+ * - Can optionally capture a screenshot of each page.
4245
+ */
4246
+ scrapeUrl(step: ScrapeUrlStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeUrlStepOutput>>;
4247
+ /**
4248
+ * Scrape X Post
4249
+ *
4250
+ * Scrape data from a single X (Twitter) post by URL.
4251
+ *
4252
+ * ## Usage Notes
4253
+ * - Returns structured post data (text, html, optional json/screenshot/metadata).
4254
+ * - Optionally saves the text content to a variable.
4255
+ */
4256
+ scrapeXPost(step: ScrapeXPostStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeXPostStepOutput>>;
4257
+ /**
4258
+ * Scrape X Profile
4259
+ *
4260
+ * Scrape public profile data from an X (Twitter) account by URL.
4261
+ *
4262
+ * ## Usage Notes
4263
+ * - Returns structured profile data.
4264
+ * - Optionally saves the result to a variable.
4265
+ */
4266
+ scrapeXProfile(step: ScrapeXProfileStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<ScrapeXProfileStepOutput>>;
4267
+ /**
4268
+ * Search Google
4269
+ *
4270
+ * Search the web using Google and return structured results.
4271
+ *
4272
+ * ## Usage Notes
4273
+ * - Defaults to us/english, but can optionally specify country and/or language.
4274
+ * - Defaults to any time, but can optionally specify last hour, last day, week, month, or year.
4275
+ * - Defaults to top 30 results, but can specify 1 to 100 results to return.
4276
+ */
4277
+ searchGoogle(step: SearchGoogleStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchGoogleStepOutput>>;
4278
+ /**
4279
+ * Search Google Images
4280
+ *
4281
+ * Search Google Images and return image results with URLs and metadata.
4282
+ *
4283
+ * ## Usage Notes
4284
+ * - Defaults to us/english, but can optionally specify country and/or language.
4285
+ * - Defaults to any time, but can optionally specify last hour, last day, week, month, or year.
4286
+ * - Defaults to top 30 results, but can specify 1 to 100 results to return.
4287
+ */
4288
+ searchGoogleImages(step: SearchGoogleImagesStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchGoogleImagesStepOutput>>;
4289
+ /**
4290
+ * Search Google News
4291
+ *
4292
+ * Search Google News for recent news articles matching a query.
4293
+ *
4294
+ * ## Usage Notes
4295
+ * - Defaults to top 30 results, but can specify 1 to 100 results to return.
4296
+ */
4297
+ searchGoogleNews(step: SearchGoogleNewsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchGoogleNewsStepOutput>>;
4298
+ /**
4299
+ * Search Google Trends
4300
+ *
4301
+ * Fetch Google Trends data for a search term.
4302
+ *
4303
+ * ## Usage Notes
4304
+ * - date accepts shorthand ("now 1-H", "today 1-m", "today 5-y", etc.) or custom "yyyy-mm-dd yyyy-mm-dd" ranges.
4305
+ * - data_type controls the shape of returned data: TIMESERIES, GEO_MAP, GEO_MAP_0, RELATED_TOPICS, or RELATED_QUERIES.
4306
+ */
4307
+ searchGoogleTrends(step: SearchGoogleTrendsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchGoogleTrendsStepOutput>>;
4308
+ /**
4309
+ * Search Perplexity
4310
+ *
4311
+ * Search the web using the Perplexity API and return structured results.
4312
+ *
4313
+ * ## Usage Notes
4314
+ * - Defaults to US results. Use countryCode (ISO code) to filter by country.
4315
+ * - Returns 10 results by default, configurable from 1 to 20.
4316
+ * - The variable receives text or JSON depending on exportType. The direct execution output always returns structured results.
4317
+ */
4318
+ searchPerplexity(step: SearchPerplexityStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchPerplexityStepOutput>>;
4319
+ /**
4320
+ * [X] Search Posts
4321
+ *
4322
+ * Search recent X (Twitter) posts matching a query.
4323
+ *
4324
+ * ## Usage Notes
4325
+ * - Searches only the past 7 days of posts.
4326
+ * - Query supports X API v2 search operators (up to 512 characters).
4327
+ *
4328
+ * Available search operators in query:
4329
+ *
4330
+ * | Operator | Description |
4331
+ * | -----------------| -------------------------------------------------|
4332
+ * | from: | Posts from a specific user (e.g., from:elonmusk) |
4333
+ * | to: | Posts sent to a specific user (e.g., to:NASA) |
4334
+ * | @ | Mentions a user (e.g., @openai) |
4335
+ * | # | Hashtag search (e.g., #AI) |
4336
+ * | is:retweet | Filters retweets |
4337
+ * | is:reply | Filters replies |
4338
+ * | has:media | Posts containing media (images, videos, or GIFs) |
4339
+ * | has:links | Posts containing URLs |
4340
+ * | lang: | Filters by language (e.g., lang:en) |
4341
+ * | - | Excludes specific terms (e.g., -spam) |
4342
+ * | () | Groups terms or operators (e.g., (AI OR ML)) |
4343
+ * | AND, OR, NOT | Boolean logic for combining or excluding terms |
4344
+ *
4345
+ * Conjunction-Required Operators (must be combined with a standalone operator):
4346
+ *
4347
+ * | Operator | Description |
4348
+ * | ------------ | -----------------------------------------------|
4349
+ * | has:media | Posts containing media (images, videos, or GIFs) |
4350
+ * | has:links | Posts containing URLs |
4351
+ * | is:retweet | Filters retweets |
4352
+ * | is:reply | Filters replies |
4353
+ *
4354
+ * For example, has:media alone is invalid, but #AI has:media is valid.
4355
+ */
4356
+ searchXPosts(step: SearchXPostsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchXPostsStepOutput>>;
4357
+ /**
4358
+ * [YouTube] Search Videos
4359
+ *
4360
+ * Search for YouTube videos by keyword.
4361
+ *
4362
+ * ## Usage Notes
4363
+ * - Supports pagination (up to 5 pages) and country/language filters.
4364
+ * - Use the filter/filterType fields for YouTube search parameter (sp) filters.
4365
+ */
4366
+ searchYoutube(step: SearchYoutubeStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchYoutubeStepOutput>>;
4367
+ /**
4368
+ * [YouTube] Search Trends
4369
+ *
4370
+ * Retrieve trending videos on YouTube by category and region.
4371
+ *
4372
+ * ## Usage Notes
4373
+ * - Categories: "now" (trending now), "music", "gaming", "films".
4374
+ * - Supports country and language filtering.
4375
+ */
4376
+ searchYoutubeTrends(step: SearchYoutubeTrendsStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SearchYoutubeTrendsStepOutput>>;
4377
+ /**
4378
+ * Send Email
4379
+ *
4380
+ * Send an email to one or more configured recipient addresses.
4381
+ *
4382
+ * ## Usage Notes
4383
+ * - Recipient email addresses are resolved from OAuth connections configured by the app creator. The user running the workflow does not specify the recipient directly.
4384
+ * - If the body is a URL to a hosted HTML file on the CDN, the HTML is fetched and used as the email body.
4385
+ * - When generateHtml is enabled, the body text is converted to a styled HTML email using an AI model.
4386
+ * - connectionId can be a comma-separated list to send to multiple recipients.
4387
+ * - The special connectionId "trigger_email" uses the email address that triggered the workflow.
4388
+ */
4389
+ sendEmail(step: SendEmailStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SendEmailStepOutput>>;
4390
+ /**
4391
+ * Send SMS
4392
+ *
4393
+ * Send an SMS text message to a phone number configured via OAuth connection.
4394
+ *
4395
+ * ## Usage Notes
4396
+ * - User is responsible for configuring the connection to the number (MindStudio requires double opt-in to prevent spam)
4397
+ */
4398
+ sendSMS(step: SendSMSStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SendSMSStepOutput>>;
4399
+ /**
4400
+ * Set Run Title
4401
+ *
4402
+ * Set the title of the agent run for the user's history
4403
+ */
4404
+ setRunTitle(step: SetRunTitleStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SetRunTitleStepOutput>>;
4405
+ /**
4406
+ * Set Variable
4407
+ *
4408
+ * Explicitly set a variable to a given value.
4409
+ *
4410
+ * ## Usage Notes
4411
+ * - Useful for bootstrapping global variables or setting constants.
4412
+ * - The variable name and value both support variable interpolation.
4413
+ * - The type field is a UI hint only (controls input widget in the editor).
4414
+ */
4415
+ setVariable(step: SetVariableStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<SetVariableStepOutput>>;
4416
+ /**
4417
+ * Send Telegram Audio
4418
+ *
4419
+ * Send an audio file to a Telegram chat as music or a voice note via a bot.
4420
+ *
4421
+ * ## Usage Notes
4422
+ * - "audio" mode sends as a standard audio file. "voice" mode sends as a voice message (re-uploads the file for large file support).
4423
+ */
4424
+ telegramSendAudio(step: TelegramSendAudioStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TelegramSendAudioStepOutput>>;
4425
+ /**
4426
+ * Send Telegram File
4427
+ *
4428
+ * Send a document/file to a Telegram chat via a bot.
4429
+ */
4430
+ telegramSendFile(step: TelegramSendFileStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TelegramSendFileStepOutput>>;
4431
+ /**
4432
+ * Send Telegram Image
4433
+ *
4434
+ * Send an image to a Telegram chat via a bot.
4435
+ */
4436
+ telegramSendImage(step: TelegramSendImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TelegramSendImageStepOutput>>;
4437
+ /**
4438
+ * Send Telegram Message
4439
+ *
4440
+ * Send a text message to a Telegram chat via a bot.
4441
+ *
4442
+ * ## Usage Notes
4443
+ * - Messages are sent using MarkdownV2 formatting. Special characters are auto-escaped.
4444
+ * - botToken format is "botId:token" — both parts are required.
4445
+ */
4446
+ telegramSendMessage(step: TelegramSendMessageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TelegramSendMessageStepOutput>>;
4447
+ /**
4448
+ * Send Telegram Video
4449
+ *
4450
+ * Send a video to a Telegram chat via a bot.
4451
+ */
4452
+ telegramSendVideo(step: TelegramSendVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TelegramSendVideoStepOutput>>;
4453
+ /**
4454
+ * Telegram Set Typing
4455
+ *
4456
+ * Show the "typing..." indicator in a Telegram chat via a bot.
4457
+ *
4458
+ * ## Usage Notes
4459
+ * - The typing indicator automatically expires after a few seconds. Use this right before sending a message for a natural feel.
4460
+ */
4461
+ telegramSetTyping(step: TelegramSetTypingStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TelegramSetTypingStepOutput>>;
4462
/**
 * Text to Speech
 *
 * Generate an audio file from provided text using a speech model.
 *
 * ## Usage Notes
 * - The text field contains the exact words to be spoken (not instructions).
 * - In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
textToSpeech(step: TextToSpeechStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TextToSpeechStepOutput>>;
/**
 * Transcribe Audio
 *
 * Convert an audio file to text using a transcription model.
 *
 * ## Usage Notes
 * - The prompt field provides optional context to improve transcription accuracy (e.g. language, speaker names, domain).
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
transcribeAudio(step: TranscribeAudioStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TranscribeAudioStepOutput>>;
4481
/**
 * Trim Media
 *
 * Trim an audio or video clip.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
trimMedia(step: TrimMediaStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<TrimMediaStepOutput>>;
4490
/**
 * [Google Calendar] Update Event
 *
 * Update an existing event on a Google Calendar. Only specified fields are changed.
 *
 * ## Usage Notes
 * - Requires a Google OAuth connection with Calendar events scope.
 * - Fetches the existing event first, then applies only the provided updates. Omitted fields are left unchanged.
 * - Attendees are specified as one email address per line, and replace the entire attendee list.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
updateGoogleCalendarEvent(step: UpdateGoogleCalendarEventStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<UpdateGoogleCalendarEventStepOutput>>;
/**
 * [Google] Update Google Doc
 *
 * Update the contents of an existing Google Document.
 *
 * ## Usage Notes
 * - operationType controls how content is applied: "addToTop" prepends, "addToBottom" appends, "overwrite" replaces all content.
 * - textType determines how the text field is interpreted: "plain" for plain text, "html" for HTML markup, "markdown" for Markdown.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
updateGoogleDoc(step: UpdateGoogleDocStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<UpdateGoogleDocStepOutput>>;
/**
 * [Google] Update Google Sheet
 *
 * Update a Google Spreadsheet with new data.
 *
 * ## Usage Notes
 * - operationType controls how data is written: "addToBottom" appends rows, "overwrite" replaces all data, "range" writes to a specific cell range.
 * - Data should be provided as CSV in the text field.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
updateGoogleSheet(step: UpdateGoogleSheetStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<UpdateGoogleSheetStepOutput>>;
4521
/**
 * Upscale Image
 *
 * Increase the resolution of an image using AI upscaling.
 *
 * ## Usage Notes
 * - Output is re-hosted on the CDN as a PNG.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
upscaleImage(step: UpscaleImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<UpscaleImageStepOutput>>;
/**
 * Upscale Video
 *
 * Upscale a video file.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
upscaleVideo(step: UpscaleVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<UpscaleVideoStepOutput>>;
4539
/**
 * User Message
 *
 * Send a message to an AI model and return the response, or echo a system message.
 *
 * ## Usage Notes
 * - Source "user" sends the message to an LLM and returns the model's response.
 * - Source "system" echoes the message content directly (no AI call).
 * - Mode "background" saves the result to a variable. Mode "foreground" streams it to the user (not available in direct execution).
 * - Structured output (JSON/CSV) can be enforced via structuredOutputType and structuredOutputExample.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
userMessage(step: UserMessageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<UserMessageStepOutput>>;
4551
/**
 * Video Face Swap
 *
 * Swap faces in a video file.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
videoFaceSwap(step: VideoFaceSwapStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<VideoFaceSwapStepOutput>>;
/**
 * Remove Video Background
 *
 * Remove or replace background from a video.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
videoRemoveBackground(step: VideoRemoveBackgroundStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<VideoRemoveBackgroundStepOutput>>;
/**
 * Remove Video Watermark
 *
 * Remove a watermark from a video.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
videoRemoveWatermark(step: VideoRemoveWatermarkStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<VideoRemoveWatermarkStepOutput>>;
4578
/**
 * Watermark Image
 *
 * Overlay a watermark image onto another image.
 *
 * ## Usage Notes
 * - The watermark is placed at the specified corner with configurable padding and width.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
watermarkImage(step: WatermarkImageStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<WatermarkImageStepOutput>>;
/**
 * Watermark Video
 *
 * Add an image watermark to a video.
 *
 * @param step - Typed input for this step.
 * @param options - Optional app/thread reuse settings ({@link StepExecutionOptions}).
 */
watermarkVideo(step: WatermarkVideoStepInput, options?: StepExecutionOptions): Promise<StepExecutionResult<WatermarkVideoStepOutput>>;
4596
+ }
4597
+ }
4598
/**
 * Attaches the generated, typed step methods (e.g. `telegramSendMessage`,
 * `textToSpeech`) to the MindStudioAgent prototype.
 *
 * @internal
 * @param AgentClass - The MindStudioAgent constructor to augment.
 */
declare function applyStepMethods(AgentClass: new (...args: any[]) => any): void;
4600
+
4601
/** An AI model available on MindStudio. */
interface MindStudioModel {
    /** MindStudio identifier for the model. */
    id?: string;
    /** Display name of the model. */
    name?: string;
    /** Full model identifier from the provider. */
    rawName?: string;
    /** One of: `llm_chat`, `image_generation`, `video_generation`, `video_analysis`, `text_to_speech`, `vision`, `transcription`. */
    type?: "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";
    /** Name of the model's publisher/provider. */
    publisher?: string;
    /** Maximum temperature value the model accepts. */
    maxTemperature?: number;
    /** Maximum response size — presumably in tokens; confirm against the API docs. */
    maxResponseSize?: number;
    /** Accepted input types for this model (text, imageUrl, videoUrl, etc.). */
    inputs?: Record<string, unknown>[];
    /** Context window size — presumably in tokens; confirm against the API docs. */
    contextWindow?: number;
    /** Free-form tags associated with the model. */
    tags?: string;
}
4618
/**
 * Supported model type categories for filtering.
 * Mirrors the defined values of {@link MindStudioModel.type}.
 */
type ModelType = "llm_chat" | "image_generation" | "video_generation" | "video_analysis" | "text_to_speech" | "vision" | "transcription";
4620
// Module augmentation: merges discovery helper methods (models, connectors)
// into the MindStudioAgent class declared in client.js.
declare module "../client.js" {
    interface MindStudioAgent {
        /**
         * List all available AI models.
         *
         * Returns models across all categories (chat, image generation, video, etc.).
         * Use `listModelsByType()` to filter by category.
         *
         * @returns An object with the full list of models.
         */
        listModels(): Promise<{
            models: MindStudioModel[];
        }>;
        /**
         * List AI models filtered by type.
         *
         * @param modelType - The category to filter by (e.g. "llm_chat", "image_generation").
         * @returns An object with the models matching the requested category.
         */
        listModelsByType(modelType: ModelType): Promise<{
            models: MindStudioModel[];
        }>;
        /**
         * List all available connector services (Slack, Google, HubSpot, etc.).
         *
         * @returns An object with each service and its available actions.
         */
        listConnectors(): Promise<{
            services: Array<{
                service: Record<string, unknown>;
                actions: Record<string, unknown>[];
            }>;
        }>;
        /**
         * Get details for a single connector service.
         *
         * @param serviceId - The connector service ID.
         * @returns An object with the service details.
         */
        getConnector(serviceId: string): Promise<{
            service: Record<string, unknown>;
        }>;
    }
}
4658
/**
 * Attaches the discovery helper methods (`listModels`, `listConnectors`, etc.)
 * to the MindStudioAgent prototype.
 *
 * @internal
 * @param AgentClass - The MindStudioAgent constructor to augment.
 */
declare function applyHelperMethods(AgentClass: new (...args: any[]) => any): void;
4660
+
4661
/**
 * Client for the MindStudio direct step execution API.
 *
 * Create an instance and call typed step methods directly:
 *
 * ```ts
 * const agent = new MindStudioAgent({ apiKey: "your-key" });
 * const result = await agent.generateImage({ prompt: "a sunset", mode: "background" });
 * console.log(result.output.imageUrl);
 * ```
 *
 * Authentication is resolved in order:
 * 1. `apiKey` passed to the constructor
 * 2. `MINDSTUDIO_API_KEY` environment variable
 * 3. `CALLBACK_TOKEN` environment variable (auto-set inside MindStudio custom functions)
 *
 * Base URL is resolved in order:
 * 1. `baseUrl` passed to the constructor
 * 2. `MINDSTUDIO_BASE_URL` environment variable
 * 3. `REMOTE_HOSTNAME` environment variable (auto-set inside MindStudio custom functions)
 * 4. `https://v1.mindstudio-api.com` (production default)
 */
declare class MindStudioAgent {
    /** @internal Resolved base URL + bearer token used for every request. */
    readonly _httpConfig: HttpClientConfig;
    /**
     * @param options - Optional API key / base URL overrides; environment
     *   variables are consulted when omitted (see class docs for precedence).
     */
    constructor(options?: AgentOptions);
    /**
     * Execute any step by its type name. This is the low-level method that all
     * typed step methods delegate to. Use it as an escape hatch for step types
     * not yet covered by the generated methods.
     *
     * ```ts
     * const result = await agent.executeStep("generateImage", { prompt: "hello", mode: "background" });
     * ```
     *
     * @param stepType - The step type name (same casing as the typed methods).
     * @param step - The step's input payload.
     * @param options - Optional app/thread reuse settings.
     */
    executeStep<TOutput = unknown>(stepType: string, step: Record<string, unknown>, options?: StepExecutionOptions): Promise<StepExecutionResult<TOutput>>;
    /**
     * @internal Used by generated helper methods.
     * Issues a raw API request and resolves with the parsed body plus the
     * response headers.
     */
    _request<T>(method: 'GET' | 'POST', path: string, body?: unknown): Promise<{
        data: T;
        headers: Headers;
    }>;
}
4703
+
4704
/**
 * Error thrown when a MindStudio API request fails.
 *
 * Contains the HTTP status code, an error code from the API,
 * and any additional details returned in the response body.
 */
declare class MindStudioError extends Error {
    /** Machine-readable error code from the API (e.g. "invalid_step_config"). */
    readonly code: string;
    /** HTTP status code of the failed request. */
    readonly status: number;
    /** Raw error body from the API, if available. */
    readonly details?: unknown | undefined;
    /** Always "MindStudioError"; usable for narrowing without `instanceof`. */
    readonly name = "MindStudioError";
    /**
     * @param message - Human-readable error description.
     * @param code - Machine-readable error code from the API (e.g. "invalid_step_config").
     * @param status - HTTP status code of the failed request.
     * @param details - Raw error body from the API, if available.
     */
    constructor(message: string,
    /** Machine-readable error code from the API (e.g. "invalid_step_config"). */
    code: string,
    /** HTTP status code of the failed request. */
    status: number,
    /** Raw error body from the API, if available. */
    details?: unknown | undefined);
}
4726
+
4727
+ export { type ActiveCampaignAddNoteStepInput, type ActiveCampaignAddNoteStepOutput, type ActiveCampaignCreateContactStepInput, type ActiveCampaignCreateContactStepOutput, type AddSubtitlesToVideoStepInput, type AddSubtitlesToVideoStepOutput, type AgentOptions, type AirtableCreateUpdateRecordStepInput, type AirtableCreateUpdateRecordStepOutput, type AirtableDeleteRecordStepInput, type AirtableDeleteRecordStepOutput, type AirtableGetRecordStepInput, type AirtableGetRecordStepOutput, type AirtableGetTableRecordsStepInput, type AirtableGetTableRecordsStepOutput, type AnalyzeImageStepInput, type AnalyzeImageStepOutput, type AnalyzeVideoStepInput, type AnalyzeVideoStepOutput, type CaptureThumbnailStepInput, type CaptureThumbnailStepOutput, type CodaCreateUpdatePageStepInput, type CodaCreateUpdatePageStepOutput, type CodaCreateUpdateRowStepInput, type CodaCreateUpdateRowStepOutput, type CodaFindRowStepInput, type CodaFindRowStepOutput, type CodaGetPageStepInput, type CodaGetPageStepOutput, type CodaGetTableRowsStepInput, type CodaGetTableRowsStepOutput, type ConvertPdfToImagesStepInput, type ConvertPdfToImagesStepOutput, type CreateGoogleCalendarEventStepInput, type CreateGoogleCalendarEventStepOutput, type CreateGoogleDocStepInput, type CreateGoogleDocStepOutput, type CreateGoogleSheetStepInput, type CreateGoogleSheetStepOutput, type DeleteGoogleCalendarEventStepInput, type DeleteGoogleCalendarEventStepOutput, type DetectPIIStepInput, type DetectPIIStepOutput, type DownloadVideoStepInput, type DownloadVideoStepOutput, type EnhanceImageGenerationPromptStepInput, type EnhanceImageGenerationPromptStepOutput, type EnhanceVideoGenerationPromptStepInput, type EnhanceVideoGenerationPromptStepOutput, type EnrichPersonStepInput, type EnrichPersonStepOutput, type ExtractAudioFromVideoStepInput, type ExtractAudioFromVideoStepOutput, type ExtractTextStepInput, type ExtractTextStepOutput, type FetchGoogleDocStepInput, type FetchGoogleDocStepOutput, type FetchGoogleSheetStepInput, 
type FetchGoogleSheetStepOutput, type FetchSlackChannelHistoryStepInput, type FetchSlackChannelHistoryStepOutput, type FetchYoutubeCaptionsStepInput, type FetchYoutubeCaptionsStepOutput, type FetchYoutubeChannelStepInput, type FetchYoutubeChannelStepOutput, type FetchYoutubeCommentsStepInput, type FetchYoutubeCommentsStepOutput, type FetchYoutubeVideoStepInput, type FetchYoutubeVideoStepOutput, type GenerateChartStepInput, type GenerateChartStepOutput, type GenerateImageStepInput, type GenerateImageStepOutput, type GenerateLipsyncStepInput, type GenerateLipsyncStepOutput, type GenerateMusicStepInput, type GenerateMusicStepOutput, type GeneratePdfStepInput, type GeneratePdfStepOutput, type GenerateStaticVideoFromImageStepInput, type GenerateStaticVideoFromImageStepOutput, type GenerateVideoStepInput, type GenerateVideoStepOutput, type GetGoogleCalendarEventStepInput, type GetGoogleCalendarEventStepOutput, type GetMediaMetadataStepInput, type GetMediaMetadataStepOutput, type HttpRequestStepInput, type HttpRequestStepOutput, type HubspotCreateCompanyStepInput, type HubspotCreateCompanyStepOutput, type HubspotCreateContactStepInput, type HubspotCreateContactStepOutput, type HubspotGetCompanyStepInput, type HubspotGetCompanyStepOutput, type HubspotGetContactStepInput, type HubspotGetContactStepOutput, type HunterApiCompanyEnrichmentStepInput, type HunterApiCompanyEnrichmentStepOutput, type HunterApiDomainSearchStepInput, type HunterApiDomainSearchStepOutput, type HunterApiEmailFinderStepInput, type HunterApiEmailFinderStepOutput, type HunterApiEmailVerificationStepInput, type HunterApiEmailVerificationStepOutput, type HunterApiPersonEnrichmentStepInput, type HunterApiPersonEnrichmentStepOutput, type ImageFaceSwapStepInput, type ImageFaceSwapStepOutput, type ImageRemoveWatermarkStepInput, type ImageRemoveWatermarkStepOutput, type InsertVideoClipsStepInput, type InsertVideoClipsStepOutput, type ListGoogleCalendarEventsStepInput, type ListGoogleCalendarEventsStepOutput, 
type LogicStepInput, type LogicStepOutput, type MakeDotComRunScenarioStepInput, type MakeDotComRunScenarioStepOutput, type MergeAudioStepInput, type MergeAudioStepOutput, type MergeVideosStepInput, type MergeVideosStepOutput, MindStudioAgent, MindStudioError, type MindStudioModel, type MixAudioIntoVideoStepInput, type MixAudioIntoVideoStepOutput, type ModelType, type MuteVideoStepInput, type MuteVideoStepOutput, type N8nRunNodeStepInput, type N8nRunNodeStepOutput, type NotionCreatePageStepInput, type NotionCreatePageStepOutput, type NotionUpdatePageStepInput, type NotionUpdatePageStepOutput, type PeopleSearchStepInput, type PeopleSearchStepOutput, type PostToLinkedInStepInput, type PostToLinkedInStepOutput, type PostToSlackChannelStepInput, type PostToSlackChannelStepOutput, type PostToXStepInput, type PostToXStepOutput, type PostToZapierStepInput, type PostToZapierStepOutput, type QueryDataSourceStepInput, type QueryDataSourceStepOutput, type QueryExternalDatabaseStepInput, type QueryExternalDatabaseStepOutput, type RedactPIIStepInput, type RedactPIIStepOutput, type RemoveBackgroundFromImageStepInput, type RemoveBackgroundFromImageStepOutput, type ResizeVideoStepInput, type ResizeVideoStepOutput, type RunPackagedWorkflowStepInput, type RunPackagedWorkflowStepOutput, type RunWorkflowStepInput, type RunWorkflowStepOutput, type ScrapeFacebookPageStepInput, type ScrapeFacebookPageStepOutput, type ScrapeFacebookPostsStepInput, type ScrapeFacebookPostsStepOutput, type ScrapeInstagramCommentsStepInput, type ScrapeInstagramCommentsStepOutput, type ScrapeInstagramMentionsStepInput, type ScrapeInstagramMentionsStepOutput, type ScrapeInstagramPostsStepInput, type ScrapeInstagramPostsStepOutput, type ScrapeInstagramProfileStepInput, type ScrapeInstagramProfileStepOutput, type ScrapeInstagramReelsStepInput, type ScrapeInstagramReelsStepOutput, type ScrapeLinkedInCompanyStepInput, type ScrapeLinkedInCompanyStepOutput, type ScrapeLinkedInProfileStepInput, type 
ScrapeLinkedInProfileStepOutput, type ScrapeMetaThreadsProfileStepInput, type ScrapeMetaThreadsProfileStepOutput, type ScrapeUrlStepInput, type ScrapeUrlStepOutput, type ScrapeXPostStepInput, type ScrapeXPostStepOutput, type ScrapeXProfileStepInput, type ScrapeXProfileStepOutput, type SearchGoogleImagesStepInput, type SearchGoogleImagesStepOutput, type SearchGoogleNewsStepInput, type SearchGoogleNewsStepOutput, type SearchGoogleStepInput, type SearchGoogleStepOutput, type SearchGoogleTrendsStepInput, type SearchGoogleTrendsStepOutput, type SearchPerplexityStepInput, type SearchPerplexityStepOutput, type SearchXPostsStepInput, type SearchXPostsStepOutput, type SearchYoutubeStepInput, type SearchYoutubeStepOutput, type SearchYoutubeTrendsStepInput, type SearchYoutubeTrendsStepOutput, type SendEmailStepInput, type SendEmailStepOutput, type SendSMSStepInput, type SendSMSStepOutput, type SetRunTitleStepInput, type SetRunTitleStepOutput, type SetVariableStepInput, type SetVariableStepOutput, type StepExecutionOptions, type StepExecutionResult, type StepInputMap, type StepName, type StepOutputMap, type TelegramSendAudioStepInput, type TelegramSendAudioStepOutput, type TelegramSendFileStepInput, type TelegramSendFileStepOutput, type TelegramSendImageStepInput, type TelegramSendImageStepOutput, type TelegramSendMessageStepInput, type TelegramSendMessageStepOutput, type TelegramSendVideoStepInput, type TelegramSendVideoStepOutput, type TelegramSetTypingStepInput, type TelegramSetTypingStepOutput, type TextToSpeechStepInput, type TextToSpeechStepOutput, type TranscribeAudioStepInput, type TranscribeAudioStepOutput, type TrimMediaStepInput, type TrimMediaStepOutput, type UpdateGoogleCalendarEventStepInput, type UpdateGoogleCalendarEventStepOutput, type UpdateGoogleDocStepInput, type UpdateGoogleDocStepOutput, type UpdateGoogleSheetStepInput, type UpdateGoogleSheetStepOutput, type UpscaleImageStepInput, type UpscaleImageStepOutput, type UpscaleVideoStepInput, type 
UpscaleVideoStepOutput, type UserMessageStepInput, type UserMessageStepOutput, type VideoFaceSwapStepInput, type VideoFaceSwapStepOutput, type VideoRemoveBackgroundStepInput, type VideoRemoveBackgroundStepOutput, type VideoRemoveWatermarkStepInput, type VideoRemoveWatermarkStepOutput, type WatermarkImageStepInput, type WatermarkImageStepOutput, type WatermarkVideoStepInput, type WatermarkVideoStepOutput, applyHelperMethods, applyStepMethods };