@mindstudio-ai/agent 0.1.7 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -137,6 +137,1100 @@ function loadConfig() {
137
137
  }
138
138
  }
139
139
 
140
// src/auth/index.ts
var AuthContext = class {
  /** ID of the user making the current request. */
  userId;
  /** Role names the current user holds in this app. */
  roles;
  /** Every role assignment for this app (all users, all roles). */
  _roleAssignments;
  constructor(ctx) {
    this.userId = ctx.userId;
    this._roleAssignments = ctx.roleAssignments;
    // Collect just this user's role names from the full assignment list.
    const mine = [];
    for (const assignment of ctx.roleAssignments) {
      if (assignment.userId === ctx.userId) {
        mine.push(assignment.roleName);
      }
    }
    this.roles = mine;
  }
  /**
   * Check if the current user has **any** of the given roles.
   * Returns true if at least one matches.
   *
   * @example
   * ```ts
   * if (auth.hasRole(Roles.admin, Roles.approver)) {
   *   // user is an admin OR an approver
   * }
   * ```
   */
  hasRole(...roles) {
    const owned = this.roles;
    for (const candidate of roles) {
      if (owned.includes(candidate)) return true;
    }
    return false;
  }
  /**
   * Require the current user to have at least one of the given roles.
   * Throws a `MindStudioError` with code `'forbidden'` and status 403
   * if the user lacks all of the specified roles.
   *
   * Use this at the top of route handlers to gate access.
   *
   * @example
   * ```ts
   * auth.requireRole(Roles.admin);
   * // code below only runs if user is an admin
   * ```
   */
  requireRole(...roles) {
    if (this.hasRole(...roles)) return;
    throw new MindStudioError(
      `User does not have required role: ${roles.join(", ")}`,
      "forbidden",
      403
    );
  }
  /**
   * Get all user IDs that have the given role in this app.
   * Synchronous — scans the preloaded role assignments.
   *
   * @example
   * ```ts
   * const reviewers = auth.getUsersByRole(Roles.reviewer);
   * // ['user-id-1', 'user-id-2', ...]
   * ```
   */
  getUsersByRole(role) {
    const ids = [];
    for (const assignment of this._roleAssignments) {
      if (assignment.roleName === role) {
        ids.push(assignment.userId);
      }
    }
    return ids;
  }
};
203
// Identity proxy: `Roles.anything` evaluates to the string "anything".
// Lets app code reference role names with property syntax while the
// underlying value stays a plain string. Symbol keys yield undefined.
var Roles = new Proxy(
  {},
  {
    get(_target, prop) {
      return typeof prop === "string" ? prop : void 0;
    }
  }
);
212
+
213
// src/db/sql.ts
/**
 * Convert a JS value into a form the database can bind as a parameter:
 * null/undefined → null, booleans → 1/0 (SQLite has no boolean type),
 * numbers and strings pass through, anything else is JSON-stringified.
 */
function serializeParam(val) {
  if (val == null) return null;
  switch (typeof val) {
    case "boolean":
      return val ? 1 : 0;
    case "number":
    case "string":
      return val;
    default:
      return JSON.stringify(val);
  }
}
220
/**
 * Serialize a value for a specific column. String values bound to a
 * `user`-typed column are tagged with the `@@user@@` prefix so the
 * backend can recognize them; everything else defers to serializeParam.
 */
function serializeColumnParam(val, columnName, columns) {
  const col = columns.find((c) => c.name === columnName);
  const tagAsUser = col !== void 0 && col.type === "user" && typeof val === "string";
  return tagAsUser ? `@@user@@${val}` : serializeParam(val);
}
227
/**
 * Render a JS value as a SQL literal suitable for direct embedding.
 * Strings (and JSON-serialized objects) get single quotes doubled per
 * SQL escaping rules; booleans become 1/0; null/undefined become NULL.
 */
function escapeValue(val) {
  if (val == null) return "NULL";
  const quote = (s) => `'${s.replace(/'/g, "''")}'`;
  switch (typeof val) {
    case "boolean":
      return val ? "1" : "0";
    case "number":
      return String(val);
    case "string":
      return quote(val);
    default:
      return quote(JSON.stringify(val));
  }
}
235
var USER_PREFIX = "@@user@@";
/**
 * Convert a raw database row back into app-facing values:
 * - `user` columns stored as "@@user@@<id>" are unwrapped to the bare id;
 * - `json` columns stored as text are parsed; unparseable text passes
 *   through unchanged (best-effort);
 * - everything else passes through as-is.
 *
 * Columns are indexed by name once up front instead of scanning the
 * column list per cell (was O(cells × columns)). First occurrence wins,
 * matching the previous `columns.find` semantics.
 */
function deserializeRow(row, columns) {
  const colsByName = new Map();
  for (const c of columns) {
    if (!colsByName.has(c.name)) colsByName.set(c.name, c);
  }
  const result = {};
  for (const [key, value] of Object.entries(row)) {
    const col = colsByName.get(key);
    if (col?.type === "user" && typeof value === "string" && value.startsWith(USER_PREFIX)) {
      result[key] = value.slice(USER_PREFIX.length);
    } else if (col?.type === "json" && typeof value === "string") {
      try {
        result[key] = JSON.parse(value);
      } catch {
        // Stored text isn't valid JSON — surface it untouched.
        result[key] = value;
      }
    } else {
      result[key] = value;
    }
  }
  return result;
}
254
/**
 * Assemble a SELECT statement with optional WHERE / ORDER BY / LIMIT /
 * OFFSET clauses. Returns { sql, params } where params is undefined when
 * no bind parameters are needed.
 */
function buildSelect(table, options = {}) {
  const { where, whereParams, orderBy, desc, limit, offset } = options;
  const parts = [`SELECT * FROM ${table}`];
  const params = [];
  if (where) {
    parts.push(`WHERE ${where}`);
    if (whereParams) params.push(...whereParams);
  }
  if (orderBy) parts.push(`ORDER BY ${orderBy}${desc ? " DESC" : " ASC"}`);
  if (limit != null) parts.push(`LIMIT ${limit}`);
  if (offset != null) parts.push(`OFFSET ${offset}`);
  return { sql: parts.join(" "), params: params.length > 0 ? params : void 0 };
}
266
/** Assemble a COUNT(*) query, optionally constrained by a WHERE clause. */
function buildCount(table, where, whereParams) {
  const sql = where
    ? `SELECT COUNT(*) as count FROM ${table} WHERE ${where}`
    : `SELECT COUNT(*) as count FROM ${table}`;
  return { sql, params: whereParams?.length ? whereParams : void 0 };
}
271
/**
 * Assemble an EXISTS (or NOT EXISTS, when `negate` is true) probe that
 * returns a single row with a 0/1 `result` column.
 */
function buildExists(table, where, whereParams, negate) {
  let inner = `SELECT 1 FROM ${table}`;
  if (where) inner += ` WHERE ${where}`;
  const wrapper = negate ? "NOT EXISTS" : "EXISTS";
  return {
    sql: `SELECT ${wrapper}(${inner}) as result`,
    params: whereParams?.length ? whereParams : void 0
  };
}
276
/**
 * Assemble an INSERT ... RETURNING * statement from a data object.
 * System-managed columns are stripped first; each remaining value is
 * serialized per its column type and bound as a `?` placeholder.
 */
function buildInsert(table, data, columns) {
  const writable = stripSystemColumns(data);
  const names = Object.keys(writable);
  const params = names.map((name) => serializeColumnParam(writable[name], name, columns));
  const placeholders = names.map(() => "?").join(", ");
  return {
    sql: `INSERT INTO ${table} (${names.join(", ")}) VALUES (${placeholders}) RETURNING *`,
    params
  };
}
286
/**
 * Assemble an UPDATE ... WHERE id = ? RETURNING * statement.
 * System-managed columns are stripped; the row id is bound as the final
 * parameter for the WHERE clause.
 */
function buildUpdate(table, id, data, columns) {
  const writable = stripSystemColumns(data);
  const names = Object.keys(writable);
  const assignments = names.map((name) => `${name} = ?`).join(", ");
  const params = names.map((name) => serializeColumnParam(writable[name], name, columns));
  params.push(id); // bound by the trailing `WHERE id = ?`
  return {
    sql: `UPDATE ${table} SET ${assignments} WHERE id = ? RETURNING *`,
    params
  };
}
300
/** Assemble a DELETE statement, optionally constrained by a WHERE clause. */
function buildDelete(table, where, whereParams) {
  const sql = where ? `DELETE FROM ${table} WHERE ${where}` : `DELETE FROM ${table}`;
  return { sql, params: whereParams?.length ? whereParams : void 0 };
}
305
// Columns the server manages itself; writes from app code must never
// touch them.
var SYSTEM_COLUMNS = /* @__PURE__ */ new Set(["id", "createdAt", "updatedAt", "lastUpdatedBy"]);
/**
 * Return a copy of `data` with server-managed columns removed, so callers
 * cannot overwrite ids or audit fields.
 */
function stripSystemColumns(data) {
  return Object.fromEntries(
    Object.entries(data).filter(([key]) => !SYSTEM_COLUMNS.has(key))
  );
}
315
+
316
// src/db/predicate.ts
/**
 * Try to translate a JS filter predicate into a SQL WHERE clause by
 * parsing the function's own source text. Any failure at any stage —
 * unparseable parameter, unsupported body, leftover tokens, or an AST
 * node with no SQL equivalent — falls back to `{ type: "js", fn }`,
 * meaning rows get filtered in JS instead.
 */
function compilePredicate(fn) {
  const fallback = { type: "js", fn };
  try {
    const source = fn.toString();
    const paramName = extractParamName(source);
    if (!paramName) return fallback;
    const body = extractBody(source);
    if (!body) return fallback;
    const tokens = tokenize(body);
    if (tokens.length === 0) return fallback;
    const parser = new Parser(tokens, paramName, fn);
    const ast = parser.parseExpression();
    if (!ast) return fallback;
    // Trailing unconsumed tokens mean we only understood a prefix —
    // compiling that prefix would change semantics, so bail out.
    if (parser.pos < tokens.length) return fallback;
    const where = compileNode(ast);
    if (!where) return fallback;
    return { type: "sql", where };
  } catch {
    return fallback;
  }
}
337
/**
 * Pull the parameter name out of an arrow function's source text.
 * Handles `x => ...`, `(x) => ...`, and a typed param like `(x: T) =>`.
 * Returns null when the source doesn't start with a single-parameter
 * arrow function.
 */
function extractParamName(source) {
  const ARROW_PARAM = /^\s*(?:\(?\s*([a-zA-Z_$][a-zA-Z0-9_$]*)\s*(?::[^)]*?)?\)?\s*=>)/;
  const match = ARROW_PARAM.exec(source);
  return match ? match[1] : null;
}
343
/**
 * Extract the expression body from an arrow function's source.
 * Concise bodies (`o => expr`) are returned directly; block bodies are
 * supported only when they consist of a single `return expr;` statement.
 * Returns null for anything else.
 */
function extractBody(source) {
  const arrowIdx = source.indexOf("=>");
  if (arrowIdx === -1) return null;
  const raw = source.slice(arrowIdx + 2).trim();
  if (!raw.startsWith("{")) {
    return raw || null;
  }
  const returnOnly = raw.match(/^\{\s*return\s+([\s\S]+?)\s*;?\s*\}$/);
  if (!returnOnly) return null;
  return returnOnly[1].trim() || null;
}
354
/**
 * Lexer for the supported predicate-expression subset.
 * Emits tokens for strings, numbers (including negative literals),
 * comparison/logical operators, punctuation, and identifiers.
 * Returns [] — meaning "cannot compile to SQL" — on anything it does not
 * understand: template literals, unterminated strings, or any other
 * unsupported character.
 */
function tokenize(expr) {
  const PUNCT = {
    ".": "dot",
    "(": "lparen",
    ")": "rparen",
    "[": "lbracket",
    "]": "rbracket",
    ",": "comma"
  };
  const TWO_CHAR_OPS = ["==", "!=", "<=", ">=", "&&", "||"];
  const tokens = [];
  let i = 0;
  while (i < expr.length) {
    const ch = expr[i];
    if (/\s/.test(ch)) {
      i++;
      continue;
    }
    if (ch === "'" || ch === '"') {
      // Quoted string; backslash escapes the next character.
      let value = "";
      i++;
      while (i < expr.length && expr[i] !== ch) {
        if (expr[i] === "\\") {
          i++;
          if (i < expr.length) value += expr[i];
        } else {
          value += expr[i];
        }
        i++;
      }
      if (i >= expr.length) return []; // unterminated string
      i++; // skip closing quote
      tokens.push({ type: "string", value });
      continue;
    }
    if (ch === "`") return []; // template literals unsupported
    const startsNumber = /[0-9]/.test(ch) || ch === "-" && i + 1 < expr.length && /[0-9]/.test(expr[i + 1]);
    if (startsNumber) {
      let value = ch;
      i++;
      while (i < expr.length && /[0-9.]/.test(expr[i])) {
        value += expr[i];
        i++;
      }
      tokens.push({ type: "number", value });
      continue;
    }
    const three = expr.slice(i, i + 3);
    if (three === "===" || three === "!==") {
      tokens.push({ type: "operator", value: three });
      i += 3;
      continue;
    }
    const two = expr.slice(i, i + 2);
    if (TWO_CHAR_OPS.includes(two)) {
      tokens.push({ type: "operator", value: two });
      i += 2;
      continue;
    }
    if (ch === "!" || ch === "<" || ch === ">") {
      tokens.push({ type: "operator", value: ch });
      i++;
      continue;
    }
    if (ch in PUNCT) {
      tokens.push({ type: PUNCT[ch], value: ch });
      i++;
      continue;
    }
    if (/[a-zA-Z_$]/.test(ch)) {
      let value = ch;
      i++;
      while (i < expr.length && /[a-zA-Z0-9_$]/.test(expr[i])) {
        value += expr[i];
        i++;
      }
      tokens.push({ type: "identifier", value });
      continue;
    }
    return []; // unsupported character
  }
  return tokens;
}
451
var Parser = class {
  /**
   * Recursive-descent parser over the tokens produced by tokenize().
   * Builds a small AST (comparison / nullCheck / in / like / booleanField /
   * logical / not) that compileNode() turns into a SQL WHERE fragment.
   * Every rule returns null on failure; the caller then falls back to
   * filtering rows in JS, so a failed parse is never worse than a scan.
   */
  constructor(tokens, paramName, originalFn) {
    this.tokens = tokens;
    this.paramName = paramName;
    // Retained for closure-variable resolution (see resolveClosureVariable).
    this.originalFn = originalFn;
  }
  /** Index of the next unconsumed token. */
  pos = 0;
  /** Peek at the current token without consuming it. */
  peek() {
    return this.tokens[this.pos];
  }
  /** Consume the current token and advance. */
  advance() {
    return this.tokens[this.pos++];
  }
  /** Check if the current token matches an expected type and value. */
  match(type, value) {
    const t = this.peek();
    if (!t) return false;
    if (t.type !== type) return false;
    if (value !== void 0 && t.value !== value) return false;
    return true;
  }
  /** Consume a token if it matches, otherwise return false. */
  eat(type, value) {
    if (this.match(type, value)) {
      this.advance();
      return true;
    }
    return false;
  }
  // --- Grammar rules ---
  /** Entry point: parse a full expression. */
  parseExpression() {
    return this.parseOr();
  }
  /** or_expr → and_expr ( '||' and_expr )* */
  parseOr() {
    let left = this.parseAnd();
    if (!left) return null;
    while (this.match("operator", "||")) {
      this.advance();
      const right = this.parseAnd();
      if (!right) return null;
      left = { kind: "logical", operator: "OR", left, right };
    }
    return left;
  }
  /** and_expr → not_expr ( '&&' not_expr )* */
  parseAnd() {
    let left = this.parseNot();
    if (!left) return null;
    while (this.match("operator", "&&")) {
      this.advance();
      const right = this.parseNot();
      if (!right) return null;
      left = { kind: "logical", operator: "AND", left, right };
    }
    return left;
  }
  /** not_expr → '!' not_expr | primary */
  parseNot() {
    if (this.match("operator", "!")) {
      this.advance();
      if (this.match("lparen")) {
        this.advance();
        const inner2 = this.parseExpression();
        if (!inner2) return null;
        if (!this.eat("rparen")) return null;
        return { kind: "not", operand: inner2 };
      }
      const inner = this.parsePrimary();
      if (!inner) return null;
      // `!o.flag` folds into the boolean-field node instead of wrapping
      // it in a NOT, producing `flag = 0` rather than `NOT (flag = 1)`.
      if (inner.kind === "booleanField") {
        return { ...inner, negated: !inner.negated };
      }
      return { kind: "not", operand: inner };
    }
    return this.parsePrimary();
  }
  /**
   * primary → field_comparison | null_check | includes_expr | paren_expr | boolean_field
   *
   * This is the workhorse — handles the different patterns that can appear
   * as atomic expressions within a larger &&/|| combination.
   */
  parsePrimary() {
    if (this.match("lparen")) {
      this.advance();
      const inner = this.parseExpression();
      if (!inner) return null;
      if (!this.eat("rparen")) return null;
      return inner;
    }
    if (this.match("lbracket")) {
      return this.parseArrayIncludes();
    }
    if (this.match("identifier", this.paramName)) {
      return this.parseFieldExpression();
    }
    if (this.match("identifier")) {
      return this.parseNonParamExpression();
    }
    return null;
  }
  /**
   * Parse an expression that starts with the parameter name (e.g. `o.field`).
   *
   * Could be:
   * - `o.field === value` (comparison)
   * - `o.field != null` (null check)
   * - `o.field.includes('text')` (LIKE)
   * - `o.field` alone (boolean field check)
   */
  parseFieldExpression() {
    this.advance();
    const field = this.parseFieldPath();
    if (!field) return null;
    const next = this.peek();
    if (next?.type === "dot" && this.lookAheadForIncludes()) {
      return this.parseFieldIncludes(field);
    }
    if (next?.type === "operator" && isComparisonOp(next.value)) {
      return this.parseComparison(field);
    }
    return { kind: "booleanField", field, negated: false };
  }
  /**
   * Parse a dot-separated field path after the parameter name.
   * `o.status` → `"status"`, `o.address.city` → `"json_extract(address, '$.city')"`.
   *
   * Stops BEFORE a trailing `.includes(` so parseFieldExpression can
   * recognize `o.field.includes('text')` as a LIKE. Previously the path
   * walk swallowed `includes` as a JSON-path segment, which made the
   * LIKE translation unreachable and forced every `.includes` predicate
   * down the JS-scan fallback.
   */
  parseFieldPath() {
    if (!this.eat("dot")) return null;
    if (!this.match("identifier")) return null;
    const parts = [this.advance().value];
    while (this.match("dot") && this.tokens[this.pos + 1]?.type === "identifier") {
      // `.includes(` is a method call, not a path segment — leave it for
      // the caller. A plain `.includes` property (no lparen) still counts
      // as a path segment.
      if (this.tokens[this.pos + 1].value === "includes" && this.tokens[this.pos + 2]?.type === "lparen") {
        break;
      }
      this.advance();
      parts.push(this.advance().value);
    }
    if (parts.length === 1) {
      return parts[0];
    }
    const root = parts[0];
    const jsonPath = "$." + parts.slice(1).join(".");
    return `json_extract(${root}, '${jsonPath}')`;
  }
  /**
   * Parse a comparison: `field OP value`.
   * The field has already been parsed; we need the operator and right-hand value.
   */
  parseComparison(field) {
    const opToken = this.advance();
    const jsOp = opToken.value;
    const value = this.parseValue();
    if (value === PARSE_FAILED) return null;
    if (value === null || value === void 0) {
      // Comparisons against null/undefined become IS [NOT] NULL checks;
      // ordering operators (< >) have no null semantics we can express.
      if (jsOp === "===" || jsOp === "==") {
        return { kind: "nullCheck", field, isNull: true };
      }
      if (jsOp === "!==" || jsOp === "!=") {
        return { kind: "nullCheck", field, isNull: false };
      }
      return null;
    }
    const sqlOp = JS_TO_SQL_OP[jsOp];
    if (!sqlOp) return null;
    return { kind: "comparison", field, operator: sqlOp, value };
  }
  /**
   * Parse `o.field.includes('text')` → LIKE expression.
   * The field name has already been parsed.
   */
  parseFieldIncludes(field) {
    this.advance();
    this.advance();
    if (!this.eat("lparen")) return null;
    const value = this.parseValue();
    if (value === PARSE_FAILED || typeof value !== "string") return null;
    if (!this.eat("rparen")) return null;
    // Escape LIKE wildcards so the needle matches literally.
    const escaped = value.replace(/%/g, "\\%").replace(/_/g, "\\_");
    return { kind: "like", field, pattern: `%${escaped}%` };
  }
  /**
   * Parse `['a', 'b', 'c'].includes(o.field)` → IN expression.
   * The opening bracket has been peeked but not consumed.
   */
  parseArrayIncludes() {
    this.advance();
    const values = [];
    while (!this.match("rbracket")) {
      if (values.length > 0) {
        if (!this.eat("comma")) return null;
      }
      const val = this.parseValue();
      if (val === PARSE_FAILED) return null;
      values.push(val);
    }
    this.advance();
    if (!this.eat("dot")) return null;
    if (!this.match("identifier", "includes")) return null;
    this.advance();
    if (!this.eat("lparen")) return null;
    if (!this.match("identifier", this.paramName)) return null;
    this.advance();
    const field = this.parseFieldPath();
    if (!field) return null;
    if (!this.eat("rparen")) return null;
    return { kind: "in", field, values };
  }
  /**
   * Parse an expression that starts with an identifier that is NOT the
   * parameter name. This could be:
   * - A keyword literal: `true`, `false`, `null`, `undefined`
   * - A closure variable used in a comparison (handled by backtracking)
   *
   * NOTE(review): currently every path returns null, so any bare
   * non-parameter identifier aborts SQL compilation and falls back to JS.
   */
  parseNonParamExpression() {
    const ident = this.peek().value;
    if (ident === "true" || ident === "false") return null;
    return null;
  }
  /**
   * Parse a literal value or closure variable reference.
   *
   * Returns the parsed value, or PARSE_FAILED if parsing fails.
   * Returns `null` or `undefined` for those keyword literals.
   */
  parseValue() {
    const t = this.peek();
    if (!t) return PARSE_FAILED;
    if (t.type === "string") {
      this.advance();
      return t.value;
    }
    if (t.type === "number") {
      this.advance();
      return Number(t.value);
    }
    if (t.type === "identifier") {
      if (t.value === "true") {
        this.advance();
        return true;
      }
      if (t.value === "false") {
        this.advance();
        return false;
      }
      if (t.value === "null") {
        this.advance();
        return null;
      }
      if (t.value === "undefined") {
        this.advance();
        return void 0;
      }
      return this.resolveClosureVariable();
    }
    if (t.type === "operator" && t.value === "-") {
      this.advance();
      const next = this.peek();
      if (next?.type === "number") {
        this.advance();
        return -Number(next.value);
      }
      return PARSE_FAILED;
    }
    return PARSE_FAILED;
  }
  /**
   * Attempt to resolve a closure variable by invoking the original function
   * with a recording Proxy and inspecting what values it compares against.
   *
   * This handles the common pattern:
   * ```ts
   * const userId = auth.userId;
   * orders.filter(o => o.requestedBy === userId)
   * ```
   *
   * NOTE(review): resolution is not actually implemented — the recording
   * proxy is constructed but never invoked (`void proxy`), so closure
   * variables always return PARSE_FAILED and force the JS fallback.
   */
  resolveClosureVariable() {
    const identToken = this.advance();
    let closureExpr = identToken.value;
    while (this.match("dot") && this.tokens[this.pos + 1]?.type === "identifier") {
      this.advance();
      closureExpr += "." + this.advance().value;
    }
    try {
      const MARKER = /* @__PURE__ */ Symbol("field_access_marker");
      const accessed = [];
      const proxy = new Proxy(
        {},
        {
          get(_, prop) {
            accessed.push(prop);
            return new Proxy(() => MARKER, {
              get(_2, nestedProp) {
                accessed.push(nestedProp);
                return MARKER;
              }
            });
          }
        }
      );
      void proxy;
      return PARSE_FAILED;
    } catch {
      return PARSE_FAILED;
    }
  }
  /**
   * Look ahead to check if the next tokens form `.includes(`.
   * Used to disambiguate `o.field.includes(...)` from `o.field.nested`.
   */
  lookAheadForIncludes() {
    return this.tokens[this.pos]?.type === "dot" && this.tokens[this.pos + 1]?.type === "identifier" && this.tokens[this.pos + 1]?.value === "includes" && this.tokens[this.pos + 2]?.type === "lparen";
  }
};
771
/**
 * Compile a predicate AST node into a SQL WHERE-clause fragment.
 * Returns null when the node (or any child) cannot be expressed in SQL.
 */
function compileNode(node) {
  switch (node.kind) {
    case "comparison":
      return `${node.field} ${node.operator} ${escapeValue(node.value)}`;
    case "nullCheck":
      return `${node.field} ${node.isNull ? "IS NULL" : "IS NOT NULL"}`;
    case "in": {
      // `IN ()` is invalid SQL; an empty list can never match, so emit a
      // constant-false expression instead.
      if (node.values.length === 0) return "0";
      const vals = node.values.map(escapeValue).join(", ");
      return `${node.field} IN (${vals})`;
    }
    case "like":
      // parseFieldIncludes backslash-escapes % and _ in the needle, so we
      // must declare '\' as the LIKE escape character — SQLite's LIKE has
      // no default escape char, and without this clause '\%' would match a
      // literal backslash followed by anything instead of a literal '%'.
      return `${node.field} LIKE ${escapeValue(node.pattern)} ESCAPE '\\'`;
    case "booleanField":
      // Booleans are stored as 1/0 in the database.
      return node.negated ? `${node.field} = 0` : `${node.field} = 1`;
    case "logical": {
      const left = compileNode(node.left);
      const right = compileNode(node.right);
      if (!left || !right) return null;
      return `(${left} ${node.operator} ${right})`;
    }
    case "not": {
      const inner = compileNode(node.operand);
      if (!inner) return null;
      return `NOT (${inner})`;
    }
    default:
      return null;
  }
}
801
// Mapping from JS comparison operators to their SQL equivalents.
// Strict and loose equality intentionally collapse to the same SQL.
var JS_TO_SQL_OP = {
  "===": "=",
  "==": "=",
  "!==": "!=",
  "!=": "!=",
  "<": "<",
  ">": ">",
  "<=": "<=",
  ">=": ">="
};
// Sentinel returned by parseValue when a literal cannot be parsed —
// distinct from null/undefined, which are themselves valid parsed values.
var PARSE_FAILED = /* @__PURE__ */ Symbol("PARSE_FAILED");
/**
 * True when `value` is a JS comparison operator we can translate to SQL.
 * Uses an own-property check rather than `in`, which would also match
 * inherited Object.prototype keys such as "toString" or "constructor".
 */
function isComparisonOp(value) {
  return Object.prototype.hasOwnProperty.call(JS_TO_SQL_OP, value);
}
815
+
816
// src/db/query.ts
/**
 * Lazily-built, immutable query over one table.
 * Chain methods (filter/sortBy/reverse/take/skip) return new Query
 * instances; terminal methods (first/last/count/some/every/min/max/groupBy)
 * execute. The class is PromiseLike, so awaiting a Query runs _execute().
 * Predicates that compilePredicate() can translate run entirely in SQL;
 * otherwise every row is fetched and filtered in JS (with a console warning).
 */
var Query = class _Query {
  // JS predicate functions accumulated by filter(); compiled to SQL when possible.
  _predicates;
  // Row accessor used for ordering, e.g. `o => o.createdAt`.
  _sortAccessor;
  // When true, sort direction is flipped (DESC on the SQL path).
  _reversed;
  // LIMIT value; undefined means unset.
  _limit;
  // OFFSET value; undefined means unset.
  _offset;
  // Table name, column metadata, and the executeBatch() executor.
  _config;
  constructor(config, options) {
    this._config = config;
    this._predicates = options?.predicates ?? [];
    this._sortAccessor = options?.sortAccessor;
    this._reversed = options?.reversed ?? false;
    this._limit = options?.limit;
    this._offset = options?.offset;
  }
  // Copy this query with selected settings overridden; the receiver is
  // never mutated, which is what makes chains safely shareable.
  _clone(overrides) {
    return new _Query(this._config, {
      predicates: overrides.predicates ?? this._predicates,
      sortAccessor: overrides.sortAccessor ?? this._sortAccessor,
      reversed: overrides.reversed ?? this._reversed,
      limit: overrides.limit ?? this._limit,
      offset: overrides.offset ?? this._offset
    });
  }
  // -------------------------------------------------------------------------
  // Chain methods
  // -------------------------------------------------------------------------
  /** Add a predicate; all accumulated predicates are ANDed together. */
  filter(predicate) {
    return this._clone({ predicates: [...this._predicates, predicate] });
  }
  /** Set the sort accessor (replaces any previous one). */
  sortBy(accessor) {
    return this._clone({ sortAccessor: accessor });
  }
  /** Flip the current sort direction. */
  reverse() {
    return this._clone({ reversed: !this._reversed });
  }
  /** Cap the number of returned rows. */
  take(n) {
    return this._clone({ limit: n });
  }
  /** Skip the first n rows. */
  skip(n) {
    return this._clone({ offset: n });
  }
  // -------------------------------------------------------------------------
  // Terminal methods
  // -------------------------------------------------------------------------
  /** First matching row, or null. */
  async first() {
    const rows = await this._clone({ limit: 1 })._execute();
    return rows[0] ?? null;
  }
  /** Last matching row (first of the reversed order), or null. */
  async last() {
    const rows = await this._clone({ limit: 1, reversed: !this._reversed })._execute();
    return rows[0] ?? null;
  }
  /** Count matching rows — in SQL when all predicates compiled, else in JS. */
  async count() {
    const compiled = this._compilePredicates();
    if (compiled.allSql) {
      const query = buildCount(
        this._config.tableName,
        compiled.sqlWhere || void 0
      );
      const results = await this._config.executeBatch([query]);
      const row = results[0]?.rows[0];
      return row?.count ?? 0;
    }
    const rows = await this._fetchAndFilterInJs(compiled);
    return rows.length;
  }
  /** True if at least one row matches. */
  async some() {
    const compiled = this._compilePredicates();
    if (compiled.allSql) {
      const query = buildExists(
        this._config.tableName,
        compiled.sqlWhere || void 0
      );
      const results = await this._config.executeBatch([query]);
      const row = results[0]?.rows[0];
      return row?.result === 1;
    }
    const rows = await this._fetchAndFilterInJs(compiled);
    return rows.length > 0;
  }
  /** True if every row matches (vacuously true with no predicates). */
  async every() {
    const compiled = this._compilePredicates();
    if (compiled.allSql && compiled.sqlWhere) {
      // "every row matches W" ⇔ "no row matches NOT W".
      const query = buildExists(
        this._config.tableName,
        `NOT (${compiled.sqlWhere})`,
        void 0,
        true
      );
      const results = await this._config.executeBatch([query]);
      const row = results[0]?.rows[0];
      return row?.result === 1;
    }
    if (this._predicates.length === 0) return true;
    const allRows = await this._fetchAllRows();
    return allRows.every(
      (row) => this._predicates.every((pred) => pred(row))
    );
  }
  /** Row with the smallest accessor value, or null. */
  async min(accessor) {
    return this.sortBy(accessor).first();
  }
  /** Row with the largest accessor value, or null. */
  async max(accessor) {
    return this.sortBy(accessor).reverse().first();
  }
  /** Execute and bucket the resulting rows by the accessor's value. */
  async groupBy(accessor) {
    const rows = await this._execute();
    const map = /* @__PURE__ */ new Map();
    for (const row of rows) {
      const key = accessor(row);
      const group = map.get(key);
      if (group) {
        group.push(row);
      } else {
        map.set(key, [row]);
      }
    }
    return map;
  }
  // -------------------------------------------------------------------------
  // PromiseLike
  // -------------------------------------------------------------------------
  // Makes `await query` equivalent to `await query._execute()`.
  then(onfulfilled, onrejected) {
    return this._execute().then(onfulfilled, onrejected);
  }
  // -------------------------------------------------------------------------
  // Execution internals
  // -------------------------------------------------------------------------
  // Run the query. SQL path: push WHERE/ORDER BY/LIMIT/OFFSET into one
  // statement. JS path: fetch everything, then filter/sort/slice locally.
  async _execute() {
    const compiled = this._compilePredicates();
    if (compiled.allSql) {
      const sortField = this._sortAccessor ? extractFieldName(this._sortAccessor) : void 0;
      // NOTE(review): if the sort accessor can't be reduced to a plain
      // column name, extractFieldName returns null and the ORDER BY is
      // silently dropped on this SQL path (rows come back unsorted, with
      // LIMIT/OFFSET still applied) — confirm whether a JS-sort fallback
      // was intended here.
      const query = buildSelect(this._config.tableName, {
        where: compiled.sqlWhere || void 0,
        orderBy: sortField ?? void 0,
        desc: this._reversed,
        limit: this._limit,
        offset: this._offset
      });
      const results = await this._config.executeBatch([query]);
      return results[0].rows.map(
        (row) => deserializeRow(
          row,
          this._config.columns
        )
      );
    }
    let rows = await this._fetchAndFilterInJs(compiled);
    if (this._sortAccessor) {
      const accessor = this._sortAccessor;
      rows.sort((a, b) => {
        const aVal = accessor(a);
        const bVal = accessor(b);
        if (aVal < bVal) return this._reversed ? 1 : -1;
        if (aVal > bVal) return this._reversed ? -1 : 1;
        return 0;
      });
    }
    // Apply LIMIT/OFFSET after filtering and sorting, mirroring SQL order.
    if (this._offset != null || this._limit != null) {
      const start = this._offset ?? 0;
      const end = this._limit != null ? start + this._limit : void 0;
      rows = rows.slice(start, end);
    }
    return rows;
  }
  // Compile every predicate; allSql is true only when every one of them
  // translated, in which case sqlWhere is their AND-joined WHERE text.
  _compilePredicates() {
    if (this._predicates.length === 0) {
      return { allSql: true, sqlWhere: "", compiled: [] };
    }
    const compiled = this._predicates.map((pred) => compilePredicate(pred));
    const allSql = compiled.every((c) => c.type === "sql");
    let sqlWhere = "";
    if (allSql) {
      sqlWhere = compiled.map((c) => c.where).join(" AND ");
    }
    return { allSql, sqlWhere, compiled };
  }
  // Fallback path: fetch the whole table and apply the original JS
  // predicates locally. Warns so full-table scans are visible in logs.
  async _fetchAndFilterInJs(compiled) {
    const allRows = await this._fetchAllRows();
    if (compiled.compiled.some((c) => c.type === "js")) {
      console.warn(
        `[mindstudio] Filter on ${this._config.tableName} could not be compiled to SQL \u2014 scanning ${allRows.length} rows in JS`
      );
    }
    return allRows.filter(
      (row) => this._predicates.every((pred) => pred(row))
    );
  }
  // Unfiltered SELECT * over the table, deserialized per column metadata.
  async _fetchAllRows() {
    const query = buildSelect(this._config.tableName);
    const results = await this._config.executeBatch([query]);
    return results[0].rows.map(
      (row) => deserializeRow(row, this._config.columns)
    );
  }
};
1014
/**
 * Recover a plain column name from a trivial accessor like `o => o.name`.
 * Returns null for anything more complex (nested paths, expressions),
 * in which case callers fall back to sorting in JS.
 */
function extractFieldName(accessor) {
  const SIMPLE_ACCESSOR = /^\s*\(?([a-zA-Z_$][a-zA-Z0-9_$]*)\)?\s*=>\s*\1\.([a-zA-Z_$][a-zA-Z0-9_$]*)\s*$/;
  const match = SIMPLE_ACCESSOR.exec(accessor.toString());
  return match ? match[2] : null;
}
1021
+
1022
+ // src/db/table.ts
1023
+ var Table = class {
1024
+ /** @internal */
1025
+ _config;
1026
+ constructor(config) {
1027
+ this._config = config;
1028
+ }
1029
+ // -------------------------------------------------------------------------
1030
+ // Reads — direct
1031
+ // -------------------------------------------------------------------------
1032
+ async get(id) {
1033
+ const query = buildSelect(this._config.tableName, {
1034
+ where: `id = ?`,
1035
+ whereParams: [id],
1036
+ limit: 1
1037
+ });
1038
+ const results = await this._config.executeBatch([query]);
1039
+ if (results[0].rows.length === 0) return null;
1040
+ return deserializeRow(
1041
+ results[0].rows[0],
1042
+ this._config.columns
1043
+ );
1044
+ }
1045
+ async findOne(predicate) {
1046
+ return this.filter(predicate).first();
1047
+ }
1048
+ async count(predicate) {
1049
+ if (predicate) return this.filter(predicate).count();
1050
+ const query = buildCount(this._config.tableName);
1051
+ const results = await this._config.executeBatch([query]);
1052
+ const row = results[0]?.rows[0];
1053
+ return row?.count ?? 0;
1054
+ }
1055
+ async some(predicate) {
1056
+ return this.filter(predicate).some();
1057
+ }
1058
+ async every(predicate) {
1059
+ return this.filter(predicate).every();
1060
+ }
1061
+ async isEmpty() {
1062
+ const query = buildExists(this._config.tableName, void 0, void 0, true);
1063
+ const results = await this._config.executeBatch([query]);
1064
+ const row = results[0]?.rows[0];
1065
+ return row?.result === 1;
1066
+ }
1067
+ async min(accessor) {
1068
+ return this.sortBy(accessor).first();
1069
+ }
1070
+ async max(accessor) {
1071
+ return this.sortBy(accessor).reverse().first();
1072
+ }
1073
+ async groupBy(accessor) {
1074
+ return new Query(this._config).groupBy(accessor);
1075
+ }
1076
+ // -------------------------------------------------------------------------
1077
+ // Reads — chainable
1078
+ // -------------------------------------------------------------------------
1079
+ filter(predicate) {
1080
+ return new Query(this._config).filter(predicate);
1081
+ }
1082
+ sortBy(accessor) {
1083
+ return new Query(this._config).sortBy(accessor);
1084
+ }
1085
+ async push(data) {
1086
+ const isArray = Array.isArray(data);
1087
+ const items = isArray ? data : [data];
1088
+ const queries = items.map(
1089
+ (item) => buildInsert(
1090
+ this._config.tableName,
1091
+ item,
1092
+ this._config.columns
1093
+ )
1094
+ );
1095
+ const results = await this._config.executeBatch(queries);
1096
+ const rows = results.map((r) => {
1097
+ if (r.rows.length > 0) {
1098
+ return deserializeRow(
1099
+ r.rows[0],
1100
+ this._config.columns
1101
+ );
1102
+ }
1103
+ return void 0;
1104
+ });
1105
+ return isArray ? rows : rows[0];
1106
+ }
1107
+ /**
1108
+ * Update a row by ID. Only the provided fields are changed.
1109
+ * Returns the updated row via `UPDATE ... RETURNING *`.
1110
+ */
1111
+ async update(id, data) {
1112
+ const query = buildUpdate(
1113
+ this._config.tableName,
1114
+ id,
1115
+ data,
1116
+ this._config.columns
1117
+ );
1118
+ const results = await this._config.executeBatch([query]);
1119
+ return deserializeRow(
1120
+ results[0].rows[0],
1121
+ this._config.columns
1122
+ );
1123
+ }
1124
+ async remove(id) {
1125
+ const query = buildDelete(this._config.tableName, `id = ?`, [id]);
1126
+ await this._config.executeBatch([query]);
1127
+ }
1128
+ /**
1129
+ * Remove all rows matching a predicate. Returns the count removed.
1130
+ */
1131
+ async removeAll(predicate) {
1132
+ const compiled = compilePredicate(predicate);
1133
+ if (compiled.type === "sql") {
1134
+ const query = buildDelete(this._config.tableName, compiled.where);
1135
+ const results = await this._config.executeBatch([query]);
1136
+ return results[0].changes;
1137
+ }
1138
+ console.warn(
1139
+ `[mindstudio] removeAll predicate on ${this._config.tableName} could not be compiled to SQL \u2014 fetching all rows first`
1140
+ );
1141
+ const allQuery = buildSelect(this._config.tableName);
1142
+ const allResults = await this._config.executeBatch([allQuery]);
1143
+ const allRows = allResults[0].rows.map(
1144
+ (r) => deserializeRow(
1145
+ r,
1146
+ this._config.columns
1147
+ )
1148
+ );
1149
+ const matching = allRows.filter((row) => predicate(row));
1150
+ if (matching.length === 0) return 0;
1151
+ const deleteQueries = matching.filter((row) => row.id).map((row) => buildDelete(this._config.tableName, `id = ?`, [row.id]));
1152
+ if (deleteQueries.length > 0) {
1153
+ await this._config.executeBatch(deleteQueries);
1154
+ }
1155
+ return matching.length;
1156
+ }
1157
+ async clear() {
1158
+ const query = buildDelete(this._config.tableName);
1159
+ await this._config.executeBatch([query]);
1160
+ }
1161
+ };
1162
+
1163
+ // src/db/index.ts
1164
+ function createDb(databases, executeBatch) {
1165
+ return {
1166
+ defineTable(name, options) {
1167
+ const resolved = resolveTable(databases, name, options?.database);
1168
+ const config = {
1169
+ databaseId: resolved.databaseId,
1170
+ tableName: name,
1171
+ columns: resolved.columns,
1172
+ executeBatch: (queries) => executeBatch(resolved.databaseId, queries)
1173
+ };
1174
+ return new Table(config);
1175
+ },
1176
+ // --- Time helpers ---
1177
+ // Pure JS, no platform dependency. All timestamps are unix ms.
1178
+ now: () => Date.now(),
1179
+ days: (n) => n * 864e5,
1180
+ hours: (n) => n * 36e5,
1181
+ minutes: (n) => n * 6e4,
1182
+ ago: (ms) => Date.now() - ms,
1183
+ fromNow: (ms) => Date.now() + ms
1184
+ };
1185
+ }
1186
+ function resolveTable(databases, tableName, databaseHint) {
1187
+ if (databases.length === 0) {
1188
+ throw new MindStudioError(
1189
+ `No databases found in app context. Make sure the app has at least one database configured.`,
1190
+ "no_databases",
1191
+ 400
1192
+ );
1193
+ }
1194
+ if (databaseHint) {
1195
+ const targetDb = databases.find(
1196
+ (db2) => db2.id === databaseHint || db2.name === databaseHint
1197
+ );
1198
+ if (!targetDb) {
1199
+ const available = databases.map((db2) => db2.name || db2.id).join(", ");
1200
+ throw new MindStudioError(
1201
+ `Database "${databaseHint}" not found. Available databases: ${available}`,
1202
+ "database_not_found",
1203
+ 400
1204
+ );
1205
+ }
1206
+ const table = targetDb.tables.find((t) => t.name === tableName);
1207
+ if (!table) {
1208
+ const available = targetDb.tables.map((t) => t.name).join(", ");
1209
+ throw new MindStudioError(
1210
+ `Table "${tableName}" not found in database "${databaseHint}". Available tables: ${available || "(none)"}`,
1211
+ "table_not_found",
1212
+ 400
1213
+ );
1214
+ }
1215
+ return { databaseId: targetDb.id, columns: table.schema };
1216
+ }
1217
+ for (const db2 of databases) {
1218
+ const table = db2.tables.find((t) => t.name === tableName);
1219
+ if (table) {
1220
+ return {
1221
+ databaseId: db2.id,
1222
+ columns: table.schema
1223
+ };
1224
+ }
1225
+ }
1226
+ const availableTables = databases.flatMap((db2) => db2.tables.map((t) => t.name)).join(", ");
1227
+ throw new MindStudioError(
1228
+ `Table "${tableName}" not found in app databases. Available tables: ${availableTables || "(none)"}`,
1229
+ "table_not_found",
1230
+ 400
1231
+ );
1232
+ }
1233
+
140
1234
  // src/generated/steps.ts
141
1235
  function applyStepMethods(AgentClass) {
142
1236
  const proto = AgentClass.prototype;
@@ -170,6 +1264,9 @@ function applyStepMethods(AgentClass) {
170
1264
  proto.captureThumbnail = function(step, options) {
171
1265
  return this.executeStep("captureThumbnail", step, options);
172
1266
  };
1267
+ proto.checkAppRole = function(step, options) {
1268
+ return this.executeStep("checkAppRole", step, options);
1269
+ };
173
1270
  proto.codaCreateUpdatePage = function(step, options) {
174
1271
  return this.executeStep("codaCreateUpdatePage", step, options);
175
1272
  };
@@ -419,6 +1516,9 @@ function applyStepMethods(AgentClass) {
419
1516
  proto.postToZapier = function(step, options) {
420
1517
  return this.executeStep("postToZapier", step, options);
421
1518
  };
1519
+ proto.queryAppDatabase = function(step, options) {
1520
+ return this.executeStep("queryAppDatabase", step, options);
1521
+ };
422
1522
  proto.queryDataSource = function(step, options) {
423
1523
  return this.executeStep("queryDataSource", step, options);
424
1524
  };
@@ -620,17 +1720,46 @@ var MindStudioAgent = class {
620
1720
  _reuseThreadId;
621
1721
  /** @internal */
622
1722
  _threadId;
1723
+ // ---- App context (db + auth) ----
1724
+ /**
1725
+ * @internal App ID for context resolution. Resolved from:
1726
+ * constructor appId → MINDSTUDIO_APP_ID env → sandbox globals →
1727
+ * auto-detected from first executeStep response header.
1728
+ */
1729
+ _appId;
1730
+ /**
1731
+ * @internal Cached app context (auth + databases). Populated by
1732
+ * ensureContext() and cached for the lifetime of the instance.
1733
+ */
1734
+ _context;
1735
+ /**
1736
+ * @internal Deduplication promise for ensureContext(). Ensures only one
1737
+ * context fetch is in-flight at a time, even if multiple db/auth
1738
+ * operations trigger it concurrently.
1739
+ */
1740
+ _contextPromise;
1741
+ /** @internal Cached AuthContext instance, created during context hydration. */
1742
+ _auth;
1743
+ /** @internal Cached Db namespace instance, created during context hydration. */
1744
+ _db;
1745
+ /** @internal Auth type — 'internal' for CALLBACK_TOKEN (managed mode), 'apiKey' otherwise. */
1746
+ _authType;
623
1747
  constructor(options = {}) {
624
1748
  const config = loadConfig();
625
1749
  const { token, authType } = resolveToken(options.apiKey, config);
626
1750
  const baseUrl = options.baseUrl ?? process.env.MINDSTUDIO_BASE_URL ?? process.env.REMOTE_HOSTNAME ?? config.baseUrl ?? DEFAULT_BASE_URL;
627
1751
  this._reuseThreadId = options.reuseThreadId ?? /^(true|1)$/i.test(process.env.MINDSTUDIO_REUSE_THREAD_ID ?? "");
1752
+ this._appId = options.appId ?? process.env.MINDSTUDIO_APP_ID ?? void 0;
1753
+ this._authType = authType;
628
1754
  this._httpConfig = {
629
1755
  baseUrl,
630
1756
  token,
631
1757
  rateLimiter: new RateLimiter(authType),
632
1758
  maxRetries: options.maxRetries ?? DEFAULT_MAX_RETRIES
633
1759
  };
1760
+ if (authType === "internal") {
1761
+ this._trySandboxHydration();
1762
+ }
634
1763
  }
635
1764
  /**
636
1765
  * Execute any step by its type name. This is the low-level method that all
@@ -669,6 +1798,10 @@ var MindStudioAgent = class {
669
1798
  if (this._reuseThreadId && returnedThreadId) {
670
1799
  this._threadId = returnedThreadId;
671
1800
  }
1801
+ const returnedAppId = headers.get("x-mindstudio-app-id");
1802
+ if (!this._appId && returnedAppId) {
1803
+ this._appId = returnedAppId;
1804
+ }
672
1805
  const remaining = headers.get("x-ratelimit-remaining");
673
1806
  const billingCost = headers.get("x-mindstudio-billing-cost");
674
1807
  const billingEvents = headers.get("x-mindstudio-billing-events");
@@ -935,6 +2068,299 @@ var MindStudioAgent = class {
935
2068
  return data;
936
2069
  }
937
2070
  // -------------------------------------------------------------------------
2071
+ // db + auth namespaces
2072
+ // -------------------------------------------------------------------------
2073
+ /**
2074
+ * The `auth` namespace — synchronous role-based access control.
2075
+ *
2076
+ * Provides the current user's identity and roles. All methods are
2077
+ * synchronous since the role map is preloaded during context hydration.
2078
+ *
2079
+ * **Important**: Context must be hydrated before accessing `auth`.
2080
+ * - Inside the MindStudio sandbox: automatic (populated from globals)
2081
+ * - Outside the sandbox: call `await agent.ensureContext()` first,
2082
+ * or access `auth` after any `db` operation (which auto-hydrates)
2083
+ *
2084
+ * @throws {MindStudioError} if context has not been hydrated yet
2085
+ *
2086
+ * @example
2087
+ * ```ts
2088
+ * await agent.ensureContext();
2089
+ * agent.auth.requireRole(Roles.admin);
2090
+ * const admins = agent.auth.getUsersByRole(Roles.admin);
2091
+ * ```
2092
+ */
2093
+ get auth() {
2094
+ if (!this._auth) {
2095
+ this._trySandboxHydration();
2096
+ }
2097
+ if (!this._auth) {
2098
+ throw new MindStudioError(
2099
+ "Auth context not yet loaded. Call `await agent.ensureContext()` or perform any db operation first (which auto-hydrates context). Inside the MindStudio sandbox, context is loaded automatically.",
2100
+ "context_not_loaded",
2101
+ 400
2102
+ );
2103
+ }
2104
+ return this._auth;
2105
+ }
2106
+ /**
2107
+ * The `db` namespace — chainable collection API over managed databases.
2108
+ *
2109
+ * Use `db.defineTable<T>(name)` to get a typed Table<T>, then call
2110
+ * collection methods (filter, sortBy, push, update, etc.) on it.
2111
+ *
2112
+ * Context is auto-hydrated on first query execution — you can safely
2113
+ * call `defineTable()` at module scope without triggering any HTTP.
2114
+ *
2115
+ * @example
2116
+ * ```ts
2117
+ * const Orders = agent.db.defineTable<Order>('orders');
2118
+ * const active = await Orders.filter(o => o.status === 'active').take(10);
2119
+ * ```
2120
+ */
2121
+ get db() {
2122
+ if (!this._db) {
2123
+ this._trySandboxHydration();
2124
+ }
2125
+ if (this._db) return this._db;
2126
+ return this._createLazyDb();
2127
+ }
2128
+ /**
2129
+ * Hydrate the app context (auth + database metadata). This must be
2130
+ * called before using `auth` synchronously. For `db`, hydration happens
2131
+ * automatically on first query.
2132
+ *
2133
+ * Context is fetched once and cached for the instance's lifetime.
2134
+ * Calling `ensureContext()` multiple times is safe (no-op after first).
2135
+ *
2136
+ * Context sources (checked in order):
2137
+ * 1. Sandbox globals (`globalThis.ai.auth`, `globalThis.ai.databases`)
2138
+ * 2. HTTP: `GET /developer/v2/helpers/app-context?appId={appId}`
2139
+ *
2140
+ * @throws {MindStudioError} if no `appId` is available
2141
+ *
2142
+ * @example
2143
+ * ```ts
2144
+ * await agent.ensureContext();
2145
+ * // auth is now available synchronously
2146
+ * agent.auth.requireRole(Roles.admin);
2147
+ * ```
2148
+ */
2149
+ async ensureContext() {
2150
+ if (this._context) return;
2151
+ if (!this._contextPromise) {
2152
+ this._contextPromise = this._hydrateContext();
2153
+ }
2154
+ await this._contextPromise;
2155
+ }
2156
+ /**
2157
+ * @internal Fetch and cache app context, then create auth + db instances.
2158
+ *
2159
+ * In managed mode (CALLBACK_TOKEN), the platform resolves the app from
2160
+ * the token — no appId needed. With an API key, appId is required.
2161
+ */
2162
+ async _hydrateContext() {
2163
+ if (!this._appId && this._authType !== "internal") {
2164
+ throw new MindStudioError(
2165
+ "No app ID available for context resolution. Pass `appId` to the constructor, set the MINDSTUDIO_APP_ID environment variable, or make a step execution call first (which auto-detects the app ID).",
2166
+ "missing_app_id",
2167
+ 400
2168
+ );
2169
+ }
2170
+ const context = await this.getAppContext(this._appId);
2171
+ this._applyContext(context);
2172
+ }
2173
+ /**
2174
+ * @internal Apply a resolved context object — creates AuthContext and Db.
2175
+ * Used by both the HTTP path and sandbox hydration.
2176
+ */
2177
+ _applyContext(context) {
2178
+ this._context = context;
2179
+ this._auth = new AuthContext(context.auth);
2180
+ this._db = createDb(
2181
+ context.databases,
2182
+ this._executeDbBatch.bind(this)
2183
+ );
2184
+ }
2185
+ /**
2186
+ * @internal Try to hydrate context synchronously from sandbox globals.
2187
+ * Called in the constructor when CALLBACK_TOKEN auth is detected.
2188
+ *
2189
+ * The MindStudio sandbox pre-populates `globalThis.ai` with:
2190
+ * - `ai.auth`: { userId, roleAssignments[] }
2191
+ * - `ai.databases`: [{ id, name, tables[] }]
2192
+ */
2193
+ _trySandboxHydration() {
2194
+ const ai = globalThis.ai;
2195
+ if (ai?.auth && ai?.databases) {
2196
+ this._applyContext({
2197
+ auth: ai.auth,
2198
+ databases: ai.databases
2199
+ });
2200
+ }
2201
+ }
2202
+ /**
2203
+ * @internal Execute a batch of SQL queries against a managed database.
2204
+ * Used as the `executeBatch` callback for Table/Query instances.
2205
+ *
2206
+ * Calls `POST /_internal/v2/db/query` directly with the hook token
2207
+ * (raw, no Bearer prefix). All queries run on a single SQLite connection,
2208
+ * enabling RETURNING clauses and multi-statement batches.
2209
+ */
2210
+ async _executeDbBatch(databaseId, queries) {
2211
+ const url = `${this._httpConfig.baseUrl}/_internal/v2/db/query`;
2212
+ const res = await fetch(url, {
2213
+ method: "POST",
2214
+ headers: {
2215
+ "Content-Type": "application/json",
2216
+ Authorization: this._httpConfig.token
2217
+ },
2218
+ body: JSON.stringify({ databaseId, queries })
2219
+ });
2220
+ if (!res.ok) {
2221
+ let message = `Database query failed: ${res.status} ${res.statusText}`;
2222
+ try {
2223
+ const body = await res.json();
2224
+ if (body.error) message = body.error;
2225
+ } catch {
2226
+ }
2227
+ throw new MindStudioError(message, "db_query_error", res.status);
2228
+ }
2229
+ const data = await res.json();
2230
+ return data.results;
2231
+ }
2232
+ /**
2233
+ * @internal Create a lazy Db proxy that auto-hydrates context.
2234
+ *
2235
+ * defineTable() returns Table instances immediately (no async needed).
2236
+ * But the Table's executeBatch callback is wrapped to call ensureContext()
2237
+ * before the first query, so context is fetched lazily.
2238
+ */
2239
+ _createLazyDb() {
2240
+ const agent = this;
2241
+ return {
2242
+ defineTable(name, options) {
2243
+ const databaseHint = options?.database;
2244
+ return new Table({
2245
+ databaseId: "",
2246
+ tableName: name,
2247
+ columns: [],
2248
+ executeBatch: async (queries) => {
2249
+ await agent.ensureContext();
2250
+ const databases = agent._context.databases;
2251
+ let targetDb;
2252
+ if (databaseHint) {
2253
+ targetDb = databases.find(
2254
+ (d) => d.id === databaseHint || d.name === databaseHint
2255
+ );
2256
+ } else {
2257
+ targetDb = databases.find(
2258
+ (d) => d.tables.some((t) => t.name === name)
2259
+ );
2260
+ }
2261
+ const databaseId = targetDb?.id ?? databases[0]?.id ?? "";
2262
+ return agent._executeDbBatch(databaseId, queries);
2263
+ }
2264
+ });
2265
+ },
2266
+ // Time helpers work without context
2267
+ now: () => Date.now(),
2268
+ days: (n) => n * 864e5,
2269
+ hours: (n) => n * 36e5,
2270
+ minutes: (n) => n * 6e4,
2271
+ ago: (ms) => Date.now() - ms,
2272
+ fromNow: (ms) => Date.now() + ms
2273
+ };
2274
+ }
2275
+ // -------------------------------------------------------------------------
2276
+ // Helper methods — user resolution
2277
+ // -------------------------------------------------------------------------
2278
+ /**
2279
+ * Resolve a single user ID to display info (name, email, profile picture).
2280
+ *
2281
+ * Use this when you have a `User`-typed field value and need the person's
2282
+ * display name, email, or avatar. Returns null if the user ID is not found.
2283
+ *
2284
+ * Also available as a top-level import:
2285
+ * ```ts
2286
+ * import { resolveUser } from '@mindstudio-ai/agent';
2287
+ * ```
2288
+ *
2289
+ * @param userId - The user ID to resolve (a `User` branded string or plain UUID)
2290
+ * @returns Resolved user info, or null if not found
2291
+ *
2292
+ * @example
2293
+ * ```ts
2294
+ * const user = await agent.resolveUser(order.requestedBy);
2295
+ * if (user) {
2296
+ * console.log(user.name); // "Jane Smith"
2297
+ * console.log(user.email); // "jane@example.com"
2298
+ * console.log(user.profilePictureUrl); // "https://..." or null
2299
+ * }
2300
+ * ```
2301
+ */
2302
+ async resolveUser(userId) {
2303
+ const { users } = await this.resolveUsers([userId]);
2304
+ return users[0] ?? null;
2305
+ }
2306
+ /**
2307
+ * Resolve multiple user IDs to display info in a single request.
2308
+ * Maximum 100 user IDs per request.
2309
+ *
2310
+ * Use this for batch resolution when you have multiple user references
2311
+ * to display (e.g. all approvers on a purchase order, all team members).
2312
+ *
2313
+ * @param userIds - Array of user IDs to resolve (max 100)
2314
+ * @returns Object with `users` array of resolved user info
2315
+ *
2316
+ * @example
2317
+ * ```ts
2318
+ * // Resolve all approvers at once
2319
+ * const approverIds = approvals.map(a => a.assignedTo);
2320
+ * const { users } = await agent.resolveUsers(approverIds);
2321
+ *
2322
+ * for (const u of users) {
2323
+ * console.log(`${u.name} (${u.email})`);
2324
+ * }
2325
+ * ```
2326
+ */
2327
+ async resolveUsers(userIds) {
2328
+ const { data } = await request(
2329
+ this._httpConfig,
2330
+ "POST",
2331
+ "/helpers/resolve-users",
2332
+ { userIds }
2333
+ );
2334
+ return data;
2335
+ }
2336
+ // -------------------------------------------------------------------------
2337
+ // App context
2338
+ // -------------------------------------------------------------------------
2339
+ /**
2340
+ * Get auth and database context for an app.
2341
+ *
2342
+ * Returns role assignments and managed database schemas. Useful for
2343
+ * hydrating `auth` and `db` namespaces when running outside the sandbox.
2344
+ *
2345
+ * When called with a CALLBACK_TOKEN (managed mode), `appId` is optional —
2346
+ * the platform resolves the app from the token. With an API key, `appId`
2347
+ * is required.
2348
+ *
2349
+ * ```ts
2350
+ * const ctx = await agent.getAppContext('your-app-id');
2351
+ * console.log(ctx.auth.roleAssignments, ctx.databases);
2352
+ * ```
2353
+ */
2354
+ async getAppContext(appId) {
2355
+ const query = appId ? `?appId=${encodeURIComponent(appId)}` : "";
2356
+ const { data } = await request(
2357
+ this._httpConfig,
2358
+ "GET",
2359
+ `/helpers/app-context${query}`
2360
+ );
2361
+ return data;
2362
+ }
2363
+ // -------------------------------------------------------------------------
938
2364
  // Account methods
939
2365
  // -------------------------------------------------------------------------
940
2366
  /** Update the display name of the authenticated user/agent. */
@@ -1015,6 +2441,7 @@ var monacoSnippets = {
1015
2441
  "analyzeImage": { fields: [["prompt", "string"], ["imageUrl", "string"]], outputKeys: ["analysis"] },
1016
2442
  "analyzeVideo": { fields: [["prompt", "string"], ["videoUrl", "string"]], outputKeys: ["analysis"] },
1017
2443
  "captureThumbnail": { fields: [["videoUrl", "string"], ["at", "string"]], outputKeys: ["thumbnailUrl"] },
2444
+ "checkAppRole": { fields: [["roleName", "string"]], outputKeys: ["hasRole", "userRoles"] },
1018
2445
  "codaCreateUpdatePage": { fields: [["pageData", "object"]], outputKeys: ["pageId"] },
1019
2446
  "codaCreateUpdateRow": { fields: [["docId", "string"], ["tableId", "string"], ["rowData", "object"]], outputKeys: ["rowId"] },
1020
2447
  "codaFindRow": { fields: [["docId", "string"], ["tableId", "string"], ["rowData", "object"]], outputKeys: ["row"] },
@@ -1100,6 +2527,7 @@ var monacoSnippets = {
1100
2527
  "postToSlackChannel": { fields: [["channelId", "string"], ["messageType", ["string", "blocks"]], ["message", "string"]], outputKeys: [] },
1101
2528
  "postToX": { fields: [["text", "string"]], outputKeys: [] },
1102
2529
  "postToZapier": { fields: [["webhookUrl", "string"], ["input", "object"]], outputKeys: ["data"] },
2530
+ "queryAppDatabase": { fields: [["databaseId", "string"], ["sql", "string"]], outputKeys: ["rows", "changes"] },
1103
2531
  "queryDataSource": { fields: [["dataSourceId", "string"], ["query", "string"], ["maxResults", "number"]], outputKeys: ["text", "chunks", "query", "citations", "latencyMs"] },
1104
2532
  "queryExternalDatabase": { fields: [["query", "string"], ["outputFormat", ["json", "csv"]]], outputKeys: ["data"] },
1105
2533
  "redactPII": { fields: [["input", "string"], ["language", "string"], ["entities", "array"]], outputKeys: ["text"] },
@@ -1189,7 +2617,7 @@ var stepMetadata = {
1189
2617
  stepType: "addSubtitlesToVideo",
1190
2618
  description: "Automatically add subtitles to a video",
1191
2619
  usageNotes: "- Can control style of text and animation",
1192
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "language": { "type": "string", "description": "ISO language code for subtitle transcription" }, "fontName": { "type": "string", "description": "Google Font name for subtitle text" }, "fontSize": { "type": "number", "description": "Font size in pixels. Default: 100." }, "fontWeight": { "enum": ["normal", "bold", "black"], "type": "string", "description": "Font weight for subtitle text" }, "fontColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the subtitle text" }, "highlightColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color used to highlight the currently spoken word" }, "strokeWidth": { "type": "number", "description": "Width of the text stroke outline in pixels" }, "strokeColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the text stroke outline" }, "backgroundColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta", "none"], "type": "string", "description": "Background color behind subtitle text. Use 'none' for transparent." }, "backgroundOpacity": { "type": "number", "description": "Opacity of the subtitle background. 0.0 = fully transparent, 1.0 = fully opaque." }, "position": { "enum": ["top", "center", "bottom"], "type": "string", "description": "Vertical position of subtitle text on screen" }, "yOffset": { "type": "number", "description": "Vertical offset in pixels from the position. Positive moves down, negative moves up. Default: 75." 
}, "wordsPerSubtitle": { "type": "number", "description": "Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences. Default: 3." }, "enableAnimation": { "type": "boolean", "description": "When true, enables bounce-style entrance animation for subtitles. Default: true." }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "language", "fontName", "fontSize", "fontWeight", "fontColor", "highlightColor", "strokeWidth", "strokeColor", "backgroundColor", "backgroundOpacity", "position", "yOffset", "wordsPerSubtitle", "enableAnimation"] },
2620
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "language": { "type": "string", "description": "ISO language code for subtitle transcription" }, "fontName": { "type": "string", "description": "Google Font name for subtitle text" }, "fontSize": { "type": "number", "description": "Font size in pixels. Default: 100." }, "fontWeight": { "enum": ["normal", "bold", "black"], "type": "string", "description": "Font weight for subtitle text" }, "fontColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the subtitle text" }, "highlightColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color used to highlight the currently spoken word" }, "strokeWidth": { "type": "number", "description": "Width of the text stroke outline in pixels" }, "strokeColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the text stroke outline" }, "backgroundColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta", "none"], "type": "string", "description": "Background color behind subtitle text. Use 'none' for transparent." }, "backgroundOpacity": { "type": "number", "description": "Opacity of the subtitle background. 0.0 = fully transparent, 1.0 = fully opaque." }, "position": { "enum": ["top", "center", "bottom"], "type": "string", "description": "Vertical position of subtitle text on screen" }, "yOffset": { "type": "number", "description": "Vertical offset in pixels from the position. Positive moves down, negative moves up. Default: 75." 
}, "wordsPerSubtitle": { "type": "number", "description": "Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences. Default: 3." }, "enableAnimation": { "type": "boolean", "description": "When true, enables bounce-style entrance animation for subtitles. Default: true." }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "language", "fontName", "fontSize", "fontWeight", "fontColor", "highlightColor", "strokeWidth", "strokeColor", "backgroundColor", "backgroundOpacity", "position", "yOffset", "wordsPerSubtitle", "enableAnimation"] },
1193
2621
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with subtitles added" } }, "required": ["videoUrl"] }
1194
2622
  },
1195
2623
  "airtableCreateUpdateRecord": {
@@ -1241,6 +2669,13 @@ var stepMetadata = {
1241
2669
  inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to capture a frame from" }, "at": { "anyOf": [{ "type": "number" }, { "type": "string" }] } }, "required": ["videoUrl", "at"] },
1242
2670
  outputSchema: { "type": "object", "properties": { "thumbnailUrl": { "type": "string", "description": "URL of the captured thumbnail image" } }, "required": ["thumbnailUrl"] }
1243
2671
  },
2672
+ "checkAppRole": {
2673
+ stepType: "checkAppRole",
2674
+ description: "Check whether the current user has a specific app role and branch accordingly.",
2675
+ usageNotes: '- Checks if the current user has been assigned a specific role in this app.\n- If the user has the role, transitions to the "has role" path.\n- If the user does not have the role, transitions to the "no role" path, or errors if no path is configured.\n- Role names are defined by the app creator and assigned to users via the app roles system.\n- The roleName field supports {{variables}} for dynamic role checks.',
2676
+ inputSchema: { "type": "object", "properties": { "roleName": { "type": "string", "description": "The role name to check (supports {{variables}})" }, "hasRoleStepId": { "type": "string", "description": "Step to transition to if the user has the role (same workflow)" }, "hasRoleWorkflowId": { "type": "string", "description": "Workflow to jump to if the user has the role (cross workflow)" }, "noRoleStepId": { "type": "string", "description": "Step to transition to if the user does not have the role (same workflow)" }, "noRoleWorkflowId": { "type": "string", "description": "Workflow to jump to if the user does not have the role (cross workflow)" } }, "required": ["roleName"], "description": "Configuration for the check app role step" },
2677
+ outputSchema: { "type": "object", "properties": { "hasRole": { "type": "boolean", "description": "Whether the current user has the checked role" }, "userRoles": { "type": "array", "items": { "type": "string" }, "description": "All roles assigned to the current user for this app" } }, "required": ["hasRole", "userRoles"] }
2678
+ },
1244
2679
  "codaCreateUpdatePage": {
1245
2680
  stepType: "codaCreateUpdatePage",
1246
2681
  description: "Create a new page or update an existing page in a Coda document.",
@@ -1492,7 +2927,7 @@ var stepMetadata = {
1492
2927
  stepType: "generatePdf",
1493
2928
  description: "Generate an HTML asset and export it as a webpage, PDF, or image",
1494
2929
  usageNotes: '- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)',
1495
- inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the asset will not appear in the user's asset history" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
2930
+ inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
1496
2931
  outputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat)" } }, "required": ["url"] }
1497
2932
  },
1498
2933
  "generateChart": {
@@ -1506,28 +2941,28 @@ var stepMetadata = {
1506
2941
  stepType: "generateImage",
1507
2942
  description: "Generate an image from a text prompt using an AI model.",
1508
2943
  usageNotes: "- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Images are automatically hosted on a CDN.\n- In foreground mode, the image is displayed to the user. In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple images are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.",
1509
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the image to generate" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the image will not appear in the user's asset history" }, "imageModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Image generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default image model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple image variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated image" } }, "required": ["prompt"] },
2944
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the image to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "imageModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Image generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default image model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple image variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated image" } }, "required": ["prompt"] },
1510
2945
  outputSchema: { "type": "object", "properties": { "imageUrl": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["imageUrl"] }
1511
2946
  },
1512
2947
  "generateLipsync": {
1513
2948
  stepType: "generateLipsync",
1514
2949
  description: "Generate a lip sync video from provided audio and image.",
1515
2950
  usageNotes: "- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.",
1516
- inputSchema: { "type": "object", "properties": { "skipAssetCreation": { "type": "boolean", "description": "If true, the generated video will not appear in the user's asset history" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" }, "lipsyncModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default lipsync model if not specified" } } },
2951
+ inputSchema: { "type": "object", "properties": { "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" }, "lipsyncModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default lipsync model if not specified" } } },
1517
2952
  outputSchema: { "description": "This step does not produce output data." }
1518
2953
  },
1519
2954
  "generateMusic": {
1520
2955
  stepType: "generateMusic",
1521
2956
  description: "Generate an audio file from provided instructions (text) using a music model.",
1522
2957
  usageNotes: "- The text field contains the instructions (prompt) for the music generation.\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.",
1523
- inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The instructions (prompt) for the music generation" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the generated audio will not appear in the user's asset history" }, "musicModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default music model if not specified" } }, "required": ["text"] },
2958
+ inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The instructions (prompt) for the music generation" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "musicModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default music model if not specified" } }, "required": ["text"] },
1524
2959
  outputSchema: { "description": "This step does not produce output data." }
1525
2960
  },
1526
2961
  "generatePdf": {
1527
2962
  stepType: "generatePdf",
1528
2963
  description: "Generate an HTML asset and export it as a webpage, PDF, or image",
1529
2964
  usageNotes: '- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)',
1530
- inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the asset will not appear in the user's asset history" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
2965
+ inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
1531
2966
  outputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat)" } }, "required": ["url"] }
1532
2967
  },
1533
2968
  "generateStaticVideoFromImage": {
@@ -1551,7 +2986,7 @@ var stepMetadata = {
1551
2986
  stepType: "generateVideo",
1552
2987
  description: "Generate a video from a text prompt using an AI model.",
1553
2988
  usageNotes: "- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Videos are automatically hosted on a CDN.\n- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.",
1554
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the video to generate" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the video will not appear in the user's asset history" }, "videoModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Video generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default video model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple video variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" } }, "required": ["prompt"] },
2989
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the video to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "videoModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Video generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default video model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple video variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" } }, "required": ["prompt"] },
1555
2990
  outputSchema: { "type": "object", "properties": { "videoUrl": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["videoUrl"] }
1556
2991
  },
1557
2992
  "getGmailAttachments": {
@@ -1691,14 +3126,14 @@ var stepMetadata = {
1691
3126
  stepType: "imageRemoveWatermark",
1692
3127
  description: "Remove watermarks from an image using AI.",
1693
3128
  usageNotes: "- Output is re-hosted on the CDN as a PNG.",
1694
- inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the image to remove the watermark from" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history" } }, "required": ["imageUrl", "engine"] },
3129
+ inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the image to remove the watermark from" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["imageUrl", "engine"] },
1695
3130
  outputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "CDN URL of the processed image with watermark removed (PNG)" } }, "required": ["imageUrl"] }
1696
3131
  },
1697
3132
  "insertVideoClips": {
1698
3133
  stepType: "insertVideoClips",
1699
3134
  description: "Insert b-roll clips into a base video at a timecode, optionally with an xfade transition.",
1700
3135
  usageNotes: "",
1701
- inputSchema: { "type": "object", "properties": { "baseVideoUrl": { "type": "string", "description": "URL of the base video to insert clips into" }, "overlayVideos": { "type": "array", "items": { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the overlay video clip" }, "startTimeSec": { "type": "number", "description": "Timecode in seconds at which to insert this clip" } }, "required": ["videoUrl", "startTimeSec"] }, "description": "Array of overlay clips to insert at specified timecodes" }, "transition": { "type": "string", "description": "Optional xfade transition effect name between clips" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "useOverlayAudio": { "type": "boolean", "description": "When true, uses audio from the overlay clips instead of the base video audio during inserts" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["baseVideoUrl", "overlayVideos"] },
3136
+ inputSchema: { "type": "object", "properties": { "baseVideoUrl": { "type": "string", "description": "URL of the base video to insert clips into" }, "overlayVideos": { "type": "array", "items": { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the overlay video clip" }, "startTimeSec": { "type": "number", "description": "Timecode in seconds at which to insert this clip" } }, "required": ["videoUrl", "startTimeSec"] }, "description": "Array of overlay clips to insert at specified timecodes" }, "transition": { "type": "string", "description": "Optional xfade transition effect name between clips" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "useOverlayAudio": { "type": "boolean", "description": "When true, uses audio from the overlay clips instead of the base video audio during inserts" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["baseVideoUrl", "overlayVideos"] },
1702
3137
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with clips inserted" } }, "required": ["videoUrl"] }
1703
3138
  },
1704
3139
  "listDataSources": {
@@ -1765,28 +3200,28 @@ var stepMetadata = {
1765
3200
  stepType: "mergeAudio",
1766
3201
  description: "Merge one or more clips into a single audio file.",
1767
3202
  usageNotes: "",
1768
- inputSchema: { "type": "object", "properties": { "mp3Urls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the MP3 audio clips to merge in order" }, "fileMetadata": { "type": "object", "description": "FFmpeg MP3 metadata key-value pairs to embed in the output file" }, "albumArtUrl": { "type": "string", "description": "URL of an image to embed as album art in the output file" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["mp3Urls"] },
3203
+ inputSchema: { "type": "object", "properties": { "mp3Urls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the MP3 audio clips to merge in order" }, "fileMetadata": { "type": "object", "description": "FFmpeg MP3 metadata key-value pairs to embed in the output file" }, "albumArtUrl": { "type": "string", "description": "URL of an image to embed as album art in the output file" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["mp3Urls"] },
1769
3204
  outputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the merged audio file" } }, "required": ["audioUrl"] }
1770
3205
  },
1771
3206
  "mergeVideos": {
1772
3207
  stepType: "mergeVideos",
1773
3208
  description: "Merge one or more clips into a single video.",
1774
3209
  usageNotes: "",
1775
- inputSchema: { "type": "object", "properties": { "videoUrls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the video clips to merge in order" }, "transition": { "type": "string", "description": "Optional xfade transition effect name" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrls"] },
3210
+ inputSchema: { "type": "object", "properties": { "videoUrls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the video clips to merge in order" }, "transition": { "type": "string", "description": "Optional xfade transition effect name" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrls"] },
1776
3211
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the merged video" } }, "required": ["videoUrl"] }
1777
3212
  },
1778
3213
  "mixAudioIntoVideo": {
1779
3214
  stepType: "mixAudioIntoVideo",
1780
3215
  description: "Mix an audio track into a video",
1781
3216
  usageNotes: "",
1782
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "audioUrl": { "type": "string", "description": "URL of the audio track to mix into the video" }, "options": { "type": "object", "properties": { "keepVideoAudio": { "type": "boolean", "description": "When true, preserves the original video audio alongside the new track. Defaults to false." }, "audioGainDb": { "type": "number", "description": "Volume adjustment for the new audio track in decibels. Defaults to 0." }, "videoGainDb": { "type": "number", "description": "Volume adjustment for the existing video audio in decibels. Defaults to 0." }, "loopAudio": { "type": "boolean", "description": "When true, loops the audio track to match the video duration. Defaults to false." } }, "description": "Audio mixing options" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "audioUrl", "options"] },
3217
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "audioUrl": { "type": "string", "description": "URL of the audio track to mix into the video" }, "options": { "type": "object", "properties": { "keepVideoAudio": { "type": "boolean", "description": "When true, preserves the original video audio alongside the new track. Defaults to false." }, "audioGainDb": { "type": "number", "description": "Volume adjustment for the new audio track in decibels. Defaults to 0." }, "videoGainDb": { "type": "number", "description": "Volume adjustment for the existing video audio in decibels. Defaults to 0." }, "loopAudio": { "type": "boolean", "description": "When true, loops the audio track to match the video duration. Defaults to false." } }, "description": "Audio mixing options" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "audioUrl", "options"] },
1783
3218
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with the mixed audio track" } }, "required": ["videoUrl"] }
1784
3219
  },
1785
3220
  "muteVideo": {
1786
3221
  stepType: "muteVideo",
1787
3222
  description: "Mute a video file",
1788
3223
  usageNotes: "",
1789
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to mute" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl"] },
3224
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to mute" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl"] },
1790
3225
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the muted video" } }, "required": ["videoUrl"] }
1791
3226
  },
1792
3227
  "n8nRunNode": {
@@ -1820,8 +3255,8 @@ var stepMetadata = {
1820
3255
  "postToLinkedIn": {
1821
3256
  stepType: "postToLinkedIn",
1822
3257
  description: "Create a post on LinkedIn from the connected account.",
1823
- usageNotes: "- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, and video posts.\n- Visibility controls who can see the post.",
1824
- inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The text content of the LinkedIn post" }, "visibility": { "enum": ["PUBLIC", "CONNECTIONS"], "type": "string", "description": 'Who can see the post: "PUBLIC" or "CONNECTIONS"' }, "videoUrl": { "type": "string", "description": "URL of a video to attach to the post" }, "descriptionText": { "type": "string", "description": "Description text for link/media attachments" }, "titleText": { "type": "string", "description": "Title text for link/media attachments" }, "imageUrl": { "type": "string", "description": "URL of an image to attach to the post" }, "connectionId": { "type": "string", "description": "LinkedIn OAuth connection ID" } }, "required": ["message", "visibility"] },
3258
+ usageNotes: "- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, video posts, document posts, and article posts.\n- Attach one media type per post: image, video, document, or article.\n- Documents support PDF, PPT, PPTX, DOC, DOCX (max 100MB, 300 pages). Displays as a slideshow carousel.\n- Articles create a link preview with optional custom title, description, and thumbnail.\n- Visibility controls who can see the post.",
3259
+ inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The text content of the LinkedIn post" }, "visibility": { "enum": ["PUBLIC", "CONNECTIONS"], "type": "string", "description": 'Who can see the post: "PUBLIC" or "CONNECTIONS"' }, "imageUrl": { "type": "string", "description": "URL of an image to attach to the post" }, "videoUrl": { "type": "string", "description": "URL of a video to attach to the post" }, "documentUrl": { "type": "string", "description": "URL of a document (PDF, PPT, DOC) to attach to the post" }, "articleUrl": { "type": "string", "description": "URL to share as an article link preview" }, "titleText": { "type": "string", "description": "Title text for media or article attachments" }, "descriptionText": { "type": "string", "description": "Description text for article attachments" }, "connectionId": { "type": "string", "description": "LinkedIn OAuth connection ID" } }, "required": ["message", "visibility"] },
1825
3260
  outputSchema: { "description": "This step does not produce output data." }
1826
3261
  },
1827
3262
  "postToSlackChannel": {
@@ -1845,6 +3280,13 @@ var stepMetadata = {
1845
3280
  inputSchema: { "type": "object", "properties": { "webhookUrl": { "type": "string", "description": "Zapier webhook URL to send data to" }, "input": { "type": "object", "description": "Key-value pairs to send as the JSON POST body" } }, "required": ["webhookUrl", "input"] },
1846
3281
  outputSchema: { "type": "object", "properties": { "data": { "description": "Parsed webhook response from Zapier (JSON object, array, or string)" } }, "required": ["data"] }
1847
3282
  },
3283
+ "queryAppDatabase": {
3284
+ stepType: "queryAppDatabase",
3285
+ description: "Execute a SQL query against the app managed database.",
3286
+ usageNotes: '- Executes raw SQL against a SQLite database managed by the app.\n- For SELECT queries, returns rows as JSON.\n- For INSERT/UPDATE/DELETE, returns the number of affected rows.\n- Use {{variables}} directly in your SQL. By default they are automatically extracted\n and passed as safe parameterized values (preventing SQL injection).\n Example: INSERT INTO contacts (name, comment) VALUES ({{name}}, {{comment}})\n- Full MindStudio handlebars syntax is supported, including helpers like {{json myVar}},\n {{get myVar "$.path"}}, {{global.orgName}}, etc.\n- Set parameterize to false for raw/dynamic SQL where variables are interpolated directly\n into the query string. Use this when another step generates full or partial SQL, e.g.\n a bulk INSERT with a precomputed VALUES list. The user is responsible for sanitization\n when parameterize is false.',
3287
+ inputSchema: { "type": "object", "properties": { "databaseId": { "type": "string", "description": "Name or ID of the app data database to query" }, "sql": { "type": "string", "description": "SQL query to execute. Use {{variables}} directly in the SQL \u2014 they are handled according to the `parameterize` setting.\n\nWhen parameterize is true (default): {{variables}} are extracted from the SQL, replaced with ? placeholders, resolved via the full MindStudio handlebars pipeline, and passed as safe parameterized values to SQLite. This prevents SQL injection. Example: INSERT INTO contacts (name, email) VALUES ({{name}}, {{email}})\n\nWhen parameterize is false: The entire SQL string is resolved via compileString (standard handlebars interpolation) and executed as-is. Use this for dynamic/generated SQL where another step builds the query. The user is responsible for safety. Example: {{generatedInsertQuery}}\n\nAsk the user for the database schema if they have not already provided it." }, "parameterize": { "type": "boolean", "description": "Whether to treat {{variables}} as parameterized query values (default: true).\n\n- true: {{vars}} are extracted, replaced with ?, and passed as bind params. Safe from SQL injection. Use for standard CRUD operations.\n- false: {{vars}} are interpolated directly into the SQL string via handlebars. Use when another step generates full or partial SQL (e.g. bulk inserts with precomputed VALUES). The user is responsible for sanitization." } }, "required": ["databaseId", "sql"] },
3288
+ outputSchema: { "type": "object", "properties": { "rows": { "type": "array", "items": {}, "description": "Result rows for SELECT queries (empty array for write queries)" }, "changes": { "type": "number", "description": "Number of rows affected by INSERT, UPDATE, or DELETE queries (0 for SELECT)" } }, "required": ["rows", "changes"] }
3289
+ },
1848
3290
  "queryDataSource": {
1849
3291
  stepType: "queryDataSource",
1850
3292
  description: "Search a vector data source (RAG) and return relevant document chunks.",
@@ -1884,7 +3326,7 @@ var stepMetadata = {
1884
3326
  stepType: "resizeVideo",
1885
3327
  description: "Resize a video file",
1886
3328
  usageNotes: "",
1887
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to resize" }, "mode": { "enum": ["fit", "exact"], "type": "string", "description": "Resize mode: 'fit' scales within max dimensions, 'exact' forces exact dimensions" }, "maxWidth": { "type": "number", "description": "Maximum width in pixels (used with 'fit' mode)" }, "maxHeight": { "type": "number", "description": "Maximum height in pixels (used with 'fit' mode)" }, "width": { "type": "number", "description": "Exact width in pixels (used with 'exact' mode)" }, "height": { "type": "number", "description": "Exact height in pixels (used with 'exact' mode)" }, "strategy": { "enum": ["pad", "crop"], "type": "string", "description": "Strategy for handling aspect ratio mismatch in 'exact' mode" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "mode"] },
3329
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to resize" }, "mode": { "enum": ["fit", "exact"], "type": "string", "description": "Resize mode: 'fit' scales within max dimensions, 'exact' forces exact dimensions" }, "maxWidth": { "type": "number", "description": "Maximum width in pixels (used with 'fit' mode)" }, "maxHeight": { "type": "number", "description": "Maximum height in pixels (used with 'fit' mode)" }, "width": { "type": "number", "description": "Exact width in pixels (used with 'exact' mode)" }, "height": { "type": "number", "description": "Exact height in pixels (used with 'exact' mode)" }, "strategy": { "enum": ["pad", "crop"], "type": "string", "description": "Strategy for handling aspect ratio mismatch in 'exact' mode" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "mode"] },
1888
3330
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the resized video" } }, "required": ["videoUrl"] }
1889
3331
  },
1890
3332
  "runFromConnectorRegistry": {
@@ -2180,7 +3622,7 @@ var stepMetadata = {
2180
3622
  stepType: "textToSpeech",
2181
3623
  description: "Generate an audio file from provided text using a speech model.",
2182
3624
  usageNotes: "- The text field contains the exact words to be spoken (not instructions).\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.",
2183
- inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The text to convert to speech" }, "skipAssetCreation": { "type": "boolean" }, "speechModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Speech synthesis model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default speech model if not specified" } }, "required": ["text"] },
3625
+ inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The text to convert to speech" }, "intermediateAsset": { "type": "boolean" }, "speechModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Speech synthesis model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default speech model if not specified" } }, "required": ["text"] },
2184
3626
  outputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the generated audio file" } }, "required": ["audioUrl"] }
2185
3627
  },
2186
3628
  "transcribeAudio": {
@@ -2194,7 +3636,7 @@ var stepMetadata = {
2194
3636
  stepType: "trimMedia",
2195
3637
  description: "Trim an audio or video clip",
2196
3638
  usageNotes: "",
2197
- inputSchema: { "type": "object", "properties": { "inputUrl": { "type": "string", "description": "URL of the source audio or video file to trim" }, "start": { "type": ["number", "string"] }, "duration": { "type": ["string", "number"] }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["inputUrl"] },
3639
+ inputSchema: { "type": "object", "properties": { "inputUrl": { "type": "string", "description": "URL of the source audio or video file to trim" }, "start": { "type": ["number", "string"] }, "duration": { "type": ["string", "number"] }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["inputUrl"] },
2198
3640
  outputSchema: { "type": "object", "properties": { "mediaUrl": { "type": "string", "description": "URL of the trimmed media file" } }, "required": ["mediaUrl"] }
2199
3641
  },
2200
3642
  "updateGmailLabels": {
@@ -2243,7 +3685,7 @@ var stepMetadata = {
2243
3685
  stepType: "upscaleVideo",
2244
3686
  description: "Upscale a video file",
2245
3687
  usageNotes: "",
2246
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to upscale" }, "targetResolution": { "enum": ["720p", "1080p", "2K", "4K"], "type": "string", "description": "Target output resolution for the upscaled video" }, "engine": { "enum": ["standard", "pro", "ultimate", "flashvsr", "seedance", "seedvr2", "runwayml/upscale-v1"], "type": "string", "description": "Upscaling engine to use. Higher tiers produce better quality at higher cost." }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "targetResolution", "engine"] },
3688
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to upscale" }, "targetResolution": { "enum": ["720p", "1080p", "2K", "4K"], "type": "string", "description": "Target output resolution for the upscaled video" }, "engine": { "enum": ["standard", "pro", "ultimate", "flashvsr", "seedance", "seedvr2", "runwayml/upscale-v1"], "type": "string", "description": "Upscaling engine to use. Higher tiers produce better quality at higher cost." }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "targetResolution", "engine"] },
2247
3689
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the upscaled video" } }, "required": ["videoUrl"] }
2248
3690
  },
2249
3691
  "userMessage": {
@@ -2260,46 +3702,86 @@ var stepMetadata = {
2260
3702
  stepType: "videoFaceSwap",
2261
3703
  description: "Swap faces in a video file",
2262
3704
  usageNotes: "",
2263
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing faces to swap" }, "faceImageUrl": { "type": "string", "description": "URL of the image containing the replacement face" }, "targetIndex": { "type": "number", "description": "Zero-based index of the face to replace in the video" }, "engine": { "type": "string", "description": "Face swap engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "faceImageUrl", "targetIndex", "engine"] },
3705
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing faces to swap" }, "faceImageUrl": { "type": "string", "description": "URL of the image containing the replacement face" }, "targetIndex": { "type": "number", "description": "Zero-based index of the face to replace in the video" }, "engine": { "type": "string", "description": "Face swap engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "faceImageUrl", "targetIndex", "engine"] },
2264
3706
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the face-swapped video" } }, "required": ["videoUrl"] }
2265
3707
  },
2266
3708
  "videoRemoveBackground": {
2267
3709
  stepType: "videoRemoveBackground",
2268
3710
  description: "Remove or replace background from a video",
2269
3711
  usageNotes: "",
2270
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "newBackground": { "enum": ["transparent", "image"], "type": "string", "description": "Whether to make the background transparent or replace it with an image" }, "newBackgroundImageUrl": { "type": "string", "description": "URL of a replacement background image. Required when newBackground is 'image'." }, "engine": { "type": "string", "description": "Background removal engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "newBackground", "engine"] },
3712
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "newBackground": { "enum": ["transparent", "image"], "type": "string", "description": "Whether to make the background transparent or replace it with an image" }, "newBackgroundImageUrl": { "type": "string", "description": "URL of a replacement background image. Required when newBackground is 'image'." }, "engine": { "type": "string", "description": "Background removal engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "newBackground", "engine"] },
2271
3713
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with background removed or replaced" } }, "required": ["videoUrl"] }
2272
3714
  },
2273
3715
  "videoRemoveWatermark": {
2274
3716
  stepType: "videoRemoveWatermark",
2275
3717
  description: "Remove a watermark from a video",
2276
3718
  usageNotes: "",
2277
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing a watermark" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "engine"] },
3719
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing a watermark" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "engine"] },
2278
3720
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with watermark removed" } }, "required": ["videoUrl"] }
2279
3721
  },
2280
3722
  "watermarkImage": {
2281
3723
  stepType: "watermarkImage",
2282
3724
  description: "Overlay a watermark image onto another image.",
2283
3725
  usageNotes: "- The watermark is placed at the specified corner with configurable padding and width.",
2284
- inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the base image" }, "watermarkImageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history" } }, "required": ["imageUrl", "watermarkImageUrl", "corner", "paddingPx", "widthPx"] },
3726
+ inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the base image" }, "watermarkImageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["imageUrl", "watermarkImageUrl", "corner", "paddingPx", "widthPx"] },
2285
3727
  outputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "CDN URL of the watermarked image" } }, "required": ["imageUrl"] }
2286
3728
  },
2287
3729
  "watermarkVideo": {
2288
3730
  stepType: "watermarkVideo",
2289
3731
  description: "Add an image watermark to a video",
2290
3732
  usageNotes: "",
2291
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "imageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "imageUrl", "corner", "paddingPx", "widthPx"] },
3733
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "imageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "imageUrl", "corner", "paddingPx", "widthPx"] },
2292
3734
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the watermarked video" } }, "required": ["videoUrl"] }
2293
3735
  }
2294
3736
  };
2295
3737
 
2296
3738
  // src/index.ts
2297
3739
  var MindStudioAgent2 = MindStudioAgent;
3740
+ var _default;
3741
+ var mindstudio = new Proxy(
3742
+ {},
3743
+ {
3744
+ get(_, prop, receiver) {
3745
+ _default ??= new MindStudioAgent2();
3746
+ const value = Reflect.get(_default, prop, _default);
3747
+ return typeof value === "function" ? value.bind(_default) : value;
3748
+ }
3749
+ }
3750
+ );
3751
+ var index_default = mindstudio;
3752
+ var auth = new Proxy(
3753
+ {},
3754
+ {
3755
+ get(_, prop) {
3756
+ const target = mindstudio.auth;
3757
+ const value = Reflect.get(target, prop, target);
3758
+ return typeof value === "function" ? value.bind(target) : value;
3759
+ }
3760
+ }
3761
+ );
3762
+ var db = new Proxy(
3763
+ {},
3764
+ {
3765
+ get(_, prop) {
3766
+ const target = mindstudio.db;
3767
+ const value = Reflect.get(target, prop, target);
3768
+ return typeof value === "function" ? value.bind(target) : value;
3769
+ }
3770
+ }
3771
+ );
3772
+ var resolveUser = (userId) => mindstudio.resolveUser(userId);
2298
3773
  export {
3774
+ AuthContext,
2299
3775
  MindStudioAgent2 as MindStudioAgent,
2300
3776
  MindStudioError,
3777
+ Roles,
3778
+ auth,
2301
3779
  blockTypeAliases,
3780
+ db,
3781
+ index_default as default,
3782
+ mindstudio,
2302
3783
  monacoSnippets,
3784
+ resolveUser,
2303
3785
  stepMetadata
2304
3786
  };
2305
3787
  //# sourceMappingURL=index.js.map