@mindstudio-ai/agent 0.1.7 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -137,6 +137,1334 @@ function loadConfig() {
137
137
  }
138
138
  }
139
139
 
140
// src/auth/index.ts
var AuthContext = class {
  /** The current user's ID. */
  userId;
  /** The current user's roles in this app. */
  roles;
  /** All role assignments for this app (all users, all roles). */
  _roleAssignments;
  constructor(ctx) {
    const { userId, roleAssignments } = ctx;
    this.userId = userId;
    this._roleAssignments = roleAssignments;
    // Precompute the current user's role names from the full assignment list.
    const ownRoles = [];
    for (const assignment of roleAssignments) {
      if (assignment.userId === userId) {
        ownRoles.push(assignment.roleName);
      }
    }
    this.roles = ownRoles;
  }
  /**
   * Check if the current user has **any** of the given roles.
   * Returns true if at least one matches.
   *
   * @example
   * ```ts
   * if (auth.hasRole(Roles.admin, Roles.approver)) {
   *   // user is an admin OR an approver
   * }
   * ```
   */
  hasRole(...roles) {
    for (const role of roles) {
      if (this.roles.includes(role)) return true;
    }
    return false;
  }
  /**
   * Require the current user to have at least one of the given roles.
   * Throws a `MindStudioError` with code `'forbidden'` and status 403
   * if the user lacks all of the specified roles.
   *
   * Use this at the top of route handlers to gate access.
   *
   * @example
   * ```ts
   * auth.requireRole(Roles.admin);
   * // code below only runs if user is an admin
   * ```
   */
  requireRole(...roles) {
    if (this.hasRole(...roles)) return;
    throw new MindStudioError(
      `User does not have required role: ${roles.join(", ")}`,
      "forbidden",
      403
    );
  }
  /**
   * Get all user IDs that have the given role in this app.
   * Synchronous — scans the preloaded role assignments.
   *
   * @example
   * ```ts
   * const reviewers = auth.getUsersByRole(Roles.reviewer);
   * // ['user-id-1', 'user-id-2', ...]
   * ```
   */
  getUsersByRole(role) {
    const userIds = [];
    for (const assignment of this._roleAssignments) {
      if (assignment.roleName === role) {
        userIds.push(assignment.userId);
      }
    }
    return userIds;
  }
};
203
// Identity proxy: any string property access resolves to the property
// name itself, so `Roles.admin` yields "admin". Symbol accesses
// (e.g. Symbol.iterator probes) yield undefined.
var Roles = new Proxy(
  {},
  {
    get(_target, prop) {
      return typeof prop === "string" ? prop : undefined;
    }
  }
);
212
+
213
// src/db/sql.ts
/**
 * Render a JS value as a SQL literal for inlining into generated SQL.
 *
 * - null/undefined → NULL
 * - booleans → 1 / 0
 * - finite numbers → decimal text; NaN/Infinity → NULL (they have no
 *   SQL literal form and previously produced invalid SQL)
 * - strings → single-quoted with '' doubling
 * - everything else → JSON text, single-quoted with '' doubling
 *
 * @param {unknown} val - value to render
 * @returns {string} a SQL literal fragment
 */
function escapeValue(val) {
  if (val === null || val === void 0) return "NULL";
  if (typeof val === "boolean") return val ? "1" : "0";
  if (typeof val === "number") {
    // NaN / ±Infinity would otherwise be emitted verbatim and break the query.
    return Number.isFinite(val) ? String(val) : "NULL";
  }
  if (typeof val === "string") return `'${val.replace(/'/g, "''")}'`;
  // JSON.stringify returns undefined for functions/symbols; previously this
  // crashed on `.replace` — store SQL-safe 'null' instead.
  const json = JSON.stringify(val) ?? "null";
  return `'${json.replace(/'/g, "''")}'`;
}
222
/**
 * Serialize a value for a specific column, prefixing user-typed string
 * columns with the internal `@@user@@` marker before SQL escaping.
 */
function serializeValue(val, columnName, columns) {
  const column = columns.find((c) => c.name === columnName);
  const isUserString = column?.type === "user" && typeof val === "string";
  return isUserString ? escapeValue(`@@user@@${val}`) : escapeValue(val);
}
229
var USER_PREFIX = "@@user@@";
/**
 * Convert a raw DB row into its application shape:
 * - strips the `@@user@@` marker from user-typed string columns
 * - parses JSON-typed string columns (falling back to the raw string
 *   when the stored text is not valid JSON)
 * - passes all other values through unchanged
 *
 * @param {Record<string, unknown>} row - raw row from the driver
 * @param {{name: string, type?: string}[]} columns - table column metadata
 * @returns {Record<string, unknown>} deserialized row
 */
function deserializeRow(row, columns) {
  // Build a name → column lookup once instead of scanning `columns` with
  // .find() for every cell (was O(rows × columns)). First-wins to match
  // the previous .find() semantics on duplicate names.
  const colsByName = new Map();
  for (const c of columns) {
    if (!colsByName.has(c.name)) colsByName.set(c.name, c);
  }
  const result = {};
  for (const [key, value] of Object.entries(row)) {
    const col = colsByName.get(key);
    if (col?.type === "user" && typeof value === "string" && value.startsWith(USER_PREFIX)) {
      result[key] = value.slice(USER_PREFIX.length);
    } else if (col?.type === "json" && typeof value === "string") {
      try {
        result[key] = JSON.parse(value);
      } catch {
        // Not valid JSON — keep the stored text as-is.
        result[key] = value;
      }
    } else {
      result[key] = value;
    }
  }
  return result;
}
248
/**
 * Build a SELECT statement from optional where/orderBy/limit/offset parts.
 * `limit`/`offset` use a null-ish check so 0 is a valid value.
 */
function buildSelect(table, options = {}) {
  const clauses = [`SELECT * FROM ${table}`];
  const { where, orderBy, desc, limit, offset } = options;
  if (where) clauses.push(`WHERE ${where}`);
  if (orderBy) clauses.push(`ORDER BY ${orderBy} ${desc ? "DESC" : "ASC"}`);
  if (limit != null) clauses.push(`LIMIT ${limit}`);
  if (offset != null) clauses.push(`OFFSET ${offset}`);
  return clauses.join(" ");
}
256
/** Build a COUNT(*) query, optionally filtered by a WHERE clause. */
function buildCount(table, where) {
  const base = `SELECT COUNT(*) as count FROM ${table}`;
  return where ? `${base} WHERE ${where}` : base;
}
261
/**
 * Build an EXISTS / NOT EXISTS probe query. The result column is named
 * `result` and evaluates to 1 or 0.
 */
function buildExists(table, where, negate) {
  const existsFn = negate ? "NOT EXISTS" : "EXISTS";
  let inner = `SELECT 1 FROM ${table}`;
  if (where) inner += ` WHERE ${where}`;
  return `SELECT ${existsFn}(${inner}) as result`;
}
266
/**
 * Build an INSERT statement from a data object, dropping system-managed
 * columns (id/createdAt/updatedAt/lastUpdatedBy) first.
 */
function buildInsert(table, data, columns) {
  const filtered = stripSystemColumns(data);
  const names = [];
  const values = [];
  for (const key of Object.keys(filtered)) {
    names.push(key);
    values.push(serializeValue(filtered[key], key, columns));
  }
  return `INSERT INTO ${table} (${names.join(", ")}) VALUES (${values.join(", ")})`;
}
272
/**
 * Build an UPDATE statement targeting a single row by id, dropping
 * system-managed columns from the SET list first.
 */
function buildUpdate(table, id, data, columns) {
  const filtered = stripSystemColumns(data);
  const setParts = [];
  for (const [key, value] of Object.entries(filtered)) {
    setParts.push(`${key} = ${serializeValue(value, key, columns)}`);
  }
  return `UPDATE ${table} SET ${setParts.join(", ")} WHERE id = ${escapeValue(id)}`;
}
277
/** Build a DELETE statement, optionally filtered by a WHERE clause. */
function buildDelete(table, where) {
  const base = `DELETE FROM ${table}`;
  return where ? `${base} WHERE ${where}` : base;
}
282
// Columns managed by the platform; user writes must never set them.
var SYSTEM_COLUMNS = /* @__PURE__ */ new Set(["id", "createdAt", "updatedAt", "lastUpdatedBy"]);
/** Return a shallow copy of `data` without any system-managed columns. */
function stripSystemColumns(data) {
  return Object.fromEntries(
    Object.entries(data).filter(([key]) => !SYSTEM_COLUMNS.has(key))
  );
}
292
+
293
// src/db/predicate.ts
/**
 * Attempt to compile a JS predicate function into a SQL WHERE clause.
 *
 * Pipeline: stringify the function → extract the parameter name and body
 * expression → tokenize → parse into an AST → compile the AST to SQL.
 * Any failure at any stage (including leftover tokens after parsing, or
 * a thrown error) yields `{ type: "js", fn }`, meaning the caller must
 * filter rows in JavaScript instead.
 */
function compilePredicate(fn) {
  const jsFallback = () => ({ type: "js", fn });
  try {
    const source = fn.toString();
    const paramName = extractParamName(source);
    if (!paramName) return jsFallback();
    const body = extractBody(source);
    if (!body) return jsFallback();
    const tokens = tokenize(body);
    if (tokens.length === 0) return jsFallback();
    const parser = new Parser(tokens, paramName, fn);
    const ast = parser.parseExpression();
    // Reject both a failed parse and trailing unconsumed tokens.
    if (!ast || parser.pos < tokens.length) return jsFallback();
    const where = compileNode(ast);
    return where ? { type: "sql", where } : jsFallback();
  } catch {
    return jsFallback();
  }
}
314
/**
 * Extract the single parameter name from an arrow function's source,
 * e.g. `o => ...` or `(o) => ...` (an optional TS type annotation is
 * tolerated). Returns null for anything else (e.g. `function` syntax).
 */
function extractParamName(source) {
  const pattern = /^\s*(?:\(?\s*([a-zA-Z_$][a-zA-Z0-9_$]*)\s*(?::[^)]*?)?\)?\s*=>)/;
  const result = pattern.exec(source);
  return result === null ? null : result[1];
}
320
/**
 * Extract the expression body from an arrow function's source.
 * Concise bodies (`o => expr`) are returned directly; block bodies are
 * only supported in the single-statement `{ return expr; }` form.
 * Returns null when no usable expression can be recovered.
 */
function extractBody(source) {
  const arrowIdx = source.indexOf("=>");
  if (arrowIdx === -1) return null;
  const afterArrow = source.slice(arrowIdx + 2).trim();
  if (!afterArrow.startsWith("{")) {
    return afterArrow || null;
  }
  const match = afterArrow.match(/^\{\s*return\s+([\s\S]+?)\s*;?\s*\}$/);
  if (!match) return null;
  return match[1].trim() || null;
}
331
/**
 * Tokenize a JS predicate-body expression into a flat token list.
 *
 * Returns an EMPTY array whenever an unsupported construct is seen
 * (unterminated string, template literal, unknown character) — the
 * caller treats [] as "cannot compile to SQL, fall back to JS".
 */
function tokenize(expr) {
  const tokens = [];
  let i = 0;
  while (i < expr.length) {
    const ch = expr[i];
    // Skip whitespace.
    if (/\s/.test(ch)) {
      i++;
      continue;
    }
    // Single- or double-quoted string literal, honoring backslash escapes.
    if (ch === "'" || ch === '"') {
      const quote = ch;
      let str = "";
      i++;
      while (i < expr.length && expr[i] !== quote) {
        if (expr[i] === "\\") {
          // Escaped character: keep the char after the backslash literally.
          i++;
          if (i < expr.length) str += expr[i];
        } else {
          str += expr[i];
        }
        i++;
      }
      // Unterminated string literal → give up.
      if (i >= expr.length) return [];
      i++;
      tokens.push({ type: "string", value: str });
      continue;
    }
    // Template literals may contain interpolation — unsupported.
    if (ch === "`") return [];
    // Number literal; a leading '-' is folded in only when a digit follows.
    if (/[0-9]/.test(ch) || ch === "-" && i + 1 < expr.length && /[0-9]/.test(expr[i + 1])) {
      let num = ch;
      i++;
      while (i < expr.length && /[0-9.]/.test(expr[i])) {
        num += expr[i];
        i++;
      }
      tokens.push({ type: "number", value: num });
      continue;
    }
    // Operators: longest match first (3-char before 2-char before 1-char),
    // otherwise "===" would be tokenized as "==" + "=".
    if (expr.slice(i, i + 3) === "===" || expr.slice(i, i + 3) === "!==") {
      tokens.push({ type: "operator", value: expr.slice(i, i + 3) });
      i += 3;
      continue;
    }
    if (expr.slice(i, i + 2) === "==" || expr.slice(i, i + 2) === "!=" || expr.slice(i, i + 2) === "<=" || expr.slice(i, i + 2) === ">=" || expr.slice(i, i + 2) === "&&" || expr.slice(i, i + 2) === "||") {
      tokens.push({ type: "operator", value: expr.slice(i, i + 2) });
      i += 2;
      continue;
    }
    if (ch === "!" || ch === "<" || ch === ">") {
      tokens.push({ type: "operator", value: ch });
      i++;
      continue;
    }
    // Punctuation tokens.
    if (ch === ".") {
      tokens.push({ type: "dot", value: "." });
      i++;
      continue;
    }
    if (ch === "(") {
      tokens.push({ type: "lparen", value: "(" });
      i++;
      continue;
    }
    if (ch === ")") {
      tokens.push({ type: "rparen", value: ")" });
      i++;
      continue;
    }
    if (ch === "[") {
      tokens.push({ type: "lbracket", value: "[" });
      i++;
      continue;
    }
    if (ch === "]") {
      tokens.push({ type: "rbracket", value: "]" });
      i++;
      continue;
    }
    if (ch === ",") {
      tokens.push({ type: "comma", value: "," });
      i++;
      continue;
    }
    // Identifier (or keyword literal like true/false/null/undefined).
    if (/[a-zA-Z_$]/.test(ch)) {
      let ident = ch;
      i++;
      while (i < expr.length && /[a-zA-Z0-9_$]/.test(expr[i])) {
        ident += expr[i];
        i++;
      }
      tokens.push({ type: "identifier", value: ident });
      continue;
    }
    // Any other character is unsupported → give up.
    return [];
  }
  return tokens;
}
428
/**
 * Recursive-descent parser over the token stream produced by `tokenize`.
 *
 * Grammar (loosely): or_expr → and_expr ('||' and_expr)*,
 * and_expr → not_expr ('&&' not_expr)*, not_expr → '!' not_expr | primary.
 * Every parse method returns an AST node or null; null propagates upward
 * and makes the caller fall back to JS filtering.
 */
var Parser = class {
  constructor(tokens, paramName, originalFn) {
    this.tokens = tokens;
    this.paramName = paramName;
    // Retained for closure-variable resolution; currently unused — see
    // resolveClosureVariable, which always falls back.
    this.originalFn = originalFn;
  }
  // Cursor into `tokens`; also inspected by the caller after parsing to
  // reject expressions with trailing unconsumed tokens.
  pos = 0;
  /** Peek at the current token without consuming it. */
  peek() {
    return this.tokens[this.pos];
  }
  /** Consume the current token and advance. */
  advance() {
    return this.tokens[this.pos++];
  }
  /** Check if the current token matches an expected type and value. */
  match(type, value) {
    const t = this.peek();
    if (!t) return false;
    if (t.type !== type) return false;
    if (value !== void 0 && t.value !== value) return false;
    return true;
  }
  /** Consume a token if it matches, otherwise return false. */
  eat(type, value) {
    if (this.match(type, value)) {
      this.advance();
      return true;
    }
    return false;
  }
  // --- Grammar rules ---
  /** Entry point: parse a full expression. */
  parseExpression() {
    return this.parseOr();
  }
  /** or_expr → and_expr ( '||' and_expr )* */
  parseOr() {
    let left = this.parseAnd();
    if (!left) return null;
    while (this.match("operator", "||")) {
      this.advance();
      const right = this.parseAnd();
      if (!right) return null;
      left = { kind: "logical", operator: "OR", left, right };
    }
    return left;
  }
  /** and_expr → not_expr ( '&&' not_expr )* */
  parseAnd() {
    let left = this.parseNot();
    if (!left) return null;
    while (this.match("operator", "&&")) {
      this.advance();
      const right = this.parseNot();
      if (!right) return null;
      left = { kind: "logical", operator: "AND", left, right };
    }
    return left;
  }
  /** not_expr → '!' not_expr | primary */
  parseNot() {
    if (this.match("operator", "!")) {
      this.advance();
      // `!(...)` — negate a whole parenthesized subexpression.
      if (this.match("lparen")) {
        this.advance();
        const inner2 = this.parseExpression();
        if (!inner2) return null;
        if (!this.eat("rparen")) return null;
        return { kind: "not", operand: inner2 };
      }
      const inner = this.parsePrimary();
      if (!inner) return null;
      // `!o.flag` folds into the boolean-field node rather than wrapping
      // in NOT, so it compiles to `flag = 0`.
      if (inner.kind === "booleanField") {
        return { ...inner, negated: !inner.negated };
      }
      return { kind: "not", operand: inner };
    }
    return this.parsePrimary();
  }
  /**
   * primary → field_comparison | null_check | includes_expr | paren_expr | boolean_field
   *
   * This is the workhorse — handles the different patterns that can appear
   * as atomic expressions within a larger &&/|| combination.
   */
  parsePrimary() {
    if (this.match("lparen")) {
      this.advance();
      const inner = this.parseExpression();
      if (!inner) return null;
      if (!this.eat("rparen")) return null;
      return inner;
    }
    if (this.match("lbracket")) {
      return this.parseArrayIncludes();
    }
    if (this.match("identifier", this.paramName)) {
      return this.parseFieldExpression();
    }
    if (this.match("identifier")) {
      return this.parseNonParamExpression();
    }
    return null;
  }
  /**
   * Parse an expression that starts with the parameter name (e.g. `o.field`).
   *
   * Could be:
   * - `o.field === value` (comparison)
   * - `o.field != null` (null check)
   * - `o.field.includes('text')` (LIKE)
   * - `o.field` alone (boolean field check)
   */
  parseFieldExpression() {
    this.advance();
    const field = this.parseFieldPath();
    if (!field) return null;
    const next = this.peek();
    if (next?.type === "dot" && this.lookAheadForIncludes()) {
      return this.parseFieldIncludes(field);
    }
    if (next?.type === "operator" && isComparisonOp(next.value)) {
      return this.parseComparison(field);
    }
    return { kind: "booleanField", field, negated: false };
  }
  /**
   * Parse a dot-separated field path after the parameter name.
   * `o.status` → `"status"`, `o.address.city` → `"json_extract(address, '$.city')"`.
   */
  parseFieldPath() {
    if (!this.eat("dot")) return null;
    if (!this.match("identifier")) return null;
    const parts = [this.advance().value];
    while (this.match("dot") && this.tokens[this.pos + 1]?.type === "identifier") {
      this.advance();
      parts.push(this.advance().value);
    }
    if (parts.length === 1) {
      return parts[0];
    }
    // Nested access → SQLite json_extract on the root column.
    const root = parts[0];
    const jsonPath = "$." + parts.slice(1).join(".");
    return `json_extract(${root}, '${jsonPath}')`;
  }
  /**
   * Parse a comparison: `field OP value`.
   * The field has already been parsed; we need the operator and right-hand value.
   */
  parseComparison(field) {
    const opToken = this.advance();
    const jsOp = opToken.value;
    const value = this.parseValue();
    if (value === PARSE_FAILED) return null;
    // Comparisons against null/undefined become IS NULL / IS NOT NULL;
    // ordering operators against null are unsupported.
    if (value === null || value === void 0) {
      if (jsOp === "===" || jsOp === "==") {
        return { kind: "nullCheck", field, isNull: true };
      }
      if (jsOp === "!==" || jsOp === "!=") {
        return { kind: "nullCheck", field, isNull: false };
      }
      return null;
    }
    const sqlOp = JS_TO_SQL_OP[jsOp];
    if (!sqlOp) return null;
    return { kind: "comparison", field, operator: sqlOp, value };
  }
  /**
   * Parse `o.field.includes('text')` → LIKE expression.
   * The field name has already been parsed.
   */
  parseFieldIncludes(field) {
    this.advance();
    this.advance();
    if (!this.eat("lparen")) return null;
    const value = this.parseValue();
    if (value === PARSE_FAILED || typeof value !== "string") return null;
    if (!this.eat("rparen")) return null;
    // Escape LIKE wildcards in the literal search text.
    const escaped = value.replace(/%/g, "\\%").replace(/_/g, "\\_");
    return { kind: "like", field, pattern: `%${escaped}%` };
  }
  /**
   * Parse `['a', 'b', 'c'].includes(o.field)` → IN expression.
   * The opening bracket has been peeked but not consumed.
   */
  parseArrayIncludes() {
    this.advance();
    const values = [];
    while (!this.match("rbracket")) {
      if (values.length > 0) {
        if (!this.eat("comma")) return null;
      }
      const val = this.parseValue();
      if (val === PARSE_FAILED) return null;
      values.push(val);
    }
    this.advance();
    if (!this.eat("dot")) return null;
    if (!this.match("identifier", "includes")) return null;
    this.advance();
    if (!this.eat("lparen")) return null;
    if (!this.match("identifier", this.paramName)) return null;
    this.advance();
    const field = this.parseFieldPath();
    if (!field) return null;
    if (!this.eat("rparen")) return null;
    return { kind: "in", field, values };
  }
  /**
   * Parse an expression that starts with an identifier that is NOT the
   * parameter name. This could be:
   * - A keyword literal: `true`, `false`, `null`, `undefined`
   * - A closure variable used in a comparison (handled by backtracking)
   *
   * NOTE: currently always returns null, i.e. every such expression
   * falls back to JS filtering.
   */
  parseNonParamExpression() {
    const ident = this.peek().value;
    if (ident === "true" || ident === "false") return null;
    return null;
  }
  /**
   * Parse a literal value or closure variable reference.
   *
   * Returns the parsed value, or PARSE_FAILED if parsing fails.
   * Returns `null` or `undefined` for those keyword literals.
   */
  parseValue() {
    const t = this.peek();
    if (!t) return PARSE_FAILED;
    if (t.type === "string") {
      this.advance();
      return t.value;
    }
    if (t.type === "number") {
      this.advance();
      return Number(t.value);
    }
    if (t.type === "identifier") {
      if (t.value === "true") {
        this.advance();
        return true;
      }
      if (t.value === "false") {
        this.advance();
        return false;
      }
      if (t.value === "null") {
        this.advance();
        return null;
      }
      if (t.value === "undefined") {
        this.advance();
        return void 0;
      }
      // Any other identifier is treated as a closure variable.
      return this.resolveClosureVariable();
    }
    // Unary minus before a number token. NOTE(review): the tokenizer folds
    // `-digit` into a single number token and never emits a bare "-"
    // operator, so this branch appears unreachable — confirm before removal.
    if (t.type === "operator" && t.value === "-") {
      this.advance();
      const next = this.peek();
      if (next?.type === "number") {
        this.advance();
        return -Number(next.value);
      }
      return PARSE_FAILED;
    }
    return PARSE_FAILED;
  }
  /**
   * Attempt to resolve a closure variable by invoking the original function
   * with a recording Proxy and inspecting what values it compares against.
   *
   * This handles the common pattern:
   * ```ts
   * const userId = auth.userId;
   * orders.filter(o => o.requestedBy === userId)
   * ```
   *
   * The Proxy captures property accesses on the parameter and we can then
   * extract the comparison value from the function's behavior. However,
   * this approach has limitations — if the function throws, has side effects,
   * or uses the variable in a non-comparison context, we fall back to JS.
   *
   * NOTE: the implementation below is incomplete — it builds the recording
   * proxy but never invokes `originalFn`, and unconditionally returns
   * PARSE_FAILED. Closure variables therefore always force JS filtering.
   */
  resolveClosureVariable() {
    const identToken = this.advance();
    let closureExpr = identToken.value;
    // Consume the full dotted path (e.g. `config.limits.max`).
    while (this.match("dot") && this.tokens[this.pos + 1]?.type === "identifier") {
      this.advance();
      closureExpr += "." + this.advance().value;
    }
    try {
      const MARKER = /* @__PURE__ */ Symbol("field_access_marker");
      const accessed = [];
      const proxy = new Proxy(
        {},
        {
          get(_, prop) {
            accessed.push(prop);
            return new Proxy(() => MARKER, {
              get(_2, nestedProp) {
                accessed.push(nestedProp);
                return MARKER;
              }
            });
          }
        }
      );
      void proxy;
      return PARSE_FAILED;
    } catch {
      return PARSE_FAILED;
    }
  }
  /**
   * Look ahead to check if the next tokens form `.includes(`.
   * Used to disambiguate `o.field.includes(...)` from `o.field.nested`.
   */
  lookAheadForIncludes() {
    return this.tokens[this.pos]?.type === "dot" && this.tokens[this.pos + 1]?.type === "identifier" && this.tokens[this.pos + 1]?.value === "includes" && this.tokens[this.pos + 2]?.type === "lparen";
  }
};
748
/**
 * Compile a predicate AST node into a SQL WHERE fragment.
 * Returns null for unknown node kinds or uncompilable children,
 * which makes the caller fall back to JS filtering.
 */
function compileNode(node) {
  switch (node.kind) {
    case "comparison":
      return `${node.field} ${node.operator} ${escapeValue(node.value)}`;
    case "nullCheck":
      return `${node.field} ${node.isNull ? "IS NULL" : "IS NOT NULL"}`;
    case "in": {
      // `[].includes(x)` is always false → constant false in SQL.
      if (node.values.length === 0) return "0";
      const vals = node.values.map(escapeValue).join(", ");
      return `${node.field} IN (${vals})`;
    }
    case "like":
      // parseFieldIncludes escapes % and _ with a backslash, but SQL LIKE
      // has no default escape character — without an explicit ESCAPE clause
      // those backslashes were matched literally and the escaping was inert.
      return `${node.field} LIKE ${escapeValue(node.pattern)} ESCAPE '\\'`;
    case "booleanField":
      // Booleans are stored as 0/1 integers.
      return node.negated ? `${node.field} = 0` : `${node.field} = 1`;
    case "logical": {
      const left = compileNode(node.left);
      const right = compileNode(node.right);
      if (!left || !right) return null;
      return `(${left} ${node.operator} ${right})`;
    }
    case "not": {
      const inner = compileNode(node.operand);
      if (!inner) return null;
      return `NOT (${inner})`;
    }
    default:
      return null;
  }
}
778
/** Map from supported JS comparison operators to their SQL spellings. */
var JS_TO_SQL_OP = {
  "===": "=",
  "==": "=",
  "!==": "!=",
  "!=": "!=",
  "<": "<",
  ">": ">",
  "<=": "<=",
  ">=": ">="
};
/** Sentinel returned when a literal/closure value cannot be parsed. */
var PARSE_FAILED = /* @__PURE__ */ Symbol("PARSE_FAILED");
/**
 * Check whether a token value is a supported comparison operator.
 * Uses Object.hasOwn rather than `in` so inherited Object.prototype
 * keys (e.g. "toString", "constructor") are not misread as operators.
 */
function isComparisonOp(value) {
  return Object.hasOwn(JS_TO_SQL_OP, value);
}
792
+
793
// src/db/query.ts
/**
 * Immutable, chainable query builder. Chain methods (filter/sortBy/
 * reverse/take/skip) return new Query instances; terminal methods
 * (first/last/count/some/every/min/max/groupBy) execute. The class is
 * PromiseLike: `await query` runs `_execute()` via `then()`.
 *
 * Execution strategy: predicates are compiled to SQL when possible;
 * if any predicate fails to compile, ALL rows are fetched and filtered,
 * sorted, and paginated in JS (see _execute / _fetchAndFilterInJs).
 */
var Query = class _Query {
  /** @internal Accumulated predicate functions to filter by. */
  _predicates;
  /** @internal The field accessor for sorting, if set. */
  _sortAccessor;
  /** @internal Whether the sort order is reversed (DESC). */
  _reversed;
  /** @internal Maximum number of results (SQL LIMIT). */
  _limit;
  /** @internal Number of results to skip (SQL OFFSET). */
  _offset;
  /** @internal Binding to the database execution layer. */
  _config;
  constructor(config, options) {
    this._config = config;
    this._predicates = options?.predicates ?? [];
    this._sortAccessor = options?.sortAccessor;
    this._reversed = options?.reversed ?? false;
    this._limit = options?.limit;
    this._offset = options?.offset;
  }
  /**
   * Create a clone of this query with some options overridden.
   * Used internally by chain methods to maintain immutability.
   * Note: overrides use `??`, so a field can be replaced but not
   * cleared back to undefined.
   */
  _clone(overrides) {
    return new _Query(this._config, {
      predicates: overrides.predicates ?? this._predicates,
      sortAccessor: overrides.sortAccessor ?? this._sortAccessor,
      reversed: overrides.reversed ?? this._reversed,
      limit: overrides.limit ?? this._limit,
      offset: overrides.offset ?? this._offset
    });
  }
  // -------------------------------------------------------------------------
  // Chain methods — return new Query instances
  // -------------------------------------------------------------------------
  /**
   * Add a filter predicate. Multiple filters are ANDed together.
   *
   * @example
   * ```ts
   * const active = Orders.filter(o => o.status === 'active');
   * const expensive = active.filter(o => o.amount > 5000);
   * // WHERE status = 'active' AND amount > 5000
   * ```
   */
  filter(predicate) {
    return this._clone({
      predicates: [...this._predicates, predicate]
    });
  }
  /**
   * Sort results by a field (ascending by default).
   * Use `.reverse()` after `.sortBy()` for descending order.
   *
   * @example
   * ```ts
   * const newest = Orders.sortBy(o => o.createdAt).reverse();
   * ```
   */
  sortBy(accessor) {
    return this._clone({ sortAccessor: accessor });
  }
  /**
   * Reverse the current sort order. If no sort is set, this has no effect.
   */
  reverse() {
    return this._clone({ reversed: !this._reversed });
  }
  /**
   * Limit the number of results returned.
   *
   * @example
   * ```ts
   * const top10 = Orders.sortBy(o => o.amount).reverse().take(10);
   * ```
   */
  take(n) {
    return this._clone({ limit: n });
  }
  /**
   * Skip the first n results. Use with `.take()` for pagination.
   *
   * @example
   * ```ts
   * const page2 = Orders.sortBy(o => o.createdAt).skip(50).take(50);
   * ```
   */
  skip(n) {
    return this._clone({ offset: n });
  }
  // -------------------------------------------------------------------------
  // Terminal methods — execute the query and return results
  // -------------------------------------------------------------------------
  /**
   * Return the first matching row, or null if no rows match.
   * Applies the current sort order before taking the first result.
   */
  async first() {
    const rows = await this._clone({ limit: 1 })._execute();
    return rows[0] ?? null;
  }
  /**
   * Return the last matching row (per current sort), or null.
   * Flips the sort direction and takes 1 row.
   */
  async last() {
    const rows = await this._clone({ limit: 1, reversed: !this._reversed })._execute();
    return rows[0] ?? null;
  }
  /**
   * Count matching rows. Returns a number, not the rows themselves.
   * Executes as `SELECT COUNT(*)` when predicates compile to SQL.
   */
  async count() {
    const compiled = this._compilePredicates();
    if (compiled.allSql) {
      // Empty WHERE string means "no predicates" → count everything.
      const where = compiled.sqlWhere || void 0;
      const sql = buildCount(this._config.tableName, where);
      const result = await this._config.executeQuery(sql);
      const row = result.rows[0];
      return row?.count ?? 0;
    }
    const rows = await this._fetchAndFilterInJs(compiled);
    return rows.length;
  }
  /**
   * Check if any row matches the current filters. Short-circuits —
   * doesn't load all rows when using SQL.
   */
  async some() {
    const compiled = this._compilePredicates();
    if (compiled.allSql) {
      const where = compiled.sqlWhere || void 0;
      const sql = buildExists(this._config.tableName, where);
      const result = await this._config.executeQuery(sql);
      const row = result.rows[0];
      return row?.result === 1;
    }
    const rows = await this._fetchAndFilterInJs(compiled);
    return rows.length > 0;
  }
  /**
   * Check if all rows match the current filters. Short-circuits on false.
   *
   * Implemented as NOT EXISTS(... WHERE NOT predicate) — returns true
   * if no rows fail the predicate.
   */
  async every() {
    const compiled = this._compilePredicates();
    if (compiled.allSql && compiled.sqlWhere) {
      const sql = buildExists(this._config.tableName, `NOT (${compiled.sqlWhere})`, true);
      const result = await this._config.executeQuery(sql);
      const row = result.rows[0];
      return row?.result === 1;
    }
    // No predicates → vacuously true without touching the database.
    if (this._predicates.length === 0) return true;
    const allRows = await this._fetchAllRows();
    return allRows.every(
      (row) => this._predicates.every((pred) => pred(row))
    );
  }
  /**
   * Return the row with the minimum value for the given field.
   * Executes as `ORDER BY field ASC LIMIT 1` in SQL.
   */
  async min(accessor) {
    return this.sortBy(accessor).first();
  }
  /**
   * Return the row with the maximum value for the given field.
   * Executes as `ORDER BY field DESC LIMIT 1` in SQL.
   */
  async max(accessor) {
    return this.sortBy(accessor).reverse().first();
  }
  /**
   * Group rows by a field value. Returns a Map.
   * Always executes in JS (no SQL equivalent for grouping into a Map).
   */
  async groupBy(accessor) {
    const rows = await this._execute();
    const map = /* @__PURE__ */ new Map();
    for (const row of rows) {
      const key = accessor(row);
      const group = map.get(key);
      if (group) {
        group.push(row);
      } else {
        map.set(key, [row]);
      }
    }
    return map;
  }
  // -------------------------------------------------------------------------
  // PromiseLike implementation — makes `await query` work
  // -------------------------------------------------------------------------
  /**
   * PromiseLike.then() — executes the query and pipes the result.
   * This is what makes `const rows = await query` work.
   */
  then(onfulfilled, onrejected) {
    return this._execute().then(onfulfilled, onrejected);
  }
  // -------------------------------------------------------------------------
  // Execution internals
  // -------------------------------------------------------------------------
  /**
   * Execute the query and return typed result rows.
   *
   * This is the core execution method. It:
   * 1. Tries to compile all predicates to SQL
   * 2. If all compile → builds and executes a single SQL query
   * 3. If any fail → fetches all rows and processes in JS
   * 4. Deserializes rows (user prefix stripping, JSON parsing)
   */
  async _execute() {
    const compiled = this._compilePredicates();
    if (compiled.allSql) {
      // ORDER BY can only be pushed to SQL for simple `x => x.field`
      // accessors; otherwise no ordering is applied on this path.
      const sortField = this._sortAccessor ? extractFieldName(this._sortAccessor) : void 0;
      const sql = buildSelect(this._config.tableName, {
        where: compiled.sqlWhere || void 0,
        orderBy: sortField ?? void 0,
        desc: this._reversed,
        limit: this._limit,
        offset: this._offset
      });
      const result = await this._config.executeQuery(sql);
      return result.rows.map(
        (row) => deserializeRow(
          row,
          this._config.columns
        )
      );
    }
    // JS fallback: filter, then sort, then paginate — same order SQL would.
    let rows = await this._fetchAndFilterInJs(compiled);
    if (this._sortAccessor) {
      const accessor = this._sortAccessor;
      rows.sort((a, b) => {
        const aVal = accessor(a);
        const bVal = accessor(b);
        if (aVal < bVal) return this._reversed ? 1 : -1;
        if (aVal > bVal) return this._reversed ? -1 : 1;
        return 0;
      });
    }
    if (this._offset != null || this._limit != null) {
      const start = this._offset ?? 0;
      const end = this._limit != null ? start + this._limit : void 0;
      rows = rows.slice(start, end);
    }
    return rows;
  }
  /**
   * Compile all accumulated predicates and determine the execution strategy.
   *
   * Returns an object with:
   * - `allSql`: whether all predicates compiled to SQL
   * - `sqlWhere`: combined WHERE clause (ANDed) if all compiled
   * - `compiled`: individual compilation results
   */
  _compilePredicates() {
    if (this._predicates.length === 0) {
      return { allSql: true, sqlWhere: "", compiled: [] };
    }
    const compiled = this._predicates.map((pred) => compilePredicate(pred));
    const allSql = compiled.every((c) => c.type === "sql");
    let sqlWhere = "";
    if (allSql) {
      sqlWhere = compiled.map((c) => c.where).join(" AND ");
    }
    return { allSql, sqlWhere, compiled };
  }
  /**
   * Fetch all rows from the table and apply JS predicates.
   * This is the fallback path when SQL compilation fails.
   *
   * Logs a warning to stderr so developers know they're on the slow path.
   */
  async _fetchAndFilterInJs(compiled) {
    const allRows = await this._fetchAllRows();
    if (compiled.compiled.some((c) => c.type === "js")) {
      console.warn(
        `[mindstudio] Filter on ${this._config.tableName} could not be compiled to SQL \u2014 scanning ${allRows.length} rows in JS`
      );
    }
    return allRows.filter(
      (row) => this._predicates.every((pred) => pred(row))
    );
  }
  /**
   * Fetch all rows from the table (SELECT * with no WHERE).
   * Used by the JS fallback path.
   */
  async _fetchAllRows() {
    const sql = buildSelect(this._config.tableName);
    const result = await this._config.executeQuery(sql);
    return result.rows.map(
      (row) => deserializeRow(row, this._config.columns)
    );
  }
};
1097
/**
 * Extract the field name from a simple accessor function by inspecting its
 * source text.
 *
 * Supported shapes (generalized from concise arrows only):
 *   - `x => x.field` / `(x) => x.field`
 *   - `(x) => { return x.field; }`
 *   - `function (x) { return x.field; }`
 *
 * @param accessor - The accessor function to inspect
 * @returns The accessed property name, or null if the function is not a
 *   plain single-property accessor (e.g. contains arithmetic or chaining)
 */
function extractFieldName(accessor) {
  const source = accessor.toString();
  const id = "[a-zA-Z_$][a-zA-Z0-9_$]*";
  // Concise arrow body: `x => x.field` or `(x) => x.field`.
  const concise = source.match(
    new RegExp(`^\\s*\\(?(${id})\\)?\\s*=>\\s*\\1\\.(${id})\\s*$`)
  );
  if (concise) return concise[2];
  // Block body with a single return: `(x) => { return x.field; }` or
  // `function (x) { return x.field; }`.
  const block = source.match(
    new RegExp(
      `^\\s*(?:function\\s*)?\\(?(${id})\\)?\\s*(?:=>)?\\s*\\{\\s*return\\s+\\1\\.(${id})\\s*;?\\s*\\}\\s*$`
    )
  );
  return block ? block[2] : null;
}
1104
+
1105
// src/db/table.ts
var Table = class {
  /** @internal Runtime config binding this table to the execution layer. */
  _config;
  constructor(config) {
    this._config = config;
  }
  /** @internal Canonical `id = <escaped>` WHERE clause for a single row. */
  _idWhere(id) {
    return `id = ${escapeValue(id)}`;
  }
  /** @internal Deserialize a raw result row against this table's schema. */
  _hydrate(raw) {
    return deserializeRow(raw, this._config.columns);
  }
  // -------------------------------------------------------------------------
  // Reads — direct (return Promises)
  // -------------------------------------------------------------------------
  /**
   * Get a single row by ID. Returns null if not found.
   */
  async get(id) {
    const sql = buildSelect(this._config.tableName, {
      where: this._idWhere(id),
      limit: 1
    });
    const { rows } = await this._config.executeQuery(sql);
    return rows.length === 0 ? null : this._hydrate(rows[0]);
  }
  /**
   * Find the first row matching a predicate. Returns null if none match.
   */
  async findOne(predicate) {
    return this.filter(predicate).first();
  }
  /**
   * Count rows, optionally filtered by a predicate.
   * With no predicate this runs a single COUNT query.
   */
  async count(predicate) {
    if (predicate) {
      return this.filter(predicate).count();
    }
    const { rows } = await this._config.executeQuery(
      buildCount(this._config.tableName)
    );
    return rows[0]?.count ?? 0;
  }
  /**
   * Check if any row matches a predicate. Short-circuits.
   */
  async some(predicate) {
    return this.filter(predicate).some();
  }
  /**
   * Check if all rows match a predicate.
   */
  async every(predicate) {
    return this.filter(predicate).every();
  }
  /**
   * Check if the table has zero rows (single EXISTS-style query).
   */
  async isEmpty() {
    const { rows } = await this._config.executeQuery(
      buildExists(this._config.tableName, void 0, true)
    );
    return rows[0]?.result === 1;
  }
  /**
   * Return the row with the minimum value for a field
   * (`ORDER BY field ASC LIMIT 1`).
   */
  async min(accessor) {
    return this.sortBy(accessor).first();
  }
  /**
   * Return the row with the maximum value for a field
   * (`ORDER BY field DESC LIMIT 1`).
   */
  async max(accessor) {
    return this.sortBy(accessor).reverse().first();
  }
  /**
   * Group all rows by a field value. Returns a Map of value → rows.
   */
  async groupBy(accessor) {
    return new Query(this._config).groupBy(accessor);
  }
  // -------------------------------------------------------------------------
  // Reads — chainable (return Query<T>)
  // -------------------------------------------------------------------------
  /**
   * Filter rows by a predicate. Returns a chainable Query. The predicate is
   * compiled to SQL when possible; otherwise the query falls back to
   * fetching all rows and filtering in JS.
   */
  filter(predicate) {
    return new Query(this._config).filter(predicate);
  }
  /**
   * Sort all rows by a field. Returns a chainable Query.
   */
  sortBy(accessor) {
    return new Query(this._config).sortBy(accessor);
  }
  /**
   * Insert one row or an array of rows. Returns the inserted row(s),
   * re-fetched so server-populated columns (id, timestamps) are present.
   */
  async push(data) {
    const wasArray = Array.isArray(data);
    const batch = wasArray ? data : [data];
    const inserted = [];
    for (const item of batch) {
      await this._config.executeQuery(
        buildInsert(this._config.tableName, item, this._config.columns)
      );
      // Read the row back via SQLite's last_insert_rowid() so callers see
      // platform-populated columns on the returned object.
      const fetchSql = `SELECT * FROM ${this._config.tableName} WHERE rowid = last_insert_rowid()`;
      const fetched = await this._config.executeQuery(fetchSql);
      if (fetched.rows.length > 0) {
        inserted.push(this._hydrate(fetched.rows[0]));
      }
    }
    return wasArray ? inserted : inserted[0];
  }
  /**
   * Update a row by ID; only the provided fields change. Returns the
   * updated row re-fetched from the database. System columns are stripped
   * by the update builder; `updatedAt`/`lastUpdatedBy` are set by the
   * platform.
   */
  async update(id, data) {
    await this._config.executeQuery(
      buildUpdate(this._config.tableName, id, data, this._config.columns)
    );
    const fetchSql = buildSelect(this._config.tableName, {
      where: this._idWhere(id),
      limit: 1
    });
    const { rows } = await this._config.executeQuery(fetchSql);
    return this._hydrate(rows[0]);
  }
  /**
   * Remove a row by ID.
   */
  async remove(id) {
    await this._config.executeQuery(
      buildDelete(this._config.tableName, this._idWhere(id))
    );
  }
  /**
   * Remove all rows matching a predicate. Returns the count removed.
   * Uses a single DELETE when the predicate compiles to SQL; otherwise
   * scans all rows in JS and deletes matches individually by id.
   */
  async removeAll(predicate) {
    const compiled = compilePredicate(predicate);
    if (compiled.type === "sql") {
      // Fast path: one DELETE ... WHERE statement.
      const result = await this._config.executeQuery(
        buildDelete(this._config.tableName, compiled.where)
      );
      return result.changes;
    }
    console.warn(
      `[mindstudio] removeAll predicate on ${this._config.tableName} could not be compiled to SQL \u2014 fetching all rows first`
    );
    const allResult = await this._config.executeQuery(
      buildSelect(this._config.tableName)
    );
    const candidates = allResult.rows.map((raw) => this._hydrate(raw));
    let removed = 0;
    for (const row of candidates) {
      if (!predicate(row)) continue;
      const id = row.id;
      // Rows without an id cannot be targeted individually; skip them.
      if (!id) continue;
      await this._config.executeQuery(
        buildDelete(this._config.tableName, this._idWhere(id))
      );
      removed++;
    }
    return removed;
  }
  /**
   * Remove all rows from the table.
   */
  async clear() {
    await this._config.executeQuery(buildDelete(this._config.tableName));
  }
};
1396
+
1397
// src/db/index.ts
/**
 * Build the `db` namespace bound to a resolved set of databases.
 *
 * @param databases - Database metadata (id, name, tables with schemas)
 * @param executeQuery - Callback `(databaseId, sql) => Promise<result>`
 * @returns Namespace with `defineTable` plus pure time helpers (unix ms)
 */
function createDb(databases, executeQuery) {
  const defineTable = (name, options) => {
    const resolved = resolveTable(databases, name, options?.database);
    // Bind the table to its resolved database so queries route correctly.
    return new Table({
      databaseId: resolved.databaseId,
      tableName: name,
      columns: resolved.columns,
      executeQuery: (sql) => executeQuery(resolved.databaseId, sql)
    });
  };
  return {
    defineTable,
    // --- Time helpers ---
    // Pure JS, no platform dependency. All timestamps are unix ms.
    now: () => Date.now(),
    days: (n) => n * 864e5,
    hours: (n) => n * 36e5,
    minutes: (n) => n * 6e4,
    ago: (ms) => Date.now() - ms,
    fromNow: (ms) => Date.now() + ms
  };
}
1420
/**
 * Resolve a table name (and optional database hint) to a concrete
 * database id + column schema.
 *
 * @param databases - All databases available in the app context
 * @param tableName - Table to locate
 * @param databaseHint - Optional database id or display name to search in
 * @returns `{ databaseId, columns }` for the first match
 * @throws {MindStudioError} `no_databases` / `database_not_found` /
 *   `table_not_found` with helpful "available" listings
 */
function resolveTable(databases, tableName, databaseHint) {
  if (databases.length === 0) {
    throw new MindStudioError(
      `No databases found in app context. Make sure the app has at least one database configured.`,
      "no_databases",
      400
    );
  }
  if (databaseHint) {
    // Explicit database requested: match by id or by display name.
    const hinted = databases.find(
      (db2) => db2.id === databaseHint || db2.name === databaseHint
    );
    if (!hinted) {
      const names = databases.map((db2) => db2.name || db2.id).join(", ");
      throw new MindStudioError(
        `Database "${databaseHint}" not found. Available databases: ${names}`,
        "database_not_found",
        400
      );
    }
    const found = hinted.tables.find((t) => t.name === tableName);
    if (!found) {
      const names = hinted.tables.map((t) => t.name).join(", ");
      throw new MindStudioError(
        `Table "${tableName}" not found in database "${databaseHint}". Available tables: ${names || "(none)"}`,
        "table_not_found",
        400
      );
    }
    return { databaseId: hinted.id, columns: found.schema };
  }
  // No hint: take the first database that contains the table.
  for (const candidate of databases) {
    const match = candidate.tables.find((t) => t.name === tableName);
    if (match) {
      return { databaseId: candidate.id, columns: match.schema };
    }
  }
  const availableTables = databases.flatMap((db2) => db2.tables.map((t) => t.name)).join(", ");
  throw new MindStudioError(
    `Table "${tableName}" not found in app databases. Available tables: ${availableTables || "(none)"}`,
    "table_not_found",
    400
  );
}
1467
+
140
1468
  // src/generated/steps.ts
141
1469
  function applyStepMethods(AgentClass) {
142
1470
  const proto = AgentClass.prototype;
@@ -170,6 +1498,9 @@ function applyStepMethods(AgentClass) {
170
1498
  proto.captureThumbnail = function(step, options) {
171
1499
  return this.executeStep("captureThumbnail", step, options);
172
1500
  };
1501
+ proto.checkAppRole = function(step, options) {
1502
+ return this.executeStep("checkAppRole", step, options);
1503
+ };
173
1504
  proto.codaCreateUpdatePage = function(step, options) {
174
1505
  return this.executeStep("codaCreateUpdatePage", step, options);
175
1506
  };
@@ -419,6 +1750,9 @@ function applyStepMethods(AgentClass) {
419
1750
  proto.postToZapier = function(step, options) {
420
1751
  return this.executeStep("postToZapier", step, options);
421
1752
  };
1753
+ proto.queryAppDatabase = function(step, options) {
1754
+ return this.executeStep("queryAppDatabase", step, options);
1755
+ };
422
1756
  proto.queryDataSource = function(step, options) {
423
1757
  return this.executeStep("queryDataSource", step, options);
424
1758
  };
@@ -620,17 +1954,46 @@ var MindStudioAgent = class {
620
1954
  _reuseThreadId;
621
1955
  /** @internal */
622
1956
  _threadId;
1957
+ // ---- App context (db + auth) ----
1958
+ /**
1959
+ * @internal App ID for context resolution. Resolved from:
1960
+ * constructor appId → MINDSTUDIO_APP_ID env → sandbox globals →
1961
+ * auto-detected from first executeStep response header.
1962
+ */
1963
+ _appId;
1964
+ /**
1965
+ * @internal Cached app context (auth + databases). Populated by
1966
+ * ensureContext() and cached for the lifetime of the instance.
1967
+ */
1968
+ _context;
1969
+ /**
1970
+ * @internal Deduplication promise for ensureContext(). Ensures only one
1971
+ * context fetch is in-flight at a time, even if multiple db/auth
1972
+ * operations trigger it concurrently.
1973
+ */
1974
+ _contextPromise;
1975
+ /** @internal Cached AuthContext instance, created during context hydration. */
1976
+ _auth;
1977
+ /** @internal Cached Db namespace instance, created during context hydration. */
1978
+ _db;
1979
+ /** @internal Auth type — 'internal' for CALLBACK_TOKEN (managed mode), 'apiKey' otherwise. */
1980
+ _authType;
623
1981
  /**
   * Create an agent instance.
   *
   * Resolution order (first match wins), as visible below:
   * - token/authType: via resolveToken(options.apiKey, config)
   * - baseUrl: options.baseUrl → MINDSTUDIO_BASE_URL → REMOTE_HOSTNAME →
   *   config.baseUrl → DEFAULT_BASE_URL
   * - appId: options.appId → MINDSTUDIO_APP_ID env (may also be filled in
   *   later from a step-response header — see executeStep)
   */
  constructor(options = {}) {
    const config = loadConfig();
    const { token, authType } = resolveToken(options.apiKey, config);
    const baseUrl = options.baseUrl ?? process.env.MINDSTUDIO_BASE_URL ?? process.env.REMOTE_HOSTNAME ?? config.baseUrl ?? DEFAULT_BASE_URL;
    // Env flag accepts "true" or "1", case-insensitive.
    this._reuseThreadId = options.reuseThreadId ?? /^(true|1)$/i.test(process.env.MINDSTUDIO_REUSE_THREAD_ID ?? "");
    this._appId = options.appId ?? process.env.MINDSTUDIO_APP_ID ?? void 0;
    this._authType = authType;
    this._httpConfig = {
      baseUrl,
      token,
      rateLimiter: new RateLimiter(authType),
      maxRetries: options.maxRetries ?? DEFAULT_MAX_RETRIES
    };
    if (authType === "internal") {
      // Managed mode: the sandbox may have pre-populated globalThis.ai,
      // enabling synchronous auth/db context without an HTTP round-trip.
      this._trySandboxHydration();
    }
  }
635
1998
  /**
636
1999
  * Execute any step by its type name. This is the low-level method that all
@@ -669,6 +2032,10 @@ var MindStudioAgent = class {
669
2032
  if (this._reuseThreadId && returnedThreadId) {
670
2033
  this._threadId = returnedThreadId;
671
2034
  }
2035
+ const returnedAppId = headers.get("x-mindstudio-app-id");
2036
+ if (!this._appId && returnedAppId) {
2037
+ this._appId = returnedAppId;
2038
+ }
672
2039
  const remaining = headers.get("x-ratelimit-remaining");
673
2040
  const billingCost = headers.get("x-mindstudio-billing-cost");
674
2041
  const billingEvents = headers.get("x-mindstudio-billing-events");
@@ -935,6 +2302,278 @@ var MindStudioAgent = class {
935
2302
  return data;
936
2303
  }
937
2304
  // -------------------------------------------------------------------------
2305
+ // db + auth namespaces
2306
+ // -------------------------------------------------------------------------
2307
+ /**
2308
+ * The `auth` namespace — synchronous role-based access control.
2309
+ *
2310
+ * Provides the current user's identity and roles. All methods are
2311
+ * synchronous since the role map is preloaded during context hydration.
2312
+ *
2313
+ * **Important**: Context must be hydrated before accessing `auth`.
2314
+ * - Inside the MindStudio sandbox: automatic (populated from globals)
2315
+ * - Outside the sandbox: call `await agent.ensureContext()` first,
2316
+ * or access `auth` after any `db` operation (which auto-hydrates)
2317
+ *
2318
+ * @throws {MindStudioError} if context has not been hydrated yet
2319
+ *
2320
+ * @example
2321
+ * ```ts
2322
+ * await agent.ensureContext();
2323
+ * agent.auth.requireRole(Roles.admin);
2324
+ * const admins = agent.auth.getUsersByRole(Roles.admin);
2325
+ * ```
2326
+ */
2327
+ get auth() {
2328
+ if (!this._auth) {
2329
+ throw new MindStudioError(
2330
+ "Auth context not yet loaded. Call `await agent.ensureContext()` or perform any db operation first (which auto-hydrates context). Inside the MindStudio sandbox, context is loaded automatically.",
2331
+ "context_not_loaded",
2332
+ 400
2333
+ );
2334
+ }
2335
+ return this._auth;
2336
+ }
2337
+ /**
2338
+ * The `db` namespace — chainable collection API over managed databases.
2339
+ *
2340
+ * Use `db.defineTable<T>(name)` to get a typed Table<T>, then call
2341
+ * collection methods (filter, sortBy, push, update, etc.) on it.
2342
+ *
2343
+ * Context is auto-hydrated on first query execution — you can safely
2344
+ * call `defineTable()` at module scope without triggering any HTTP.
2345
+ *
2346
+ * @example
2347
+ * ```ts
2348
+ * const Orders = agent.db.defineTable<Order>('orders');
2349
+ * const active = await Orders.filter(o => o.status === 'active').take(10);
2350
+ * ```
2351
+ */
2352
+ get db() {
2353
+ if (this._db) return this._db;
2354
+ return this._createLazyDb();
2355
+ }
2356
+ /**
2357
+ * Hydrate the app context (auth + database metadata). This must be
2358
+ * called before using `auth` synchronously. For `db`, hydration happens
2359
+ * automatically on first query.
2360
+ *
2361
+ * Context is fetched once and cached for the instance's lifetime.
2362
+ * Calling `ensureContext()` multiple times is safe (no-op after first).
2363
+ *
2364
+ * Context sources (checked in order):
2365
+ * 1. Sandbox globals (`globalThis.ai.auth`, `globalThis.ai.databases`)
2366
+ * 2. HTTP: `GET /developer/v2/helpers/app-context?appId={appId}`
2367
+ *
2368
+ * @throws {MindStudioError} if no `appId` is available
2369
+ *
2370
+ * @example
2371
+ * ```ts
2372
+ * await agent.ensureContext();
2373
+ * // auth is now available synchronously
2374
+ * agent.auth.requireRole(Roles.admin);
2375
+ * ```
2376
+ */
2377
+ async ensureContext() {
2378
+ if (this._context) return;
2379
+ if (!this._contextPromise) {
2380
+ this._contextPromise = this._hydrateContext();
2381
+ }
2382
+ await this._contextPromise;
2383
+ }
2384
+ /**
2385
+ * @internal Fetch and cache app context, then create auth + db instances.
2386
+ *
2387
+ * In managed mode (CALLBACK_TOKEN), the platform resolves the app from
2388
+ * the token — no appId needed. With an API key, appId is required.
2389
+ */
2390
+ async _hydrateContext() {
2391
+ if (!this._appId && this._authType !== "internal") {
2392
+ throw new MindStudioError(
2393
+ "No app ID available for context resolution. Pass `appId` to the constructor, set the MINDSTUDIO_APP_ID environment variable, or make a step execution call first (which auto-detects the app ID).",
2394
+ "missing_app_id",
2395
+ 400
2396
+ );
2397
+ }
2398
+ const context = await this.getAppContext(this._appId);
2399
+ this._applyContext(context);
2400
+ }
2401
+ /**
2402
+ * @internal Apply a resolved context object — creates AuthContext and Db.
2403
+ * Used by both the HTTP path and sandbox hydration.
2404
+ */
2405
+ _applyContext(context) {
2406
+ this._context = context;
2407
+ this._auth = new AuthContext(context.auth);
2408
+ this._db = createDb(
2409
+ context.databases,
2410
+ this._executeDbQuery.bind(this)
2411
+ );
2412
+ }
2413
+ /**
2414
+ * @internal Try to hydrate context synchronously from sandbox globals.
2415
+ * Called in the constructor when CALLBACK_TOKEN auth is detected.
2416
+ *
2417
+ * The MindStudio sandbox pre-populates `globalThis.ai` with:
2418
+ * - `ai.auth`: { userId, roleAssignments[] }
2419
+ * - `ai.databases`: [{ id, name, tables[] }]
2420
+ */
2421
+ _trySandboxHydration() {
2422
+ const ai = globalThis.ai;
2423
+ if (ai?.auth && ai?.databases) {
2424
+ this._applyContext({
2425
+ auth: ai.auth,
2426
+ databases: ai.databases
2427
+ });
2428
+ }
2429
+ }
2430
+ /**
2431
+ * @internal Execute a SQL query against a managed database.
2432
+ * Used as the `executeQuery` callback for Table instances.
2433
+ *
2434
+ * Calls the `queryAppDatabase` step with `parameterize: false`
2435
+ * (the SDK builds fully-formed SQL with escaped inline values).
2436
+ */
2437
+ async _executeDbQuery(databaseId, sql) {
2438
+ const result = await this.executeStep("queryAppDatabase", {
2439
+ databaseId,
2440
+ sql,
2441
+ parameterize: false
2442
+ });
2443
+ return { rows: result.rows ?? [], changes: result.changes ?? 0 };
2444
+ }
2445
+ /**
2446
+ * @internal Create a lazy Db proxy that auto-hydrates context.
2447
+ *
2448
+ * defineTable() returns Table instances immediately (no async needed).
2449
+ * But the Table's executeQuery callback is wrapped to call ensureContext()
2450
+ * before the first query, so context is fetched lazily.
2451
+ */
2452
+ _createLazyDb() {
2453
+ const agent = this;
2454
+ return {
2455
+ defineTable(name, options) {
2456
+ const databaseHint = options?.database;
2457
+ return new Table({
2458
+ databaseId: "",
2459
+ tableName: name,
2460
+ columns: [],
2461
+ executeQuery: async (sql) => {
2462
+ await agent.ensureContext();
2463
+ const databases = agent._context.databases;
2464
+ let targetDb;
2465
+ if (databaseHint) {
2466
+ targetDb = databases.find(
2467
+ (d) => d.id === databaseHint || d.name === databaseHint
2468
+ );
2469
+ } else {
2470
+ targetDb = databases.find(
2471
+ (d) => d.tables.some((t) => t.name === name)
2472
+ );
2473
+ }
2474
+ const databaseId = targetDb?.id ?? databases[0]?.id ?? "";
2475
+ return agent._executeDbQuery(databaseId, sql);
2476
+ }
2477
+ });
2478
+ },
2479
+ // Time helpers work without context
2480
+ now: () => Date.now(),
2481
+ days: (n) => n * 864e5,
2482
+ hours: (n) => n * 36e5,
2483
+ minutes: (n) => n * 6e4,
2484
+ ago: (ms) => Date.now() - ms,
2485
+ fromNow: (ms) => Date.now() + ms
2486
+ };
2487
+ }
2488
+ // -------------------------------------------------------------------------
2489
+ // Helper methods — user resolution
2490
+ // -------------------------------------------------------------------------
2491
+ /**
2492
+ * Resolve a single user ID to display info (name, email, profile picture).
2493
+ *
2494
+ * Use this when you have a `User`-typed field value and need the person's
2495
+ * display name, email, or avatar. Returns null if the user ID is not found.
2496
+ *
2497
+ * Also available as a top-level import:
2498
+ * ```ts
2499
+ * import { resolveUser } from '@mindstudio-ai/agent';
2500
+ * ```
2501
+ *
2502
+ * @param userId - The user ID to resolve (a `User` branded string or plain UUID)
2503
+ * @returns Resolved user info, or null if not found
2504
+ *
2505
+ * @example
2506
+ * ```ts
2507
+ * const user = await agent.resolveUser(order.requestedBy);
2508
+ * if (user) {
2509
+ * console.log(user.name); // "Jane Smith"
2510
+ * console.log(user.email); // "jane@example.com"
2511
+ * console.log(user.profilePictureUrl); // "https://..." or null
2512
+ * }
2513
+ * ```
2514
+ */
2515
+ async resolveUser(userId) {
2516
+ const { users } = await this.resolveUsers([userId]);
2517
+ return users[0] ?? null;
2518
+ }
2519
+ /**
2520
+ * Resolve multiple user IDs to display info in a single request.
2521
+ * Maximum 100 user IDs per request.
2522
+ *
2523
+ * Use this for batch resolution when you have multiple user references
2524
+ * to display (e.g. all approvers on a purchase order, all team members).
2525
+ *
2526
+ * @param userIds - Array of user IDs to resolve (max 100)
2527
+ * @returns Object with `users` array of resolved user info
2528
+ *
2529
+ * @example
2530
+ * ```ts
2531
+ * // Resolve all approvers at once
2532
+ * const approverIds = approvals.map(a => a.assignedTo);
2533
+ * const { users } = await agent.resolveUsers(approverIds);
2534
+ *
2535
+ * for (const u of users) {
2536
+ * console.log(`${u.name} (${u.email})`);
2537
+ * }
2538
+ * ```
2539
+ */
2540
+ async resolveUsers(userIds) {
2541
+ const { data } = await request(
2542
+ this._httpConfig,
2543
+ "POST",
2544
+ "/helpers/resolve-users",
2545
+ { userIds }
2546
+ );
2547
+ return data;
2548
+ }
2549
+ // -------------------------------------------------------------------------
2550
+ // App context
2551
+ // -------------------------------------------------------------------------
2552
+ /**
2553
+ * Get auth and database context for an app.
2554
+ *
2555
+ * Returns role assignments and managed database schemas. Useful for
2556
+ * hydrating `auth` and `db` namespaces when running outside the sandbox.
2557
+ *
2558
+ * When called with a CALLBACK_TOKEN (managed mode), `appId` is optional —
2559
+ * the platform resolves the app from the token. With an API key, `appId`
2560
+ * is required.
2561
+ *
2562
+ * ```ts
2563
+ * const ctx = await agent.getAppContext('your-app-id');
2564
+ * console.log(ctx.auth.roleAssignments, ctx.databases);
2565
+ * ```
2566
+ */
2567
+ async getAppContext(appId) {
2568
+ const query = appId ? `?appId=${encodeURIComponent(appId)}` : "";
2569
+ const { data } = await request(
2570
+ this._httpConfig,
2571
+ "GET",
2572
+ `/helpers/app-context${query}`
2573
+ );
2574
+ return data;
2575
+ }
2576
+ // -------------------------------------------------------------------------
938
2577
  // Account methods
939
2578
  // -------------------------------------------------------------------------
940
2579
  /** Update the display name of the authenticated user/agent. */
@@ -1015,6 +2654,7 @@ var monacoSnippets = {
1015
2654
  "analyzeImage": { fields: [["prompt", "string"], ["imageUrl", "string"]], outputKeys: ["analysis"] },
1016
2655
  "analyzeVideo": { fields: [["prompt", "string"], ["videoUrl", "string"]], outputKeys: ["analysis"] },
1017
2656
  "captureThumbnail": { fields: [["videoUrl", "string"], ["at", "string"]], outputKeys: ["thumbnailUrl"] },
2657
+ "checkAppRole": { fields: [["roleName", "string"]], outputKeys: ["hasRole", "userRoles"] },
1018
2658
  "codaCreateUpdatePage": { fields: [["pageData", "object"]], outputKeys: ["pageId"] },
1019
2659
  "codaCreateUpdateRow": { fields: [["docId", "string"], ["tableId", "string"], ["rowData", "object"]], outputKeys: ["rowId"] },
1020
2660
  "codaFindRow": { fields: [["docId", "string"], ["tableId", "string"], ["rowData", "object"]], outputKeys: ["row"] },
@@ -1100,6 +2740,7 @@ var monacoSnippets = {
1100
2740
  "postToSlackChannel": { fields: [["channelId", "string"], ["messageType", ["string", "blocks"]], ["message", "string"]], outputKeys: [] },
1101
2741
  "postToX": { fields: [["text", "string"]], outputKeys: [] },
1102
2742
  "postToZapier": { fields: [["webhookUrl", "string"], ["input", "object"]], outputKeys: ["data"] },
2743
+ "queryAppDatabase": { fields: [["databaseId", "string"], ["sql", "string"]], outputKeys: ["rows", "changes"] },
1103
2744
  "queryDataSource": { fields: [["dataSourceId", "string"], ["query", "string"], ["maxResults", "number"]], outputKeys: ["text", "chunks", "query", "citations", "latencyMs"] },
1104
2745
  "queryExternalDatabase": { fields: [["query", "string"], ["outputFormat", ["json", "csv"]]], outputKeys: ["data"] },
1105
2746
  "redactPII": { fields: [["input", "string"], ["language", "string"], ["entities", "array"]], outputKeys: ["text"] },
@@ -1189,7 +2830,7 @@ var stepMetadata = {
1189
2830
  stepType: "addSubtitlesToVideo",
1190
2831
  description: "Automatically add subtitles to a video",
1191
2832
  usageNotes: "- Can control style of text and animation",
1192
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "language": { "type": "string", "description": "ISO language code for subtitle transcription" }, "fontName": { "type": "string", "description": "Google Font name for subtitle text" }, "fontSize": { "type": "number", "description": "Font size in pixels. Default: 100." }, "fontWeight": { "enum": ["normal", "bold", "black"], "type": "string", "description": "Font weight for subtitle text" }, "fontColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the subtitle text" }, "highlightColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color used to highlight the currently spoken word" }, "strokeWidth": { "type": "number", "description": "Width of the text stroke outline in pixels" }, "strokeColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the text stroke outline" }, "backgroundColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta", "none"], "type": "string", "description": "Background color behind subtitle text. Use 'none' for transparent." }, "backgroundOpacity": { "type": "number", "description": "Opacity of the subtitle background. 0.0 = fully transparent, 1.0 = fully opaque." }, "position": { "enum": ["top", "center", "bottom"], "type": "string", "description": "Vertical position of subtitle text on screen" }, "yOffset": { "type": "number", "description": "Vertical offset in pixels from the position. Positive moves down, negative moves up. Default: 75." 
}, "wordsPerSubtitle": { "type": "number", "description": "Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences. Default: 3." }, "enableAnimation": { "type": "boolean", "description": "When true, enables bounce-style entrance animation for subtitles. Default: true." }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "language", "fontName", "fontSize", "fontWeight", "fontColor", "highlightColor", "strokeWidth", "strokeColor", "backgroundColor", "backgroundOpacity", "position", "yOffset", "wordsPerSubtitle", "enableAnimation"] },
2833
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "language": { "type": "string", "description": "ISO language code for subtitle transcription" }, "fontName": { "type": "string", "description": "Google Font name for subtitle text" }, "fontSize": { "type": "number", "description": "Font size in pixels. Default: 100." }, "fontWeight": { "enum": ["normal", "bold", "black"], "type": "string", "description": "Font weight for subtitle text" }, "fontColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the subtitle text" }, "highlightColor": { "enum": ["white", "black", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color used to highlight the currently spoken word" }, "strokeWidth": { "type": "number", "description": "Width of the text stroke outline in pixels" }, "strokeColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta"], "type": "string", "description": "Color of the text stroke outline" }, "backgroundColor": { "enum": ["black", "white", "red", "green", "blue", "yellow", "orange", "purple", "pink", "brown", "gray", "cyan", "magenta", "none"], "type": "string", "description": "Background color behind subtitle text. Use 'none' for transparent." }, "backgroundOpacity": { "type": "number", "description": "Opacity of the subtitle background. 0.0 = fully transparent, 1.0 = fully opaque." }, "position": { "enum": ["top", "center", "bottom"], "type": "string", "description": "Vertical position of subtitle text on screen" }, "yOffset": { "type": "number", "description": "Vertical offset in pixels from the position. Positive moves down, negative moves up. Default: 75." 
}, "wordsPerSubtitle": { "type": "number", "description": "Maximum number of words per subtitle segment. Use 1 for single-word display, 2-3 for short phrases, or 8-12 for full sentences. Default: 3." }, "enableAnimation": { "type": "boolean", "description": "When true, enables bounce-style entrance animation for subtitles. Default: true." }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "language", "fontName", "fontSize", "fontWeight", "fontColor", "highlightColor", "strokeWidth", "strokeColor", "backgroundColor", "backgroundOpacity", "position", "yOffset", "wordsPerSubtitle", "enableAnimation"] },
1193
2834
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with subtitles added" } }, "required": ["videoUrl"] }
1194
2835
  },
1195
2836
  "airtableCreateUpdateRecord": {
@@ -1241,6 +2882,13 @@ var stepMetadata = {
1241
2882
  inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to capture a frame from" }, "at": { "anyOf": [{ "type": "number" }, { "type": "string" }] } }, "required": ["videoUrl", "at"] },
1242
2883
  outputSchema: { "type": "object", "properties": { "thumbnailUrl": { "type": "string", "description": "URL of the captured thumbnail image" } }, "required": ["thumbnailUrl"] }
1243
2884
  },
2885
+ "checkAppRole": {
2886
+ stepType: "checkAppRole",
2887
+ description: "Check whether the current user has a specific app role and branch accordingly.",
2888
+ usageNotes: '- Checks if the current user has been assigned a specific role in this app.\n- If the user has the role, transitions to the "has role" path.\n- If the user does not have the role, transitions to the "no role" path, or errors if no path is configured.\n- Role names are defined by the app creator and assigned to users via the app roles system.\n- The roleName field supports {{variables}} for dynamic role checks.',
2889
+ inputSchema: { "type": "object", "properties": { "roleName": { "type": "string", "description": "The role name to check (supports {{variables}})" }, "hasRoleStepId": { "type": "string", "description": "Step to transition to if the user has the role (same workflow)" }, "hasRoleWorkflowId": { "type": "string", "description": "Workflow to jump to if the user has the role (cross workflow)" }, "noRoleStepId": { "type": "string", "description": "Step to transition to if the user does not have the role (same workflow)" }, "noRoleWorkflowId": { "type": "string", "description": "Workflow to jump to if the user does not have the role (cross workflow)" } }, "required": ["roleName"], "description": "Configuration for the check app role step" },
2890
+ outputSchema: { "type": "object", "properties": { "hasRole": { "type": "boolean", "description": "Whether the current user has the checked role" }, "userRoles": { "type": "array", "items": { "type": "string" }, "description": "All roles assigned to the current user for this app" } }, "required": ["hasRole", "userRoles"] }
2891
+ },
1244
2892
  "codaCreateUpdatePage": {
1245
2893
  stepType: "codaCreateUpdatePage",
1246
2894
  description: "Create a new page or update an existing page in a Coda document.",
@@ -1492,7 +3140,7 @@ var stepMetadata = {
1492
3140
  stepType: "generatePdf",
1493
3141
  description: "Generate an HTML asset and export it as a webpage, PDF, or image",
1494
3142
  usageNotes: '- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)',
1495
- inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the asset will not appear in the user's asset history" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
3143
+ inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
1496
3144
  outputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat)" } }, "required": ["url"] }
1497
3145
  },
1498
3146
  "generateChart": {
@@ -1506,28 +3154,28 @@ var stepMetadata = {
1506
3154
  stepType: "generateImage",
1507
3155
  description: "Generate an image from a text prompt using an AI model.",
1508
3156
  usageNotes: "- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Images are automatically hosted on a CDN.\n- In foreground mode, the image is displayed to the user. In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple images are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.",
1509
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the image to generate" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the image will not appear in the user's asset history" }, "imageModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Image generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default image model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple image variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated image" } }, "required": ["prompt"] },
3157
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the image to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "imageModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Image generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default image model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple image variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated image" } }, "required": ["prompt"] },
1510
3158
  outputSchema: { "type": "object", "properties": { "imageUrl": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["imageUrl"] }
1511
3159
  },
1512
3160
  "generateLipsync": {
1513
3161
  stepType: "generateLipsync",
1514
3162
  description: "Generate a lip sync video from provided audio and image.",
1515
3163
  usageNotes: "- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.",
1516
- inputSchema: { "type": "object", "properties": { "skipAssetCreation": { "type": "boolean", "description": "If true, the generated video will not appear in the user's asset history" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" }, "lipsyncModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default lipsync model if not specified" } } },
3164
+ inputSchema: { "type": "object", "properties": { "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" }, "lipsyncModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default lipsync model if not specified" } } },
1517
3165
  outputSchema: { "description": "This step does not produce output data." }
1518
3166
  },
1519
3167
  "generateMusic": {
1520
3168
  stepType: "generateMusic",
1521
3169
  description: "Generate an audio file from provided instructions (text) using a music model.",
1522
3170
  usageNotes: "- The text field contains the instructions (prompt) for the music generation.\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.",
1523
- inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The instructions (prompt) for the music generation" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the generated audio will not appear in the user's asset history" }, "musicModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default music model if not specified" } }, "required": ["text"] },
3171
+ inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The instructions (prompt) for the music generation" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "musicModelOverride": { "type": "object", "properties": { "model": { "type": "string" }, "config": { "type": "object" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default music model if not specified" } }, "required": ["text"] },
1524
3172
  outputSchema: { "description": "This step does not produce output data." }
1525
3173
  },
1526
3174
  "generatePdf": {
1527
3175
  stepType: "generatePdf",
1528
3176
  description: "Generate an HTML asset and export it as a webpage, PDF, or image",
1529
3177
  usageNotes: '- Agents can generate HTML documents and export as webpage, PDFs, images, or videos. They do this by using the "generatePdf" block, which defines an HTML page with variables, and then the generation process renders the page to create the output and save its URL at the specified variable.\n- The template for the HTML page is generated by a separate process, and it can only use variables that have already been defined in the workflow at the time of its execution. It has full access to handlebars to render the HTML template, including a handlebars helper to render a markdown variable string as HTML (which can be useful for creating templates that render long strings). The template can also create its own simple JavaScript to do things like format dates and strings.\n- If PDF or composited image generation are part of the workflow, assistant adds the block and leaves the "source" empty. In a separate step, assistant generates a detailed request for the developer who will write the HTML.\n- Can also auto-generate HTML from a prompt (like a generate text block to generate HTML). In these cases, create a prompt with variables in the dynamicPrompt variable describing, in detail, the document to generate\n- Can either display output directly to user (foreground mode) or save the URL of the asset to a variable (background mode)',
1530
- inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the asset will not appear in the user's asset history" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
3178
+ inputSchema: { "type": "object", "properties": { "source": { "type": "string", "description": "The HTML or Markdown source template for the asset" }, "sourceType": { "enum": ["html", "markdown", "spa", "raw", "dynamic", "customInterface"], "type": "string", "description": "Source type: html, markdown (auto-formatted), spa (single page app), raw (pre-generated HTML in a variable), dynamic (AI-generated from prompt), or customInterface" }, "outputFormat": { "enum": ["pdf", "png", "html", "mp4", "openGraph"], "type": "string", "description": "The output format for the generated asset" }, "pageSize": { "enum": ["full", "letter", "A4", "custom"], "type": "string", "description": "Page size for PDF, PNG, or MP4 output" }, "testData": { "type": "object", "description": "Test data used for previewing the template with sample variable values" }, "options": { "type": "object", "properties": { "pageWidthPx": { "type": "number", "description": "Custom page width in pixels (for custom pageSize)" }, "pageHeightPx": { "type": "number", "description": "Custom page height in pixels (for custom pageSize)" }, "pageOrientation": { "enum": ["portrait", "landscape"], "type": "string", "description": "Page orientation for the rendered output" }, "rehostMedia": { "type": "boolean", "description": "Whether to re-host third-party images on the MindStudio CDN" }, "videoDurationSeconds": { "type": "number", "description": "Duration in seconds for MP4 video output" } }, "description": "Additional rendering options" }, "spaSource": { "type": "object", "properties": { "source": { "type": "string", "description": "Source code of the SPA (legacy, use files instead)" }, "lastCompiledSource": { "type": "string", "description": "Last compiled source (cached)" }, "files": { "type": "object", "description": "Multi-file SPA source" }, "paths": { "type": "array", "items": { "type": "string" }, "description": "Available route paths in the SPA" }, "root": { "type": "string", "description": "Root URL of 
the SPA bundle" }, "zipUrl": { "type": "string", "description": "URL of the zipped SPA bundle" } }, "required": ["paths", "root", "zipUrl"], "description": "Single page app source configuration (advanced)" }, "rawSource": { "type": "string", "description": "Raw HTML source stored in a variable, using handlebars syntax (e.g. {{myHtmlVariable}})" }, "dynamicPrompt": { "type": "string", "description": 'Prompt to generate the HTML dynamically when sourceType is "dynamic"' }, "dynamicSourceModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": 'Model identifier (e.g. "gpt-4", "claude-3-opus")' }, "temperature": { "type": "number", "description": "Sampling temperature for the model (0-2)" }, "maxResponseTokens": { "type": "number", "description": "Maximum number of tokens in the model's response" }, "ignorePreamble": { "type": "boolean", "description": "Whether to skip the system preamble/instructions" }, "userMessagePreprocessor": { "type": "object", "properties": { "dataSource": { "type": "string", "description": "Data source identifier for the preprocessor" }, "messageTemplate": { "type": "string", "description": "Template string applied to user messages before sending to the model" }, "maxResults": { "type": "number", "description": "Maximum number of results to include from the data source" }, "enabled": { "type": "boolean", "description": "Whether the preprocessor is active" }, "shouldInherit": { "type": "boolean", "description": "Whether child steps should inherit this preprocessor configuration" } }, "description": "Preprocessor applied to user messages before sending to the model" }, "preamble": { "type": "string", "description": "System preamble/instructions for the model" }, "multiModelEnabled": { "type": "boolean", "description": "Whether multi-model candidate generation is enabled" }, "editResponseEnabled": { "type": "boolean", "description": "Whether the user can edit the model's response" }, "config": { "type": 
"object", "description": "Additional model-specific configuration" } }, "required": ["model", "temperature", "maxResponseTokens"], "description": "Model override for dynamic HTML generation. Leave undefined to use the default model" }, "transitionControl": { "enum": ["default", "native"], "type": "string", "description": "Controls how the step transitions after displaying in foreground mode" }, "shareControl": { "enum": ["default", "hidden"], "type": "string", "description": "Controls visibility of the share button on displayed assets" }, "shareImageUrl": { "type": "string", "description": "URL of a custom Open Graph share image" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["source", "sourceType", "outputFormat", "pageSize", "testData"] },
1531
3179
  outputSchema: { "type": "object", "properties": { "url": { "type": "string", "description": "CDN URL of the generated asset (PDF, PNG, HTML, or MP4 depending on outputFormat)" } }, "required": ["url"] }
1532
3180
  },
1533
3181
  "generateStaticVideoFromImage": {
@@ -1551,7 +3199,7 @@ var stepMetadata = {
1551
3199
  stepType: "generateVideo",
1552
3200
  description: "Generate a video from a text prompt using an AI model.",
1553
3201
  usageNotes: "- Prompts should be descriptive but concise (roughly 3\u20136 sentences).\n- Videos are automatically hosted on a CDN.\n- In foreground mode, the video is displayed to the user. In background mode, the URL is saved to a variable.\n- When generateVariants is true with numVariants > 1, multiple videos are generated in parallel.\n- In direct execution, foreground mode behaves as background, and userSelect variant behavior behaves as saveAll.",
1554
- inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the video to generate" }, "skipAssetCreation": { "type": "boolean", "description": "If true, the video will not appear in the user's asset history" }, "videoModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Video generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default video model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple video variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" } }, "required": ["prompt"] },
3202
+ inputSchema: { "type": "object", "properties": { "prompt": { "type": "string", "description": "Text prompt describing the video to generate" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" }, "videoModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Video generation model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default video model if not specified" }, "generateVariants": { "type": "boolean", "description": "Whether to generate multiple video variants in parallel" }, "numVariants": { "type": "number", "description": "Number of variants to generate (max 10)" }, "addWatermark": { "type": "boolean", "description": "Whether to add a MindStudio watermark to the generated video" } }, "required": ["prompt"] },
1555
3203
  outputSchema: { "type": "object", "properties": { "videoUrl": { "anyOf": [{ "type": "string" }, { "type": "array", "items": { "type": "string" } }] } }, "required": ["videoUrl"] }
1556
3204
  },
1557
3205
  "getGmailAttachments": {
@@ -1691,14 +3339,14 @@ var stepMetadata = {
1691
3339
  stepType: "imageRemoveWatermark",
1692
3340
  description: "Remove watermarks from an image using AI.",
1693
3341
  usageNotes: "- Output is re-hosted on the CDN as a PNG.",
1694
- inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the image to remove the watermark from" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history" } }, "required": ["imageUrl", "engine"] },
3342
+ inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the image to remove the watermark from" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["imageUrl", "engine"] },
1695
3343
  outputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "CDN URL of the processed image with watermark removed (PNG)" } }, "required": ["imageUrl"] }
1696
3344
  },
1697
3345
  "insertVideoClips": {
1698
3346
  stepType: "insertVideoClips",
1699
3347
  description: "Insert b-roll clips into a base video at a timecode, optionally with an xfade transition.",
1700
3348
  usageNotes: "",
1701
- inputSchema: { "type": "object", "properties": { "baseVideoUrl": { "type": "string", "description": "URL of the base video to insert clips into" }, "overlayVideos": { "type": "array", "items": { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the overlay video clip" }, "startTimeSec": { "type": "number", "description": "Timecode in seconds at which to insert this clip" } }, "required": ["videoUrl", "startTimeSec"] }, "description": "Array of overlay clips to insert at specified timecodes" }, "transition": { "type": "string", "description": "Optional xfade transition effect name between clips" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "useOverlayAudio": { "type": "boolean", "description": "When true, uses audio from the overlay clips instead of the base video audio during inserts" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["baseVideoUrl", "overlayVideos"] },
3349
+ inputSchema: { "type": "object", "properties": { "baseVideoUrl": { "type": "string", "description": "URL of the base video to insert clips into" }, "overlayVideos": { "type": "array", "items": { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the overlay video clip" }, "startTimeSec": { "type": "number", "description": "Timecode in seconds at which to insert this clip" } }, "required": ["videoUrl", "startTimeSec"] }, "description": "Array of overlay clips to insert at specified timecodes" }, "transition": { "type": "string", "description": "Optional xfade transition effect name between clips" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "useOverlayAudio": { "type": "boolean", "description": "When true, uses audio from the overlay clips instead of the base video audio during inserts" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["baseVideoUrl", "overlayVideos"] },
1702
3350
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with clips inserted" } }, "required": ["videoUrl"] }
1703
3351
  },
1704
3352
  "listDataSources": {
@@ -1765,28 +3413,28 @@ var stepMetadata = {
1765
3413
  stepType: "mergeAudio",
1766
3414
  description: "Merge one or more clips into a single audio file.",
1767
3415
  usageNotes: "",
1768
- inputSchema: { "type": "object", "properties": { "mp3Urls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the MP3 audio clips to merge in order" }, "fileMetadata": { "type": "object", "description": "FFmpeg MP3 metadata key-value pairs to embed in the output file" }, "albumArtUrl": { "type": "string", "description": "URL of an image to embed as album art in the output file" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["mp3Urls"] },
3416
+ inputSchema: { "type": "object", "properties": { "mp3Urls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the MP3 audio clips to merge in order" }, "fileMetadata": { "type": "object", "description": "FFmpeg MP3 metadata key-value pairs to embed in the output file" }, "albumArtUrl": { "type": "string", "description": "URL of an image to embed as album art in the output file" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["mp3Urls"] },
1769
3417
  outputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the merged audio file" } }, "required": ["audioUrl"] }
1770
3418
  },
1771
3419
  "mergeVideos": {
1772
3420
  stepType: "mergeVideos",
1773
3421
  description: "Merge one or more clips into a single video.",
1774
3422
  usageNotes: "",
1775
- inputSchema: { "type": "object", "properties": { "videoUrls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the video clips to merge in order" }, "transition": { "type": "string", "description": "Optional xfade transition effect name" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrls"] },
3423
+ inputSchema: { "type": "object", "properties": { "videoUrls": { "type": "array", "items": { "type": "string" }, "description": "URLs of the video clips to merge in order" }, "transition": { "type": "string", "description": "Optional xfade transition effect name" }, "transitionDuration": { "type": "number", "description": "Duration of the transition in seconds" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrls"] },
1776
3424
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the merged video" } }, "required": ["videoUrl"] }
1777
3425
  },
1778
3426
  "mixAudioIntoVideo": {
1779
3427
  stepType: "mixAudioIntoVideo",
1780
3428
  description: "Mix an audio track into a video",
1781
3429
  usageNotes: "",
1782
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "audioUrl": { "type": "string", "description": "URL of the audio track to mix into the video" }, "options": { "type": "object", "properties": { "keepVideoAudio": { "type": "boolean", "description": "When true, preserves the original video audio alongside the new track. Defaults to false." }, "audioGainDb": { "type": "number", "description": "Volume adjustment for the new audio track in decibels. Defaults to 0." }, "videoGainDb": { "type": "number", "description": "Volume adjustment for the existing video audio in decibels. Defaults to 0." }, "loopAudio": { "type": "boolean", "description": "When true, loops the audio track to match the video duration. Defaults to false." } }, "description": "Audio mixing options" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "audioUrl", "options"] },
3430
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "audioUrl": { "type": "string", "description": "URL of the audio track to mix into the video" }, "options": { "type": "object", "properties": { "keepVideoAudio": { "type": "boolean", "description": "When true, preserves the original video audio alongside the new track. Defaults to false." }, "audioGainDb": { "type": "number", "description": "Volume adjustment for the new audio track in decibels. Defaults to 0." }, "videoGainDb": { "type": "number", "description": "Volume adjustment for the existing video audio in decibels. Defaults to 0." }, "loopAudio": { "type": "boolean", "description": "When true, loops the audio track to match the video duration. Defaults to false." } }, "description": "Audio mixing options" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "audioUrl", "options"] },
1783
3431
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with the mixed audio track" } }, "required": ["videoUrl"] }
1784
3432
  },
1785
3433
  "muteVideo": {
1786
3434
  stepType: "muteVideo",
1787
3435
  description: "Mute a video file",
1788
3436
  usageNotes: "",
1789
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to mute" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl"] },
3437
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to mute" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl"] },
1790
3438
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the muted video" } }, "required": ["videoUrl"] }
1791
3439
  },
1792
3440
  "n8nRunNode": {
@@ -1820,8 +3468,8 @@ var stepMetadata = {
1820
3468
  "postToLinkedIn": {
1821
3469
  stepType: "postToLinkedIn",
1822
3470
  description: "Create a post on LinkedIn from the connected account.",
1823
- usageNotes: "- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, and video posts.\n- Visibility controls who can see the post.",
1824
- inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The text content of the LinkedIn post" }, "visibility": { "enum": ["PUBLIC", "CONNECTIONS"], "type": "string", "description": 'Who can see the post: "PUBLIC" or "CONNECTIONS"' }, "videoUrl": { "type": "string", "description": "URL of a video to attach to the post" }, "descriptionText": { "type": "string", "description": "Description text for link/media attachments" }, "titleText": { "type": "string", "description": "Title text for link/media attachments" }, "imageUrl": { "type": "string", "description": "URL of an image to attach to the post" }, "connectionId": { "type": "string", "description": "LinkedIn OAuth connection ID" } }, "required": ["message", "visibility"] },
3471
+ usageNotes: "- Requires a LinkedIn OAuth connection (connectionId).\n- Supports text posts, image posts, video posts, document posts, and article posts.\n- Attach one media type per post: image, video, document, or article.\n- Documents support PDF, PPT, PPTX, DOC, DOCX (max 100MB, 300 pages). Displays as a slideshow carousel.\n- Articles create a link preview with optional custom title, description, and thumbnail.\n- Visibility controls who can see the post.",
3472
+ inputSchema: { "type": "object", "properties": { "message": { "type": "string", "description": "The text content of the LinkedIn post" }, "visibility": { "enum": ["PUBLIC", "CONNECTIONS"], "type": "string", "description": 'Who can see the post: "PUBLIC" or "CONNECTIONS"' }, "imageUrl": { "type": "string", "description": "URL of an image to attach to the post" }, "videoUrl": { "type": "string", "description": "URL of a video to attach to the post" }, "documentUrl": { "type": "string", "description": "URL of a document (PDF, PPT, DOC) to attach to the post" }, "articleUrl": { "type": "string", "description": "URL to share as an article link preview" }, "titleText": { "type": "string", "description": "Title text for media or article attachments" }, "descriptionText": { "type": "string", "description": "Description text for article attachments" }, "connectionId": { "type": "string", "description": "LinkedIn OAuth connection ID" } }, "required": ["message", "visibility"] },
1825
3473
  outputSchema: { "description": "This step does not produce output data." }
1826
3474
  },
1827
3475
  "postToSlackChannel": {
@@ -1845,6 +3493,13 @@ var stepMetadata = {
1845
3493
  inputSchema: { "type": "object", "properties": { "webhookUrl": { "type": "string", "description": "Zapier webhook URL to send data to" }, "input": { "type": "object", "description": "Key-value pairs to send as the JSON POST body" } }, "required": ["webhookUrl", "input"] },
1846
3494
  outputSchema: { "type": "object", "properties": { "data": { "description": "Parsed webhook response from Zapier (JSON object, array, or string)" } }, "required": ["data"] }
1847
3495
  },
3496
+ "queryAppDatabase": {
3497
+ stepType: "queryAppDatabase",
3498
+ description: "Execute a SQL query against the app managed database.",
3499
+ usageNotes: '- Executes raw SQL against a SQLite database managed by the app.\n- For SELECT queries, returns rows as JSON.\n- For INSERT/UPDATE/DELETE, returns the number of affected rows.\n- Use {{variables}} directly in your SQL. By default they are automatically extracted\n and passed as safe parameterized values (preventing SQL injection).\n Example: INSERT INTO contacts (name, comment) VALUES ({{name}}, {{comment}})\n- Full MindStudio handlebars syntax is supported, including helpers like {{json myVar}},\n {{get myVar "$.path"}}, {{global.orgName}}, etc.\n- Set parameterize to false for raw/dynamic SQL where variables are interpolated directly\n into the query string. Use this when another step generates full or partial SQL, e.g.\n a bulk INSERT with a precomputed VALUES list. The user is responsible for sanitization\n when parameterize is false.',
3500
+ inputSchema: { "type": "object", "properties": { "databaseId": { "type": "string", "description": "Name or ID of the app data database to query" }, "sql": { "type": "string", "description": "SQL query to execute. Use {{variables}} directly in the SQL \u2014 they are handled according to the `parameterize` setting.\n\nWhen parameterize is true (default): {{variables}} are extracted from the SQL, replaced with ? placeholders, resolved via the full MindStudio handlebars pipeline, and passed as safe parameterized values to SQLite. This prevents SQL injection. Example: INSERT INTO contacts (name, email) VALUES ({{name}}, {{email}})\n\nWhen parameterize is false: The entire SQL string is resolved via compileString (standard handlebars interpolation) and executed as-is. Use this for dynamic/generated SQL where another step builds the query. The user is responsible for safety. Example: {{generatedInsertQuery}}\n\nAsk the user for the database schema if they have not already provided it." }, "parameterize": { "type": "boolean", "description": "Whether to treat {{variables}} as parameterized query values (default: true).\n\n- true: {{vars}} are extracted, replaced with ?, and passed as bind params. Safe from SQL injection. Use for standard CRUD operations.\n- false: {{vars}} are interpolated directly into the SQL string via handlebars. Use when another step generates full or partial SQL (e.g. bulk inserts with precomputed VALUES). The user is responsible for sanitization." } }, "required": ["databaseId", "sql"] },
3501
+ outputSchema: { "type": "object", "properties": { "rows": { "type": "array", "items": {}, "description": "Result rows for SELECT queries (empty array for write queries)" }, "changes": { "type": "number", "description": "Number of rows affected by INSERT, UPDATE, or DELETE queries (0 for SELECT)" } }, "required": ["rows", "changes"] }
3502
+ },
1848
3503
  "queryDataSource": {
1849
3504
  stepType: "queryDataSource",
1850
3505
  description: "Search a vector data source (RAG) and return relevant document chunks.",
@@ -1884,7 +3539,7 @@ var stepMetadata = {
1884
3539
  stepType: "resizeVideo",
1885
3540
  description: "Resize a video file",
1886
3541
  usageNotes: "",
1887
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to resize" }, "mode": { "enum": ["fit", "exact"], "type": "string", "description": "Resize mode: 'fit' scales within max dimensions, 'exact' forces exact dimensions" }, "maxWidth": { "type": "number", "description": "Maximum width in pixels (used with 'fit' mode)" }, "maxHeight": { "type": "number", "description": "Maximum height in pixels (used with 'fit' mode)" }, "width": { "type": "number", "description": "Exact width in pixels (used with 'exact' mode)" }, "height": { "type": "number", "description": "Exact height in pixels (used with 'exact' mode)" }, "strategy": { "enum": ["pad", "crop"], "type": "string", "description": "Strategy for handling aspect ratio mismatch in 'exact' mode" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "mode"] },
3542
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to resize" }, "mode": { "enum": ["fit", "exact"], "type": "string", "description": "Resize mode: 'fit' scales within max dimensions, 'exact' forces exact dimensions" }, "maxWidth": { "type": "number", "description": "Maximum width in pixels (used with 'fit' mode)" }, "maxHeight": { "type": "number", "description": "Maximum height in pixels (used with 'fit' mode)" }, "width": { "type": "number", "description": "Exact width in pixels (used with 'exact' mode)" }, "height": { "type": "number", "description": "Exact height in pixels (used with 'exact' mode)" }, "strategy": { "enum": ["pad", "crop"], "type": "string", "description": "Strategy for handling aspect ratio mismatch in 'exact' mode" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "mode"] },
1888
3543
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the resized video" } }, "required": ["videoUrl"] }
1889
3544
  },
1890
3545
  "runFromConnectorRegistry": {
@@ -2180,7 +3835,7 @@ var stepMetadata = {
2180
3835
  stepType: "textToSpeech",
2181
3836
  description: "Generate an audio file from provided text using a speech model.",
2182
3837
  usageNotes: "- The text field contains the exact words to be spoken (not instructions).\n- In foreground mode, the audio is displayed to the user. In background mode, the URL is saved to a variable.",
2183
- inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The text to convert to speech" }, "skipAssetCreation": { "type": "boolean" }, "speechModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Speech synthesis model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default speech model if not specified" } }, "required": ["text"] },
3838
+ inputSchema: { "type": "object", "properties": { "text": { "type": "string", "description": "The text to convert to speech" }, "intermediateAsset": { "type": "boolean" }, "speechModelOverride": { "type": "object", "properties": { "model": { "type": "string", "description": "Speech synthesis model identifier" }, "config": { "type": "object", "description": "Additional model-specific configuration" } }, "required": ["model"], "description": "Optional model configuration override. Uses the workflow's default speech model if not specified" } }, "required": ["text"] },
2184
3839
  outputSchema: { "type": "object", "properties": { "audioUrl": { "type": "string", "description": "URL of the generated audio file" } }, "required": ["audioUrl"] }
2185
3840
  },
2186
3841
  "transcribeAudio": {
@@ -2194,7 +3849,7 @@ var stepMetadata = {
2194
3849
  stepType: "trimMedia",
2195
3850
  description: "Trim an audio or video clip",
2196
3851
  usageNotes: "",
2197
- inputSchema: { "type": "object", "properties": { "inputUrl": { "type": "string", "description": "URL of the source audio or video file to trim" }, "start": { "type": ["number", "string"] }, "duration": { "type": ["string", "number"] }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["inputUrl"] },
3852
+ inputSchema: { "type": "object", "properties": { "inputUrl": { "type": "string", "description": "URL of the source audio or video file to trim" }, "start": { "type": ["number", "string"] }, "duration": { "type": ["string", "number"] }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["inputUrl"] },
2198
3853
  outputSchema: { "type": "object", "properties": { "mediaUrl": { "type": "string", "description": "URL of the trimmed media file" } }, "required": ["mediaUrl"] }
2199
3854
  },
2200
3855
  "updateGmailLabels": {
@@ -2243,7 +3898,7 @@ var stepMetadata = {
2243
3898
  stepType: "upscaleVideo",
2244
3899
  description: "Upscale a video file",
2245
3900
  usageNotes: "",
2246
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to upscale" }, "targetResolution": { "enum": ["720p", "1080p", "2K", "4K"], "type": "string", "description": "Target output resolution for the upscaled video" }, "engine": { "enum": ["standard", "pro", "ultimate", "flashvsr", "seedance", "seedvr2", "runwayml/upscale-v1"], "type": "string", "description": "Upscaling engine to use. Higher tiers produce better quality at higher cost." }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "targetResolution", "engine"] },
3901
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video to upscale" }, "targetResolution": { "enum": ["720p", "1080p", "2K", "4K"], "type": "string", "description": "Target output resolution for the upscaled video" }, "engine": { "enum": ["standard", "pro", "ultimate", "flashvsr", "seedance", "seedvr2", "runwayml/upscale-v1"], "type": "string", "description": "Upscaling engine to use. Higher tiers produce better quality at higher cost." }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "targetResolution", "engine"] },
2247
3902
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the upscaled video" } }, "required": ["videoUrl"] }
2248
3903
  },
2249
3904
  "userMessage": {
@@ -2260,46 +3915,86 @@ var stepMetadata = {
2260
3915
  stepType: "videoFaceSwap",
2261
3916
  description: "Swap faces in a video file",
2262
3917
  usageNotes: "",
2263
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing faces to swap" }, "faceImageUrl": { "type": "string", "description": "URL of the image containing the replacement face" }, "targetIndex": { "type": "number", "description": "Zero-based index of the face to replace in the video" }, "engine": { "type": "string", "description": "Face swap engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "faceImageUrl", "targetIndex", "engine"] },
3918
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing faces to swap" }, "faceImageUrl": { "type": "string", "description": "URL of the image containing the replacement face" }, "targetIndex": { "type": "number", "description": "Zero-based index of the face to replace in the video" }, "engine": { "type": "string", "description": "Face swap engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "faceImageUrl", "targetIndex", "engine"] },
2264
3919
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the face-swapped video" } }, "required": ["videoUrl"] }
2265
3920
  },
2266
3921
  "videoRemoveBackground": {
2267
3922
  stepType: "videoRemoveBackground",
2268
3923
  description: "Remove or replace background from a video",
2269
3924
  usageNotes: "",
2270
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "newBackground": { "enum": ["transparent", "image"], "type": "string", "description": "Whether to make the background transparent or replace it with an image" }, "newBackgroundImageUrl": { "type": "string", "description": "URL of a replacement background image. Required when newBackground is 'image'." }, "engine": { "type": "string", "description": "Background removal engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "newBackground", "engine"] },
3925
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "newBackground": { "enum": ["transparent", "image"], "type": "string", "description": "Whether to make the background transparent or replace it with an image" }, "newBackgroundImageUrl": { "type": "string", "description": "URL of a replacement background image. Required when newBackground is 'image'." }, "engine": { "type": "string", "description": "Background removal engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "newBackground", "engine"] },
2271
3926
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with background removed or replaced" } }, "required": ["videoUrl"] }
2272
3927
  },
2273
3928
  "videoRemoveWatermark": {
2274
3929
  stepType: "videoRemoveWatermark",
2275
3930
  description: "Remove a watermark from a video",
2276
3931
  usageNotes: "",
2277
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing a watermark" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "engine"] },
3932
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video containing a watermark" }, "engine": { "type": "string", "description": "Watermark removal engine to use" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "engine"] },
2278
3933
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the video with watermark removed" } }, "required": ["videoUrl"] }
2279
3934
  },
2280
3935
  "watermarkImage": {
2281
3936
  stepType: "watermarkImage",
2282
3937
  description: "Overlay a watermark image onto another image.",
2283
3938
  usageNotes: "- The watermark is placed at the specified corner with configurable padding and width.",
2284
- inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the base image" }, "watermarkImageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history" } }, "required": ["imageUrl", "watermarkImageUrl", "corner", "paddingPx", "widthPx"] },
3939
+ inputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "URL of the base image" }, "watermarkImageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["imageUrl", "watermarkImageUrl", "corner", "paddingPx", "widthPx"] },
2285
3940
  outputSchema: { "type": "object", "properties": { "imageUrl": { "type": "string", "description": "CDN URL of the watermarked image" } }, "required": ["imageUrl"] }
2286
3941
  },
2287
3942
  "watermarkVideo": {
2288
3943
  stepType: "watermarkVideo",
2289
3944
  description: "Add an image watermark to a video",
2290
3945
  usageNotes: "",
2291
- inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "imageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "skipAssetCreation": { "type": "boolean", "description": "When true, the result will not appear in the user's asset history. Useful for intermediate compositing steps." } }, "required": ["videoUrl", "imageUrl", "corner", "paddingPx", "widthPx"] },
3946
+ inputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the source video" }, "imageUrl": { "type": "string", "description": "URL of the watermark image to overlay" }, "corner": { "enum": ["top-left", "top-right", "bottom-left", "bottom-right"], "type": "string", "description": "Corner position for the watermark placement" }, "paddingPx": { "type": "number", "description": "Padding from the corner in pixels" }, "widthPx": { "type": "number", "description": "Width of the watermark overlay in pixels" }, "intermediateAsset": { "type": "boolean", "description": "When true, the asset is created but hidden from the user's gallery (tagged as intermediate)" } }, "required": ["videoUrl", "imageUrl", "corner", "paddingPx", "widthPx"] },
2292
3947
  outputSchema: { "type": "object", "properties": { "videoUrl": { "type": "string", "description": "URL of the watermarked video" } }, "required": ["videoUrl"] }
2293
3948
  }
2294
3949
  };
2295
3950
 
2296
3951
  // src/index.ts
2297
3952
  var MindStudioAgent2 = MindStudioAgent;
3953
+ var _default;
3954
+ var mindstudio = new Proxy(
3955
+ {},
3956
+ {
3957
+ get(_, prop, receiver) {
3958
+ _default ??= new MindStudioAgent2();
3959
+ const value = Reflect.get(_default, prop, _default);
3960
+ return typeof value === "function" ? value.bind(_default) : value;
3961
+ }
3962
+ }
3963
+ );
3964
+ var index_default = mindstudio;
3965
+ var auth = new Proxy(
3966
+ {},
3967
+ {
3968
+ get(_, prop) {
3969
+ const target = mindstudio.auth;
3970
+ const value = Reflect.get(target, prop, target);
3971
+ return typeof value === "function" ? value.bind(target) : value;
3972
+ }
3973
+ }
3974
+ );
3975
+ var db = new Proxy(
3976
+ {},
3977
+ {
3978
+ get(_, prop) {
3979
+ const target = mindstudio.db;
3980
+ const value = Reflect.get(target, prop, target);
3981
+ return typeof value === "function" ? value.bind(target) : value;
3982
+ }
3983
+ }
3984
+ );
3985
+ var resolveUser = (userId) => mindstudio.resolveUser(userId);
2298
3986
  export {
3987
+ AuthContext,
2299
3988
  MindStudioAgent2 as MindStudioAgent,
2300
3989
  MindStudioError,
3990
+ Roles,
3991
+ auth,
2301
3992
  blockTypeAliases,
3993
+ db,
3994
+ index_default as default,
3995
+ mindstudio,
2302
3996
  monacoSnippets,
3997
+ resolveUser,
2303
3998
  stepMetadata
2304
3999
  };
2305
4000
  //# sourceMappingURL=index.js.map