windmill-cli 1.693.5 → 1.695.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/esm/main.js +305 -56
  2. package/package.json +1 -1
package/esm/main.js CHANGED
@@ -5,25 +5,43 @@ var __getProtoOf = Object.getPrototypeOf;
 var __defProp = Object.defineProperty;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
+function __accessProp(key) {
+  return this[key];
+}
+var __toESMCache_node;
+var __toESMCache_esm;
 var __toESM = (mod, isNodeMode, target) => {
+  var canCache = mod != null && typeof mod === "object";
+  if (canCache) {
+    var cache = isNodeMode ? __toESMCache_node ??= new WeakMap : __toESMCache_esm ??= new WeakMap;
+    var cached = cache.get(mod);
+    if (cached)
+      return cached;
+  }
   target = mod != null ? __create(__getProtoOf(mod)) : {};
   const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
   for (let key of __getOwnPropNames(mod))
     if (!__hasOwnProp.call(to, key))
       __defProp(to, key, {
-        get: () => mod[key],
+        get: __accessProp.bind(mod, key),
         enumerable: true
       });
+  if (canCache)
+    cache.set(mod, to);
   return to;
 };
 var __commonJS = (cb, mod) => () => (mod || cb((mod = { exports: {} }).exports, mod), mod.exports);
+var __returnValue = (v) => v;
+function __exportSetter(name, newValue) {
+  this[name] = __returnValue.bind(null, newValue);
+}
 var __export = (target, all) => {
   for (var name in all)
     __defProp(target, name, {
       get: all[name],
       enumerable: true,
       configurable: true,
-      set: (newValue) => all[name] = () => newValue
+      set: __exportSetter.bind(all, name)
     });
 };
 var __esm = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
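The hunk above does two things: it replaces the per-property `() => mod[key]` closures with one shared `__accessProp` function bound per key, and it memoizes the converted namespace per module object in a `WeakMap`, so re-importing the same CommonJS module reuses a single wrapper instead of rebuilding all the getters. A minimal sketch of that memoization pattern; `toESM`, `getProp`, and `cache` are illustrative names, not the bundle's real helpers:

```typescript
// Sketch of WeakMap-memoized CJS -> ESM namespace conversion, assuming the
// pattern in the hunk above. Names here are illustrative.
const cache = new WeakMap<object, Record<string, unknown>>();

function getProp(this: Record<string, unknown>, key: string) {
  return this[key]; // one shared accessor, bound per (module, key) pair
}

function toESM(mod: Record<string, unknown>): Record<string, unknown> {
  const hit = cache.get(mod);
  if (hit) return hit; // same module object -> same namespace wrapper

  const ns: Record<string, unknown> = {};
  Object.defineProperty(ns, "default", { value: mod, enumerable: true });
  for (const key of Object.getOwnPropertyNames(mod)) {
    if (key === "default") continue;
    // Binding a shared function avoids allocating a fresh closure per property.
    Object.defineProperty(ns, key, { get: getProp.bind(mod, key), enumerable: true });
  }
  cache.set(mod, ns);
  return ns;
}
```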
@@ -13185,7 +13203,7 @@ function sameValueZero(a, b) {
 }
 function equal(a, b) {
   const seen = new Map;
-  return function compare(a2, b2) {
+  return function compare3(a2, b2) {
     if (sameValueZero(a2, b2))
       return true;
     if (isPrimitive(a2) || isPrimitive(b2))
@@ -13210,7 +13228,7 @@ function equal(a, b) {
       throw new TypeError("Cannot compare WeakSet instances");
     }
     if (a2 instanceof WeakRef) {
-      return compare(a2.deref(), b2.deref());
+      return compare3(a2.deref(), b2.deref());
     }
     if (seen.get(a2) === b2) {
       return true;
@@ -13232,7 +13250,7 @@ function equal(a, b) {
       return a2.symmetricDifference(b2).size === 0;
     }
     for (const key of aKeys) {
-      if (!b2.has(key) || !compare(a2.get(key), b2.get(key))) {
+      if (!b2.has(key) || !compare3(a2.get(key), b2.get(key))) {
         return false;
       }
     }
@@ -13241,9 +13259,9 @@ function equal(a, b) {
     let unmatchedEntries = a2.size;
     for (const [aKey, aValue] of a2.entries()) {
       for (const [bKey, bValue] of b2.entries()) {
-        if (!compare(aKey, bKey))
+        if (!compare3(aKey, bKey))
           continue;
-        if (aKey === aValue && bKey === bValue || compare(aValue, bValue)) {
+        if (aKey === aValue && bKey === bValue || compare3(aValue, bValue)) {
           unmatchedEntries--;
           break;
         }
@@ -13263,7 +13281,7 @@ function equal(a, b) {
       keys = getKeysDeep(a2).union(getKeysDeep(b2));
     }
     for (const key of keys) {
-      if (!compare(a2[key], b2[key])) {
+      if (!compare3(a2[key], b2[key])) {
         return false;
       }
       if (key in a2 && !(key in b2) || key in b2 && !(key in a2)) {
@@ -16692,7 +16710,7 @@ var init_OpenAPI = __esm(() => {
   PASSWORD: undefined,
   TOKEN: getEnv3("WM_TOKEN"),
   USERNAME: undefined,
-  VERSION: "1.693.4",
+  VERSION: "1.695.0",
   WITH_CREDENTIALS: true,
   interceptors: {
     request: new Interceptors,
@@ -25082,6 +25100,24 @@ async function requireLogin(opts) {
     throw new Error(`Network error: Could not connect to Windmill server at ${workspace.remote}`);
   }
   if (opts.token || opts.baseUrl) {
+    const isApiError = error2 && typeof error2 === "object" && "name" in error2 && error2.name === "ApiError";
+    if (isApiError) {
+      const status = error2.status;
+      const body = error2.body;
+      let bodyStr = typeof body === "object" && body !== null ? JSON.stringify(body) : String(body ?? "").trim();
+      bodyStr = bodyStr.replace(/\s*[@(]\w+\.rs:\d+[:\d]*\)?/g, "");
+      bodyStr = bodyStr.replace(/^(Permission denied|Not authorized): /, "");
+      if (status === 403) {
+        info(colors.red(`Permission denied: the token is valid but lacks the required scope.${bodyStr ? `
+${bodyStr}` : ""}`));
+      } else if (status === 401) {
+        info(colors.red(`Could not authenticate with the provided credentials. Please check your --token and --base-url and try again.${bodyStr ? `
+${bodyStr}` : ""}`));
+      } else {
+        info(colors.red(`Request failed (${status ?? "unknown"}): ${bodyStr}`));
+      }
+      return process.exit(1);
+    }
     info(colors.red("Could not authenticate with the provided credentials. Please check your --token and --base-url and try again."));
     return process.exit(1);
   }
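The new branch distinguishes 401 from 403 instead of printing one generic message, and scrubs Rust source locations (e.g. `(handlers.rs:123)`) from the server body before display. A standalone sketch of that triage, assuming the `{ name: "ApiError", status, body }` shape used above; `describeAuthFailure` is an illustrative helper, not part of the CLI:

```typescript
// Sketch of the status-based error triage in the hunk above.
function describeAuthFailure(err: unknown): string {
  const e = err as { name?: string; status?: number; body?: unknown } | null;
  if (!e || typeof e !== "object" || e.name !== "ApiError") {
    return "Could not authenticate with the provided credentials.";
  }
  // Normalize the body to printable text and strip leaked Rust source
  // locations like "(handlers.rs:123)".
  let detail = typeof e.body === "object" && e.body !== null
    ? JSON.stringify(e.body)
    : String(e.body ?? "").trim();
  detail = detail.replace(/\s*[@(]\w+\.rs:\d+[:\d]*\)?/g, "");

  if (e.status === 403) return `Permission denied: the token is valid but lacks the required scope. ${detail}`;
  if (e.status === 401) return `Could not authenticate with the provided credentials. ${detail}`;
  return `Request failed (${e.status ?? "unknown"}): ${detail}`;
}
```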
@@ -30405,7 +30441,7 @@ var require_stream = __commonJS((exports, module) => {
     if (!duplex.push(data3))
       ws.pause();
   });
-  ws.once("error", function error(err) {
+  ws.once("error", function error2(err) {
     if (duplex.destroyed)
       return;
     terminateOnDestroy = false;
@@ -30423,7 +30459,7 @@ var require_stream = __commonJS((exports, module) => {
       return;
     }
     let called = false;
-    ws.once("error", function error(err2) {
+    ws.once("error", function error2(err2) {
       called = true;
       callback(err2);
     });
@@ -30437,7 +30473,7 @@ var require_stream = __commonJS((exports, module) => {
   };
   duplex._final = function(callback) {
     if (ws.readyState === ws.CONNECTING) {
-      ws.once("open", function open() {
+      ws.once("open", function open2() {
         duplex._final(callback);
       });
       return;
@@ -30461,7 +30497,7 @@ var require_stream = __commonJS((exports, module) => {
   };
   duplex._write = function(chunk, encoding, callback) {
     if (ws.readyState === ws.CONNECTING) {
-      ws.once("open", function open() {
+      ws.once("open", function open2() {
         duplex._write(chunk, encoding, callback);
       });
       return;
@@ -35512,7 +35548,7 @@ var require_BufferList = __commonJS((exports, module) => {
     this.tail = null;
     this.length = 0;
   }
-  BufferList.prototype.push = function push(v) {
+  BufferList.prototype.push = function push2(v) {
     var entry = { data: v, next: null };
     if (this.length > 0)
       this.tail.next = entry;
@@ -35543,7 +35579,7 @@ var require_BufferList = __commonJS((exports, module) => {
     this.head = this.tail = null;
     this.length = 0;
   };
-  BufferList.prototype.join = function join(s) {
+  BufferList.prototype.join = function join9(s) {
     if (this.length === 0)
       return "";
     var p = this.head;
@@ -37478,7 +37514,7 @@ var require_nodejsUtils = __commonJS((exports, module) => {
 
 // node_modules/set-immediate-shim/index.js
 var require_set_immediate_shim = __commonJS((exports, module) => {
-  module.exports = typeof setImmediate === "function" ? setImmediate : function setImmediate() {
+  module.exports = typeof setImmediate === "function" ? setImmediate : function setImmediate2() {
     var args = [].slice.apply(arguments);
     args.splice(1, 0, 0);
     setTimeout.apply(null, args);
@@ -45833,7 +45869,7 @@ var require_headers = __commonJS((exports) => {
     }
     return result;
   };
-  exports.encode = function encode(opts) {
+  exports.encode = function encode2(opts) {
     const buf = b4a.alloc(512);
     let name = opts.name;
     let prefix = "";
@@ -45874,7 +45910,7 @@ var require_headers = __commonJS((exports) => {
     b4a.write(buf, encodeOct(cksum(buf), 6), 148);
     return buf;
   };
-  exports.decode = function decode(buf, filenameEncoding, allowUnknownFormat) {
+  exports.decode = function decode2(buf, filenameEncoding, allowUnknownFormat) {
     let typeflag = buf[156] === 0 ? 0 : buf[156] - ZERO_OFFSET;
     let name = decodeStr(buf, 0, 100, filenameEncoding);
     const mode = decodeOct(buf, 100, 8);
@@ -48785,7 +48821,7 @@ var require_subschema = __commonJS((exports) => {
 
 // node_modules/fast-deep-equal/index.js
 var require_fast_deep_equal = __commonJS((exports, module) => {
-  module.exports = function equal(a, b) {
+  module.exports = function equal2(a, b) {
     if (a === b)
       return true;
     if (a && b && typeof a == "object" && typeof b == "object") {
@@ -48797,7 +48833,7 @@ var require_fast_deep_equal = __commonJS((exports, module) => {
       if (length != b.length)
         return false;
       for (i = length;i-- !== 0; )
-        if (!equal(a[i], b[i]))
+        if (!equal2(a[i], b[i]))
           return false;
       return true;
     }
@@ -48816,7 +48852,7 @@ var require_fast_deep_equal = __commonJS((exports, module) => {
       return false;
     for (i = length;i-- !== 0; ) {
       var key = keys[i];
-      if (!equal(a[key], b[key]))
+      if (!equal2(a[key], b[key]))
         return false;
     }
     return true;
@@ -72316,7 +72352,7 @@ async function pushSchedule(workspace, path19, schedule, localSchedule, permissi
       ...preserveFields
     }
   });
-  if (localSchedule.enabled != schedule.enabled) {
+  if (localSchedule.enabled !== undefined && localSchedule.enabled !== schedule.enabled) {
     info(colors.bold.yellow(`Schedule ${path19} is ${localSchedule.enabled ? "enabled" : "disabled"} locally but not on remote, updating remote`));
     await setScheduleEnabled({
       workspace,
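The comparison fix above matters because a local schedule file may simply omit `enabled`. With loose `!=`, `undefined != false` evaluates to true, so an omitted field looked like a mismatch and forced a remote update on every sync; the new guard only syncs when the local file takes an explicit stance. A minimal illustration, using a hypothetical `shouldSync` helper:

```typescript
// Illustration of the guard change above; `shouldSync` is hypothetical.
function shouldSync(localEnabled: boolean | undefined, remoteEnabled: boolean): boolean {
  // old behavior: `localEnabled != remoteEnabled` is true when the local
  // schedule file omits `enabled` (undefined != false), forcing an update.
  return localEnabled !== undefined && localEnabled !== remoteEnabled;
}

console.log(shouldSync(undefined, false)); // false: omitted local field leaves the remote alone
console.log(shouldSync(true, false));      // true: a genuine mismatch still syncs
```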
@@ -72351,13 +72387,31 @@ async function enable(opts, path19) {
   opts = await mergeConfigWithConfigFile(opts);
   const workspace = await resolveWorkspace(opts);
   await requireLogin(opts);
-  await setScheduleEnabled({
-    workspace: workspace.workspaceId,
-    path: path19,
-    requestBody: { enabled: true }
-  });
+  try {
+    await setScheduleEnabled({
+      workspace: workspace.workspaceId,
+      path: path19,
+      requestBody: { enabled: true, force: opts.force }
+    });
+  } catch (e) {
+    const conflict = parseForkConflict(e);
+    if (conflict) {
+      error(`Cannot enable schedule '${path19}': the parent workspace '${conflict.parentWorkspaceId}' has the same path configured. ` + `Both crons would fire on every tick and the script would run twice per scheduled time.
+` + `Re-run with --force to enable anyway.`);
+      process.exit(1);
+    }
+    throw e;
+  }
   info(colors.green(`Schedule ${path19} enabled.`));
 }
+function parseForkConflict(e) {
+  const body = e?.body;
+  const raw = typeof body === "string" ? body : e?.message ?? "";
+  const m = String(raw).match(/fork-conflict:([^:]+):(.+)/);
+  if (!m)
+    return;
+  return { kind: m[1], parentWorkspaceId: m[2].trim() };
+}
 async function disable(opts, path19) {
   opts = await mergeConfigWithConfigFile(opts);
   const workspace = await resolveWorkspace(opts);
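`parseForkConflict` expects the server to embed a `fork-conflict:<kind>:<parent_workspace_id>` marker in the error body or message; that format is inferred from the regex above, since the server-side contract is not shown in this diff. A sketch of the round-trip, with a made-up payload value:

```typescript
// Sketch of the marker parsing assumed by parseForkConflict above. The
// "fork-conflict:schedule:parent_ws" input is an illustrative example value.
function parseForkConflict(raw: string): { kind: string; parentWorkspaceId: string } | undefined {
  const m = raw.match(/fork-conflict:([^:]+):(.+)/);
  return m ? { kind: m[1], parentWorkspaceId: m[2].trim() } : undefined;
}

console.log(parseForkConflict("fork-conflict:schedule:parent_ws"));
// -> { kind: "schedule", parentWorkspaceId: "parent_ws" }
console.log(parseForkConflict("some other error")); // -> undefined
```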
@@ -72398,7 +72452,7 @@ var init_schedule = __esm(async () => {
     init_types()
   ]);
   import_yaml26 = __toESM(require_dist(), 1);
-  command16 = new Command().description("schedule related commands").option("--json", "Output as JSON (for piping to jq)").action(list9).command("list", "list all schedules").option("--json", "Output as JSON (for piping to jq)").action(list9).command("get", "get a schedule's details").arguments("<path:string>").option("--json", "Output as JSON (for piping to jq)").action(get7).command("new", "create a new schedule locally").arguments("<path:string>").action(newSchedule).command("push", "push a local schedule spec. This overrides any remote versions.").arguments("<file_path:string> <remote_path:string>").action(push8).command("enable", "Enable a schedule").arguments("<path:string>").action(enable).command("disable", "Disable a schedule").arguments("<path:string>").action(disable).command("set-permissioned-as", "Set the email (run-as user) for a schedule (requires admin or wm_deployers group)").arguments("<path:string> <email:string>").action(async (opts, schedulePath, email) => {
+  command16 = new Command().description("schedule related commands").option("--json", "Output as JSON (for piping to jq)").action(list9).command("list", "list all schedules").option("--json", "Output as JSON (for piping to jq)").action(list9).command("get", "get a schedule's details").arguments("<path:string>").option("--json", "Output as JSON (for piping to jq)").action(get7).command("new", "create a new schedule locally").arguments("<path:string>").action(newSchedule).command("push", "push a local schedule spec. This overrides any remote versions.").arguments("<file_path:string> <remote_path:string>").action(push8).command("enable", "Enable a schedule").option("--force", "Bypass the fork-conflict warning when the parent workspace has the same schedule (acknowledges that both crons will fire)").arguments("<path:string>").action(enable).command("disable", "Disable a schedule").arguments("<path:string>").action(disable).command("set-permissioned-as", "Set the email (run-as user) for a schedule (requires admin or wm_deployers group)").arguments("<path:string> <email:string>").action(async (opts, schedulePath, email) => {
     const workspace = await resolveWorkspace(opts);
     await requireLogin(opts);
     const cache3 = new Map;
@@ -78968,6 +79022,37 @@ Name the parameters by adding comments before the statement:
 -- @name2 (int64) = 0
 SELECT * FROM users WHERE name = @name1 AND age > @name2;
 \`\`\`
+
+## Receiving an S3Object as a script parameter
+
+Declare the arg with type \`(s3object)\`. Windmill renders an S3 file picker for
+it, downloads the file, and binds it as a \`STRING\` JSON parameter — Parquet/CSV
+files are decoded server-side into a JSON array of records, JSON/JSONL pass
+through. Consume with \`JSON_EXTRACT_ARRAY\` / \`JSON_VALUE\`:
+
+\`\`\`sql
+-- @file (s3object)
+SELECT
+  CAST(JSON_VALUE(row, '$.id') AS INT64) AS id,
+  JSON_VALUE(row, '$.name') AS name
+FROM UNNEST(JSON_EXTRACT_ARRAY(@file)) AS row;
+\`\`\`
+
+## Streaming query results to S3
+
+Add a \`-- s3\` directive at the top of the script to stream the result set to S3
+instead of returning rows. Windmill writes the file and returns its \`S3Object\`
+as the script result.
+
+\`\`\`sql
+-- s3 prefix=exports/users format=parquet
+SELECT id, name FROM users;
+\`\`\`
+
+All keys are optional: \`prefix\` (object key prefix), \`storage\` (named storage —
+omit to use the workspace default), \`format\` (\`json\` (default), \`parquet\`, or
+\`csv\`). Use this for large result sets — rows stream directly to S3 instead of
+being buffered, bypassing the 10000-row return cap.
 `,
 "write-script-bun": `---
 name: write-script-bun
@@ -79091,19 +79176,20 @@ export async function preprocessor(event: Event) {
 
 ## S3 Object Operations
 
-Windmill provides built-in support for S3-compatible storage operations.
-
-### S3Object Type
+Windmill provides built-in support for S3-compatible storage operations. The \`wmill.S3Object\` type covers both the \`s3://storage/key\` URI form (\`s3:///key\` for the workspace default storage) and the \`{ s3, storage? }\` record form — always use it instead of redefining your own.
 
-The S3Object type represents a file in S3 storage:
+### Receiving an S3Object as a script parameter
 
 \`\`\`typescript
-type S3Object = {
-  s3: string; // Path within the bucket
-};
+import * as wmill from "windmill-client";
+
+export async function main(file: wmill.S3Object) {
+  const content = await wmill.loadS3File(file);
+  // ...
+}
 \`\`\`
 
-## TypeScript Operations
+### S3 operations
 
 \`\`\`typescript
 import * as wmill from "windmill-client";
@@ -79115,7 +79201,7 @@ const content: Uint8Array = await wmill.loadS3File(s3object);
 const blob: Blob = await wmill.loadS3FileStream(s3object);
 
 // Write file to S3
-const result: S3Object = await wmill.writeS3File(
+const result: wmill.S3Object = await wmill.writeS3File(
   s3object, // Target path (or undefined to auto-generate)
   fileContent, // string or Blob
   s3ResourcePath // Optional: specific S3 resource to use
79781
79867
 
79782
79868
  ## S3 Object Operations
79783
79869
 
79784
- Windmill provides built-in support for S3-compatible storage operations.
79785
-
79786
- ### S3Object Type
79870
+ Windmill provides built-in support for S3-compatible storage operations. The \`wmill.S3Object\` type covers both the \`s3://storage/key\` URI form (\`s3:///key\` for the workspace default storage) and the \`{ s3, storage? }\` record form — always use it instead of redefining your own.
79787
79871
 
79788
- The S3Object type represents a file in S3 storage:
79872
+ ### Receiving an S3Object as a script parameter
79789
79873
 
79790
79874
  \`\`\`typescript
79791
- type S3Object = {
79792
- s3: string; // Path within the bucket
79793
- };
79875
+ import * as wmill from "windmill-client";
79876
+
79877
+ export async function main(file: wmill.S3Object) {
79878
+ const content = await wmill.loadS3File(file);
79879
+ // ...
79880
+ }
79794
79881
  \`\`\`
79795
79882
 
79796
- ## TypeScript Operations
79883
+ ### S3 operations
79797
79884
 
79798
79885
  \`\`\`typescript
79799
79886
  import * as wmill from "windmill-client";
@@ -79805,7 +79892,7 @@ const content: Uint8Array = await wmill.loadS3File(s3object);
 const blob: Blob = await wmill.loadS3FileStream(s3object);
 
 // Write file to S3
-const result: S3Object = await wmill.writeS3File(
+const result: wmill.S3Object = await wmill.writeS3File(
   s3object, // Target path (or undefined to auto-generate)
   fileContent, // string or Blob
   s3ResourcePath // Optional: specific S3 resource to use
@@ -80559,19 +80646,20 @@ export async function preprocessor(event: Event) {
 
 ## S3 Object Operations
 
-Windmill provides built-in support for S3-compatible storage operations.
-
-### S3Object Type
+Windmill provides built-in support for S3-compatible storage operations. The \`wmill.S3Object\` type covers both the \`s3://storage/key\` URI form (\`s3:///key\` for the workspace default storage) and the \`{ s3, storage? }\` record form — always use it instead of redefining your own.
 
-The S3Object type represents a file in S3 storage:
+### Receiving an S3Object as a script parameter
 
 \`\`\`typescript
-type S3Object = {
-  s3: string; // Path within the bucket
-};
+import * as wmill from "windmill-client";
+
+export async function main(file: wmill.S3Object) {
+  const content = await wmill.loadS3File(file);
+  // ...
+}
 \`\`\`
 
-## TypeScript Operations
+### S3 operations
 
 \`\`\`typescript
 import * as wmill from "windmill-client";
@@ -80583,7 +80671,7 @@ const content: Uint8Array = await wmill.loadS3File(s3object);
 const blob: Blob = await wmill.loadS3FileStream(s3object);
 
 // Write file to S3
-const result: S3Object = await wmill.writeS3File(
+const result: wmill.S3Object = await wmill.writeS3File(
   s3object, // Target path (or undefined to auto-generate)
   fileContent, // string or Blob
   s3ResourcePath // Optional: specific S3 resource to use
@@ -81220,6 +81308,30 @@ SELECT * FROM read_parquet('s3:///path/to/file.parquet');
 -- JSON files
 SELECT * FROM read_json('s3:///path/to/file.json');
 \`\`\`
+
+### Receiving an S3Object as a script parameter
+
+Declare the arg with type \`(s3object)\`. Windmill renders an S3 file picker for it
+and binds the arg as the bare \`s3://storage/key\` URI, which DuckDB's reader
+functions consume directly:
+
+\`\`\`sql
+-- $file (s3object)
+SELECT * FROM read_parquet($file);
+\`\`\`
+
+Works with any DuckDB reader: \`read_csv($file)\`, \`read_json($file)\`, etc.
+
+### Writing query results to S3
+
+DuckDB writes to S3 natively via \`COPY ... TO\`:
+
+\`\`\`sql
+COPY (SELECT * FROM users) TO 's3:///exports/users.parquet' (FORMAT PARQUET);
+\`\`\`
+
+Use this instead of the \`-- s3\` streaming directive supported by the other SQL
+dialects — that directive is not available in DuckDB.
 `,
 "write-script-go": `---
 name: write-script-go
@@ -81536,6 +81648,36 @@ Name the parameters by adding comments before the statement:
 -- @P2 name2 (int) = 0
 SELECT * FROM users WHERE name = @P1 AND age > @P2;
 \`\`\`
+
+## Receiving an S3Object as a script parameter
+
+Declare the arg with type \`(s3object)\`. Windmill renders an S3 file picker for
+it, downloads the file, and binds it as \`nvarchar(max)\` JSON text — Parquet/CSV
+files are decoded server-side into a JSON array of records, JSON/JSONL pass
+through. Consume with \`OPENJSON\`:
+
+\`\`\`sql
+-- @P1 file (s3object)
+SELECT id, name
+FROM OPENJSON(@P1)
+WITH (id INT, name NVARCHAR(200));
+\`\`\`
+
+## Streaming query results to S3
+
+Add a \`-- s3\` directive at the top of the script to stream the result set to S3
+instead of returning rows. Windmill writes the file and returns its \`S3Object\`
+as the script result.
+
+\`\`\`sql
+-- s3 prefix=exports/users format=parquet
+SELECT id, name FROM users;
+\`\`\`
+
+All keys are optional: \`prefix\` (object key prefix), \`storage\` (named storage —
+omit to use the workspace default), \`format\` (\`json\` (default), \`parquet\`, or
+\`csv\`). Use this for large result sets — rows stream directly to S3 instead of
+being buffered as the script return value.
 `,
 "write-script-mysql": `---
 name: write-script-mysql
@@ -81588,6 +81730,37 @@ Name the parameters by adding comments before the statement:
 -- ? name2 (int) = 0
 SELECT * FROM users WHERE name = ? AND age > ?;
 \`\`\`
+
+## Receiving an S3Object as a script parameter
+
+Declare the arg with type \`(s3object)\`. Windmill renders an S3 file picker for
+it, downloads the file, and binds it as JSON text — Parquet/CSV files are
+decoded server-side into a JSON array of records, JSON/JSONL pass through.
+Consume with \`JSON_TABLE\`:
+
+\`\`\`sql
+-- ? file (s3object)
+SELECT id, name
+FROM JSON_TABLE(?, '$[*]'
+  COLUMNS (id INT PATH '$.id', name VARCHAR(200) PATH '$.name')
+) AS r;
+\`\`\`
+
+## Streaming query results to S3
+
+Add a \`-- s3\` directive at the top of the script to stream the result set to S3
+instead of returning rows. Windmill writes the file and returns its \`S3Object\`
+as the script result.
+
+\`\`\`sql
+-- s3 prefix=exports/users format=parquet
+SELECT id, name FROM users;
+\`\`\`
+
+All keys are optional: \`prefix\` (object key prefix), \`storage\` (named storage —
+omit to use the workspace default), \`format\` (\`json\` (default), \`parquet\`, or
+\`csv\`). Use this for large result sets — rows stream directly to S3 instead of
+being buffered as the script return value.
 `,
 "write-script-nativets": `---
 name: write-script-nativets
@@ -82395,6 +82568,35 @@ Name the parameters by adding comments at the beginning of the script (without s
 -- $2 name2 = default_value
 SELECT * FROM users WHERE name = $1::TEXT AND age > $2::INT;
 \`\`\`
+
+## Receiving an S3Object as a script parameter
+
+Declare the arg with type \`(s3object)\`. Windmill renders an S3 file picker for
+it, downloads the file, and binds it as a \`jsonb\` parameter — Parquet/CSV files
+are decoded server-side into a JSON array of records, JSON/JSONL pass through.
+Consume with \`jsonb_to_recordset\` (or any \`jsonb\` API):
+
+\`\`\`sql
+-- $1 file (s3object)
+SELECT *
+FROM jsonb_to_recordset($1::jsonb) AS r(id INT, name TEXT);
+\`\`\`
+
+## Streaming query results to S3
+
+Add a \`-- s3\` directive at the top of the script to stream the result set to S3
+instead of returning rows. Windmill writes the file and returns its \`S3Object\`
+as the script result.
+
+\`\`\`sql
+-- s3 prefix=exports/users format=parquet
+SELECT id, name FROM users;
+\`\`\`
+
+All keys are optional: \`prefix\` (object key prefix), \`storage\` (named storage —
+omit to use the workspace default), \`format\` (\`json\` (default), \`parquet\`, or
+\`csv\`). Use this for large result sets — rows stream directly to S3 instead of
+being buffered as the script return value.
 `,
 "write-script-powershell": `---
 name: write-script-powershell
@@ -82632,6 +82834,21 @@ def preprocessor(event: Event):
 
 Windmill provides built-in support for S3-compatible storage operations.
 
+### Receiving an S3Object as a script parameter
+
+To accept a file from S3 as input to a script, type the parameter with \`S3Object\` (imported from \`wmill\`):
+
+\`\`\`python
+import wmill
+from wmill import S3Object
+
+def main(file: S3Object):
+    content = wmill.load_s3_file(file)
+    # ...
+\`\`\`
+
+### S3 operations
+
 \`\`\`python
 import wmill
 
@@ -83636,6 +83853,37 @@ Name the parameters by adding comments before the statement:
 -- ? name2 (number) = 0
 SELECT * FROM users WHERE name = ? AND age > ?;
 \`\`\`
+
+## Receiving an S3Object as a script parameter
+
+Declare the arg with type \`(s3object)\`. Windmill renders an S3 file picker for
+it, downloads the file, and binds it as JSON text — Parquet/CSV files are
+decoded server-side into a JSON array of records, JSON/JSONL pass through.
+Wrap the bind with \`PARSE_JSON(?)\` and walk it with \`LATERAL FLATTEN\`:
+
+\`\`\`sql
+-- ? file (s3object)
+SELECT
+  v.value:id::NUMBER AS id,
+  v.value:name::STRING AS name
+FROM LATERAL FLATTEN(input => PARSE_JSON(?)) v;
+\`\`\`
+
+## Streaming query results to S3
+
+Add a \`-- s3\` directive at the top of the script to stream the result set to S3
+instead of returning rows. Windmill writes the file and returns its \`S3Object\`
+as the script result.
+
+\`\`\`sql
+-- s3 prefix=exports/users format=parquet
+SELECT id, name FROM users;
+\`\`\`
+
+All keys are optional: \`prefix\` (object key prefix), \`storage\` (named storage —
+omit to use the workspace default), \`format\` (\`json\` (default), \`parquet\`, or
+\`csv\`). Use this for large result sets — rows stream directly to S3 instead of
+being buffered, bypassing the 10000-row return cap.
 `,
 "write-flow": `---
 name: write-flow
@@ -85544,6 +85792,7 @@ schedule related commands
 - \`schedule new <path:string>\` - create a new schedule locally
 - \`schedule push <file_path:string> <remote_path:string>\` - push a local schedule spec. This overrides any remote versions.
 - \`schedule enable <path:string>\` - Enable a schedule
+  - \`--force\` - Bypass the fork-conflict warning when the parent workspace has the same schedule (acknowledges that both crons will fire)
 - \`schedule disable <path:string>\` - Disable a schedule
 - \`schedule set-permissioned-as <path:string> <email:string>\` - Set the email (run-as user) for a schedule (requires admin or wm_deployers group)
 
@@ -88690,7 +88939,7 @@ var config_default = command35;
 
 // src/main.ts
 await init_context();
-var VERSION = "1.693.4";
+var VERSION = "1.695.0";
 async function checkVersionSafe(cmd) {
   const mainCommand = cmd.getMainCommand();
   const upgradeCommand = mainCommand.getCommand("upgrade");
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "windmill-cli",
-  "version": "1.693.5",
+  "version": "1.695.0",
   "description": "CLI for Windmill",
   "license": "Apache 2.0",
   "type": "module",