@bdkinc/knex-ibmi 0.4.0 → 0.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,6 +14,8 @@ For IBM i OSS docs, see https://ibmi-oss-docs.readthedocs.io/. ODBC guidance: ht
14
14
  - Query execution
15
15
  - Transactions
16
16
  - Streaming
17
+ - Multi-row insert strategies (auto | sequential | disabled)
18
+ - Emulated returning for UPDATE and DELETE
17
19
 
18
20
  ## Requirements
19
21
 
@@ -167,49 +169,36 @@ try {
167
169
 
168
170
  ### Streaming
169
171
 
172
+ There are two primary ways to consume a result stream: (1) classic Node stream piping with transform stages, and (2) async iteration with `for await` (which can be easier to reason about). Use a `fetchSize` to control how many rows are fetched from the driver per batch.
173
+
170
174
  ```ts
171
175
  import { knex } from "knex";
172
176
  import { DB2Dialect, DB2Config } from "@bdkinc/knex-ibmi";
173
177
  import { Transform } from "node:stream";
174
178
  import { finished } from "node:stream/promises";
175
179
 
176
- const config: DB2Config = {
177
- client: DB2Dialect,
178
- connection: {
179
- host: "your-ibm-i-host",
180
- database: "*LOCAL",
181
- user: "your-username",
182
- password: "your-password",
183
- driver: "IBM i Access ODBC Driver",
184
- connectionStringParams: {
185
- ALLOWPROCCALLS: 1,
186
- CMT: 0,
187
- DBQ: "MYLIB"
188
- },
189
- },
190
- pool: { min: 2, max: 10 },
191
- };
192
-
180
+ const config: DB2Config = { /* ...same as earlier examples... */ };
193
181
  const db = knex(config);
194
182
 
195
183
  try {
196
- const stream = await db.select("*").from("LARGETABLE").stream({ fetchSize: 100 });
184
+ const stream = await db("LARGETABLE").select("*").stream({ fetchSize: 100 });
197
185
 
186
+ // Approach 1: Pipe through a Transform stream
198
187
  const transform = new Transform({
199
188
  objectMode: true,
200
- transform(chunk, _enc, cb) {
201
- // Process each row
202
- console.log("Processing row:", chunk);
203
- cb(null, chunk);
189
+ transform(row, _enc, cb) {
190
+ // Process each row (side effects, enrichment, filtering, etc.)
191
+ console.log("Transforming row id=", row.ID);
192
+ cb(null, row);
204
193
  },
205
194
  });
206
-
207
195
  stream.pipe(transform);
208
- await finished(stream);
196
+ await finished(stream); // Wait until piping completes
209
197
 
210
- // Alternative: async iteration
211
- for await (const record of stream) {
212
- console.log(record);
198
+ // Approach 2: Async iteration (recommended for simplicity)
199
+ const iterStream = await db("LARGETABLE").select("*").stream({ fetchSize: 200 });
200
+ for await (const row of iterStream) {
201
+ console.log("Iter row id=", row.ID);
213
202
  }
214
203
  } catch (error) {
215
204
  console.error("Streaming error:", error);
@@ -332,11 +321,6 @@ ibmi-migrations migrate:currentVersion # Show current migration version
332
321
  ibmi-migrations migrate:list # List all migrations
333
322
  ibmi-migrations migrate:make <name> # Create new migration file
334
323
 
335
- # Legacy aliases (backward compatibility):
336
- ibmi-migrations latest # Same as migrate:latest
337
- ibmi-migrations rollback # Same as migrate:rollback
338
- ibmi-migrations status # Same as migrate:status
339
-
340
324
  # Options:
341
325
  ibmi-migrations migrate:status --env production
342
326
  ibmi-migrations migrate:latest --knexfile ./config/knexfile.js
@@ -365,6 +349,63 @@ const config = {
365
349
 
366
350
  **Warning**: Standard Knex migrations may still hang on lock operations. The built-in IBM i migration system is strongly recommended.
367
351
 
352
+ ## Multi-Row Insert Strategies
353
+
354
+ Configure via `ibmi.multiRowInsert` in the knex config:
355
+
356
+ ```ts
357
+ const db = knex({
358
+ client: DB2Dialect,
359
+ connection: { /* ... */ },
360
+ ibmi: { multiRowInsert: 'auto' } // 'auto' | 'sequential' | 'disabled'
361
+ });
362
+ ```
363
+
364
+ - `auto` (default): Generates a single INSERT with multiple VALUES lists. When `.returning('*')` is used, or no explicit column list is given, all inserted rows are returned (lenient fallback). Identity values are whatever the DB2 ODBC driver surfaces for that multi-row statement.
365
+ - `sequential`: The compiler emits a single-row statement (for the first row), but at execution time each row is inserted individually in a loop so identity values can be reliably collected (using `IDENTITY_VAL_LOCAL()` per row). Suitable when you need each generated identity value.
366
+ - `disabled`: Falls back to legacy behavior — only the first row is inserted (additional rows are ignored). Useful for strict backward compatibility.
367
+
368
+ If you specify `.returning(['COL1', 'COL2'])` with multi-row inserts, those columns are selected; otherwise `IDENTITY_VAL_LOCAL()` (single-row) or `*` (multi-row) is used as a lenient fallback.
369
+
370
+ ## Returning Behavior (INSERT / UPDATE / DELETE)
371
+
372
+ Native `RETURNING` is not broadly supported over ODBC on IBM i. The dialect provides pragmatic emulation:
373
+
374
+ ### INSERT
375
+ - `auto` multi-row: generates a single multi-values INSERT. When no explicit column list is requested it returns all inserted rows (`*`) as a lenient fallback. Some installations may see this internally wrapped using a `SELECT * FROM FINAL TABLE( INSERT ... )` pattern in logs or debug output; that wrapper is only an implementation detail to surface inserted rows.
376
+ - `sequential`: inserts each row one at a time so it can reliably call `IDENTITY_VAL_LOCAL()` after each insert; builds an array of returned rows.
377
+ - `disabled`: legacy single-row insert behavior; additional rows in the values array are ignored.
378
+
379
+ ### UPDATE
380
+ - Executes the UPDATE.
381
+ - Re-selects the affected rows using the original WHERE clause when `.returning(...)` is requested.
382
+
383
+ ### DELETE
384
+ - Selects the rows to be deleted (capturing requested returning columns or `*`).
385
+ - Executes the DELETE.
386
+ - Returns the previously selected rows.
387
+
388
+ ### Notes
389
+ - `returning('*')` can be expensive on large result sets—limit the column list when possible.
390
+ - For guaranteed, ordered identity values across many inserted rows, use the `sequential` strategy.
391
+
392
+ ## Configuration Summary
393
+
394
+ ```ts
395
+ interface IbmiDialectConfig {
396
+ multiRowInsert?: 'auto' | 'sequential' | 'disabled';
397
+ sequentialInsertTransactional?: boolean; // if true, wraps sequential loop in BEGIN/COMMIT
398
+ }
399
+ ```
400
+
401
+ Attach this configuration object to the root knex config under the `ibmi` key.
402
+
403
+ ### Transactional Sequential Inserts
404
+
405
+ When `ibmi.sequentialInsertTransactional` is `true`, the dialect will attempt `BEGIN` before the per-row loop and `COMMIT` after. On commit failure it will attempt a `ROLLBACK`. If `BEGIN` is not supported, it logs a warning and continues non-transactionally.
406
+
407
+ <!-- Benchmarks section intentionally removed. Benchmarking is handled in the external test harness project -->
408
+
368
409
  ## Links
369
410
 
370
411
  - Knex: https://knexjs.org/
package/dist/index.d.mts CHANGED
@@ -54,6 +54,15 @@ declare class DB2Client extends knex.Client {
54
54
  destroyRawConnection(connection: any): Promise<any>;
55
55
  _getConnectionString(connectionConfig: DB2ConnectionConfig): string;
56
56
  _query(connection: Connection, obj: any): Promise<any>;
57
+ /**
58
+ * Execute UPDATE with returning clause using transaction + SELECT approach
59
+ * Since IBM i DB2 doesn't support FINAL TABLE with UPDATE, we:
60
+ * 1. Execute the UPDATE statement
61
+ * 2. Execute a SELECT to get the updated values using the same WHERE clause
62
+ */
63
+ private executeUpdateReturning;
64
+ private executeSequentialInsert;
65
+ private executeDeleteReturning;
57
66
  private normalizeQueryObject;
58
67
  private determineQueryMethod;
59
68
  private isSelectMethod;
@@ -71,6 +80,7 @@ declare class DB2Client extends knex.Client {
71
80
  }, stream: any, options: {
72
81
  fetchSize?: number;
73
82
  }): Promise<unknown>;
83
+ private calculateOptimalFetchSize;
74
84
  private _createCursorStream;
75
85
  transaction(container: any, config: any, outerTx: any): Knex.Transaction;
76
86
  schemaCompiler(tableBuilder: any): any;
@@ -80,7 +90,12 @@ declare class DB2Client extends knex.Client {
80
90
  createMigrationRunner(config?: Partial<IBMiMigrationConfig>): IBMiMigrationRunner;
81
91
  processResponse(obj: QueryObject | null, runner: any): any;
82
92
  private validateResponse;
93
+ private wrapError;
94
+ private shouldRetryQuery;
95
+ private retryQuery;
83
96
  private isConnectionError;
97
+ private isTimeoutError;
98
+ private isSQLError;
84
99
  private processSqlMethod;
85
100
  }
86
101
  interface DB2PoolConfig {
@@ -131,6 +146,10 @@ interface DB2Config extends Knex.Config {
131
146
  client: any;
132
147
  connection: DB2ConnectionConfig;
133
148
  pool?: DB2PoolConfig;
149
+ ibmi?: {
150
+ multiRowInsert?: "auto" | "sequential" | "disabled";
151
+ sequentialInsertTransactional?: boolean;
152
+ };
134
153
  }
135
154
  declare const DB2Dialect: typeof DB2Client;
136
155
 
package/dist/index.d.ts CHANGED
@@ -54,6 +54,15 @@ declare class DB2Client extends knex.Client {
54
54
  destroyRawConnection(connection: any): Promise<any>;
55
55
  _getConnectionString(connectionConfig: DB2ConnectionConfig): string;
56
56
  _query(connection: Connection, obj: any): Promise<any>;
57
+ /**
58
+ * Execute UPDATE with returning clause using transaction + SELECT approach
59
+ * Since IBM i DB2 doesn't support FINAL TABLE with UPDATE, we:
60
+ * 1. Execute the UPDATE statement
61
+ * 2. Execute a SELECT to get the updated values using the same WHERE clause
62
+ */
63
+ private executeUpdateReturning;
64
+ private executeSequentialInsert;
65
+ private executeDeleteReturning;
57
66
  private normalizeQueryObject;
58
67
  private determineQueryMethod;
59
68
  private isSelectMethod;
@@ -71,6 +80,7 @@ declare class DB2Client extends knex.Client {
71
80
  }, stream: any, options: {
72
81
  fetchSize?: number;
73
82
  }): Promise<unknown>;
83
+ private calculateOptimalFetchSize;
74
84
  private _createCursorStream;
75
85
  transaction(container: any, config: any, outerTx: any): Knex.Transaction;
76
86
  schemaCompiler(tableBuilder: any): any;
@@ -80,7 +90,12 @@ declare class DB2Client extends knex.Client {
80
90
  createMigrationRunner(config?: Partial<IBMiMigrationConfig>): IBMiMigrationRunner;
81
91
  processResponse(obj: QueryObject | null, runner: any): any;
82
92
  private validateResponse;
93
+ private wrapError;
94
+ private shouldRetryQuery;
95
+ private retryQuery;
83
96
  private isConnectionError;
97
+ private isTimeoutError;
98
+ private isSQLError;
84
99
  private processSqlMethod;
85
100
  }
86
101
  interface DB2PoolConfig {
@@ -131,6 +146,10 @@ interface DB2Config extends Knex.Config {
131
146
  client: any;
132
147
  connection: DB2ConnectionConfig;
133
148
  pool?: DB2PoolConfig;
149
+ ibmi?: {
150
+ multiRowInsert?: "auto" | "sequential" | "disabled";
151
+ sequentialInsertTransactional?: boolean;
152
+ };
134
153
  }
135
154
  declare const DB2Dialect: typeof DB2Client;
136
155