alepha 0.9.3 → 0.9.5

package/postgres.d.ts CHANGED
@@ -1,28 +1,17 @@
1
- import * as _alepha_core13 from "alepha";
2
1
  import * as _alepha_core1 from "alepha";
3
- import * as _alepha_core2 from "alepha";
4
- import * as _alepha_core0 from "alepha";
5
- import { Alepha, AlephaError, Descriptor, KIND, Service, Static, TObject, TSchema as TSchema$1 } from "alepha";
6
- import * as drizzle_orm0 from "drizzle-orm";
7
- import * as drizzle_orm2 from "drizzle-orm";
8
- import * as drizzle_orm9 from "drizzle-orm";
9
- import * as drizzle from "drizzle-orm";
2
+ import { Alepha, AlephaError, Descriptor, KIND, Service, Static, TNull, TObject, TOptional, TSchema as TSchema$1, TUnion } from "alepha";
3
+ import * as drizzle_orm6 from "drizzle-orm";
10
4
  import { BuildColumns, BuildExtraConfigColumns, SQL, SQLWrapper, TableConfig, sql } from "drizzle-orm";
11
5
  import * as pg$1 from "drizzle-orm/pg-core";
12
- import * as drizzle_orm_pg_core0 from "drizzle-orm/pg-core";
13
- import * as drizzle_orm_pg_core4 from "drizzle-orm/pg-core";
14
- import * as drizzle_orm_pg_core3 from "drizzle-orm/pg-core";
15
- import { AnyPgColumn, AnyPgTable, LockConfig, LockStrength, PgColumn, PgColumnBuilderBase, PgDatabase, PgInsertValue, PgSequenceOptions, PgTableExtraConfigValue, PgTableWithColumns, PgTransaction, PgTransactionConfig, TableConfig as TableConfig$1, UpdateDeleteAction } from "drizzle-orm/pg-core";
6
+ import { AnyPgColumn, LockConfig, LockStrength, PgColumn, PgColumnBuilderBase, PgDatabase, PgInsertValue, PgSequenceOptions, PgTableExtraConfigValue, PgTableWithColumns, PgTransaction, PgTransactionConfig, SelectedFields, TableConfig as TableConfig$1, UpdateDeleteAction } from "drizzle-orm/pg-core";
7
+ import { DateTime, DateTimeProvider } from "alepha/datetime";
8
+ import * as _alepha_logger1 from "alepha/logger";
16
9
  import * as _alepha_lock0 from "alepha/lock";
17
10
  import { PostgresJsDatabase } from "drizzle-orm/postgres-js";
18
11
  import postgres from "postgres";
19
12
  import * as _alepha_retry0 from "alepha/retry";
20
- import * as _sinclair_typebox2 from "@sinclair/typebox";
21
- import * as _sinclair_typebox9 from "@sinclair/typebox";
22
- import * as _sinclair_typebox10 from "@sinclair/typebox";
23
- import * as _sinclair_typebox11 from "@sinclair/typebox";
24
13
  import * as _sinclair_typebox0 from "@sinclair/typebox";
25
- import { Evaluate, IntegerOptions, Kind, NumberOptions, ObjectOptions, OptionalKind, Static as Static$1, StringOptions, TAdditionalProperties, TArray, TBoolean, TInteger, TIntersect, TNumber, TObject as TObject$1, TOptional, TOptionalWithFlag, TPick, TProperties, TReadonly, TRecord, TSchema as TSchema$2, TString } from "@sinclair/typebox";
14
+ import { Evaluate, IntegerOptions, Kind, NumberOptions, ObjectOptions, OptionalKind, Static as Static$1, StringOptions, TAdditionalProperties, TArray, TBoolean, TInteger, TIntersect, TNumber, TObject as TObject$1, TOptional as TOptional$1, TOptionalWithFlag, TPick, TProperties, TReadonly, TRecord, TSchema as TSchema$2, TString } from "@sinclair/typebox";
26
15
  import { PgTransactionConfig as PgTransactionConfig$1 } from "drizzle-orm/pg-core/session";
27
16
  import * as DrizzleKit from "drizzle-kit/api";
28
17
  import { MigrationConfig } from "drizzle-orm/migrator";
@@ -31,24 +20,21 @@ export * from "drizzle-orm/pg-core";
31
20
 
32
21
  //#region src/constants/PG_SCHEMA.d.ts
33
22
  declare const PG_SCHEMA: unique symbol;
34
- //# sourceMappingURL=PG_SCHEMA.d.ts.map
35
23
  //#endregion
36
24
  //#region src/constants/PG_SYMBOLS.d.ts
37
25
  declare const PG_DEFAULT: unique symbol;
38
26
  declare const PG_PRIMARY_KEY: unique symbol;
39
27
  declare const PG_CREATED_AT: unique symbol;
40
28
  declare const PG_UPDATED_AT: unique symbol;
29
+ declare const PG_DELETED_AT: unique symbol;
41
30
  declare const PG_VERSION: unique symbol;
42
31
  declare const PG_IDENTITY: unique symbol;
43
- declare const PG_MANY: unique symbol;
44
- declare const PG_ONE: unique symbol;
45
32
  declare const PG_REF: unique symbol;
46
33
  /**
47
34
  * @deprecated Use `PG_IDENTITY` instead.
48
35
  */
49
36
  declare const PG_SERIAL: unique symbol;
50
37
  type PgDefault = typeof PG_DEFAULT;
51
- type PgMany = typeof PG_MANY;
52
38
  type PgRef = typeof PG_REF;
53
39
  type PgPrimaryKey = typeof PG_PRIMARY_KEY;
54
40
  type PgSymbols = {
@@ -56,10 +42,9 @@ type PgSymbols = {
56
42
  [PG_PRIMARY_KEY]: {};
57
43
  [PG_CREATED_AT]: {};
58
44
  [PG_UPDATED_AT]: {};
45
+ [PG_DELETED_AT]: {};
59
46
  [PG_VERSION]: {};
60
47
  [PG_IDENTITY]: PgIdentityOptions;
61
- [PG_MANY]: PgManyOptions;
62
- [PG_ONE]: PgManyOptions;
63
48
  [PG_REF]: PgRefOptions;
64
49
  /**
65
50
  * @deprecated Use `PG_IDENTITY` instead.
@@ -72,11 +57,6 @@ type PgIdentityOptions = {
72
57
  } & PgSequenceOptions & {
73
58
  name?: string;
74
59
  };
75
- interface PgManyOptions {
76
- table: AnyPgTable;
77
- schema: TObject$1;
78
- foreignKey: string;
79
- }
80
60
  interface PgRefOptions {
81
61
  ref: () => AnyPgColumn;
82
62
  actions?: {
@@ -84,13 +64,12 @@ interface PgRefOptions {
84
64
  onDelete?: UpdateDeleteAction;
85
65
  };
86
66
  }
87
- //# sourceMappingURL=PG_SYMBOLS.d.ts.map
88
67
  //#endregion
89
- //#region src/interfaces/TInsertObject.d.ts
68
+ //#region src/schemas/insertSchema.d.ts
90
69
  /**
91
70
  * Fork of the original typebox schema "TObject".
92
71
  */
93
- interface TInsertObject<T extends TObject$1> extends TSchema$2, ObjectOptions {
72
+ interface TObjectInsert<T extends TObject$1> extends TSchema$2, ObjectOptions {
94
73
  [Kind]: "Object";
95
74
  static: ObjectStatic<{ [K in keyof T["properties"] as T["properties"][K] extends {
96
75
  [PG_DEFAULT]: any;
@@ -102,16 +81,43 @@ interface TInsertObject<T extends TObject$1> extends TSchema$2, ObjectOptions {
102
81
  [PG_DEFAULT]: any;
103
82
  } ? never : K]: T["properties"][K] };
104
83
  }
105
- type ReadonlyOptionalPropertyKeys<T extends TProperties> = { [K in keyof T]: T[K] extends TReadonly<TSchema$2> ? T[K] extends TOptional<T[K]> ? K : never : never }[keyof T];
106
- type ReadonlyPropertyKeys<T extends TProperties> = { [K in keyof T]: T[K] extends TReadonly<TSchema$2> ? T[K] extends TOptional<T[K]> ? never : K : never }[keyof T];
107
- type OptionalPropertyKeys<T extends TProperties> = { [K in keyof T]: T[K] extends TOptional<TSchema$2> ? T[K] extends TReadonly<T[K]> ? never : K : never }[keyof T];
84
+ type ReadonlyOptionalPropertyKeys<T extends TProperties> = { [K in keyof T]: T[K] extends TReadonly<TSchema$2> ? T[K] extends TOptional$1<T[K]> ? K : never : never }[keyof T];
85
+ type ReadonlyPropertyKeys<T extends TProperties> = { [K in keyof T]: T[K] extends TReadonly<TSchema$2> ? T[K] extends TOptional$1<T[K]> ? never : K : never }[keyof T];
86
+ type OptionalPropertyKeys<T extends TProperties> = { [K in keyof T]: T[K] extends TOptional$1<TSchema$2> ? T[K] extends TReadonly<T[K]> ? never : K : never }[keyof T];
108
87
  type RequiredPropertyKeys<T extends TProperties> = keyof Omit<T, ReadonlyOptionalPropertyKeys<T> | ReadonlyPropertyKeys<T> | OptionalPropertyKeys<T>>;
109
88
  type ObjectStaticProperties<T extends TProperties, R extends Record<keyof any, unknown>> = Evaluate<Readonly<Partial<Pick<R, ReadonlyOptionalPropertyKeys<T>>>> & Readonly<Pick<R, ReadonlyPropertyKeys<T>>> & Partial<Pick<R, OptionalPropertyKeys<T>>> & Required<Pick<R, RequiredPropertyKeys<T>>>>;
110
89
  type ObjectStatic<T extends TProperties, P extends unknown[]> = ObjectStaticProperties<T, { [K in keyof T]: Static$1<T[K], P> }>;
90
+ declare const insertSchema: <T extends TObject$1>(obj: T) => TObjectInsert<T>;
91
+ /**
92
+ * Enhance Typebox with a support of "Default" (PG_DEFAULT).
93
+ */
94
+ type StaticInsert<T extends TObject$1> = StaticEntry<T> & StaticDefaultEntry<T>;
95
+ type StaticDefaultEntry<T extends TObject$1> = { [K in keyof T["properties"] as T["properties"][K] extends {
96
+ [PG_DEFAULT]: any;
97
+ } | {
98
+ [OptionalKind]: "Optional";
99
+ } ? K : never]?: Static$1<T["properties"][K]> };
100
+ type StaticEntry<T extends TObject$1> = { [K in keyof T["properties"] as T["properties"][K] extends {
101
+ [PG_DEFAULT]: any;
102
+ } | {
103
+ [OptionalKind]: "Optional";
104
+ } ? never : K]: Static$1<T["properties"][K]> };
105
+ //#endregion
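The insert-schema helpers above drive what `$repository.create` accepts: properties carrying `PG_DEFAULT` (e.g. `pg.createdAt()`) or the TypeBox `Optional` flag drop out of the required keys of `StaticInsert`. A minimal sketch, using the `t`/`pg` helpers shown in the package's own examples and a hypothetical `articles` table:

```ts
import { $entity, $repository } from "alepha/postgres";
import { pg, t } from "alepha";

const Article = $entity({
  name: "articles",
  schema: t.object({
    id: pg.primaryKey(t.uuid()),
    title: t.string(),
    summary: t.optional(t.string()), // Optional flag -> optional on insert
    createdAt: pg.createdAt(),       // PG_DEFAULT -> optional on insert
    updatedAt: pg.updatedAt(),
  }),
});

class ArticleService {
  articles = $repository({ table: Article });

  async publish(title: string) {
    // StaticInsert only requires `id` and `title` here;
    // defaulted and optional columns may be omitted.
    return this.articles.create({ id: crypto.randomUUID(), title });
  }
}
```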
106
+ //#region src/schemas/updateSchema.d.ts
107
+ /**
108
+ * Transforms a TObject schema for update operations.
109
+ * All optional properties at the root level are made nullable (i.e., `T | null`).
110
+ * This allows an API endpoint to explicitly accept `null` to clear an optional field in the database.
111
+ *
112
+ * @example
113
+ * Before: { name?: string; age: number; }
114
+ * After: { name?: string | null; age: number; }
115
+ */
116
+ type TObjectUpdate<T extends TObject> = TObject<{ [K in keyof T["properties"]]: T["properties"][K] extends TOptional<infer U> ? TOptional<TUnion<[U, TNull]>> : T["properties"][K] }>;
111
117
  //#endregion
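A sketch of the resulting update type, assuming `TObjectUpdate` is part of the public exports and using the `t` helper from the package's examples:

```ts
import { t, type Static } from "alepha";
import type { TObjectUpdate } from "alepha/postgres";

// Hypothetical schema: `nickname` is optional, `email` is required.
const userSchema = t.object({
  email: t.string({ format: "email" }),
  nickname: t.optional(t.string()),
});

// Per the mapped type above, optional root properties become nullable:
//   { email: string; nickname?: string | null }
type UserUpdate = Static<TObjectUpdate<typeof userSchema>>;

const clearNickname: UserUpdate = {
  email: "ada@example.com",
  nickname: null, // an explicit null clears the optional column
};
```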
112
118
  //#region src/helpers/schemaToPgColumns.d.ts
113
119
  /**
114
- * Convert a Typebox Schema to Drizzle ORM Postgres columns (yes)
120
+ * Convert a Typebox Schema to Drizzle ORM Postgres columns
115
121
  */
116
122
  declare const schemaToPgColumns: <T extends TObject>(schema: T) => FromSchema<T>;
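A hedged sketch of the helper in isolation, assuming `schemaToPgColumns` is exported from `alepha/postgres`; the resulting column-builder map can be handed to a plain Drizzle table definition:

```ts
import { schemaToPgColumns } from "alepha/postgres";
import { pgTable } from "drizzle-orm/pg-core";
import { pg, t } from "alepha";

const postSchema = t.object({
  id: pg.primaryKey(t.uuid()),
  title: t.string(),
  views: t.integer(),
});

// Each TypeBox property is mapped to the matching Drizzle PgColumnBuilder.
const posts = pgTable("posts", schemaToPgColumns(postSchema));
```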
117
123
  /**
@@ -128,7 +134,7 @@ declare const mapFieldToColumn: (name: string, value: TSchema$1) => pg$1.PgSeria
128
134
  data: Buffer<ArrayBufferLike>;
129
135
  driverParam: unknown;
130
136
  enumValues: undefined;
131
- }> | pg$1.PgTimestampStringBuilderInitial<string> | pg$1.PgDateStringBuilderInitial<string> | pg$1.PgTextBuilderInitial<string, [string, ...string[]]> | pg$1.PgBooleanBuilderInitial<string> | drizzle_orm0.$Type<pg$1.PgCustomColumnBuilder<{
137
+ }> | pg$1.PgTimestampStringBuilderInitial<string> | pg$1.PgDateStringBuilderInitial<string> | pg$1.PgTextBuilderInitial<string, [string, ...string[]]> | pg$1.PgBooleanBuilderInitial<string> | drizzle_orm6.$Type<pg$1.PgCustomColumnBuilder<{
132
138
  name: string;
133
139
  dataType: "custom";
134
140
  columnType: "PgCustomColumn";
@@ -141,14 +147,14 @@ declare const mapFieldToColumn: (name: string, value: TSchema$1) => pg$1.PgSeria
141
147
  }>, {
142
148
  [x: string]: unknown;
143
149
  [x: number]: unknown;
144
- }> | drizzle_orm0.$Type<pg$1.PgCustomColumnBuilder<{
150
+ }> | drizzle_orm6.$Type<pg$1.PgCustomColumnBuilder<{
145
151
  name: string;
146
152
  dataType: "custom";
147
153
  columnType: "PgCustomColumn";
148
154
  data: {};
149
155
  driverParam: string;
150
156
  enumValues: undefined;
151
- }>, {}> | drizzle_orm0.$Type<pg$1.PgCustomColumnBuilder<{
157
+ }>, {}> | drizzle_orm6.$Type<pg$1.PgCustomColumnBuilder<{
152
158
  name: string;
153
159
  dataType: "custom";
154
160
  columnType: "PgCustomColumn";
@@ -273,32 +279,215 @@ type FromSchema<T extends TObject> = { [key in keyof T["properties"]]: PgColumnB
273
279
  type PgTableWithColumnsAndSchema<T extends TableConfig, R extends TObject> = PgTableWithColumns<T> & {
274
280
  get $table(): PgTableWithColumns<T>;
275
281
  get $schema(): R;
276
- get $insertSchema(): TInsertObject<R>;
282
+ get $insertSchema(): TObjectInsert<R>;
283
+ get $updateSchema(): TObjectUpdate<R>;
277
284
  };
278
- interface TableLike<T extends TObject = TObject> {
279
- $schema: T;
280
- }
281
- //# sourceMappingURL=schemaToPgColumns.d.ts.map
282
285
  //#endregion
283
286
  //#region src/descriptors/$entity.d.ts
284
287
  /**
285
- * Declare a new entity in the database.
286
- * This descriptor alone does not create the table, it only describes it.
287
- * It must be used with `$repository` to create the table and perform operations on it.
288
+ * Creates a database entity descriptor that defines table structure using TypeBox schemas.
289
+ *
290
+ * This descriptor provides a type-safe way to define database tables using JSON Schema
291
+ * syntax while generating the necessary database metadata for migrations and operations.
292
+ * It integrates with Drizzle ORM under the hood and works seamlessly with the $repository
293
+ * descriptor for complete database functionality.
294
+ *
295
+ * **Key Features**
296
+ *
297
+ * - **Type-Safe Schema Definition**: Uses TypeBox for full TypeScript type inference
298
+ * - **Automatic Table Generation**: Creates Drizzle ORM table structures automatically
299
+ * - **Index Management**: Supports single-column, multi-column, and unique indexes
300
+ * - **Constraint Support**: Foreign keys, unique constraints, and check constraints
301
+ * - **Audit Fields**: Built-in support for created_at, updated_at, deleted_at, and version fields
302
+ * - **Schema Validation**: Automatic insert/update schema generation with validation
303
+ *
304
+ * **Important Note**:
305
+ * This descriptor only defines the table structure - it does not create the physical
306
+ * database table. Use it with $repository to perform actual database operations,
307
+ * and run migrations to create the tables in your database.
288
308
  *
289
- * This is a convenience function to create a table with a json schema.
290
- * For now, it creates a drizzle-orm table under the hood.
309
+ * **Use Cases**
310
+ *
311
+ * Essential for defining database schema in type-safe applications:
312
+ * - User management and authentication tables
313
+ * - Business domain entities (products, orders, customers)
314
+ * - Audit and logging tables
315
+ * - Junction tables for many-to-many relationships
316
+ * - Configuration and settings tables
317
+ *
318
+ * @example
319
+ * **Basic entity with indexes:**
291
320
  * ```ts
292
321
  * import { $entity } from "alepha/postgres";
322
+ * import { pg, t } from "alepha";
293
323
  *
294
324
  * const User = $entity({
295
- * name: "user",
325
+ * name: "users",
296
326
  * schema: t.object({
297
327
  * id: pg.primaryKey(t.uuid()),
298
- * name: t.string(),
299
- * email: t.string(),
328
+ * email: t.string({ format: "email" }),
329
+ * username: t.string({ minLength: 3, maxLength: 30 }),
330
+ * firstName: t.string(),
331
+ * lastName: t.string(),
332
+ * isActive: t.boolean({ default: true }),
333
+ * createdAt: pg.createdAt(),
334
+ * updatedAt: pg.updatedAt(),
335
+ * deletedAt: pg.deletedAt()
336
+ * }),
337
+ * indexes: [
338
+ * "email", // Simple index on email
339
+ * "username", // Simple index on username
340
+ * { column: "email", unique: true }, // Unique constraint on email
341
+ * { columns: ["firstName", "lastName"] } // Composite index
342
+ * ]
343
+ * });
344
+ * ```
345
+ *
346
+ * @example
347
+ * **E-commerce product entity with relationships:**
348
+ * ```ts
349
+ * const Product = $entity({
350
+ * name: "products",
351
+ * schema: t.object({
352
+ * id: pg.primaryKey(t.uuid()),
353
+ * sku: t.string({ minLength: 3 }),
354
+ * name: t.string({ minLength: 1, maxLength: 200 }),
355
+ * description: t.optional(t.string()),
356
+ * price: t.number({ minimum: 0 }),
357
+ * categoryId: t.string({ format: "uuid" }),
358
+ * inStock: t.boolean({ default: true }),
359
+ * stockQuantity: t.integer({ minimum: 0, default: 0 }),
360
+ * tags: t.optional(t.array(t.string())), // PostgreSQL array column
361
+ * metadata: t.optional(t.record(t.string(), t.any())), // JSONB column
362
+ * version: pg.version(),
363
+ * createdAt: pg.createdAt(),
364
+ * updatedAt: pg.updatedAt()
365
+ * }),
366
+ * indexes: [
367
+ * { column: "sku", unique: true }, // Unique SKU
368
+ * "categoryId", // Foreign key index
369
+ * "inStock", // Filter frequently by stock status
370
+ * { columns: ["categoryId", "inStock"] }, // Composite for category + stock queries
371
+ * "createdAt" // For date-based queries
372
+ * ],
373
+ * foreignKeys: [
374
+ * {
375
+ * name: "fk_product_category",
376
+ * columns: ["categoryId"],
377
+ * foreignColumns: [Category.id] // Reference to Category entity
378
+ * }
379
+ * ]
380
+ * });
381
+ * ```
382
+ *
383
+ * @example
384
+ * **Audit log entity with constraints:**
385
+ * ```ts
386
+ * const AuditLog = $entity({
387
+ * name: "audit_logs",
388
+ * schema: t.object({
389
+ * id: pg.primaryKey(t.uuid()),
390
+ * tableName: t.string(),
391
+ * recordId: t.string(),
392
+ * action: t.enum(["CREATE", "UPDATE", "DELETE"]),
393
+ * userId: t.optional(t.string({ format: "uuid" })),
394
+ * oldValues: t.optional(t.record(t.string(), t.any())),
395
+ * newValues: t.optional(t.record(t.string(), t.any())),
396
+ * timestamp: pg.createdAt(),
397
+ * ipAddress: t.optional(t.string()),
398
+ * userAgent: t.optional(t.string())
399
+ * }),
400
+ * indexes: [
401
+ * "tableName",
402
+ * "recordId",
403
+ * "userId",
404
+ * "action",
405
+ * { columns: ["tableName", "recordId"] }, // Find all changes to a record
406
+ * { columns: ["userId", "timestamp"] }, // User activity timeline
407
+ * "timestamp" // Time-based queries
408
+ * ],
409
+ * constraints: [
410
+ * {
411
+ * name: "valid_action_values",
412
+ * columns: ["action"],
413
+ * check: sql`action IN ('CREATE', 'UPDATE', 'DELETE')`
414
+ * }
415
+ * ]
416
+ * });
417
+ * ```
418
+ *
419
+ * @example
420
+ * **Many-to-many junction table:**
421
+ * ```ts
422
+ * const UserRole = $entity({
423
+ * name: "user_roles",
424
+ * schema: t.object({
425
+ * id: pg.primaryKey(t.uuid()),
426
+ * userId: t.string({ format: "uuid" }),
427
+ * roleId: t.string({ format: "uuid" }),
428
+ * assignedBy: t.string({ format: "uuid" }),
429
+ * assignedAt: pg.createdAt(),
430
+ * expiresAt: t.optional(t.datetime())
300
431
  * }),
301
- * indexes: ["email"],
432
+ * indexes: [
433
+ * "userId",
434
+ * "roleId",
435
+ * "assignedBy",
436
+ * { columns: ["userId", "roleId"], unique: true }, // Prevent duplicate assignments
437
+ * "expiresAt" // For cleanup of expired roles
438
+ * ],
439
+ * foreignKeys: [
440
+ * {
441
+ * columns: ["userId"],
442
+ * foreignColumns: [User.id]
443
+ * },
444
+ * {
445
+ * columns: ["roleId"],
446
+ * foreignColumns: [Role.id]
447
+ * },
448
+ * {
449
+ * columns: ["assignedBy"],
450
+ * foreignColumns: [User.id]
451
+ * }
452
+ * ]
453
+ * });
454
+ * ```
455
+ *
456
+ * @example
457
+ * **Entity with custom Drizzle configuration:**
458
+ * ```ts
459
+ * const Order = $entity({
460
+ * name: "orders",
461
+ * schema: t.object({
462
+ * id: pg.primaryKey(t.uuid()),
463
+ * orderNumber: t.string(),
464
+ * customerId: t.string({ format: "uuid" }),
465
+ * status: t.enum(["pending", "processing", "shipped", "delivered"]),
466
+ * totalAmount: t.number({ minimum: 0 }),
467
+ * currency: t.string({ default: "USD" }),
468
+ * notes: t.optional(t.string()),
469
+ * createdAt: pg.createdAt(),
470
+ * updatedAt: pg.updatedAt(),
471
+ * version: pg.version()
472
+ * }),
473
+ * indexes: [
474
+ * { column: "orderNumber", unique: true },
475
+ * "customerId",
476
+ * "status",
477
+ * "createdAt",
478
+ * { columns: ["customerId", "status"] }
479
+ * ],
480
+ * // Advanced Drizzle ORM configuration
481
+ * config: (table) => [
482
+ * // Custom index with specific options
483
+ * index("idx_orders_amount_status")
484
+ * .on(table.totalAmount, table.status)
485
+ * .where(sql`status != 'cancelled'`), // Partial index
486
+ *
487
+ * // Full-text search index (PostgreSQL specific)
488
+ * index("idx_orders_search")
489
+ * .using("gin", table.notes)
490
+ * ]
302
491
  * });
303
492
  * ```
304
493
  *
@@ -310,87 +499,293 @@ declare const $entity: {
310
499
  };
311
500
  interface EntityDescriptorOptions<TTableName extends string, T extends TObject$1, Keys = keyof Static$1<T>> {
312
501
  /**
313
- * The name of the table. This is the name that will be used in the database.
314
- * @example
315
- * name: "user"
502
+ * The database table name that will be created for this entity.
503
+ *
504
+ * This name:
505
+ * - Must be unique within your database schema
506
+ * - Should follow your database naming conventions (typically snake_case)
507
+ * - Will be used in generated SQL queries and migrations
508
+ * - Should be descriptive of the entity's purpose
509
+ *
510
+ * **Naming Guidelines**:
511
+ * - Use plural nouns for table names ("users", "products", "orders")
512
+ * - Use snake_case for multi-word names ("user_profiles", "order_items")
513
+ * - Keep names concise but descriptive
514
+ * - Avoid SQL reserved words
515
+ *
516
+ * @example "users"
517
+ * @example "product_categories"
518
+ * @example "user_roles"
519
+ * @example "audit_logs"
316
520
  */
317
521
  name: TTableName;
318
522
  /**
319
- * The schema of the table. This is a TypeBox schema that describes the columns and their types.
523
+ * TypeBox schema defining the table structure and column types.
524
+ *
525
+ * This schema:
526
+ * - Defines all table columns with their types and constraints
527
+ * - Provides full TypeScript type inference for the entity
528
+ * - Supports validation rules and default values
529
+ * - Enables automatic insert/update schema generation
530
+ * - Must include exactly one primary key field marked with `pg.primaryKey()`
531
+ *
532
+ * **Supported PostgreSQL Types**:
533
+ * - `pg.primaryKey(t.uuid())` - UUID primary key
534
+ * - `t.string()` - VARCHAR column
535
+ * - `t.integer()`, `t.number()` - Numeric columns
536
+ * - `t.boolean()` - Boolean column
537
+ * - `t.array(t.string())` - PostgreSQL array column
538
+ * - `t.record(t.string(), t.any())` - JSONB column
539
+ * - `pg.createdAt()`, `pg.updatedAt()`, `pg.deletedAt()` - Audit timestamps
540
+ * - `pg.version()` - Optimistic locking version field
541
+ *
542
+ * **Schema Best Practices**:
543
+ * - Always include a primary key
544
+ * - Use appropriate TypeBox constraints (minLength, format, etc.)
545
+ * - Add audit fields for trackability
546
+ * - Use optional fields for nullable columns
547
+ * - Include foreign key columns for relationships
548
+ *
320
549
  * @example
321
- * schema: t.object({
322
- * id: t.uuid(),
323
- * name: t.string(),
324
- * email: t.string(),
325
- * phoneNumber: t.string(),
550
+ * ```ts
551
+ * t.object({
552
+ * id: pg.primaryKey(t.uuid()),
553
+ * email: t.string({ format: "email" }),
554
+ * firstName: t.string({ minLength: 1, maxLength: 100 }),
555
+ * lastName: t.string({ minLength: 1, maxLength: 100 }),
556
+ * age: t.optional(t.integer({ minimum: 0, maximum: 150 })),
557
+ * isActive: t.boolean({ default: true }),
558
+ * preferences: t.optional(t.record(t.string(), t.any())),
559
+ * tags: t.optional(t.array(t.string())),
560
+ * createdAt: pg.createdAt(),
561
+ * updatedAt: pg.updatedAt(),
562
+ * version: pg.version()
326
563
  * })
564
+ * ```
327
565
  */
328
566
  schema: T;
329
567
  /**
330
- * The indexes to create for the table. This can be a string or an object with the column name and options.
331
- * @example
332
- * indexes: ["name", { column: "email", unique: true }]
568
+ * Database indexes to create for query optimization.
569
+ *
570
+ * Indexes improve query performance but consume disk space and slow down writes.
571
+ * Choose indexes based on your actual query patterns and performance requirements.
572
+ *
573
+ * **Index Types**:
574
+ * - **Simple string**: Creates a single-column index
575
+ * - **Single column object**: Creates index on one column with options
576
+ * - **Multi-column object**: Creates composite index on multiple columns
577
+ *
578
+ * **Index Guidelines**:
579
+ * - Index frequently queried columns (WHERE, ORDER BY, JOIN conditions)
580
+ * - Create unique indexes for business constraints
581
+ * - Use composite indexes for multi-column queries
582
+ * - Index foreign key columns for join performance
583
+ * - Monitor index usage and remove unused indexes
584
+ *
585
+ * **Performance Considerations**:
586
+ * - Each index increases storage requirements
587
+ * - Indexes slow down INSERT/UPDATE/DELETE operations
588
+ * - PostgreSQL can use multiple indexes in complex queries
589
+ * - Partial indexes can be more efficient for filtered queries
590
+ *
591
+ * @example ["email", "createdAt", { column: "username", unique: true }]
592
+ * @example [{ columns: ["userId", "status"], name: "idx_user_status" }]
593
+ * @example ["categoryId", { columns: ["price", "inStock"] }]
333
594
  */
334
595
  indexes?: (Keys | {
596
+ /**
597
+ * Single column to index.
598
+ */
335
599
  column: Keys;
600
+ /**
601
+ * Whether this should be a unique index (enforces uniqueness constraint).
602
+ */
336
603
  unique?: boolean;
604
+ /**
605
+ * Custom name for the index. If not provided, generates name automatically.
606
+ */
337
607
  name?: string;
338
608
  } | {
609
+ /**
610
+ * Multiple columns for composite index (order matters for query optimization).
611
+ */
339
612
  columns: Keys[];
613
+ /**
614
+ * Whether this should be a unique index (enforces uniqueness constraint).
615
+ */
340
616
  unique?: boolean;
617
+ /**
618
+ * Custom name for the index. If not provided, generates name automatically.
619
+ */
341
620
  name?: string;
342
621
  })[];
622
+ /**
623
+ * Foreign key constraints to maintain referential integrity.
624
+ *
625
+ * Foreign keys ensure that values in specified columns must exist in the referenced table.
626
+ * They prevent orphaned records and maintain database consistency.
627
+ *
628
+ * **Foreign Key Benefits**:
629
+ * - Prevents invalid references to non-existent records
630
+ * - Maintains data integrity automatically
631
+ * - Provides clear schema documentation of relationships
632
+ * - Enables cascade operations (DELETE, UPDATE)
633
+ *
634
+ * **Considerations**:
635
+ * - Foreign keys can impact performance on large tables
636
+ * - They prevent deletion of referenced records
637
+ * - Consider cascade options for related data cleanup
638
+ *
639
+ * @example
640
+ * ```ts
641
+ * foreignKeys: [
642
+ * {
643
+ * name: "fk_user_role",
644
+ * columns: ["roleId"],
645
+ * foreignColumns: [Role.id]
646
+ * },
647
+ * {
648
+ * columns: ["createdBy"],
649
+ * foreignColumns: [User.id]
650
+ * }
651
+ * ]
652
+ * ```
653
+ */
343
654
  foreignKeys?: Array<{
655
+ /**
656
+ * Optional name for the foreign key constraint.
657
+ */
344
658
  name?: string;
659
+ /**
660
+ * Local columns that reference the foreign table.
661
+ */
345
662
  columns: Array<keyof Static$1<T>>;
663
+ /**
664
+ * Referenced columns in the foreign table.
665
+ */
346
666
  foreignColumns: Array<AnyPgColumn>;
347
667
  }>;
668
+ /**
669
+ * Additional table constraints for data validation.
670
+ *
671
+ * Constraints enforce business rules at the database level, providing
672
+ * an additional layer of data integrity beyond application validation.
673
+ *
674
+ * **Constraint Types**:
675
+ * - **Unique constraints**: Prevent duplicate values across columns
676
+ * - **Check constraints**: Enforce custom validation rules with SQL expressions
677
+ *
678
+ * **Use Cases**:
679
+ * - Enforce unique combinations of columns
680
+ * - Validate value ranges or patterns
681
+ * - Ensure consistent data states
682
+ * - Implement business rule validation
683
+ *
684
+ * @example
685
+ * ```ts
686
+ * constraints: [
687
+ * {
688
+ * name: "unique_user_email",
689
+ * columns: ["email"],
690
+ * unique: true
691
+ * },
692
+ * {
693
+ * name: "valid_age_range",
694
+ * columns: ["age"],
695
+ * check: sql`age >= 0 AND age <= 150`
696
+ * },
697
+ * {
698
+ * name: "unique_user_username_per_tenant",
699
+ * columns: ["tenantId", "username"],
700
+ * unique: true
701
+ * }
702
+ * ]
703
+ * ```
704
+ */
348
705
  constraints?: Array<{
706
+ /**
707
+ * Columns involved in this constraint.
708
+ */
349
709
  columns: Array<keyof Static$1<T>>;
710
+ /**
711
+ * Optional name for the constraint.
712
+ */
350
713
  name?: string;
714
+ /**
715
+ * Whether this is a unique constraint.
716
+ */
351
717
  unique?: boolean | {};
718
+ /**
719
+ * SQL expression for check constraint validation.
720
+ */
352
721
  check?: SQL;
353
722
  }>;
354
723
  /**
355
- * Extra configuration for the table. See drizzle-orm documentation for more details.
724
+ * Advanced Drizzle ORM configuration for complex table setups.
725
+ *
726
+ * This allows you to use advanced Drizzle ORM features that aren't covered
727
+ * by the simplified options above. Use this for:
728
+ * - Custom index types (GIN, GIST, etc.)
729
+ * - Partial indexes with WHERE clauses
730
+ * - Advanced constraint configurations
731
+ * - PostgreSQL-specific features
732
+ *
733
+ * **When to Use**:
734
+ * - Need PostgreSQL-specific index types
735
+ * - Require partial indexes for performance
736
+ * - Want fine-grained control over table creation
737
+ * - Using advanced PostgreSQL features
738
+ *
739
+ * See Drizzle ORM documentation for complete configuration options.
356
740
  *
357
- * @param self The table descriptor.
358
- * @returns The extra configuration for the table.
741
+ * @param self - The table columns available for configuration
742
+ * @returns Array of Drizzle table configuration objects
743
+ *
744
+ * @example
745
+ * ```ts
746
+ * config: (table) => [
747
+ * // Partial index for active users only
748
+ * index("idx_active_users_email")
749
+ * .on(table.email)
750
+ * .where(sql`is_active = true`),
751
+ *
752
+ * // GIN index for full-text search
753
+ * index("idx_content_search")
754
+ * .using("gin", table.searchVector),
755
+ *
756
+ * // Unique constraint with custom options
757
+ * uniqueIndex("idx_unique_slug_per_tenant")
758
+ * .on(table.tenantId, table.slug)
759
+ * ]
760
+ * ```
359
761
  */
360
762
  config?: (self: BuildExtraConfigColumns<string, FromSchema<T>, "pg">) => PgTableExtraConfigValue[];
361
763
  }
362
764
  type Entity<T extends TObject$1> = PgTableWithColumnsAndSchema<PgTableConfig<string, T, FromSchema<T>>, T>;
363
- /**
364
- * Create a table with a json schema.
365
- *
366
- * @param name The name of the table.
367
- * @param schema The json schema of the table.
368
- * @param extraConfig Extra configuration for the table.
369
- */
370
- declare const pgTableSchema: <TTableName extends string, TSchema extends TObject$1, TColumnsMap extends FromSchema<TSchema>>(name: TTableName, schema: TSchema, extraConfig?: (self: BuildExtraConfigColumns<TTableName, TColumnsMap, "pg">) => PgTableExtraConfigValue[]) => PgTableWithColumnsAndSchema<PgTableConfig<TTableName, TSchema, TColumnsMap>, TSchema>;
371
765
  type PgTableConfig<TTableName extends string, TSchema extends TObject$1, TColumnsMap extends FromSchema<TSchema>> = {
372
766
  name: TTableName;
373
767
  schema: any;
374
768
  columns: BuildColumns<TTableName, TColumnsMap, "pg">;
375
769
  dialect: "pg";
376
770
  };
377
- //# sourceMappingURL=$entity.d.ts.map
378
771
  //#endregion
379
- //#region src/helpers/nullToUndefined.d.ts
380
- /**
381
- * Replaces all null values in an object with undefined.
382
- * We need this for converting all nulls from Drizzle outputs.
383
- *
384
- * @param value - The object to be processed.
385
- * @return A new object with all null values replaced with undefined.
386
- */
387
- declare const nullToUndefined: <T extends object>(value: T) => NullToUndefined<T>;
772
+ //#region src/errors/PgError.d.ts
773
+ declare class PgError extends AlephaError {
774
+ name: string;
775
+ constructor(message: string, cause?: unknown);
776
+ }
777
+ //#endregion
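`PgError` wraps driver failures in an `AlephaError` subclass and keeps the original error as `cause`; a minimal sketch of catching it around a repository call, assuming the class is re-exported by `alepha/postgres`:

```ts
import { PgError } from "alepha/postgres";

// `repository` stands for any $repository instance, as in the examples further below.
async function createOrReport(
  repository: { create(data: unknown): Promise<unknown> },
  data: unknown,
) {
  try {
    return await repository.create(data);
  } catch (error) {
    if (error instanceof PgError) {
      // Per the constructor signature above, the underlying driver error is kept on `cause`.
      console.error("database operation failed:", error.message, error.cause);
    }
    throw error;
  }
}
```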
778
+ //#region src/helpers/pgAttr.d.ts
388
779
  /**
389
- * Replaces all null values in an object with undefined.
780
+ * Type representation.
390
781
  */
391
- type NullToUndefined<T> = T extends null ? undefined : T extends null | undefined | string | number | boolean | symbol | bigint | Function | Date | RegExp ? T : T extends Array<infer U> ? Array<NullToUndefined<U>> : T extends Map<infer K, infer V> ? Map<K, NullToUndefined<V>> : T extends Set<infer U> ? Set<NullToUndefined<U>> : T extends object ? { [K in keyof T]: NullToUndefined<T[K]> } : unknown;
392
- type NullifyIfOptional<T> = { [K in keyof T]: undefined extends T[K] ? T[K] | null : T[K] };
393
- //# sourceMappingURL=nullToUndefined.d.ts.map
782
+ type PgAttr<T extends TSchema$1, TAttr extends PgSymbolKeys> = T & { [K in TAttr]: PgSymbols[K] };
783
+ interface PgAttrField {
784
+ key: string;
785
+ type: TSchema$1;
786
+ data: any;
787
+ nested?: any[];
788
+ }
394
789
  //#endregion
395
790
  //#region src/interfaces/FilterOperators.d.ts
396
791
  interface FilterOperators<TValue> {
@@ -747,27 +1142,10 @@ interface FilterOperators<TValue> {
747
1142
  */
748
1143
  arrayOverlaps?: TValue;
749
1144
  }
750
- //# sourceMappingURL=FilterOperators.d.ts.map
751
- //#endregion
752
- //#region src/interfaces/InferInsert.d.ts
753
- /**
754
- * Enhance Typebox with a support of "Default" (PG_DEFAULT).
755
- */
756
- type InferInsert<T extends TObject$1> = StaticEntry<T> & StaticDefaultEntry<T>;
757
- type StaticDefaultEntry<T extends TObject$1> = { [K in keyof T["properties"] as T["properties"][K] extends {
758
- [PG_DEFAULT]: any;
759
- } | {
760
- [OptionalKind]: "Optional";
761
- } ? K : never]?: Static$1<T["properties"][K]> };
762
- type StaticEntry<T extends TObject$1> = { [K in keyof T["properties"] as T["properties"][K] extends {
763
- [PG_DEFAULT]: any;
764
- } | {
765
- [OptionalKind]: "Optional";
766
- } ? never : K]: Static$1<T["properties"][K]> };
767
- //# sourceMappingURL=InferInsert.d.ts.map
768
1145
  //#endregion
769
1146
  //#region src/interfaces/PgQueryWhere.d.ts
770
- type PgQueryWhere<T extends object> = { [Key in keyof T]?: FilterOperators<T[Key]> } & {
1147
+ type PgQueryWhereOrSQL<T extends object> = SQLWrapper | PgQueryWhere<T>;
1148
+ type PgQueryWhere<T extends object> = { [Key in keyof T]?: FilterOperators<T[Key]> | T[Key] } & {
771
1149
  /**
772
1150
  * Combine a list of conditions with the `and` operator. Conditions
773
1151
  * that are equal `undefined` are automatically ignored.
@@ -784,7 +1162,7 @@ type PgQueryWhere<T extends object> = { [Key in keyof T]?: FilterOperators<T[Key
784
1162
  * )
785
1163
  * ```
786
1164
  */
787
- and?: Array<PgQueryWhere<T> | SQL>;
1165
+ and?: Array<PgQueryWhereOrSQL<T>>;
788
1166
  /**
789
1167
  * Combine a list of conditions with the `or` operator. Conditions
790
1168
  * that are equal `undefined` are automatically ignored.
@@ -801,7 +1179,7 @@ type PgQueryWhere<T extends object> = { [Key in keyof T]?: FilterOperators<T[Key
801
1179
  * )
802
1180
  * ```
803
1181
  */
804
- or?: Array<PgQueryWhere<T> | SQL>;
1182
+ or?: Array<PgQueryWhereOrSQL<T>>;
805
1183
  /**
806
1184
  * Negate the meaning of an expression using the `not` keyword.
807
1185
  *
@@ -813,7 +1191,7 @@ type PgQueryWhere<T extends object> = { [Key in keyof T]?: FilterOperators<T[Key
813
1191
  * .where(not(inArray(cars.make, ['GM', 'Ford'])))
814
1192
  * ```
815
1193
  */
816
- not?: PgQueryWhere<T>;
1194
+ not?: PgQueryWhereOrSQL<T>;
817
1195
  /**
818
1196
  * Test whether a subquery evaluates to have any rows.
819
1197
  *
@@ -836,36 +1214,18 @@ type PgQueryWhere<T extends object> = { [Key in keyof T]?: FilterOperators<T[Key
836
1214
  */
837
1215
  exists?: SQLWrapper;
838
1216
  };
839
- //# sourceMappingURL=PgQueryWhere.d.ts.map
840
1217
  //#endregion
841
1218
  //#region src/interfaces/PgQuery.d.ts
842
1219
  interface PgQuery<T extends TObject$1, Select extends (keyof Static$1<T>)[] = []> {
843
1220
  columns?: Select;
844
1221
  distinct?: boolean;
845
- where?: PgQueryWhereWithMany<T> | SQLWrapper;
1222
+ where?: PgQueryWhereOrSQL<Static$1<T>>;
846
1223
  limit?: number;
847
1224
  offset?: number;
848
1225
  sort?: { [key in keyof Static$1<T>]?: "asc" | "desc" };
849
1226
  groupBy?: (keyof Static$1<T>)[];
850
- relations?: PgQueryWithMap<T>;
851
1227
  }
852
1228
  type PgQueryResult<T extends TObject$1, Select extends (keyof Static$1<T>)[]> = TPick<T, Select>;
853
- type PgQueryWhereWithMany<T extends TObject$1> = PgQueryWhere<Static$1<RemoveManyRelations<T>>> & ExtractManyRelations<T>;
854
- type ExtractManyRelations<T extends TObject$1> = { [K in keyof T["properties"] as T["properties"][K] extends {
855
- [PG_MANY]: any;
856
- } ? T["properties"][K] extends TArray ? T["properties"][K]["items"] extends TObject$1 ? K : never : never : never]?: PgQueryWhere<Static$1<T["properties"][K]["items"]>> };
857
- type RemoveManyRelations<T extends TObject$1> = TObject$1<{ [K in keyof T["properties"] as T["properties"][K] extends {
858
- [PG_MANY]: any;
859
- } ? never : K]: T["properties"][K] }>;
860
- type PgQueryWithMap<T extends TObject$1> = { [K in keyof T["properties"] as T["properties"][K] extends {
861
- [PG_MANY]: any;
862
- } ? K : never]?: T["properties"][K] extends TObject$1 ? PgQueryWith<T["properties"][K]> : T["properties"][K] extends TArray ? PgQueryWith<T["properties"][K]> : never };
863
- type PgQueryWith<T extends TObject$1 | TArray> = true | {
864
- relations?: {
865
- [key: string]: PgQueryWith<T>;
866
- };
867
- };
868
- //# sourceMappingURL=PgQuery.d.ts.map
869
1229
  //#endregion
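The widened types above mean a `where` clause can mix bare values (equality shorthand), `FilterOperators`, and raw Drizzle `SQL` fragments. A sketch against a hypothetical `products` repository (column names are illustrative):

```ts
import { sql } from "drizzle-orm";

async function recentAffordableProducts(products: any /* a $repository instance */) {
  return products.find({
    where: {
      inStock: true,                                 // bare value = equality shorthand
      price: { gte: 10, lte: 100 },                  // FilterOperators still apply
      and: [
        { categoryId: "some-category-id" },          // nested PgQueryWhere
        sql`created_at > now() - interval '7 days'`, // raw SQL via PgQueryWhereOrSQL
      ],
    },
    sort: { price: "asc" },
    limit: 20,
  });
}
```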
870
1230
  //#region src/providers/drivers/PostgresProvider.d.ts
871
1231
  type SQLLike = SQLWrapper | string;
@@ -876,16 +1236,14 @@ declare abstract class PostgresProvider {
876
1236
  abstract get dialect(): string;
877
1237
  abstract execute<T extends TObject$1 = any>(query: SQLLike, schema?: T): Promise<Array<T extends TObject$1 ? Static$1<T> : any>>;
878
1238
  }
879
- //# sourceMappingURL=PostgresProvider.d.ts.map
880
1239
  //#endregion
881
1240
  //#region src/schemas/pageQuerySchema.d.ts
882
- declare const pageQuerySchema: _sinclair_typebox2.TObject<{
883
- page: _sinclair_typebox2.TOptional<_sinclair_typebox2.TNumber>;
884
- size: _sinclair_typebox2.TOptional<_sinclair_typebox2.TNumber>;
885
- sort: _sinclair_typebox2.TOptional<_sinclair_typebox2.TString>;
1241
+ declare const pageQuerySchema: _sinclair_typebox0.TObject<{
1242
+ page: _sinclair_typebox0.TOptional<_sinclair_typebox0.TNumber>;
1243
+ size: _sinclair_typebox0.TOptional<_sinclair_typebox0.TNumber>;
1244
+ sort: _sinclair_typebox0.TOptional<_sinclair_typebox0.TString>;
886
1245
  }>;
887
1246
  type PageQuery = Static<typeof pageQuerySchema>;
888
- //# sourceMappingURL=pageQuerySchema.d.ts.map
889
1247
  //#endregion
890
1248
  //#region src/schemas/pageSchema.d.ts
891
1249
  /**
@@ -905,8 +1263,6 @@ type TPage<T extends TObject$1 | TIntersect | TRecord> = TObject$1<{
905
1263
  number: TInteger;
906
1264
  size: TInteger;
907
1265
  totalElements: TOptionalWithFlag<TInteger, true>;
908
- queryDuration: TOptionalWithFlag<TInteger, true>;
909
- countDuration: TOptionalWithFlag<TInteger, true>;
910
1266
  }>;
911
1267
  }>;
912
1268
  type Page<T> = {
@@ -919,14 +1275,321 @@ type Page<T> = {
919
1275
  number: number;
920
1276
  size: number;
921
1277
  totalElements?: number;
922
- queryDuration?: number;
923
- countDuration?: number;
924
1278
  };
925
1279
  };
926
- //# sourceMappingURL=pageSchema.d.ts.map
927
1280
  //#endregion
928
1281
  //#region src/descriptors/$repository.d.ts
929
1282
  /**
1283
+ * Creates a repository descriptor for database operations on a defined entity.
1284
+ *
1285
+ * This descriptor provides a comprehensive, type-safe interface for performing all
1286
+ * database operations on entities defined with $entity. It offers a rich set of
1287
+ * CRUD operations, advanced querying capabilities, pagination, transactions, and
1288
+ * built-in support for audit trails and soft deletes.
1289
+ *
1290
+ * **Key Features**
1291
+ *
1292
+ * - **Complete CRUD Operations**: Create, read, update, delete with full type safety
1293
+ * - **Advanced Querying**: Complex WHERE conditions, sorting, pagination, and aggregations
1294
+ * - **Transaction Support**: Database transactions for consistency and atomicity
1295
+ * - **Soft Delete Support**: Built-in soft delete functionality with `pg.deletedAt()` fields
1296
+ * - **Optimistic Locking**: Version-based conflict resolution with `pg.version()` fields
1297
+ * - **Audit Trail Integration**: Automatic handling of `createdAt`, `updatedAt` timestamps
1298
+ * - **Raw SQL Support**: Execute custom SQL queries when needed
1299
+ * - **Pagination**: Built-in pagination with metadata and navigation
1300
+ *
1301
+ * **Important Requirements**
1302
+ * - Must be used with an entity created by $entity
1303
+ * - Entity schema must include exactly one primary key field
1304
+ * - Database tables must be created via migrations before use
1305
+ *
1306
+ * **Use Cases**
1307
+ *
1308
+ * Essential for all database-driven applications:
1309
+ * - User management and authentication systems
1310
+ * - E-commerce product and order management
1311
+ * - Content management and blogging platforms
1312
+ * - Financial and accounting applications
1313
+ * - Any application requiring persistent data storage
1314
+ *
1315
+ * @example
1316
+ * **Basic repository with CRUD operations:**
1317
+ * ```ts
1318
+ * import { $entity, $repository } from "alepha/postgres";
1319
+ * import { pg, t } from "alepha";
1320
+ *
1321
+ * // First, define the entity
1322
+ * const User = $entity({
1323
+ * name: "users",
1324
+ * schema: t.object({
1325
+ * id: pg.primaryKey(t.uuid()),
1326
+ * email: t.string({ format: "email" }),
1327
+ * firstName: t.string(),
1328
+ * lastName: t.string(),
1329
+ * isActive: t.boolean({ default: true }),
1330
+ * createdAt: pg.createdAt(),
1331
+ * updatedAt: pg.updatedAt()
1332
+ * }),
1333
+ * indexes: [{ column: "email", unique: true }]
1334
+ * });
1335
+ *
1336
+ * class UserService {
1337
+ * users = $repository({ table: User });
1338
+ *
1339
+ * async createUser(userData: { email: string; firstName: string; lastName: string }) {
1340
+ * return await this.users.create({
1341
+ * id: generateUUID(),
1342
+ * email: userData.email,
1343
+ * firstName: userData.firstName,
1344
+ * lastName: userData.lastName,
1345
+ * isActive: true
1346
+ * });
1347
+ * }
1348
+ *
1349
+ * async getUserByEmail(email: string) {
1350
+ * return await this.users.findOne({ email });
1351
+ * }
1352
+ *
1353
+ * async updateUser(id: string, updates: { firstName?: string; lastName?: string }) {
1354
+ * return await this.users.updateById(id, updates);
1355
+ * }
1356
+ *
1357
+ * async deactivateUser(id: string) {
1358
+ * return await this.users.updateById(id, { isActive: false });
1359
+ * }
1360
+ * }
1361
+ * ```
1362
+ *
1363
+ * @example
1364
+ * **Advanced querying and filtering:**
1365
+ * ```ts
1366
+ * const Product = $entity({
1367
+ * name: "products",
1368
+ * schema: t.object({
1369
+ * id: pg.primaryKey(t.uuid()),
1370
+ * name: t.string(),
1371
+ * price: t.number({ minimum: 0 }),
1372
+ * categoryId: t.string({ format: "uuid" }),
1373
+ * inStock: t.boolean(),
1374
+ * tags: t.optional(t.array(t.string())),
1375
+ * createdAt: pg.createdAt(),
1376
+ * updatedAt: pg.updatedAt()
1377
+ * }),
1378
+ * indexes: ["categoryId", "inStock", "price"]
1379
+ * });
1380
+ *
1381
+ * class ProductService {
1382
+ * products = $repository({ table: Product });
1383
+ *
1384
+ * async searchProducts(filters: {
1385
+ * categoryId?: string;
1386
+ * minPrice?: number;
1387
+ * maxPrice?: number;
1388
+ * inStock?: boolean;
1389
+ * searchTerm?: string;
1390
+ * }, page: number = 0, size: number = 20) {
1391
+ * const query = this.products.createQuery({
1392
+ * where: {
1393
+ * and: [
1394
+ * filters.categoryId ? { categoryId: filters.categoryId } : {},
1395
+ * filters.inStock !== undefined ? { inStock: filters.inStock } : {},
1396
+ * filters.minPrice ? { price: { gte: filters.minPrice } } : {},
1397
+ * filters.maxPrice ? { price: { lte: filters.maxPrice } } : {},
1398
+ * filters.searchTerm ? { name: { ilike: `%${filters.searchTerm}%` } } : {}
1399
+ * ]
1400
+ * },
1401
+ * sort: { createdAt: "desc" }
1402
+ * });
1403
+ *
1404
+ * return await this.products.paginate({ page, size }, query, { count: true });
1405
+ * }
1406
+ *
1407
+ * async getTopSellingProducts(limit: number = 10) {
1408
+ * // Custom SQL query for complex analytics
1409
+ * return await this.products.query(
1410
+ * (table, db) => db
1411
+ * .select({
1412
+ * id: table.id,
1413
+ * name: table.name,
1414
+ * price: table.price,
1415
+ * salesCount: sql<number>`COALESCE(sales.count, 0)`
1416
+ * })
1417
+ * .from(table)
1418
+ * .leftJoin(
1419
+ * sql`(
1420
+ * SELECT product_id, COUNT(*) as count
1421
+ * FROM order_items
1422
+ * WHERE created_at > NOW() - INTERVAL '30 days'
1423
+ * GROUP BY product_id
1424
+ * ) sales`,
1425
+ * sql`sales.product_id = ${table.id}`
1426
+ * )
1427
+ * .orderBy(sql`sales.count DESC NULLS LAST`)
1428
+ * .limit(limit)
1429
+ * );
1430
+ * }
1431
+ * }
1432
+ * ```
1433
+ *
1434
+ * @example
1435
+ * **Transaction handling and data consistency:**
1436
+ * ```ts
1437
+ * class OrderService {
1438
+ * orders = $repository({ table: Order });
1439
+ * orderItems = $repository({ table: OrderItem });
1440
+ * products = $repository({ table: Product });
1441
+ *
1442
+ * async createOrderWithItems(orderData: {
1443
+ * customerId: string;
1444
+ * items: Array<{ productId: string; quantity: number; price: number }>;
1445
+ * }) {
1446
+ * return await this.orders.transaction(async (tx) => {
1447
+ * // Create the order
1448
+ * const order = await this.orders.create({
1449
+ * id: generateUUID(),
1450
+ * customerId: orderData.customerId,
1451
+ * status: 'pending',
1452
+ * totalAmount: orderData.items.reduce((sum, item) => sum + (item.price * item.quantity), 0)
1453
+ * }, { tx });
1454
+ *
1455
+ * // Create order items and update product inventory
1456
+ * for (const itemData of orderData.items) {
1457
+ * await this.orderItems.create({
1458
+ * id: generateUUID(),
1459
+ * orderId: order.id,
1460
+ * productId: itemData.productId,
1461
+ * quantity: itemData.quantity,
1462
+ * unitPrice: itemData.price
1463
+ * }, { tx });
1464
+ *
1465
+ * // Update product inventory using optimistic locking
1466
+ * const product = await this.products.findById(itemData.productId, { tx });
1467
+ * if (product.stockQuantity < itemData.quantity) {
1468
+ * throw new Error(`Insufficient stock for product ${itemData.productId}`);
1469
+ * }
1470
+ *
1471
+ * await this.products.save({
1472
+ * ...product,
1473
+ * stockQuantity: product.stockQuantity - itemData.quantity
1474
+ * }, { tx });
1475
+ * }
1476
+ *
1477
+ * return order;
1478
+ * });
1479
+ * }
1480
+ * }
1481
+ * ```
1482
+ *
1483
+ * @example
1484
+ * **Soft delete and audit trail:**
1485
+ * ```ts
1486
+ * const Document = $entity({
1487
+ * name: "documents",
1488
+ * schema: t.object({
1489
+ * id: pg.primaryKey(t.uuid()),
1490
+ * title: t.string(),
1491
+ * content: t.string(),
1492
+ * authorId: t.string({ format: "uuid" }),
1493
+ * version: pg.version(),
1494
+ * createdAt: pg.createdAt(),
1495
+ * updatedAt: pg.updatedAt(),
1496
+ * deletedAt: pg.deletedAt() // Enables soft delete
1497
+ * })
1498
+ * });
1499
+ *
1500
+ * class DocumentService {
1501
+ * documents = $repository({ table: Document });
1502
+ *
1503
+ * async updateDocument(id: string, updates: { title?: string; content?: string }) {
1504
+ * // This uses optimistic locking via the version field
1505
+ * const document = await this.documents.findById(id);
1506
+ * return await this.documents.save({
1507
+ * ...document,
1508
+ * ...updates // updatedAt will be set automatically
1509
+ * });
1510
+ * }
1511
+ *
1512
+ * async softDeleteDocument(id: string) {
1513
+ * // Soft delete - sets deletedAt timestamp
1514
+ * await this.documents.deleteById(id);
1515
+ * }
1516
+ *
1517
+ * async permanentDeleteDocument(id: string) {
1518
+ * // Hard delete - actually removes from database
1519
+ * await this.documents.deleteById(id, { force: true });
1520
+ * }
1521
+ *
1522
+ * async getActiveDocuments() {
1523
+ * // Automatically excludes soft-deleted records
1524
+ * return await this.documents.find({
1525
+ * where: { authorId: { isNotNull: true } },
1526
+ * sort: { updatedAt: "desc" }
1527
+ * });
1528
+ * }
1529
+ *
1530
+ * async getAllDocumentsIncludingDeleted() {
1531
+ * // Include soft-deleted records
1532
+ * return await this.documents.find({}, { force: true });
1533
+ * }
1534
+ * }
1535
+ * ```
1536
+ *
1537
+ * @example
1538
+ * **Complex filtering and aggregation:**
1539
+ * ```ts
1540
+ * class AnalyticsService {
1541
+ * users = $repository({ table: User });
1542
+ * orders = $repository({ table: Order });
1543
+ *
1544
+ * async getUserStatistics(filters: {
1545
+ * startDate?: string;
1546
+ * endDate?: string;
1547
+ * isActive?: boolean;
1548
+ * }) {
1549
+ * const whereConditions = [];
1550
+ *
1551
+ * if (filters.startDate) {
1552
+ * whereConditions.push({ createdAt: { gte: filters.startDate } });
1553
+ * }
1554
+ * if (filters.endDate) {
1555
+ * whereConditions.push({ createdAt: { lte: filters.endDate } });
1556
+ * }
1557
+ * if (filters.isActive !== undefined) {
1558
+ * whereConditions.push({ isActive: filters.isActive });
1559
+ * }
1560
+ *
1561
+ * const totalUsers = await this.users.count({
1562
+ * and: whereConditions
1563
+ * });
1564
+ *
1565
+ * const activeUsers = await this.users.count({
1566
+ * and: [...whereConditions, { isActive: true }]
1567
+ * });
1568
+ *
1569
+ * // Complex aggregation query
1570
+ * const recentActivity = await this.users.query(
1571
+ * sql`
1572
+ * SELECT
1573
+ * DATE_TRUNC('day', created_at) as date,
1574
+ * COUNT(*) as new_users,
1575
+ * COUNT(*) FILTER (WHERE is_active = true) as active_users
1576
+ * FROM users
1577
+ * WHERE created_at >= NOW() - INTERVAL '30 days'
1578
+ * GROUP BY DATE_TRUNC('day', created_at)
1579
+ * ORDER BY date DESC
1580
+ * `
1581
+ * );
1582
+ *
1583
+ * return {
1584
+ * totalUsers,
1585
+ * activeUsers,
1586
+ * inactiveUsers: totalUsers - activeUsers,
1587
+ * recentActivity
1588
+ * };
1589
+ * }
1590
+ * }
1591
+ * ```
1592
+ *
930
1593
  * @stability 3
931
1594
  */
932
1595
  declare const $repository: {
@@ -935,22 +1598,70 @@ declare const $repository: {
935
1598
  };
936
1599
  interface RepositoryDescriptorOptions<EntityTableConfig extends TableConfig, EntitySchema extends TObject$1> {
937
1600
  /**
938
- * The table to create the repository for.
1601
+ * The entity table definition created with $entity.
1602
+ *
1603
+ * This table:
1604
+ * - Must be created using the $entity descriptor
1605
+ * - Defines the schema, indexes, and constraints for the repository
1606
+ * - Provides type information for all repository operations
1607
+ * - Must include exactly one primary key field
1608
+ *
1609
+ * The repository will automatically:
1610
+ * - Generate typed CRUD operations based on the entity schema
1611
+ * - Handle audit fields like createdAt, updatedAt, deletedAt
1612
+ * - Support optimistic locking if version field is present
1613
+ * - Provide soft delete functionality if deletedAt field exists
1614
+ *
1615
+ * **Entity Requirements**:
1616
+ * - Must have been created with $entity descriptor
1617
+ * - Schema must include a primary key field marked with `pg.primaryKey()`
1618
+ * - Corresponding database table must exist (created via migrations)
1619
+ *
1620
+ * @example
1621
+ * ```ts
1622
+ * const User = $entity({
1623
+ * name: "users",
1624
+ * schema: t.object({
1625
+ * id: pg.primaryKey(t.uuid()),
1626
+ * email: t.string({ format: "email" }),
1627
+ * name: t.string()
1628
+ * })
1629
+ * });
1630
+ *
1631
+ * const userRepository = $repository({ table: User });
1632
+ * ```
939
1633
  */
940
1634
  table: PgTableWithColumnsAndSchema<EntityTableConfig, EntitySchema>;
941
1635
  /**
942
- * Override default provider.
1636
+ * Override the default PostgreSQL database provider.
1637
+ *
1638
+ * By default, the repository will use the injected PostgresProvider from the
1639
+ * dependency injection container. Use this option to:
1640
+ * - Connect to a different database
1641
+ * - Use a specific connection pool
1642
+ * - Implement custom database behavior
1643
+ * - Support multi-tenant architectures with database per tenant
1644
+ *
1645
+ * **Common Use Cases**:
1646
+ * - Multi-database applications
1647
+ * - Read replicas for query optimization
1648
+ * - Different databases for different entity types
1649
+ * - Testing with separate test databases
1650
+ *
1651
+ * @default Uses injected PostgresProvider
1652
+ *
1653
+ * @example ReadOnlyPostgresProvider
1654
+ * @example TenantSpecificPostgresProvider
1655
+ * @example TestDatabaseProvider
943
1656
  */
944
1657
  provider?: Service<PostgresProvider>;
945
1658
  }
946
1659
  declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, EntitySchema extends TObject$1> extends Descriptor<RepositoryDescriptorOptions<EntityTableConfig, EntitySchema>> {
1660
+ protected readonly dateTimeProvider: DateTimeProvider;
947
1661
  readonly provider: PostgresProvider;
948
1662
  protected readonly alepha: Alepha;
949
1663
  readonly schema: EntitySchema;
950
- readonly insertSchema: TInsertObject<EntitySchema>;
951
- protected readonly env: {
952
- POSTGRES_PAGINATION_COUNT_ENABLED: boolean;
953
- };
1664
+ readonly schemaInsert: TObjectInsert<EntitySchema>;
954
1665
  /**
955
1666
  * Represents the primary key of the table.
956
1667
  * - Key is the name of the primary key column.
@@ -974,15 +1685,12 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
974
1685
  /**
975
1686
  * Getter for the database connection from the database provider.
976
1687
  */
977
- protected get db(): PgDatabase<any, Record<string, never>, drizzle_orm2.ExtractTablesWithRelations<Record<string, never>>>;
978
- protected organization(): PgColumn;
1688
+ protected get db(): PgDatabase<any, Record<string, never>, drizzle_orm6.ExtractTablesWithRelations<Record<string, never>>>;
979
1689
  /**
980
1690
  * Execute a SQL query.
981
- *
982
- * @param query
983
- * @param schema
984
1691
  */
985
- execute<T extends TObject$1 = EntitySchema>(query: SQLLike | ((table: PgTableWithColumns<EntityTableConfig>, db: PgDatabase<any>) => SQLLike), schema?: T): Promise<Static$1<T>[]>;
1692
+ query<T extends TObject$1 = EntitySchema>(query: SQLLike | ((table: PgTableWithColumns<EntityTableConfig>, db: PgDatabase<any>) => SQLLike), schema?: T): Promise<Static$1<T>[]>;
1693
+ protected mapRawFieldsToEntity(row: any[]): any;
986
1694
  /**
987
1695
  * Get a Drizzle column from the table by his name.
988
1696
  *
@@ -1002,29 +1710,34 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1002
1710
  *
1003
1711
  * @returns The SELECT query builder.
1004
1712
  */
1005
- protected select(opts?: StatementOptions): drizzle_orm_pg_core0.PgSelectBase<string, Record<string, PgColumn<drizzle_orm2.ColumnBaseConfig<drizzle_orm2.ColumnDataType, string>, {}, {}>>, "single", Record<string, "not-null">, false, never, {
1713
+ protected select(opts?: StatementOptions): pg$1.PgSelectBase<string, Record<string, PgColumn<drizzle_orm6.ColumnBaseConfig<drizzle_orm6.ColumnDataType, string>, {}, {}>>, "single", Record<string, "not-null">, false, never, {
1714
+ [x: string]: unknown;
1715
+ }[], {
1716
+ [x: string]: PgColumn<drizzle_orm6.ColumnBaseConfig<drizzle_orm6.ColumnDataType, string>, {}, {}>;
1717
+ }>;
1718
+ protected selectDistinct(opts: StatementOptions | undefined, fields: SelectedFields): pg$1.PgSelectBase<string, SelectedFields, "partial", Record<string, "not-null">, false, never, {
1006
1719
  [x: string]: unknown;
1007
1720
  }[], {
1008
- [x: string]: PgColumn<drizzle_orm2.ColumnBaseConfig<drizzle_orm2.ColumnDataType, string>, {}, {}>;
1721
+ [x: string]: never;
1009
1722
  }>;
1010
1723
  /**
1011
1724
  * Start an INSERT query on the table.
1012
1725
  *
1013
1726
  * @returns The INSERT query builder.
1014
1727
  */
1015
- protected insert(opts?: StatementOptions): drizzle_orm_pg_core0.PgInsertBuilder<PgTableWithColumns<EntityTableConfig>, any, false>;
1728
+ protected insert(opts?: StatementOptions): pg$1.PgInsertBuilder<PgTableWithColumns<EntityTableConfig>, any, false>;
1016
1729
  /**
1017
1730
  * Start an UPDATE query on the table.
1018
1731
  *
1019
1732
  * @returns The UPDATE query builder.
1020
1733
  */
1021
- protected update(opts?: StatementOptions): drizzle_orm_pg_core0.PgUpdateBuilder<PgTableWithColumns<EntityTableConfig>, any>;
1734
+ protected update(opts?: StatementOptions): pg$1.PgUpdateBuilder<PgTableWithColumns<EntityTableConfig>, any>;
1022
1735
  /**
1023
1736
  * Start a DELETE query on the table.
1024
1737
  *
1025
1738
  * @returns The DELETE query builder.
1026
1739
  */
1027
- protected delete(opts?: StatementOptions): drizzle_orm_pg_core0.PgDeleteBase<PgTableWithColumns<EntityTableConfig>, any, undefined, undefined, false, never>;
1740
+ protected delete(opts?: StatementOptions): pg$1.PgDeleteBase<PgTableWithColumns<EntityTableConfig>, any, undefined, undefined, false, never>;
1028
1741
  /**
1029
1742
  * Find entities.
1030
1743
  *
@@ -1043,20 +1756,14 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1043
1756
  findOne<T extends Static$1<EntitySchema> = Static$1<EntitySchema>>(where: PgQueryWhere<T>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1044
1757
  /**
1045
1758
  * Find an entity by ID.
1046
- *
1047
- * @param id
1048
- * @param opts
1049
1759
  */
1050
1760
  findById(id: string | number, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1051
1761
  /**
1052
1762
  * Paginate entities.
1053
- *
1054
- * @param pageQuery The pagination query.
1055
- * @param findQuery The find query.
1056
- * @param opts The statement options.
1057
- * @returns The paginated entities.
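+ *
+ * @example
+ * A minimal sketch (hypothetical repository and fields; `pageQuery` is e.g. parsed with `pageQuerySchema`):
+ * ```ts
+ * const page = await this.articles.paginate(
+ * pageQuery,
+ * { where: { status: { eq: "draft" } } },
+ * { count: true }, // also compute the total number of rows
+ * );
+ * ```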
1058
1763
  */
1059
- paginate(pageQuery?: PageQuery, findQuery?: PgQuery<EntitySchema>, opts?: StatementOptions): Promise<Page<Static$1<EntitySchema>>>;
1764
+ paginate(pagination?: PageQuery, query?: PgQuery<EntitySchema>, opts?: StatementOptions & {
1765
+ count?: boolean;
1766
+ }): Promise<Page<Static$1<EntitySchema>>>;
1060
1767
  createQuery(query?: PgQuery<EntitySchema>): PgQuery<EntitySchema>;
1061
1768
  createQueryWhere(where?: PgQueryWhere<Static$1<EntitySchema>>): PgQueryWhere<Static$1<EntitySchema>>;
1062
1769
  /**
@@ -1066,7 +1773,7 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1066
1773
  * @param opts The options for creating the entity.
1067
1774
  * @returns The ID of the created entity.
1068
1775
  */
1069
- create(data: InferInsert<EntitySchema>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1776
+ create(data: StaticInsert<EntitySchema>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1070
1777
  /**
1071
1778
  * Create many entities.
1072
1779
  *
@@ -1074,63 +1781,76 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1074
1781
  * @param opts The statement options.
1075
1782
  * @returns The created entities.
1076
1783
  */
1077
- createMany(values: Array<InferInsert<EntitySchema>>, opts?: StatementOptions): Promise<Static$1<EntitySchema>[]>;
1784
+ createMany(values: Array<StaticInsert<EntitySchema>>, opts?: StatementOptions): Promise<Static$1<EntitySchema>[]>;
1078
1785
  /**
1079
- * Update an entity.
1080
- *
1081
- * @param query The where clause.
1082
- * @param data The data to update.
1083
- * @param opts The statement options.
1084
- * @returns The updated entity.
1786
+ * Find an entity and update it.
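+ *
+ * @example
+ * A minimal sketch (hypothetical repository and fields):
+ * ```ts
+ * const user = await this.users.updateOne(
+ * { email: { eq: "john@example.com" } },
+ * { displayName: "John D." },
+ * );
+ * ```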
1085
1787
  */
1086
- updateOne(query: PgQueryWhere<Static$1<EntitySchema>>, data: Partial<NullifyIfOptional<Static$1<EntitySchema>>> | {
1087
- $append: Partial<NullifyIfOptional<Static$1<EntitySchema>>>;
1088
- }, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1089
- save(data: Static$1<EntitySchema>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1788
+ updateOne(where: PgQueryWhereOrSQL<Static$1<EntitySchema>>, data: Partial<Static$1<TObjectUpdate<EntitySchema>>>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1090
1789
  /**
1091
- * Update an entity by ID.
1790
+ * Save a given entity.
1791
+ *
1792
+ * @example
1793
+ * ```ts
1794
+ * const entity = await repository.findById(1);
1795
+ * entity.name = "New Name"; // update a field
1796
+ * delete entity.description; // delete a field
1797
+ * await repository.save(entity);
1798
+ * ```
1799
+ *
1800
+ * Difference with `updateById/updateOne`:
1801
+ *
1802
+ * - requires the entity to be fetched first (whole object is expected)
1803
+ * - checks pg.version() if present -> optimistic locking
1804
+ * - validates the entity against its schema
1805
+ * - undefined values will be set to null, not ignored!
1092
1806
  *
1093
- * @param id
1094
- * @param data
1095
- * @param opts
1807
+ * @see {@link PostgresTypeProvider#version}
1808
+ * @see {@link PgVersionMismatchError}
1096
1809
  */
1097
- updateById(id: string | number, data: Partial<NullifyIfOptional<Static$1<EntitySchema>>>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1810
+ save(entity: Static$1<EntitySchema>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1098
1811
  /**
1099
- * Update entities.
1100
- *
1101
- * @param where The where clause.
1102
- * @param data The data to update.
1103
- * @param opts The statement options.
1104
- * @returns The updated entities.
1812
+ * Find an entity by ID and update it.
1105
1813
  */
1106
- updateMany(where: PgQueryWhere<Static$1<EntitySchema>>, data: Partial<NullifyIfOptional<Static$1<EntitySchema>>>, opts?: StatementOptions): Promise<Static$1<EntitySchema>[]>;
1814
+ updateById(id: string | number, data: Partial<Static$1<TObjectUpdate<EntitySchema>>>, opts?: StatementOptions): Promise<Static$1<EntitySchema>>;
1107
1815
  /**
1108
- * Delete entities.
1109
- *
1110
- * @param where Query.
1111
- * @param opts The statement options.
1816
+ * Find many entities and update all of them.
1817
+ */
1818
+ updateMany(where: PgQueryWhereOrSQL<Static$1<EntitySchema>>, data: Partial<Static$1<TObjectUpdate<EntitySchema>>>, opts?: StatementOptions): Promise<void>;
1819
+ /**
1820
+ * Find many entities and delete all of them.
1112
1821
  */
1113
1822
  deleteMany(where?: PgQueryWhere<Static$1<EntitySchema>>, opts?: StatementOptions): Promise<void>;
1114
1823
  /**
1115
1824
  * Delete all entities.
1116
- * @param opts
1117
1825
  */
1118
1826
  clear(opts?: StatementOptions): Promise<void>;
1119
1827
  /**
1120
- * Delete an entity by ID.
1828
+ * Delete the given entity.
1121
1829
  *
1122
- * @param id
1123
- * @param opts
1830
+ * You must fetch the entity first in order to delete it.
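+ *
+ * @example
+ * A minimal sketch (hypothetical repository):
+ * ```ts
+ * const user = await this.users.findById(userId);
+ * await this.users.destroy(user);
+ * ```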
1831
+ */
1832
+ destroy(entity: Static$1<EntitySchema>, opts?: StatementOptions): Promise<void>;
1833
+ /**
1834
+ * Find an entity and delete it.
1835
+ */
1836
+ deleteOne(where?: PgQueryWhere<Static$1<EntitySchema>>, opts?: StatementOptions): Promise<void>;
1837
+ /**
1838
+ * Find an entity by ID and delete it.
1124
1839
  */
1125
1840
  deleteById(id: string | number, opts?: StatementOptions): Promise<void>;
1126
1841
  /**
1127
1842
  * Count entities.
1128
- *
1129
- * @param where The where clause.
1130
- * @param opts The statement options.
1131
- * @returns The count of entities.
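+ *
+ * @example
+ * A minimal sketch (hypothetical repository and field):
+ * ```ts
+ * const total = await this.users.count({ active: { eq: true } });
+ * ```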
1132
1843
  */
1133
- count(where?: PgQueryWhere<Static$1<EntitySchema>>, opts?: StatementOptions): Promise<number>;
1844
+ count(where?: PgQueryWhereOrSQL<Static$1<EntitySchema>>, opts?: StatementOptions): Promise<number>;
1845
+ protected conflictMessagePattern: string;
1846
+ protected handleError(error: unknown, message: string): PgError;
1847
+ protected withDeletedAt(where: PgQueryWhereOrSQL<Static$1<EntitySchema>>, opts?: {
1848
+ force?: boolean;
1849
+ }): PgQueryWhereOrSQL<(EntitySchema & {
1850
+ params: [];
1851
+ })["static"]>;
1852
+ protected get organization(): undefined;
1853
+ protected deletedAt(): PgAttrField | undefined;
1134
1854
  /**
1135
1855
  * Convert a query object to a SQL query.
1136
1856
  *
@@ -1138,7 +1858,7 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1138
1858
  * @param schema The schema to use.
1139
1859
  * @param col The column to use.
1140
1860
  */
1141
- protected jsonQueryToSql(query: PgQueryWhere<Static$1<EntitySchema>>, schema?: TObject$1, col?: (key: string) => PgColumn): SQL | undefined;
1861
+ protected jsonQueryToSql(query: PgQueryWhereOrSQL<Static$1<EntitySchema>>, schema?: TObject$1, col?: (key: string) => PgColumn): SQL | undefined;
1142
1862
  /**
1143
1863
  * Map a filter operator to a SQL query.
1144
1864
  *
@@ -1146,7 +1866,7 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1146
1866
  * @param column
1147
1867
  * @protected
1148
1868
  */
1149
- protected mapOperatorToSql(operator: FilterOperators<any>, column: PgColumn): SQL | undefined;
1869
+ protected mapOperatorToSql(operator: FilterOperators<any> | any, column: PgColumn): SQL | undefined;
1150
1870
  /**
1151
1871
  * Create a pagination object.
1152
1872
  *
@@ -1182,7 +1902,7 @@ declare class RepositoryDescriptor<EntityTableConfig extends TableConfig, Entity
1182
1902
  */
1183
1903
  protected getPrimaryKey(schema: TObject$1): {
1184
1904
  key: string;
1185
- col: PgColumn<drizzle_orm2.ColumnBaseConfig<drizzle_orm2.ColumnDataType, string>, {}, {}>;
1905
+ col: PgColumn<drizzle_orm6.ColumnBaseConfig<drizzle_orm6.ColumnDataType, string>, {}, {}>;
1186
1906
  type: TSchema$2;
1187
1907
  };
1188
1908
  }
@@ -1202,24 +1922,241 @@ interface StatementOptions {
1202
1922
  strength: LockStrength;
1203
1923
  };
1204
1924
  /**
1205
- * Organization ID.
1206
- *
1207
- * Multi-tenant support.
1208
- *
1209
- * If set, it will be used to filter the results by organization.
1925
+ * If true, ignore soft delete.
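+ *
+ * @example
+ * A minimal sketch (hypothetical repository): fetch a row even if it has been soft-deleted.
+ * ```ts
+ * const user = await this.users.findById(userId, { force: true });
+ * ```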
1210
1926
  */
1211
- organization?: string;
1212
- }
1213
- interface PgAttrField {
1214
- key: string;
1215
- type: TSchema$2;
1216
- data: any;
1217
- nested?: any[];
1927
+ force?: boolean;
1928
+ /**
1929
+ * Override the current time used by this statement.
1930
+ */
1931
+ now?: DateTime;
1218
1932
  }
1219
- //# sourceMappingURL=$repository.d.ts.map
1220
1933
  //#endregion
1221
1934
  //#region src/descriptors/$sequence.d.ts
1222
1935
  /**
1936
+ * Creates a PostgreSQL sequence descriptor for generating unique numeric values.
1937
+ *
1938
+ * This descriptor provides a type-safe interface to PostgreSQL sequences, which are
1939
+ * database objects that generate unique numeric identifiers. Sequences are commonly
1940
+ * used for primary keys, order numbers, invoice numbers, and other cases where
1941
+ * guaranteed unique, incrementing values are needed across concurrent operations.
1942
+ *
1943
+ * **Key Features**
1944
+ *
1945
+ * - **Thread-Safe**: PostgreSQL sequences are inherently thread-safe and handle concurrency
1946
+ * - **Configurable Parameters**: Start value, increment, min/max bounds, and cycling behavior
1947
+ * - **Automatic Creation**: Sequences are created automatically when first used
1948
+ * - **Type Safety**: Full TypeScript support with numeric return types
1949
+ * - **Performance**: Optimized for high-throughput ID generation
1950
+ * - **Schema Support**: Works with PostgreSQL schemas for organization
1951
+ *
1952
+ * **Use Cases**
1953
+ *
1954
+ * Perfect for generating unique identifiers in concurrent environments:
1955
+ * - Primary key generation (alternative to UUIDs)
1956
+ * - Order numbers and invoice sequences
1957
+ * - Ticket numbers and reference IDs
1958
+ * - Version numbers and revision tracking
1959
+ * - Batch numbers for processing workflows
1960
+ * - Any scenario requiring guaranteed unique incrementing numbers
1961
+ *
1962
+ * @example
1963
+ * **Basic sequence for order numbers:**
1964
+ * ```ts
1965
+ * import { $sequence } from "alepha/postgres";
1966
+ *
1967
+ * class OrderService {
1968
+ * orderNumbers = $sequence({
1969
+ * name: "order_numbers",
1970
+ * start: 1000, // Start from order #1000
1971
+ * increment: 1 // Increment by 1 each time
1972
+ * });
1973
+ *
1974
+ * async createOrder(orderData: OrderData) {
1975
+ * const orderNumber = await this.orderNumbers.next();
1976
+ *
1977
+ * return await this.orders.create({
1978
+ * id: generateUUID(),
1979
+ * orderNumber,
1980
+ * ...orderData
1981
+ * });
1982
+ * }
1983
+ *
1984
+ * async getCurrentOrderNumber() {
1985
+ * // Get the last generated number without incrementing
1986
+ * return await this.orderNumbers.current();
1987
+ * }
1988
+ * }
1989
+ * ```
1990
+ *
1991
+ * @example
1992
+ * **Invoice numbering with yearly reset:**
1993
+ * ```ts
1994
+ * class InvoiceService {
1995
+ * // Separate sequence for each year
1996
+ * getInvoiceSequence(year: number) {
1997
+ * return $sequence({
1998
+ * name: `invoice_numbers_${year}`,
1999
+ * start: 1,
2000
+ * increment: 1
2001
+ * });
2002
+ * }
2003
+ *
2004
+ * async generateInvoiceNumber(): Promise<string> {
2005
+ * const year = new Date().getFullYear();
2006
+ * const sequence = this.getInvoiceSequence(year);
2007
+ * const number = await sequence.next();
2008
+ *
2009
+ * // Format as INV-2024-001, INV-2024-002, etc.
2010
+ * return `INV-${year}-${number.toString().padStart(3, '0')}`;
2011
+ * }
2012
+ * }
2013
+ * ```
2014
+ *
2015
+ * @example
2016
+ * **High-performance ID generation with custom increments:**
2017
+ * ```ts
2018
+ * class TicketService {
2019
+ * // Generate ticket numbers in increments of 10 for better distribution
2020
+ * ticketSequence = $sequence({
2021
+ * name: "ticket_numbers",
2022
+ * start: 1000,
2023
+ * increment: 10,
2024
+ * min: 1000,
2025
+ * max: 999999,
2026
+ * cycle: false // Don't cycle when max is reached
2027
+ * });
2028
+ *
2029
+ * priorityTicketSequence = $sequence({
2030
+ * name: "priority_ticket_numbers",
2031
+ * start: 1,
2032
+ * increment: 1,
2033
+ * min: 1,
2034
+ * max: 999,
2035
+ * cycle: true // Cycle when reaching max
2036
+ * });
2037
+ *
2038
+ * async generateTicketNumber(isPriority: boolean = false): Promise<number> {
2039
+ * if (isPriority) {
2040
+ * return await this.priorityTicketSequence.next();
2041
+ * }
2042
+ * return await this.ticketSequence.next();
2043
+ * }
2044
+ *
2045
+ * async getSequenceStatus() {
2046
+ * return {
2047
+ * currentTicketNumber: await this.ticketSequence.current(),
2048
+ * currentPriorityNumber: await this.priorityTicketSequence.current()
2049
+ * };
2050
+ * }
2051
+ * }
2052
+ * ```
2053
+ *
2054
+ * @example
2055
+ * **Batch processing with sequence-based coordination:**
2056
+ * ```ts
2057
+ * class BatchProcessor {
2058
+ * batchSequence = $sequence({
2059
+ * name: "batch_numbers",
2060
+ * start: 1,
2061
+ * increment: 1
2062
+ * });
2063
+ *
2064
+ * async processBatch(items: any[]) {
2065
+ * const batchNumber = await this.batchSequence.next();
2066
+ *
2067
+ * console.log(`Starting batch processing #${batchNumber} with ${items.length} items`);
2068
+ *
2069
+ * try {
2070
+ * // Process items with batch number for tracking
2071
+ * for (const item of items) {
2072
+ * await this.processItem(item, batchNumber);
2073
+ * }
2074
+ *
2075
+ * await this.auditLogger.log({
2076
+ * event: 'batch_completed',
2077
+ * batchNumber,
2078
+ * itemCount: items.length,
2079
+ * timestamp: new Date()
2080
+ * });
2081
+ *
2082
+ * return { batchNumber, processedCount: items.length };
2083
+ *
2084
+ * } catch (error) {
2085
+ * await this.auditLogger.log({
2086
+ * event: 'batch_failed',
2087
+ * batchNumber,
2088
+ * error: error.message,
2089
+ * timestamp: new Date()
2090
+ * });
2091
+ * throw error;
2092
+ * }
2093
+ * }
2094
+ *
2095
+ * async processItem(item: any, batchNumber: number) {
2096
+ * // Associate item processing with batch number
2097
+ * await this.items.update(item.id, {
2098
+ * ...item.updates,
2099
+ * batchNumber,
2100
+ * processedAt: new Date()
2101
+ * });
2102
+ * }
2103
+ * }
2104
+ * ```
2105
+ *
2106
+ * @example
2107
+ * **Multi-tenant sequence management:**
2108
+ * ```ts
2109
+ * class TenantSequenceService {
2110
+ * // Create tenant-specific sequences
2111
+ * getTenantSequence(tenantId: string, sequenceType: string) {
2112
+ * return $sequence({
2113
+ * name: `${tenantId}_${sequenceType}_seq`,
2114
+ * start: 1,
2115
+ * increment: 1
2116
+ * });
2117
+ * }
2118
+ *
2119
+ * async generateTenantOrderNumber(tenantId: string): Promise<string> {
2120
+ * const sequence = this.getTenantSequence(tenantId, 'orders');
2121
+ * const number = await sequence.next();
2122
+ *
2123
+ * return `${tenantId.toUpperCase()}-ORD-${number.toString().padStart(6, '0')}`;
2124
+ * }
2125
+ *
2126
+ * async generateTenantInvoiceNumber(tenantId: string): Promise<string> {
2127
+ * const sequence = this.getTenantSequence(tenantId, 'invoices');
2128
+ * const number = await sequence.next();
2129
+ *
2130
+ * return `${tenantId.toUpperCase()}-INV-${number.toString().padStart(6, '0')}`;
2131
+ * }
2132
+ *
2133
+ * async getTenantSequenceStatus(tenantId: string) {
2134
+ * const orderSeq = this.getTenantSequence(tenantId, 'orders');
2135
+ * const invoiceSeq = this.getTenantSequence(tenantId, 'invoices');
2136
+ *
2137
+ * return {
2138
+ * tenant: tenantId,
2139
+ * sequences: {
2140
+ * orders: {
2141
+ * current: await orderSeq.current(),
2142
+ * next: await orderSeq.next()
2143
+ * },
2144
+ * invoices: {
2145
+ * current: await invoiceSeq.current()
2146
+ * }
2147
+ * }
2148
+ * };
2149
+ * }
2150
+ * }
2151
+ * ```
2152
+ *
2153
+ * **Important Notes**:
2154
+ * - Sequences are created automatically when first used
2155
+ * - PostgreSQL sequences are atomic and handle high concurrency
2156
+ * - Sequence values are not rolled back in failed transactions
2157
+ * - Consider the impact of max values and cycling behavior
2158
+ * - Sequences are schema-scoped in PostgreSQL
2159
+ *
1223
2160
  * @stability 1
1224
2161
  */
1225
2162
  declare const $sequence: {
@@ -1227,11 +2164,130 @@ declare const $sequence: {
1227
2164
  [KIND]: typeof SequenceDescriptor;
1228
2165
  };
1229
2166
  interface SequenceDescriptorOptions {
2167
+ /**
2168
+ * Name of the PostgreSQL sequence to create.
2169
+ *
2170
+ * This name:
2171
+ * - Must be unique within the database schema
2172
+ * - Should follow PostgreSQL identifier conventions
2173
+ * - Will be used in generated SQL for sequence operations
2174
+ * - Should be descriptive of the sequence's purpose
2175
+ *
2176
+ * If not provided, defaults to the property key where the sequence is declared.
2177
+ *
2178
+ * **Naming Guidelines**:
2179
+ * - Use descriptive names like "order_numbers", "invoice_seq"
2180
+ * - Include the purpose or entity type in the name
2181
+ * - Consider adding "_seq" suffix for clarity
2182
+ * - Use snake_case for consistency with PostgreSQL conventions
2183
+ *
2184
+ * @example "order_numbers"
2185
+ * @example "invoice_sequence"
2186
+ * @example "ticket_numbers_seq"
2187
+ */
1230
2188
  name?: string;
2189
+ /**
2190
+ * The starting value for the sequence.
2191
+ *
2192
+ * This value:
2193
+ * - Determines the first number that will be generated
2194
+ * - Can be any integer within the sequence's range
2195
+ * - Is useful for avoiding conflicts with existing data
2196
+ * - Can be set higher for business reasons (e.g., starting invoices at 1000)
2197
+ *
2198
+ * **Common Patterns**:
2199
+ * - Start at 1 for simple counters
2200
+ * - Start at 1000+ for professional-looking numbers
2201
+ * - Start at current max + 1 when migrating existing data
2202
+ *
2203
+ * @default 1
2204
+ * @example 1 // Simple counter starting at 1
2205
+ * @example 1000 // Professional numbering starting at 1000
2206
+ * @example 10000 // Large starting number for established businesses
2207
+ */
1231
2208
  start?: number;
2209
+ /**
2210
+ * The increment value for each call to next().
2211
+ *
2212
+ * This value:
2213
+ * - Determines how much the sequence increases each time
2214
+ * - Can be any positive or negative integer
2215
+ * - Affects the gaps between generated numbers
2216
+ * - Can be used for number distribution strategies
2217
+ *
2218
+ * **Use Cases**:
2219
+ * - increment: 1 for consecutive numbering
2220
+ * - increment: 10 for distributed numbering (leaves gaps for manual entries)
2221
+ * - increment: -1 for countdown sequences
2222
+ *
2223
+ * @default 1
2224
+ * @example 1 // Standard consecutive numbering
2225
+ * @example 10 // Leave gaps between numbers
2226
+ * @example 100 // Large gaps for special numbering schemes
2227
+ */
1232
2228
  increment?: number;
2229
+ /**
2230
+ * The minimum value the sequence can generate.
2231
+ *
2232
+ * When the sequence reaches this value:
2233
+ * - It cannot generate values below this minimum
2234
+ * - Helps prevent negative numbers in contexts where they don't make sense
2235
+ * - Works with cycling behavior to define the lower bound
2236
+ *
2237
+ * **Considerations**:
2238
+ * - Set to 1 for positive-only sequences
2239
+ * - Set to 0 if zero values are acceptable
2240
+ * - Consider business rules about minimum valid numbers
2241
+ *
2242
+ * @example 1 // No zero or negative values
2243
+ * @example 0 // Allow zero values
2244
+ * @example 1000 // Maintain minimum professional appearance
2245
+ */
1233
2246
  min?: number;
2247
+ /**
2248
+ * The maximum value the sequence can generate.
2249
+ *
2250
+ * When the sequence reaches this value:
2251
+ * - It cannot generate values above this maximum
2252
+ * - Behavior depends on the cycle option
2253
+ * - Useful for preventing overflow or limiting number ranges
2254
+ *
2255
+ * **Planning Considerations**:
2256
+ * - Consider the expected volume of your application
2257
+ * - Account for business growth over time
2258
+ * - Factor in any formatting constraints (e.g., fixed-width displays)
2259
+ * - Remember that PostgreSQL sequences can handle very large numbers
2260
+ *
2261
+ * @example 999999 // Six-digit limit
2262
+ * @example 2147483647 // Maximum 32-bit signed integer
2263
+ * @example 9999 // Four-digit limit for display purposes
2264
+ */
1234
2265
  max?: number;
2266
+ /**
2267
+ * Whether the sequence should cycle back to the minimum when it reaches the maximum.
2268
+ *
2269
+ * **cycle: true**:
2270
+ * - When max is reached, next value will be the minimum value
2271
+ * - Useful for scenarios where number reuse is acceptable
2272
+ * - Common for temporary identifiers or rotating references
2273
+ *
2274
+ * **cycle: false (default)**:
2275
+ * - When max is reached, further calls will fail with an error
2276
+ * - Prevents unexpected number reuse
2277
+ * - Better for permanent identifiers where uniqueness is critical
2278
+ *
2279
+ * **Use Cases for Cycling**:
2280
+ * - Temporary ticket numbers that can be reused
2281
+ * - Session IDs with limited lifetime
2282
+ * - Batch numbers in rotating systems
2283
+ *
2284
+ * **Avoid Cycling For**:
2285
+ * - Primary keys and permanent identifiers
2286
+ * - Invoice numbers and financial references
2287
+ * - Audit logs and compliance records
2288
+ *
2289
+ * @default false
2290
+ */
1235
2291
  cycle?: boolean;
1236
2292
  }
1237
2293
  declare class SequenceDescriptor extends Descriptor<SequenceDescriptorOptions> {
@@ -1242,35 +2298,501 @@ declare class SequenceDescriptor extends Descriptor<SequenceDescriptorOptions> {
1242
2298
  next(): Promise<number>;
1243
2299
  current(): Promise<number>;
1244
2300
  }
1245
- //# sourceMappingURL=$sequence.d.ts.map
1246
2301
  //#endregion
1247
2302
  //#region src/descriptors/$transaction.d.ts
1248
2303
  /**
2304
+ * Creates a transaction descriptor for database operations requiring atomicity and consistency.
2305
+ *
2306
+ * This descriptor provides a convenient way to wrap database operations in PostgreSQL
2307
+ * transactions, ensuring ACID properties and automatic retry logic for version conflicts.
2308
+ * It integrates seamlessly with the repository pattern and provides built-in handling
2309
+ * for optimistic locking scenarios with automatic retry on version mismatches.
2310
+ *
2311
+ * **Key Features**
2312
+ *
2313
+ * - **ACID Compliance**: Full transaction support with commit/rollback functionality
2314
+ * - **Automatic Retry Logic**: Built-in retry for optimistic locking conflicts
2315
+ * - **Type Safety**: Full TypeScript support with generic parameters
2316
+ * - **Isolation Levels**: Configurable transaction isolation levels
2317
+ * - **Error Handling**: Automatic rollback on errors with proper error propagation
2318
+ * - **Repository Integration**: Seamless integration with $repository operations
2319
+ * - **Performance**: Efficient transaction management with connection reuse
2320
+ *
2321
+ * **Use Cases**
2322
+ *
2323
+ * Essential for operations requiring atomicity and consistency:
2324
+ * - Financial transactions and accounting operations
2325
+ * - Complex business workflows with multiple database operations
2326
+ * - Data migrations and bulk operations
2327
+ * - E-commerce order processing with inventory updates
2328
+ * - User registration with related data creation
2329
+ * - Audit trail creation with business operations
2330
+ *
2331
+ * @example
2332
+ * **Basic transaction for financial operations:**
2333
+ * ```ts
2334
+ * import { $transaction } from "alepha/postgres";
2335
+ *
2336
+ * class BankingService {
2337
+ * transfer = $transaction({
2338
+ * handler: async (tx, fromAccountId: string, toAccountId: string, amount: number) => {
2339
+ * // All operations within this transaction are atomic
2340
+ * console.log(`Processing transfer: $${amount} from ${fromAccountId} to ${toAccountId}`);
2341
+ *
2342
+ * // Get current account balances
2343
+ * const fromAccount = await this.accounts.findById(fromAccountId, { tx });
2344
+ * const toAccount = await this.accounts.findById(toAccountId, { tx });
2345
+ *
2346
+ * // Validate sufficient balance
2347
+ * if (fromAccount.balance < amount) {
2348
+ * throw new Error(`Insufficient funds. Balance: $${fromAccount.balance}, Required: $${amount}`);
2349
+ * }
2350
+ *
2351
+ * // Update account balances atomically
2352
+ * const updatedFromAccount = await this.accounts.updateById(
2353
+ * fromAccountId,
2354
+ * { balance: fromAccount.balance - amount },
2355
+ * { tx }
2356
+ * );
2357
+ *
2358
+ * const updatedToAccount = await this.accounts.updateById(
2359
+ * toAccountId,
2360
+ * { balance: toAccount.balance + amount },
2361
+ * { tx }
2362
+ * );
2363
+ *
2364
+ * // Create transaction record
2365
+ * const transactionRecord = await this.transactions.create({
2366
+ * id: generateUUID(),
2367
+ * fromAccountId,
2368
+ * toAccountId,
2369
+ * amount,
2370
+ * type: 'transfer',
2371
+ * status: 'completed',
2372
+ * processedAt: new Date().toISOString()
2373
+ * }, { tx });
2374
+ *
2375
+ * console.log(`Transfer completed successfully: ${transactionRecord.id}`);
2376
+ *
2377
+ * return {
2378
+ * transactionId: transactionRecord.id,
2379
+ * fromBalance: updatedFromAccount.balance,
2380
+ * toBalance: updatedToAccount.balance
2381
+ * };
2382
+ * }
2383
+ * });
2384
+ *
2385
+ * async transferFunds(fromAccountId: string, toAccountId: string, amount: number) {
2386
+ * // This will automatically retry if there's a version mismatch (optimistic locking)
2387
+ * return await this.transfer.run(fromAccountId, toAccountId, amount);
2388
+ * }
2389
+ * }
2390
+ * ```
2391
+ *
2392
+ * @example
2393
+ * **E-commerce order processing with inventory management:**
2394
+ * ```ts
2395
+ * class OrderService {
2396
+ * processOrder = $transaction({
2397
+ * config: {
2398
+ * isolationLevel: 'serializable' // Highest isolation for critical operations
2399
+ * },
2400
+ * handler: async (tx, orderData: {
2401
+ * customerId: string;
2402
+ * items: Array<{ productId: string; quantity: number; price: number }>;
2403
+ * shippingAddress: Address;
2404
+ * paymentMethodId: string;
2405
+ * }) => {
2406
+ * console.log(`Processing order for customer ${orderData.customerId}`);
2407
+ *
2408
+ * let totalAmount = 0;
2409
+ * const orderItems = [];
2410
+ *
2411
+ * // Process each item and update inventory atomically
2412
+ * for (const itemData of orderData.items) {
2413
+ * const product = await this.products.findById(itemData.productId, { tx });
2414
+ *
2415
+ * // Check inventory availability
2416
+ * if (product.stockQuantity < itemData.quantity) {
2417
+ * throw new Error(`Insufficient stock for ${product.name}. Available: ${product.stockQuantity}, Requested: ${itemData.quantity}`);
2418
+ * }
2419
+ *
2420
+ * // Update product inventory with optimistic locking
2421
+ * await this.products.save({
2422
+ * ...product,
2423
+ * stockQuantity: product.stockQuantity - itemData.quantity
2424
+ * }, { tx });
2425
+ *
2426
+ * // Calculate totals
2427
+ * const lineTotal = itemData.price * itemData.quantity;
2428
+ * totalAmount += lineTotal;
2429
+ *
2430
+ * orderItems.push({
2431
+ * id: generateUUID(),
2432
+ * productId: itemData.productId,
2433
+ * quantity: itemData.quantity,
2434
+ * unitPrice: itemData.price,
2435
+ * lineTotal
2436
+ * });
2437
+ * }
2438
+ *
2439
+ * // Create the main order record
2440
+ * const order = await this.orders.create({
2441
+ * id: generateUUID(),
2442
+ * customerId: orderData.customerId,
2443
+ * status: 'pending',
2444
+ * totalAmount,
2445
+ * shippingAddress: orderData.shippingAddress,
2446
+ * createdAt: new Date().toISOString()
2447
+ * }, { tx });
2448
+ *
2449
+ * // Create order items
2450
+ * for (const itemData of orderItems) {
2451
+ * await this.orderItems.create({
2452
+ * ...itemData,
2453
+ * orderId: order.id
2454
+ * }, { tx });
2455
+ * }
2456
+ *
2457
+ * // Process payment
2458
+ * const paymentResult = await this.paymentService.processPayment({
2459
+ * orderId: order.id,
2460
+ * amount: totalAmount,
2461
+ * paymentMethodId: orderData.paymentMethodId,
2462
+ * customerId: orderData.customerId
2463
+ * }, { tx });
2464
+ *
2465
+ * if (!paymentResult.success) {
2466
+ * throw new Error(`Payment failed: ${paymentResult.error}`);
2467
+ * }
2468
+ *
2469
+ * // Update order status
2470
+ * const completedOrder = await this.orders.updateById(
2471
+ * order.id,
2472
+ * {
2473
+ * status: 'paid',
2474
+ * paymentId: paymentResult.paymentId,
2475
+ * paidAt: new Date().toISOString()
2476
+ * },
2477
+ * { tx }
2478
+ * );
2479
+ *
2480
+ * console.log(`Order processed successfully: ${order.id}`);
2481
+ *
2482
+ * return {
2483
+ * orderId: order.id,
2484
+ * totalAmount,
2485
+ * paymentId: paymentResult.paymentId,
2486
+ * itemCount: orderItems.length
2487
+ * };
2488
+ * }
2489
+ * });
2490
+ * }
2491
+ * ```
2492
+ *
2493
+ * @example
2494
+ * **User registration with related data creation:**
2495
+ * ```ts
2496
+ * class UserService {
2497
+ * registerUser = $transaction({
2498
+ * handler: async (tx, registrationData: {
2499
+ * email: string;
2500
+ * password: string;
2501
+ * profile: {
2502
+ * firstName: string;
2503
+ * lastName: string;
2504
+ * dateOfBirth: string;
2505
+ * };
2506
+ * preferences: {
2507
+ * notifications: boolean;
2508
+ * newsletter: boolean;
2509
+ * };
2510
+ * }) => {
2511
+ * console.log(`Registering new user: ${registrationData.email}`);
2512
+ *
2513
+ * // Check if email already exists
2514
+ * const existingUser = await this.users.find(
2515
+ * { where: { email: registrationData.email } },
2516
+ * { tx }
2517
+ * );
2518
+ *
2519
+ * if (existingUser.length > 0) {
2520
+ * throw new Error(`User with email ${registrationData.email} already exists`);
2521
+ * }
2522
+ *
2523
+ * // Hash password
2524
+ * const hashedPassword = await this.hashPassword(registrationData.password);
2525
+ *
2526
+ * // Create user record
2527
+ * const user = await this.users.create({
2528
+ * id: generateUUID(),
2529
+ * email: registrationData.email,
2530
+ * passwordHash: hashedPassword,
2531
+ * isActive: true,
2532
+ * emailVerified: false
2533
+ * }, { tx });
2534
+ *
2535
+ * // Create user profile
2536
+ * const profile = await this.userProfiles.create({
2537
+ * id: generateUUID(),
2538
+ * userId: user.id,
2539
+ * firstName: registrationData.profile.firstName,
2540
+ * lastName: registrationData.profile.lastName,
2541
+ * dateOfBirth: registrationData.profile.dateOfBirth
2542
+ * }, { tx });
2543
+ *
2544
+ * // Create user preferences
2545
+ * const preferences = await this.userPreferences.create({
2546
+ * id: generateUUID(),
2547
+ * userId: user.id,
2548
+ * notifications: registrationData.preferences.notifications,
2549
+ * newsletter: registrationData.preferences.newsletter
2550
+ * }, { tx });
2551
+ *
2552
+ * // Create audit log entry
2553
+ * await this.auditLogs.create({
2554
+ * id: generateUUID(),
2555
+ * userId: user.id,
2556
+ * action: 'user_registered',
2557
+ * details: { email: user.email },
2558
+ * timestamp: new Date().toISOString()
2559
+ * }, { tx });
2560
+ *
2561
+ * console.log(`User registration completed: ${user.id}`);
2562
+ *
2563
+ * return {
2564
+ * userId: user.id,
2565
+ * email: user.email,
2566
+ * profile: {
2567
+ * firstName: profile.firstName,
2568
+ * lastName: profile.lastName
2569
+ * }
2570
+ * };
2571
+ * }
2572
+ * });
2573
+ * }
2574
+ * ```
2575
+ *
2576
+ * @example
2577
+ * **Data migration with progress tracking:**
2578
+ * ```ts
2579
+ * class MigrationService {
2580
+ * migrateUserData = $transaction({
2581
+ * config: {
2582
+ * isolationLevel: 'read_committed',
2583
+ * accessMode: 'read_write'
2584
+ * },
2585
+ * handler: async (tx, batchSize: number = 1000) => {
2586
+ * console.log(`Starting data migration with batch size ${batchSize}`);
2587
+ *
2588
+ * let totalMigrated = 0;
2589
+ * let hasMore = true;
2590
+ * let offset = 0;
2591
+ *
2592
+ * while (hasMore) {
2593
+ * // Get batch of users to migrate
2594
+ * const users = await this.legacyUsers.find({
2595
+ * limit: batchSize,
2596
+ * offset,
2597
+ * sort: { id: 'asc' }
2598
+ * }, { tx });
2599
+ *
2600
+ * if (users.length === 0) {
2601
+ * hasMore = false;
2602
+ * break;
2603
+ * }
2604
+ *
2605
+ * // Process each user in the batch
2606
+ * for (const legacyUser of users) {
2607
+ * try {
2608
+ * // Transform legacy data to new format
2609
+ * const newUser = {
2610
+ * id: generateUUID(),
2611
+ * email: legacyUser.email_address,
2612
+ * firstName: legacyUser.first_name,
2613
+ * lastName: legacyUser.last_name,
2614
+ * createdAt: legacyUser.created_date,
2615
+ * isActive: legacyUser.status === 'active'
2616
+ * };
2617
+ *
2618
+ * // Create new user record
2619
+ * await this.users.create(newUser, { tx });
2620
+ *
2621
+ * // Mark legacy user as migrated
2622
+ * await this.legacyUsers.updateById(
2623
+ * legacyUser.id,
2624
+ * {
2625
+ * migrated: true,
2626
+ * migratedAt: new Date().toISOString(),
2627
+ * newUserId: newUser.id
2628
+ * },
2629
+ * { tx }
2630
+ * );
2631
+ *
2632
+ * totalMigrated++;
2633
+ *
2634
+ * } catch (error) {
2635
+ * console.error(`Failed to migrate user ${legacyUser.id}:`, error.message);
2636
+ *
2637
+ * // Log failed migration
2638
+ * await this.migrationErrors.create({
2639
+ * id: generateUUID(),
2640
+ * legacyUserId: legacyUser.id,
2641
+ * error: error.message,
2642
+ * attemptedAt: new Date().toISOString()
2643
+ * }, { tx });
2644
+ * }
2645
+ * }
2646
+ *
2647
+ * offset += batchSize;
2648
+ * console.log(`Migrated ${totalMigrated} users so far...`);
2649
+ * }
2650
+ *
2651
+ * // Update migration status
2652
+ * await this.migrationStatus.updateById(
2653
+ * 'user_migration',
2654
+ * {
2655
+ * totalMigrated,
2656
+ * completedAt: new Date().toISOString(),
2657
+ * status: 'completed'
2658
+ * },
2659
+ * { tx }
2660
+ * );
2661
+ *
2662
+ * console.log(`Migration completed. Total migrated: ${totalMigrated}`);
2663
+ *
2664
+ * return { totalMigrated };
2665
+ * }
2666
+ * });
2667
+ * }
2668
+ * ```
2669
+ *
2670
+ * **Important Notes**:
2671
+ * - All operations within the transaction handler are atomic
2672
+ * - Automatic retry on `PgVersionMismatchError` for optimistic locking
2673
+ * - Pass `{ tx }` option to all repository operations within the transaction
2674
+ * - Transactions are automatically rolled back on any unhandled error
2675
+ * - Use appropriate isolation levels based on your consistency requirements
2676
+ *
1249
2677
  * @stability 2
1250
2678
  */
1251
2679
  declare const $transaction: <T extends any[], R>(opts: TransactionDescriptorOptions<T, R>) => _alepha_retry0.RetryDescriptorFn<(...args: T) => Promise<R>>;
1252
2680
  interface TransactionDescriptorOptions<T extends any[], R> {
2681
+ /**
2682
+ * Transaction handler function that contains all database operations to be executed atomically.
2683
+ *
2684
+ * This function:
2685
+ * - Receives a transaction object as the first parameter
2686
+ * - Should pass the transaction to all repository operations via `{ tx }` option
2687
+ * - All operations within are automatically rolled back if any error occurs
2688
+ * - Has access to the full Alepha dependency injection container
2689
+ * - Will be automatically retried if a `PgVersionMismatchError` occurs
2690
+ *
2691
+ * **Transaction Guidelines**:
2692
+ * - Keep transactions as short as possible to minimize lock contention
2693
+ * - Always pass the `tx` parameter to repository operations
2694
+ * - Handle expected business errors gracefully
2695
+ * - Log important operations for debugging and audit trails
2696
+ * - Consider the impact of long-running transactions on performance
2697
+ *
2698
+ * **Error Handling**:
2699
+ * - Throwing any error will automatically roll back the transaction
2700
+ * - `PgVersionMismatchError` triggers automatic retry logic
2701
+ * - Other database errors will be propagated after rollback
2702
+ * - Use try-catch within the handler for business-specific error handling
2703
+ *
2704
+ * @param tx - The PostgreSQL transaction object to use for all database operations
2705
+ * @param ...args - Additional arguments passed to the transaction function
2706
+ * @returns Promise resolving to the transaction result
2707
+ *
2708
+ * @example
2709
+ * ```ts
2710
+ * handler: async (tx, orderId: string, newStatus: string) => {
2711
+ * // Get the current order (with transaction)
2712
+ * const order = await this.orders.findById(orderId, { tx });
2713
+ *
2714
+ * // Validate business rules
2715
+ * if (!this.isValidStatusTransition(order.status, newStatus)) {
2716
+ * throw new Error(`Invalid status transition: ${order.status} -> ${newStatus}`);
2717
+ * }
2718
+ *
2719
+ * // Update order status (with transaction)
2720
+ * const updatedOrder = await this.orders.updateById(
2721
+ * orderId,
2722
+ * { status: newStatus },
2723
+ * { tx }
2724
+ * );
2725
+ *
2726
+ * // Create audit log (with transaction)
2727
+ * await this.auditLogs.create({
2728
+ * id: generateUUID(),
2729
+ * entityId: orderId,
2730
+ * action: 'status_change',
2731
+ * oldValue: order.status,
2732
+ * newValue: newStatus,
2733
+ * timestamp: new Date().toISOString()
2734
+ * }, { tx });
2735
+ *
2736
+ * return updatedOrder;
2737
+ * }
2738
+ * ```
2739
+ */
1253
2740
  handler: (tx: PgTransaction<any, any, any>, ...args: T) => Promise<R>;
2741
+ /**
2742
+ * PostgreSQL transaction configuration options.
2743
+ *
2744
+ * This allows you to customize transaction behavior including:
2745
+ * - **Isolation Level**: Controls visibility of concurrent transaction changes
2746
+ * - **Access Mode**: Whether the transaction is read-only or read-write
2747
+ * - **Deferrable**: For serializable transactions, allows deferring to avoid conflicts
2748
+ *
2749
+ * **Isolation Levels**:
2750
+ * - **read_uncommitted**: Lowest level; PostgreSQL treats it as read_committed (rarely used)
2751
+ * - **read_committed**: Default level, prevents dirty reads
2752
+ * - **repeatable_read**: Prevents dirty and non-repeatable reads
2753
+ * - **serializable**: Highest isolation, full ACID compliance
2754
+ *
2755
+ * **Access Modes**:
2756
+ * - **read_write**: Default, allows both read and write operations
2757
+ * - **read_only**: Only allows read operations, can provide performance benefits
2758
+ *
2759
+ * **When to Use Different Isolation Levels**:
2760
+ * - **read_committed**: Most common operations, good balance of consistency and performance
2761
+ * - **repeatable_read**: When you need consistent reads throughout the transaction
2762
+ * - **serializable**: Critical financial operations, when absolute consistency is required
2763
+ *
2764
+ * @example
2765
+ * ```ts
2766
+ * config: {
2767
+ * isolationLevel: 'serializable', // Highest consistency for financial operations
2768
+ * accessMode: 'read_write'
2769
+ * }
2770
+ * ```
2771
+ *
2772
+ * @example
2773
+ * ```ts
2774
+ * config: {
2775
+ * isolationLevel: 'read_committed', // Default level for most operations
2776
+ * accessMode: 'read_only' // Performance optimization for read-only operations
2777
+ * }
2778
+ * ```
2779
+ */
1254
2780
  config?: PgTransactionConfig$1;
1255
2781
  }
1256
2782
  type TransactionContext = PgTransaction<any, any, any>;
1257
- //# sourceMappingURL=$transaction.d.ts.map
1258
2783
  //#endregion
1259
2784
  //#region src/errors/PgEntityNotFoundError.d.ts
1260
- declare class PgEntityNotFoundError extends AlephaError {
2785
+ declare class PgEntityNotFoundError extends PgError {
1261
2786
  readonly name = "EntityNotFoundError";
1262
2787
  readonly status = 404;
1263
2788
  constructor(entityName: string);
1264
2789
  }
1265
- //# sourceMappingURL=PgEntityNotFoundError.d.ts.map
1266
2790
  //#endregion
1267
- //#region src/providers/RepositoryDescriptorProvider.d.ts
1268
- declare class RepositoryDescriptorProvider {
1269
- protected readonly log: _alepha_core13.Logger;
2791
+ //#region src/providers/RepositoryProvider.d.ts
2792
+ declare class RepositoryProvider {
2793
+ protected readonly log: _alepha_logger1.Logger;
1270
2794
  protected readonly alepha: Alepha;
1271
- get repositories(): RepositoryDescriptor<TableConfig$1, TObject$1>[];
1272
- constructor();
1273
- clearRepositories(): Promise<void>;
2795
+ protected get repositories(): RepositoryDescriptor<TableConfig$1, TObject$1>[];
1274
2796
  /**
1275
2797
  * Get all repositories.
1276
2798
  *
@@ -1282,19 +2804,18 @@ declare class RepositoryDescriptorProvider {
1282
2804
  *
1283
2805
  * @param provider
1284
2806
  */
1285
- getTables(provider?: PostgresProvider): drizzle_orm_pg_core4.PgTableWithColumns<TableConfig$1>[];
2807
+ getTables(provider?: PostgresProvider): pg$1.PgTableWithColumns<TableConfig$1>[];
1286
2808
  /**
1287
- * Get all providers from the repositories.
2809
+ * Get all pg providers from the repositories.
1288
2810
  */
1289
2811
  getProviders(): PostgresProvider[];
1290
2812
  }
1291
- //# sourceMappingURL=RepositoryDescriptorProvider.d.ts.map
1292
2813
  //#endregion
1293
2814
  //#region src/providers/DrizzleKitProvider.d.ts
1294
2815
  declare class DrizzleKitProvider {
1295
- protected readonly log: _alepha_core1.Logger;
2816
+ protected readonly log: _alepha_logger1.Logger;
1296
2817
  protected readonly alepha: Alepha;
1297
- protected readonly repositoryProvider: RepositoryDescriptorProvider;
2818
+ protected readonly repositoryProvider: RepositoryProvider;
1298
2819
  push(provider: PostgresProvider, schema?: string): Promise<void>;
1299
2820
  setPgSchema(provider: PostgresProvider): Promise<void>;
1300
2821
  /**
@@ -1325,7 +2846,6 @@ declare class DrizzleKitProvider {
1325
2846
  */
1326
2847
  protected importDrizzleKit(): Promise<typeof DrizzleKit>;
1327
2848
  }
1328
- //# sourceMappingURL=DrizzleKitProvider.d.ts.map
1329
2849
  //#endregion
1330
2850
  //#region src/providers/drivers/NodePostgresProvider.d.ts
1331
2851
  declare module "alepha" {
@@ -1339,13 +2859,13 @@ declare const envSchema: TObject$1<{
1339
2859
  * or
1340
2860
  * Example: postgres://user:password@localhost:5432/database?sslmode=require
1341
2861
  */
1342
- DATABASE_URL: _alepha_core2.TOptional<_alepha_core2.TString>;
2862
+ DATABASE_URL: _alepha_core1.TOptional<_alepha_core1.TString>;
1343
2863
  /**
1344
2864
  * In addition to the DATABASE_URL, you can specify the postgres schema name.
1345
2865
  *
1346
2866
  * It will monkey patch drizzle tables.
1347
2867
  */
1348
- POSTGRES_SCHEMA: _alepha_core2.TOptional<_alepha_core2.TString>;
2868
+ POSTGRES_SCHEMA: _alepha_core1.TOptional<_alepha_core1.TString>;
1349
2869
  /**
1350
2870
  * Synchronize the database schema with the models.
1351
2871
  * It uses a custom implementation, it's not related to `drizzle-kit push` command.
@@ -1355,7 +2875,7 @@ declare const envSchema: TObject$1<{
1355
2875
  *
1356
2876
  * @default false
1357
2877
  */
1358
- POSTGRES_SYNCHRONIZE: _alepha_core2.TOptional<_alepha_core2.TBoolean>;
2878
+ POSTGRES_SYNCHRONIZE: _alepha_core1.TOptional<_alepha_core1.TBoolean>;
1359
2879
  /**
1360
2880
  * Push the schema to the database.
1361
2881
  * It's like `drizzle-kit push` command.
@@ -1363,7 +2883,7 @@ declare const envSchema: TObject$1<{
1363
2883
  *
1364
2884
  * @default false
1365
2885
  */
1366
- POSTGRES_PUSH: _alepha_core2.TOptional<_alepha_core2.TBoolean>;
2886
+ POSTGRES_PUSH: _alepha_core1.TOptional<_alepha_core1.TBoolean>;
1367
2887
  }>;
1368
2888
  interface NodePostgresProviderOptions {
1369
2889
  migrations: MigrationConfig;
@@ -1372,7 +2892,7 @@ interface NodePostgresProviderOptions {
1372
2892
  declare class NodePostgresProvider extends PostgresProvider {
1373
2893
  readonly dialect = "postgres";
1374
2894
  protected readonly sslModes: readonly ["require", "allow", "prefer", "verify-full"];
1375
- protected readonly log: _alepha_core2.Logger;
2895
+ protected readonly log: _alepha_logger1.Logger;
1376
2896
  protected readonly env: {
1377
2897
  DATABASE_URL?: string | undefined;
1378
2898
  POSTGRES_SCHEMA?: string | undefined;
@@ -1393,8 +2913,8 @@ declare class NodePostgresProvider extends PostgresProvider {
1393
2913
  */
1394
2914
  get schema(): string;
1395
2915
  get db(): PostgresJsDatabase;
1396
- protected readonly configure: _alepha_core2.HookDescriptor<"start">;
1397
- protected readonly stop: _alepha_core2.HookDescriptor<"stop">;
2916
+ protected readonly configure: _alepha_core1.HookDescriptor<"start">;
2917
+ protected readonly stop: _alepha_core1.HookDescriptor<"stop">;
1398
2918
  execute<T extends TObject$1 = any>(query: SQLLike, schema?: T): Promise<Array<T extends TObject$1 ? Static<T> : any>>;
1399
2919
  connect(): Promise<void>;
1400
2920
  close(): Promise<void>;
@@ -1426,62 +2946,6 @@ declare class NodePostgresProvider extends PostgresProvider {
1426
2946
  protected mapResult<T extends TObject$1 = any>(result: Array<any>, schema?: T): Array<T extends TObject$1 ? Static<T> : any>;
1427
2947
  }
1428
2948
  //#endregion
1429
- //#region src/helpers/pgAttr.d.ts
1430
- /**
1431
- * Type representation.
1432
- */
1433
- type PgAttr<T extends TSchema$1, TAttr extends PgSymbolKeys> = T & { [K in TAttr]: PgSymbols[K] };
1434
- //#endregion
1435
- //#region src/schemas/createdAtSchema.d.ts
1436
- declare const createdAtSchema: PgAttr<PgAttr<_sinclair_typebox9.TString, typeof PG_CREATED_AT>, typeof PG_DEFAULT>;
1437
- //# sourceMappingURL=createdAtSchema.d.ts.map
1438
-
1439
- //#endregion
1440
- //#region src/schemas/legacyIdSchema.d.ts
1441
- /**
1442
- * @deprecated Use `pg.primaryKey()` instead.
1443
- */
1444
- declare const legacyIdSchema: PgAttr<PgAttr<PgAttr<_sinclair_typebox10.TInteger, typeof PG_PRIMARY_KEY>, typeof PG_SERIAL>, typeof PG_DEFAULT>;
1445
- //# sourceMappingURL=legacyIdSchema.d.ts.map
1446
- //#endregion
1447
- //#region src/schemas/updatedAtSchema.d.ts
1448
- declare const updatedAtSchema: PgAttr<PgAttr<_sinclair_typebox11.TString, typeof PG_UPDATED_AT>, typeof PG_DEFAULT>;
1449
- //# sourceMappingURL=updatedAtSchema.d.ts.map
1450
-
1451
- //#endregion
1452
- //#region src/schemas/entitySchema.d.ts
1453
- /**
1454
- * Entity Schema.
1455
- *
1456
- * Add some common SQL properties to an object.
1457
- */
1458
- declare const entitySchema: TObject$1<{
1459
- id: PgAttr<PgAttr<PgAttr<_sinclair_typebox0.TInteger, typeof PG_PRIMARY_KEY>, typeof PG_SERIAL>, typeof PG_DEFAULT>;
1460
- createdAt: PgAttr<PgAttr<_sinclair_typebox0.TString, typeof PG_CREATED_AT>, typeof PG_DEFAULT>;
1461
- updatedAt: PgAttr<PgAttr<_sinclair_typebox0.TString, typeof PG_UPDATED_AT>, typeof PG_DEFAULT>;
1462
- }>;
1463
- /**
1464
- * TypeBox Entity Type.
1465
- */
1466
- type TEntity<T extends TProperties> = TObject$1<T & {
1467
- id: typeof legacyIdSchema;
1468
- createdAt: typeof createdAtSchema;
1469
- updatedAt: typeof updatedAtSchema;
1470
- }>;
1471
- /**
1472
- * The base entity.
1473
- */
1474
- type BaseEntity = Static<typeof entitySchema>;
1475
- /**
1476
- * The keys of the base entity.
1477
- */
1478
- type BaseEntityKeys = keyof BaseEntity;
1479
- /**
1480
- * The keys of the base entity.
1481
- */
1482
- declare const entityKeys: readonly ["id", "createdAt", "updatedAt"];
1483
- //# sourceMappingURL=entitySchema.d.ts.map
1484
- //#endregion
1485
2949
  //#region src/providers/PostgresTypeProvider.d.ts
1486
2950
  declare module "alepha" {
1487
2951
  interface TypeProvider {
@@ -1503,20 +2967,29 @@ declare class PostgresTypeProvider {
1503
2967
  */
1504
2968
  readonly uuidPrimaryKey: () => PgAttr<PgAttr<TString, typeof PG_PRIMARY_KEY>, typeof PG_DEFAULT>;
1505
2969
  /**
1506
- *
2970
+ * Creates a primary key for a given type. Supports:
2971
+ * - `t.int()` -> PG INT (default)
2972
+ * - `t.bigint()` -> PG BIGINT
2973
+ * - `t.uuid()` -> PG UUID
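+ *
+ * @example
+ * A minimal sketch of an entity using it (names are illustrative):
+ * ```ts
+ * const users = $entity({
+ * name: "users",
+ * schema: t.object({
+ * id: pg.primaryKey(), // INT primary key by default
+ * name: t.string(),
+ * }),
+ * });
+ * ```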
1507
2974
  */
1508
- primaryKey(type: TString): PgAttr<PgAttr<TString, PgPrimaryKey>, PgDefault>;
1509
- primaryKey(type: TInteger): PgAttr<PgAttr<TInteger, PgPrimaryKey>, PgDefault>;
1510
- primaryKey(type: TNumber): PgAttr<PgAttr<TNumber, PgPrimaryKey>, PgDefault>;
2975
+ primaryKey(): PgAttr<PgAttr<TInteger, PgPrimaryKey>, PgDefault>;
2976
+ primaryKey(type: TString, options?: StringOptions): PgAttr<PgAttr<TString, PgPrimaryKey>, PgDefault>;
2977
+ primaryKey(type: TInteger, options?: IntegerOptions, identity?: PgIdentityOptions): PgAttr<PgAttr<TInteger, PgPrimaryKey>, PgDefault>;
2978
+ primaryKey(type: TNumber, options?: NumberOptions, identity?: PgIdentityOptions): PgAttr<PgAttr<TNumber, PgPrimaryKey>, PgDefault>;
1511
2979
  /**
1512
2980
  * Wrap a schema with "default" attribute.
1513
2981
  * This is used to set a default value for a column in the database.
1514
2982
  */
1515
2983
  readonly default: <T extends TSchema$2>(type: T, value?: Static$1<T>) => PgAttr<T, PgDefault>;
1516
2984
  /**
1517
- * Creates a column version.
2985
+ * Creates a 'version' column.
2986
+ *
1518
2987
  * This is used to track the version of a row in the database.
1519
- * You can use it for optimistic concurrency control.
2988
+ *
2989
+ * You can use it for optimistic concurrency control (OCC) with {@link RepositoryDescriptor#save}.
2990
+ *
2991
+ * @see {@link RepositoryDescriptor#save}
2992
+ * @see {@link PgVersionMismatchError}
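+ *
+ * @example
+ * A minimal sketch (entity name and fields are illustrative):
+ * ```ts
+ * const documents = $entity({
+ * name: "documents",
+ * schema: t.object({
+ * id: pg.primaryKey(),
+ * title: t.string(),
+ * version: pg.version(), // checked by save(); a stale value raises PgVersionMismatchError
+ * }),
+ * });
+ * ```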
1520
2993
  */
1521
2994
  readonly version: (options?: IntegerOptions) => PgAttr<PgAttr<TInteger, typeof PG_VERSION>, typeof PG_DEFAULT>;
1522
2995
  /**
@@ -1528,51 +3001,37 @@ declare class PostgresTypeProvider {
1528
3001
  */
1529
3002
  readonly updatedAt: (options?: StringOptions) => PgAttr<PgAttr<TString, typeof PG_UPDATED_AT>, typeof PG_DEFAULT>;
1530
3003
  /**
1531
- * @deprecated Build your own entity schema.
3004
+ * Creates a 'deletedAt' column for soft-delete functionality.
3005
+ * This is used to mark rows as deleted without actually removing them from the database.
3006
+ * The column is nullable: NULL means not deleted, a timestamp means deleted.
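+ *
+ * @example
+ * A minimal sketch (entity name and fields are illustrative):
+ * ```ts
+ * const posts = $entity({
+ * name: "posts",
+ * schema: t.object({
+ * id: pg.primaryKey(),
+ * title: t.string(),
+ * deletedAt: pg.deletedAt(), // soft-delete marker; statements may bypass it with { force: true }
+ * }),
+ * });
+ * ```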
1532
3007
  */
1533
- readonly entity: <T extends TProperties>(properties: T, options?: ObjectOptions) => TEntity<T>;
3008
+ readonly deletedAt: (options?: StringOptions) => PgAttr<_sinclair_typebox0.TOptional<TString>, typeof PG_DELETED_AT>;
1534
3009
  /**
1535
- * Creates an insert schema for a given object schema.
1536
- * - pg.default will be optional
1537
- */
1538
- readonly insert: <T extends TObject$1>(obj: T) => TInsertObject<T>;
1539
- /**
1540
- * @alias insert
1541
- */
1542
- readonly input: <T extends TObject$1>(obj: T) => TInsertObject<T>;
1543
- /**
1544
- * Creates a page schema for a given object schema.
1545
- */
1546
- readonly page: <T extends TObject$1>(resource: T, options?: ObjectOptions) => TPage<T>;
1547
- /**
1548
- * Creates a reference to another table or schema.
3010
+ * Creates a reference to another table or schema, i.e. a foreign key.
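+ *
+ * @example
+ * A minimal sketch (both entities and the reference target are illustrative):
+ * ```ts
+ * const comments = $entity({
+ * name: "comments",
+ * schema: t.object({
+ * id: pg.primaryKey(),
+ * userId: pg.ref(t.string(), () => users, { onDelete: "cascade" }), // points at the referenced entity
+ * }),
+ * });
+ * ```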
1549
3011
  */
1550
3012
  readonly ref: <T extends TSchema$2>(type: T, ref: () => any, actions?: {
1551
3013
  onUpdate?: UpdateDeleteAction$1;
1552
3014
  onDelete?: UpdateDeleteAction$1;
1553
3015
  }) => PgAttr<T, PgRef>;
1554
3016
  /**
1555
- * @alias ref
1556
- */
1557
- references: <T extends TSchema$2>(type: T, ref: () => any, actions?: {
1558
- onUpdate?: UpdateDeleteAction$1;
1559
- onDelete?: UpdateDeleteAction$1;
1560
- }) => PgAttr<T, PgRef>;
1561
- /**
1562
- * Creates a reference to another table or schema with a foreign key.
1563
- *
1564
- * @experimental
3017
+ * Creates a page schema for a given object schema.
3018
+ * It's used by {@link RepositoryDescriptor#paginate} method.
1565
3019
  */
1566
- readonly many: <T extends TObject$1, Config extends TableConfig$1>(table: PgTableWithColumnsAndSchema<Config, T>, foreignKey: keyof T["properties"]) => TOptionalWithFlag<PgAttr<PgAttr<TArray<T>, PgMany>, PgDefault>, true>;
3020
+ readonly page: <T extends TObject$1>(resource: T, options?: ObjectOptions) => TPage<T>;
1567
3021
  }
1568
3022
  declare const pg: PostgresTypeProvider;
1569
- //# sourceMappingURL=PostgresTypeProvider.d.ts.map
3023
+ //#endregion
3024
+ //#region src/schemas/legacyIdSchema.d.ts
3025
+ /**
3026
+ * @deprecated Use `pg.primaryKey()` instead.
3027
+ */
3028
+ declare const legacyIdSchema: PgAttr<PgAttr<PgAttr<_sinclair_typebox0.TInteger, typeof PG_PRIMARY_KEY>, typeof PG_SERIAL>, typeof PG_DEFAULT>;
1570
3029
  //#endregion
1571
3030
  //#region src/types/schema.d.ts
1572
3031
  /**
1573
3032
  * Postgres schema type.
1574
3033
  */
1575
- declare const schema: <TDocument extends TSchema$2>(name: string, document: TDocument) => drizzle_orm9.$Type<drizzle_orm_pg_core3.PgCustomColumnBuilder<{
3034
+ declare const schema: <TDocument extends TSchema$2>(name: string, document: TDocument) => drizzle_orm6.$Type<pg$1.PgCustomColumnBuilder<{
1576
3035
  name: string;
1577
3036
  dataType: "custom";
1578
3037
  columnType: "PgCustomColumn";
@@ -1584,25 +3043,49 @@ declare const schema: <TDocument extends TSchema$2>(name: string, document: TDoc
1584
3043
  }>, (TDocument & {
1585
3044
  params: [];
1586
3045
  })["static"]>;
1587
- //# sourceMappingURL=schema.d.ts.map
1588
-
1589
3046
  //#endregion
1590
3047
  //#region src/index.d.ts
1591
3048
  /**
1592
- * Provides PostgreSQL (and SQLite!) database integration with type-safe ORM capabilities through Drizzle.
3049
+ * Postgres client based on Drizzle ORM, Alepha type-safe friendly.
1593
3050
  *
1594
- * The postgres module enables declarative database operations using descriptors like `$entity`, `$repository`.
1595
- * It offers automatic schema generation, type-safe queries, transactions,
1596
- * and database migrations with support for PostgreSQLs.
3051
+ * ```ts
3052
+ * const users = $entity({
3053
+ * name: "users",
3054
+ * schema: t.object({
3055
+ * id: pg.primaryKey(),
3056
+ * name: t.string(),
3057
+ * email: t.string(),
3058
+ * }),
3059
+ * });
3060
+ *
3061
+ * class Db {
3062
+ * users = $repository(users);
3063
+ * }
3064
+ *
3065
+ * const db = alepha.inject(Db);
3066
+ * const user = await db.users.findOne({ name: { eq: "John Doe" } });
3067
+ * ```
3068
+ *
3069
+ * This is not a full ORM, but rather a set of tools to work with Postgres databases in a type-safe way.
3070
+ *
3071
+ * It provides:
3072
+ * - A type-safe way to define entities and repositories. (via `$entity` and `$repository`)
3073
+ * - Custom query builders and filters.
3074
+ * - Built-in special columns like `createdAt`, `updatedAt`, `deletedAt`, `version`.
3075
+ * - Automatic JSONB support.
3076
+ * - Automatic synchronization of entities with the database schema (for testing and development).
3077
+ * - Fallback to raw SQL via Drizzle ORM `sql` function.
3078
+ *
3079
+ * Migrations are supported via Drizzle ORM; use the `drizzle-kit` CLI tool to generate and run them.
3080
+ *
3081
+ * Relations are **NOT SUPPORTED** yet. If you need relations, please use the `drizzle-orm` package directly.
1597
3082
  *
1598
3083
  * @see {@link $entity}
1599
3084
  * @see {@link $repository}
1600
3085
  * @see {@link $transaction}
1601
3086
  * @module alepha.postgres
1602
3087
  */
1603
- declare const AlephaPostgres: _alepha_core0.Service<_alepha_core0.Module>;
1604
- //# sourceMappingURL=index.d.ts.map
1605
-
3088
+ declare const AlephaPostgres: _alepha_core1.Service<_alepha_core1.Module>;
1606
3089
  //#endregion
1607
- export { $entity, $repository, $sequence, $transaction, AlephaPostgres, BaseEntity, BaseEntityKeys, DrizzleKitProvider, Entity, EntityDescriptorOptions, ExtractManyRelations, FilterOperators, FromSchema, NodePostgresProvider, NodePostgresProviderOptions, NullToUndefined, NullifyIfOptional, PG_CREATED_AT, PG_DEFAULT, PG_IDENTITY, PG_MANY, PG_ONE, PG_PRIMARY_KEY, PG_REF, PG_SCHEMA, PG_SERIAL, PG_UPDATED_AT, PG_VERSION, Page, PageQuery, PgAttrField, PgDefault, PgEntityNotFoundError, PgIdentityOptions, PgMany, PgManyOptions, PgPrimaryKey, PgQuery, PgQueryResult, PgQueryWhere, PgQueryWhereWithMany, PgQueryWith, PgQueryWithMap, PgRef, PgRefOptions, PgSymbolKeys, PgSymbols, PgTableConfig, PgTableWithColumnsAndSchema, PostgresProvider, PostgresTypeProvider, RemoveManyRelations, RepositoryDescriptor, RepositoryDescriptorOptions, RepositoryDescriptorProvider, SQLLike, SequenceDescriptor, SequenceDescriptorOptions, StatementOptions, TEntity, TInsertObject, TPage, TableLike, TransactionContext, TransactionDescriptorOptions, camelToSnakeCase, drizzle, entityKeys, entitySchema, mapFieldToColumn, mapStringToColumn, nullToUndefined, pageQuerySchema, pageSchema, pg, pgTableSchema, schema, schemaToPgColumns, sql };
3090
+ export { $entity, $repository, $sequence, $transaction, AlephaPostgres, DrizzleKitProvider, Entity, EntityDescriptorOptions, FilterOperators, FromSchema, NodePostgresProvider, NodePostgresProviderOptions, PG_CREATED_AT, PG_DEFAULT, PG_DELETED_AT, PG_IDENTITY, PG_PRIMARY_KEY, PG_REF, PG_SCHEMA, PG_SERIAL, PG_UPDATED_AT, PG_VERSION, Page, PageQuery, PgDefault, PgEntityNotFoundError, PgIdentityOptions, PgPrimaryKey, PgQuery, PgQueryResult, PgQueryWhere, PgQueryWhereOrSQL, PgRef, PgRefOptions, PgSymbolKeys, PgSymbols, PgTableConfig, PgTableWithColumnsAndSchema, PostgresProvider, PostgresTypeProvider, RepositoryDescriptor, RepositoryDescriptorOptions, RepositoryProvider, SQLLike, SequenceDescriptor, SequenceDescriptorOptions, StatementOptions, StaticDefaultEntry, StaticEntry, StaticInsert, TObjectInsert, TPage, TransactionContext, TransactionDescriptorOptions, camelToSnakeCase, drizzle_orm6 as drizzle, insertSchema, legacyIdSchema, mapFieldToColumn, mapStringToColumn, pageQuerySchema, pageSchema, pg, schema, schemaToPgColumns, sql };
1608
3091
  //# sourceMappingURL=index.d.ts.map