effect-qb 0.12.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81) hide show
  1. package/README.md +1294 -0
  2. package/dist/mysql.js +57575 -0
  3. package/dist/postgres.js +6303 -0
  4. package/package.json +42 -0
  5. package/src/internal/aggregation-validation.ts +57 -0
  6. package/src/internal/case-analysis.ts +50 -0
  7. package/src/internal/coercion-analysis.ts +30 -0
  8. package/src/internal/coercion-errors.ts +29 -0
  9. package/src/internal/coercion-kind.ts +32 -0
  10. package/src/internal/coercion-normalize.ts +7 -0
  11. package/src/internal/coercion-rules.ts +25 -0
  12. package/src/internal/column-state.ts +453 -0
  13. package/src/internal/column.ts +417 -0
  14. package/src/internal/datatypes/define.ts +44 -0
  15. package/src/internal/datatypes/lookup.ts +280 -0
  16. package/src/internal/datatypes/shape.ts +72 -0
  17. package/src/internal/derived-table.ts +149 -0
  18. package/src/internal/dialect.ts +30 -0
  19. package/src/internal/executor.ts +390 -0
  20. package/src/internal/expression-ast.ts +349 -0
  21. package/src/internal/expression.ts +325 -0
  22. package/src/internal/grouping-key.ts +82 -0
  23. package/src/internal/json/ast.ts +63 -0
  24. package/src/internal/json/errors.ts +13 -0
  25. package/src/internal/json/path.ts +227 -0
  26. package/src/internal/json/shape.ts +1 -0
  27. package/src/internal/json/types.ts +386 -0
  28. package/src/internal/mysql-dialect.ts +39 -0
  29. package/src/internal/mysql-renderer.ts +37 -0
  30. package/src/internal/plan.ts +64 -0
  31. package/src/internal/postgres-dialect.ts +34 -0
  32. package/src/internal/postgres-renderer.ts +40 -0
  33. package/src/internal/predicate-analysis.ts +71 -0
  34. package/src/internal/predicate-atom.ts +43 -0
  35. package/src/internal/predicate-branches.ts +40 -0
  36. package/src/internal/predicate-context.ts +279 -0
  37. package/src/internal/predicate-formula.ts +100 -0
  38. package/src/internal/predicate-key.ts +28 -0
  39. package/src/internal/predicate-nnf.ts +12 -0
  40. package/src/internal/predicate-normalize.ts +202 -0
  41. package/src/internal/projection-alias.ts +15 -0
  42. package/src/internal/projections.ts +101 -0
  43. package/src/internal/query-ast.ts +297 -0
  44. package/src/internal/query-factory.ts +6757 -0
  45. package/src/internal/query-requirements.ts +40 -0
  46. package/src/internal/query.ts +1590 -0
  47. package/src/internal/renderer.ts +102 -0
  48. package/src/internal/runtime-normalize.ts +344 -0
  49. package/src/internal/runtime-schema.ts +428 -0
  50. package/src/internal/runtime-value.ts +85 -0
  51. package/src/internal/schema-derivation.ts +131 -0
  52. package/src/internal/sql-expression-renderer.ts +1353 -0
  53. package/src/internal/table-options.ts +225 -0
  54. package/src/internal/table.ts +674 -0
  55. package/src/mysql/column.ts +30 -0
  56. package/src/mysql/datatypes/index.ts +6 -0
  57. package/src/mysql/datatypes/spec.ts +180 -0
  58. package/src/mysql/errors/catalog.ts +51662 -0
  59. package/src/mysql/errors/fields.ts +21 -0
  60. package/src/mysql/errors/index.ts +18 -0
  61. package/src/mysql/errors/normalize.ts +232 -0
  62. package/src/mysql/errors/requirements.ts +73 -0
  63. package/src/mysql/executor.ts +134 -0
  64. package/src/mysql/query.ts +189 -0
  65. package/src/mysql/renderer.ts +19 -0
  66. package/src/mysql/table.ts +157 -0
  67. package/src/mysql.ts +18 -0
  68. package/src/postgres/column.ts +20 -0
  69. package/src/postgres/datatypes/index.ts +8 -0
  70. package/src/postgres/datatypes/spec.ts +264 -0
  71. package/src/postgres/errors/catalog.ts +452 -0
  72. package/src/postgres/errors/fields.ts +48 -0
  73. package/src/postgres/errors/index.ts +4 -0
  74. package/src/postgres/errors/normalize.ts +209 -0
  75. package/src/postgres/errors/requirements.ts +65 -0
  76. package/src/postgres/errors/types.ts +38 -0
  77. package/src/postgres/executor.ts +131 -0
  78. package/src/postgres/query.ts +189 -0
  79. package/src/postgres/renderer.ts +29 -0
  80. package/src/postgres/table.ts +157 -0
  81. package/src/postgres.ts +18 -0
package/README.md ADDED
@@ -0,0 +1,1294 @@
1
+ # effect-qb
2
+
3
+ Type-safe SQL query construction for PostgreSQL and MySQL, with query plans that carry result shapes, nullability, dialect compatibility, and statement constraints in the type system.
4
+
5
+ ## Table of Contents
6
+
7
+ - [Overview](#overview)
8
+ - [Why effect-qb](#why-effect-qb)
9
+ - [Installation](#installation)
10
+ - [Choose An Entrypoint](#choose-an-entrypoint)
11
+ - [Quick Start](#quick-start)
12
+ - [Execution Model](#execution-model)
13
+ - [Feature Map](#feature-map)
14
+ - [Effect Schema Integration](#effect-schema-integration)
15
+ - [Core Concepts](#core-concepts)
16
+ - [Derived Table Schemas](#derived-table-schemas)
17
+ - [Tables And Columns](#tables-and-columns)
18
+ - [Plans, Not Strings](#plans-not-strings)
19
+ - [ResultRow vs RuntimeResultRow](#resultrow-vs-runtimeresultrow)
20
+ - [Schema-backed JSON Columns](#schema-backed-json-columns)
21
+ - [Dialect-specific Entrypoints](#dialect-specific-entrypoints)
22
+ - [Query Guide](#query-guide)
23
+ - [Selecting Data](#selecting-data)
24
+ - [Bringing Sources Into Scope](#bringing-sources-into-scope)
25
+ - [Filtering Rows](#filtering-rows)
26
+ - [Shaping Results](#shaping-results)
27
+ - [Aggregating](#aggregating)
28
+ - [Combining Queries](#combining-queries)
29
+ - [Controlling Result Sets](#controlling-result-sets)
30
+ - [Mutations](#mutations)
31
+ - [Insert](#insert)
32
+ - [Update](#update)
33
+ - [Delete](#delete)
34
+ - [Conflicts And Upserts](#conflicts-and-upserts)
35
+ - [Returning](#returning)
36
+ - [Data-modifying CTEs](#data-modifying-ctes)
37
+ - [Rendering And Execution](#rendering-and-execution)
38
+ - [Renderer](#renderer)
39
+ - [Executor](#executor)
40
+ - [Query-sensitive Error Channels](#query-sensitive-error-channels)
41
+ - [Transaction Helpers](#transaction-helpers)
42
+ - [Error Handling](#error-handling)
43
+ - [Catalogs And Normalization](#catalogs-and-normalization)
44
+ - [Query-capability Narrowing](#query-capability-narrowing)
45
+ - [Matching Errors In Application Code](#matching-errors-in-application-code)
46
+ - [Type Safety](#type-safety)
47
+ - [Complete-plan Enforcement](#complete-plan-enforcement)
48
+ - [Predicate-driven Narrowing](#predicate-driven-narrowing)
49
+ - [Join Optionality](#join-optionality)
50
+ - [Grouped Query Validation](#grouped-query-validation)
51
+ - [Dialect Compatibility](#dialect-compatibility)
52
+ - [JSON Schema Compatibility In Mutations](#json-schema-compatibility-in-mutations)
53
+ - [Readable Branded Type Errors](#readable-branded-type-errors)
54
+ - [Dialect Support](#dialect-support)
55
+ - [PostgreSQL](#postgresql)
56
+ - [MySQL](#mysql)
57
+ - [Limitations](#limitations)
58
+ - [Contributing](#contributing)
59
+
60
+ ## Overview
61
+
62
+ `effect-qb` builds immutable query plans and pushes the interesting parts of SQL into the type system:
63
+
64
+ - exact projection shapes
65
+ - nullability and predicate-driven narrowing
66
+ - join optionality
67
+ - aggregate and grouping validation
68
+ - dialect compatibility
69
+ - statement and execution result types
70
+
71
+ The main contract is enforced at compile time. `Query.ResultRow<typeof plan>` is the logical row type after query analysis, while `Query.RuntimeResultRow<typeof plan>` describes the conservative runtime remap shape. At runtime, the library renders SQL, executes it, and remaps aliased columns back into nested objects. It does not build or validate query-result schemas.
72
+
73
+ ## Why effect-qb
74
+
75
+ Use `effect-qb` when you want SQL plans to carry more than column names:
76
+
77
+ - exact nested projection shapes
78
+ - nullability refinement from predicates
79
+ - join optionality that changes with query structure
80
+ - grouped-query validation before SQL is rendered
81
+ - dialect-locked plans, renderers, and executor error channels
82
+
83
+ It is a query-construction library, not an ORM. It does not manage migrations, model identities, or runtime row decoding.
84
+
85
+ ## Installation
86
+
87
+ Install the library:
88
+
89
+ ```bash
90
+ bun add effect-qb
91
+ ```
92
+
93
+ For local development in this repository:
94
+
95
+ ```bash
96
+ bun install
97
+ ```
98
+
99
+ ## Choose An Entrypoint
100
+
101
+ Available entrypoints:
102
+
103
+ - `effect-qb`
104
+ - `effect-qb/postgres`
105
+ - `effect-qb/mysql`
106
+
107
+ Use `effect-qb` when Postgres defaults are acceptable and you want the shortest imports.
108
+
109
+ Use `effect-qb/postgres` when you want explicit Postgres branding throughout the plan, renderer, executor, datatypes, and errors.
110
+
111
+ Use `effect-qb/mysql` when you want the MySQL-specific DSL, renderer, executor, datatypes, and errors.
112
+
113
+ ## Quick Start
114
+
115
+ ```ts
116
+ import { Column as C, Query as Q, Renderer, Table } from "effect-qb"
117
+
118
+ const users = Table.make("users", {
119
+ id: C.uuid().pipe(C.primaryKey),
120
+ email: C.text()
121
+ })
122
+
123
+ const posts = Table.make("posts", {
124
+ id: C.uuid().pipe(C.primaryKey),
125
+ userId: C.uuid(),
126
+ title: C.text().pipe(C.nullable)
127
+ })
128
+
129
+ const postsPerUser = Q.select({
130
+ userId: users.id,
131
+ email: users.email,
132
+ postCount: Q.count(posts.id)
133
+ }).pipe(
134
+ Q.from(users),
135
+ Q.leftJoin(posts, Q.eq(users.id, posts.userId)),
136
+ Q.groupBy(users.id, users.email),
137
+ Q.orderBy(users.email)
138
+ )
139
+
140
+ type PostsPerUserRow = Q.ResultRow<typeof postsPerUser>
141
+ // {
142
+ // userId: string
143
+ // email: string
144
+ // postCount: number
145
+ // }
146
+
147
+ const rendered = Renderer.make().render(postsPerUser)
148
+ rendered.sql
149
+ rendered.params
150
+ ```
151
+
152
+ This is the core model: define typed tables, build a plan, let the plan define the row type, then render or execute it.
153
+
154
+ ## Execution Model
155
+
156
+ The runtime model is intentionally small:
157
+
158
+ 1. build a typed plan
159
+ 2. render SQL plus bind params
160
+ 3. execute the statement
161
+ 4. remap flat aliases like `profile__email` back into nested objects
162
+
163
+ What it does not do is decode rows against a runtime schema. `Q.ResultRow<typeof plan>` is the logical static result, while `Q.RuntimeResultRow<typeof plan>` is the conservative runtime shape.
164
+
165
+ ```ts
166
+ import * as SqlClient from "@effect/sql/SqlClient"
167
+ import * as Effect from "effect/Effect"
168
+ import * as Postgres from "effect-qb/postgres"
169
+
170
+ const renderer = Postgres.Renderer.make()
171
+ const executor = Postgres.Executor.fromSqlClient(renderer)
172
+
173
+ const rowsEffect = executor.execute(postsPerUser)
174
+
175
+ const rows = Effect.runSync(
176
+ Effect.provideService(rowsEffect, SqlClient.SqlClient, sqlClient)
177
+ )
178
+ ```
179
+
180
+ If you want runtime validation, add it after execution.
181
+
182
+ ## Feature Map
183
+
184
+ The rest of this README goes deeper, but the main surface area is:
185
+
186
+ - table builders with keys, indexes, nullability, defaults, and schema-backed JSON columns
187
+ - select plans with joins, CTEs, derived tables, `values(...)`, `unnest(...)`, subqueries, and set operators
188
+ - mutation plans for `insert`, `update`, `delete`, `returning`, and conflict handling
189
+ - renderers and executors for Postgres and MySQL
190
+ - type-level checks for missing sources, grouped selections, dialect compatibility, and JSON mutation compatibility
191
+
192
+ Dialect-specific capabilities are called out later. Postgres currently has the wider feature surface in a few areas such as `distinctOn(...)`, `generateSeries(...)`, and some JSON operators.
193
+
194
+ ## Effect Schema Integration
195
+
196
+ `effect-qb` is tightly integrated with `effect/Schema`.
197
+
198
+ That integration shows up in three places:
199
+
200
+ - built-in columns are backed by Effect Schema primitives like `Schema.String`, `Schema.UUID`, and `Schema.Date`
201
+ - custom and JSON columns can be defined directly from your own Effect Schemas
202
+ - every table derives runtime `select`, `insert`, and `update` schemas from its column definitions
203
+
204
+ There is no separate `C.schema(...)` helper. The schema-backed entrypoints are:
205
+
206
+ - `C.custom(schema, dbType)` for arbitrary non-JSON columns
207
+ - `C.json(schema)` for JSON columns
208
+ - `column.schema` when you need the underlying Effect Schema attached to a column definition
209
+
210
+ This means the same table definition drives:
211
+
212
+ - SQL construction
213
+ - static TypeScript row and payload types
214
+ - runtime validation for table-shaped inputs
215
+
216
+ Example:
217
+
218
+ ```ts
219
+ import * as Schema from "effect/Schema"
220
+ import { Column as C, Table } from "effect-qb"
221
+
222
+ const UserProfile = Schema.Struct({
223
+ displayName: Schema.String,
224
+ bio: Schema.NullOr(Schema.String)
225
+ })
226
+
227
+ const users = Table.make("users", {
228
+ id: C.uuid().pipe(C.primaryKey, C.generated),
229
+ email: C.text(),
230
+ profile: C.json(UserProfile),
231
+ createdAt: C.timestamp().pipe(C.hasDefault),
232
+ status: C.custom(
233
+ Schema.Literal("active", "disabled"),
234
+ { dialect: "postgres", kind: "text" } as const
235
+ )
236
+ })
237
+ ```
238
+
239
+ From that one definition you get:
240
+
241
+ - bound SQL columns like `users.email`
242
+ - a schema for reading table rows: `users.schemas.select`
243
+ - a schema for inserts that respects generated/default/nullable columns: `users.schemas.insert`
244
+ - a schema for updates that excludes primary keys and generated columns: `users.schemas.update`
245
+
246
+ The helper types line up with those schemas:
247
+
248
+ ```ts
249
+ type UserSelect = Table.SelectOf<typeof users>
250
+ type UserInsert = Table.InsertOf<typeof users>
251
+ type UserUpdate = Table.UpdateOf<typeof users>
252
+ ```
253
+
254
+ One important boundary: table schemas are runtime schemas for table-shaped data, while query plans remain schema-free at execution time. `effect-qb` derives and exposes Effect Schemas for tables and JSON columns, but `executor.execute(plan)` still remaps rows without decoding arbitrary query results through `effect/Schema`.
255
+
256
+ ## Core Concepts
257
+
258
+ ### Derived Table Schemas
259
+
260
+ Every table exposes derived Effect Schemas:
261
+
262
+ ```ts
263
+ import * as Schema from "effect/Schema"
264
+
265
+ const users = Table.make("users", {
266
+ id: C.uuid().pipe(C.primaryKey, C.generated),
267
+ email: C.text().pipe(C.unique),
268
+ bio: C.text().pipe(C.nullable),
269
+ createdAt: C.timestamp().pipe(C.hasDefault)
270
+ })
271
+
272
+ Schema.isSchema(users.schemas.select)
273
+ Schema.isSchema(users.schemas.insert)
274
+ Schema.isSchema(users.schemas.update)
275
+ ```
276
+
277
+ Those schemas are derived from column metadata, not maintained separately.
278
+
279
+ - `select` includes every column, with nullable columns wrapped in `Schema.NullOr(...)`
280
+ - `insert` omits generated columns and makes nullable/defaulted columns optional
281
+ - `update` omits generated columns and primary-key columns and makes the remaining columns optional
282
+
283
+ This is the main runtime bridge between the SQL DSL and Effect Schema. You can validate table payloads with the derived schemas without duplicating the model elsewhere.
284
+
285
+ ### Tables And Columns
286
+
287
+ Tables are typed sources, not loose name strings. Columns carry DB types, nullability, defaults, keys, and schema-backed JSON information.
288
+
289
+ ```ts
290
+ import * as Schema from "effect/Schema"
291
+ import { Column as C, Table } from "effect-qb"
292
+
293
+ const users = Table.make("users", {
294
+ id: C.uuid().pipe(C.primaryKey),
295
+ email: C.text(),
296
+ profile: C.json(Schema.Struct({
297
+ displayName: Schema.String,
298
+ bio: Schema.NullOr(Schema.String)
299
+ }))
300
+ })
301
+ ```
302
+
303
+ Schema-qualified tables are also typed:
304
+
305
+ ```ts
306
+ const analytics = Table.schema("analytics")
307
+
308
+ const events = analytics.table("events", {
309
+ id: C.uuid().pipe(C.primaryKey),
310
+ userId: C.uuid()
311
+ })
312
+ ```
313
+
314
+ ### Plans, Not Strings
315
+
316
+ `effect-qb` does not build rows from ad hoc string fragments. It builds typed plans. Partial plans are allowed while assembling a query, but rendering and execution require a complete plan.
317
+
318
+ That distinction is important:
319
+
320
+ - you can reference sources before they are in scope while composing
321
+ - the type system tracks what is still missing
322
+ - `render(...)`, `execute(...)`, and `Q.CompletePlan<typeof plan>` are the enforcement boundary
323
+
324
+ ### ResultRow vs RuntimeResultRow
325
+
326
+ `Q.ResultRow<typeof plan>` is the logical result type after static analysis. It includes things like:
327
+
328
+ - `where(isNotNull(...))` nullability refinement
329
+ - left-join promotion when predicates prove presence
330
+ - grouped-query validation
331
+ - branch pruning for expressions like `case()`
332
+
333
+ `Q.RuntimeResultRow<typeof plan>` is intentionally more conservative. It describes the schema-free runtime remap path only.
334
+
335
+ ```ts
336
+ const guaranteedPost = Q.select({
337
+ userId: users.id,
338
+ postId: posts.id
339
+ }).pipe(
340
+ Q.from(users),
341
+ Q.leftJoin(posts, Q.eq(users.id, posts.userId)),
342
+ Q.where(Q.isNotNull(posts.id))
343
+ )
344
+
345
+ type LogicalRow = Q.ResultRow<typeof guaranteedPost>
346
+ // {
347
+ // userId: string
348
+ // postId: string
349
+ // }
350
+
351
+ type RuntimeRow = Q.RuntimeResultRow<typeof guaranteedPost>
352
+ // {
353
+ // userId: string
354
+ // postId: string | null
355
+ // }
356
+ ```
357
+
358
+ ### Schema-backed JSON Columns
359
+
360
+ JSON columns can carry a schema. That schema feeds:
361
+
362
+ - JSON path typing
363
+ - JSON manipulation result typing
364
+ - insert/update compatibility checks
365
+
366
+ ```ts
367
+ import * as Schema from "effect/Schema"
368
+
369
+ const docs = Table.make("docs", {
370
+ id: C.uuid().pipe(C.primaryKey),
371
+ payload: C.json(Schema.Struct({
372
+ profile: Schema.Struct({
373
+ address: Schema.Struct({
374
+ city: Schema.String,
375
+ postcode: Schema.NullOr(Schema.String)
376
+ })
377
+ })
378
+ }))
379
+ })
380
+
381
+ const cityPath = Q.json.path(
382
+ Q.json.key("profile"),
383
+ Q.json.key("address"),
384
+ Q.json.key("city")
385
+ )
386
+
387
+ const city = Q.json.get(docs.payload, cityPath)
388
+ type City = Q.OutputOfExpression<typeof city, {
389
+ readonly docs: {
390
+ readonly name: "docs"
391
+ readonly mode: "required"
392
+ }
393
+ }>
394
+ // string
395
+ ```
396
+
397
+ ### Dialect-specific Entrypoints
398
+
399
+ The root entrypoint defaults to the Postgres-flavored `Query` and `Table` DSLs:
400
+
401
+ ```ts
402
+ import { Query as Q, Table } from "effect-qb"
403
+ ```
404
+
405
+ Dialect entrypoints expose dialect-specific builders:
406
+
407
+ ```ts
408
+ import * as Postgres from "effect-qb/postgres"
409
+ import * as Mysql from "effect-qb/mysql"
410
+ ```
411
+
412
+ This matters for:
413
+
414
+ - dialect-locked tables and columns
415
+ - dialect-only features like Postgres `distinctOn(...)`
416
+ - dialect-specific renderers and executors
417
+ - dialect-specific error unions
418
+
419
+ ## Query Guide
420
+
421
+ ### Selecting Data
422
+
423
+ Selections define the result type directly. Nested objects stay nested in the row type.
424
+
425
+ ```ts
426
+ const listUsers = Q.select({
427
+ id: users.id,
428
+ profile: {
429
+ email: users.email
430
+ },
431
+ hasPosts: Q.literal(true)
432
+ }).pipe(
433
+ Q.from(users)
434
+ )
435
+
436
+ type ListUsersRow = Q.ResultRow<typeof listUsers>
437
+ // {
438
+ // id: string
439
+ // profile: {
440
+ // email: string
441
+ // }
442
+ // hasPosts: boolean
443
+ // }
444
+ ```
445
+
446
+ Projection typing is local. You usually do not need to define row interfaces yourself.
447
+
448
+ ### Bringing Sources Into Scope
449
+
450
+ `from(...)` and joins make referenced sources available to the plan. Derived tables, CTEs, and correlated sources stay typed.
451
+
452
+ ```ts
453
+ const activePosts = Q.as(
454
+ Q.select({
455
+ userId: posts.userId,
456
+ title: posts.title
457
+ }).pipe(
458
+ Q.from(posts),
459
+ Q.where(Q.isNotNull(posts.title))
460
+ ),
461
+ "active_posts"
462
+ )
463
+
464
+ const usersWithPosts = Q.select({
465
+ userId: users.id,
466
+ title: activePosts.title
467
+ }).pipe(
468
+ Q.from(users),
469
+ Q.innerJoin(activePosts, Q.eq(users.id, activePosts.userId))
470
+ )
471
+
472
+ type UsersWithPostsRow = Q.ResultRow<typeof usersWithPosts>
473
+ // {
474
+ // userId: string
475
+ // title: string
476
+ // }
477
+ ```
478
+
479
+ The same source story applies to:
480
+
481
+ - `Q.with(subquery, alias)`
482
+ - `Q.withRecursive(subquery, alias)`
483
+ - `Q.lateral(subquery, alias)`
484
+ - `Q.values(...)`
485
+ - `Q.unnest(...)`
486
+
487
+ ### Filtering Rows
488
+
489
+ Predicates do more than render SQL. They can narrow result types.
490
+
491
+ ```ts
492
+ const allPosts = Q.select({
493
+ title: posts.title,
494
+ upperTitle: Q.upper(posts.title)
495
+ }).pipe(
496
+ Q.from(posts)
497
+ )
498
+
499
+ type AllPostsRow = Q.ResultRow<typeof allPosts>
500
+ // {
501
+ // title: string | null
502
+ // upperTitle: string | null
503
+ // }
504
+
505
+ const titledPosts = Q.select({
506
+ title: posts.title,
507
+ upperTitle: Q.upper(posts.title)
508
+ }).pipe(
509
+ Q.from(posts),
510
+ Q.where(Q.isNotNull(posts.title))
511
+ )
512
+
513
+ type TitledPostsRow = Q.ResultRow<typeof titledPosts>
514
+ // {
515
+ // title: string
516
+ // upperTitle: string
517
+ // }
518
+ ```
519
+
520
+ That same narrowing feeds:
521
+
522
+ - `coalesce(...)`
523
+ - `case()`
524
+ - `match(...)`
525
+ - joined-source promotion
526
+
527
+ ### Shaping Results
528
+
529
+ The expression surface is large, but the important point is that result-shaping expressions stay typed.
530
+
531
+ ```ts
532
+ import * as Schema from "effect/Schema"
533
+
534
+ const docs = Table.make("docs", {
535
+ id: C.uuid().pipe(C.primaryKey),
536
+ payload: C.json(Schema.Struct({
537
+ profile: Schema.Struct({
538
+ address: Schema.Struct({
539
+ city: Schema.String
540
+ })
541
+ })
542
+ }))
543
+ })
544
+
545
+ const cityPath = Q.json.path(
546
+ Q.json.key("profile"),
547
+ Q.json.key("address"),
548
+ Q.json.key("city")
549
+ )
550
+
551
+ const shapedDocs = Q.select({
552
+ title: Q.case()
553
+ .when(Q.isNull(posts.title), "missing")
554
+ .else(Q.upper(posts.title)),
555
+ profileCity: Q.json.text(docs.payload, cityPath),
556
+ titleAsText: Q.cast(posts.title, Q.type.text())
557
+ }).pipe(
558
+ Q.from(posts),
559
+ Q.leftJoin(docs, Q.eq(posts.id, docs.id))
560
+ )
561
+ ```
562
+
563
+ The same JSON path object can be reused across:
564
+
565
+ - `Q.json.get(...)`
566
+ - `Q.json.text(...)`
567
+ - `Q.json.set(...)`
568
+ - `Q.json.insert(...)`
569
+ - `Q.json.delete(...)`
570
+ - `Q.json.pathExists(...)`
571
+
572
+ Comparison and cast safety are dialect-aware. Incompatible operands are rejected unless you make the conversion explicit with `Q.cast(...)`.
573
+
574
+ ### Aggregating
575
+
576
+ Grouped queries are checked structurally, not just by source provenance.
577
+
578
+ ```ts
579
+ const postsPerUser = Q.select({
580
+ userId: users.id,
581
+ postCount: Q.count(posts.id),
582
+ rowNumber: Q.over(Q.rowNumber(), {
583
+ partitionBy: [users.id],
584
+ orderBy: [{ value: users.id, direction: "asc" }]
585
+ })
586
+ }).pipe(
587
+ Q.from(users),
588
+ Q.leftJoin(posts, Q.eq(users.id, posts.userId)),
589
+ Q.groupBy(users.id)
590
+ )
591
+
592
+ type PostsPerUserRow = Q.ResultRow<typeof postsPerUser>
593
+ // {
594
+ // userId: string
595
+ // postCount: number
596
+ // rowNumber: number
597
+ // }
598
+ ```
599
+
600
+ Scalar selections must be covered by `groupBy(...)` when aggregates are present. Invalid grouped selections are rejected at the complete-plan boundary.
601
+
602
+ ### Combining Queries
603
+
604
+ Subqueries and set operators stay part of the same typed plan model.
605
+
606
+ ```ts
607
+ const postsByUser = Q.select({
608
+ id: posts.id
609
+ }).pipe(
610
+ Q.from(posts),
611
+ Q.where(Q.eq(posts.userId, users.id))
612
+ )
613
+
614
+ const usersWithPosts = Q.select({
615
+ userId: users.id,
616
+ hasPosts: Q.exists(postsByUser)
617
+ }).pipe(
618
+ Q.from(users)
619
+ )
620
+
621
+ type UsersWithPostsRow = Q.ResultRow<typeof usersWithPosts>
622
+ // {
623
+ // userId: string
624
+ // hasPosts: boolean
625
+ // }
626
+ ```
627
+
628
+ Set operators require compatible row shapes:
629
+
630
+ - `Q.union(...)`
631
+ - `Q.unionAll(...)`
632
+ - `Q.intersect(...)`
633
+ - `Q.intersectAll(...)`
634
+ - `Q.except(...)`
635
+ - `Q.exceptAll(...)`
636
+
637
+ ### Controlling Result Sets
638
+
639
+ Ordering and result-set controls are regular plan transforms:
640
+
641
+ ```ts
642
+ const recentUsers = Q.select({
643
+ id: users.id,
644
+ email: users.email
645
+ }).pipe(
646
+ Q.from(users),
647
+ Q.distinct(),
648
+ Q.orderBy(users.email),
649
+ Q.limit(10),
650
+ Q.offset(20)
651
+ )
652
+ ```
653
+
654
+ Postgres-only `distinct on` is available from the Postgres entrypoint:
655
+
656
+ ```ts
657
+ import * as Postgres from "effect-qb/postgres"
658
+
659
+ const recentEmails = Postgres.Query.select({
660
+ id: users.id,
661
+ email: users.email
662
+ }).pipe(
663
+ Postgres.Query.from(users),
664
+ Postgres.Query.distinctOn(users.email),
665
+ Postgres.Query.orderBy(users.email)
666
+ )
667
+ ```
668
+
669
+ ## Mutations
670
+
671
+ ### Insert
672
+
673
+ Single-row inserts are direct:
674
+
675
+ ```ts
676
+ const insertUser = Q.insert(users, {
677
+ id: "user-1",
678
+ email: "alice@example.com"
679
+ })
680
+ ```
681
+
682
+ Composable sources are available when the input rows come from elsewhere:
683
+
684
+ ```ts
685
+ const pendingUsers = Q.values([
686
+ { id: "user-1", email: "alice@example.com" },
687
+ { id: "user-2", email: "bob@example.com" }
688
+ ], "pending_users")
689
+
690
+ const insertMany = Q.insertFrom(users, pendingUsers)
691
+ ```
692
+
693
+ `insertFrom(...)` also accepts `select(...)`, `unnest(...)`, and other compatible sources.
694
+
695
+ ### Update
696
+
697
+ Updates stay expression-aware and can use joined sources where the dialect supports it.
698
+
699
+ ```ts
700
+ const updateUsers = Q.innerJoin(posts, Q.eq(posts.userId, users.id))(
701
+ Q.update(users, {
702
+ email: "has-posts@example.com"
703
+ })
704
+ )
705
+ ```
706
+
707
+ The assigned values still have to be type-compatible with the target columns.
708
+
709
+ ### Delete
710
+
711
+ Deletes keep their own statement kind and can also participate in typed conditions and `returning(...)`.
712
+
713
+ ```ts
714
+ const deleteUser = Q.delete(users).pipe(
715
+ Q.where(Q.eq(users.id, "user-1"))
716
+ )
717
+ ```
718
+
719
+ ### Conflicts And Upserts
720
+
721
+ Conflict handling is modeled as a composable modifier instead of a string escape hatch.
722
+
723
+ ```ts
724
+ const insertOrIgnore = Q.onConflict(["id"] as const, {
725
+ action: "doNothing"
726
+ })(Q.insert(users, {
727
+ id: "user-1",
728
+ email: "alice@example.com"
729
+ }))
730
+
731
+ const upsertUser = Q.upsert(users, {
732
+ id: "user-1",
733
+ email: "alice@example.com"
734
+ }, ["id"] as const, {
735
+ email: "alice@example.com"
736
+ })
737
+ ```
738
+
739
+ Conflict targets are checked against the target table.
740
+
741
+ ### Returning
742
+
743
+ Mutation plans can project typed rows with `returning(...)`.
744
+
745
+ ```ts
746
+ const insertedUser = Q.returning({
747
+ id: users.id,
748
+ email: users.email
749
+ })(Q.insert(users, {
750
+ id: "user-1",
751
+ email: "alice@example.com"
752
+ }))
753
+
754
+ type InsertedUserRow = Q.ResultRow<typeof insertedUser>
755
+ // {
756
+ // id: string
757
+ // email: string
758
+ // }
759
+ ```
760
+
761
+ ### Data-modifying CTEs
762
+
763
+ Write plans can feed later reads in the same statement:
764
+
765
+ ```ts
766
+ const insertedUsers = Q.with(
767
+ Q.returning({
768
+ id: users.id,
769
+ email: users.email
770
+ })(Q.insert(users, {
771
+ id: "user-1",
772
+ email: "alice@example.com"
773
+ })),
774
+ "inserted_users"
775
+ )
776
+
777
+ const insertedUsersPlan = Q.select({
778
+ id: insertedUsers.id,
779
+ email: insertedUsers.email
780
+ }).pipe(
781
+ Q.from(insertedUsers)
782
+ )
783
+ ```
784
+
785
+ This is one of the places where the capability model matters: write-bearing nested plans keep write-required dialect errors in the executor error channel.
786
+
787
+ ## Rendering And Execution
788
+
789
+ ### Renderer
790
+
791
+ ```ts
792
+ import * as Postgres from "effect-qb/postgres"
793
+
794
+ const rendered = Postgres.Renderer.make().render(postsPerUser)
795
+
796
+ rendered.sql
797
+ rendered.params
798
+ rendered.projections
799
+ ```
800
+
801
+ Rendered queries carry:
802
+
803
+ - SQL text
804
+ - ordered bind params
805
+ - projection metadata
806
+ - the row type as a phantom type
807
+
808
+ They do not carry a query-result schema.
809
+
810
+ ### Executor
811
+
812
+ ```ts
813
+ import * as Postgres from "effect-qb/postgres"
814
+
815
+ const renderer = Postgres.Renderer.make()
816
+ const executor = Postgres.Executor.fromSqlClient(renderer)
817
+
818
+ const rowsEffect = executor.execute(postsPerUser)
819
+
820
+ type Rows = Postgres.Query.ResultRows<typeof postsPerUser>
821
+ type Error = Postgres.Executor.PostgresQueryError<typeof postsPerUser>
822
+ ```
823
+
824
+ Execution is:
825
+
826
+ 1. render the plan
827
+ 2. execute SQL
828
+ 3. remap flat aliases into nested objects
829
+
830
+ There is no query-result schema decode stage.
831
+
832
+ ### Query-sensitive Error Channels
833
+
834
+ Dialect executors expose query-sensitive error unions:
835
+
836
+ - `Postgres.Executor.PostgresQueryError<typeof plan>`
837
+ - `Mysql.Executor.MysqlQueryError<typeof plan>`
838
+
839
+ Those types are narrower than the raw dialect error catalogs. For example, known write-only failures are removed from read-query error channels, while write-bearing plans retain them.
840
+
841
+ ### Transaction Helpers
842
+
843
+ ```ts
844
+ import { Executor } from "effect-qb"
845
+
846
+ const transactional = Executor.withTransaction(rowsEffect)
847
+ const savepoint = Executor.withSavepoint(rowsEffect)
848
+ ```
849
+
850
+ These helpers preserve the wrapped effect's original type parameters while running it inside the ambient SQL transaction (or savepoint) boundary.
851
+
852
+ ## Error Handling
853
+
854
+ The error system does more than expose raw driver failures. It gives you:
855
+
856
+ - generated dialect catalogs for known Postgres SQLSTATEs and MySQL error symbols
857
+ - normalization from driver-specific wire shapes into stable tagged unions
858
+ - rendered query context attached to execution failures when available
859
+ - query-capability narrowing so read-only plans do not expose write-only failures directly
860
+
861
+ These are not separate features bolted together: the built-in executors normalize every driver failure at the execution boundary, attach rendered-query context, preserve the raw payload, and then narrow the resulting error surface against the query plan's capabilities. As a result, runtime behavior and type-level behavior stay aligned.
862
+
863
+ ### Catalogs And Normalization
864
+
865
+ Both dialect entrypoints expose an `Errors` module:
866
+
867
+ ```ts
868
+ import * as Postgres from "effect-qb/postgres"
869
+ import * as Mysql from "effect-qb/mysql"
870
+ ```
871
+
872
+ The catalogs are backed by official vendor references:
873
+
874
+ - Postgres uses the SQLSTATE catalog from the current Appendix A docs
875
+ - MySQL uses the official server, client, and global error references
876
+
877
+ That means the tags and descriptor metadata are systematic, not handwritten one-offs.
878
+
879
+ Postgres errors normalize around SQLSTATE codes:
880
+
881
+ ```ts
882
+ const descriptor = Postgres.Errors.getPostgresErrorDescriptor("23505")
883
+ descriptor.tag
884
+ // "@postgres/integrity-constraint-violation/unique-violation"
885
+ descriptor.classCode
886
+ descriptor.className
887
+ descriptor.condition
888
+ descriptor.primaryFields
889
+
890
+ const postgresError = Postgres.Errors.normalizePostgresDriverError({
891
+ code: "23505",
892
+ message: "duplicate key value violates unique constraint",
893
+ constraint: "users_email_key"
894
+ })
895
+
896
+ postgresError._tag
897
+ postgresError.code
898
+ postgresError.constraintName
899
+ ```
900
+
901
+ MySQL errors normalize around official symbols and documented numbers:
902
+
903
+ ```ts
904
+ const descriptor = Mysql.Errors.getMysqlErrorDescriptor("ER_DUP_ENTRY")
905
+ descriptor.tag
906
+ // "@mysql/server/dup-entry"
907
+ descriptor.category
908
+ descriptor.number
909
+ descriptor.sqlState
910
+ descriptor.messageTemplate
911
+
912
+ const mysqlError = Mysql.Errors.normalizeMysqlDriverError({
913
+ code: "ER_DUP_ENTRY",
914
+ errno: 1062,
915
+ sqlState: "23000",
916
+ sqlMessage: "Duplicate entry 'alice@example.com' for key 'users.email'"
917
+ })
918
+
919
+ mysqlError._tag
920
+ mysqlError.symbol
921
+ mysqlError.number
922
+ ```
923
+
924
+ The two dialects are intentionally modeled differently:
925
+
926
+ - Postgres is SQLSTATE-first. Normalized errors expose `code`, `classCode`, `className`, `condition`, and the semantic fields associated with that SQLSTATE.
927
+ - MySQL is symbol-first. Normalized errors expose `symbol`, `number`, `category`, `documentedSqlState`, and the official message template from the generated catalog.
928
+
929
+ Normalization preserves structured fields where the driver provides them. For example:
930
+
931
+ - Postgres surfaces fields like `detail`, `hint`, `position`, `schemaName`, `tableName`, and `constraintName`
932
+ - MySQL surfaces fields like `errno`, `sqlState`, `sqlMessage`, `fatal`, `syscall`, `address`, and `port`
933
+
934
+ Normalized errors also preserve the original payload on `raw` for known and catalog-miss cases, so you can still reach driver-specific data without losing the stable tagged surface.
935
+
936
+ Unknown failures are still classified:
937
+
938
+ - Postgres uses `@postgres/unknown/sqlstate` for well-formed but uncataloged SQLSTATEs and `@postgres/unknown/driver` for non-Postgres failures
939
+ - MySQL uses `@mysql/unknown/code` for MySQL-like catalog misses and `@mysql/unknown/driver` for non-MySQL failures
940
+
941
+ That fallback behavior is deliberate. Future server versions can introduce new codes without collapsing the executor's error channel into a single generic `unknown` type.
942
+
943
+ The normalized runtime variants are:
944
+
945
+ - Postgres: known SQLSTATE error, unknown SQLSTATE error, unknown driver error
946
+ - MySQL: known catalog error, unknown MySQL code error, unknown driver error
947
+
948
+ When normalization happens during execution, the normalized error also carries `query.sql` and `query.params`.
949
+
950
+ One MySQL-specific detail: number lookups can be ambiguous because a single documented error number may correspond to multiple official symbols. The catalog API preserves that ambiguity by returning every matching descriptor instead of guessing one:
951
+
952
+ ```ts
953
+ const descriptors =
954
+ Mysql.Errors.findMysqlErrorDescriptorsByNumber("MY-015144")
955
+ ```
956
+
957
+ ### Query-capability Narrowing
958
+
959
+ Executors narrow their error channels based on what the plan is allowed to do.
960
+
961
+ This happens in the built-in `fromDriver(...)` and `fromSqlClient(...)` paths. They normalize the raw failure first, then decide whether the plan should expose the full dialect error surface or the read-only narrowed surface.
962
+
963
+ That matters most for read-only plans. If a raw driver error clearly requires write capabilities, the executor does not surface it directly on a read query. It wraps it in a query-requirements error instead:
964
+
965
+ - `@postgres/unknown/query-requirements`
966
+ - `@mysql/unknown/query-requirements`
967
+
968
+ Those wrappers include:
969
+
970
+ - `requiredCapabilities`
971
+ - `actualCapabilities`
972
+ - `cause`
973
+ - `query`
974
+
975
+ This makes the error channel honest about the plan you executed. A plain `select(...)` should not advertise direct unique-violation handling as though it were a write plan, even if the underlying driver returned one.
976
+
977
+ If the plan really is write-bearing, including write CTEs, the original normalized write error is preserved.
978
+
979
+ This is reflected at the type level too:
980
+
981
+ ```ts
982
+ type ReadError =
983
+ Postgres.Executor.PostgresQueryError<typeof readPlan>
984
+
985
+ type WriteError =
986
+ Postgres.Executor.PostgresQueryError<typeof writePlan>
987
+ ```
988
+
989
+ For a read-only plan, `ReadError` is the narrowed read-query surface. For a write-bearing plan, `WriteError` is the full normalized Postgres executor error surface. The MySQL executor follows the same rule.
990
+
991
+ You can also inspect requirements directly:
992
+
993
+ ```ts
994
+ const postgresRequirements =
995
+ Postgres.Errors.requirements_of_postgres_error(postgresError)
996
+
997
+ const mysqlRequirements =
998
+ Mysql.Errors.requirements_of_mysql_error(mysqlError)
999
+ ```
1000
+
1001
+ ### Matching Errors In Application Code
1002
+
1003
+ The executor error channel is intended to be pattern-matched, not string-parsed.
1004
+
1005
+ Use Effect tag handling for high-level branching:
1006
+
1007
+ ```ts
1008
+ import * as Effect from "effect/Effect"
1009
+
1010
+ const rows = executor.execute(plan).pipe(
1011
+ Effect.catchTag("@postgres/unknown/query-requirements", (error) =>
1012
+ Effect.fail(error.cause)
1013
+ )
1014
+ )
1015
+ ```
1016
+
1017
+ Use the dialect guards for precise narrowing inside shared helpers:
1018
+
1019
+ ```ts
1020
+ if (Postgres.Errors.hasSqlState(error, "23505")) {
1021
+ error.constraintName
1022
+ }
1023
+
1024
+ if (Mysql.Errors.hasSymbol(error, "ER_DUP_ENTRY")) {
1025
+ error.number
1026
+ }
1027
+
1028
+ if (Mysql.Errors.hasNumber(error, "1062")) {
1029
+ error.symbol
1030
+ }
1031
+ ```
1032
+
1033
+ The recommended pattern is:
1034
+
1035
+ - match `_tag` for application-level control flow
1036
+ - use `hasSqlState(...)`, `hasSymbol(...)`, or `hasNumber(...)` for dialect-specific detail work
1037
+ - fall back to `query`, `raw`, and structured fields when you need logging or translation
1038
+
1039
+ Because the tags are catalog-derived, they are stable enough to use as application error boundaries without inventing a second error taxonomy in your app.
1040
+
1041
+ In practice, the error flow is:
1042
+
1043
+ 1. driver throws some unknown failure
1044
+ 2. dialect normalizer turns it into a tagged dialect error
1045
+ 3. executor optionally narrows it against plan capabilities
1046
+ 4. application code matches on `_tag` or a dialect guard
1047
+ 5. application code decides whether to recover, rethrow, or translate the failure
1048
+
1049
+ ## Type Safety
1050
+
1051
+ This is the main reason to use `effect-qb`.
1052
+
1053
+ ### Complete-plan Enforcement
1054
+
1055
+ Partial plans are allowed while composing, but incomplete plans fail at the enforcement boundary.
1056
+
1057
+ ```ts
1058
+ const missingFrom = Q.select({
1059
+ userId: users.id
1060
+ })
1061
+
1062
+ type MissingFrom = Q.CompletePlan<typeof missingFrom>
1063
+ // {
1064
+ // __effect_qb_error__:
1065
+ // "effect-qb: query references sources that are not yet in scope"
1066
+ // __effect_qb_missing_sources__: "users"
1067
+ // __effect_qb_hint__:
1068
+ // "Add from(...) or a join for each referenced source before render or execute"
1069
+ // }
1070
+ ```
1071
+
1072
+ The same branded error shape applies when `where(...)`, joins, or projections reference sources that never enter scope.
1073
+
1074
+ ### Predicate-driven Narrowing
1075
+
1076
+ Predicates refine result types, not just SQL.
1077
+
1078
+ ```ts
1079
+ const filteredPosts = Q.select({
1080
+ title: posts.title,
1081
+ upperTitle: Q.upper(posts.title)
1082
+ }).pipe(
1083
+ Q.from(posts),
1084
+ Q.where(Q.isNotNull(posts.title))
1085
+ )
1086
+
1087
+ type FilteredPostsRow = Q.ResultRow<typeof filteredPosts>
1088
+ // {
1089
+ // title: string
1090
+ // upperTitle: string
1091
+ // }
1092
+ ```
1093
+
1094
+ This is one of the biggest differences between `ResultRow` and a hand-written row interface.
1095
+
1096
+ ### Join Optionality
1097
+
1098
+ Left joins start conservative: columns from the left-joined table are typed as nullable in the result row. Predicates can promote them back to non-null.
1099
+
1100
+ ```ts
1101
+ const maybePosts = Q.select({
1102
+ userId: users.id,
1103
+ postId: posts.id
1104
+ }).pipe(
1105
+ Q.from(users),
1106
+ Q.leftJoin(posts, Q.eq(users.id, posts.userId))
1107
+ )
1108
+
1109
+ type MaybePostsRow = Q.ResultRow<typeof maybePosts>
1110
+ // {
1111
+ // userId: string
1112
+ // postId: string | null
1113
+ // }
1114
+ ```
1115
+
1116
+ Add `where(Q.isNotNull(posts.id))` and the logical row type becomes non-null for `postId`.
1117
+
1118
+ ### Grouped Query Validation
1119
+
1120
+ Grouped queries are checked structurally:
1121
+
1122
+ ```ts
1123
+ const invalidGroupedPlan = Q.select({
1124
+ userId: users.id,
1125
+ title: posts.title,
1126
+ postCount: Q.count(posts.id)
1127
+ }).pipe(
1128
+ Q.from(users),
1129
+ Q.leftJoin(posts, Q.eq(users.id, posts.userId)),
1130
+ Q.groupBy(users.id)
1131
+ )
1132
+
1133
+ type InvalidGroupedPlan = Q.CompletePlan<typeof invalidGroupedPlan>
1134
+ // {
1135
+ // __effect_qb_error__: "effect-qb: invalid grouped selection"
1136
+ // __effect_qb_hint__:
1137
+ // "Scalar selections must be covered by groupBy(...) when aggregates are present"
1138
+ // }
1139
+ ```
1140
+
1141
+ This catches invalid grouped queries before rendering.
1142
+
1143
+ ### Dialect Compatibility
1144
+
1145
+ Plans, tables, renderers, and executors are dialect-branded.
1146
+
1147
+ ```ts
1148
+ import * as Mysql from "effect-qb/mysql"
1149
+ import * as Postgres from "effect-qb/postgres"
1150
+
1151
+ const mysqlUsers = Mysql.Table.make("users", {
1152
+ id: Mysql.Column.uuid().pipe(Mysql.Column.primaryKey)
1153
+ })
1154
+
1155
+ const mysqlPlan = Mysql.Query.select({
1156
+ id: mysqlUsers.id
1157
+ }).pipe(
1158
+ Mysql.Query.from(mysqlUsers)
1159
+ )
1160
+
1161
+ type WrongDialect =
1162
+ Postgres.Query.DialectCompatiblePlan<typeof mysqlPlan, "postgres">
1163
+ // {
1164
+ // __effect_qb_error__:
1165
+ // "effect-qb: plan dialect is not compatible with the target renderer or executor"
1166
+ // __effect_qb_plan_dialect__: "mysql"
1167
+ // __effect_qb_target_dialect__: "postgres"
1168
+ // __effect_qb_hint__:
1169
+ // "Use the matching dialect module or renderer/executor"
1170
+ // }
1171
+ ```
1172
+
1173
+ ### JSON Schema Compatibility In Mutations
1174
+
1175
+ Schema-backed JSON columns are checked on insert and update.
1176
+
1177
+ ```ts
1178
+ import * as Schema from "effect/Schema"
1179
+
1180
+ const docs = Table.make("docs", {
1181
+ id: C.uuid().pipe(C.primaryKey),
1182
+ payload: C.json(Schema.Struct({
1183
+ profile: Schema.Struct({
1184
+ address: Schema.Struct({
1185
+ city: Schema.String,
1186
+ postcode: Schema.NullOr(Schema.String)
1187
+ }),
1188
+ tags: Schema.Array(Schema.String)
1189
+ }),
1190
+ note: Schema.NullOr(Schema.String)
1191
+ }))
1192
+ })
1193
+
1194
+ const cityPath = Q.json.path(
1195
+ Q.json.key("profile"),
1196
+ Q.json.key("address"),
1197
+ Q.json.key("city")
1198
+ )
1199
+
1200
+ const incompatibleObject = Q.json.buildObject({
1201
+ profile: {
1202
+ address: {
1203
+ postcode: "1000"
1204
+ },
1205
+ tags: ["travel"]
1206
+ },
1207
+ note: null
1208
+ })
1209
+
1210
+ const deletedRequiredField = Q.json.delete(docs.payload, cityPath)
1211
+
1212
+ Q.insert(docs, {
1213
+ id: "doc-1",
1214
+ // @ts-expect-error nested json output must still satisfy the column schema
1215
+ payload: incompatibleObject
1216
+ })
1217
+ ```
1218
+
1219
+ For updates, column-derived JSON expressions are checked too:
1220
+
1221
+ ```ts
1222
+ Q.update(docs, {
1223
+ // @ts-expect-error deleting a required field makes the json output incompatible
1224
+ payload: deletedRequiredField
1225
+ })
1226
+ ```
1227
+
1228
+ The same compatibility checks apply anywhere a mutation assigns to a schema-backed JSON column.
1229
+
1230
+ ### Readable Branded Type Errors
1231
+
1232
+ The library favors branded type errors over silent `never` collapse. Typical diagnostics include:
1233
+
1234
+ - `__effect_qb_error__`
1235
+ - `__effect_qb_hint__`
1236
+ - `__effect_qb_missing_sources__`
1237
+ - `__effect_qb_plan_dialect__`
1238
+ - `__effect_qb_target_dialect__`
1239
+
1240
+ That makes invalid plans easier to inspect in editor tooltips and type aliases.
1241
+
1242
+ ## Dialect Support
1243
+
1244
+ ### PostgreSQL
1245
+
1246
+ - default root entrypoint
1247
+ - `distinctOn(...)`
1248
+ - wider JSON operator surface, including `json.pathMatch(...)`
1249
+ - schema-qualified tables default to `public`
1250
+ - `Postgres.Executor.PostgresQueryError<typeof plan>`
1251
+
1252
+ ### MySQL
1253
+
1254
+ - dialect-specific table and query entrypoint via `effect-qb/mysql`
1255
+ - `distinctOn(...)` is rejected with a branded type error
1256
+ - JSON support is broad but not identical to Postgres
1257
+ - schema names map to database-qualified table references
1258
+ - `Mysql.Executor.MysqlQueryError<typeof plan>`
1259
+
1260
+ Expect meaningful differences between the two dialects around:
1261
+
1262
+ - JSON operator support
1263
+ - mutation syntax
1264
+ - error normalization
1265
+ - schema defaults
1266
+
1267
+ ## Limitations
1268
+
1269
+ This README is curated. It documents the main workflows and type-safety contract, not every API detail.
1270
+
1271
+ Current practical limits:
1272
+
1273
+ - some features are dialect-specific by design
1274
+ - JSON support is not identical between Postgres and MySQL
1275
+ - admin and DDL workflows are not the focus of this README
1276
+ - runtime execution is schema-free, so the database is expected to honor the query contract
1277
+
1278
+ ## Contributing
1279
+
1280
+ Useful commands:
1281
+
1282
+ ```bash
1283
+ bun test
1284
+ bun run test:types
1285
+ ```
1286
+
1287
+ Useful places to start:
1288
+
1289
+ - [src/index.ts](./src/index.ts)
1290
+ - [src/internal/query-factory.ts](./src/internal/query-factory.ts)
1291
+ - [test/query.behavior.test.ts](./test/query.behavior.test.ts)
1292
+ - [test/types/query-composition-types.ts](./test/types/query-composition-types.ts)
1293
+
1294
+ The codebase is organized around typed plans, dialect-specialized entrypoints, and behavior-first tests.