@prisma-next/cli 0.3.0-dev.53 → 0.3.0-dev.55

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +24 -0
  2. package/dist/cli.mjs +5 -3
  3. package/dist/cli.mjs.map +1 -1
  4. package/dist/{client-BSZKpZTF.mjs → client-B7f4PZZ1.mjs} +367 -170
  5. package/dist/client-B7f4PZZ1.mjs.map +1 -0
  6. package/dist/commands/contract-emit.d.mts.map +1 -1
  7. package/dist/commands/contract-emit.mjs +7 -6
  8. package/dist/commands/contract-emit.mjs.map +1 -1
  9. package/dist/commands/db-init.d.mts.map +1 -1
  10. package/dist/commands/db-init.mjs +28 -76
  11. package/dist/commands/db-init.mjs.map +1 -1
  12. package/dist/commands/db-introspect.d.mts.map +1 -1
  13. package/dist/commands/db-introspect.mjs +12 -17
  14. package/dist/commands/db-introspect.mjs.map +1 -1
  15. package/dist/commands/db-schema-verify.d.mts.map +1 -1
  16. package/dist/commands/db-schema-verify.mjs +5 -4
  17. package/dist/commands/db-schema-verify.mjs.map +1 -1
  18. package/dist/commands/db-sign.d.mts.map +1 -1
  19. package/dist/commands/db-sign.mjs +6 -5
  20. package/dist/commands/db-sign.mjs.map +1 -1
  21. package/dist/commands/db-update.d.mts +7 -0
  22. package/dist/commands/db-update.d.mts.map +1 -0
  23. package/dist/commands/db-update.mjs +120 -0
  24. package/dist/commands/db-update.mjs.map +1 -0
  25. package/dist/commands/db-verify.d.mts.map +1 -1
  26. package/dist/commands/db-verify.mjs +5 -4
  27. package/dist/commands/db-verify.mjs.map +1 -1
  28. package/dist/{config-loader-BJ8HsEdA.mjs → config-loader-DqKf1qSa.mjs} +1 -1
  29. package/dist/{config-loader-BJ8HsEdA.mjs.map → config-loader-DqKf1qSa.mjs.map} +1 -1
  30. package/dist/config-loader.mjs +1 -1
  31. package/dist/exports/control-api.d.mts +96 -6
  32. package/dist/exports/control-api.d.mts.map +1 -1
  33. package/dist/exports/control-api.mjs +2 -2
  34. package/dist/exports/index.mjs +1 -3
  35. package/dist/exports/index.mjs.map +1 -1
  36. package/dist/migration-command-scaffold-BELw_do2.mjs +95 -0
  37. package/dist/migration-command-scaffold-BELw_do2.mjs.map +1 -0
  38. package/dist/{result-handler-BZPY7HX4.mjs → result-handler-BhmrXIvT.mjs} +63 -13
  39. package/dist/result-handler-BhmrXIvT.mjs.map +1 -0
  40. package/package.json +14 -10
  41. package/src/cli.ts +5 -0
  42. package/src/commands/contract-emit.ts +22 -6
  43. package/src/commands/db-init.ts +89 -197
  44. package/src/commands/db-introspect.ts +4 -8
  45. package/src/commands/db-schema-verify.ts +11 -2
  46. package/src/commands/db-sign.ts +13 -4
  47. package/src/commands/db-update.ts +220 -0
  48. package/src/commands/db-verify.ts +11 -2
  49. package/src/control-api/client.ts +109 -145
  50. package/src/control-api/errors.ts +9 -0
  51. package/src/control-api/operations/db-init.ts +39 -34
  52. package/src/control-api/operations/db-update.ts +221 -0
  53. package/src/control-api/operations/extract-sql-ddl.ts +47 -0
  54. package/src/control-api/operations/migration-helpers.ts +49 -0
  55. package/src/control-api/types.ts +104 -4
  56. package/src/exports/control-api.ts +5 -0
  57. package/src/utils/cli-errors.ts +2 -0
  58. package/src/utils/command-helpers.ts +81 -3
  59. package/src/utils/migration-command-scaffold.ts +189 -0
  60. package/src/utils/output.ts +43 -13
  61. package/dist/client-BSZKpZTF.mjs.map +0 -1
  62. package/dist/result-handler-BZPY7HX4.mjs.map +0 -1
@@ -5,12 +5,14 @@ import type {
5
5
  ControlFamilyInstance,
6
6
  MigrationPlan,
7
7
  MigrationPlannerResult,
8
- MigrationPlanOperation,
9
8
  MigrationRunnerResult,
10
9
  TargetMigrationsCapability,
11
10
  } from '@prisma-next/core-control-plane/types';
11
+ import { ifDefined } from '@prisma-next/utils/defined';
12
12
  import { notOk, ok } from '@prisma-next/utils/result';
13
13
  import type { DbInitResult, DbInitSuccess, OnControlProgress } from '../types';
14
+ import { extractSqlDdl } from './extract-sql-ddl';
15
+ import { createOperationCallbacks, stripOperations } from './migration-helpers';
14
16
 
15
17
  /**
16
18
  * Options for executing dbInit operation.
@@ -114,7 +116,7 @@ export async function executeDbInit<TFamilyId extends string, TTargetId extends
114
116
  action: 'dbInit',
115
117
  kind: 'spanStart',
116
118
  spanId: checkMarkerSpanId,
117
- label: 'Checking contract marker',
119
+ label: 'Checking database signature',
118
120
  });
119
121
  const existingMarker = await familyInstance.readMarker({ driver });
120
122
  if (existingMarker) {
@@ -134,15 +136,23 @@ export async function executeDbInit<TFamilyId extends string, TTargetId extends
134
136
  const result: DbInitSuccess = {
135
137
  mode,
136
138
  plan: { operations: [] },
137
- ...(mode === 'apply'
138
- ? {
139
- execution: { operationsPlanned: 0, operationsExecuted: 0 },
140
- marker: {
139
+ destination: {
140
+ storageHash: migrationPlan.destination.storageHash,
141
+ ...ifDefined('profileHash', migrationPlan.destination.profileHash),
142
+ },
143
+ ...ifDefined(
144
+ 'execution',
145
+ mode === 'apply' ? { operationsPlanned: 0, operationsExecuted: 0 } : undefined,
146
+ ),
147
+ ...ifDefined(
148
+ 'marker',
149
+ mode === 'apply'
150
+ ? {
141
151
  storageHash: existingMarker.storageHash,
142
152
  profileHash: existingMarker.profileHash,
143
- },
144
- }
145
- : {}),
153
+ }
154
+ : undefined,
155
+ ),
146
156
  summary: 'Database already at target contract state',
147
157
  };
148
158
  return ok(result);
@@ -181,9 +191,18 @@ export async function executeDbInit<TFamilyId extends string, TTargetId extends
181
191
 
182
192
  // Plan mode - don't execute
183
193
  if (mode === 'plan') {
194
+ const planSql =
195
+ familyInstance.familyId === 'sql' ? extractSqlDdl(migrationPlan.operations) : undefined;
184
196
  const result: DbInitSuccess = {
185
197
  mode: 'plan',
186
- plan: { operations: migrationPlan.operations },
198
+ plan: {
199
+ operations: stripOperations(migrationPlan.operations),
200
+ ...ifDefined('sql', planSql),
201
+ },
202
+ destination: {
203
+ storageHash: migrationPlan.destination.storageHash,
204
+ ...ifDefined('profileHash', migrationPlan.destination.profileHash),
205
+ },
187
206
  summary: `Planned ${migrationPlan.operations.length} operation(s)`,
188
207
  };
189
208
  return ok(result);
@@ -198,34 +217,14 @@ export async function executeDbInit<TFamilyId extends string, TTargetId extends
198
217
  label: 'Applying migration plan',
199
218
  });
200
219
 
201
- const callbacks = onProgress
202
- ? {
203
- onOperationStart: (op: MigrationPlanOperation) => {
204
- onProgress({
205
- action: 'dbInit',
206
- kind: 'spanStart',
207
- spanId: `operation:${op.id}`,
208
- parentSpanId: applySpanId,
209
- label: op.label,
210
- });
211
- },
212
- onOperationComplete: (op: MigrationPlanOperation) => {
213
- onProgress({
214
- action: 'dbInit',
215
- kind: 'spanEnd',
216
- spanId: `operation:${op.id}`,
217
- outcome: 'ok',
218
- });
219
- },
220
- }
221
- : undefined;
220
+ const callbacks = createOperationCallbacks(onProgress, 'dbInit', applySpanId);
222
221
 
223
222
  const runnerResult: MigrationRunnerResult = await runner.execute({
224
223
  plan: migrationPlan,
225
224
  driver,
226
225
  destinationContract: contractIR,
227
226
  policy,
228
- ...(callbacks ? { callbacks } : {}),
227
+ ...ifDefined('callbacks', callbacks),
229
228
  // db init plans and applies back-to-back from a fresh introspection, so per-operation
230
229
  // pre/postchecks and the idempotency probe are usually redundant overhead. We still
231
230
  // enforce marker/origin compatibility and a full schema verification after apply.
@@ -264,7 +263,13 @@ export async function executeDbInit<TFamilyId extends string, TTargetId extends
264
263
 
265
264
  const result: DbInitSuccess = {
266
265
  mode: 'apply',
267
- plan: { operations: migrationPlan.operations },
266
+ plan: {
267
+ operations: stripOperations(migrationPlan.operations),
268
+ },
269
+ destination: {
270
+ storageHash: migrationPlan.destination.storageHash,
271
+ ...ifDefined('profileHash', migrationPlan.destination.profileHash),
272
+ },
268
273
  execution: {
269
274
  operationsPlanned: execution.operationsPlanned,
270
275
  operationsExecuted: execution.operationsExecuted,
@@ -275,7 +280,7 @@ export async function executeDbInit<TFamilyId extends string, TTargetId extends
275
280
  profileHash: migrationPlan.destination.profileHash,
276
281
  }
277
282
  : { storageHash: migrationPlan.destination.storageHash },
278
- summary: `Applied ${execution.operationsExecuted} operation(s), marker written`,
283
+ summary: `Applied ${execution.operationsExecuted} operation(s), database signed`,
279
284
  };
280
285
  return ok(result);
281
286
  }
@@ -0,0 +1,221 @@
1
+ import type { TargetBoundComponentDescriptor } from '@prisma-next/contract/framework-components';
2
+ import type { ContractIR } from '@prisma-next/contract/ir';
3
+ import type {
4
+ ControlDriverInstance,
5
+ ControlFamilyInstance,
6
+ MigrationPlannerResult,
7
+ MigrationRunnerResult,
8
+ TargetMigrationsCapability,
9
+ } from '@prisma-next/core-control-plane/types';
10
+ import { ifDefined } from '@prisma-next/utils/defined';
11
+ import { notOk, ok } from '@prisma-next/utils/result';
12
+ import type { DbUpdateResult, DbUpdateSuccess, OnControlProgress } from '../types';
13
+ import { extractSqlDdl } from './extract-sql-ddl';
14
+ import { createOperationCallbacks, stripOperations } from './migration-helpers';
15
+
16
+ // F12: db update allows additive, widening, and destructive operations.
17
+ const DB_UPDATE_POLICY = {
18
+ allowedOperationClasses: ['additive', 'widening', 'destructive'] as const,
19
+ } as const;
20
+
21
+ /**
22
+ * Options for the executeDbUpdate operation.
23
+ * Config-agnostic: receives pre-resolved driver, family, contract, and migrations capability.
24
+ */
25
+ export interface ExecuteDbUpdateOptions<TFamilyId extends string, TTargetId extends string> {
26
+ readonly driver: ControlDriverInstance<TFamilyId, TTargetId>;
27
+ readonly familyInstance: ControlFamilyInstance<TFamilyId>;
28
+ readonly contractIR: ContractIR;
29
+ readonly mode: 'plan' | 'apply';
30
+ readonly migrations: TargetMigrationsCapability<
31
+ TFamilyId,
32
+ TTargetId,
33
+ ControlFamilyInstance<TFamilyId>
34
+ >;
35
+ readonly frameworkComponents: ReadonlyArray<TargetBoundComponentDescriptor<TFamilyId, TTargetId>>;
36
+ readonly acceptDataLoss?: boolean;
37
+ /** Optional progress callback for observing operation progress. */
38
+ readonly onProgress?: OnControlProgress;
39
+ }
40
+
41
+ /**
42
+ * Executes the db update operation: introspect → plan → (optionally) apply → marker.
43
+ *
44
+ * db update is a pure reconciliation command: it introspects the live schema, plans the diff
45
+ * to the destination contract, and applies operations. The marker is bookkeeping only — written
46
+ * after apply so that `verify` and `db init` can reference it, but never read or validated
47
+ * by db update itself. The runner creates the marker table if it does not exist.
48
+ */
49
+ export async function executeDbUpdate<TFamilyId extends string, TTargetId extends string>(
50
+ options: ExecuteDbUpdateOptions<TFamilyId, TTargetId>,
51
+ ): Promise<DbUpdateResult> {
52
+ const { driver, familyInstance, contractIR, mode, migrations, frameworkComponents, onProgress } =
53
+ options;
54
+
55
+ const planner = migrations.createPlanner(familyInstance);
56
+ const runner = migrations.createRunner(familyInstance);
57
+
58
+ const introspectSpanId = 'introspect';
59
+ onProgress?.({
60
+ action: 'dbUpdate',
61
+ kind: 'spanStart',
62
+ spanId: introspectSpanId,
63
+ label: 'Introspecting database schema',
64
+ });
65
+ const schemaIR = await familyInstance.introspect({ driver });
66
+ onProgress?.({
67
+ action: 'dbUpdate',
68
+ kind: 'spanEnd',
69
+ spanId: introspectSpanId,
70
+ outcome: 'ok',
71
+ });
72
+
73
+ const policy = DB_UPDATE_POLICY;
74
+
75
+ const planSpanId = 'plan';
76
+ onProgress?.({
77
+ action: 'dbUpdate',
78
+ kind: 'spanStart',
79
+ spanId: planSpanId,
80
+ label: 'Planning migration',
81
+ });
82
+ const plannerResult: MigrationPlannerResult = await planner.plan({
83
+ contract: contractIR,
84
+ schema: schemaIR,
85
+ policy,
86
+ frameworkComponents,
87
+ });
88
+ if (plannerResult.kind === 'failure') {
89
+ onProgress?.({
90
+ action: 'dbUpdate',
91
+ kind: 'spanEnd',
92
+ spanId: planSpanId,
93
+ outcome: 'error',
94
+ });
95
+ return notOk({
96
+ code: 'PLANNING_FAILED',
97
+ summary: 'Migration planning failed due to conflicts',
98
+ conflicts: plannerResult.conflicts,
99
+ why: undefined,
100
+ meta: undefined,
101
+ });
102
+ }
103
+ onProgress?.({
104
+ action: 'dbUpdate',
105
+ kind: 'spanEnd',
106
+ spanId: planSpanId,
107
+ outcome: 'ok',
108
+ });
109
+
110
+ const migrationPlan = plannerResult.plan;
111
+
112
+ if (mode === 'plan') {
113
+ const planSql =
114
+ familyInstance.familyId === 'sql' ? extractSqlDdl(migrationPlan.operations) : undefined;
115
+ const result: DbUpdateSuccess = {
116
+ mode: 'plan',
117
+ plan: {
118
+ operations: stripOperations(migrationPlan.operations),
119
+ ...(planSql !== undefined ? { sql: planSql } : {}),
120
+ },
121
+ destination: {
122
+ storageHash: migrationPlan.destination.storageHash,
123
+ ...ifDefined('profileHash', migrationPlan.destination.profileHash),
124
+ },
125
+ summary: `Planned ${migrationPlan.operations.length} operation(s)`,
126
+ };
127
+ return ok(result);
128
+ }
129
+
130
+ // When applying, require explicit acceptance for destructive operations
131
+ if (!options.acceptDataLoss) {
132
+ const destructiveOps = migrationPlan.operations
133
+ .filter((op) => op.operationClass === 'destructive')
134
+ .map((op) => ({ id: op.id, label: op.label }));
135
+ if (destructiveOps.length > 0) {
136
+ return notOk({
137
+ code: 'DESTRUCTIVE_CHANGES',
138
+ summary: `Planned ${destructiveOps.length} destructive operation(s) that require confirmation`,
139
+ why: 'Use --plan to preview destructive operations, then re-run with --accept-data-loss to apply',
140
+ conflicts: undefined,
141
+ meta: { destructiveOperations: destructiveOps },
142
+ });
143
+ }
144
+ }
145
+
146
+ const applySpanId = 'apply';
147
+ onProgress?.({
148
+ action: 'dbUpdate',
149
+ kind: 'spanStart',
150
+ spanId: applySpanId,
151
+ label: 'Applying migration plan',
152
+ });
153
+
154
+ const callbacks = createOperationCallbacks(onProgress, 'dbUpdate', applySpanId);
155
+
156
+ const runnerResult: MigrationRunnerResult = await runner.execute({
157
+ plan: migrationPlan,
158
+ driver,
159
+ destinationContract: contractIR,
160
+ policy,
161
+ ...(callbacks ? { callbacks } : {}),
162
+ // db update plans and applies from a single introspection pass, so per-operation pre/postchecks
163
+ // and idempotency probes are intentionally disabled to avoid redundant roundtrips.
164
+ executionChecks: {
165
+ prechecks: false,
166
+ postchecks: false,
167
+ idempotencyChecks: false,
168
+ },
169
+ frameworkComponents,
170
+ });
171
+
172
+ if (!runnerResult.ok) {
173
+ onProgress?.({
174
+ action: 'dbUpdate',
175
+ kind: 'spanEnd',
176
+ spanId: applySpanId,
177
+ outcome: 'error',
178
+ });
179
+ return notOk({
180
+ code: 'RUNNER_FAILED',
181
+ summary: runnerResult.failure.summary,
182
+ why: runnerResult.failure.why,
183
+ meta: runnerResult.failure.meta,
184
+ conflicts: undefined,
185
+ });
186
+ }
187
+
188
+ const execution = runnerResult.value;
189
+ onProgress?.({
190
+ action: 'dbUpdate',
191
+ kind: 'spanEnd',
192
+ spanId: applySpanId,
193
+ outcome: 'ok',
194
+ });
195
+
196
+ const result: DbUpdateSuccess = {
197
+ mode: 'apply',
198
+ plan: {
199
+ operations: stripOperations(migrationPlan.operations),
200
+ },
201
+ destination: {
202
+ storageHash: migrationPlan.destination.storageHash,
203
+ ...ifDefined('profileHash', migrationPlan.destination.profileHash),
204
+ },
205
+ execution: {
206
+ operationsPlanned: execution.operationsPlanned,
207
+ operationsExecuted: execution.operationsExecuted,
208
+ },
209
+ marker: migrationPlan.destination.profileHash
210
+ ? {
211
+ storageHash: migrationPlan.destination.storageHash,
212
+ profileHash: migrationPlan.destination.profileHash,
213
+ }
214
+ : { storageHash: migrationPlan.destination.storageHash },
215
+ summary:
216
+ execution.operationsExecuted === 0
217
+ ? 'Database already matches contract, signature updated'
218
+ : `Applied ${execution.operationsExecuted} operation(s), signature updated`,
219
+ };
220
+ return ok(result);
221
+ }
@@ -0,0 +1,47 @@
1
+ import type { MigrationPlanOperation } from '@prisma-next/core-control-plane/types';
2
+
3
+ /**
4
+ * Shape of an SQL execute step on SqlMigrationPlanOperation.
5
+ * Used for runtime type narrowing without importing the concrete SQL type.
6
+ */
7
+ interface SqlExecuteStep {
8
+ readonly sql: string;
9
+ }
10
+
11
+ function isDdlStatement(sqlStatement: string): boolean {
12
+ const trimmed = sqlStatement.trim().toLowerCase();
13
+ return (
14
+ trimmed.startsWith('create ') || trimmed.startsWith('alter ') || trimmed.startsWith('drop ')
15
+ );
16
+ }
17
+
18
+ function hasExecuteSteps(
19
+ operation: MigrationPlanOperation,
20
+ ): operation is MigrationPlanOperation & { readonly execute: readonly SqlExecuteStep[] } {
21
+ const candidate = operation as unknown as Record<string, unknown>;
22
+ if (!('execute' in candidate) || !Array.isArray(candidate['execute'])) {
23
+ return false;
24
+ }
25
+ return candidate['execute'].every(
26
+ (step: unknown) => typeof step === 'object' && step !== null && 'sql' in step,
27
+ );
28
+ }
29
+
30
+ /**
31
+ * Extracts a best-effort SQL DDL preview for CLI plan output.
32
+ * This helper is presentation-only and is never used to decide migration correctness.
33
+ */
34
+ export function extractSqlDdl(operations: readonly MigrationPlanOperation[]): string[] {
35
+ const statements: string[] = [];
36
+ for (const operation of operations) {
37
+ if (!hasExecuteSteps(operation)) {
38
+ continue;
39
+ }
40
+ for (const step of operation.execute) {
41
+ if (typeof step.sql === 'string' && isDdlStatement(step.sql)) {
42
+ statements.push(step.sql.trim());
43
+ }
44
+ }
45
+ }
46
+ return statements;
47
+ }
@@ -0,0 +1,49 @@
1
+ import type { MigrationPlanOperation } from '@prisma-next/core-control-plane/types';
2
+ import type { ControlActionName, OnControlProgress } from '../types';
3
+
4
+ /**
5
+ * Strips operation objects to their public shape (id, label, operationClass).
6
+ * Used at the API boundary to avoid leaking internal fields (precheck, execute, postcheck, etc.).
7
+ */
8
+ export function stripOperations(
9
+ operations: readonly MigrationPlanOperation[],
10
+ ): ReadonlyArray<{ readonly id: string; readonly label: string; readonly operationClass: string }> {
11
+ return operations.map((op) => ({
12
+ id: op.id,
13
+ label: op.label,
14
+ operationClass: op.operationClass,
15
+ }));
16
+ }
17
+
18
+ /**
19
+ * Creates per-operation progress callbacks for the runner.
20
+ * Returns undefined when no onProgress callback is provided.
21
+ */
22
+ export function createOperationCallbacks(
23
+ onProgress: OnControlProgress | undefined,
24
+ action: ControlActionName,
25
+ parentSpanId: string,
26
+ ) {
27
+ if (!onProgress) {
28
+ return undefined;
29
+ }
30
+ return {
31
+ onOperationStart: (op: MigrationPlanOperation) => {
32
+ onProgress({
33
+ action,
34
+ kind: 'spanStart',
35
+ spanId: `operation:${op.id}`,
36
+ parentSpanId,
37
+ label: op.label,
38
+ });
39
+ },
40
+ onOperationComplete: (op: MigrationPlanOperation) => {
41
+ onProgress({
42
+ action,
43
+ kind: 'spanEnd',
44
+ spanId: `operation:${op.id}`,
45
+ outcome: 'ok',
46
+ });
47
+ },
48
+ };
49
+ }
@@ -60,6 +60,7 @@ export interface ControlClientOptions {
60
60
  */
61
61
  export type ControlActionName =
62
62
  | 'dbInit'
63
+ | 'dbUpdate'
63
64
  | 'verify'
64
65
  | 'schemaVerify'
65
66
  | 'sign'
@@ -189,6 +190,36 @@ export interface DbInitOptions {
189
190
  readonly onProgress?: OnControlProgress;
190
191
  }
191
192
 
193
+ /**
194
+ * Options for the dbUpdate operation.
195
+ */
196
+ export interface DbUpdateOptions {
197
+ /** Contract IR or unvalidated JSON - validated at runtime via familyInstance.validateContractIR() */
198
+ readonly contractIR: unknown;
199
+ /**
200
+ * Mode for the dbUpdate operation.
201
+ * - 'plan': Returns planned operations without applying
202
+ * - 'apply': Applies operations and writes marker/ledger
203
+ */
204
+ readonly mode: 'plan' | 'apply';
205
+ /**
206
+ * Database connection. If provided, dbUpdate will connect before executing.
207
+ * If omitted, the client must already be connected.
208
+ * The type is driver-specific (e.g., string URL for Postgres).
209
+ */
210
+ readonly connection?: unknown;
211
+ /**
212
+ * When true, allows applying plans that contain destructive operations
213
+ * (e.g., DROP TABLE, DROP COLUMN, ALTER TYPE).
214
+ * When false (default), the operation returns a failure if the plan
215
+ * includes destructive operations, prompting the user to use --plan
216
+ * to preview and then re-run with --accept-data-loss.
217
+ */
218
+ readonly acceptDataLoss?: boolean;
219
+ /** Optional progress callback for observing operation progress */
220
+ readonly onProgress?: OnControlProgress;
221
+ }
222
+
192
223
  /**
193
224
  * Options for the introspect operation.
194
225
  */
@@ -249,6 +280,11 @@ export interface DbInitSuccess {
249
280
  readonly label: string;
250
281
  readonly operationClass: string;
251
282
  }>;
283
+ readonly sql?: ReadonlyArray<string>;
284
+ };
285
+ readonly destination: {
286
+ readonly storageHash: string;
287
+ readonly profileHash?: string;
252
288
  };
253
289
  readonly execution?: {
254
290
  readonly operationsPlanned: number;
@@ -291,6 +327,56 @@ export interface DbInitFailure {
291
327
  */
292
328
  export type DbInitResult = Result<DbInitSuccess, DbInitFailure>;
293
329
 
330
+ /**
331
+ * Successful dbUpdate result.
332
+ */
333
+ export interface DbUpdateSuccess {
334
+ readonly mode: 'plan' | 'apply';
335
+ readonly plan: {
336
+ readonly operations: ReadonlyArray<{
337
+ readonly id: string;
338
+ readonly label: string;
339
+ readonly operationClass: string;
340
+ }>;
341
+ readonly sql?: ReadonlyArray<string>;
342
+ };
343
+ readonly destination: {
344
+ readonly storageHash: string;
345
+ readonly profileHash?: string;
346
+ };
347
+ readonly execution?: {
348
+ readonly operationsPlanned: number;
349
+ readonly operationsExecuted: number;
350
+ };
351
+ readonly marker?: {
352
+ readonly storageHash: string;
353
+ readonly profileHash?: string;
354
+ };
355
+ readonly summary: string;
356
+ }
357
+
358
+ /**
359
+ * Failure codes for dbUpdate operation.
360
+ */
361
+ export type DbUpdateFailureCode = 'PLANNING_FAILED' | 'RUNNER_FAILED' | 'DESTRUCTIVE_CHANGES';
362
+
363
+ /**
364
+ * Failure details for dbUpdate operation.
365
+ */
366
+ export interface DbUpdateFailure {
367
+ readonly code: DbUpdateFailureCode;
368
+ readonly summary: string;
369
+ readonly why: string | undefined;
370
+ readonly conflicts: ReadonlyArray<MigrationPlannerConflict> | undefined;
371
+ readonly meta: Record<string, unknown> | undefined;
372
+ }
373
+
374
+ /**
375
+ * Result type for dbUpdate operation.
376
+ * Uses Result pattern: success returns DbUpdateSuccess, failure returns DbUpdateFailure.
377
+ */
378
+ export type DbUpdateResult = Result<DbUpdateSuccess, DbUpdateFailure>;
379
+
294
380
  /**
295
381
  * Successful emit result.
296
382
  * Contains the hashes and paths of emitted files.
@@ -311,7 +397,10 @@ export interface EmitSuccess {
311
397
  /**
312
398
  * Failure codes for emit operation.
313
399
  */
314
- export type EmitFailureCode = 'CONTRACT_SOURCE_INVALID' | 'EMIT_FAILED';
400
+ export type EmitFailureCode =
401
+ | 'CONTRACT_SOURCE_INVALID'
402
+ | 'CONTRACT_VALIDATION_FAILED'
403
+ | 'EMIT_FAILED';
315
404
 
316
405
  /**
317
406
  * Failure details for emit operation.
@@ -428,9 +517,9 @@ export interface ControlClient {
428
517
  schemaVerify(options: SchemaVerifyOptions): Promise<VerifyDatabaseSchemaResult>;
429
518
 
430
519
  /**
431
- * Signs the database with a contract marker.
432
- * Writes or updates the contract marker if schema verification passes.
433
- * Idempotent (no-op if marker already matches).
520
+ * Signs the database with a contract signature.
521
+ * Writes or updates the signature if schema verification passes.
522
+ * Idempotent (no-op if signature already matches).
434
523
  *
435
524
  * @returns Structured result
436
525
  * @throws If not connected or infrastructure failure
@@ -447,6 +536,17 @@ export interface ControlClient {
447
536
  */
448
537
  dbInit(options: DbInitOptions): Promise<DbInitResult>;
449
538
 
539
+ /**
540
+ * Updates a database schema to match the current contract.
541
+ * Creates the signature table if it does not exist. No preconditions required.
542
+ * Allows additive, widening, and destructive operation classes.
543
+ *
544
+ * @param options.mode - 'plan' to preview, 'apply' to execute
545
+ * @returns Result pattern: Ok with planned/executed operations, NotOk with failure details
546
+ * @throws If not connected, target doesn't support migrations, or infrastructure failure
547
+ */
548
+ dbUpdate(options: DbUpdateOptions): Promise<DbUpdateResult>;
549
+
450
550
  /**
451
551
  * Introspects the database schema.
452
552
  *
@@ -34,6 +34,11 @@ export type {
34
34
  DbInitOptions,
35
35
  DbInitResult,
36
36
  DbInitSuccess,
37
+ DbUpdateFailure,
38
+ DbUpdateFailureCode,
39
+ DbUpdateOptions,
40
+ DbUpdateResult,
41
+ DbUpdateSuccess,
37
42
  EmitContractConfig,
38
43
  EmitFailure,
39
44
  EmitFailureCode,
@@ -11,6 +11,7 @@ export {
11
11
  errorContractMissingExtensionPacks,
12
12
  errorContractValidationFailed,
13
13
  errorDatabaseConnectionRequired,
14
+ errorDestructiveChanges,
14
15
  errorDriverRequired,
15
16
  errorFamilyReadMarkerSqlRequired,
16
17
  errorFileNotFound,
@@ -19,6 +20,7 @@ export {
19
20
  errorMarkerMissing,
20
21
  errorMigrationPlanningFailed,
21
22
  errorQueryRunnerFactoryRequired,
23
+ errorRunnerFailed,
22
24
  errorRuntime,
23
25
  errorTargetMigrationNotSupported,
24
26
  errorTargetMismatch,