dzql 0.3.5 → 0.3.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/cli.js +345 -23
- package/package.json +2 -2
- package/src/client/stores/README.md +95 -0
- package/src/compiler/codegen/operation-codegen.js +123 -13
- package/src/compiler/parser/entity-parser.js +16 -6
- package/src/database/migrations/001_schema.sql +2 -3
- package/src/database/migrations/002_functions.sql +20 -1
- package/src/database/migrations/003_operations.sql +179 -7
- package/src/database/migrations/004_search.sql +5 -0
- package/src/database/migrations/005_entities.sql +3 -10
- package/src/database/migrations/007_events.sql +1 -4
- package/src/database/migrations/010_fix_m2m_events.sql +94 -0
package/bin/cli.js
CHANGED
@@ -1,8 +1,9 @@
 #!/usr/bin/env bun
 
-import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
-import { resolve } from 'path';
+import { readFileSync, writeFileSync, existsSync, mkdirSync, readdirSync } from 'fs';
+import { resolve, join } from 'path';
 import { DZQLCompiler } from '../src/compiler/compiler.js';
+import postgres from 'postgres';
 
 const command = process.argv[2];
 const args = process.argv.slice(3);

@@ -21,6 +22,19 @@ switch (command) {
   case 'compile':
     await runCompile(args);
     break;
+  case 'migrate:new':
+  case 'migrate:init':
+    await runMigrateNew(args);
+    break;
+  case 'migrate:up':
+    await runMigrateUp(args);
+    break;
+  case 'migrate:down':
+    await runMigrateDown(args);
+    break;
+  case 'migrate:status':
+    await runMigrateStatus(args);
+    break;
   case '--version':
   case '-v':
     const pkg = await import('../package.json', { assert: { type: 'json' } });

@@ -31,17 +45,24 @@ switch (command) {
 DZQL CLI
 
 Usage:
-  dzql create <app-name>
-  dzql dev
-  dzql db:up
-  dzql db:down
-  dzql compile <input>
-
+  dzql create <app-name>     Create a new DZQL application
+  dzql dev                   Start development server
+  dzql db:up                 Start PostgreSQL database
+  dzql db:down               Stop PostgreSQL database
+  dzql compile <input>       Compile entity definitions to SQL
+
+  dzql migrate:new <name>    Create a new migration file
+  dzql migrate:up            Apply pending migrations
+  dzql migrate:down          Rollback last migration
+  dzql migrate:status        Show migration status
+
+  dzql --version             Show version
 
 Examples:
   dzql create my-venue-app
-  dzql
-  dzql
+  dzql compile entities/blog.sql -o init_db/
+  dzql migrate:new add_user_avatars
+  dzql migrate:up
 `);
 }
 

@@ -126,27 +147,56 @@ Examples:
 console.log(`\n📁 Writing compiled files to: ${options.output}`);
 
 // Write core DZQL infrastructure
-const coreSQL = `-- DZQL Core Schema and
+const coreSQL = `-- DZQL Core Schema and Tables
 
 CREATE SCHEMA IF NOT EXISTS dzql;
 
+-- Meta information
+CREATE TABLE IF NOT EXISTS dzql.meta (
+  installed_at timestamptz DEFAULT now(),
+  version text NOT NULL
+);
+
+INSERT INTO dzql.meta (version) VALUES ('3.0.0') ON CONFLICT DO NOTHING;
+
+-- Entity Configuration Table
+CREATE TABLE IF NOT EXISTS dzql.entities (
+  table_name text PRIMARY KEY,
+  label_field text NOT NULL,
+  searchable_fields text[] NOT NULL,
+  fk_includes jsonb DEFAULT '{}',
+  soft_delete boolean DEFAULT false,
+  temporal_fields jsonb DEFAULT '{}',
+  notification_paths jsonb DEFAULT '{}',
+  permission_paths jsonb DEFAULT '{}',
+  graph_rules jsonb DEFAULT '{}',
+  field_defaults jsonb DEFAULT '{}',
+  many_to_many jsonb DEFAULT '{}'
+);
+
+-- Registry of callable functions
+CREATE TABLE IF NOT EXISTS dzql.registry (
+  fn_regproc regproc PRIMARY KEY,
+  description text
+);
+
 -- Event Audit Table for real-time notifications
 CREATE TABLE IF NOT EXISTS dzql.events (
   event_id bigserial PRIMARY KEY,
   table_name text NOT NULL,
-  op text NOT NULL,
-  pk jsonb NOT NULL,
-
-
-
-
-  at timestamptz DEFAULT now() -- when the change occurred
+  op text NOT NULL,
+  pk jsonb NOT NULL,
+  data jsonb,
+  user_id int,
+  notify_users int[],
+  at timestamptz DEFAULT now()
 );
 
 CREATE INDEX IF NOT EXISTS dzql_events_table_pk_idx ON dzql.events (table_name, pk, at);
+CREATE INDEX IF NOT EXISTS dzql_events_user_idx ON dzql.events (user_id, at);
 CREATE INDEX IF NOT EXISTS dzql_events_event_id_idx ON dzql.events (event_id);
 
--- Event notification trigger
+-- Event notification trigger
 CREATE OR REPLACE FUNCTION dzql.notify_event()
 RETURNS TRIGGER LANGUAGE plpgsql AS $$
 BEGIN

@@ -155,14 +205,11 @@ BEGIN
     'table', NEW.table_name,
     'op', NEW.op,
     'pk', NEW.pk,
-    'data',
-    'before', NEW.before,
-    'after', NEW.after,
+    'data', NEW.data,
     'user_id', NEW.user_id,
     'at', NEW.at,
     'notify_users', NEW.notify_users
   )::text);
-
   RETURN NULL;
 END $$;
 

@@ -289,3 +336,278 @@ $$;
     process.exit(1);
   }
 }
+
+// ============================================================================
+// Migration Commands
+// ============================================================================
+
+async function runMigrateNew(args) {
+  const migrationName = args[0];
+
+  if (!migrationName) {
+    console.error('Error: Migration name required');
+    console.log('Usage: dzql migrate:new <name>');
+    console.log('Example: dzql migrate:new add_user_avatars');
+    process.exit(1);
+  }
+
+  // Create migrations directory if it doesn't exist
+  const migrationsDir = './migrations';
+  if (!existsSync(migrationsDir)) {
+    mkdirSync(migrationsDir, { recursive: true });
+  }
+
+  // Find next migration number
+  const fs = await import('fs/promises');
+  const files = await fs.readdir(migrationsDir).catch(() => []);
+  const existingNumbers = files
+    .filter(f => /^\d{3}_/.test(f))
+    .map(f => parseInt(f.substring(0, 3)))
+    .filter(n => !isNaN(n));
+
+  const nextNumber = existingNumbers.length > 0 ? Math.max(...existingNumbers) + 1 : 1;
+  const paddedNumber = String(nextNumber).padStart(3, '0');
+  const fileName = `${paddedNumber}_${migrationName}.sql`;
+  const filePath = resolve(migrationsDir, fileName);
+
+  if (existsSync(filePath)) {
+    console.error(`Error: Migration ${fileName} already exists`);
+    process.exit(1);
+  }
+
+  // Generate migration template
+  const template = `-- ============================================================================
+-- Migration ${paddedNumber}: ${migrationName.replace(/_/g, ' ')}
+-- Generated: ${new Date().toISOString().split('T')[0]}
+-- ============================================================================
+
+BEGIN;
+
+-- Part 1: Schema Changes
+-- ALTER TABLE example ADD COLUMN IF NOT EXISTS new_field TEXT;
+
+-- Part 2: Drop Old DZQL Functions (if updating entity)
+-- DROP FUNCTION IF EXISTS save_entity_name(INT, JSONB);
+-- DROP FUNCTION IF EXISTS get_entity_name(INT, INT, TIMESTAMPTZ);
+-- etc.
+
+-- Part 3: Install New Compiled Functions
+-- Compile your entities first: bun run compile
+-- Then paste the compiled function SQL here from init_db/entity_name.sql
+
+-- Part 4: Custom Functions (optional)
+-- CREATE OR REPLACE FUNCTION my_custom_function(
+--   p_user_id INT,
+--   p_params JSONB
+-- ) RETURNS JSONB AS $$
+-- BEGIN
+--   -- Your logic
+--   RETURN jsonb_build_object('result', 'success');
+-- END;
+-- $$ LANGUAGE plpgsql SECURITY DEFINER;
+
+-- Part 5: Register Custom Functions (optional)
+-- INSERT INTO dzql.registry (fn_regproc, description)
+-- VALUES
+--   ('my_custom_function'::regproc, 'Description of function')
+-- ON CONFLICT DO NOTHING;
+
+COMMIT;
+
+-- ============================================================================
+-- Rollback (for migrate:down support)
+-- ============================================================================
+-- To support rollback, add reverse operations in comments:
+--
+-- ROLLBACK INSTRUCTIONS:
+-- 1. Drop new functions
+-- 2. Restore old functions
+-- 3. Remove columns (if safe)
+-- 4. Drop tables (if safe)
+-- ============================================================================
+`;
+
+  writeFileSync(filePath, template, 'utf-8');
+
+  console.log(`\n✅ Created migration: ${fileName}`);
+  console.log(`📝 Edit: ${filePath}`);
+  console.log(`\n💡 Next steps:`);
+  console.log(`  1. Update your entity definitions (entities/*.sql)`);
+  console.log(`  2. Run: bun run compile (generates updated functions in init_db/)`);
+  console.log(`  3. Copy compiled functions into migration file`);
+  console.log(`  4. Test migration: psql $DATABASE_URL -f ${filePath}`);
+  console.log(`  5. Apply to production: dzql migrate:up\n`);
+}
+
+async function runMigrateUp(args) {
+  const databaseUrl = process.env.DATABASE_URL;
+
+  if (!databaseUrl) {
+    console.error('Error: DATABASE_URL environment variable not set');
+    console.log('Set it to your PostgreSQL connection string:');
+    console.log('  export DATABASE_URL="postgresql://user:pass@localhost:5432/dbname"');
+    process.exit(1);
+  }
+
+  const migrationsDir = './migrations';
+  if (!existsSync(migrationsDir)) {
+    console.error(`Error: Migrations directory not found: ${migrationsDir}`);
+    console.log('Create it with: dzql migrate:new <name>');
+    process.exit(1);
+  }
+
+  const sql = postgres(databaseUrl);
+
+  try {
+    console.log('🔌 Connected to database');
+
+    // 1. Create migrations table
+    await sql`
+      CREATE TABLE IF NOT EXISTS dzql.migrations (
+        id SERIAL PRIMARY KEY,
+        name TEXT NOT NULL UNIQUE,
+        applied_at TIMESTAMPTZ DEFAULT NOW()
+      );
+    `;
+
+    // 2. Get applied migrations
+    const appliedRows = await sql`SELECT name, applied_at FROM dzql.migrations ORDER BY applied_at`;
+    const appliedMigrations = new Set(appliedRows.map(row => row.name));
+
+    if (appliedRows.length > 0) {
+      console.log('\n📋 Already applied migrations:');
+      appliedRows.forEach(row => {
+        console.log(`  ✓ ${row.name} (${new Date(row.applied_at).toISOString()})`);
+      });
+    } else {
+      console.log('\n📋 No migrations applied yet.');
+    }
+
+    // 3. Read migration files
+    console.log('📂 Reading migrations from:', migrationsDir);
+
+    const files = readdirSync(migrationsDir)
+      .filter(f => f.endsWith('.sql'))
+      .sort(); // Alphabetical order (001, 002, 003...)
+
+    console.log(`\n📄 Found ${files.length} migration file(s)\n`);
+
+    // 4. Apply new migrations
+    let appliedCount = 0;
+    for (const file of files) {
+      if (appliedMigrations.has(file)) {
+        console.log(`  ⏭ Skipping ${file} (already applied)`);
+        continue;
+      }
+
+      console.log(`\n🚀 Applying migration: ${file}`);
+      const content = readFileSync(join(migrationsDir, file), 'utf-8');
+
+      try {
+        // Execute migration (it should have its own BEGIN/COMMIT)
+        await sql.unsafe(content);
+
+        // Record migration
+        await sql`INSERT INTO dzql.migrations (name) VALUES (${file})`;
+
+        console.log(`✅ Applied: ${file}`);
+        appliedCount++;
+      } catch (err) {
+        console.error(`\n❌ Failed to apply ${file}:`);
+        console.error(err.message);
+        console.error('\n💡 Check your migration file for errors.');
+        console.error('   If migration has BEGIN/COMMIT, it should have rolled back.');
+        process.exit(1);
+      }
+    }
+
+    if (appliedCount === 0) {
+      console.log('\n✨ No new migrations to apply. Database is up to date.');
+    } else {
+      console.log(`\n✨ Successfully applied ${appliedCount} migration(s).`);
+    }
+  } catch (err) {
+    console.error('❌ Migration failed:', err.message);
+    process.exit(1);
+  } finally {
+    await sql.end();
+  }
+}
+
+async function runMigrateDown(args) {
+  console.log('\n🚧 Migration:down command - Coming soon!');
+  console.log('\nThis command will:');
+  console.log('  1. Find last applied migration');
+  console.log('  2. Parse rollback instructions');
+  console.log('  3. Execute rollback');
+  console.log('  4. Remove from dzql.migrations table\n');
+}
+
+async function runMigrateStatus(args) {
+  const databaseUrl = process.env.DATABASE_URL;
+
+  if (!databaseUrl) {
+    console.error('Error: DATABASE_URL environment variable not set');
+    process.exit(1);
+  }
+
+  const migrationsDir = './migrations';
+  if (!existsSync(migrationsDir)) {
+    console.log('📁 No migrations directory found');
+    return;
+  }
+
+  const sql = postgres(databaseUrl);
+
+  try {
+    console.log('🔌 Connected to database\n');
+
+    // Create migrations table if it doesn't exist
+    await sql`
+      CREATE TABLE IF NOT EXISTS dzql.migrations (
+        id SERIAL PRIMARY KEY,
+        name TEXT NOT NULL UNIQUE,
+        applied_at TIMESTAMPTZ DEFAULT NOW()
+      );
+    `;
+
+    // Get applied migrations
+    const appliedRows = await sql`SELECT name, applied_at FROM dzql.migrations ORDER BY applied_at`;
+    const appliedMigrations = new Set(appliedRows.map(row => row.name));
+
+    // Get all migration files
+    const files = readdirSync(migrationsDir)
+      .filter(f => f.endsWith('.sql'))
+      .sort();
+
+    console.log('📊 Migration Status\n');
+    console.log(`Total migrations: ${files.length}`);
+    console.log(`Applied: ${appliedRows.length}`);
+    console.log(`Pending: ${files.length - appliedRows.length}\n`);
+
+    if (appliedRows.length > 0) {
+      console.log('✅ Applied Migrations:');
+      appliedRows.forEach(row => {
+        console.log(`  ${row.name} - ${new Date(row.applied_at).toLocaleString()}`);
+      });
+    }
+
+    const pendingFiles = files.filter(f => !appliedMigrations.has(f));
+    if (pendingFiles.length > 0) {
+      console.log('\n⏳ Pending Migrations:');
+      pendingFiles.forEach(file => {
+        console.log(`  ${file}`);
+      });
+      console.log('\nRun "dzql migrate:up" to apply pending migrations.');
+    } else {
+      console.log('\n✨ Database is up to date.');
+    }
+
+    console.log();
+  } catch (err) {
+    console.error('❌ Failed to get migration status:', err.message);
+    process.exit(1);
+  } finally {
+    await sql.end();
+  }
+}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "dzql",
-  "version": "0.3.5",
+  "version": "0.3.7",
   "description": "PostgreSQL-powered framework with zero boilerplate CRUD operations and real-time WebSocket synchronization",
   "type": "module",
   "main": "src/server/index.js",

@@ -22,7 +22,7 @@
     "LICENSE"
   ],
   "scripts": {
-    "test": "bun test",
+    "test": "bun test ../../tests/core/*.test.js",
    "prepublishOnly": "echo '✅ Publishing DZQL v0.3.5...'"
   },
   "dependencies": {
package/src/client/stores/README.md
ADDED

@@ -0,0 +1,95 @@
+# DZQL Canonical Pinia Stores
+
+**The official, AI-friendly Pinia stores for DZQL Vue.js applications.**
+
+## Why These Stores Exist
+
+When building DZQL apps, developers (and AI assistants) often struggle with:
+
+1. **Three-phase lifecycle** - connecting → login → ready
+2. **WebSocket connection management** - reconnection, error handling
+3. **Authentication flow** - token storage, profile management
+4. **Router integration** - navigation, state synchronization
+5. **Inconsistent patterns** - every project does it differently
+
+These canonical stores solve all of these problems with a **simple, consistent pattern** that AI can easily understand and replicate.
+
+## The Stores
+
+### `useWsStore` - WebSocket & Auth
+
+Manages:
+- WebSocket connection (with auto-reconnect)
+- User authentication (login/register/logout)
+- Connection state tracking
+- Three-phase app lifecycle
+
+### `useAppStore` - Application State
+
+Manages:
+- App initialization
+- Router integration
+- Entity metadata caching
+- Navigation helpers
+- UI state (sidebars, panels)
+
+## Quick Example
+
+```vue
+<script setup>
+import { computed } from 'vue'
+import { useWsStore, useAppStore } from 'dzql/client/stores'
+
+const wsStore = useWsStore()
+const appStore = useAppStore()
+
+const state = computed(() => wsStore.appState)
+const ws = wsStore.getWs()
+
+// Use DZQL API
+const venues = await ws.api.search.venues({ limit: 50 })
+</script>
+
+<template>
+  <!-- Three-phase lifecycle -->
+  <div v-if="state === 'connecting'">Connecting...</div>
+  <LoginView v-else-if="state === 'login'" />
+  <MainApp v-else-if="state === 'ready'" />
+</template>
+```
+
+## Documentation
+
+- **[Quick Start](../../../docs/reference/client.md)** - Get running in 5 minutes
+- **[Complete Guide](../../../docs/guides/client-stores.md)** - Full API reference and patterns
+
+## Files
+
+- `useWsStore.js` - WebSocket connection and authentication
+- `useAppStore.js` - Application state and navigation
+- `index.js` - Exports both stores
+
+## For AI Assistants
+
+When helping users with DZQL apps:
+
+1. **Always use these stores** - Don't create custom connection logic
+2. **Follow the three-phase lifecycle** - connecting → login → ready
+3. **Use computed for reactive state** - `const profile = computed(() => wsStore.profile)`
+4. **Get WS instance for API calls** - `const ws = wsStore.getWs()`
+
+**Example prompt for AI:**
+
+> "I'm using the canonical DZQL stores from `dzql/client/stores`. The pattern is:
+> 1. useWsStore for WebSocket connection (three phases: connecting, login, ready)
+> 2. useAppStore for app state and navigation
+> 3. Access DZQL API via `wsStore.getWs().api.get.venues({ id: 1 })`
+> Please follow this pattern."
+
+## Version
+
+These stores are available in DZQL v0.1.6+
+
+## License
+
+MIT
package/src/compiler/codegen/operation-codegen.js
CHANGED

@@ -78,6 +78,7 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;`;
 const m2mExtraction = this._generateM2MExtraction();
 const m2mSync = this._generateM2MSync();
 const m2mExpansion = this._generateM2MExpansion();
+const fieldDefaults = this._generateFieldDefaults();
 
 return `-- SAVE operation for ${this.tableName}
 CREATE OR REPLACE FUNCTION save_${this.tableName}(

@@ -88,6 +89,7 @@ DECLARE
   v_result ${this.tableName}%ROWTYPE;
   v_existing ${this.tableName}%ROWTYPE;
   v_output JSONB;
+  v_before JSONB;
   v_is_insert BOOLEAN := false;
   v_notify_users INT[];
 ${m2mVariables}

@@ -116,6 +118,12 @@ ${m2mExtraction}
     END IF;
   END IF;
 
+  -- Expand M2M for existing record (for UPDATE events "before" field)
+  IF NOT v_is_insert THEN
+    v_before := to_jsonb(v_existing);
+${this._generateM2MExpansionForBefore()}
+  END IF;
+${fieldDefaults}
   -- Perform UPSERT
   IF v_is_insert THEN
     -- Dynamic INSERT from JSONB

@@ -547,6 +555,49 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;`;
   };
 }
 
+/**
+ * Generate M2M expansion for existing record in SAVE (for "before" field)
+ * COMPILE TIME: Loop to generate code
+ * RUNTIME: Direct SQL queries (NO loops!)
+ * @private
+ */
+_generateM2MExpansionForBefore() {
+  const manyToMany = this.entity.manyToMany || {};
+  if (Object.keys(manyToMany).length === 0) return '';
+
+  const expansions = [];
+
+  // COMPILE TIME LOOP: Generate code for each M2M relationship
+  for (const [relationKey, config] of Object.entries(manyToMany)) {
+    const idField = config.id_field;
+    const junctionTable = config.junction_table;
+    const localKey = config.local_key;
+    const foreignKey = config.foreign_key;
+    const targetEntity = config.target_entity;
+    const expand = config.expand || false;
+
+    // Always add ID array (static SQL)
+    expansions.push(`
+    v_before := v_before || jsonb_build_object('${idField}',
+      (SELECT COALESCE(jsonb_agg(${foreignKey} ORDER BY ${foreignKey}), '[]'::jsonb)
+       FROM ${junctionTable} WHERE ${localKey} = v_existing.id)
+    );`);
+
+    // Conditionally expand full objects (known at compile time!)
+    if (expand) {
+      expansions.push(`
+    v_before := v_before || jsonb_build_object('${relationKey}',
+      (SELECT COALESCE(jsonb_agg(to_jsonb(t.*) ORDER BY t.id), '[]'::jsonb)
+       FROM ${junctionTable} jt
+       JOIN ${targetEntity} t ON t.id = jt.${foreignKey}
+       WHERE jt.${localKey} = v_existing.id)
+    );`);
+    }
+  }
+
+  return expansions.join('');
+}
+
 /**
  * Generate M2M expansion for GET operation
  * COMPILE TIME: Loop to generate code

@@ -616,21 +667,84 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;`;
   );`);
 } else {
   // Direct FK: single object
-
+  // Use JSONB to check field existence (like resolve_direct_fk)
   expansions.push(`
   -- Expand ${key} (foreign key)
-
-
-
-
-  )
-
+  DECLARE
+    v_fk_id INT;
+  BEGIN
+    -- Try field name directly first, then with _id suffix
+    IF to_jsonb(v_record) ? '${key}' THEN
+      v_fk_id := v_record.${key};
+    ELSIF to_jsonb(v_record) ? '${key}_id' THEN
+      v_fk_id := v_record.${key}_id;
+    END IF;
+
+    IF v_fk_id IS NOT NULL THEN
+      v_result := v_result || jsonb_build_object(
+        '${key}',
+        (SELECT to_jsonb(t.*) FROM ${targetTable} t WHERE t.id = v_fk_id)
+      );
+    END IF;
+  END;`);
 }
 }
 
 return expansions.join('');
 }
 
+/**
+ * Generate field defaults application
+ * COMPILE TIME: Loop to generate code for each default
+ * RUNTIME: Direct value assignment (NO loops!)
+ * @private
+ */
+_generateFieldDefaults() {
+  const fieldDefaults = this.entity.fieldDefaults || {};
+  if (Object.keys(fieldDefaults).length === 0) return '';
+
+  const defaults = [];
+
+  // COMPILE TIME LOOP: Generate separate IF block for each field default
+  for (const [fieldName, defaultValue] of Object.entries(fieldDefaults)) {
+    if (defaultValue.startsWith('@')) {
+      // Resolve variable defaults (@user_id, @now, @today)
+      const resolved = this._resolveDefaultVariable(defaultValue, fieldName);
+      defaults.push(`
+  -- Apply field default: ${fieldName} = ${defaultValue}
+  IF v_is_insert AND NOT (p_data ? '${fieldName}') THEN
+    p_data := p_data || jsonb_build_object('${fieldName}', ${resolved});
+  END IF;`);
+    } else {
+      // Literal default value
+      defaults.push(`
+  -- Apply field default: ${fieldName} = ${defaultValue}
+  IF v_is_insert AND NOT (p_data ? '${fieldName}') THEN
+    p_data := p_data || jsonb_build_object('${fieldName}', '${defaultValue}');
+  END IF;`);
+    }
+  }
+
+  return defaults.join('');
+}
+
+/**
+ * Resolve a variable default (@user_id, @now, @today) to SQL expression
+ * @private
+ */
+_resolveDefaultVariable(variable, fieldName) {
+  switch (variable) {
+    case '@user_id':
+      return 'p_user_id';
+    case '@now':
+      return `to_char(NOW(), 'YYYY-MM-DD"T"HH24:MI:SS.MS"Z"')`;
+    case '@today':
+      return `to_char(CURRENT_DATE, 'YYYY-MM-DD')`;
+    default:
+      throw new Error(`Unknown field default variable: ${variable} for field ${fieldName}`);
+  }
+}
+
 /**
  * Generate temporal filter
  * @private

@@ -731,15 +845,13 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;`;
   table_name,
   op,
   pk,
-
-  after,
+  data,
   user_id,
   notify_users
 ) VALUES (
   '${this.tableName}',
   CASE WHEN v_is_insert THEN 'insert' ELSE 'update' END,
   jsonb_build_object('id', v_result.id),
-  CASE WHEN NOT v_is_insert THEN to_jsonb(v_existing) ELSE NULL END,
   v_output,
   p_user_id,
   v_notify_users

@@ -754,15 +866,13 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;`;
   table_name,
   op,
   pk,
-
-  after,
+  data,
   user_id,
   notify_users
 ) VALUES (
   '${this.tableName}',
   'delete',
   jsonb_build_object('id', v_result.id),
-  to_jsonb(v_result),
   NULL,
   p_user_id,
   v_notify_users
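For a concrete sense of what `_generateFieldDefaults()` emits, here is the PL/pgSQL fragment it would produce for a hypothetical entity with `field_defaults = {"status": "draft", "created_by": "@user_id"}` — a sketch assembled from the templates above, not actual compiler output:

```sql
-- Apply field default: status = draft
IF v_is_insert AND NOT (p_data ? 'status') THEN
  p_data := p_data || jsonb_build_object('status', 'draft');
END IF;

-- Apply field default: created_by = @user_id
IF v_is_insert AND NOT (p_data ? 'created_by') THEN
  p_data := p_data || jsonb_build_object('created_by', p_user_id);
END IF;
```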
package/src/compiler/parser/entity-parser.js
CHANGED

@@ -72,7 +72,17 @@ export class EntityParser {
     params.push(currentParam.trim());
   }
 
-
+  // Strip SQL comments (-- ...) from each parameter
+  return params.map(param => {
+    // Remove everything after -- (SQL line comment)
+    return param.split('\n').map(line => {
+      const commentIndex = line.indexOf('--');
+      if (commentIndex !== -1) {
+        return line.substring(0, commentIndex);
+      }
+      return line;
+    }).join('\n').trim();
+  });
 }
 
 /**

@@ -108,11 +118,11 @@ export class EntityParser {
  */
 _cleanString(str) {
   if (!str) return '';
-  // Remove
-  let cleaned = str.replace(
-  cleaned = cleaned.
-  cleaned = cleaned.replace(
-  return cleaned
+  // Remove SQL comments first, then outer quotes
+  let cleaned = str.replace(/--[^\n]*/g, ''); // Remove SQL comments
+  cleaned = cleaned.trim(); // Remove whitespace
+  cleaned = cleaned.replace(/^['"]|['"]$/g, ''); // Remove outer quotes
+  return cleaned;
 }
 
 /**
package/src/database/migrations/001_schema.sql
CHANGED

@@ -38,10 +38,9 @@ CREATE TABLE IF NOT EXISTS dzql.registry (
 CREATE TABLE IF NOT EXISTS dzql.events (
   event_id bigserial PRIMARY KEY,
   table_name text NOT NULL,
-  op text NOT NULL, -- '
+  op text NOT NULL, -- 'insert', 'update', 'delete'
   pk jsonb NOT NULL, -- primary key of affected record
-
-  after jsonb, -- new values (NULL for DELETE)
+  data jsonb, -- record state after this event (NULL for delete)
   user_id int, -- who made the change
   notify_users int[], -- who should be notified (NULL = everyone)
   at timestamptz DEFAULT now() -- when the change occurred
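Under the consolidated schema, a single `data` column carries the post-change record state; the old `before`/`after` pair is gone. A hypothetical event row (table and values invented for illustration):

```sql
INSERT INTO dzql.events (table_name, op, pk, data, user_id, notify_users)
VALUES (
  'venues',
  'update',
  '{"id": 42}'::jsonb,
  '{"id": 42, "name": "Blue Note"}'::jsonb,  -- state after the change
  7,
  ARRAY[7, 12]                               -- NULL would mean notify everyone
);
```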
package/src/database/migrations/002_functions.sql
CHANGED

@@ -749,7 +749,26 @@ BEGIN
       CONTINUE;
     END IF;
 
-    -- Check
+    -- Check for simpler CASCADE/SET NULL/RESTRICT format: {"delete": {"entity_name": "CASCADE"}}
+    IF l_trigger_key IN ('delete', 'update', 'create') AND jsonb_typeof(l_trigger_rules) = 'object' THEN
+      -- This is the simpler format - validate CASCADE/SET NULL/RESTRICT values
+      DECLARE
+        l_entity_name text;
+        l_action_value text;
+      BEGIN
+        FOR l_entity_name, l_action_value IN SELECT * FROM jsonb_each_text(l_trigger_rules)
+        LOOP
+          IF l_action_value NOT IN ('CASCADE', 'SET NULL', 'RESTRICT') THEN
+            RAISE WARNING 'Invalid graph rule action for entity %: %. Must be CASCADE, SET NULL, or RESTRICT', l_entity_name, l_action_value;
+            RETURN false;
+          END IF;
+        END LOOP;
+        -- Valid simpler format - skip complex validation
+        CONTINUE;
+      END;
+    END IF;
+
+    -- Check valid trigger types for complex format
     IF l_trigger_key NOT IN ('on_create', 'on_update', 'on_delete', 'on_field_change') THEN
       RAISE WARNING 'Invalid trigger type: %', l_trigger_key;
       RETURN false;
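The new branch accepts only the three standard actions. A runnable sketch of the membership check it applies (the rule value here is deliberately invalid and hypothetical):

```sql
SELECT key   AS entity_name,
       value NOT IN ('CASCADE', 'SET NULL', 'RESTRICT') AS would_be_rejected
FROM jsonb_each_text('{"comments": "NULLIFY"}'::jsonb);
-- would_be_rejected = true, so the validator raises a warning and returns false
```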
package/src/database/migrations/003_operations.sql
CHANGED

@@ -370,6 +370,62 @@ BEGIN
     END IF;
   END IF;
 
+  -- Expand M2M relationships in existing record (for UPDATE events)
+  IF NOT l_is_insert AND l_existing_record IS NOT NULL AND l_entity_config.many_to_many IS NOT NULL AND l_entity_config.many_to_many != '{}'::jsonb THEN
+    DECLARE
+      l_m2m_key text;
+      l_m2m_config jsonb;
+      l_id_field text;
+      l_junction_table text;
+      l_local_key text;
+      l_foreign_key text;
+      l_target_entity text;
+      l_expand boolean;
+      l_record_id text;
+      l_id_array jsonb;
+      l_expanded_objects jsonb;
+    BEGIN
+      -- Get the primary key value from the existing record
+      l_record_id := l_existing_record->>l_pk_cols[1]; -- Assume single PK for now
+
+      FOR l_m2m_key IN SELECT jsonb_object_keys(l_entity_config.many_to_many)
+      LOOP
+        l_m2m_config := l_entity_config.many_to_many->l_m2m_key;
+        l_id_field := l_m2m_config->>'id_field';
+        l_junction_table := l_m2m_config->>'junction_table';
+        l_local_key := l_m2m_config->>'local_key';
+        l_foreign_key := l_m2m_config->>'foreign_key';
+        l_target_entity := l_m2m_config->>'target_entity';
+        l_expand := COALESCE((l_m2m_config->>'expand')::boolean, false);
+
+        -- Always include array of IDs
+        EXECUTE format('
+          SELECT COALESCE(jsonb_agg(%I), ''[]''::jsonb)
+          FROM %I
+          WHERE %I = $1::int
+        ', l_foreign_key, l_junction_table, l_local_key)
+        INTO l_id_array
+        USING l_record_id;
+
+        l_existing_record := l_existing_record || jsonb_build_object(l_id_field, l_id_array);
+
+        -- Conditionally include expanded objects if expand: true
+        IF l_expand THEN
+          EXECUTE format('
+            SELECT COALESCE(jsonb_agg(to_jsonb(t.*)), ''[]''::jsonb)
+            FROM %I jt
+            JOIN %I t ON t.id = jt.%I
+            WHERE jt.%I = $1::int
+          ', l_junction_table, l_target_entity, l_foreign_key, l_local_key)
+          INTO l_expanded_objects
+          USING l_record_id;
+
+          l_existing_record := l_existing_record || jsonb_build_object(l_m2m_key, l_expanded_objects);
+        END IF;
+      END LOOP;
+    END;
+  END IF;
+
   IF NOT l_is_insert THEN
     -- UPDATE: Merge with existing record
 

@@ -389,12 +445,14 @@ BEGIN
   LOOP
     -- Don't update any primary key columns
    IF NOT (l_col_name = ANY(l_pk_cols)) THEN
-      -- Skip M2M ID fields (they're not real table columns)
+      -- Skip M2M ID fields and expanded fields (they're not real table columns)
      IF l_entity_config.many_to_many IS NOT NULL THEN
        DECLARE
          l_m2m_id_field text;
+          l_m2m_key text;
          l_skip boolean := false;
        BEGIN
+          -- Skip M2M ID fields (e.g., tag_ids)
          FOR l_m2m_id_field IN
            SELECT value->>'id_field'
            FROM jsonb_each(l_entity_config.many_to_many)

@@ -405,6 +463,19 @@ BEGIN
            END IF;
          END LOOP;
 
+          -- Skip M2M expanded fields (e.g., tags)
+          IF NOT l_skip THEN
+            FOR l_m2m_key IN
+              SELECT key
+              FROM jsonb_each(l_entity_config.many_to_many)
+            LOOP
+              IF l_col_name = l_m2m_key THEN
+                l_skip := true;
+                EXIT;
+              END IF;
+            END LOOP;
+          END IF;
+
          IF NOT l_skip THEN
            l_set_clauses := l_set_clauses || format('%I = %L', l_col_name, l_merged_data ->> l_col_name);
          END IF;

@@ -640,8 +711,7 @@ BEGIN
     table_name,
     op,
     pk,
-
-    after,
+    data,
     user_id,
     notify_users
   ) VALUES (

@@ -651,7 +721,6 @@ BEGIN
       SELECT jsonb_object_agg(col, l_result ->> col)
       FROM unnest(l_pk_cols) AS col
     ),
-    CASE WHEN NOT l_is_insert THEN l_existing_record ELSE NULL END,
     l_result,
     p_user_id,
     dzql.resolve_notification_paths(p_entity, l_result)

@@ -736,6 +805,106 @@ BEGIN
     RAISE EXCEPTION 'Permission denied: delete on %', p_entity;
   END IF;
 
+  -- Apply CASCADE/SET NULL/RESTRICT rules from child entities
+  DECLARE
+    l_child_entity record;
+    l_child_graph_rules jsonb;
+    l_delete_rules jsonb;
+    l_rule_name text;
+    l_rule_action text;
+    l_fk_field text;
+    l_fk_key text;
+    l_child_count int;
+  BEGIN
+    -- Find all entities that reference this entity
+    FOR l_child_entity IN
+      SELECT * FROM dzql.entities
+      WHERE fk_includes IS NOT NULL
+      AND fk_includes != '{}'
+    LOOP
+      -- Check if this child entity has an FK pointing to the entity being deleted
+      FOR l_fk_key IN SELECT jsonb_object_keys(l_child_entity.fk_includes)
+      LOOP
+        IF l_child_entity.fk_includes->>l_fk_key = p_entity THEN
+          -- This child entity references the parent being deleted
+          l_child_graph_rules := l_child_entity.graph_rules;
+
+          IF l_child_graph_rules IS NOT NULL AND l_child_graph_rules != '{}' THEN
+            l_delete_rules := l_child_graph_rules->'delete';
+
+            IF l_delete_rules IS NOT NULL AND l_delete_rules != '{}' THEN
+              -- Check rules for this child entity
+              FOR l_rule_name, l_rule_action IN SELECT * FROM jsonb_each_text(l_delete_rules)
+              LOOP
+                -- The rule_name should match the child entity name
+                IF l_rule_name = l_child_entity.table_name THEN
+                  -- Determine FK field name (try direct match then _id suffix)
+                  l_fk_field := l_fk_key;
+                  IF NOT EXISTS (
+                    SELECT 1 FROM information_schema.columns
+                    WHERE table_name = l_child_entity.table_name
+                    AND column_name = l_fk_field
+                  ) THEN
+                    l_fk_field := l_fk_key || '_id';
+                  END IF;
+
+                  -- Apply the rule action
+                  CASE l_rule_action
+                    WHEN 'CASCADE' THEN
+                      -- Delete child records via generic_delete to trigger events
+                      DECLARE
+                        l_child_record record;
+                      BEGIN
+                        FOR l_child_record IN
+                          EXECUTE format(
+                            'SELECT * FROM %I WHERE %I = %L',
+                            l_child_entity.table_name,
+                            l_fk_field,
+                            l_record->>'id'
+                          )
+                        LOOP
+                          -- Call generic_delete for each child to ensure events are created
+                          PERFORM dzql.generic_delete(
+                            l_child_entity.table_name,
+                            jsonb_build_object('id', l_child_record.id),
+                            p_user_id
+                          );
+                        END LOOP;
+                      END;
+
+                    WHEN 'SET NULL' THEN
+                      -- Set FK to NULL in child records
+                      EXECUTE format(
+                        'UPDATE %I SET %I = NULL WHERE %I = %L',
+                        l_child_entity.table_name,
+                        l_fk_field,
+                        l_fk_field,
+                        l_record->>'id'
+                      );
+
+                    WHEN 'RESTRICT' THEN
+                      -- Check if children exist, prevent delete if so
+                      EXECUTE format(
+                        'SELECT COUNT(*) FROM %I WHERE %I = %L',
+                        l_child_entity.table_name,
+                        l_fk_field,
+                        l_record->>'id'
+                      ) INTO l_child_count;
+
+                      IF l_child_count > 0 THEN
+                        RAISE EXCEPTION 'Cannot delete % - % child records exist in %',
+                          p_entity, l_child_count, l_child_entity.table_name;
+                      END IF;
+                  END CASE;
+                END IF;
+              END LOOP;
+            END IF;
+          END IF;
+        END IF;
+      END LOOP;
+    END LOOP;
+  END;
+
   -- Execute graph rules for delete
   l_graph_rules_result := dzql.execute_graph_rules(
     p_entity,

@@ -760,8 +929,7 @@ BEGIN
     table_name,
     op,
     pk,
-
-    after,
+    data,
     user_id,
     notify_users
   ) VALUES (

@@ -771,7 +939,6 @@ BEGIN
       SELECT jsonb_object_agg(col, l_record ->> col)
      FROM unnest(l_pk_cols) AS col
     ),
-    l_record,
     NULL,
     p_user_id,
     dzql.resolve_notification_paths(p_entity, l_record)

@@ -867,6 +1034,11 @@ BEGIN
 
   l_where_clause := l_where_clause || l_temporal_filter;
 
+  -- Add soft delete filter if enabled for this entity
+  IF l_entity_config.soft_delete THEN
+    l_where_clause := l_where_clause || ' AND t.deleted_at IS NULL';
+  END IF;
+
   IF l_is_compound_key AND l_entity_config.fk_includes IS NOT NULL AND l_entity_config.fk_includes != '{}' THEN
     -- For compound keys with FK includes, build full dereferenced labels
     l_sql_stmt := format(
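Wiring this up from the entity side: per the lookup in the block above, the delete rule lives on the child entity's own `graph_rules`, keyed by the child's table name, alongside an `fk_includes` entry pointing at the parent. A hypothetical setup (entity names invented):

```sql
-- A comments entity that references posts and cascades when its parent post is deleted
UPDATE dzql.entities
SET fk_includes = '{"post": "posts"}'::jsonb,
    graph_rules = '{"delete": {"comments": "CASCADE"}}'::jsonb
WHERE table_name = 'comments';
```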
package/src/database/migrations/004_search.sql
CHANGED

@@ -349,6 +349,11 @@ BEGIN
     l_where_clause := l_where_clause || format(' AND dzql.check_permission(%L, ''view'', %L, to_jsonb(t.*))', p_user_id, p_entity);
   END IF;
 
+  -- Add soft delete filter if enabled for this entity
+  IF l_entity_config.soft_delete THEN
+    l_where_clause := l_where_clause || ' AND t.deleted_at IS NULL';
+  END IF;
+
   -- Build base SQL
   l_base_sql := format('FROM %I t %s', p_entity, l_where_clause);
 
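Soft deletion is opt-in per entity via `dzql.entities.soft_delete`; once set, search (and get, per the matching change in 003 above) append the filter automatically. A hypothetical example:

```sql
UPDATE dzql.entities SET soft_delete = true WHERE table_name = 'venues';
-- generated WHERE clauses now end with: AND t.deleted_at IS NULL
```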
package/src/database/migrations/005_entities.sql
CHANGED

@@ -125,15 +125,13 @@ BEGIN
     table_name,
     op,
     pk,
-
-    after,
+    data,
     user_id,
     notify_users
   ) VALUES (
     p_entity,
     'insert',
     jsonb_build_object('id', p_data->>'id'),
-    NULL,
     p_data,
     p_user_id,
     dzql.resolve_notification_paths(p_entity, p_data)

@@ -179,20 +177,17 @@ BEGIN
   EXECUTE l_sql_stmt;
 
   -- Create event for graph rule action
-  -- Note: We don't have the before/after data here, just logging the update occurred
   INSERT INTO dzql.events (
     table_name,
     op,
     pk,
-
-    after,
+    data,
     user_id,
     notify_users
   ) VALUES (
     p_entity,
     'update',
     p_match,
-    NULL, -- We don't have the before state in this context
     p_data,
     p_user_id,
     '[]'::int[] -- Graph rule updates don't have notification paths

@@ -232,15 +227,13 @@ BEGIN
     table_name,
     op,
     pk,
-
-    after,
+    data,
     user_id,
     notify_users
   ) VALUES (
     p_entity,
     'delete',
     p_match,
-    NULL, -- We don't have the before state in this context
     NULL,
     p_user_id,
     '[]'::int[] -- Graph rule deletes don't have notification paths
package/src/database/migrations/007_events.sql
CHANGED

@@ -10,15 +10,12 @@ CREATE OR REPLACE FUNCTION dzql.notify_event()
 RETURNS TRIGGER LANGUAGE plpgsql AS $$
 BEGIN
   -- Send real-time notification to single channel
-  -- For DELETE operations, send the 'before' data since 'after' is NULL
   PERFORM pg_notify('dzql', jsonb_build_object(
     'event_id', NEW.event_id,
     'table', NEW.table_name,
     'op', NEW.op,
     'pk', NEW.pk,
-    'data',
-    'before', NEW.before,
-    'after', NEW.after,
+    'data', NEW.data,
     'user_id', NEW.user_id,
     'at', NEW.at,
     'notify_users', NEW.notify_users
package/src/database/migrations/010_fix_m2m_events.sql
ADDED

@@ -0,0 +1,94 @@
+-- ============================================================================
+-- Migration 010: Fix M2M in Event "before" Field
+-- ============================================================================
+--
+-- Issue: UPDATE events don't include M2M data in the "before" field
+-- Root cause: l_existing_record in generic_save doesn't expand M2M relationships
+-- Fix: Create a helper function to expand M2M, then update generic_save to use it
+--
+-- This ensures UPDATE events have complete before/after state including M2M data
+-- ============================================================================
+
+DO $$ BEGIN
+  RAISE NOTICE 'Migration 010: Fixing M2M in event before field...';
+END $$;
+
+-- Create helper function to expand M2M relationships for a record
+CREATE OR REPLACE FUNCTION dzql.expand_m2m_for_record(
+  p_entity text,
+  p_record jsonb,
+  p_entity_config record,
+  p_pk_cols text[]
+) RETURNS jsonb
+LANGUAGE plpgsql
+AS $$
+DECLARE
+  l_result jsonb := p_record;
+  l_m2m_key text;
+  l_m2m_config jsonb;
+  l_id_field text;
+  l_junction_table text;
+  l_local_key text;
+  l_foreign_key text;
+  l_target_entity text;
+  l_expand boolean;
+  l_record_id text;
+  l_id_array jsonb;
+  l_expanded_objects jsonb;
+BEGIN
+  -- Only expand if entity has M2M configuration
+  IF p_entity_config.many_to_many IS NULL OR p_entity_config.many_to_many = '{}'::jsonb THEN
+    RETURN l_result;
+  END IF;
+
+  -- Get the primary key value from the record
+  l_record_id := l_result->>p_pk_cols[1]; -- Assume single PK for now
+
+  IF l_record_id IS NULL THEN
+    RETURN l_result;
+  END IF;
+
+  -- Loop through all M2M relationships
+  FOR l_m2m_key IN SELECT jsonb_object_keys(p_entity_config.many_to_many)
+  LOOP
+    l_m2m_config := p_entity_config.many_to_many->l_m2m_key;
+    l_id_field := l_m2m_config->>'id_field';
+    l_junction_table := l_m2m_config->>'junction_table';
+    l_local_key := l_m2m_config->>'local_key';
+    l_foreign_key := l_m2m_config->>'foreign_key';
+    l_target_entity := l_m2m_config->>'target_entity';
+    l_expand := COALESCE((l_m2m_config->>'expand')::boolean, false);
+
+    -- Always include array of IDs
+    EXECUTE format('
+      SELECT COALESCE(jsonb_agg(%I ORDER BY %I), ''[]''::jsonb)
+      FROM %I
+      WHERE %I = $1::int
+    ', l_foreign_key, l_foreign_key, l_junction_table, l_local_key)
+    INTO l_id_array
+    USING l_record_id;
+
+    l_result := l_result || jsonb_build_object(l_id_field, l_id_array);
+
+    -- Conditionally include expanded objects if expand: true
+    IF l_expand THEN
+      EXECUTE format('
+        SELECT COALESCE(jsonb_agg(to_jsonb(t.*) ORDER BY t.id), ''[]''::jsonb)
+        FROM %I jt
+        JOIN %I t ON t.id = jt.%I
+        WHERE jt.%I = $1::int
+      ', l_junction_table, l_target_entity, l_foreign_key, l_local_key)
+      INTO l_expanded_objects
+      USING l_record_id;
+
+      l_result := l_result || jsonb_build_object(l_m2m_key, l_expanded_objects);
+    END IF;
+  END LOOP;
+
+  RETURN l_result;
+END $$;
+
+DO $$ BEGIN
+  RAISE NOTICE 'Migration 010: M2M expansion helper function created';
+  RAISE NOTICE 'Note: generic_save still needs updating to use this helper - will be done in next migration';
+END $$;