omgkit 2.0.7 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +2 -2
- package/plugin/skills/backend/api-architecture/SKILL.md +857 -0
- package/plugin/skills/backend/caching-strategies/SKILL.md +755 -0
- package/plugin/skills/backend/event-driven-architecture/SKILL.md +753 -0
- package/plugin/skills/backend/real-time-systems/SKILL.md +635 -0
- package/plugin/skills/databases/database-optimization/SKILL.md +571 -0
- package/plugin/skills/devops/monorepo-management/SKILL.md +595 -0
- package/plugin/skills/devops/observability/SKILL.md +622 -0
- package/plugin/skills/devops/performance-profiling/SKILL.md +905 -0
- package/plugin/skills/frontend/advanced-ui-design/SKILL.md +426 -0
- package/plugin/skills/integrations/ai-integration/SKILL.md +730 -0
- package/plugin/skills/integrations/payment-integration/SKILL.md +735 -0
- package/plugin/skills/methodology/problem-solving/SKILL.md +355 -0
- package/plugin/skills/methodology/research-validation/SKILL.md +668 -0
- package/plugin/skills/methodology/sequential-thinking/SKILL.md +260 -0
- package/plugin/skills/mobile/mobile-development/SKILL.md +756 -0
- package/plugin/skills/security/security-hardening/SKILL.md +633 -0
- package/plugin/skills/tools/document-processing/SKILL.md +916 -0
- package/plugin/skills/tools/image-processing/SKILL.md +748 -0
- package/plugin/skills/tools/mcp-development/SKILL.md +883 -0
- package/plugin/skills/tools/media-processing/SKILL.md +831 -0

@@ -0,0 +1,571 @@
---
name: database-optimization
description: Advanced database performance tuning including query optimization, indexing strategies, partitioning, and scaling patterns
category: databases
triggers:
  - database optimization
  - query optimization
  - indexing
  - database performance
  - slow queries
  - database scaling
  - partitioning
---

# Database Optimization

Master **database performance tuning** for high-scale applications. This skill covers query optimization, indexing strategies, partitioning, and scaling patterns.

## Purpose

Optimize database performance for production workloads:

- Analyze and optimize slow queries
- Design effective indexing strategies
- Implement table partitioning
- Configure connection pooling
- Scale with read replicas
- Plan database migrations (see the sketch after this list)
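
Migrations are the one item above without a dedicated section below, so here is a minimal sketch of the core zero-downtime pattern (expand, backfill in batches, contract). It assumes the Prisma `db` client used throughout this skill; the table and column names are hypothetical:

```typescript
// Expand-and-contract migration: add a column without a long table lock
async function backfillDisplayName(batchSize: number = 1000): Promise<void> {
  // 1. Expand: a nullable column adds no table rewrite and no long lock
  await db.$executeRawUnsafe(
    'ALTER TABLE users ADD COLUMN IF NOT EXISTS display_name TEXT'
  );

  // 2. Backfill in small batches so no single statement holds locks for long
  let updated = 0;
  do {
    updated = await db.$executeRawUnsafe(`
      UPDATE users SET display_name = name
      WHERE id IN (
        SELECT id FROM users WHERE display_name IS NULL LIMIT ${batchSize}
      )
    `);
  } while (updated > 0);

  // 3. Contract: only once the backfill is complete, tighten the constraint
  await db.$executeRawUnsafe(
    'ALTER TABLE users ALTER COLUMN display_name SET NOT NULL'
  );
}
```
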
## Features

### 1. Query Optimization

```sql
-- Identify slow queries (requires the pg_stat_statements extension)
-- PostgreSQL (on 13+ the columns are named total_exec_time / mean_exec_time)
SELECT
  query,
  calls,
  total_time / 1000 as total_seconds,
  mean_time / 1000 as mean_seconds,
  rows
FROM pg_stat_statements
ORDER BY total_time DESC
LIMIT 20;

-- Analyze query execution plan
EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON)
SELECT u.*, COUNT(o.id) as order_count
FROM users u
LEFT JOIN orders o ON o.user_id = u.id
WHERE u.created_at > '2024-01-01'
GROUP BY u.id;

-- Before optimization: sequential scans and a nested-loop join
-- After indexing orders(user_id) and users(created_at): index scans and a hash join
```
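
A quick sketch of applying those indexes and re-checking the plan from application code (assuming the same `db` Prisma client; the index names are illustrative):

```typescript
// CONCURRENTLY avoids blocking writes while the index builds;
// note it cannot run inside a transaction.
async function addSupportingIndexes(): Promise<void> {
  await db.$executeRawUnsafe(
    'CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_orders_user_id ON orders(user_id)'
  );
  await db.$executeRawUnsafe(
    'CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_users_created_at ON users(created_at)'
  );

  // Re-run EXPLAIN and inspect the JSON plan for "Index Scan" / "Hash Join"
  const plan = await db.$queryRawUnsafe(`
    EXPLAIN (ANALYZE, BUFFERS, FORMAT JSON)
    SELECT u.*, COUNT(o.id) as order_count
    FROM users u
    LEFT JOIN orders o ON o.user_id = u.id
    WHERE u.created_at > '2024-01-01'
    GROUP BY u.id
  `);
  console.log(JSON.stringify(plan, null, 2));
}
```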

```typescript
// Query optimization patterns

// BAD: N+1 query problem
const users = await db.user.findMany();
for (const user of users) {
  const orders = await db.order.findMany({ where: { userId: user.id } });
  // Process orders...
}

// GOOD: Eager loading
const usersWithOrders = await db.user.findMany({
  include: {
    orders: {
      where: { status: 'completed' },
      orderBy: { createdAt: 'desc' },
      take: 10,
    },
  },
});

// GOOD: Batch loading with DataLoader
const userLoader = new DataLoader(async (userIds: readonly string[]) => {
  const batch = await db.user.findMany({
    where: { id: { in: [...userIds] } },
  });
  // DataLoader requires results in the same order as the input keys
  return userIds.map(id => batch.find(u => u.id === id));
});

// Pagination optimization
// BAD: OFFSET pagination (slow on large tables -- skipped rows are still scanned):
//   SELECT * FROM orders ORDER BY created_at LIMIT 20 OFFSET 10000;

// GOOD: Cursor-based pagination
async function getOrdersPage(cursor?: string, limit: number = 20) {
  return db.order.findMany({
    take: limit + 1, // Fetch one extra to check if there's more
    cursor: cursor ? { id: cursor } : undefined,
    skip: cursor ? 1 : 0, // Skip the cursor row itself
    orderBy: { createdAt: 'desc' },
  });
}

// Selective column loading
// BAD: SELECT * pulls every column across the wire
const fullRows = await db.user.findMany();

// GOOD: Select only needed columns
const slimRows = await db.user.findMany({
  select: {
    id: true,
    name: true,
    email: true,
  },
});
```
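
All `load` calls issued in the same tick are coalesced into one `findMany`; a usage sketch for the `userLoader` above (the ids are hypothetical):

```typescript
// Two loads, one underlying query: WHERE id IN ('user-1', 'user-2')
const [alice, bob] = await Promise.all([
  userLoader.load('user-1'),
  userLoader.load('user-2'),
]);
```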

### 2. Indexing Strategies

```sql
-- Single column index
CREATE INDEX idx_users_email ON users(email);

-- Composite index (column order matters!)
-- Good for: WHERE status = ? AND created_at > ?
CREATE INDEX idx_orders_status_created ON orders(status, created_at);

-- Partial index (smaller, faster)
CREATE INDEX idx_orders_pending ON orders(created_at)
WHERE status = 'pending';

-- Covering index (INCLUDE carries extra columns for index-only scans)
CREATE INDEX idx_orders_user_covering ON orders(user_id)
INCLUDE (status, total, created_at);

-- Expression index
CREATE INDEX idx_users_email_lower ON users(LOWER(email));

-- GIN index for full-text search
CREATE INDEX idx_products_search ON products
USING GIN(to_tsvector('english', name || ' ' || description));

-- Index for JSON queries
CREATE INDEX idx_settings_preferences ON users
USING GIN((settings->'preferences'));
```

```typescript
// Index analysis tool
interface IndexInfo { indexname: string; indexdef: string; size: string }
interface IndexStat { index_name: string; scans: number; tuples_read: number; tuples_fetched: number }
interface SeqScanStat { table: string; seq_scan: number; seq_tup_read: number; idx_scan: number; avg_seq_tuples: number }
interface IndexAnalysis {
  indexes: IndexInfo[];
  stats: IndexStat[];
  unusedIndexes: IndexStat[];
  missingIndexSuggestions: SeqScanStat[];
  recommendations: string[];
}

async function analyzeTableIndexes(tableName: string): Promise<IndexAnalysis> {
  // Get existing indexes
  const indexes = await db.$queryRaw<IndexInfo[]>`
    SELECT
      indexname,
      indexdef,
      pg_size_pretty(pg_relation_size(indexname::regclass)) as size
    FROM pg_indexes
    WHERE tablename = ${tableName}
  `;

  // Get index usage stats (cast the bigint counters so the driver
  // returns plain numbers rather than BigInt)
  const stats = await db.$queryRaw<IndexStat[]>`
    SELECT
      indexrelname as index_name,
      idx_scan::int as scans,
      idx_tup_read::int as tuples_read,
      idx_tup_fetch::int as tuples_fetched
    FROM pg_stat_user_indexes
    WHERE relname = ${tableName}
  `;

  // Find unused indexes
  const unused = stats.filter(s => s.scans === 0);

  // Find missing index opportunities: tables that are sequentially
  // scanned often, reading many tuples per scan
  const missingIndexSuggestions = await db.$queryRaw<SeqScanStat[]>`
    SELECT
      schemaname || '.' || relname as "table",
      seq_scan::int as seq_scan,
      seq_tup_read::int as seq_tup_read,
      idx_scan::int as idx_scan,
      (seq_tup_read / seq_scan)::int as avg_seq_tuples
    FROM pg_stat_user_tables
    WHERE seq_scan > 0
      AND relname = ${tableName}
      AND seq_tup_read / seq_scan > 1000
  `;

  return {
    indexes,
    stats,
    unusedIndexes: unused,
    missingIndexSuggestions,
    recommendations: generateRecommendations(indexes, stats, missingIndexSuggestions),
  };
}

// Index recommendations
function generateRecommendations(
  indexes: IndexInfo[],
  stats: IndexStat[],
  missing: SeqScanStat[],
): string[] {
  const recommendations: string[] = [];

  // Check for unused indexes
  for (const unused of stats.filter(s => s.scans === 0)) {
    recommendations.push(
      `Consider dropping unused index: ${unused.index_name}`
    );
  }

  // Check for missing indexes on frequently scanned columns
  if (missing.length > 0) {
    recommendations.push(
      `High sequential scans detected. Consider adding indexes.`
    );
  }

  return recommendations;
}
```
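
Usage is a one-liner per table; the output is advisory and worth reviewing before dropping anything (the index name in the comment is hypothetical):

```typescript
const report = await analyzeTableIndexes('orders');
console.log(report.recommendations);
// e.g. [ 'Consider dropping unused index: idx_orders_legacy_status' ]
```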

### 3. Table Partitioning

```sql
-- Range partitioning by date (PostgreSQL)
CREATE TABLE orders (
  id UUID NOT NULL,
  user_id UUID NOT NULL,
  total DECIMAL(10, 2),
  status VARCHAR(50),
  created_at TIMESTAMP NOT NULL,
  -- the partition key must be part of the primary key
  PRIMARY KEY (id, created_at)
) PARTITION BY RANGE (created_at);

-- Create partitions
CREATE TABLE orders_2024_q1 PARTITION OF orders
  FOR VALUES FROM ('2024-01-01') TO ('2024-04-01');

CREATE TABLE orders_2024_q2 PARTITION OF orders
  FOR VALUES FROM ('2024-04-01') TO ('2024-07-01');

-- Automatic partition creation via a trigger function. Caveat: an insert
-- that targets a missing partition can fail at tuple routing before any
-- trigger runs, so creating partitions ahead of time from a scheduled job
-- (see PartitionManager below) is the more reliable pattern.
CREATE OR REPLACE FUNCTION create_partition_if_not_exists()
RETURNS TRIGGER AS $$
DECLARE
  partition_name TEXT;
  start_date DATE;
  end_date DATE;
BEGIN
  start_date := DATE_TRUNC('month', NEW.created_at);
  end_date := start_date + INTERVAL '1 month';
  partition_name := 'orders_' || TO_CHAR(start_date, 'YYYY_MM');

  IF NOT EXISTS (SELECT 1 FROM pg_class WHERE relname = partition_name) THEN
    EXECUTE format(
      'CREATE TABLE IF NOT EXISTS %I PARTITION OF orders
       FOR VALUES FROM (%L) TO (%L)',
      partition_name, start_date, end_date
    );
  END IF;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;
```

```typescript
// Partition management
const toDateString = (d: Date): string => d.toISOString().slice(0, 10);

class PartitionManager {
  // Create future partitions proactively
  async createFuturePartitions(tableName: string, monthsAhead: number = 3): Promise<void> {
    const now = new Date();

    for (let i = 0; i <= monthsAhead; i++) {
      const partitionDate = new Date(now.getFullYear(), now.getMonth() + i, 1);
      const nextMonth = new Date(now.getFullYear(), now.getMonth() + i + 1, 1);

      const partitionName = `${tableName}_${partitionDate.getFullYear()}_${String(partitionDate.getMonth() + 1).padStart(2, '0')}`;

      // Identifiers cannot be bound as query parameters, so this needs the
      // unsafe variant -- only call it with trusted, internal table names
      await db.$executeRawUnsafe(`
        CREATE TABLE IF NOT EXISTS ${partitionName}
        PARTITION OF ${tableName}
        FOR VALUES FROM ('${toDateString(partitionDate)}') TO ('${toDateString(nextMonth)}')
      `);
    }
  }

  // Archive old partitions (getPartitions / exportPartition are assumed
  // helpers: list child tables via pg_inherits, copy rows to cold storage)
  async archiveOldPartitions(tableName: string, retentionMonths: number): Promise<void> {
    const cutoff = new Date();
    cutoff.setMonth(cutoff.getMonth() - retentionMonths);

    const partitions = await this.getPartitions(tableName);

    for (const partition of partitions) {
      if (partition.endDate < cutoff) {
        // Export to archive storage
        await this.exportPartition(partition.name);

        // Drop partition (identifier again, so the unsafe variant)
        await db.$executeRawUnsafe(`DROP TABLE ${partition.name}`);
      }
    }
  }
}
```
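
A scheduled job keeps partitions ahead of the data; a minimal wiring sketch (the daily interval and retention numbers are arbitrary choices):

```typescript
const partitions = new PartitionManager();

// Run once a day: keep 3 months of future partitions, archive after 24 months
setInterval(async () => {
  await partitions.createFuturePartitions('orders', 3);
  await partitions.archiveOldPartitions('orders', 24);
}, 24 * 60 * 60 * 1000);
```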

### 4. Connection Pooling

```ini
; PgBouncer configuration (pgbouncer.ini)
[databases]
myapp = host=localhost dbname=myapp

[pgbouncer]
listen_addr = 127.0.0.1
listen_port = 6432
auth_type = md5
auth_file = /etc/pgbouncer/userlist.txt
pool_mode = transaction
max_client_conn = 1000
default_pool_size = 20
min_pool_size = 5
reserve_pool_size = 5
reserve_pool_timeout = 3
server_lifetime = 3600
server_idle_timeout = 600
server_connect_timeout = 15
server_login_retry = 1
```

```typescript
// Application-level pooling with Prisma
const prisma = new PrismaClient({
  datasources: {
    db: {
      url: process.env.DATABASE_URL,
    },
  },
  log: ['query', 'warn', 'error'],
});

// Configure the pool size via the connection string:
// DATABASE_URL="postgresql://user:pass@host:5432/db?connection_limit=20&pool_timeout=10"

// Connection pool monitoring (PoolStats is an assumed interface that
// mirrors the selected columns plus cacheHitRatio)
async function getPoolStats(): Promise<PoolStats> {
  const stats = await prisma.$queryRaw<Array<Omit<PoolStats, 'cacheHitRatio'>>>`
    SELECT
      numbackends as active_connections,
      xact_commit as commits,
      xact_rollback as rollbacks,
      blks_read::float as blocks_read,
      blks_hit::float as blocks_hit,
      tup_returned as rows_returned,
      tup_fetched as rows_fetched,
      tup_inserted as rows_inserted,
      tup_updated as rows_updated,
      tup_deleted as rows_deleted
    FROM pg_stat_database
    WHERE datname = current_database()
  `;

  return {
    ...stats[0],
    // Share of reads served from shared buffers; healthy systems sit near 1
    cacheHitRatio: stats[0].blocks_hit / (stats[0].blocks_hit + stats[0].blocks_read),
  };
}
```
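
The cache-hit ratio is the number to watch; a sketch of turning it into an alert (the 0.99 threshold is a common rule of thumb, not a hard rule):

```typescript
// Poll pool stats and warn when the buffer cache stops absorbing reads
setInterval(async () => {
  const stats = await getPoolStats();
  if (stats.cacheHitRatio < 0.99) {
    console.warn(`cache hit ratio dropped to ${stats.cacheHitRatio.toFixed(4)}`);
  }
}, 60_000);
```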

### 5. Read Replicas

```typescript
// Read/Write splitting
class DatabaseRouter {
  private writeClient: PrismaClient;
  private readClients: PrismaClient[];
  private currentReadIndex = 0;

  constructor() {
    this.writeClient = new PrismaClient({
      datasources: { db: { url: process.env.DATABASE_WRITE_URL } },
    });

    // Filter out empty entries so an unset variable yields no read clients
    this.readClients = (process.env.DATABASE_READ_URLS || '')
      .split(',')
      .filter(url => url.length > 0)
      .map(url => new PrismaClient({ datasources: { db: { url } } }));
  }

  // Get client for write operations
  get write(): PrismaClient {
    return this.writeClient;
  }

  // Round-robin load balancing for reads
  get read(): PrismaClient {
    if (this.readClients.length === 0) {
      return this.writeClient;
    }

    const client = this.readClients[this.currentReadIndex];
    this.currentReadIndex = (this.currentReadIndex + 1) % this.readClients.length;
    return client;
  }

  // Transactions always use the primary
  async transaction<T>(fn: (tx: Prisma.TransactionClient) => Promise<T>): Promise<T> {
    return this.writeClient.$transaction(fn);
  }
}

const db = new DatabaseRouter();

// Usage
// Reads go to replicas
const users = await db.read.user.findMany();

// Writes go to primary
const newUser = await db.write.user.create({ data: userData });

// Transactions use primary
await db.transaction(async (tx) => {
  await tx.order.create({ data: orderData });
  await tx.inventory.update({ where: { id }, data: { stock: { decrement: 1 } } });
});
```
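
One caveat the router glosses over: replicas lag the primary, so a read issued right after a write may not see it. A minimal read-your-writes sketch on top of the class above (the 500 ms pin window is an assumption to tune against your actual replication lag):

```typescript
// Pin reads to the primary for a short window after each write
class LagAwareRouter extends DatabaseRouter {
  private pinnedUntil = 0;

  notePinAfterWrite(windowMs: number = 500): void {
    this.pinnedUntil = Date.now() + windowMs;
  }

  get read(): PrismaClient {
    // While pinned, serve reads from the primary instead of a replica
    return Date.now() < this.pinnedUntil ? this.write : super.read;
  }
}
```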

### 6. Query Performance Monitoring

```typescript
// Slow query logging and analysis
// (logger, metrics, and queryHistogram are assumed to be the app's
// existing structured logger and metrics clients)
interface SlowQuery { model?: string; action: string; duration: number; args: unknown }
interface QueryStats {
  query: string; calls: number; total_time: number; mean_time: number;
  rows: number; shared_blks_hit: number; shared_blks_read: number;
  cache_hit_ratio: number | null;
}

class QueryMonitor {
  private slowQueryThreshold = 1000; // 1 second
  private prisma!: PrismaClient;

  // Prisma middleware for query timing ($use; newer Prisma versions
  // offer client extensions for the same purpose)
  setupMiddleware(prisma: PrismaClient): void {
    this.prisma = prisma;
    prisma.$use(async (params, next) => {
      const start = Date.now();
      const result = await next(params);
      const duration = Date.now() - start;

      if (duration > this.slowQueryThreshold) {
        this.logSlowQuery({
          model: params.model,
          action: params.action,
          duration,
          args: params.args,
        });
      }

      // Record metrics
      queryHistogram.observe({
        model: params.model || 'unknown',
        action: params.action,
      }, duration / 1000);

      return result;
    });
  }

  private logSlowQuery(query: SlowQuery): void {
    logger.warn({
      type: 'slow_query',
      ...query,
    }, `Slow query detected: ${query.model}.${query.action}`);

    // Send to monitoring
    metrics.increment('database.slow_queries', {
      model: query.model,
      action: query.action,
    });
  }

  // Get query statistics (column names predate PostgreSQL 13;
  // on 13+ use total_exec_time / mean_exec_time)
  async getQueryStats(): Promise<QueryStats[]> {
    return this.prisma.$queryRaw<QueryStats[]>`
      SELECT
        query,
        calls,
        total_time,
        mean_time,
        rows,
        shared_blks_hit,
        shared_blks_read,
        shared_blks_hit::float / NULLIF(shared_blks_hit + shared_blks_read, 0) as cache_hit_ratio
      FROM pg_stat_statements
      ORDER BY total_time DESC
      LIMIT 50
    `;
  }

  // Reset statistics
  async resetStats(): Promise<void> {
    await this.prisma.$executeRaw`SELECT pg_stat_statements_reset()`;
  }
}
```
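
Wiring it up (assuming the `prisma` client from the pooling section):

```typescript
const monitor = new QueryMonitor();
monitor.setupMiddleware(prisma);

// Later, e.g. from an admin-only endpoint:
const worstQueries = await monitor.getQueryStats();
console.table(worstQueries.slice(0, 5));
```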

## Use Cases

### 1. E-commerce Query Optimization

```sql
-- Optimized product search query
SELECT p.*, c.name as category_name
FROM products p
JOIN categories c ON c.id = p.category_id
WHERE
  p.status = 'active'
  AND p.price BETWEEN 10 AND 100
  AND to_tsvector('english', p.name || ' ' || p.description) @@ plainto_tsquery('english', 'wireless headphones')
ORDER BY p.popularity DESC, p.created_at DESC
LIMIT 20;

-- Indexes for this query (named to avoid clashing with the generic
-- idx_products_search example above)
CREATE INDEX idx_products_active_search ON products
USING GIN(to_tsvector('english', name || ' ' || description))
WHERE status = 'active';

CREATE INDEX idx_products_price_popularity ON products(price, popularity DESC)
WHERE status = 'active';
```

### 2. Analytics Dashboard

```sql
-- Materialized view for dashboard
CREATE MATERIALIZED VIEW daily_sales_summary AS
SELECT
  DATE_TRUNC('day', created_at) as date,
  COUNT(*) as order_count,
  SUM(total) as revenue,
  AVG(total) as avg_order_value,
  COUNT(DISTINCT user_id) as unique_customers
FROM orders
WHERE status = 'completed'
GROUP BY DATE_TRUNC('day', created_at);

-- A unique index is required for concurrent refresh
CREATE UNIQUE INDEX idx_daily_sales_date ON daily_sales_summary(date);

-- Refresh periodically, concurrently (readers are not blocked)
REFRESH MATERIALIZED VIEW CONCURRENTLY daily_sales_summary;
```
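
Something has to drive the refresh; a minimal scheduler sketch (assuming the `prisma` client; in production pg_cron or a dedicated job runner is more typical):

```typescript
// Refresh the dashboard view every 15 minutes
setInterval(async () => {
  try {
    await prisma.$executeRawUnsafe(
      'REFRESH MATERIALIZED VIEW CONCURRENTLY daily_sales_summary'
    );
  } catch (err) {
    console.error('materialized view refresh failed', err);
  }
}, 15 * 60 * 1000);
```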

## Best Practices

### Do's

- **Analyze query plans** - Use EXPLAIN ANALYZE
- **Use appropriate indexes** - Based on actual query patterns
- **Implement connection pooling** - PgBouncer or app-level
- **Monitor slow queries** - Set up alerts
- **Plan for growth** - Partitioning, sharding
- **Test with production-like data** - Not empty tables

### Don'ts

- Don't add indexes blindly
- Don't use SELECT *
- Don't ignore query plans
- Don't skip connection limits
- Don't forget about index maintenance
- Don't over-normalize

## Related Skills

- **postgresql** - PostgreSQL specific features
- **prisma** - ORM optimization
- **caching-strategies** - Query result caching

## Reference Resources

- [PostgreSQL Performance](https://www.postgresql.org/docs/current/performance-tips.html)
- [Use The Index, Luke](https://use-the-index-luke.com/)
- [PgBouncer Documentation](https://www.pgbouncer.org/config.html)
- [Prisma Performance](https://www.prisma.io/docs/guides/performance-and-optimization)