mastercontroller 1.3.10 → 1.3.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +4 -1
- package/.eslintrc.json +50 -0
- package/.github/workflows/ci.yml +317 -0
- package/.prettierrc +10 -0
- package/DEPLOYMENT.md +956 -0
- package/MasterControl.js +98 -16
- package/MasterRequest.js +42 -1
- package/MasterRouter.js +15 -5
- package/README.md +485 -28
- package/SENIOR_ENGINEER_AUDIT.md +2477 -0
- package/VERIFICATION_CHECKLIST.md +726 -0
- package/error/README.md +2452 -0
- package/monitoring/HealthCheck.js +347 -0
- package/monitoring/PrometheusExporter.js +416 -0
- package/package.json +64 -11
- package/security/MasterValidator.js +140 -10
- package/security/adapters/RedisCSRFStore.js +428 -0
- package/security/adapters/RedisRateLimiter.js +462 -0
- package/security/adapters/RedisSessionStore.js +476 -0
- package/FIXES_APPLIED.md +0 -378
- package/error/ErrorBoundary.js +0 -353
- package/error/HydrationMismatch.js +0 -265
- package/error/MasterError.js +0 -240
- package/error/MasterError.js.tmp +0 -0
- package/error/MasterErrorRenderer.js +0 -536
- package/error/MasterErrorRenderer.js.tmp +0 -0
- package/error/SSRErrorHandler.js +0 -273
package/DEPLOYMENT.md
ADDED
|
@@ -0,0 +1,956 @@
|
|
|
1
|
+
# MasterController Production Deployment Guide
|
|
2
|
+
|
|
3
|
+
## Table of Contents
|
|
4
|
+
|
|
5
|
+
1. [Overview](#overview)
|
|
6
|
+
2. [Prerequisites](#prerequisites)
|
|
7
|
+
3. [Docker Deployment](#docker-deployment)
|
|
8
|
+
4. [Kubernetes Deployment](#kubernetes-deployment)
|
|
9
|
+
5. [Load Balancer Configuration](#load-balancer-configuration)
|
|
10
|
+
6. [Redis Cluster Setup](#redis-cluster-setup)
|
|
11
|
+
7. [Environment Variables](#environment-variables)
|
|
12
|
+
8. [Health Checks & Monitoring](#health-checks--monitoring)
|
|
13
|
+
9. [Security Best Practices](#security-best-practices)
|
|
14
|
+
10. [Performance Tuning](#performance-tuning)
|
|
15
|
+
11. [Troubleshooting](#troubleshooting)
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Overview
|
|
20
|
+
|
|
21
|
+
This guide covers production deployment of MasterController applications for Fortune 500 enterprises, including:
|
|
22
|
+
|
|
23
|
+
- Horizontal scaling with load balancers
|
|
24
|
+
- Redis-backed distributed session/rate limiting
|
|
25
|
+
- Health checks for orchestration
|
|
26
|
+
- Monitoring with Prometheus
|
|
27
|
+
- Security hardening
|
|
28
|
+
- High availability setup
|
|
29
|
+
|
|
30
|
+
**Recommended Architecture:**
|
|
31
|
+
|
|
32
|
+
```
|
|
33
|
+
Internet → Load Balancer (Nginx/HAProxy)
|
|
34
|
+
↓
|
|
35
|
+
[ MasterController Instance 1 ]
|
|
36
|
+
[ MasterController Instance 2 ] ←→ Redis Cluster
|
|
37
|
+
[ MasterController Instance 3 ]
|
|
38
|
+
↓
|
|
39
|
+
Database (PostgreSQL/MySQL/MongoDB)
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
---
|
|
43
|
+
|
|
44
|
+
## Prerequisites
|
|
45
|
+
|
|
46
|
+
- **Node.js**: v18.x or higher (LTS recommended)
|
|
47
|
+
- **Redis**: v6.x or higher (v7.x for Redis Cluster)
|
|
48
|
+
- **Load Balancer**: Nginx 1.24+, HAProxy 2.8+, or AWS ALB
|
|
49
|
+
- **Monitoring**: Prometheus + Grafana (optional but recommended)
|
|
50
|
+
- **SSL Certificates**: Let's Encrypt or commercial CA
|
|
51
|
+
|
|
52
|
+
---
|
|
53
|
+
|
|
54
|
+
## Docker Deployment
|
|
55
|
+
|
|
56
|
+
### 1. Create Dockerfile
|
|
57
|
+
|
|
58
|
+
```dockerfile
|
|
59
|
+
# Dockerfile
|
|
60
|
+
FROM node:20-alpine
|
|
61
|
+
|
|
62
|
+
# Install security updates
|
|
63
|
+
RUN apk upgrade --no-cache
|
|
64
|
+
|
|
65
|
+
# Create app directory
|
|
66
|
+
WORKDIR /usr/src/app
|
|
67
|
+
|
|
68
|
+
# Copy package files
|
|
69
|
+
COPY package*.json ./
|
|
70
|
+
|
|
71
|
+
# Install production dependencies only
|
|
72
|
+
RUN npm ci --only=production --ignore-scripts
|
|
73
|
+
|
|
74
|
+
# Copy application code
|
|
75
|
+
COPY . .
|
|
76
|
+
|
|
77
|
+
# Create non-root user
|
|
78
|
+
RUN addgroup -g 1001 -S nodejs && \
|
|
79
|
+
adduser -S nodejs -u 1001
|
|
80
|
+
|
|
81
|
+
# Set ownership
|
|
82
|
+
RUN chown -R nodejs:nodejs /usr/src/app
|
|
83
|
+
|
|
84
|
+
# Switch to non-root user
|
|
85
|
+
USER nodejs
|
|
86
|
+
|
|
87
|
+
# Expose port
|
|
88
|
+
EXPOSE 3000
|
|
89
|
+
|
|
90
|
+
# Health check
|
|
91
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=40s --retries=3 \
|
|
92
|
+
CMD node -e "require('http').get('http://localhost:3000/_health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})"
|
|
93
|
+
|
|
94
|
+
# Start application
|
|
95
|
+
CMD ["node", "server.js"]
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
### 2. Create docker-compose.yml
|
|
99
|
+
|
|
100
|
+
```yaml
|
|
101
|
+
version: '3.8'
|
|
102
|
+
|
|
103
|
+
services:
|
|
104
|
+
app:
|
|
105
|
+
build: .
|
|
106
|
+
restart: always
|
|
107
|
+
ports:
|
|
108
|
+
- "3000:3000"
|
|
109
|
+
environment:
|
|
110
|
+
NODE_ENV: production
|
|
111
|
+
REDIS_HOST: redis
|
|
112
|
+
REDIS_PORT: 6379
|
|
113
|
+
SESSION_SECRET: ${SESSION_SECRET}
|
|
114
|
+
depends_on:
|
|
115
|
+
- redis
|
|
116
|
+
healthcheck:
|
|
117
|
+
test: ["CMD", "node", "-e", "require('http').get('http://localhost:3000/_health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})"]
|
|
118
|
+
interval: 30s
|
|
119
|
+
timeout: 10s
|
|
120
|
+
retries: 3
|
|
121
|
+
start_period: 40s
|
|
122
|
+
deploy:
|
|
123
|
+
replicas: 3
|
|
124
|
+
resources:
|
|
125
|
+
limits:
|
|
126
|
+
cpus: '1'
|
|
127
|
+
memory: 1G
|
|
128
|
+
reservations:
|
|
129
|
+
cpus: '0.5'
|
|
130
|
+
memory: 512M
|
|
131
|
+
|
|
132
|
+
redis:
|
|
133
|
+
image: redis:7-alpine
|
|
134
|
+
restart: always
|
|
135
|
+
command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD}
|
|
136
|
+
volumes:
|
|
137
|
+
- redis-data:/data
|
|
138
|
+
healthcheck:
|
|
139
|
+
test: ["CMD", "redis-cli", "ping"]
|
|
140
|
+
interval: 10s
|
|
141
|
+
timeout: 3s
|
|
142
|
+
retries: 3
|
|
143
|
+
|
|
144
|
+
nginx:
|
|
145
|
+
image: nginx:alpine
|
|
146
|
+
restart: always
|
|
147
|
+
ports:
|
|
148
|
+
- "80:80"
|
|
149
|
+
- "443:443"
|
|
150
|
+
volumes:
|
|
151
|
+
- ./nginx.conf:/etc/nginx/nginx.conf:ro
|
|
152
|
+
- ./ssl:/etc/nginx/ssl:ro
|
|
153
|
+
depends_on:
|
|
154
|
+
- app
|
|
155
|
+
|
|
156
|
+
volumes:
|
|
157
|
+
redis-data:
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
### 3. Build and Run
|
|
161
|
+
|
|
162
|
+
```bash
|
|
163
|
+
# Build image
|
|
164
|
+
docker build -t mastercontroller-app:latest .
|
|
165
|
+
|
|
166
|
+
# Run with docker-compose
|
|
167
|
+
docker-compose up -d
|
|
168
|
+
|
|
169
|
+
# Scale instances (note: first remove the fixed "3000:3000" host port mapping and let nginx reach the containers on the internal network, otherwise scaling fails with a host port conflict)
|
|
170
|
+
docker-compose up -d --scale app=5
|
|
171
|
+
|
|
172
|
+
# View logs
|
|
173
|
+
docker-compose logs -f app
|
|
174
|
+
|
|
175
|
+
# Stop
|
|
176
|
+
docker-compose down
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
---
|
|
180
|
+
|
|
181
|
+
## Kubernetes Deployment
|
|
182
|
+
|
|
183
|
+
### 1. Create Deployment Manifest
|
|
184
|
+
|
|
185
|
+
```yaml
|
|
186
|
+
# k8s/deployment.yaml
|
|
187
|
+
apiVersion: apps/v1
|
|
188
|
+
kind: Deployment
|
|
189
|
+
metadata:
|
|
190
|
+
name: mastercontroller
|
|
191
|
+
labels:
|
|
192
|
+
app: mastercontroller
|
|
193
|
+
spec:
|
|
194
|
+
replicas: 3
|
|
195
|
+
strategy:
|
|
196
|
+
type: RollingUpdate
|
|
197
|
+
rollingUpdate:
|
|
198
|
+
maxSurge: 1
|
|
199
|
+
maxUnavailable: 0
|
|
200
|
+
selector:
|
|
201
|
+
matchLabels:
|
|
202
|
+
app: mastercontroller
|
|
203
|
+
template:
|
|
204
|
+
metadata:
|
|
205
|
+
labels:
|
|
206
|
+
app: mastercontroller
|
|
207
|
+
spec:
|
|
208
|
+
containers:
|
|
209
|
+
- name: app
|
|
210
|
+
image: mastercontroller-app:latest
|
|
211
|
+
ports:
|
|
212
|
+
- containerPort: 3000
|
|
213
|
+
name: http
|
|
214
|
+
env:
|
|
215
|
+
- name: NODE_ENV
|
|
216
|
+
value: "production"
|
|
217
|
+
- name: REDIS_HOST
|
|
218
|
+
value: "redis-service"
|
|
219
|
+
- name: REDIS_PORT
|
|
220
|
+
value: "6379"
|
|
221
|
+
- name: SESSION_SECRET
|
|
222
|
+
valueFrom:
|
|
223
|
+
secretKeyRef:
|
|
224
|
+
name: app-secrets
|
|
225
|
+
key: session-secret
|
|
226
|
+
resources:
|
|
227
|
+
requests:
|
|
228
|
+
cpu: 500m
|
|
229
|
+
memory: 512Mi
|
|
230
|
+
limits:
|
|
231
|
+
cpu: 1000m
|
|
232
|
+
memory: 1Gi
|
|
233
|
+
livenessProbe:
|
|
234
|
+
httpGet:
|
|
235
|
+
path: /_health
|
|
236
|
+
port: 3000
|
|
237
|
+
initialDelaySeconds: 30
|
|
238
|
+
periodSeconds: 10
|
|
239
|
+
timeoutSeconds: 5
|
|
240
|
+
failureThreshold: 3
|
|
241
|
+
readinessProbe:
|
|
242
|
+
httpGet:
|
|
243
|
+
path: /_health
|
|
244
|
+
port: 3000
|
|
245
|
+
initialDelaySeconds: 10
|
|
246
|
+
periodSeconds: 5
|
|
247
|
+
timeoutSeconds: 3
|
|
248
|
+
failureThreshold: 2
|
|
249
|
+
---
|
|
250
|
+
apiVersion: v1
|
|
251
|
+
kind: Service
|
|
252
|
+
metadata:
|
|
253
|
+
name: mastercontroller-service
|
|
254
|
+
spec:
|
|
255
|
+
type: ClusterIP
|
|
256
|
+
selector:
|
|
257
|
+
app: mastercontroller
|
|
258
|
+
ports:
|
|
259
|
+
- protocol: TCP
|
|
260
|
+
port: 80
|
|
261
|
+
targetPort: 3000
|
|
262
|
+
---
|
|
263
|
+
apiVersion: autoscaling/v2
|
|
264
|
+
kind: HorizontalPodAutoscaler
|
|
265
|
+
metadata:
|
|
266
|
+
name: mastercontroller-hpa
|
|
267
|
+
spec:
|
|
268
|
+
scaleTargetRef:
|
|
269
|
+
apiVersion: apps/v1
|
|
270
|
+
kind: Deployment
|
|
271
|
+
name: mastercontroller
|
|
272
|
+
minReplicas: 3
|
|
273
|
+
maxReplicas: 10
|
|
274
|
+
metrics:
|
|
275
|
+
- type: Resource
|
|
276
|
+
resource:
|
|
277
|
+
name: cpu
|
|
278
|
+
target:
|
|
279
|
+
type: Utilization
|
|
280
|
+
averageUtilization: 70
|
|
281
|
+
- type: Resource
|
|
282
|
+
resource:
|
|
283
|
+
name: memory
|
|
284
|
+
target:
|
|
285
|
+
type: Utilization
|
|
286
|
+
averageUtilization: 80
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
### 2. Create Ingress
|
|
290
|
+
|
|
291
|
+
```yaml
|
|
292
|
+
# k8s/ingress.yaml
|
|
293
|
+
apiVersion: networking.k8s.io/v1
|
|
294
|
+
kind: Ingress
|
|
295
|
+
metadata:
|
|
296
|
+
name: mastercontroller-ingress
|
|
297
|
+
annotations:
|
|
298
|
+
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
|
299
|
+
cert-manager.io/cluster-issuer: "letsencrypt-prod"
|
|
300
|
+
spec:
|
|
301
|
+
tls:
|
|
302
|
+
- hosts:
|
|
303
|
+
- api.example.com
|
|
304
|
+
secretName: mastercontroller-tls
|
|
305
|
+
rules:
|
|
306
|
+
- host: api.example.com
|
|
307
|
+
http:
|
|
308
|
+
paths:
|
|
309
|
+
- path: /
|
|
310
|
+
pathType: Prefix
|
|
311
|
+
backend:
|
|
312
|
+
service:
|
|
313
|
+
name: mastercontroller-service
|
|
314
|
+
port:
|
|
315
|
+
number: 80
|
|
316
|
+
```
|
|
317
|
+
|
|
318
|
+
### 3. Deploy to Kubernetes
|
|
319
|
+
|
|
320
|
+
```bash
|
|
321
|
+
# Apply configurations
|
|
322
|
+
kubectl apply -f k8s/deployment.yaml
|
|
323
|
+
kubectl apply -f k8s/ingress.yaml
|
|
324
|
+
|
|
325
|
+
# Check status
|
|
326
|
+
kubectl get pods
|
|
327
|
+
kubectl get svc
|
|
328
|
+
kubectl get ingress
|
|
329
|
+
|
|
330
|
+
# Scale deployment
|
|
331
|
+
kubectl scale deployment mastercontroller --replicas=5
|
|
332
|
+
|
|
333
|
+
# View logs
|
|
334
|
+
kubectl logs -f deployment/mastercontroller
|
|
335
|
+
|
|
336
|
+
# Rollout update
|
|
337
|
+
kubectl set image deployment/mastercontroller app=mastercontroller-app:v2
|
|
338
|
+
kubectl rollout status deployment/mastercontroller
|
|
339
|
+
|
|
340
|
+
# Rollback if needed
|
|
341
|
+
kubectl rollout undo deployment/mastercontroller
|
|
342
|
+
```
|
|
343
|
+
|
|
344
|
+
---
|
|
345
|
+
|
|
346
|
+
## Load Balancer Configuration
|
|
347
|
+
|
|
348
|
+
### Nginx Configuration
|
|
349
|
+
|
|
350
|
+
```nginx
|
|
351
|
+
# /etc/nginx/nginx.conf
|
|
352
|
+
|
|
353
|
+
user nginx;
|
|
354
|
+
worker_processes auto;
|
|
355
|
+
error_log /var/log/nginx/error.log warn;
|
|
356
|
+
pid /var/run/nginx.pid;
|
|
357
|
+
|
|
358
|
+
events {
|
|
359
|
+
worker_connections 4096;
|
|
360
|
+
use epoll;
|
|
361
|
+
multi_accept on;
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
http {
|
|
365
|
+
include /etc/nginx/mime.types;
|
|
366
|
+
default_type application/octet-stream;
|
|
367
|
+
|
|
368
|
+
# Logging
|
|
369
|
+
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
|
370
|
+
'$status $body_bytes_sent "$http_referer" '
|
|
371
|
+
'"$http_user_agent" "$http_x_forwarded_for" '
|
|
372
|
+
'rt=$request_time uct="$upstream_connect_time" '
|
|
373
|
+
'uht="$upstream_header_time" urt="$upstream_response_time"';
|
|
374
|
+
|
|
375
|
+
access_log /var/log/nginx/access.log main;
|
|
376
|
+
|
|
377
|
+
# Performance
|
|
378
|
+
sendfile on;
|
|
379
|
+
tcp_nopush on;
|
|
380
|
+
tcp_nodelay on;
|
|
381
|
+
keepalive_timeout 65;
|
|
382
|
+
types_hash_max_size 2048;
|
|
383
|
+
client_max_body_size 50M;
|
|
384
|
+
|
|
385
|
+
# Gzip compression
|
|
386
|
+
gzip on;
|
|
387
|
+
gzip_vary on;
|
|
388
|
+
gzip_min_length 1024;
|
|
389
|
+
gzip_types text/plain text/css text/xml text/javascript
|
|
390
|
+
application/json application/javascript application/xml+rss;
|
|
391
|
+
|
|
392
|
+
# Upstream servers
|
|
393
|
+
upstream mastercontroller_backend {
|
|
394
|
+
least_conn; # Load balancing algorithm
|
|
395
|
+
|
|
396
|
+
server 10.0.1.10:3000 max_fails=3 fail_timeout=30s;
|
|
397
|
+
server 10.0.1.11:3000 max_fails=3 fail_timeout=30s;
|
|
398
|
+
server 10.0.1.12:3000 max_fails=3 fail_timeout=30s;
|
|
399
|
+
|
|
400
|
+
keepalive 32; # Connection pooling
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
# Rate limiting
|
|
404
|
+
limit_req_zone $binary_remote_addr zone=api_limit:10m rate=100r/s;
|
|
405
|
+
limit_req_zone $binary_remote_addr zone=login_limit:10m rate=5r/m;
|
|
406
|
+
limit_conn_zone $binary_remote_addr zone=conn_limit:10m;
|
|
407
|
+
|
|
408
|
+
# SSL session cache
|
|
409
|
+
ssl_session_cache shared:SSL:10m;
|
|
410
|
+
ssl_session_timeout 10m;
|
|
411
|
+
|
|
412
|
+
server {
|
|
413
|
+
listen 80;
|
|
414
|
+
server_name api.example.com;
|
|
415
|
+
|
|
416
|
+
# Redirect HTTP to HTTPS
|
|
417
|
+
return 301 https://$server_name$request_uri;
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
server {
|
|
421
|
+
listen 443 ssl http2;
|
|
422
|
+
server_name api.example.com;
|
|
423
|
+
|
|
424
|
+
# SSL Configuration
|
|
425
|
+
ssl_certificate /etc/nginx/ssl/fullchain.pem;
|
|
426
|
+
ssl_certificate_key /etc/nginx/ssl/privkey.pem;
|
|
427
|
+
ssl_protocols TLSv1.2 TLSv1.3;
|
|
428
|
+
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256';
|
|
429
|
+
ssl_prefer_server_ciphers off;
|
|
430
|
+
|
|
431
|
+
# Security headers
|
|
432
|
+
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
|
433
|
+
add_header X-Frame-Options "SAMEORIGIN" always;
|
|
434
|
+
add_header X-Content-Type-Options "nosniff" always;
|
|
435
|
+
add_header X-XSS-Protection "1; mode=block" always;
|
|
436
|
+
|
|
437
|
+
# Rate limiting
|
|
438
|
+
limit_req zone=api_limit burst=200 nodelay;
|
|
439
|
+
limit_conn conn_limit 10;
|
|
440
|
+
|
|
441
|
+
# Health check endpoint (bypass rate limiting)
|
|
442
|
+
location /_health {
|
|
443
|
+
access_log off;
|
|
444
|
+
proxy_pass http://mastercontroller_backend;
|
|
445
|
+
proxy_http_version 1.1;
|
|
446
|
+
proxy_set_header Connection "";
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
# Metrics endpoint (restrict access)
|
|
450
|
+
location /_metrics {
|
|
451
|
+
allow 10.0.0.0/8; # Internal network only
|
|
452
|
+
deny all;
|
|
453
|
+
proxy_pass http://mastercontroller_backend;
|
|
454
|
+
}
|
|
455
|
+
|
|
456
|
+
# API endpoints
|
|
457
|
+
location / {
|
|
458
|
+
proxy_pass http://mastercontroller_backend;
|
|
459
|
+
proxy_http_version 1.1;
|
|
460
|
+
|
|
461
|
+
# Headers
|
|
462
|
+
proxy_set_header Host $host;
|
|
463
|
+
proxy_set_header X-Real-IP $remote_addr;
|
|
464
|
+
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
|
465
|
+
proxy_set_header X-Forwarded-Proto $scheme;
|
|
466
|
+
proxy_set_header Connection "";
|
|
467
|
+
|
|
468
|
+
# Timeouts
|
|
469
|
+
proxy_connect_timeout 60s;
|
|
470
|
+
proxy_send_timeout 60s;
|
|
471
|
+
proxy_read_timeout 60s;
|
|
472
|
+
|
|
473
|
+
# Buffering
|
|
474
|
+
proxy_buffering on;
|
|
475
|
+
proxy_buffer_size 4k;
|
|
476
|
+
proxy_buffers 8 4k;
|
|
477
|
+
}
|
|
478
|
+
|
|
479
|
+
# Static files (if served by app)
|
|
480
|
+
location /static/ {
|
|
481
|
+
proxy_pass http://mastercontroller_backend;
|
|
482
|
+
proxy_cache_valid 200 1y;
|
|
483
|
+
add_header Cache-Control "public, immutable";
|
|
484
|
+
}
|
|
485
|
+
}
|
|
486
|
+
}
|
|
487
|
+
```
|
|
488
|
+
|
|
489
|
+
### HAProxy Configuration
|
|
490
|
+
|
|
491
|
+
```haproxy
|
|
492
|
+
# /etc/haproxy/haproxy.cfg
|
|
493
|
+
|
|
494
|
+
global
|
|
495
|
+
maxconn 4096
|
|
496
|
+
log /dev/log local0
|
|
497
|
+
log /dev/log local1 notice
|
|
498
|
+
chroot /var/lib/haproxy
|
|
499
|
+
stats socket /run/haproxy/admin.sock mode 660 level admin
|
|
500
|
+
stats timeout 30s
|
|
501
|
+
user haproxy
|
|
502
|
+
group haproxy
|
|
503
|
+
daemon
|
|
504
|
+
|
|
505
|
+
defaults
|
|
506
|
+
log global
|
|
507
|
+
mode http
|
|
508
|
+
option httplog
|
|
509
|
+
option dontlognull
|
|
510
|
+
option http-server-close
|
|
511
|
+
option forwardfor except 127.0.0.0/8
|
|
512
|
+
option redispatch
|
|
513
|
+
retries 3
|
|
514
|
+
timeout connect 5000
|
|
515
|
+
timeout client 50000
|
|
516
|
+
timeout server 50000
|
|
517
|
+
errorfile 400 /etc/haproxy/errors/400.http
|
|
518
|
+
errorfile 403 /etc/haproxy/errors/403.http
|
|
519
|
+
errorfile 408 /etc/haproxy/errors/408.http
|
|
520
|
+
errorfile 500 /etc/haproxy/errors/500.http
|
|
521
|
+
errorfile 502 /etc/haproxy/errors/502.http
|
|
522
|
+
errorfile 503 /etc/haproxy/errors/503.http
|
|
523
|
+
errorfile 504 /etc/haproxy/errors/504.http
|
|
524
|
+
|
|
525
|
+
frontend http_front
|
|
526
|
+
bind *:80
|
|
527
|
+
bind *:443 ssl crt /etc/haproxy/certs/api.example.com.pem
|
|
528
|
+
|
|
529
|
+
# Redirect HTTP to HTTPS
|
|
530
|
+
redirect scheme https if !{ ssl_fc }
|
|
531
|
+
|
|
532
|
+
# Security headers
|
|
533
|
+
http-response set-header Strict-Transport-Security "max-age=31536000; includeSubDomains"
|
|
534
|
+
http-response set-header X-Frame-Options "SAMEORIGIN"
|
|
535
|
+
http-response set-header X-Content-Type-Options "nosniff"
|
|
536
|
+
|
|
537
|
+
# Rate limiting (example)
|
|
538
|
+
stick-table type ip size 100k expire 30s store http_req_rate(10s)
|
|
539
|
+
http-request track-sc0 src
|
|
540
|
+
http-request deny deny_status 429 if { sc_http_req_rate(0) gt 100 }
|
|
541
|
+
|
|
542
|
+
default_backend mastercontroller_backend
|
|
543
|
+
|
|
544
|
+
backend mastercontroller_backend
|
|
545
|
+
balance leastconn
|
|
546
|
+
option httpchk GET /_health
|
|
547
|
+
http-check expect status 200
|
|
548
|
+
|
|
549
|
+
server app1 10.0.1.10:3000 check inter 5s fall 3 rise 2
|
|
550
|
+
server app2 10.0.1.11:3000 check inter 5s fall 3 rise 2
|
|
551
|
+
server app3 10.0.1.12:3000 check inter 5s fall 3 rise 2
|
|
552
|
+
|
|
553
|
+
listen stats
|
|
554
|
+
bind *:8404
|
|
555
|
+
stats enable
|
|
556
|
+
stats uri /stats
|
|
557
|
+
stats refresh 10s
|
|
558
|
+
stats admin if LOCALHOST
|
|
559
|
+
```
|
|
560
|
+
|
|
561
|
+
---
|
|
562
|
+
|
|
563
|
+
## Redis Cluster Setup
|
|
564
|
+
|
|
565
|
+
### Single Redis Instance (Development/Small Production)
|
|
566
|
+
|
|
567
|
+
```bash
|
|
568
|
+
# docker-compose.yml
|
|
569
|
+
redis:
|
|
570
|
+
image: redis:7-alpine
|
|
571
|
+
command: redis-server --requirepass ${REDIS_PASSWORD} --appendonly yes
|
|
572
|
+
volumes:
|
|
573
|
+
- redis-data:/data
|
|
574
|
+
ports:
|
|
575
|
+
- "6379:6379"
|
|
576
|
+
```
|
|
577
|
+
|
|
578
|
+
### Redis Cluster (High Availability)
|
|
579
|
+
|
|
580
|
+
```bash
|
|
581
|
+
# Create Redis cluster with 6 nodes (3 masters, 3 replicas)
|
|
582
|
+
docker run -d --name redis-node-1 -p 7001:7001 redis:7-alpine redis-server --port 7001 --cluster-enabled yes
|
|
583
|
+
docker run -d --name redis-node-2 -p 7002:7002 redis:7-alpine redis-server --port 7002 --cluster-enabled yes
|
|
584
|
+
docker run -d --name redis-node-3 -p 7003:7003 redis:7-alpine redis-server --port 7003 --cluster-enabled yes
|
|
585
|
+
docker run -d --name redis-node-4 -p 7004:7004 redis:7-alpine redis-server --port 7004 --cluster-enabled yes
|
|
586
|
+
docker run -d --name redis-node-5 -p 7005:7005 redis:7-alpine redis-server --port 7005 --cluster-enabled yes
|
|
587
|
+
docker run -d --name redis-node-6 -p 7006:7006 redis:7-alpine redis-server --port 7006 --cluster-enabled yes
|
|
588
|
+
|
|
589
|
+
# Create cluster
|
|
590
|
+
docker exec -it redis-node-1 redis-cli --cluster create \
|
|
591
|
+
127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 \
|
|
592
|
+
127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006 \
|
|
593
|
+
--cluster-replicas 1
|
|
594
|
+
```
|
|
595
|
+
|
|
596
|
+
### Application Configuration
|
|
597
|
+
|
|
598
|
+
```javascript
|
|
599
|
+
// server.js
|
|
600
|
+
const Redis = require('ioredis');
|
|
601
|
+
const { RedisSessionStore } = require('./security/adapters/RedisSessionStore');
|
|
602
|
+
const { RedisRateLimiter } = require('./security/adapters/RedisRateLimiter');
|
|
603
|
+
|
|
604
|
+
// Single instance
|
|
605
|
+
const redis = new Redis({
|
|
606
|
+
host: process.env.REDIS_HOST || 'localhost',
|
|
607
|
+
port: process.env.REDIS_PORT || 6379,
|
|
608
|
+
password: process.env.REDIS_PASSWORD,
|
|
609
|
+
retryStrategy: (times) => Math.min(times * 50, 2000)
|
|
610
|
+
});
|
|
611
|
+
|
|
612
|
+
// OR Redis Cluster
|
|
613
|
+
const redis = new Redis.Cluster([
|
|
614
|
+
{ host: 'redis-node-1', port: 7001 },
|
|
615
|
+
{ host: 'redis-node-2', port: 7002 },
|
|
616
|
+
{ host: 'redis-node-3', port: 7003 }
|
|
617
|
+
], {
|
|
618
|
+
redisOptions: {
|
|
619
|
+
password: process.env.REDIS_PASSWORD
|
|
620
|
+
}
|
|
621
|
+
});
|
|
622
|
+
|
|
623
|
+
// Use Redis adapters
|
|
624
|
+
const sessionStore = new RedisSessionStore(redis);
|
|
625
|
+
const rateLimiter = new RedisRateLimiter(redis);
|
|
626
|
+
|
|
627
|
+
master.session.setStore(sessionStore);
|
|
628
|
+
master.pipeline.use(rateLimiter.middleware());
|
|
629
|
+
```
|
|
630
|
+
|
|
631
|
+
---
|
|
632
|
+
|
|
633
|
+
## Environment Variables
|
|
634
|
+
|
|
635
|
+
### Required Variables
|
|
636
|
+
|
|
637
|
+
```bash
|
|
638
|
+
# .env.production
|
|
639
|
+
NODE_ENV=production
|
|
640
|
+
|
|
641
|
+
# Server
|
|
642
|
+
PORT=3000
|
|
643
|
+
HOST=0.0.0.0
|
|
644
|
+
|
|
645
|
+
# Redis
|
|
646
|
+
REDIS_HOST=redis.example.com
|
|
647
|
+
REDIS_PORT=6379
|
|
648
|
+
REDIS_PASSWORD=your-secure-password
|
|
649
|
+
|
|
650
|
+
# Session
|
|
651
|
+
SESSION_SECRET=your-very-long-random-secret-key-min-32-chars
|
|
652
|
+
SESSION_NAME=mastercontroller.sid
|
|
653
|
+
SESSION_TTL=86400
|
|
654
|
+
|
|
655
|
+
# Security
|
|
656
|
+
CSRF_SECRET=another-long-random-secret
|
|
657
|
+
ALLOWED_ORIGINS=https://app.example.com,https://admin.example.com
|
|
658
|
+
|
|
659
|
+
# Rate Limiting
|
|
660
|
+
RATE_LIMIT_POINTS=100
|
|
661
|
+
RATE_LIMIT_DURATION=60
|
|
662
|
+
RATE_LIMIT_BLOCK_DURATION=300
|
|
663
|
+
|
|
664
|
+
# Logging
|
|
665
|
+
LOG_LEVEL=info
|
|
666
|
+
LOG_FILE=/var/log/mastercontroller/app.log
|
|
667
|
+
|
|
668
|
+
# Monitoring
|
|
669
|
+
PROMETHEUS_ENABLED=true
|
|
670
|
+
HEALTH_CHECK_ENABLED=true
|
|
671
|
+
|
|
672
|
+
# Database (if applicable)
|
|
673
|
+
DATABASE_URL=postgresql://user:pass@db.example.com:5432/myapp
|
|
674
|
+
```
|
|
675
|
+
|
|
676
|
+
### Optional Variables
|
|
677
|
+
|
|
678
|
+
```bash
|
|
679
|
+
# Performance
|
|
680
|
+
MAX_BODY_SIZE=10485760
|
|
681
|
+
MAX_JSON_SIZE=1048576
|
|
682
|
+
STREAM_THRESHOLD=1048576
|
|
683
|
+
|
|
684
|
+
# SSL/TLS
|
|
685
|
+
SSL_CERT_PATH=/etc/ssl/certs/cert.pem
|
|
686
|
+
SSL_KEY_PATH=/etc/ssl/private/key.pem
|
|
687
|
+
|
|
688
|
+
# Monitoring integrations
|
|
689
|
+
SENTRY_DSN=https://xxx@sentry.io/xxx
|
|
690
|
+
DATADOG_API_KEY=your-datadog-api-key
|
|
691
|
+
NEW_RELIC_LICENSE_KEY=your-newrelic-key
|
|
692
|
+
```
|
|
693
|
+
|
|
694
|
+
---
|
|
695
|
+
|
|
696
|
+
## Health Checks & Monitoring
|
|
697
|
+
|
|
698
|
+
### Enable Health Check Endpoint
|
|
699
|
+
|
|
700
|
+
```javascript
|
|
701
|
+
// server.js
|
|
702
|
+
const { healthCheck, createRedisCheck } = require('./monitoring/HealthCheck');
|
|
703
|
+
|
|
704
|
+
// Add custom health checks
|
|
705
|
+
healthCheck.addCheck('redis', createRedisCheck(redis));
|
|
706
|
+
healthCheck.addCheck('database', async () => {
|
|
707
|
+
try {
|
|
708
|
+
await db.ping();
|
|
709
|
+
return { healthy: true };
|
|
710
|
+
} catch (error) {
|
|
711
|
+
return { healthy: false, error: error.message };
|
|
712
|
+
}
|
|
713
|
+
});
|
|
714
|
+
|
|
715
|
+
// Register middleware
|
|
716
|
+
master.pipeline.use(healthCheck.middleware());
|
|
717
|
+
```
|
|
718
|
+
|
|
719
|
+
### Enable Prometheus Metrics
|
|
720
|
+
|
|
721
|
+
```javascript
|
|
722
|
+
const { prometheusExporter } = require('./monitoring/PrometheusExporter');
|
|
723
|
+
|
|
724
|
+
// Register middleware (tracks all HTTP requests)
|
|
725
|
+
master.pipeline.use(prometheusExporter.middleware());
|
|
726
|
+
|
|
727
|
+
// Custom metrics
|
|
728
|
+
prometheusExporter.registerMetric('orders_total', 'counter', 'Total orders processed');
|
|
729
|
+
prometheusExporter.incrementCounter('orders_total');
|
|
730
|
+
```
|
|
731
|
+
|
|
732
|
+
### Grafana Dashboard
|
|
733
|
+
|
|
734
|
+
```yaml
|
|
735
|
+
# prometheus.yml
|
|
736
|
+
scrape_configs:
|
|
737
|
+
- job_name: 'mastercontroller'
|
|
738
|
+
static_configs:
|
|
739
|
+
- targets: ['app1:3000', 'app2:3000', 'app3:3000']
|
|
740
|
+
metrics_path: '/_metrics'
|
|
741
|
+
scrape_interval: 15s
|
|
742
|
+
```
|
|
743
|
+
|
|
744
|
+
### Alerting (Prometheus AlertManager)
|
|
745
|
+
|
|
746
|
+
```yaml
|
|
747
|
+
# alerts.yml
|
|
748
|
+
groups:
|
|
749
|
+
- name: mastercontroller
|
|
750
|
+
rules:
|
|
751
|
+
- alert: HighErrorRate
|
|
752
|
+
expr: sum(rate(mastercontroller_http_requests_total{status=~"5.."}[5m])) / sum(rate(mastercontroller_http_requests_total[5m])) > 0.05
|
|
753
|
+
for: 5m
|
|
754
|
+
labels:
|
|
755
|
+
severity: critical
|
|
756
|
+
annotations:
|
|
757
|
+
summary: "High error rate detected"
|
|
758
|
+
|
|
759
|
+
- alert: HighMemoryUsage
|
|
760
|
+
expr: process_memory_heap_used_bytes / process_memory_heap_total_bytes > 0.9
|
|
761
|
+
for: 10m
|
|
762
|
+
labels:
|
|
763
|
+
severity: warning
|
|
764
|
+
annotations:
|
|
765
|
+
summary: "Memory usage above 90%"
|
|
766
|
+
|
|
767
|
+
- alert: HighLatency
|
|
768
|
+
expr: histogram_quantile(0.95, sum(rate(mastercontroller_http_request_duration_seconds_bucket[5m])) by (le)) > 1
|
|
769
|
+
for: 5m
|
|
770
|
+
labels:
|
|
771
|
+
severity: warning
|
|
772
|
+
annotations:
|
|
773
|
+
summary: "95th percentile latency above 1 second"
|
|
774
|
+
```
|
|
775
|
+
|
|
776
|
+
---
|
|
777
|
+
|
|
778
|
+
## Security Best Practices
|
|
779
|
+
|
|
780
|
+
### 1. SSL/TLS Configuration
|
|
781
|
+
|
|
782
|
+
```bash
|
|
783
|
+
# Generate Let's Encrypt certificate
|
|
784
|
+
certbot certonly --webroot -w /var/www/html -d api.example.com
|
|
785
|
+
|
|
786
|
+
# Auto-renewal
|
|
787
|
+
(crontab -l 2>/dev/null; echo "0 0 * * * certbot renew --quiet") | crontab -
|
|
788
|
+
```
|
|
789
|
+
|
|
790
|
+
### 2. Secrets Management
|
|
791
|
+
|
|
792
|
+
```bash
|
|
793
|
+
# Use environment variables (never commit to Git)
|
|
794
|
+
export SESSION_SECRET=$(openssl rand -base64 32)
|
|
795
|
+
export REDIS_PASSWORD=$(openssl rand -base64 32)
|
|
796
|
+
|
|
797
|
+
# Or use secrets management tools
|
|
798
|
+
# - HashiCorp Vault
|
|
799
|
+
# - AWS Secrets Manager
|
|
800
|
+
# - Azure Key Vault
|
|
801
|
+
# - Kubernetes Secrets
|
|
802
|
+
```
|
|
803
|
+
|
|
804
|
+
### 3. Firewall Rules
|
|
805
|
+
|
|
806
|
+
```bash
|
|
807
|
+
# UFW (Ubuntu)
|
|
808
|
+
ufw allow 80/tcp
|
|
809
|
+
ufw allow 443/tcp
|
|
810
|
+
ufw allow from 10.0.0.0/8 to any port 6379 # Redis (internal only)
|
|
811
|
+
ufw deny 6379 # Block external Redis access
|
|
812
|
+
ufw enable
|
|
813
|
+
|
|
814
|
+
# iptables
|
|
815
|
+
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
|
|
816
|
+
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
|
|
817
|
+
iptables -A INPUT -p tcp --dport 6379 -s 10.0.0.0/8 -j ACCEPT
|
|
818
|
+
iptables -A INPUT -p tcp --dport 6379 -j DROP
|
|
819
|
+
```
|
|
820
|
+
|
|
821
|
+
### 4. Security Headers (Already in Nginx/HAProxy configs above)
|
|
822
|
+
|
|
823
|
+
- `Strict-Transport-Security`
|
|
824
|
+
- `X-Frame-Options`
|
|
825
|
+
- `X-Content-Type-Options`
|
|
826
|
+
- `Content-Security-Policy`
|
|
827
|
+
|
|
828
|
+
### 5. Rate Limiting
|
|
829
|
+
|
|
830
|
+
Enable Redis-based rate limiting:
|
|
831
|
+
|
|
832
|
+
```javascript
|
|
833
|
+
const { RedisRateLimiter } = require('./security/adapters/RedisRateLimiter');
|
|
834
|
+
|
|
835
|
+
const rateLimiter = new RedisRateLimiter(redis, {
|
|
836
|
+
points: 100, // 100 requests
|
|
837
|
+
duration: 60, // per minute
|
|
838
|
+
blockDuration: 300 // block for 5 minutes on exceed
|
|
839
|
+
});
|
|
840
|
+
|
|
841
|
+
master.pipeline.use(rateLimiter.middleware({
|
|
842
|
+
keyGenerator: (ctx) => ctx.request.socket.remoteAddress
|
|
843
|
+
}));
|
|
844
|
+
```
|
|
845
|
+
|
|
846
|
+
---
|
|
847
|
+
|
|
848
|
+
## Performance Tuning
|
|
849
|
+
|
|
850
|
+
### Node.js Settings
|
|
851
|
+
|
|
852
|
+
```bash
|
|
853
|
+
# Increase memory limit
|
|
854
|
+
NODE_OPTIONS="--max-old-space-size=2048"
|
|
855
|
+
|
|
856
|
+
# Enable V8 optimizations
|
|
857
|
+
NODE_OPTIONS="--optimize-for-size"
|
|
858
|
+
|
|
859
|
+
# Cluster mode (multi-core)
|
|
860
|
+
pm2 start server.js -i max
|
|
861
|
+
```
|
|
862
|
+
|
|
863
|
+
### Redis Optimization
|
|
864
|
+
|
|
865
|
+
```conf
|
|
866
|
+
# redis.conf
|
|
867
|
+
maxmemory 2gb
|
|
868
|
+
maxmemory-policy allkeys-lru
|
|
869
|
+
save 900 1
|
|
870
|
+
save 300 10
|
|
871
|
+
appendonly yes
|
|
872
|
+
appendfsync everysec
|
|
873
|
+
```
|
|
874
|
+
|
|
875
|
+
### Load Testing
|
|
876
|
+
|
|
877
|
+
```bash
|
|
878
|
+
# Install Apache Bench
|
|
879
|
+
apt install apache2-utils
|
|
880
|
+
|
|
881
|
+
# Test
|
|
882
|
+
ab -n 10000 -c 100 https://api.example.com/
|
|
883
|
+
|
|
884
|
+
# Or use k6
|
|
885
|
+
k6 run --vus 100 --duration 30s loadtest.js
|
|
886
|
+
```
|
|
887
|
+
|
|
888
|
+
---
|
|
889
|
+
|
|
890
|
+
## Troubleshooting
|
|
891
|
+
|
|
892
|
+
### Check Logs
|
|
893
|
+
|
|
894
|
+
```bash
|
|
895
|
+
# Docker
|
|
896
|
+
docker logs -f mastercontroller-app
|
|
897
|
+
|
|
898
|
+
# Kubernetes
|
|
899
|
+
kubectl logs -f deployment/mastercontroller
|
|
900
|
+
|
|
901
|
+
# PM2
|
|
902
|
+
pm2 logs
|
|
903
|
+
|
|
904
|
+
# System logs
|
|
905
|
+
journalctl -u mastercontroller -f
|
|
906
|
+
```
|
|
907
|
+
|
|
908
|
+
### Common Issues
|
|
909
|
+
|
|
910
|
+
**Issue: High memory usage**
|
|
911
|
+
```bash
|
|
912
|
+
# Check memory
|
|
913
|
+
node --inspect server.js
|
|
914
|
+
# Connect Chrome DevTools to inspect heap
|
|
915
|
+
|
|
916
|
+
# Analyze
|
|
917
|
+
npm install -g clinic
|
|
918
|
+
clinic doctor -- node server.js
|
|
919
|
+
```
|
|
920
|
+
|
|
921
|
+
**Issue: Redis connection failures**
|
|
922
|
+
```bash
|
|
923
|
+
# Test Redis connectivity
|
|
924
|
+
redis-cli -h redis.example.com -p 6379 ping
|
|
925
|
+
|
|
926
|
+
# Check Redis logs
|
|
927
|
+
docker logs redis
|
|
928
|
+
|
|
929
|
+
# Verify password
|
|
930
|
+
redis-cli -h redis.example.com -a your-password ping
|
|
931
|
+
```
|
|
932
|
+
|
|
933
|
+
**Issue: 502 Bad Gateway (Load Balancer)**
|
|
934
|
+
```bash
|
|
935
|
+
# Check upstream health
|
|
936
|
+
curl http://10.0.1.10:3000/_health
|
|
937
|
+
|
|
938
|
+
# Verify load balancer config
|
|
939
|
+
nginx -t
|
|
940
|
+
haproxy -c -f /etc/haproxy/haproxy.cfg
|
|
941
|
+
```
|
|
942
|
+
|
|
943
|
+
---
|
|
944
|
+
|
|
945
|
+
## Support
|
|
946
|
+
|
|
947
|
+
For issues and questions:
|
|
948
|
+
|
|
949
|
+
- GitHub Issues: https://github.com/Tailor/MasterController/issues
|
|
950
|
+
- Documentation: https://github.com/Tailor/MasterController#readme
|
|
951
|
+
- Security Issues: security@mastercontroller.io (if applicable)
|
|
952
|
+
|
|
953
|
+
---
|
|
954
|
+
|
|
955
|
+
**Last Updated:** 2026-01-29
|
|
956
|
+
**Version:** 1.0.0
|