tlc-claude-code 1.7.0 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/server/lib/code-gate/first-commit-audit.js +138 -0
- package/server/lib/code-gate/first-commit-audit.test.js +203 -0
- package/server/lib/code-gate/multi-model-reviewer.js +172 -0
- package/server/lib/code-gate/multi-model-reviewer.test.js +217 -0
- package/server/lib/infra/infra-generator.js +331 -0
- package/server/lib/infra/infra-generator.test.js +146 -0
- package/server/lib/llm/adapters/api-adapter.js +95 -0
- package/server/lib/llm/adapters/api-adapter.test.js +81 -0
- package/server/lib/llm/adapters/codex-adapter.js +85 -0
- package/server/lib/llm/adapters/codex-adapter.test.js +54 -0
- package/server/lib/llm/adapters/gemini-adapter.js +100 -0
- package/server/lib/llm/adapters/gemini-adapter.test.js +54 -0
- package/server/lib/llm/index.js +109 -0
- package/server/lib/llm/index.test.js +147 -0
- package/server/lib/llm/provider-executor.js +168 -0
- package/server/lib/llm/provider-executor.test.js +244 -0
- package/server/lib/llm/provider-registry.js +104 -0
- package/server/lib/llm/provider-registry.test.js +157 -0
- package/server/lib/llm/review-service.js +222 -0
- package/server/lib/llm/review-service.test.js +220 -0
- package/server/lib/shame/shame-registry.js +224 -0
- package/server/lib/shame/shame-registry.test.js +202 -0
- package/server/lib/standards/cleanup-dry-run.js +254 -0
- package/server/lib/standards/cleanup-dry-run.test.js +220 -0
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
/**
 * Multi-Model Reviewer Tests
 *
 * Sends code reviews to 2+ LLM models and aggregates findings.
 * Different models catch different bugs.
 */
import { describe, it, expect, vi } from 'vitest';

// Static ESM import instead of CommonJS `require`: this file is an ES module
// (see the vitest import above), so `require` is not defined at runtime and
// the original `const { ... } = require('./multi-model-reviewer.js')` would
// throw a ReferenceError before any test runs.
import {
  createMultiModelReviewer,
  sendToModels,
  aggregateFindings,
  deduplicateFindings,
  calculateConsensus,
  mergeSummaries,
} from './multi-model-reviewer.js';

describe('Multi-Model Reviewer', () => {
  describe('sendToModels', () => {
    it('sends to multiple models in parallel', async () => {
      const mockReviewFn = vi.fn()
        .mockResolvedValueOnce({
          findings: [{ severity: 'warn', file: 'a.js', line: 1, rule: 'no-console', message: 'Remove console' }],
          summary: 'Model A review',
        })
        .mockResolvedValueOnce({
          findings: [{ severity: 'warn', file: 'b.js', line: 5, rule: 'no-unused', message: 'Unused var' }],
          summary: 'Model B review',
        });

      const results = await sendToModels('diff content', ['model-a', 'model-b'], {
        reviewFn: mockReviewFn,
      });

      expect(mockReviewFn).toHaveBeenCalledTimes(2);
      expect(results).toHaveLength(2);
    });

    it('falls back when one model fails', async () => {
      const mockReviewFn = vi.fn()
        .mockResolvedValueOnce({
          findings: [{ severity: 'warn', file: 'a.js', line: 1, rule: 'test', message: 'Issue' }],
          summary: 'Model A OK',
        })
        .mockRejectedValueOnce(new Error('Model B timeout'));

      const results = await sendToModels('diff', ['model-a', 'model-b'], {
        reviewFn: mockReviewFn,
      });

      expect(results).toHaveLength(1);
      expect(results[0].model).toBe('model-a');
    });

    it('returns empty when all models fail', async () => {
      const mockReviewFn = vi.fn()
        .mockRejectedValueOnce(new Error('fail'))
        .mockRejectedValueOnce(new Error('fail'));

      const results = await sendToModels('diff', ['a', 'b'], {
        reviewFn: mockReviewFn,
      });

      expect(results).toHaveLength(0);
    });

    it('single model mode still works', async () => {
      const mockReviewFn = vi.fn().mockResolvedValue({
        findings: [{ severity: 'block', file: 'x.js', line: 1, rule: 'sec', message: 'XSS' }],
        summary: 'Single model review',
      });

      const results = await sendToModels('diff', ['only-model'], {
        reviewFn: mockReviewFn,
      });

      expect(results).toHaveLength(1);
      expect(results[0].findings).toHaveLength(1);
    });

    it('timeout applies per-model', async () => {
      const mockReviewFn = vi.fn().mockImplementation((diff, model) => {
        if (model === 'slow-model') {
          // Settles far beyond the configured timeout; only sendToModels'
          // per-model timeout should cut it off.
          return new Promise((resolve) => setTimeout(resolve, 10000));
        }
        return Promise.resolve({ findings: [], summary: 'OK' });
      });

      const results = await sendToModels('diff', ['fast-model', 'slow-model'], {
        reviewFn: mockReviewFn,
        timeout: 50,
      });

      // fast-model should succeed, slow-model should timeout
      expect(results.some(r => r.model === 'fast-model')).toBe(true);
    });
  });

  describe('aggregateFindings', () => {
    it('aggregates findings from 2 models', () => {
      const modelResults = [
        {
          model: 'model-a',
          findings: [
            { severity: 'warn', file: 'a.js', line: 1, rule: 'no-console', message: 'Console' },
          ],
          summary: 'A review',
        },
        {
          model: 'model-b',
          findings: [
            { severity: 'block', file: 'b.js', line: 5, rule: 'security', message: 'XSS' },
          ],
          summary: 'B review',
        },
      ];

      const result = aggregateFindings(modelResults);
      expect(result.findings).toHaveLength(2);
    });

    it('tracks flaggedBy for each finding', () => {
      const modelResults = [
        {
          model: 'model-a',
          findings: [
            { severity: 'warn', file: 'a.js', line: 1, rule: 'no-console', message: 'Console' },
          ],
        },
        {
          model: 'model-b',
          findings: [
            { severity: 'warn', file: 'a.js', line: 1, rule: 'no-console', message: 'Console log' },
          ],
        },
      ];

      const result = aggregateFindings(modelResults);
      // Both flagged same file+line+rule, so should be deduplicated
      const finding = result.findings.find(f => f.rule === 'no-console');
      expect(finding.flaggedBy).toContain('model-a');
      expect(finding.flaggedBy).toContain('model-b');
    });

    it('higher severity wins on conflict', () => {
      const modelResults = [
        {
          model: 'model-a',
          findings: [
            { severity: 'warn', file: 'a.js', line: 1, rule: 'test-rule', message: 'Issue' },
          ],
        },
        {
          model: 'model-b',
          findings: [
            { severity: 'block', file: 'a.js', line: 1, rule: 'test-rule', message: 'Serious issue' },
          ],
        },
      ];

      const result = aggregateFindings(modelResults);
      const finding = result.findings.find(f => f.rule === 'test-rule');
      expect(finding.severity).toBe('block');
    });
  });

  describe('deduplicateFindings', () => {
    it('deduplicates identical findings by file+line+rule', () => {
      const findings = [
        { severity: 'warn', file: 'a.js', line: 1, rule: 'no-console', message: 'Console', flaggedBy: ['model-a'] },
        { severity: 'warn', file: 'a.js', line: 1, rule: 'no-console', message: 'Console log', flaggedBy: ['model-b'] },
      ];

      const deduped = deduplicateFindings(findings);
      expect(deduped).toHaveLength(1);
      expect(deduped[0].flaggedBy).toContain('model-a');
      expect(deduped[0].flaggedBy).toContain('model-b');
    });
  });

  describe('calculateConsensus', () => {
    it('calculates consensus percentage', () => {
      const finding = { flaggedBy: ['model-a', 'model-b'] };
      const totalModels = 3;

      const consensus = calculateConsensus(finding, totalModels);
      // 2/3 = ~66.7%
      expect(consensus).toBeCloseTo(66.7, 0);
    });
  });

  describe('mergeSummaries', () => {
    it('merges summaries from all models', () => {
      const modelResults = [
        { model: 'model-a', summary: 'Found 1 issue' },
        { model: 'model-b', summary: 'Code looks mostly clean' },
      ];

      const merged = mergeSummaries(modelResults);
      expect(merged).toContain('model-a');
      expect(merged).toContain('model-b');
      expect(merged).toContain('Found 1 issue');
      expect(merged).toContain('Code looks mostly clean');
    });
  });

  describe('createMultiModelReviewer', () => {
    it('respects model list from config', () => {
      const reviewer = createMultiModelReviewer({
        models: ['gpt-4', 'claude-3'],
        reviewFn: vi.fn(),
      });
      expect(reviewer).toBeDefined();
      expect(reviewer.models).toEqual(['gpt-4', 'claude-3']);
    });
  });
});
|
|
@@ -0,0 +1,331 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Infrastructure Blueprint Generator
|
|
3
|
+
*
|
|
4
|
+
* Generates Docker dev environment with observability stack
|
|
5
|
+
* (Prometheus, Grafana, MailHog, MinIO, pgAdmin) from selectable services.
|
|
6
|
+
*
|
|
7
|
+
* @module infra/infra-generator
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
/** Available infrastructure services */
const AVAILABLE_SERVICES = [
  'postgres', 'redis', 'prometheus', 'grafana', 'mailhog', 'minio', 'pgadmin',
];

/** Default ports for each service (used as the published/host side of each port mapping) */
const DEFAULT_PORTS = {
  postgres: 5432,
  redis: 6379,
  prometheus: 9090,
  grafana: 3000,
  mailhog: 8025,
  minio: 9000,
  pgadmin: 5050,
};

/** Network assignments per service; services not listed here default to ['app'] */
const SERVICE_NETWORKS = {
  postgres: ['app', 'storage'],
  redis: ['app'],
  prometheus: ['monitoring'],
  grafana: ['monitoring'],
  mailhog: ['app'],
  minio: ['app', 'storage'],
  pgadmin: ['app', 'storage'],
};

/**
 * Dollar sign helper for template literals.
 * The generators build YAML by concatenating D so that `${VAR:-default}`
 * placeholders are emitted verbatim into the output instead of being
 * interpolated by JavaScript.
 */
const D = '$';
|
|
39
|
+
|
|
40
|
+
/**
 * Generate service YAML block
 *
 * Emits one docker-compose service entry (image, ports, environment, volumes,
 * networks, healthcheck) for a known service. Unknown service names fall
 * through the switch and yield an empty string.
 *
 * NOTE(review): the leading whitespace inside the YAML string literals below
 * is reproduced as rendered by the registry diff viewer, which may have
 * collapsed runs of spaces — verify the exact indentation against the
 * published package before editing.
 *
 * @param {string} service - Service name
 * @param {Object} ports - Port overrides (host side), keyed by service name
 * @returns {string} YAML block for the service
 */
function generateServiceBlock(service, ports = {}) {
  // NOTE(review): `||` means a falsy override (e.g. port 0) silently falls
  // back to the default; use `??` if 0 should ever be honored.
  const port = ports[service] || DEFAULT_PORTS[service];
  const networks = SERVICE_NETWORKS[service] || ['app'];
  const networkYaml = networks.map(n => ' - ' + n).join('\n');

  const lines = [];

  switch (service) {
    case 'postgres':
      lines.push(
        ' postgres:',
        ' image: postgres:16-alpine',
        ' ports:',
        ' - "' + port + ':5432"',
        ' environment:',
        ' POSTGRES_USER: ' + D + '{POSTGRES_USER:-app}',
        ' POSTGRES_PASSWORD: ' + D + '{POSTGRES_PASSWORD:-secret}',
        ' POSTGRES_DB: ' + D + '{POSTGRES_DB:-app_dev}',
        ' volumes:',
        ' - postgres_data:/var/lib/postgresql/data',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        // D + D emits `$$` — docker-compose's escape for a literal `$` — so
        // the variable is expanded by the shell inside the container rather
        // than by compose-time interpolation.
        ' test: ["CMD-SHELL", "pg_isready -U ' + D + D + '{POSTGRES_USER:-app}"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;

    case 'redis':
      lines.push(
        ' redis:',
        ' image: redis:7-alpine',
        ' ports:',
        ' - "' + port + ':6379"',
        ' volumes:',
        ' - redis_data:/data',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        ' test: ["CMD", "redis-cli", "ping"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;

    case 'prometheus':
      lines.push(
        ' prometheus:',
        ' image: prom/prometheus:latest',
        ' ports:',
        ' - "' + port + ':9090"',
        ' volumes:',
        ' - prometheus_data:/prometheus',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        ' test: ["CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;

    case 'grafana':
      lines.push(
        ' grafana:',
        ' image: grafana/grafana:latest',
        ' ports:',
        ' - "' + port + ':3000"',
        ' environment:',
        ' GF_SECURITY_ADMIN_PASSWORD: ' + D + '{GRAFANA_PASSWORD:-admin}',
        // Pre-wires Prometheus as the default datasource via GF_* env vars.
        ' GF_DATASOURCES_DEFAULT_NAME: prometheus',
        ' GF_DATASOURCES_DEFAULT_TYPE: prometheus',
        ' GF_DATASOURCES_DEFAULT_URL: http://prometheus:9090',
        ' volumes:',
        ' - grafana_data:/var/lib/grafana',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        ' test: ["CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;

    case 'mailhog':
      lines.push(
        ' mailhog:',
        ' image: mailhog/mailhog:latest',
        ' ports:',
        ' - "' + port + ':8025"',
        // SMTP ingest port is published in addition to the configurable UI port.
        ' - "1025:1025"',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        ' test: ["CMD", "wget", "--spider", "-q", "http://localhost:8025"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;

    case 'minio':
      lines.push(
        ' minio:',
        ' image: minio/minio:latest',
        ' ports:',
        ' - "' + port + ':9000"',
        // 9001 is the MinIO console, matching --console-address below.
        ' - "9001:9001"',
        ' environment:',
        ' MINIO_ROOT_USER: ' + D + '{MINIO_ROOT_USER:-minioadmin}',
        ' MINIO_ROOT_PASSWORD: ' + D + '{MINIO_ROOT_PASSWORD:-minioadmin}',
        ' command: server /data --console-address ":9001"',
        ' volumes:',
        ' - minio_data:/data',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        ' test: ["CMD", "mc", "ready", "local"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;

    case 'pgadmin':
      lines.push(
        ' pgadmin:',
        ' image: dpage/pgadmin4:latest',
        ' ports:',
        ' - "' + port + ':80"',
        ' environment:',
        ' PGADMIN_DEFAULT_EMAIL: ' + D + '{PGADMIN_DEFAULT_EMAIL:-admin@local.dev}',
        ' PGADMIN_DEFAULT_PASSWORD: ' + D + '{PGADMIN_DEFAULT_PASSWORD:-admin}',
        ' volumes:',
        ' - pgadmin_data:/var/lib/pgadmin',
        ' networks:',
        networkYaml,
        ' healthcheck:',
        ' test: ["CMD", "wget", "--spider", "-q", "http://localhost:80/misc/ping"]',
        ' interval: 10s',
        ' timeout: 5s',
        ' retries: 5'
      );
      break;
  }

  return lines.join('\n');
}
|
|
199
|
+
|
|
200
|
+
/**
 * Generate volume definition with explicit name
 *
 * Emits `<service>_data:` with an explicit `name:` property so the volume
 * gets a stable name; no `external: true` is ever emitted.
 *
 * @param {string} service - Service name
 * @returns {string} Volume YAML with name property
 */
function generateVolumeBlock(service) {
  const volumeName = service + '_data';
  return ' ' + volumeName + ':\n name: ' + volumeName;
}
|
|
209
|
+
|
|
210
|
+
/**
 * Get unique networks needed for selected services
 *
 * Services without an entry in SERVICE_NETWORKS contribute the default
 * 'app' network, mirroring generateServiceBlock's fallback.
 *
 * @param {string[]} services - Selected services
 * @returns {string[]} Unique network names
 */
function getRequiredNetworks(services) {
  const allNetworks = services.flatMap(
    (service) => SERVICE_NETWORKS[service] || ['app'],
  );
  // Set preserves first-insertion order, so the output order matches a
  // left-to-right scan of the selected services.
  return [...new Set(allNetworks)];
}
|
|
225
|
+
|
|
226
|
+
/**
 * Generate docker-compose.yml content
 *
 * Builds the full compose document: a `services:` section for every selected
 * service present in AVAILABLE_SERVICES, a `volumes:` section with one named
 * volume per selected service, and a `networks:` section covering every
 * network those services use. Pure string builder — writes no files.
 *
 * @param {Object} options - Generation options
 * @param {string[]} options.services - Services to include
 * @param {Object} options.ports - Port overrides
 * @returns {Object} { content, filename }
 */
function generateDockerCompose(options = {}) {
  const { services = [], ports = {} } = options;

  let content = 'version: "3.8"\n\nservices:\n';

  if (services.length === 0) {
    content += ' # No services selected\n';
  } else {
    // Unknown service names are silently dropped here.
    const blocks = services
      .filter(s => AVAILABLE_SERVICES.includes(s))
      .map(s => generateServiceBlock(s, ports));
    content += blocks.join('\n\n') + '\n';
  }

  // Volumes section
  // NOTE(review): a volume is emitted for every selected service, including
  // mailhog whose service block mounts no volume — harmless but verify intent.
  if (services.length > 0) {
    content += '\nvolumes:\n';
    for (const service of services) {
      if (AVAILABLE_SERVICES.includes(service)) {
        content += generateVolumeBlock(service) + '\n';
      }
    }
  }

  // Networks section
  // NOTE(review): getRequiredNetworks does not filter by AVAILABLE_SERVICES,
  // so an unknown service name still contributes the default 'app' network
  // even though its service block was dropped above.
  const networks = getRequiredNetworks(services);
  if (networks.length > 0) {
    content += '\nnetworks:\n';
    for (const net of networks) {
      content += ' ' + net + ':\n name: ' + net + '\n';
    }
  }

  return {
    content,
    filename: 'docker-compose.dev.yml',
  };
}
|
|
271
|
+
|
|
272
|
+
/**
 * Generate .env.example file content
 *
 * Produces a header line followed by one commented section per selected
 * service that has known environment variables; unknown services are skipped.
 *
 * @param {Object} options - Generation options
 * @param {string[]} options.services - Services to include
 * @returns {Object} { content, filename }
 */
function generateEnvExample(options = {}) {
  const { services = [] } = options;

  // Known environment variables, grouped by service.
  const envVars = {
    postgres: [
      'POSTGRES_USER=app',
      'POSTGRES_PASSWORD=secret',
      'POSTGRES_DB=app_dev',
    ],
    redis: [
      'REDIS_URL=redis://localhost:6379',
    ],
    prometheus: [
      'PROMETHEUS_URL=http://localhost:9090',
    ],
    grafana: [
      'GRAFANA_PASSWORD=admin',
    ],
    mailhog: [
      'SMTP_HOST=localhost',
      'SMTP_PORT=1025',
    ],
    minio: [
      'MINIO_ROOT_USER=minioadmin',
      'MINIO_ROOT_PASSWORD=minioadmin',
      'MINIO_ENDPOINT=http://localhost:9000',
    ],
    pgadmin: [
      'PGADMIN_DEFAULT_EMAIL=admin@local.dev',
      'PGADMIN_DEFAULT_PASSWORD=admin',
    ],
  };

  // Title-cased section header, the service's variables, then a blank
  // separator line.
  const sections = services
    .filter((service) => envVars[service] !== undefined)
    .flatMap((service) => [
      `# ${service.charAt(0).toUpperCase()}${service.slice(1)}`,
      ...envVars[service],
      '',
    ]);

  return {
    content: ['# Infrastructure Environment Variables', '', ...sections].join('\n'),
    filename: '.env.example',
  };
}
|
|
326
|
+
|
|
327
|
+
// Public API: the two generators plus the service catalog, so callers can
// validate or display the selectable services.
module.exports = {
  generateDockerCompose,
  generateEnvExample,
  AVAILABLE_SERVICES,
};
|
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
/**
 * Infrastructure Blueprint Generator Tests
 *
 * Generates Docker dev environment with observability stack.
 */
import { describe, it, expect } from 'vitest';

// Static ESM import instead of CommonJS `require`: this file is an ES module
// (see the vitest import above), so `require` is not defined at runtime and
// the original `const { ... } = require('./infra-generator.js')` would throw
// a ReferenceError before any test runs.
import {
  generateDockerCompose,
  generateEnvExample,
  AVAILABLE_SERVICES,
} from './infra-generator.js';

describe('Infrastructure Blueprint Generator', () => {
  describe('generateDockerCompose', () => {
    it('generates docker-compose with postgres', () => {
      const result = generateDockerCompose({ services: ['postgres'] });

      expect(result.content).toContain('postgres');
      expect(result.content).toContain('image:');
      expect(result.content).toContain('volumes:');
    });

    it('generates docker-compose with full observability stack', () => {
      const result = generateDockerCompose({
        services: ['postgres', 'redis', 'prometheus', 'grafana', 'mailhog', 'minio', 'pgadmin'],
      });

      expect(result.content).toContain('postgres');
      expect(result.content).toContain('redis');
      expect(result.content).toContain('prometheus');
      expect(result.content).toContain('grafana');
      expect(result.content).toContain('mailhog');
      expect(result.content).toContain('minio');
      expect(result.content).toContain('pgadmin');
    });

    it('each volume has explicit name property', () => {
      const result = generateDockerCompose({
        services: ['postgres', 'redis'],
      });

      // Parse volumes section - every volume should have name: property
      const volumesSection = result.content.split('volumes:').pop();
      // Check that volume definitions include name:
      expect(volumesSection).toContain('name:');
    });

    it('no external: true in any volume', () => {
      const result = generateDockerCompose({
        services: ['postgres', 'redis', 'prometheus', 'grafana'],
      });

      expect(result.content).not.toContain('external: true');
      expect(result.content).not.toContain('external:true');
    });

    it('services on correct networks', () => {
      const result = generateDockerCompose({
        services: ['postgres', 'prometheus', 'minio'],
      });

      expect(result.content).toContain('app');
      expect(result.content).toContain('monitoring');
      expect(result.content).toContain('storage');
    });

    it('health checks present for each service', () => {
      const services = ['postgres', 'redis', 'prometheus', 'grafana', 'mailhog', 'minio', 'pgadmin'];
      const result = generateDockerCompose({ services });

      // Count healthcheck occurrences - should be at least one per service
      const healthcheckCount = (result.content.match(/healthcheck:/g) || []).length;
      expect(healthcheckCount).toBe(services.length);
    });

    it('ports configurable via options', () => {
      const result = generateDockerCompose({
        services: ['postgres'],
        ports: { postgres: 5555 },
      });

      expect(result.content).toContain('5555');
    });

    it('includes pgAdmin with connection pre-configured', () => {
      const result = generateDockerCompose({
        services: ['postgres', 'pgadmin'],
      });

      expect(result.content).toContain('pgadmin');
      expect(result.content).toContain('PGADMIN_DEFAULT_EMAIL');
    });

    it('includes Grafana with Prometheus datasource', () => {
      const result = generateDockerCompose({
        services: ['prometheus', 'grafana'],
      });

      expect(result.content).toContain('grafana');
      expect(result.content).toContain('prometheus');
    });

    it('empty service list returns minimal compose', () => {
      const result = generateDockerCompose({ services: [] });

      expect(result.content).toContain('version:');
      expect(result.content).toContain('services:');
    });

    it('returns structured output (not writes files)', () => {
      const result = generateDockerCompose({
        services: ['postgres'],
      });

      expect(result).toMatchObject({
        content: expect.any(String),
        filename: expect.any(String),
      });
    });
  });

  describe('generateEnvExample', () => {
    it('generates .env.example with all vars', () => {
      const result = generateEnvExample({
        services: ['postgres', 'redis', 'minio'],
      });

      expect(result.content).toContain('POSTGRES');
      expect(result.content).toContain('REDIS');
      expect(result.content).toContain('MINIO');
    });
  });

  describe('AVAILABLE_SERVICES', () => {
    it('lists all expected services', () => {
      expect(AVAILABLE_SERVICES).toContain('postgres');
      expect(AVAILABLE_SERVICES).toContain('redis');
      expect(AVAILABLE_SERVICES).toContain('prometheus');
      expect(AVAILABLE_SERVICES).toContain('grafana');
      expect(AVAILABLE_SERVICES).toContain('mailhog');
      expect(AVAILABLE_SERVICES).toContain('minio');
      expect(AVAILABLE_SERVICES).toContain('pgadmin');
    });
  });
});
|