specweave 0.30.13 → 0.30.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. package/.claude-plugin/marketplace.json +0 -11
  2. package/CLAUDE.md +1 -1
  3. package/README.md +32 -0
  4. package/bin/fix-marketplace-errors.sh +1 -1
  5. package/bin/specweave.js +28 -0
  6. package/dist/src/cli/commands/commits.d.ts +7 -0
  7. package/dist/src/cli/commands/commits.d.ts.map +1 -0
  8. package/dist/src/cli/commands/commits.js +42 -0
  9. package/dist/src/cli/commands/commits.js.map +1 -0
  10. package/dist/src/cli/commands/living-docs.d.ts +29 -0
  11. package/dist/src/cli/commands/living-docs.d.ts.map +1 -0
  12. package/dist/src/cli/commands/living-docs.js +350 -0
  13. package/dist/src/cli/commands/living-docs.js.map +1 -0
  14. package/dist/src/cli/helpers/ado-area-selector.js +1 -1
  15. package/dist/src/cli/helpers/ado-area-selector.js.map +1 -1
  16. package/dist/src/core/background/index.d.ts +2 -2
  17. package/dist/src/core/background/index.d.ts.map +1 -1
  18. package/dist/src/core/background/index.js +1 -1
  19. package/dist/src/core/background/index.js.map +1 -1
  20. package/dist/src/core/living-docs/living-docs-sync.d.ts +34 -10
  21. package/dist/src/core/living-docs/living-docs-sync.d.ts.map +1 -1
  22. package/dist/src/core/living-docs/living-docs-sync.js +223 -32
  23. package/dist/src/core/living-docs/living-docs-sync.js.map +1 -1
  24. package/dist/src/importers/ado-importer.js +2 -2
  25. package/dist/src/importers/ado-importer.js.map +1 -1
  26. package/dist/src/importers/item-converter.d.ts +6 -1
  27. package/dist/src/importers/item-converter.d.ts.map +1 -1
  28. package/dist/src/importers/item-converter.js +15 -2
  29. package/dist/src/importers/item-converter.js.map +1 -1
  30. package/dist/src/integrations/ado/ado-pat-provider.d.ts +3 -3
  31. package/dist/src/integrations/ado/ado-pat-provider.js +3 -3
  32. package/dist/src/living-docs/epic-id-allocator.d.ts +1 -1
  33. package/dist/src/living-docs/epic-id-allocator.js +1 -1
  34. package/dist/src/living-docs/fs-id-allocator.d.ts +1 -1
  35. package/dist/src/living-docs/fs-id-allocator.js +1 -1
  36. package/dist/src/living-docs/smart-doc-organizer.js +1 -1
  37. package/dist/src/living-docs/smart-doc-organizer.js.map +1 -1
  38. package/dist/src/utils/auth-helpers.d.ts +23 -0
  39. package/dist/src/utils/auth-helpers.d.ts.map +1 -1
  40. package/dist/src/utils/auth-helpers.js +51 -0
  41. package/dist/src/utils/auth-helpers.js.map +1 -1
  42. package/dist/src/utils/feature-id-collision.d.ts +48 -5
  43. package/dist/src/utils/feature-id-collision.d.ts.map +1 -1
  44. package/dist/src/utils/feature-id-collision.js +251 -19
  45. package/dist/src/utils/feature-id-collision.js.map +1 -1
  46. package/dist/src/utils/validators/ado-validator.js +2 -2
  47. package/dist/src/utils/validators/ado-validator.js.map +1 -1
  48. package/package.json +12 -13
  49. package/plugins/PLUGINS-INDEX.md +2 -3
  50. package/plugins/specweave/commands/specweave-living-docs.md +321 -0
  51. package/plugins/specweave/commands/specweave-organize-docs.md +3 -3
  52. package/plugins/specweave/hooks/v2/handlers/github-sync-handler.sh +10 -1
  53. package/plugins/specweave/hooks/v2/handlers/living-docs-handler.sh +10 -1
  54. package/plugins/specweave-ado/agents/ado-manager/AGENT.md +58 -0
  55. package/plugins/specweave-ado/commands/{specweave-ado-close-workitem.md → close.md} +9 -5
  56. package/plugins/specweave-ado/commands/{specweave-ado-create-workitem.md → create.md} +9 -5
  57. package/plugins/specweave-ado/commands/pull.md +489 -0
  58. package/plugins/specweave-ado/commands/push.md +391 -0
  59. package/plugins/specweave-ado/commands/{specweave-ado-status.md → status.md} +12 -0
  60. package/plugins/specweave-ado/commands/{specweave-ado-sync.md → sync.md} +95 -3
  61. package/plugins/specweave-ado/hooks/README.md +1 -1
  62. package/plugins/specweave-docs/commands/generate.md +3 -3
  63. package/plugins/specweave-docs/commands/init.md +4 -4
  64. package/plugins/specweave-docs/commands/preview.md +5 -5
  65. package/plugins/specweave-github/agents/github-manager/AGENT.md +22 -0
  66. package/plugins/specweave-github/agents/user-story-updater/AGENT.md +1 -1
  67. package/plugins/specweave-github/commands/{specweave-github-close-issue.md → close.md} +2 -2
  68. package/plugins/specweave-github/commands/{specweave-github-create-issue.md → create.md} +2 -2
  69. package/plugins/specweave-github/commands/pull.md +142 -0
  70. package/plugins/specweave-github/commands/push.md +154 -0
  71. package/plugins/specweave-github/commands/{specweave-github-sync.md → sync.md} +19 -5
  72. package/plugins/specweave-github/commands/{specweave-github-update-user-story.md → update-user-story.md} +1 -1
  73. package/plugins/specweave-github/hooks/README.md +1 -1
  74. package/plugins/specweave-jira/agents/jira-manager/AGENT.md +30 -0
  75. package/plugins/specweave-jira/commands/pull.md +164 -0
  76. package/plugins/specweave-jira/commands/push.md +170 -0
  77. package/plugins/specweave-jira/commands/{specweave-jira-sync.md → sync.md} +18 -3
  78. package/plugins/specweave-jira/hooks/README.md +1 -1
  79. package/plugins/specweave-kafka/README.md +20 -0
  80. package/plugins/specweave-kafka/benchmarks/kafka-throughput.benchmark.ts +551 -0
  81. package/plugins/specweave-kafka/examples/README.md +191 -0
  82. package/plugins/specweave-kafka/examples/avro-schema-registry/.env.example +8 -0
  83. package/plugins/specweave-kafka/examples/avro-schema-registry/README.md +69 -0
  84. package/plugins/specweave-kafka/examples/avro-schema-registry/consumer.js +37 -0
  85. package/plugins/specweave-kafka/examples/avro-schema-registry/package.json +14 -0
  86. package/plugins/specweave-kafka/examples/avro-schema-registry/producer.js +57 -0
  87. package/plugins/specweave-kafka/examples/exactly-once-semantics/.env.example +5 -0
  88. package/plugins/specweave-kafka/examples/exactly-once-semantics/README.md +30 -0
  89. package/plugins/specweave-kafka/examples/exactly-once-semantics/eos-pipeline.js +79 -0
  90. package/plugins/specweave-kafka/examples/exactly-once-semantics/package.json +11 -0
  91. package/plugins/specweave-kafka/examples/kafka-streams-app/.env.example +4 -0
  92. package/plugins/specweave-kafka/examples/kafka-streams-app/README.md +30 -0
  93. package/plugins/specweave-kafka/examples/kafka-streams-app/package.json +11 -0
  94. package/plugins/specweave-kafka/examples/kafka-streams-app/windowed-aggregation.js +66 -0
  95. package/plugins/specweave-kafka/examples/n8n-workflow/README.md +54 -0
  96. package/plugins/specweave-kafka/examples/n8n-workflow/docker-compose.yml +19 -0
  97. package/plugins/specweave-kafka/examples/n8n-workflow/kafka-to-slack.json +50 -0
  98. package/plugins/specweave-kafka/examples/simple-producer-consumer/.env.example +15 -0
  99. package/plugins/specweave-kafka/examples/simple-producer-consumer/README.md +183 -0
  100. package/plugins/specweave-kafka/examples/simple-producer-consumer/consumer.js +60 -0
  101. package/plugins/specweave-kafka/examples/simple-producer-consumer/docker-compose.yml +30 -0
  102. package/plugins/specweave-kafka/examples/simple-producer-consumer/package.json +18 -0
  103. package/plugins/specweave-kafka/examples/simple-producer-consumer/producer.js +52 -0
  104. package/plugins/specweave-release/commands/specweave-release-npm.md +4 -4
  105. package/plugins/specweave-docs-preview/.claude-plugin/plugin.json +0 -21
  106. package/plugins/specweave-docs-preview/commands/build.md +0 -489
  107. package/plugins/specweave-docs-preview/commands/preview.md +0 -355
  108. package/plugins/specweave-docs-preview/skills/docs-preview/SKILL.md +0 -386
  109. package/plugins/specweave-ado/commands/{specweave-ado-clone-repos.md → clone.md} +0 -0
  110. package/plugins/specweave-ado/commands/{specweave-ado-import-areas.md → import-areas.md} +0 -0
  111. package/plugins/specweave-ado/commands/{specweave-ado-import-projects.md → import-projects.md} +0 -0
  112. package/plugins/specweave-github/commands/{specweave-github-cleanup-duplicates.md → cleanup-duplicates.md} +0 -0
  113. package/plugins/specweave-github/commands/{specweave-github-reconcile.md → reconcile.md} +0 -0
  114. package/plugins/specweave-github/commands/{specweave-github-status.md → status.md} +0 -0
  115. package/plugins/specweave-jira/commands/{specweave-jira-import-boards.md → import-boards.md} +0 -0
  116. package/plugins/specweave-jira/commands/{specweave-jira-import-projects.md → import-projects-full.md} +0 -0
@@ -227,11 +227,26 @@ Conflicts: None
  - Display rich output with links
  - Save sync results to test-results/ if requested

+ ## Simpler Alternatives
+
+ For most use cases, use the git-style commands:
+
+ | Command | Purpose |
+ |---------|---------|
+ | `/specweave-jira:pull` | Pull changes from Jira (read-only) |
+ | `/specweave-jira:push` | Push progress to Jira |
+
+ Use `/specweave-jira:sync` for advanced operations with explicit direction control.
+
  ## Related Commands

- - `/specweave-github:sync` - Sync to GitHub issues (also two-way by default)
- - `/specweave:increment` - Create new increment
- - `/specweave:validate` - Validate increment quality
+ | Command | Purpose |
+ |---------|---------|
+ | `/specweave-jira:pull` | Pull from Jira (git-style) |
+ | `/specweave-jira:push` | Push to Jira (git-style) |
+ | `/specweave-jira:import-boards` | Import Jira boards |
+ | `/specweave-github:sync` | Sync to GitHub issues |
+ | `/specweave:increment` | Create new increment |

  ---

@@ -193,7 +193,7 @@ Core hook (330 lines) JIRA plugin hook (150 lines)

  - **Core Plugin Hooks**: `plugins/specweave/hooks/README.md`
  - **Architecture Analysis**: `.specweave/increments/0018-strict-increment-discipline-enforcement/reports/HOOKS-ARCHITECTURE-ANALYSIS.md`
- - **JIRA Sync Command**: `plugins/specweave-jira/commands/specweave-jira-sync.md`
+ - **JIRA Sync Command**: `plugins/specweave-jira/commands/sync.md`

  ---

@@ -156,6 +156,13 @@ Skills work together in coordinated workflows:

  ## Usage Examples

+ **Complete runnable examples** are available in [`examples/`](./examples/):
+ - `simple-producer-consumer/` - Basic Kafka operations (beginner)
+ - `avro-schema-registry/` - Schema-based serialization (intermediate)
+ - `exactly-once-semantics/` - Zero message loss (advanced)
+ - `kafka-streams-app/` - Real-time stream processing (advanced)
+ - `n8n-workflow/` - No-code event-driven automation (beginner)
+
  ### Deploy to AWS MSK

  ```bash
@@ -207,6 +214,19 @@ npm run test:coverage

  **Coverage Target**: 85-90%

+ ## Benchmarks
+
+ Performance benchmarks are available in [`benchmarks/`](./benchmarks/):
+
+ ```bash
+ # Run Kafka throughput benchmarks
+ npx ts-node benchmarks/kafka-throughput.benchmark.ts
+ ```
+
+ Measures: producer/consumer throughput, end-to-end latency (p50/p95/p99), batch size impact, compression comparison, concurrent producers.
+
+ **Target**: 100K+ msgs/sec throughput.
+
  ## Documentation

  - **Getting Started**: `.specweave/docs/public/guides/kafka-getting-started.md`
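
The README hunks above only name the Kafka examples and the benchmark entry point without showing any client code. Below is a minimal sketch of the kind of flow `simple-producer-consumer/` covers, assuming kafkajs and a broker at `localhost:9092`; the client ID, topic name, and group ID are illustrative and need not match the shipped example files.

```typescript
// Minimal produce-then-consume sketch (illustrative; not the packaged example).
import { Kafka } from 'kafkajs';

const kafka = new Kafka({
  clientId: 'example-client', // hypothetical client ID
  brokers: (process.env.KAFKA_BROKERS ?? 'localhost:9092').split(','),
});

async function main(): Promise<void> {
  const topic = 'example-topic'; // hypothetical topic name

  // Produce a single message.
  const producer = kafka.producer();
  await producer.connect();
  await producer.send({
    topic,
    messages: [{ key: 'greeting', value: 'hello kafka' }],
  });
  await producer.disconnect();

  // Consume it back from the earliest offset (runs until Ctrl+C).
  const consumer = kafka.consumer({ groupId: 'example-group' });
  await consumer.connect();
  await consumer.subscribe({ topic, fromBeginning: true });
  await consumer.run({
    eachMessage: async ({ partition, message }) => {
      console.log(`partition ${partition}: ${message.value?.toString()}`);
    },
  });
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```

`fromBeginning: true` replays the topic from the earliest offset for a new consumer group; the benchmark file added below uses the same option for its consumers.
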
@@ -0,0 +1,551 @@
+ /**
+  * Kafka Performance Benchmarks
+  *
+  * Measures throughput and latency for:
+  * - Producer performance (msgs/sec, MB/sec)
+  * - Consumer performance (msgs/sec, lag)
+  * - End-to-end latency (p50, p95, p99)
+  * - Batch processing efficiency
+  * - Compression impact
+  * - Concurrent operations
+  *
+  * Target: 100K+ msgs/sec throughput
+  *
+  * @benchmark
+  */
+
+ import { Kafka, Producer, Consumer, CompressionTypes } from 'kafkajs';
+ import { performance } from 'perf_hooks';
+ import { v4 as uuidv4 } from 'uuid';
+ import * as fs from 'fs';
+ import * as path from 'path';
+
+ interface BenchmarkResult {
+   name: string;
+   duration: number;
+   messageCount: number;
+   throughput: number; // msgs/sec
+   bytesPerSecond: number;
+   latencyP50?: number;
+   latencyP95?: number;
+   latencyP99?: number;
+   avgLatency?: number;
+ }
+
+ class KafkaBenchmark {
+   private kafka: Kafka;
+   private results: BenchmarkResult[] = [];
+
+   constructor() {
+     this.kafka = new Kafka({
+       clientId: 'benchmark-client',
+       brokers: process.env.KAFKA_BROKERS?.split(',') || ['localhost:9092'],
+       retry: {
+         retries: 3,
+       },
+     });
+   }
+
+   async runAll(): Promise<void> {
+     console.log('🚀 Starting Kafka Performance Benchmarks...\n');
+
+     await this.benchmarkProducerThroughput();
+     await this.benchmarkConsumerThroughput();
+     await this.benchmarkEndToEndLatency();
+     await this.benchmarkBatchSizes();
+     await this.benchmarkCompression();
+     await this.benchmarkConcurrentProducers();
+
+     this.generateReport();
+   }
+
+   /**
+    * Benchmark 1: Producer Throughput
+    * Target: 100K+ msgs/sec
+    */
+   private async benchmarkProducerThroughput(): Promise<void> {
+     console.log('📊 Benchmark 1: Producer Throughput\n');
+
+     const topic = `bench-producer-${uuidv4()}`;
+     const messageCount = 100000;
+     const messageSize = 1024; // 1KB
+
+     const admin = this.kafka.admin();
+     await admin.connect();
+     await admin.createTopics({
+       topics: [{ topic, numPartitions: 10 }],
+     });
+     await admin.disconnect();
+
+     const producer = this.kafka.producer({
+       allowAutoTopicCreation: false,
+       idempotent: true,
+     });
+
+     await producer.connect();
+
+     const message = {
+       value: Buffer.alloc(messageSize).toString('base64'),
+     };
+
+     const startTime = performance.now();
+
+     // Send in batches for better throughput
+     const batchSize = 1000;
+     for (let i = 0; i < messageCount; i += batchSize) {
+       const batch = Array(Math.min(batchSize, messageCount - i)).fill(message);
+       await producer.send({
+         topic,
+         messages: batch,
+       });
+
+       if ((i + batchSize) % 10000 === 0) {
+         process.stdout.write(`\rProgress: ${i + batchSize}/${messageCount}`);
+       }
+     }
+
+     const endTime = performance.now();
+     const duration = (endTime - startTime) / 1000; // seconds
+
+     await producer.disconnect();
+
+     const throughput = messageCount / duration;
+     const bytesPerSecond = (messageCount * messageSize) / duration;
+
+     this.results.push({
+       name: 'Producer Throughput',
+       duration,
+       messageCount,
+       throughput,
+       bytesPerSecond,
+     });
+
+     console.log(`\n✅ Completed: ${throughput.toFixed(0)} msgs/sec, ${(bytesPerSecond / 1024 / 1024).toFixed(2)} MB/sec\n`);
+   }
+
+   /**
+    * Benchmark 2: Consumer Throughput
+    */
+   private async benchmarkConsumerThroughput(): Promise<void> {
+     console.log('📊 Benchmark 2: Consumer Throughput\n');
+
+     const topic = `bench-consumer-${uuidv4()}`;
+     const messageCount = 50000;
+     const messageSize = 1024;
+
+     const admin = this.kafka.admin();
+     await admin.connect();
+     await admin.createTopics({
+       topics: [{ topic, numPartitions: 10 }],
+     });
+     await admin.disconnect();
+
+     // Produce messages first
+     const producer = this.kafka.producer();
+     await producer.connect();
+
+     const message = {
+       value: Buffer.alloc(messageSize).toString('base64'),
+     };
+
+     for (let i = 0; i < messageCount; i += 1000) {
+       const batch = Array(1000).fill(message);
+       await producer.send({ topic, messages: batch });
+     }
+
+     await producer.disconnect();
+
+     // Benchmark consumption
+     const consumer = this.kafka.consumer({
+       groupId: `bench-group-${uuidv4()}`,
+     });
+
+     await consumer.connect();
+     await consumer.subscribe({ topic, fromBeginning: true });
+
+     let consumedCount = 0;
+     const startTime = performance.now();
+
+     await new Promise<void>((resolve) => {
+       consumer.run({
+         eachBatch: async ({ batch }) => {
+           consumedCount += batch.messages.length;
+
+           if (consumedCount >= messageCount) {
+             resolve();
+           }
+         },
+       });
+     });
+
+     const endTime = performance.now();
+     const duration = (endTime - startTime) / 1000;
+
+     await consumer.disconnect();
+
+     const throughput = consumedCount / duration;
+     const bytesPerSecond = (consumedCount * messageSize) / duration;
+
+     this.results.push({
+       name: 'Consumer Throughput',
+       duration,
+       messageCount: consumedCount,
+       throughput,
+       bytesPerSecond,
+     });
+
+     console.log(`✅ Completed: ${throughput.toFixed(0)} msgs/sec, ${(bytesPerSecond / 1024 / 1024).toFixed(2)} MB/sec\n`);
+   }
+
+   /**
+    * Benchmark 3: End-to-End Latency
+    */
+   private async benchmarkEndToEndLatency(): Promise<void> {
+     console.log('📊 Benchmark 3: End-to-End Latency\n');
+
+     const topic = `bench-latency-${uuidv4()}`;
+     const messageCount = 10000;
+
+     const admin = this.kafka.admin();
+     await admin.connect();
+     await admin.createTopics({
+       topics: [{ topic, numPartitions: 1 }],
+     });
+     await admin.disconnect();
+
+     const producer = this.kafka.producer();
+     const consumer = this.kafka.consumer({
+       groupId: `bench-latency-group-${uuidv4()}`,
+     });
+
+     await producer.connect();
+     await consumer.connect();
+     await consumer.subscribe({ topic, fromBeginning: true });
+
+     const latencies: number[] = [];
+     const timestamps = new Map<string, number>();
+
+     const consumePromise = new Promise<void>((resolve) => {
+       consumer.run({
+         eachMessage: async ({ message }) => {
+           const id = message.value!.toString();
+           const sendTime = timestamps.get(id);
+
+           if (sendTime) {
+             const latency = performance.now() - sendTime;
+             latencies.push(latency);
+
+             if (latencies.length >= messageCount) {
+               resolve();
+             }
+           }
+         },
+       });
+     });
+
+     // Send messages with timestamps
+     for (let i = 0; i < messageCount; i++) {
+       const id = `msg-${i}`;
+       timestamps.set(id, performance.now());
+
+       await producer.send({
+         topic,
+         messages: [{ value: id }],
+       });
+
+       if (i % 1000 === 0) {
+         process.stdout.write(`\rProgress: ${i}/${messageCount}`);
+       }
+     }
+
+     await consumePromise;
+
+     await producer.disconnect();
+     await consumer.disconnect();
+
+     // Calculate percentiles
+     latencies.sort((a, b) => a - b);
+
+     const p50 = latencies[Math.floor(latencies.length * 0.50)];
+     const p95 = latencies[Math.floor(latencies.length * 0.95)];
+     const p99 = latencies[Math.floor(latencies.length * 0.99)];
+     const avg = latencies.reduce((sum, l) => sum + l, 0) / latencies.length;
+
+     this.results.push({
+       name: 'End-to-End Latency',
+       duration: 0,
+       messageCount,
+       throughput: 0,
+       bytesPerSecond: 0,
+       latencyP50: p50,
+       latencyP95: p95,
+       latencyP99: p99,
+       avgLatency: avg,
+     });
+
+     console.log(`\n✅ Latency - p50: ${p50.toFixed(2)}ms, p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms\n`);
+   }
+
+   /**
+    * Benchmark 4: Batch Size Impact
+    */
+   private async benchmarkBatchSizes(): Promise<void> {
+     console.log('📊 Benchmark 4: Batch Size Impact\n');
+
+     const batchSizes = [10, 100, 500, 1000, 5000];
+
+     for (const batchSize of batchSizes) {
+       const topic = `bench-batch-${batchSize}-${uuidv4()}`;
+       const messageCount = 50000;
+
+       const admin = this.kafka.admin();
+       await admin.connect();
+       await admin.createTopics({
+         topics: [{ topic, numPartitions: 5 }],
+       });
+       await admin.disconnect();
+
+       const producer = this.kafka.producer();
+       await producer.connect();
+
+       const startTime = performance.now();
+
+       for (let i = 0; i < messageCount; i += batchSize) {
+         const batch = Array(Math.min(batchSize, messageCount - i))
+           .fill({ value: 'benchmark-data' });
+
+         await producer.send({ topic, messages: batch });
+       }
+
+       const endTime = performance.now();
+       const duration = (endTime - startTime) / 1000;
+
+       await producer.disconnect();
+
+       const throughput = messageCount / duration;
+
+       this.results.push({
+         name: `Batch Size ${batchSize}`,
+         duration,
+         messageCount,
+         throughput,
+         bytesPerSecond: 0,
+       });
+
+       console.log(`  Batch ${batchSize}: ${throughput.toFixed(0)} msgs/sec`);
+     }
+
+     console.log();
+   }
+
+   /**
+    * Benchmark 5: Compression Impact
+    */
+   private async benchmarkCompression(): Promise<void> {
+     console.log('📊 Benchmark 5: Compression Impact\n');
+
+     const compressionTypes = [
+       { type: CompressionTypes.None, name: 'None' },
+       { type: CompressionTypes.GZIP, name: 'GZIP' },
+       { type: CompressionTypes.Snappy, name: 'Snappy' },
+       { type: CompressionTypes.LZ4, name: 'LZ4' },
+       { type: CompressionTypes.ZSTD, name: 'ZSTD' },
+     ];
+
+     for (const compression of compressionTypes) {
+       const topic = `bench-compression-${compression.name}-${uuidv4()}`;
+       const messageCount = 10000;
+       const messageSize = 10240; // 10KB (compressible)
+
+       const admin = this.kafka.admin();
+       await admin.connect();
+       await admin.createTopics({
+         topics: [{ topic, numPartitions: 3 }],
+       });
+       await admin.disconnect();
+
+       const producer = this.kafka.producer();
+       await producer.connect();
+
+       const message = {
+         value: 'A'.repeat(messageSize), // Highly compressible
+       };
+
+       const startTime = performance.now();
+
+       for (let i = 0; i < messageCount; i += 100) {
+         const batch = Array(100).fill(message);
+         await producer.send({
+           topic,
+           compression: compression.type,
+           messages: batch,
+         });
+       }
+
+       const endTime = performance.now();
+       const duration = (endTime - startTime) / 1000;
+
+       await producer.disconnect();
+
+       const throughput = messageCount / duration;
+       const bytesPerSecond = (messageCount * messageSize) / duration;
+
+       this.results.push({
+         name: `Compression: ${compression.name}`,
+         duration,
+         messageCount,
+         throughput,
+         bytesPerSecond,
+       });
+
+       console.log(`  ${compression.name}: ${throughput.toFixed(0)} msgs/sec, ${(bytesPerSecond / 1024 / 1024).toFixed(2)} MB/sec`);
+     }
+
+     console.log();
+   }
+
+   /**
+    * Benchmark 6: Concurrent Producers
+    */
+   private async benchmarkConcurrentProducers(): Promise<void> {
+     console.log('📊 Benchmark 6: Concurrent Producers\n');
+
+     const topic = `bench-concurrent-${uuidv4()}`;
+     const producerCount = 10;
+     const messagesPerProducer = 10000;
+
+     const admin = this.kafka.admin();
+     await admin.connect();
+     await admin.createTopics({
+       topics: [{ topic, numPartitions: 10 }],
+     });
+     await admin.disconnect();
+
+     const startTime = performance.now();
+
+     const producers = await Promise.all(
+       Array.from({ length: producerCount }, async () => {
+         const producer = this.kafka.producer();
+         await producer.connect();
+         return producer;
+       })
+     );
+
+     await Promise.all(
+       producers.map(async (producer) => {
+         for (let i = 0; i < messagesPerProducer; i += 100) {
+           const batch = Array(100).fill({ value: 'concurrent-test' });
+           await producer.send({ topic, messages: batch });
+         }
+       })
+     );
+
+     const endTime = performance.now();
+     const duration = (endTime - startTime) / 1000;
+
+     await Promise.all(producers.map(p => p.disconnect()));
+
+     const totalMessages = producerCount * messagesPerProducer;
+     const throughput = totalMessages / duration;
+
+     this.results.push({
+       name: `Concurrent Producers (${producerCount})`,
+       duration,
+       messageCount: totalMessages,
+       throughput,
+       bytesPerSecond: 0,
+     });
+
+     console.log(`✅ Completed: ${throughput.toFixed(0)} msgs/sec with ${producerCount} concurrent producers\n`);
+   }
+
+   /**
+    * Generate benchmark report
+    */
+   private generateReport(): void {
+     console.log('\n' + '='.repeat(80));
+     console.log('📈 BENCHMARK RESULTS SUMMARY');
+     console.log('='.repeat(80) + '\n');
+
+     this.results.forEach((result) => {
+       console.log(`${result.name}:`);
+       console.log(`  Messages: ${result.messageCount.toLocaleString()}`);
+
+       if (result.throughput > 0) {
+         console.log(`  Throughput: ${result.throughput.toFixed(0)} msgs/sec`);
+       }
+
+       if (result.bytesPerSecond > 0) {
+         console.log(`  Bandwidth: ${(result.bytesPerSecond / 1024 / 1024).toFixed(2)} MB/sec`);
+       }
+
+       if (result.latencyP50) {
+         console.log(`  Latency p50: ${result.latencyP50.toFixed(2)}ms`);
+         console.log(`  Latency p95: ${result.latencyP95!.toFixed(2)}ms`);
+         console.log(`  Latency p99: ${result.latencyP99!.toFixed(2)}ms`);
+       }
+
+       if (result.duration > 0) {
+         console.log(`  Duration: ${result.duration.toFixed(2)}s`);
+       }
+
+       console.log();
+     });
+
+     // Save to file
+     const reportDir = path.join(process.cwd(), 'benchmark-results');
+     if (!fs.existsSync(reportDir)) {
+       fs.mkdirSync(reportDir, { recursive: true });
+     }
+
+     const timestamp = new Date().toISOString().replace(/:/g, '-');
+     const reportPath = path.join(reportDir, `benchmark-${timestamp}.json`);
+
+     fs.writeFileSync(reportPath, JSON.stringify({
+       timestamp: new Date().toISOString(),
+       results: this.results,
+       environment: {
+         nodeVersion: process.version,
+         platform: process.platform,
+         kafkaBrokers: process.env.KAFKA_BROKERS || 'localhost:9092',
+       },
+     }, null, 2));
+
+     console.log(`📄 Full report saved to: ${reportPath}\n`);
+
+     // Performance validation
+     const producerThroughput = this.results.find(r => r.name === 'Producer Throughput');
+     if (producerThroughput && producerThroughput.throughput >= 100000) {
+       console.log('✅ PASS: Producer throughput meets 100K+ msgs/sec target\n');
+     } else {
+       console.log('⚠️ WARNING: Producer throughput below 100K msgs/sec target\n');
+     }
+   }
+ }
+
+ // Run benchmarks
+ (async () => {
+   const benchmark = new KafkaBenchmark();
+   await benchmark.runAll();
+   process.exit(0);
+ })();
+
+ /**
+  * Benchmark Summary:
+  *
+  * 1. Producer Throughput - 100K+ msgs/sec target
+  * 2. Consumer Throughput - Maximum consumption rate
+  * 3. End-to-End Latency - p50, p95, p99 percentiles
+  * 4. Batch Size Impact - Optimal batch sizing
+  * 5. Compression Impact - Codec performance comparison
+  * 6. Concurrent Producers - Scalability validation
+  *
+  * Expected Results:
+  * - Producer: 100K-500K msgs/sec
+  * - Consumer: 100K-300K msgs/sec
+  * - Latency p50: <10ms
+  * - Latency p95: <50ms
+  * - Latency p99: <100ms
+  *
+  * Run: npm run benchmark
+  */
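
The benchmark above persists every run as JSON under `benchmark-results/` via `generateReport()`. Below is a small sketch for reading those reports back, relying only on the report shape visible in that method; the latest-file selection logic is an assumption for illustration, not part of the package.

```typescript
// Sketch: load the newest benchmark-results/benchmark-*.json and summarize it.
import * as fs from 'fs';
import * as path from 'path';

interface BenchmarkResult {
  name: string;
  duration: number;
  messageCount: number;
  throughput: number;
  bytesPerSecond: number;
  latencyP50?: number;
  latencyP95?: number;
  latencyP99?: number;
}

const reportDir = path.join(process.cwd(), 'benchmark-results');

// Filenames embed an ISO timestamp (colons replaced with dashes),
// so lexicographic order matches chronological order.
const latest = fs
  .readdirSync(reportDir)
  .filter((f) => f.startsWith('benchmark-') && f.endsWith('.json'))
  .sort()
  .pop();

if (!latest) {
  throw new Error(`no reports found in ${reportDir}`);
}

const report = JSON.parse(
  fs.readFileSync(path.join(reportDir, latest), 'utf8'),
) as { timestamp: string; results: BenchmarkResult[] };

for (const r of report.results) {
  // 100000 msgs/sec is the target the benchmark file itself validates against.
  const flag =
    r.name === 'Producer Throughput' && r.throughput < 100000
      ? ' (below target)'
      : '';
  console.log(`${r.name}: ${r.throughput.toFixed(0)} msgs/sec${flag}`);
}
```
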