@onlineapps/service-wrapper 2.0.0

# Service Wrapper - Performance & Optimization

## Performance Metrics

### Latency Breakdown (typical)
```
Operation            Time     Notes
---------            ----     -----
Queue receive        1-2ms    AMQP overhead
Header parsing       0.5ms    Extract context
Message deserialize  1-3ms    JSON.parse
Cache check          2-3ms    Redis GET
API call             5-50ms   HTTP to service (varies)
Cache store          1-2ms    Redis SET
Result serialize     1-2ms    JSON.stringify
Queue publish        2-3ms    AMQP publish
-------------------------------------------------
Total overhead: ~9-16ms + service time
```
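
To sanity-check these numbers in a real deployment, each stage can be timed individually. The sketch below is illustrative only; `createStageTimer` and the stage names are hypothetical helpers, not part of the wrapper's API.

```javascript
// Hypothetical per-stage timing helper for validating the breakdown above.
function createStageTimer() {
  const stages = {};
  return {
    // Run fn and record its duration under the given stage name
    async measure(name, fn) {
      const start = process.hrtime.bigint();
      try {
        return await fn();
      } finally {
        const ms = Number(process.hrtime.bigint() - start) / 1e6;
        (stages[name] = stages[name] || []).push(ms);
      }
    },
    // Average duration per stage, in milliseconds
    report() {
      return Object.fromEntries(
        Object.entries(stages).map(([name, times]) =>
          [name, times.reduce((a, b) => a + b, 0) / times.length])
      );
    }
  };
}

// Usage: const body = await timer.measure('deserialize', () => JSON.parse(raw));
```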

### Throughput Capabilities
```
Single instance:          100-200 msg/sec
With caching (50% hits):  150-300 msg/sec
Parallel (10 prefetch):   500-1000 msg/sec
Multi-instance (5x):      2500-5000 msg/sec
```

## Optimization Strategies

### 1. Cache-First Strategy
```javascript
class CacheOptimizer {
  constructor(cache, config) {
    this.cache = cache;
    this.config = config;
    this.stats = {
      hits: 0,
      misses: 0,
      errors: 0
    };
  }

  async getWithCache(key, fetchFn) {
    // Try cache first
    try {
      const cached = await this.cache.get(key);
      if (cached) {
        this.stats.hits++;
        return cached;
      }
    } catch (error) {
      this.stats.errors++;
      // Continue without cache if error
    }

    // Cache miss - fetch from source
    this.stats.misses++;
    const result = await fetchFn();

    // Store in cache (async, don't wait)
    this.cache.set(key, result, { ttl: this.config.ttl })
      .catch(err => console.error('Cache store failed:', err));

    return result;
  }

  getHitRate() {
    const total = this.stats.hits + this.stats.misses;
    return total > 0 ? this.stats.hits / total : 0;
  }
}
```
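
A usage sketch, assuming a Redis-style cache client with promise-returning `get`/`set`; `redisCache` and `fetchUser` are stand-ins for your own cache client and data source:

```javascript
// Illustrative wiring; redisCache and fetchUser are hypothetical.
const optimizer = new CacheOptimizer(redisCache, { ttl: 300 });

async function getUser(id) {
  return optimizer.getWithCache(`user:${id}`, () => fetchUser(id));
}

// Periodically log the hit rate to confirm caching is paying off
setInterval(() => {
  console.log(`Cache hit rate: ${(optimizer.getHitRate() * 100).toFixed(1)}%`);
}, 60000).unref();
```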

### 2. Connection Pooling
```javascript
const http = require('http');
const axios = require('axios');

class ConnectionPool {
  constructor(serviceUrl, maxSockets = 10) {
    this.agent = new http.Agent({
      keepAlive: true,
      maxSockets: maxSockets,
      maxFreeSockets: 5,
      timeout: 60000,
      keepAliveMsecs: 30000
    });

    this.client = axios.create({
      baseURL: serviceUrl,
      httpAgent: this.agent,
      timeout: 5000,
      maxRedirects: 0
    });
  }

  async request(config) {
    return this.client.request(config);
  }

  getStats() {
    // Agent bookkeeping is keyed by origin, with an array per origin,
    // so sum the array lengths rather than counting keys.
    const count = (byOrigin) =>
      Object.values(byOrigin).reduce((n, list) => n + list.length, 0);

    return {
      activeSockets: count(this.agent.sockets),
      freeSockets: count(this.agent.freeSockets),
      requests: count(this.agent.requests)
    };
  }
}
```
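
A short usage sketch (the service URL and endpoint are placeholders):

```javascript
const pool = new ConnectionPool('http://hello-service:3000', 20);

async function healthCheck() {
  const response = await pool.request({ method: 'GET', url: '/health' });
  console.log(response.status, pool.getStats());
}
```

Keeping sockets alive avoids a TCP (and TLS) handshake per request; for HTTPS targets, substitute an `https.Agent` and pass it as `httpsAgent`.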

### 3. Batch Processing
```javascript
class BatchProcessor {
  constructor(processFunc, options = {}) {
    this.processFunc = processFunc;
    this.batchSize = options.batchSize || 10;
    this.batchTimeout = options.batchTimeout || 100;
    this.batch = [];
    this.timer = null;
  }

  async add(item) {
    return new Promise((resolve, reject) => {
      this.batch.push({ item, resolve, reject });

      if (this.batch.length >= this.batchSize) {
        this.flush();
      } else if (!this.timer) {
        this.timer = setTimeout(() => this.flush(), this.batchTimeout);
      }
    });
  }

  async flush() {
    if (this.timer) {
      clearTimeout(this.timer);
      this.timer = null;
    }

    if (this.batch.length === 0) return;

    const currentBatch = this.batch;
    this.batch = [];

    try {
      // Process the whole batch in one call
      const items = currentBatch.map(b => b.item);
      const results = await this.processFunc(items);

      // Resolve individual promises (results must align with input order)
      currentBatch.forEach((b, i) => {
        b.resolve(results[i]);
      });
    } catch (error) {
      currentBatch.forEach(b => b.reject(error));
    }
  }
}
```
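
For example, individual lookups can be collapsed into one bulk query. This is a sketch: `fetchUsersByIds` is a hypothetical bulk endpoint that must return results in the same order as its input, since the processor resolves callers by index.

```javascript
// fetchUsersByIds is hypothetical and must preserve input order.
const userBatcher = new BatchProcessor(ids => fetchUsersByIds(ids), {
  batchSize: 25,
  batchTimeout: 50
});

async function getUser(id) {
  return userBatcher.add(id); // callers still request single items
}
```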

### 4. Adaptive Prefetch
```javascript
class AdaptivePrefetch {
  constructor(channel, options = {}) {
    this.channel = channel;
    this.minPrefetch = options.minPrefetch || 1;
    this.maxPrefetch = options.maxPrefetch || 20;
    this.currentPrefetch = options.initialPrefetch || 10;
    this.metrics = {
      processed: 0,
      errors: 0,
      avgProcessTime: 0
    };
  }

  async adjustPrefetch() {
    const errorRate = this.metrics.errors / this.metrics.processed;
    const memoryUsage = process.memoryUsage().heapUsed / 1024 / 1024; // MB

    let newPrefetch = this.currentPrefetch;

    // Reduce if high error rate or memory pressure
    if (errorRate > 0.1 || memoryUsage > 200) {
      newPrefetch = Math.max(this.minPrefetch,
        Math.floor(this.currentPrefetch * 0.8));
    }
    // Increase if performing well (ceil so growth is possible from 1)
    else if (errorRate < 0.01 && memoryUsage < 100) {
      newPrefetch = Math.min(this.maxPrefetch,
        Math.ceil(this.currentPrefetch * 1.2));
    }

    if (newPrefetch !== this.currentPrefetch) {
      await this.channel.prefetch(newPrefetch);
      console.log(`Prefetch adjusted: ${this.currentPrefetch} -> ${newPrefetch}`);
      this.currentPrefetch = newPrefetch;
    }
  }

  recordMetric(success, processTime) {
    this.metrics.processed++;
    if (!success) this.metrics.errors++;

    // Calculate running average
    this.metrics.avgProcessTime =
      (this.metrics.avgProcessTime * (this.metrics.processed - 1) + processTime) /
      this.metrics.processed;

    // Adjust every 100 messages (fire-and-forget; failures are logged)
    if (this.metrics.processed % 100 === 0) {
      this.adjustPrefetch().catch(err =>
        console.error('Prefetch adjustment failed:', err));
    }
  }
}
```
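
Wiring this into a consumer might look like the following sketch, assuming an amqplib-style channel; `handleMessage` and the queue name are placeholders:

```javascript
async function startConsumer(channel) {
  const prefetch = new AdaptivePrefetch(channel, { initialPrefetch: 10 });
  await channel.prefetch(prefetch.currentPrefetch);

  await channel.consume('work.queue', async (msg) => {
    const start = Date.now();
    let ok = true;
    try {
      await handleMessage(msg); // placeholder handler
      channel.ack(msg);
    } catch (err) {
      ok = false;
      channel.nack(msg, false, true); // requeue on failure
    }
    prefetch.recordMetric(ok, Date.now() - start);
  });
}
```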

## Memory Management

### Memory Monitoring
```javascript
class MemoryMonitor {
  constructor(threshold = 200) { // MB
    this.threshold = threshold;
    this.baseline = process.memoryUsage().heapUsed;
    this.checks = [];
  }

  check() {
    const current = process.memoryUsage();
    const usedMB = current.heapUsed / 1024 / 1024;

    this.checks.push({
      timestamp: Date.now(),
      heapUsed: usedMB,
      heapTotal: current.heapTotal / 1024 / 1024,
      external: current.external / 1024 / 1024,
      rss: current.rss / 1024 / 1024
    });

    // Keep only last 100 checks
    if (this.checks.length > 100) {
      this.checks.shift();
    }

    // Check for memory leak (continuous growth)
    if (this.checks.length >= 10) {
      const recent = this.checks.slice(-10);
      const growth = recent[9].heapUsed - recent[0].heapUsed;

      if (growth > 50) { // 50MB growth in 10 checks
        console.warn('Possible memory leak detected:', {
          growth: `${growth.toFixed(2)}MB`,
          current: `${usedMB.toFixed(2)}MB`
        });
      }
    }

    // Force GC if over threshold (requires the --expose-gc flag)
    if (usedMB > this.threshold && global.gc) {
      console.log(`Memory threshold exceeded (${usedMB.toFixed(2)}MB), forcing GC`);
      global.gc();
    }

    return usedMB;
  }

  getStats() {
    if (this.checks.length === 0) return null;

    const heapValues = this.checks.map(c => c.heapUsed);
    return {
      current: heapValues[heapValues.length - 1],
      min: Math.min(...heapValues),
      max: Math.max(...heapValues),
      avg: heapValues.reduce((a, b) => a + b, 0) / heapValues.length,
      trend: this.calculateTrend()
    };
  }

  calculateTrend() {
    if (this.checks.length < 2) return 'stable';

    const recent = this.checks.slice(-10);
    const firstHalf = recent.slice(0, 5).map(c => c.heapUsed);
    const secondHalf = recent.slice(5).map(c => c.heapUsed);

    const avgFirst = firstHalf.reduce((a, b) => a + b, 0) / firstHalf.length;
    const avgSecond = secondHalf.reduce((a, b) => a + b, 0) / secondHalf.length;

    const diff = avgSecond - avgFirst;
    if (diff > 10) return 'increasing';
    if (diff < -10) return 'decreasing';
    return 'stable';
  }
}
```
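
In practice the monitor runs on a timer:

```javascript
const memory = new MemoryMonitor(200);

setInterval(() => {
  memory.check();
  const stats = memory.getStats();
  if (stats && stats.trend === 'increasing') {
    console.warn('Heap trending upward:', stats);
  }
}, 10000).unref(); // don't keep the process alive just for monitoring
```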

## Event Loop Monitoring
```javascript
class EventLoopMonitor {
  constructor() {
    this.lagThreshold = 10; // ms
    this.checkInterval = 1000; // ms
    this.lastCheck = Date.now();
    this.lags = [];
  }

  start() {
    this.interval = setInterval(() => {
      const now = Date.now();
      const expectedTime = this.lastCheck + this.checkInterval;
      const lag = now - expectedTime;

      if (lag > this.lagThreshold) {
        console.warn(`Event loop lag detected: ${lag}ms`);
      }

      this.lags.push(lag);
      if (this.lags.length > 60) {
        this.lags.shift(); // Keep last 60 measurements
      }

      this.lastCheck = now;
    }, this.checkInterval);
  }

  stop() {
    if (this.interval) {
      clearInterval(this.interval);
    }
  }

  getStats() {
    if (this.lags.length === 0) return null;

    return {
      current: this.lags[this.lags.length - 1],
      avg: this.lags.reduce((a, b) => a + b, 0) / this.lags.length,
      max: Math.max(...this.lags),
      warnings: this.lags.filter(l => l > this.lagThreshold).length
    };
  }
}
```
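
Node.js also ships a native event-loop delay histogram in `perf_hooks` (Node 12+), which avoids hand-rolled timers and reports percentiles directly; a minimal sketch:

```javascript
const { monitorEventLoopDelay } = require('perf_hooks');

const histogram = monitorEventLoopDelay({ resolution: 20 });
histogram.enable();

setInterval(() => {
  // Histogram values are in nanoseconds; convert to ms for logging
  console.log({
    mean: (histogram.mean / 1e6).toFixed(2),
    max: (histogram.max / 1e6).toFixed(2),
    p99: (histogram.percentile(99) / 1e6).toFixed(2)
  });
  histogram.reset();
}, 10000).unref();
```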

## Performance Testing

### Load Test Configuration
```javascript
// load-test.js
// Assumes an mqClient with publish() and a waitForCompletion() helper are
// available in scope; both are simplified stand-ins here.
const LoadTester = {
  async runTest(config) {
    const results = {
      totalMessages: config.messages,
      duration: 0,
      throughput: 0,
      latencies: [],
      errors: 0
    };

    const startTime = Date.now();

    // Generate test messages
    const promises = [];
    for (let i = 0; i < config.messages; i++) {
      const message = this.generateTestMessage(i);
      const promise = this.sendAndMeasure(message);
      promises.push(promise);

      // Control send rate: pause after each rateLimit-sized burst
      if (config.rateLimit && i > 0 && i % config.rateLimit === 0) {
        await new Promise(resolve => setTimeout(resolve, 1000));
      }
    }

    // Wait for all to complete
    const measurements = await Promise.all(promises);

    results.duration = Date.now() - startTime;
    results.throughput = (config.messages / results.duration) * 1000;
    results.latencies = measurements.filter(m => m.success).map(m => m.latency);
    results.errors = measurements.filter(m => !m.success).length;

    // Calculate percentiles
    results.latencies.sort((a, b) => a - b);
    results.p50 = results.latencies[Math.floor(results.latencies.length * 0.50)];
    results.p95 = results.latencies[Math.floor(results.latencies.length * 0.95)];
    results.p99 = results.latencies[Math.floor(results.latencies.length * 0.99)];

    return results;
  },

  generateTestMessage(index) {
    return {
      workflowId: `test-workflow-${index}`,
      cookbook: {
        steps: [
          {
            service: 'hello-service',
            operation: 'sayGoodDay',
            params: { name: `User${index}` }
          }
        ]
      }
    };
  },

  async sendAndMeasure(message) {
    const start = Date.now();
    try {
      await mqClient.publish('workflow.init', message);
      // Wait for completion (simplified)
      await waitForCompletion(message.workflowId);
      return {
        success: true,
        latency: Date.now() - start
      };
    } catch (error) {
      return {
        success: false,
        error: error.message
      };
    }
  }
};
```
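
An example run (counts and rate are illustrative):

```javascript
async function main() {
  const results = await LoadTester.runTest({ messages: 1000, rateLimit: 100 });
  console.log(`Throughput: ${results.throughput.toFixed(1)} msg/sec`);
  console.log(`p50/p95/p99: ${results.p50}/${results.p95}/${results.p99} ms`);
  console.log(`Errors: ${results.errors}`);
}

main().catch(console.error);
```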

## Optimization Checklist

### Quick Wins
- [ ] Enable connection keep-alive
- [ ] Implement basic caching
- [ ] Set appropriate prefetch (10-20)
- [ ] Use connection pooling

### Advanced Optimizations
- [ ] Implement batch processing
- [ ] Add adaptive prefetch
- [ ] Enable memory monitoring
- [ ] Add event loop monitoring
- [ ] Implement circuit breaker for services (see the sketch after this list)
- [ ] Add request deduplication
- [ ] Enable compression for large messages
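
A minimal circuit-breaker sketch for the checklist item above; the states, thresholds, and timings are illustrative, not a prescribed implementation:

```javascript
class CircuitBreaker {
  constructor(fn, { failureThreshold = 5, resetTimeout = 30000 } = {}) {
    this.fn = fn;
    this.failureThreshold = failureThreshold;
    this.resetTimeout = resetTimeout;
    this.failures = 0;
    this.state = 'closed'; // closed | open | half-open
    this.openedAt = 0;
  }

  async call(...args) {
    if (this.state === 'open') {
      if (Date.now() - this.openedAt < this.resetTimeout) {
        throw new Error('Circuit open');
      }
      this.state = 'half-open'; // allow one trial request through
    }
    try {
      const result = await this.fn(...args);
      this.failures = 0;
      this.state = 'closed';
      return result;
    } catch (err) {
      this.failures++;
      if (this.state === 'half-open' || this.failures >= this.failureThreshold) {
        this.state = 'open';
        this.openedAt = Date.now();
      }
      throw err;
    }
  }
}
```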

### Monitoring Setup
- [ ] Track cache hit rate
- [ ] Monitor message processing time
- [ ] Watch memory usage trend
- [ ] Track event loop lag
- [ ] Monitor connection pool usage
- [ ] Set up alerts for anomalies (a combined reporting sketch follows)
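
These signals can be pulled together into one periodic report using the classes above; the instance names (`optimizer`, `pool`, `memory`, `loopMonitor`) are illustrative:

```javascript
setInterval(() => {
  console.log(JSON.stringify({
    cacheHitRate: optimizer.getHitRate(),
    connections: pool.getStats(),
    memory: memory.getStats(),
    eventLoop: loopMonitor.getStats()
  }));
}, 30000).unref();
```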

---
*For implementation details, see [Process Flows](./PROCESS_FLOWS.md)*