claude-mpm 4.17.0__py3-none-any.whl → 4.18.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of claude-mpm might be problematic. Click here for more details.

Files changed (52) hide show
  1. claude_mpm/VERSION +1 -1
  2. claude_mpm/agents/BASE_ENGINEER.md +286 -0
  3. claude_mpm/agents/BASE_PM.md +48 -17
  4. claude_mpm/agents/agent_loader.py +4 -4
  5. claude_mpm/agents/templates/engineer.json +5 -1
  6. claude_mpm/agents/templates/svelte-engineer.json +225 -0
  7. claude_mpm/config/agent_config.py +2 -2
  8. claude_mpm/core/config.py +42 -0
  9. claude_mpm/core/factories.py +1 -1
  10. claude_mpm/core/optimized_agent_loader.py +3 -3
  11. claude_mpm/hooks/claude_hooks/response_tracking.py +35 -1
  12. claude_mpm/models/resume_log.py +340 -0
  13. claude_mpm/services/agents/auto_config_manager.py +1 -1
  14. claude_mpm/services/agents/deployment/agent_configuration_manager.py +1 -1
  15. claude_mpm/services/agents/deployment/agent_record_service.py +1 -1
  16. claude_mpm/services/agents/deployment/agent_validator.py +17 -1
  17. claude_mpm/services/agents/deployment/async_agent_deployment.py +1 -1
  18. claude_mpm/services/agents/deployment/local_template_deployment.py +1 -1
  19. claude_mpm/services/agents/local_template_manager.py +1 -1
  20. claude_mpm/services/cli/session_manager.py +87 -0
  21. claude_mpm/services/core/path_resolver.py +1 -1
  22. claude_mpm/services/infrastructure/resume_log_generator.py +439 -0
  23. claude_mpm/services/mcp_config_manager.py +2 -2
  24. claude_mpm/services/session_manager.py +205 -1
  25. claude_mpm/services/unified/deployment_strategies/local.py +1 -1
  26. claude_mpm/skills/bundled/api-documentation.md +393 -0
  27. claude_mpm/skills/bundled/async-testing.md +571 -0
  28. claude_mpm/skills/bundled/code-review.md +143 -0
  29. claude_mpm/skills/bundled/database-migration.md +199 -0
  30. claude_mpm/skills/bundled/docker-containerization.md +194 -0
  31. claude_mpm/skills/bundled/express-local-dev.md +1429 -0
  32. claude_mpm/skills/bundled/fastapi-local-dev.md +1199 -0
  33. claude_mpm/skills/bundled/git-workflow.md +414 -0
  34. claude_mpm/skills/bundled/imagemagick.md +204 -0
  35. claude_mpm/skills/bundled/json-data-handling.md +223 -0
  36. claude_mpm/skills/bundled/nextjs-local-dev.md +807 -0
  37. claude_mpm/skills/bundled/pdf.md +141 -0
  38. claude_mpm/skills/bundled/performance-profiling.md +567 -0
  39. claude_mpm/skills/bundled/refactoring-patterns.md +180 -0
  40. claude_mpm/skills/bundled/security-scanning.md +327 -0
  41. claude_mpm/skills/bundled/systematic-debugging.md +473 -0
  42. claude_mpm/skills/bundled/test-driven-development.md +378 -0
  43. claude_mpm/skills/bundled/vite-local-dev.md +1061 -0
  44. claude_mpm/skills/bundled/web-performance-optimization.md +2305 -0
  45. claude_mpm/skills/bundled/xlsx.md +157 -0
  46. claude_mpm/utils/agent_dependency_loader.py +2 -2
  47. {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/METADATA +68 -1
  48. {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/RECORD +52 -29
  49. {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/WHEEL +0 -0
  50. {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/entry_points.txt +0 -0
  51. {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/licenses/LICENSE +0 -0
  52. {claude_mpm-4.17.0.dist-info → claude_mpm-4.18.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,141 @@
1
+ ---
2
+ skill_id: pdf
3
+ skill_version: 0.1.0
4
+ description: Common PDF operations and libraries across languages.
5
+ updated_at: 2025-10-30T17:00:00Z
6
+ tags: [pdf, document-processing, manipulation, media]
7
+ ---
8
+
9
+ # PDF Manipulation
10
+
11
+ Common PDF operations and libraries across languages.
12
+
13
+ ## Python (PyPDF2 / pikepdf)
14
+
15
+ ### Reading PDF
16
+ ```python
17
+ from PyPDF2 import PdfReader
18
+
19
+ reader = PdfReader("document.pdf")
20
+ print(f"Pages: {len(reader.pages)}")
21
+
22
+ # Extract text
23
+ text = reader.pages[0].extract_text()
24
+ ```
25
+
26
+ ### Writing PDF
27
+ ```python
28
+ from PyPDF2 import PdfWriter, PdfReader
29
+
30
+ reader = PdfReader("input.pdf")
31
+ writer = PdfWriter()
32
+
33
+ # Copy pages
34
+ for page in reader.pages:
35
+ writer.add_page(page)
36
+
37
+ # Save
38
+ with open("output.pdf", "wb") as f:
39
+ writer.write(f)
40
+ ```
41
+
42
+ ### Merging PDFs
43
+ ```python
44
+ from PyPDF2 import PdfMerger
45
+
46
+ merger = PdfMerger()
47
+ merger.append("file1.pdf")
48
+ merger.append("file2.pdf")
49
+ merger.write("merged.pdf")
50
+ merger.close()
51
+ ```
52
+
53
+ ### Splitting PDF
54
+ ```python
55
+ reader = PdfReader("input.pdf")
56
+ for i, page in enumerate(reader.pages):
57
+ writer = PdfWriter()
58
+ writer.add_page(page)
59
+ with open(f"page_{i}.pdf", "wb") as f:
60
+ writer.write(f)
61
+ ```
62
+
63
+ ## JavaScript (pdf-lib)
64
+
65
+ ```javascript
66
+ import { PDFDocument } from 'pdf-lib';
67
+ import fs from 'fs';
68
+
69
+ // Load existing PDF
70
+ const existingPdfBytes = fs.readFileSync('input.pdf');
71
+ const pdfDoc = await PDFDocument.load(existingPdfBytes);
72
+
73
+ // Get pages
74
+ const pages = pdfDoc.getPages();
75
+ const firstPage = pages[0];
76
+
77
+ // Add text
78
+ firstPage.drawText('Hello World!', {
79
+ x: 50,
80
+ y: 50,
81
+ size: 30
82
+ });
83
+
84
+ // Save
85
+ const pdfBytes = await pdfDoc.save();
86
+ fs.writeFileSync('output.pdf', pdfBytes);
87
+ ```
88
+
89
+ ## Common Operations
90
+
91
+ ### Extracting Images
92
+ ```python
93
+ import fitz # PyMuPDF
94
+
95
+ doc = fitz.open("document.pdf")
96
+ for page_num in range(len(doc)):
97
+ page = doc[page_num]
98
+ images = page.get_images()
99
+
100
+ for img_index, img in enumerate(images):
101
+ xref = img[0]
102
+ base_image = doc.extract_image(xref)
103
+ image_bytes = base_image["image"]
104
+
105
+ with open(f"image_{page_num}_{img_index}.png", "wb") as f:
106
+ f.write(image_bytes)
107
+ ```
108
+
109
+ ### Adding Watermark
110
+ ```python
111
+ from PyPDF2 import PdfReader, PdfWriter
112
+
113
+ # Create watermark PDF first
114
+ watermark_reader = PdfReader("watermark.pdf")
115
+ watermark_page = watermark_reader.pages[0]
116
+
117
+ # Apply to document
118
+ reader = PdfReader("input.pdf")
119
+ writer = PdfWriter()
120
+
121
+ for page in reader.pages:
122
+ page.merge_page(watermark_page)
123
+ writer.add_page(page)
124
+
125
+ with open("watermarked.pdf", "wb") as f:
126
+ writer.write(f)
127
+ ```
128
+
129
+ ### Compressing PDF
130
+ ```python
131
+ from pikepdf import Pdf
132
+
133
+ with Pdf.open("input.pdf") as pdf:
134
+ pdf.save("compressed.pdf", compress_streams=True)
135
+ ```
136
+
137
+ ## Remember
138
+ - Check PDF file size before processing
139
+ - Handle corrupted PDFs gracefully
140
+ - Use the appropriate library for the task (pikepdf > PyPDF2 for complex ops; note PyPDF2 is deprecated and now maintained as `pypdf`)
141
+ - Consider memory usage for large PDFs
@@ -0,0 +1,567 @@
1
+ ---
2
+ skill_id: performance-profiling
3
+ skill_version: 0.1.0
4
+ description: Systematic approach to identifying and optimizing performance bottlenecks, eliminating redundant profiling guidance per agent.
5
+ updated_at: 2025-10-30T17:00:00Z
6
+ tags: [performance, profiling, optimization, benchmarking]
7
+ ---
8
+
9
+ # Performance Profiling
10
+
11
+ Systematic approach to identifying and optimizing performance bottlenecks. Eliminates ~200-250 lines of redundant profiling guidance per agent.
12
+
13
+ ## Core Principle: Measure Before Optimizing
14
+
15
+ **Never optimize without profiling data.** Intuition about performance is usually wrong.
16
+
17
+ > "Premature optimization is the root of all evil" - Donald Knuth
18
+
19
+ ## The Profiling Process
20
+
21
+ ### 1. Establish Baseline
22
+
23
+ Before any optimization:
24
+ ```
25
+ □ Measure current performance
26
+ □ Define acceptable performance targets
27
+ □ Identify critical code paths
28
+ □ Document current metrics
29
+ ```
30
+
31
+ ### 2. Profile to Find Bottlenecks
32
+
33
+ Use profiling tools to identify hot spots:
34
+ ```
35
+ □ CPU profiling (where time is spent)
36
+ □ Memory profiling (allocation patterns)
37
+ □ I/O profiling (disk/network bottlenecks)
38
+ □ Database query profiling
39
+ ```
40
+
41
+ ### 3. Optimize Targeted Areas
42
+
43
+ Focus on the biggest bottlenecks first (80/20 rule).
44
+
45
+ ### 4. Measure Impact
46
+
47
+ Verify that optimizations actually improved performance.
48
+
49
+ ## Language-Specific Profiling
50
+
51
+ ### Python
52
+
53
+ #### CPU Profiling with cProfile
54
+
55
+ ```python
56
+ import cProfile
57
+ import pstats
58
+ from pstats import SortKey
59
+
60
+ # Profile a function
61
+ profiler = cProfile.Profile()
62
+ profiler.enable()
63
+
64
+ slow_function()
65
+
66
+ profiler.disable()
67
+
68
+ # Analyze results
69
+ stats = pstats.Stats(profiler)
70
+ stats.sort_stats(SortKey.CUMULATIVE)
71
+ stats.print_stats(20) # Top 20 functions by cumulative time
72
+
73
+ # Output shows:
74
+ # - ncalls: number of calls
75
+ # - tottime: total time in function (excluding subcalls)
76
+ # - cumtime: cumulative time (including subcalls)
77
+ ```
78
+
79
+ #### Line-by-Line Profiling
80
+
81
+ ```python
82
+ from line_profiler import LineProfiler
83
+
84
+ @profile # Decorator for line_profiler
85
+ def bottleneck_function():
86
+ data = load_data()
87
+ processed = process_data(data)
88
+ result = analyze(processed)
89
+ return result
90
+
91
+ # Run: kernprof -l -v script.py
92
+ # Shows time spent on each line
93
+ ```
94
+
95
+ #### Memory Profiling
96
+
97
+ ```python
98
+ from memory_profiler import profile
99
+
100
+ @profile
101
+ def memory_intensive():
102
+ large_list = [i for i in range(10000000)] # Shows memory spike
103
+ processed = [x * 2 for x in large_list] # Shows peak memory
104
+ return sum(processed)
105
+
106
+ # Run: python -m memory_profiler script.py
107
+ # Shows line-by-line memory usage
108
+ ```
109
+
110
+ ### JavaScript/Node.js
111
+
112
+ #### Built-in Profiling
113
+
114
+ ```javascript
115
+ // CPU profiling
116
+ console.profile('MyOperation');
117
+ expensiveOperation();
118
+ console.profileEnd('MyOperation');
119
+
120
+ // Time measurement
121
+ console.time('operation');
122
+ performOperation();
123
+ console.timeEnd('operation');
124
+ ```
125
+
126
+ #### Node.js Performance API
127
+
128
+ ```javascript
129
+ const { performance, PerformanceObserver } = require('perf_hooks');
130
+
131
+ // Mark specific points
132
+ performance.mark('start-operation');
133
+ performOperation();
134
+ performance.mark('end-operation');
135
+
136
+ // Measure duration
137
+ performance.measure('operation-duration', 'start-operation', 'end-operation');
138
+
139
+ const observer = new PerformanceObserver((items) => {
140
+ items.getEntries().forEach((entry) => {
141
+ console.log(`${entry.name}: ${entry.duration}ms`);
142
+ });
143
+ });
144
+
145
+ observer.observe({ entryTypes: ['measure'] });
146
+ ```
147
+
148
+ #### Chrome DevTools Profiling
149
+
150
+ ```javascript
151
+ // Run with: node --inspect script.js
152
+ // Open chrome://inspect in Chrome
153
+ // Use Performance tab to record and analyze
154
+
155
+ function expensiveOperation() {
156
+ // CPU-intensive work
157
+ for (let i = 0; i < 1000000; i++) {
158
+ Math.sqrt(i);
159
+ }
160
+ }
161
+
162
+ expensiveOperation();
163
+ ```
164
+
165
+ ### Go
166
+
167
+ #### CPU Profiling
168
+
169
+ ```go
170
+ import (
171
+ "os"
172
+ "runtime/pprof"
173
+ )
174
+
175
+ func main() {
176
+ // Start CPU profiling
177
+ f, _ := os.Create("cpu.prof")
178
+ defer f.Close()
179
+ pprof.StartCPUProfile(f)
180
+ defer pprof.StopCPUProfile()
181
+
182
+ // Code to profile
183
+ expensiveOperation()
184
+
185
+ // Analyze with: go tool pprof cpu.prof
186
+ }
187
+ ```
188
+
189
+ #### Memory Profiling
190
+
191
+ ```go
192
+ import (
193
+ "os"
194
+ "runtime/pprof"
195
+ )
196
+
197
+ func main() {
198
+ // Code to profile
199
+ expensiveOperation()
200
+
201
+ // Write memory profile
202
+ f, _ := os.Create("mem.prof")
203
+ defer f.Close()
204
+ pprof.WriteHeapProfile(f)
205
+
206
+ // Analyze with: go tool pprof mem.prof
207
+ }
208
+ ```
209
+
210
+ #### Benchmarking
211
+
212
+ ```go
213
+ func BenchmarkExpensiveOperation(b *testing.B) {
214
+ for i := 0; i < b.N; i++ {
215
+ expensiveOperation()
216
+ }
217
+ }
218
+
219
+ // Run: go test -bench=. -benchmem
220
+ // Shows:
221
+ // - ns/op: nanoseconds per operation
222
+ // - B/op: bytes allocated per operation
223
+ // - allocs/op: allocations per operation
224
+ ```
225
+
226
+ ### Rust
227
+
228
+ #### Benchmarking with Criterion
229
+
230
+ ```rust
231
+ use criterion::{black_box, criterion_group, criterion_main, Criterion};
232
+
233
+ fn expensive_operation(n: u64) -> u64 {
234
+ // Operation to benchmark
235
+ (0..n).sum()
236
+ }
237
+
238
+ fn bench_operation(c: &mut Criterion) {
239
+ c.bench_function("expensive_operation", |b| {
240
+ b.iter(|| expensive_operation(black_box(1000)))
241
+ });
242
+ }
243
+
244
+ criterion_group!(benches, bench_operation);
245
+ criterion_main!(benches);
246
+
247
+ // Run: cargo bench
248
+ ```
249
+
250
+ #### Profiling with Flamegraph
251
+
252
+ ```bash
253
+ # Install flamegraph
254
+ cargo install flamegraph
255
+
256
+ # Profile and generate flamegraph
257
+ cargo flamegraph --bin my_binary
258
+
259
+ # Opens flamegraph.svg showing hot paths
260
+ ```
261
+
262
+ ## Common Performance Bottlenecks
263
+
264
+ ### 1. Database Queries (N+1 Problem)
265
+
266
+ ```python
267
+ # Bad: N+1 queries
268
+ def get_users_with_posts():
269
+ users = User.query.all() # 1 query
270
+ for user in users:
271
+ posts = Post.query.filter_by(user_id=user.id).all() # N queries!
272
+ user.posts = posts
273
+ return users
274
+
275
+ # Good: Single query with join
276
+ def get_users_with_posts():
277
+ return User.query.options(joinedload(User.posts)).all() # 1 query
278
+ ```
279
+
280
+ ### 2. Unnecessary Loops
281
+
282
+ ```python
283
+ # Bad: Nested loops O(n²)
284
+ def find_duplicates(items):
285
+ duplicates = []
286
+ for i, item1 in enumerate(items):
287
+ for j, item2 in enumerate(items[i+1:]):
288
+ if item1 == item2:
289
+ duplicates.append(item1)
290
+ return duplicates
291
+
292
+ # Good: Use set O(n)
293
+ def find_duplicates(items):
294
+ seen = set()
295
+ duplicates = set()
296
+ for item in items:
297
+ if item in seen:
298
+ duplicates.add(item)
299
+ seen.add(item)
300
+ return list(duplicates)
301
+ ```
302
+
303
+ ### 3. Inefficient Data Structures
304
+
305
+ ```python
306
+ # Bad: List lookups O(n)
307
+ def process_items(items, allowed_ids):
308
+ allowed_ids_list = [1, 2, 3, ...] # List
309
+ result = []
310
+ for item in items:
311
+ if item.id in allowed_ids_list: # O(n) lookup
312
+ result.append(item)
313
+ return result
314
+
315
+ # Good: Set lookups O(1)
316
+ def process_items(items, allowed_ids):
317
+ allowed_ids_set = {1, 2, 3, ...} # Set
318
+ result = []
319
+ for item in items:
320
+ if item.id in allowed_ids_set: # O(1) lookup
321
+ result.append(item)
322
+ return result
323
+ ```
324
+
325
+ ### 4. Excessive Memory Allocation
326
+
327
+ ```python
328
+ # Bad: Building string with concatenation
329
+ def build_large_string(items):
330
+ result = ""
331
+ for item in items:
332
+ result += str(item) + "\n" # Creates new string each time
333
+ return result
334
+
335
+ # Good: Use list and join
336
+ def build_large_string(items):
337
+ parts = []
338
+ for item in items:
339
+ parts.append(str(item))
340
+ return "\n".join(parts) # Single allocation
341
+ ```
342
+
343
+ ### 5. Synchronous I/O
344
+
345
+ ```python
346
+ # Bad: Sequential HTTP requests
347
+ def fetch_all_users(user_ids):
348
+ users = []
349
+ for user_id in user_ids:
350
+ response = requests.get(f"/users/{user_id}") # Blocks
351
+ users.append(response.json())
352
+ return users
353
+
354
+ # Good: Concurrent requests
355
+ async def fetch_all_users(user_ids):
356
+ async with aiohttp.ClientSession() as session:
357
+ tasks = [fetch_user(session, user_id) for user_id in user_ids]
358
+ return await asyncio.gather(*tasks)
359
+ ```
360
+
361
+ ## Performance Optimization Strategies
362
+
363
+ ### Strategy 1: Caching
364
+
365
+ ```python
366
+ from functools import lru_cache
367
+
368
+ @lru_cache(maxsize=128)
369
+ def expensive_calculation(n):
370
+ # Expensive computation
371
+ return result
372
+
373
+ # First call: computed
374
+ result1 = expensive_calculation(10)
375
+
376
+ # Second call: cached
377
+ result2 = expensive_calculation(10) # Instant
378
+ ```
379
+
380
+ ### Strategy 2: Lazy Loading
381
+
382
+ ```python
383
+ class ExpensiveResource:
384
+ def __init__(self):
385
+ self._data = None
386
+
387
+ @property
388
+ def data(self):
389
+ if self._data is None:
390
+ self._data = load_expensive_data() # Only load when needed
391
+ return self._data
392
+ ```
393
+
394
+ ### Strategy 3: Batching
395
+
396
+ ```python
397
+ # Bad: Process one at a time
398
+ for item in items:
399
+ process_single(item) # Many round trips
400
+
401
+ # Good: Process in batches
402
+ for batch in chunks(items, batch_size=100):
403
+ process_batch(batch) # Fewer round trips
404
+ ```
405
+
406
+ ### Strategy 4: Indexing (Databases)
407
+
408
+ ```sql
409
+ -- Bad: Sequential scan
410
+ SELECT * FROM users WHERE email = 'user@example.com';
411
+
412
+ -- Good: Use index
413
+ CREATE INDEX idx_users_email ON users(email);
414
+ SELECT * FROM users WHERE email = 'user@example.com';
415
+ ```
416
+
417
+ ### Strategy 5: Algorithmic Optimization
418
+
419
+ ```python
420
+ # Bad: Bubble sort O(n²)
421
+ def sort_items(items):
422
+ for i in range(len(items)):
423
+ for j in range(len(items) - 1):
424
+ if items[j] > items[j + 1]:
425
+ items[j], items[j + 1] = items[j + 1], items[j]
426
+
427
+ # Good: Use built-in sort O(n log n)
428
+ def sort_items(items):
429
+ return sorted(items)
430
+ ```
431
+
432
+ ## Performance Testing
433
+
434
+ ### Load Testing
435
+
436
+ ```python
437
+ # Using locust for load testing
438
+ from locust import HttpUser, task, between
439
+
440
+ class WebsiteUser(HttpUser):
441
+ wait_time = between(1, 5)
442
+
443
+ @task
444
+ def load_homepage(self):
445
+ self.client.get("/")
446
+
447
+ @task
448
+ def load_user_profile(self):
449
+ self.client.get("/profile/123")
450
+
451
+ # Run: locust -f locustfile.py
452
+ # Shows requests/sec, response times, failure rate
453
+ ```
454
+
455
+ ### Stress Testing
456
+
457
+ ```bash
458
+ # Using Apache Bench
459
+ ab -n 10000 -c 100 http://localhost:8000/
460
+
461
+ # n: total requests
462
+ # c: concurrent requests
463
+ # Shows: requests/sec, time per request, transfer rate
464
+ ```
465
+
466
+ ## Performance Monitoring
467
+
468
+ ### Key Metrics to Track
469
+
470
+ ```
471
+ CPU Usage:
472
+ - % CPU time
473
+ - CPU cores utilized
474
+ - System vs user time
475
+
476
+ Memory Usage:
477
+ - Heap size
478
+ - Peak memory
479
+ - Memory leaks (growing over time)
480
+
481
+ I/O:
482
+ - Disk read/write IOPS
483
+ - Network throughput
484
+ - Database query time
485
+
486
+ Response Time:
487
+ - p50 (median)
488
+ - p95 (95th percentile)
489
+ - p99 (99th percentile)
490
+ - Max response time
491
+ ```
492
+
493
+ ### Application Performance Monitoring (APM)
494
+
495
+ ```python
496
+ # Example: Using OpenTelemetry
497
+ from opentelemetry import trace
498
+ from opentelemetry.sdk.trace import TracerProvider
499
+
500
+ trace.set_tracer_provider(TracerProvider())
501
+ tracer = trace.get_tracer(__name__)
502
+
503
+ def process_request():
504
+ with tracer.start_as_current_span("process_request"):
505
+ # Your code here
506
+ with tracer.start_as_current_span("database_query"):
507
+ fetch_data()
508
+ with tracer.start_as_current_span("process_data"):
509
+ process_data()
510
+ ```
511
+
512
+ ## Optimization Anti-Patterns
513
+
514
+ ### ❌ Optimizing Before Profiling
515
+
516
+ ```python
517
+ # Bad: Optimizing blindly
518
+ # "This looks slow, let me make it faster"
519
+ result = [complex_operation(x) for x in items] # Is this actually slow?
520
+ ```
521
+
522
+ ### ❌ Micro-Optimizations
523
+
524
+ ```python
525
+ # Bad: Premature micro-optimization
526
+ # Optimizing: x = x + 1 vs x += 1
527
+ # Impact: negligible (nanoseconds)
528
+ # Don't waste time on this without proof it matters
529
+ ```
530
+
531
+ ### ❌ Sacrificing Readability
532
+
533
+ ```python
534
+ # Bad: Unreadable "optimized" code
535
+ r=[x for x in d if x>0and x<100and x%2==0] # What does this do?
536
+
537
+ # Good: Readable code (optimize only if profiling shows need)
538
+ even_numbers = [
539
+ num for num in data
540
+ if 0 < num < 100 and num % 2 == 0
541
+ ]
542
+ ```
543
+
544
+ ## Quick Profiling Checklist
545
+
546
+ Before optimizing:
547
+
548
+ ```
549
+ □ Have you profiled to identify bottlenecks?
550
+ □ Have you established baseline metrics?
551
+ □ Are you optimizing the critical path?
552
+ □ Have you checked for N+1 queries?
553
+ □ Have you looked at algorithm complexity?
554
+ □ Have you considered caching?
555
+ □ Have you tested with realistic data volumes?
556
+ □ Have you measured the impact of changes?
557
+ □ Does the optimization maintain correctness?
558
+ □ Is the code still readable?
559
+ ```
560
+
561
+ ## Remember
562
+
563
+ - **Profile first** - Don't guess where the bottleneck is
564
+ - **80/20 rule** - 20% of code accounts for 80% of execution time
565
+ - **Real data** - Profile with production-like data volumes
566
+ - **Diminishing returns** - Getting 2x faster is easy, 10x is hard
567
+ - **Correctness > Speed** - Fast but wrong is useless