codeforge-dev 1.5.7 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80) hide show
  1. package/.devcontainer/.env +2 -1
  2. package/.devcontainer/CHANGELOG.md +55 -9
  3. package/.devcontainer/CLAUDE.md +65 -15
  4. package/.devcontainer/README.md +67 -6
  5. package/.devcontainer/config/keybindings.json +5 -0
  6. package/.devcontainer/config/main-system-prompt.md +63 -2
  7. package/.devcontainer/config/settings.json +25 -6
  8. package/.devcontainer/devcontainer.json +23 -7
  9. package/.devcontainer/features/README.md +21 -7
  10. package/.devcontainer/features/ccburn/README.md +60 -0
  11. package/.devcontainer/features/ccburn/devcontainer-feature.json +38 -0
  12. package/.devcontainer/features/ccburn/install.sh +174 -0
  13. package/.devcontainer/features/ccstatusline/README.md +22 -21
  14. package/.devcontainer/features/ccstatusline/devcontainer-feature.json +1 -1
  15. package/.devcontainer/features/ccstatusline/install.sh +48 -16
  16. package/.devcontainer/features/claude-code/config/settings.json +60 -24
  17. package/.devcontainer/features/mcp-qdrant/devcontainer-feature.json +1 -1
  18. package/.devcontainer/features/mcp-reasoner/devcontainer-feature.json +1 -1
  19. package/.devcontainer/plugins/devs-marketplace/plugins/auto-formatter/scripts/__pycache__/format-on-stop.cpython-314.pyc +0 -0
  20. package/.devcontainer/plugins/devs-marketplace/plugins/auto-formatter/scripts/format-on-stop.py +21 -6
  21. package/.devcontainer/plugins/devs-marketplace/plugins/auto-linter/scripts/__pycache__/lint-file.cpython-314.pyc +0 -0
  22. package/.devcontainer/plugins/devs-marketplace/plugins/auto-linter/scripts/lint-file.py +7 -10
  23. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/REVIEW-RUBRIC.md +440 -0
  24. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/architect.md +190 -0
  25. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/bash-exec.md +173 -0
  26. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/claude-guide.md +155 -0
  27. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/dependency-analyst.md +248 -0
  28. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/doc-writer.md +233 -0
  29. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/explorer.md +235 -0
  30. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/generalist.md +125 -0
  31. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/git-archaeologist.md +242 -0
  32. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/migrator.md +195 -0
  33. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/perf-profiler.md +265 -0
  34. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/refactorer.md +209 -0
  35. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/researcher.md +195 -0
  36. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/security-auditor.md +289 -0
  37. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/spec-writer.md +284 -0
  38. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/statusline-config.md +188 -0
  39. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/agents/test-writer.md +245 -0
  40. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/hooks/hooks.json +12 -0
  41. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/__pycache__/guard-readonly-bash.cpython-314.pyc +0 -0
  42. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/__pycache__/redirect-builtin-agents.cpython-314.pyc +0 -0
  43. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/__pycache__/skill-suggester.cpython-314.pyc +0 -0
  44. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/__pycache__/syntax-validator.cpython-314.pyc +0 -0
  45. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/__pycache__/verify-no-regression.cpython-314.pyc +0 -0
  46. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/__pycache__/verify-tests-pass.cpython-314.pyc +0 -0
  47. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/guard-readonly-bash.py +611 -0
  48. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/redirect-builtin-agents.py +83 -0
  49. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/skill-suggester.py +85 -2
  50. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/syntax-validator.py +9 -4
  51. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/verify-no-regression.py +221 -0
  52. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/scripts/verify-tests-pass.py +176 -0
  53. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/claude-agent-sdk/SKILL.md +599 -0
  54. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/claude-agent-sdk/references/sdk-typescript-reference.md +954 -0
  55. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/git-forensics/SKILL.md +276 -0
  56. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/git-forensics/references/advanced-commands.md +332 -0
  57. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/git-forensics/references/investigation-playbooks.md +319 -0
  58. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/performance-profiling/SKILL.md +341 -0
  59. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/performance-profiling/references/interpreting-results.md +235 -0
  60. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/performance-profiling/references/tool-commands.md +395 -0
  61. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/refactoring-patterns/SKILL.md +344 -0
  62. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/refactoring-patterns/references/safe-transformations.md +247 -0
  63. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/refactoring-patterns/references/smell-catalog.md +332 -0
  64. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/security-checklist/SKILL.md +277 -0
  65. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/security-checklist/references/owasp-patterns.md +269 -0
  66. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/security-checklist/references/secrets-patterns.md +253 -0
  67. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/specification-writing/SKILL.md +288 -0
  68. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/specification-writing/references/criteria-patterns.md +245 -0
  69. package/.devcontainer/plugins/devs-marketplace/plugins/code-directive/skills/specification-writing/references/ears-templates.md +239 -0
  70. package/.devcontainer/plugins/devs-marketplace/plugins/protected-files-guard/scripts/__pycache__/guard-protected.cpython-314.pyc +0 -0
  71. package/.devcontainer/plugins/devs-marketplace/plugins/protected-files-guard/scripts/guard-protected.py +40 -39
  72. package/.devcontainer/scripts/setup-aliases.sh +10 -20
  73. package/.devcontainer/scripts/setup-config.sh +2 -0
  74. package/.devcontainer/scripts/setup-plugins.sh +38 -46
  75. package/.devcontainer/scripts/setup-projects.sh +175 -0
  76. package/.devcontainer/scripts/setup-symlink-claude.sh +36 -0
  77. package/.devcontainer/scripts/setup-update-claude.sh +11 -8
  78. package/.devcontainer/scripts/setup.sh +4 -2
  79. package/package.json +1 -1
  80. package/.devcontainer/scripts/setup-irie-claude.sh +0 -32
@@ -0,0 +1,235 @@
1
+ # Interpreting Profiler Output
2
+
3
+ How to read and analyze output from profiling tools, with annotated examples.
4
+
5
+ ## Contents
6
+
7
+ - [Reading cProfile Output](#reading-cprofile-output)
8
+ - [Reading Flamegraphs](#reading-flamegraphs)
9
+ - [Reading memory_profiler Output](#reading-memory_profiler-output)
10
+ - [Reading line_profiler Output](#reading-line_profiler-output)
11
+ - [Reading `time` Output](#reading-time-output)
12
+ - [Benchmark Result Analysis](#benchmark-result-analysis)
13
+ - [Common Pitfalls](#common-pitfalls)
14
+
15
+ ---
16
+
17
+ ## Reading cProfile Output
18
+
19
+ ### Annotated Example
20
+
21
+ ```
22
+ 2847 function calls (2832 primitive calls) in 1.234 seconds
23
+
24
+ Ordered by: cumulative time
25
+
26
+ ncalls tottime percall cumtime percall filename:lineno(function)
27
+ 1 0.000 0.000 1.234 1.234 app.py:1(<module>)
28
+ 1 0.002 0.002 1.230 1.230 app.py:45(process_data)
29
+ 100 0.850 0.009 1.100 0.011 app.py:60(parse_record)
30
+ 100 0.200 0.002 0.200 0.002 app.py:80(validate_fields)
31
+ 100 0.050 0.001 0.050 0.001 app.py:95(compute_hash)
32
+ 1000 0.030 0.000 0.030 0.000 {built-in method builtins.len}
33
+ 500 0.025 0.000 0.025 0.000 {method 'split' of 'str' objects}
34
+ ```
35
+
36
+ **Column meanings:**
37
+ - `ncalls`: Number of calls. `100/50` means 100 total calls, 50 non-recursive.
38
+ - `tottime`: Time spent **in this function only** (excluding subfunctions). This is where the CPU actually spent its cycles.
39
+ - `percall` (first): `tottime / ncalls`. Average time per call in this function body.
40
+ - `cumtime`: Time spent in this function **and all functions it calls**. This is the total "cost" of calling this function.
41
+ - `percall` (second): `cumtime / ncalls`. Average total cost per call.
42
+
43
+ **How to read this example:**
44
+ 1. `process_data` takes 1.23s cumulative but only 0.002s in its own body → it's a coordinator, not the bottleneck.
45
+ 2. `parse_record` takes 0.85s in its own body (`tottime`) and 1.1s cumulative → it's the hot function. The 0.25s difference (1.1 - 0.85) is spent in its subfunctions.
46
+ 3. `validate_fields` takes 0.2s → secondary target for optimization.
47
+ 4. Built-in functions (`len`, `split`) are fast — don't optimize these.
48
+
49
+ **Action:** Focus on `parse_record`. It's called 100 times, spending 8.5ms per call in its own body. Can you cache results, reduce calls, or use a faster parsing library?
50
+
51
+ ---
52
+
53
+ ## Reading Flamegraphs
54
+
55
+ ### Anatomy of a Flamegraph
56
+
57
+ A flamegraph is a visualization where:
58
+ - **X-axis** = stack frames sorted alphabetically (NOT time). Width = proportion of total samples.
59
+ - **Y-axis** = call stack depth. Bottom = entry point, top = leaf function.
60
+ - **Color** = typically random warm colors (no semantic meaning by default).
61
+
62
+ ### What to Look For
63
+
64
+ **Wide bars at the top (plateaus):**
65
+ These are leaf functions where the CPU actually spends time. A wide bar at the top of the graph means this function is consuming a large portion of CPU time directly.
66
+
67
+ ```
68
+ Example: A wide "json.loads" bar at the top means JSON parsing is the bottleneck.
69
+ Action: Reduce the number of parse calls, use a faster JSON library (orjson, ujson),
70
+ or change the data format.
71
+ ```
72
+
73
+ **Wide bars at the bottom:**
74
+ These are entry points that lead to expensive call trees. The function itself may be cheap, but its children are expensive.
75
+
76
+ ```
77
+ Example: A wide "handle_request" bar at the bottom that narrows into many children
78
+ means request handling is expensive collectively, but no single child dominates.
79
+ Action: Look for the widest children and optimize those first.
80
+ ```
81
+
82
+ **Towers (deep narrow stacks):**
83
+ Deep but narrow stacks are recursive calls or deeply nested abstractions. They're not usually bottlenecks unless they're also wide.
84
+
85
+ **Missing frames:**
86
+ If the flamegraph shows `[unknown]` or gaps, the profiler couldn't resolve the frame. This happens with:
87
+ - JIT-compiled code (Node.js, Java) — use `--perf-basic-prof` for Node
88
+ - Native extensions — use `py-spy --native` for Python
89
+ - Optimized code with frame pointers stripped — compile with `-fno-omit-frame-pointer`
90
+
91
+ ---
92
+
93
+ ## Reading memory_profiler Output
94
+
95
+ ### Annotated Example
96
+
97
+ ```
98
+ Line # Mem usage Increment Occurrences Line Contents
99
+ ============================================================
100
+ 10 50.2 MiB 50.2 MiB 1 @profile
101
+ 11 def process():
102
+ 12 550.2 MiB 500.0 MiB 1 data = load_csv("big.csv")
103
+ 13 750.2 MiB 200.0 MiB 1 expanded = expand_rows(data)
104
+ 14 780.5 MiB 30.3 MiB 1 result = aggregate(expanded)
105
+ 15 280.5 MiB -500.0 MiB 1 del data
106
+ 16 180.5 MiB -100.0 MiB 1 del expanded
107
+ 17 180.5 MiB 0.0 MiB 1 return result
108
+ ```
109
+
110
+ **How to read:**
111
+ - `Mem usage`: Total memory of the process at this line.
112
+ - `Increment`: Change in memory from the previous line.
113
+ - Line 12: Loading the CSV adds 500 MiB. This is the peak driver.
114
+ - Line 13: Expanding rows adds another 200 MiB. Peak memory is 780 MiB.
115
+ - Lines 15-16: Deleting intermediate data reclaims 600 MiB.
116
+ - **Peak memory: 780.5 MiB** (at line 14).
117
+
118
+ **Action:** If 780 MiB is too much:
119
+ 1. Process the CSV in chunks instead of loading all at once.
120
+ 2. Stream `expand_rows` as a generator instead of materializing the full list.
121
+ 3. If `data` is only needed for expansion, delete it before aggregation (already done here).
122
+
123
+ ---
124
+
125
+ ## Reading line_profiler Output
126
+
127
+ ### Annotated Example
128
+
129
+ ```
130
+ Timer unit: 1e-06 s
131
+
132
+ Total time: 2.5 s
133
+ File: parser.py
134
+ Function: parse_records at line 15
135
+
136
+ Line # Hits Time Per Hit % Time Line Contents
137
+ ==============================================================
138
+ 15 def parse_records(raw_data):
139
+ 16 1 5.0 5.0 0.0 results = []
140
+ 17 1000 2500.0 2.5 0.1 for line in raw_data:
141
+ 18 1000 1200000.0 1200.0 48.0 parsed = json.loads(line)
142
+ 19 1000 800000.0 800.0 32.0 validated = validate(parsed)
143
+ 20 950 450000.0 473.7 18.0 results.append(transform(validated))
144
+ 21 50 47500.0 950.0 1.9 log_invalid(parsed)
145
+ 22 1 2.0 2.0 0.0 return results
146
+ ```
147
+
148
+ **How to read:**
149
+ - `Hits`: How many times the line executed.
150
+ - `Time`: Total time spent on this line (microseconds).
151
+ - `Per Hit`: Average time per execution.
152
+ - `% Time`: Percentage of total function time.
153
+
154
+ **Analysis:**
155
+ - Line 18 (`json.loads`): 48% of time. 1.2ms per call × 1000 calls = 1.2s.
156
+ - Line 19 (`validate`): 32% of time. 0.8ms per call.
157
+ - Line 20 (`transform`): 18% of time. 0.47ms per call, but only 950 hits (50 were invalid).
158
+
159
+ **Action:** `json.loads` is the primary target. Options:
160
+ 1. Use `orjson.loads` (3-10x faster than `json.loads`).
161
+ 2. If the JSON structure is known, use a streaming parser.
162
+ 3. If data is coming from a controlled source, consider a faster format (msgpack).
163
+
164
+ ---
165
+
166
+ ## Reading `time` Output
167
+
168
+ ```bash
169
+ $ /usr/bin/time -v python script.py
170
+
171
+ Command being timed: "python script.py"
172
+ User time (seconds): 3.45 ← CPU time in user space
173
+ System time (seconds): 0.12 ← CPU time in kernel
174
+ Elapsed (wall clock) time: 8.23 ← actual time elapsed
175
+ Maximum resident set size (kbytes): 524288 ← peak memory (512 MB)
176
+ Major (requiring I/O) page faults: 0
177
+ Minor (reclaiming a frame) page faults: 131072
178
+ Voluntary context switches: 1523 ← waiting for I/O
179
+ Involuntary context switches: 45 ← preempted by scheduler
180
+ ```
181
+
182
+ **Interpretation patterns:**
183
+
184
+ | Condition | Meaning | Action |
185
+ |-----------|---------|--------|
186
+ | wall >> user + sys | I/O bound | Profile I/O: network calls, disk reads, sleep/wait |
187
+ | user >> sys | CPU bound (computation) | Profile CPU: use cProfile or py-spy |
188
+ | sys >> user | Kernel bound (syscalls) | Profile syscalls: use strace |
189
+ | high voluntary ctx switches | Lots of I/O waiting | Batch I/O, use async, reduce round-trips |
190
+ | high involuntary ctx switches | CPU contention | Reduce thread count, check other processes |
191
+ | high max RSS | Memory hungry | Profile memory: use memory_profiler or scalene |
192
+
193
+ **This example:** Wall time (8.23s) >> user + sys (3.57s) → the script is I/O bound, spending 4.66s waiting for something. Investigate network calls, database queries, or file I/O.
194
+
195
+ ---
196
+
197
+ ## Benchmark Result Analysis
198
+
199
+ ### hyperfine Output
200
+
201
+ ```
202
+ Benchmark 1: python v1.py
203
+ Time (mean ± σ): 1.234 s ± 0.056 s [User: 1.180 s, System: 0.045 s]
204
+ Range (min … max): 1.156 s … 1.345 s 10 runs
205
+
206
+ Benchmark 2: python v2.py
207
+ Time (mean ± σ): 0.876 s ± 0.034 s [User: 0.830 s, System: 0.040 s]
208
+ Range (min … max): 0.823 s … 0.934 s 10 runs
209
+
210
+ Summary
211
+ python v2.py ran
212
+ 1.41 ± 0.08 times faster than python v1.py
213
+ ```
214
+
215
+ **How to evaluate:**
216
+ 1. **Is the difference significant?** The 1.41x speedup is outside the standard deviation range, so yes.
217
+ 2. **Is the variance acceptable?** σ = 0.056s for v1, 0.034s for v2. Both are <5% of the mean — good.
218
+ 3. **Is the improvement meaningful?** 1.234s → 0.876s = 0.358s saved. For a batch job running once: marginal. For a request handler running 1000x/sec: substantial.
219
+
220
+ ### Statistical Significance Rules of Thumb
221
+
222
+ - **Difference > 2σ**: Likely real (p < 0.05 roughly).
223
+ - **Difference < 1σ**: Probably noise. Don't ship it.
224
+ - **Coefficient of variation (σ/mean) > 10%**: Your benchmark is noisy. Increase runs, reduce background load, or pin CPU frequency.
225
+ - **Outliers in range**: If min and max are far apart, investigate. Was there a GC pause? A background process?
226
+
227
+ ---
228
+
229
+ ## Common Pitfalls
230
+
231
+ 1. **Profiling debug builds**: Debug builds disable compiler optimizations, so hotspots won't match production behavior. Profile release/production builds.
232
+ 2. **Profiling on a loaded machine**: Other processes compete for CPU. Use isolated environments for benchmarks.
233
+ 3. **Ignoring warmup**: JIT compilers (Node.js V8, PyPy) are slow on first run. Always warm up.
234
+ 4. **Optimizing by percentage**: A 50% improvement on a 2ms function saves 1ms. A 5% improvement on a 10s function saves 500ms. Optimize by absolute time, not percentage.
235
+ 5. **Micro-benchmarking in isolation**: A function that's fast alone may be slow under real load (cache eviction, memory pressure, GC pauses). Benchmark in realistic conditions.
@@ -0,0 +1,395 @@
1
+ # Performance Profiling: Tool Command Reference
2
+
3
+ Full command reference for Python, JavaScript, and system profiling tools.
4
+
5
+ ## Contents
6
+
7
+ - [Python Profiling Tools](#python-profiling-tools)
8
+ - [cProfile](#cprofile)
9
+ - [py-spy](#py-spy)
10
+ - [scalene](#scalene)
11
+ - [memory_profiler](#memory_profiler)
12
+ - [line_profiler](#line_profiler)
13
+ - [pytest-benchmark](#pytest-benchmark)
14
+ - [JavaScript / Node.js Profiling Tools](#javascript--nodejs-profiling-tools)
15
+ - [V8 Built-in Profiler](#v8-built-in-profiler)
16
+ - [clinic.js](#clinicjs)
17
+ - [Chrome DevTools (Node.js)](#chrome-devtools-nodejs)
18
+ - [Lighthouse](#lighthouse)
19
+ - [System Profiling Tools](#system-profiling-tools)
20
+ - [time](#time)
21
+ - [htop / top](#htop--top)
22
+ - [iostat](#iostat)
23
+ - [perf (Linux)](#perf-linux)
24
+ - [strace (Linux)](#strace-linux)
25
+ - [Benchmarking Tools](#benchmarking-tools)
26
+ - [hyperfine](#hyperfine)
27
+
28
+ ---
29
+
30
+ ## Python Profiling Tools
31
+
32
+ ### cProfile
33
+
34
+ ```bash
35
+ # Profile a script, sorted by cumulative time
36
+ python -m cProfile -s cumtime script.py
37
+
38
+ # Sort options: calls, cumulative, filename, line, module, name, nfl, pcalls,
39
+ # stdname, time, tottime
40
+ python -m cProfile -s tottime script.py # sort by time spent in function itself
41
+ python -m cProfile -s calls script.py # sort by call count
42
+
43
+ # Save profile data for later analysis
44
+ python -m cProfile -o output.prof script.py
45
+
46
+ # Analyze saved profile
47
+ python -c "
48
+ import pstats
49
+ p = pstats.Stats('output.prof')
50
+ p.strip_dirs()
51
+ p.sort_stats('cumulative')
52
+ p.print_stats(30) # top 30 functions
53
+ p.print_callers('function_name') # who calls this function
54
+ p.print_callees('function_name') # what does this function call
55
+ "
56
+
57
+ # Profile a specific function in code
58
+ import cProfile
59
+ profiler = cProfile.Profile()
60
+ profiler.enable()
61
+ result = my_function()
62
+ profiler.disable()
63
+ profiler.print_stats(sort='cumulative')
64
+ ```
65
+
66
+ ### py-spy
67
+
68
+ ```bash
69
+ # Install
70
+ pip install py-spy
71
+
72
+ # Record a flamegraph (SVG output)
73
+ py-spy record -o flamegraph.svg -- python script.py
74
+
75
+ # Record with specific rate (samples/second, default 100)
76
+ py-spy record --rate 200 -o flamegraph.svg -- python script.py
77
+
78
+ # Record native (C extension) frames too
79
+ py-spy record --native -o flamegraph.svg -- python script.py
80
+
81
+ # Attach to a running process
82
+ py-spy record -o flamegraph.svg --pid 12345
83
+
84
+ # Record for a specific duration (seconds)
85
+ py-spy record --duration 30 -o flamegraph.svg --pid 12345
86
+
87
+ # Top-like live view
88
+ py-spy top -- python script.py
89
+ py-spy top --pid 12345
90
+
91
+ # Dump current stack traces (one-shot)
92
+ py-spy dump --pid 12345
93
+
94
+ # Output formats
95
+ py-spy record -f speedscope -o profile.json -- python script.py # speedscope
96
+ py-spy record -f raw -o profile.txt -- python script.py # raw text
97
+
98
+ # Profile subprocesses too
99
+ py-spy record --subprocesses -o flamegraph.svg -- python script.py
100
+
101
+ # Show thread IDs in the output (per-thread stacks)
102
+ py-spy record --threads -o flamegraph.svg -- python script.py
103
+ ```
104
+
105
+ ### scalene
106
+
107
+ ```bash
108
+ # Install
109
+ pip install scalene
110
+
111
+ # Basic profile
112
+ scalene script.py
113
+
114
+ # CPU only
115
+ scalene --cpu script.py
116
+
117
+ # Memory only
118
+ scalene --memory script.py
119
+
120
+ # Reduced profile (only functions with significant time)
121
+ scalene --reduced-profile script.py
122
+
123
+ # Profile specific files only
124
+ scalene --profile-only mymodule.py script.py
125
+
126
+ # Output formats
127
+ scalene --json --outfile profile.json script.py
128
+ scalene --html --outfile profile.html script.py
129
+
130
+ # Programmatic usage
131
+ from scalene import scalene_profiler
132
+ scalene_profiler.start()
133
+ # ... code to profile ...
134
+ scalene_profiler.stop()
135
+ ```
136
+
137
+ ### memory_profiler
138
+
139
+ ```bash
140
+ # Install
141
+ pip install memory_profiler
142
+
143
+ # Profile a script (requires @profile decorators in code)
144
+ python -m memory_profiler script.py
145
+
146
+ # Time-based memory usage plot (outputs to mprofile_*.dat)
147
+ mprof run script.py
148
+ mprof plot # opens matplotlib plot
149
+ mprof plot -o memory.png # save to file
150
+
151
+ # Include memory of child processes spawned by the script
152
+ mprof run --include-children script.py
153
+ ```
154
+
155
+ ### line_profiler
156
+
157
+ ```bash
158
+ # Install
159
+ pip install line_profiler
160
+
161
+ # Profile (requires @profile decorators in code)
162
+ kernprof -l script.py # generates script.py.lprof
163
+ python -m line_profiler script.py.lprof # view results
164
+
165
+ # Or combined
166
+ kernprof -l -v script.py # run and view immediately
167
+ ```
168
+
169
+ ### pytest-benchmark
170
+
171
+ ```bash
172
+ # Install
173
+ pip install pytest-benchmark
174
+
175
+ # Run benchmarks only
176
+ pytest --benchmark-only
177
+
178
+ # Sort by mean time
179
+ pytest --benchmark-sort=mean
180
+
181
+ # Other sort options: min, max, stddev, name, fullname, rounds
182
+ pytest --benchmark-sort=stddev
183
+
184
+ # Save baseline
185
+ pytest --benchmark-save=baseline
186
+
187
+ # Compare against saved baseline
188
+ pytest --benchmark-compare=0001_baseline
189
+
190
+ # Minimum rounds and warmup
191
+ pytest --benchmark-min-rounds=20 --benchmark-warmup=on
192
+
193
+ # Disable GC during benchmarks (more stable results)
194
+ pytest --benchmark-disable-gc
195
+
196
+ # Output formats
197
+ pytest --benchmark-json=results.json
198
+ pytest --benchmark-histogram=output # generates output.svg
199
+ ```
200
+
201
+ ---
202
+
203
+ ## JavaScript / Node.js Profiling Tools
204
+
205
+ ### V8 Built-in Profiler
206
+
207
+ ```bash
208
+ # Generate V8 profile log
209
+ node --prof app.js
210
+
211
+ # Process the log
212
+ node --prof-process isolate-*.log > profile.txt
213
+
214
+ # CPU profiling with V8 inspector
215
+ node --cpu-prof app.js
216
+ # Generates CPU.*.cpuprofile — open in Chrome DevTools
217
+
218
+ # Heap snapshot
219
+ node --heap-prof app.js
220
+ # Generates Heap.*.heapprofile
221
+ ```
222
+
223
+ ### clinic.js
224
+
225
+ ```bash
226
+ # Install
227
+ npm install -g clinic
228
+
229
+ # Doctor: overall health (event loop delays, GC, active handles)
230
+ clinic doctor -- node app.js
231
+
232
+ # Flame: CPU flamegraph
233
+ clinic flame -- node app.js
234
+
235
+ # Bubbleprof: async flow visualization
236
+ clinic bubbleprof -- node app.js
237
+
238
+ # HeapProfiler: memory allocation tracking
239
+ clinic heapprofiler -- node app.js
240
+
241
+ # Combine with autocannon for load testing
242
+ clinic doctor --autocannon [ /api/endpoint ] -- node app.js
243
+ clinic flame --autocannon [ -m POST /api/data ] -- node app.js
244
+ ```
245
+
246
+ ### Chrome DevTools (Node.js)
247
+
248
+ ```bash
249
+ # Start with inspector (attach when ready)
250
+ node --inspect app.js
251
+
252
+ # Start with inspector and break on first line
253
+ node --inspect-brk app.js
254
+
255
+ # Custom port
256
+ node --inspect=0.0.0.0:9229 app.js
257
+
258
+ # Then open chrome://inspect in Chrome and click "inspect"
259
+ ```
260
+
261
+ ### Lighthouse
262
+
263
+ ```bash
264
+ # Install
265
+ npm install -g lighthouse
266
+
267
+ # Basic audit
268
+ lighthouse https://example.com
269
+
270
+ # Output formats
271
+ lighthouse https://example.com --output json --output-path report.json
272
+ lighthouse https://example.com --output html --output-path report.html
273
+
274
+ # Specific categories
275
+ lighthouse https://example.com --only-categories=performance
276
+
277
+ # Mobile vs Desktop
278
+ lighthouse https://example.com --preset=desktop
279
+ lighthouse https://example.com                   # mobile emulation (the default, no preset flag)
280
+
281
+ # Headless Chrome flags
282
+ lighthouse https://example.com --chrome-flags="--headless --no-sandbox"
283
+ ```
284
+
285
+ ---
286
+
287
+ ## System Profiling Tools
288
+
289
+ ### time
290
+
291
+ ```bash
292
+ # Basic timing
293
+ time python script.py
294
+
295
+ # GNU time with more details (note: use \time or /usr/bin/time, not the shell builtin)
296
+ /usr/bin/time -v python script.py
297
+ # Outputs: wall clock, user CPU, system CPU, max RSS, page faults, context switches
298
+ ```
299
+
300
+ ### htop / top
301
+
302
+ ```bash
303
+ # Interactive process monitor
304
+ htop
305
+
306
+ # Monitor specific PID
307
+ htop -p 12345
308
+
309
+ # Sort by memory
310
+ htop --sort-key=PERCENT_MEM
311
+
312
+ # Non-interactive (for scripting)
313
+ top -b -n 1 -p 12345
314
+ ```
315
+
316
+ ### iostat
317
+
318
+ ```bash
319
+ # Disk I/O statistics, refresh every 1 second
320
+ iostat -x 1
321
+
322
+ # Specific device
323
+ iostat -x -d sda 1
324
+
325
+ # Key columns: r/s (reads/sec), w/s (writes/sec), %util (device utilization)
326
+ ```
327
+
328
+ ### perf (Linux)
329
+
330
+ ```bash
331
+ # Count hardware events
332
+ perf stat python script.py
333
+ # Reports: cycles, instructions, cache misses, branch misses
334
+
335
+ # Specific events
336
+ perf stat -e cache-misses,cache-references python script.py
337
+
338
+ # Record for flamegraph
339
+ perf record -g python script.py
340
+ perf script > perf.data.txt
341
+
342
+ # Generate flamegraph from perf data
343
+ # (requires FlameGraph tools: https://github.com/brendangregg/FlameGraph)
344
+ perf script | stackcollapse-perf.pl | flamegraph.pl > flamegraph.svg
345
+ ```
346
+
347
+ ### strace (Linux)
348
+
349
+ ```bash
350
+ # Summary of syscall time
351
+ strace -c python script.py
352
+
353
+ # Trace specific syscall categories
354
+ strace -e trace=network python script.py # network calls
355
+ strace -e trace=file python script.py # file operations
356
+ strace -e trace=memory python script.py # memory operations
357
+
358
+ # Trace with timestamps
359
+ strace -t python script.py # HH:MM:SS
360
+ strace -T python script.py # time spent in each syscall
361
+ ```
362
+
363
+ ---
364
+
365
+ ## Benchmarking Tools
366
+
367
+ ### hyperfine
368
+
369
+ ```bash
370
+ # Install: cargo install hyperfine, or brew install hyperfine
371
+
372
+ # Basic benchmark
373
+ hyperfine 'python script.py'
374
+
375
+ # With warmup runs
376
+ hyperfine --warmup 3 'python script.py'
377
+
378
+ # Compare two commands
379
+ hyperfine --warmup 3 'python v1.py' 'python v2.py'
380
+
381
+ # Parameter sweep
382
+ hyperfine --warmup 3 -P threads 1 8 'python script.py --threads {threads}'
383
+
384
+ # Parameter list
385
+ hyperfine --warmup 3 -L algo bubble,merge,quick 'python sort.py --algo {algo}'
386
+
387
+ # Minimum runs
388
+ hyperfine --min-runs 20 'python script.py'
389
+
390
+ # Setup and cleanup commands
391
+ hyperfine --setup 'python generate_data.py' --cleanup 'rm data.tmp' 'python script.py'
392
+
393
+ # Export results
394
+ hyperfine --warmup 3 --export-json results.json --export-markdown results.md 'python script.py'
395
+ ```