kollabor 0.4.9__py3-none-any.whl → 0.4.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (192)
  1. agents/__init__.py +2 -0
  2. agents/coder/__init__.py +0 -0
  3. agents/coder/agent.json +4 -0
  4. agents/coder/api-integration.md +2150 -0
  5. agents/coder/cli-pretty.md +765 -0
  6. agents/coder/code-review.md +1092 -0
  7. agents/coder/database-design.md +1525 -0
  8. agents/coder/debugging.md +1102 -0
  9. agents/coder/dependency-management.md +1397 -0
  10. agents/coder/git-workflow.md +1099 -0
  11. agents/coder/refactoring.md +1454 -0
  12. agents/coder/security-hardening.md +1732 -0
  13. agents/coder/system_prompt.md +1448 -0
  14. agents/coder/tdd.md +1367 -0
  15. agents/creative-writer/__init__.py +0 -0
  16. agents/creative-writer/agent.json +4 -0
  17. agents/creative-writer/character-development.md +1852 -0
  18. agents/creative-writer/dialogue-craft.md +1122 -0
  19. agents/creative-writer/plot-structure.md +1073 -0
  20. agents/creative-writer/revision-editing.md +1484 -0
  21. agents/creative-writer/system_prompt.md +690 -0
  22. agents/creative-writer/worldbuilding.md +2049 -0
  23. agents/data-analyst/__init__.py +30 -0
  24. agents/data-analyst/agent.json +4 -0
  25. agents/data-analyst/data-visualization.md +992 -0
  26. agents/data-analyst/exploratory-data-analysis.md +1110 -0
  27. agents/data-analyst/pandas-data-manipulation.md +1081 -0
  28. agents/data-analyst/sql-query-optimization.md +881 -0
  29. agents/data-analyst/statistical-analysis.md +1118 -0
  30. agents/data-analyst/system_prompt.md +928 -0
  31. agents/default/__init__.py +0 -0
  32. agents/default/agent.json +4 -0
  33. agents/default/dead-code.md +794 -0
  34. agents/default/explore-agent-system.md +585 -0
  35. agents/default/system_prompt.md +1448 -0
  36. agents/kollabor/__init__.py +0 -0
  37. agents/kollabor/analyze-plugin-lifecycle.md +175 -0
  38. agents/kollabor/analyze-terminal-rendering.md +388 -0
  39. agents/kollabor/code-review.md +1092 -0
  40. agents/kollabor/debug-mcp-integration.md +521 -0
  41. agents/kollabor/debug-plugin-hooks.md +547 -0
  42. agents/kollabor/debugging.md +1102 -0
  43. agents/kollabor/dependency-management.md +1397 -0
  44. agents/kollabor/git-workflow.md +1099 -0
  45. agents/kollabor/inspect-llm-conversation.md +148 -0
  46. agents/kollabor/monitor-event-bus.md +558 -0
  47. agents/kollabor/profile-performance.md +576 -0
  48. agents/kollabor/refactoring.md +1454 -0
  49. agents/kollabor/system_prompt copy.md +1448 -0
  50. agents/kollabor/system_prompt.md +757 -0
  51. agents/kollabor/trace-command-execution.md +178 -0
  52. agents/kollabor/validate-config.md +879 -0
  53. agents/research/__init__.py +0 -0
  54. agents/research/agent.json +4 -0
  55. agents/research/architecture-mapping.md +1099 -0
  56. agents/research/codebase-analysis.md +1077 -0
  57. agents/research/dependency-audit.md +1027 -0
  58. agents/research/performance-profiling.md +1047 -0
  59. agents/research/security-review.md +1359 -0
  60. agents/research/system_prompt.md +492 -0
  61. agents/technical-writer/__init__.py +0 -0
  62. agents/technical-writer/agent.json +4 -0
  63. agents/technical-writer/api-documentation.md +2328 -0
  64. agents/technical-writer/changelog-management.md +1181 -0
  65. agents/technical-writer/readme-writing.md +1360 -0
  66. agents/technical-writer/style-guide.md +1410 -0
  67. agents/technical-writer/system_prompt.md +653 -0
  68. agents/technical-writer/tutorial-creation.md +1448 -0
  69. core/__init__.py +0 -2
  70. core/application.py +343 -88
  71. core/cli.py +229 -10
  72. core/commands/menu_renderer.py +463 -59
  73. core/commands/registry.py +14 -9
  74. core/commands/system_commands.py +2461 -14
  75. core/config/loader.py +151 -37
  76. core/config/service.py +18 -6
  77. core/events/bus.py +29 -9
  78. core/events/executor.py +205 -75
  79. core/events/models.py +27 -8
  80. core/fullscreen/command_integration.py +20 -24
  81. core/fullscreen/components/__init__.py +10 -1
  82. core/fullscreen/components/matrix_components.py +1 -2
  83. core/fullscreen/components/space_shooter_components.py +654 -0
  84. core/fullscreen/plugin.py +5 -0
  85. core/fullscreen/renderer.py +52 -13
  86. core/fullscreen/session.py +52 -15
  87. core/io/__init__.py +29 -5
  88. core/io/buffer_manager.py +6 -1
  89. core/io/config_status_view.py +7 -29
  90. core/io/core_status_views.py +267 -347
  91. core/io/input/__init__.py +25 -0
  92. core/io/input/command_mode_handler.py +711 -0
  93. core/io/input/display_controller.py +128 -0
  94. core/io/input/hook_registrar.py +286 -0
  95. core/io/input/input_loop_manager.py +421 -0
  96. core/io/input/key_press_handler.py +502 -0
  97. core/io/input/modal_controller.py +1011 -0
  98. core/io/input/paste_processor.py +339 -0
  99. core/io/input/status_modal_renderer.py +184 -0
  100. core/io/input_errors.py +5 -1
  101. core/io/input_handler.py +211 -2452
  102. core/io/key_parser.py +7 -0
  103. core/io/layout.py +15 -3
  104. core/io/message_coordinator.py +111 -2
  105. core/io/message_renderer.py +129 -4
  106. core/io/status_renderer.py +147 -607
  107. core/io/terminal_renderer.py +97 -51
  108. core/io/terminal_state.py +21 -4
  109. core/io/visual_effects.py +816 -165
  110. core/llm/agent_manager.py +1063 -0
  111. core/llm/api_adapters/__init__.py +44 -0
  112. core/llm/api_adapters/anthropic_adapter.py +432 -0
  113. core/llm/api_adapters/base.py +241 -0
  114. core/llm/api_adapters/openai_adapter.py +326 -0
  115. core/llm/api_communication_service.py +167 -113
  116. core/llm/conversation_logger.py +322 -16
  117. core/llm/conversation_manager.py +556 -30
  118. core/llm/file_operations_executor.py +84 -32
  119. core/llm/llm_service.py +934 -103
  120. core/llm/mcp_integration.py +541 -57
  121. core/llm/message_display_service.py +135 -18
  122. core/llm/plugin_sdk.py +1 -2
  123. core/llm/profile_manager.py +1183 -0
  124. core/llm/response_parser.py +274 -56
  125. core/llm/response_processor.py +16 -3
  126. core/llm/tool_executor.py +6 -1
  127. core/logging/__init__.py +2 -0
  128. core/logging/setup.py +34 -6
  129. core/models/resume.py +54 -0
  130. core/plugins/__init__.py +4 -2
  131. core/plugins/base.py +127 -0
  132. core/plugins/collector.py +23 -161
  133. core/plugins/discovery.py +37 -3
  134. core/plugins/factory.py +6 -12
  135. core/plugins/registry.py +5 -17
  136. core/ui/config_widgets.py +128 -28
  137. core/ui/live_modal_renderer.py +2 -1
  138. core/ui/modal_actions.py +5 -0
  139. core/ui/modal_overlay_renderer.py +0 -60
  140. core/ui/modal_renderer.py +268 -7
  141. core/ui/modal_state_manager.py +29 -4
  142. core/ui/widgets/base_widget.py +7 -0
  143. core/updates/__init__.py +10 -0
  144. core/updates/version_check_service.py +348 -0
  145. core/updates/version_comparator.py +103 -0
  146. core/utils/config_utils.py +685 -526
  147. core/utils/plugin_utils.py +1 -1
  148. core/utils/session_naming.py +111 -0
  149. fonts/LICENSE +21 -0
  150. fonts/README.md +46 -0
  151. fonts/SymbolsNerdFont-Regular.ttf +0 -0
  152. fonts/SymbolsNerdFontMono-Regular.ttf +0 -0
  153. fonts/__init__.py +44 -0
  154. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/METADATA +54 -4
  155. kollabor-0.4.15.dist-info/RECORD +228 -0
  156. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/top_level.txt +2 -0
  157. plugins/agent_orchestrator/__init__.py +39 -0
  158. plugins/agent_orchestrator/activity_monitor.py +181 -0
  159. plugins/agent_orchestrator/file_attacher.py +77 -0
  160. plugins/agent_orchestrator/message_injector.py +135 -0
  161. plugins/agent_orchestrator/models.py +48 -0
  162. plugins/agent_orchestrator/orchestrator.py +403 -0
  163. plugins/agent_orchestrator/plugin.py +976 -0
  164. plugins/agent_orchestrator/xml_parser.py +191 -0
  165. plugins/agent_orchestrator_plugin.py +9 -0
  166. plugins/enhanced_input/box_styles.py +1 -0
  167. plugins/enhanced_input/color_engine.py +19 -4
  168. plugins/enhanced_input/config.py +2 -2
  169. plugins/enhanced_input_plugin.py +61 -11
  170. plugins/fullscreen/__init__.py +6 -2
  171. plugins/fullscreen/example_plugin.py +1035 -222
  172. plugins/fullscreen/setup_wizard_plugin.py +592 -0
  173. plugins/fullscreen/space_shooter_plugin.py +131 -0
  174. plugins/hook_monitoring_plugin.py +436 -78
  175. plugins/query_enhancer_plugin.py +66 -30
  176. plugins/resume_conversation_plugin.py +1494 -0
  177. plugins/save_conversation_plugin.py +98 -32
  178. plugins/system_commands_plugin.py +70 -56
  179. plugins/tmux_plugin.py +154 -78
  180. plugins/workflow_enforcement_plugin.py +94 -92
  181. system_prompt/default.md +952 -886
  182. core/io/input_mode_manager.py +0 -402
  183. core/io/modal_interaction_handler.py +0 -315
  184. core/io/raw_input_processor.py +0 -946
  185. core/storage/__init__.py +0 -5
  186. core/storage/state_manager.py +0 -84
  187. core/ui/widget_integration.py +0 -222
  188. core/utils/key_reader.py +0 -171
  189. kollabor-0.4.9.dist-info/RECORD +0 -128
  190. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/WHEEL +0 -0
  191. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/entry_points.txt +0 -0
  192. {kollabor-0.4.9.dist-info → kollabor-0.4.15.dist-info}/licenses/LICENSE +0 -0
agents/research/performance-profiling.md
@@ -0,0 +1,1047 @@
1
+ <!-- Performance Profiling skill - identify bottlenecks and performance issues -->
2
+
3
+ performance-profiling mode: MEASURE AND REPORT ONLY
4
+
5
+ when this skill is active, you follow performance investigation discipline.
6
+ this is a comprehensive guide to finding performance bottlenecks.
7
+ you DO NOT implement optimizations - you report findings for the coder agent.
8
+
9
+
10
+ PHASE 0: PROFILING TOOLKIT VERIFICATION
11
+
12
+ before conducting ANY performance analysis, verify your tools are ready.
13
+
14
+
15
+ check for python profilers
16
+
17
+ <terminal>python -c "import cProfile; print('cProfile available')"</terminal>
18
+ <terminal>python -c "import pstats; print('pstats available')"</terminal>
19
+
20
+ if not available (cProfile and pstats ship with the standard library):
21
+ the python installation itself is broken - investigate the interpreter rather than pip-installing anything; pip cannot add stdlib modules.
22
+
23
+
24
+ check for advanced profilers
25
+
26
+ <terminal>python -c "import line_profiler; print('line_profiler installed')" 2>/dev/null || echo "line_profiler not installed"</terminal>
27
+ <terminal>python -c "import memory_profiler; print('memory_profiler installed')" 2>/dev/null || echo "memory_profiler not installed"</terminal>
28
+ <terminal>python -c "import py-spy; print('py-spy installed')" 2>/dev/null || echo "py-spy not installed"</terminal>
29
+
30
+ if not installed:
31
+ <terminal>pip install line_profiler memory_profiler py-spy --quiet</terminal>
32
+
33
+ verify installation:
34
+ <terminal>kernprof --version 2>/dev/null || echo "kernprof needs install"</terminal>
35
+ <terminal>mprof --version 2>/dev/null || echo "mprof needs install"</terminal>
36
+
37
+
38
+ check for visualization tools
39
+
40
+ <terminal>python -c "import snakeviz; print('snakeviz installed')" 2>/dev/null || echo "snakeviz not installed"</terminal>
41
+ <terminal>python -c "import tuna; print('tuna installed')" 2>/dev/null || echo "tuna not installed"</terminal>
42
+
43
+ if not installed:
44
+ <terminal>pip install snakeviz tuna --quiet</terminal>
45
+
46
+
47
+ check for system monitoring tools
48
+
49
+ <terminal>which time 2>/dev/null || echo "time not found"</terminal>
50
+ <terminal>which ps 2>/dev/null || echo "ps not found"</terminal>
51
+ <terminal>which top 2>/dev/null || echo "top not found"</terminal>
52
+ <terminal>which htop 2>/dev/null || echo "htop not found"</terminal>
53
+
54
+ these help monitor resource usage during profiling.
55
+
56
+
57
+ check project structure
58
+
59
+ <terminal>ls -la</terminal>
60
+ <terminal>find . -name "*.py" -type f | head -20</terminal>
61
+ <terminal>grep -r "if __name__" --include="*.py" . 2>/dev/null | head -10</terminal>
62
+
63
+ identify:
64
+ - main entry points
65
+ - long-running functions
66
+ - data processing modules
67
+ - API endpoints
68
+
69
+
70
+ verify baseline measurements
71
+
72
+ <terminal>time python -c "print('python works')"</terminal>
73
+
74
+ if python not responding quickly, investigate system issues first.
75
+
76
+
77
+ PHASE 1: ESTABLISHING BASELINE METRICS
78
+
79
+ you cannot improve what you do not measure.
80
+
81
+
82
+ identify performance-critical paths
83
+
84
+ <read><file>main.py</file></read>
85
+
86
+ look for:
87
+ - main loops
88
+ - request handlers
89
+ - data processing pipelines
90
+ - file I/O operations
91
+ - network calls
92
+ - heavy computations
93
+
94
+ document these as primary profiling targets.
95
+
96
+
97
+ measure execution time
98
+
99
+ <terminal>time python main.py 2>&1 | tee /tmp/baseline_time.txt</terminal>
100
+
101
+ record:
102
+ - real (wall clock) time
103
+ - user CPU time
104
+ - system CPU time
105
+
106
+ baseline establishes what "normal" performance looks like.
107
+
108
+
109
+ measure memory baseline
110
+
111
+ <terminal>python -c "import psutil; print(f'Memory: {psutil.Process().memory_info().rss / 1024 / 1024:.1f} MB')"</terminal>
112
+
113
+ record baseline memory usage before profiling.
114
+
115
+
116
+ measure with python timeit
117
+
118
+ <terminal>python -m timeit -s "import main" "main.main()" 2>&1 | tee /tmp/timeit_results.txt</terminal>
119
+
120
+ timeit runs code multiple times for statistical accuracy.
121
+
122
+
123
+ create reproducible workload
124
+
125
+ for consistent profiling, you need consistent input.
126
+
127
+ identify test data:
128
+ <terminal>find . -name "test*.json" -o -name "test*.csv" -o -name "fixtures*" 2>/dev/null | head -10</terminal>
129
+
130
+ use the same workload for all profiling runs.
131
+
132
+
133
+ PHASE 2: CPU PROFILING WITH CPROFILE
134
+
135
+
136
+ basic cProfile usage
137
+
138
+ <terminal>python -m cProfile -o /tmp/profile.stats main.py 2>&1</terminal>
139
+
140
+ this creates a binary statistics file.
141
+
142
+
143
+ view cProfile output in text
144
+
145
+ <terminal>python -m pstats /tmp/profile.stats</terminal>
146
+
147
+ in pstats interactive mode:
148
+ - stats 10 # show top 10 functions
149
+ - strip # strip directory names
150
+ - sort cumulative # sort by cumulative time
151
+ - sort time # sort by own time
152
+ - callers functionName # show who calls a function
153
+ - callees functionName # show what function calls
154
+
155
+
156
+ view sorted by different metrics
157
+
158
+ <terminal>python -c "import pstats; p = pstats.Stats('/tmp/profile.stats'); p.sort_stats('cumulative').print_stats(20)"</terminal>
159
+
160
+ metrics to sort by:
161
+ - cumulative: total time including subcalls
162
+ - time: time in function excluding subcalls
163
+ - calls: number of calls
164
+ - filename: file name
165
+
166
+
167
+ profile with time output
168
+
169
+ <terminal>python -m cProfile -s time main.py 2>&1 | tee /tmp/profile_sorted.txt</terminal>
170
+
171
+ <terminal>python -m cProfile -s cumulative main.py 2>&1 | tee /tmp/profile_cumulative.txt</terminal>
172
+
173
+
174
+ profile specific function
175
+
176
+ <read><file>path/to/module.py</file></read>
177
+
178
+ create test script:
179
+ <terminal>python -c "import cProfile; from module import function; cProfile.run('function()', 'function_profile.stats')"</terminal>
180
+
181
+
182
+ profile with context manager
183
+
184
+ <read><file>path/to/module.py</file></read>
185
+
186
+ look for existing profiling code:
187
+ <terminal>grep -rn "cProfile\|pstats" --include="*.py" . 2>/dev/null</terminal>
188
+
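+ if none exists, a minimal sketch of region-level profiling to hand to the coder agent (process_data is a hypothetical stand-in; the context-manager form needs python 3.8+):
+
+ import cProfile
+ import pstats
+
+ def process_data():
+     # hypothetical stand-in for the hot path identified in phase 1
+     return sum(i * i for i in range(100_000))
+
+ # profile only this region instead of the whole script
+ with cProfile.Profile() as prof:
+     process_data()
+
+ stats = pstats.Stats(prof)
+ stats.sort_stats("cumulative").print_stats(10)
+ stats.dump_stats("/tmp/region_profile.stats")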
189
+
190
+ PHASE 3: INTERPRETING CPROFILE OUTPUT
191
+
192
+
193
+ understanding the columns
194
+
195
+ ncalls: number of calls
196
+ tottime: total time in function (excluding subcalls)
197
+ percall: tottime / ncalls
198
+ cumtime: cumulative time (including subcalls)
199
+ percall: cumtime / ncalls
200
+ filename:lineno(function): location
201
+
202
+ key insights:
203
+ - high cumtime + low tottime = function calls slow sub-functions
204
+ - high tottime = function itself is slow
205
+ - high ncalls = function called many times (optimization target)
206
+
207
+
208
+ identifying bottlenecks
209
+
210
+ look for:
211
+ [1] functions with high cumulative time
212
+ these are the slowest paths through code
213
+
214
+ [2] functions called many times
215
+ optimization candidates: memoization, caching
216
+
217
+ [3] functions with high self-time
218
+ the computation itself is slow
219
+
220
+ <terminal>python -c "import pstats; p = pstats.Stats('/tmp/profile.stats'); p.sort_stats('cumulative').print_stats(30)"</terminal>
221
+
222
+
223
+ identifying hot loops
224
+
225
+ <terminal>python -c "import pstats; p = pstats.Stats('/tmp/profile.stats'); p.sort_stats('calls').print_stats(30)"</terminal>
226
+
227
+ many calls to same function = hot loop candidate.
228
+
229
+
230
+ finding unexpected calls
231
+
232
+ <terminal>python -c "import pstats; p = pstats.Stats('/tmp/profile.stats'); p.print_callers('function_name')"</terminal>
233
+
234
+ discover who is calling expensive functions unexpectedly.
235
+
236
+
237
+ PHASE 4: LINE PROFILING WITH LINE_PROFILER
238
+
239
+
240
+ install line_profiler
241
+
242
+ <terminal>pip install line_profiler --quiet</terminal>
243
+
244
+ verify:
245
+ <terminal>kernprof --version</terminal>
246
+
247
+
248
+ decorate functions for line profiling
249
+
250
+ <read><file>path/to/module.py</file></read>
251
+
252
+ add @profile decorator to functions:
253
+ <terminal>grep -rn "@profile" --include="*.py" . 2>/dev/null | head -10</terminal>
254
+
255
+ if decorators exist, they are for line_profiler.
256
+
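+ a minimal sketch of what a kernprof target looks like (slow_parse is hypothetical); kernprof injects the bare profile name at runtime, so guard it to keep the module importable in normal runs:
+
+ try:
+     profile  # injected by kernprof -l
+ except NameError:
+     def profile(func):
+         # no-op fallback so the module still imports without kernprof
+         return func
+
+ @profile
+ def slow_parse(lines):
+     parsed = []
+     for line in lines:
+         parsed.append(line.strip().split(","))
+     return parsed
+
+ if __name__ == "__main__":
+     slow_parse(["a,b,c"] * 100_000)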
257
+
258
+ run line profiler
259
+
260
+ <terminal>kernprof -l -v main.py 2>&1 | tee /tmp/line_profile.txt</terminal>
261
+
262
+ output shows time per line of code.
263
+
264
+
265
+ understanding line profiler output
266
+
267
+ Line # Hits Time Per Hit % Time Line Contents
268
+ ================================================================
269
+ 1 @profile
270
+ 2 def process(data):
271
+ 3 1000 50 0.0 5.0 result = []
272
+ 4 1000000 10000 0.0 50.0 for item in data:
273
+ 5 1000000 8000 0.0 40.0 result.append(transform(item))
274
+ 6 1000 200 0.2 5.0 return result
275
+
276
+ key metrics:
277
+ - Hits: how many times line executed
278
+ - Time: total time spent on this line
279
+ - Per Hit: average time per execution
280
+ - % Time: percentage of total function time
281
+
282
+ identify slow lines by high % Time.
283
+
284
+
285
+ profile specific functions only
286
+
287
+ <terminal>kernprof -l -v -b main.py 2>&1 | tee /tmp/line_profile_specific.txt</terminal>
288
+
289
+ or view the saved results file produced by kernprof:
290
+ <terminal>python -m line_profiler main.py.lprof 2>&1</terminal>
291
+
292
+
293
+ PHASE 5: MEMORY PROFILING
294
+
295
+
296
+ memory usage with memory_profiler
297
+
298
+ <terminal>pip install memory_profiler --quiet</terminal>
299
+
300
+ verify:
301
+ <terminal>mprof --version</terminal>
302
+
303
+
304
+ profile memory line by line
305
+
306
+ add @profile decorator:
307
+ <read><file>path/to/module.py</file></read>
308
+
309
+ check for existing @profile decorators:
310
+ <terminal>grep -rn "^@profile" --include="*.py" . 2>/dev/null</terminal>
311
+
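+ a minimal sketch of a memory_profiler target (build_table is hypothetical); unlike kernprof, the decorator can be imported explicitly, which also works with python -m memory_profiler:
+
+ from memory_profiler import profile
+
+ @profile
+ def build_table(n=100_000):
+     # allocate enough rows for the per-line increments to be visible
+     rows = [{"id": i, "payload": "x" * 100} for i in range(n)]
+     return rows
+
+ if __name__ == "__main__":
+     build_table()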
312
+
313
+ run memory profiler
314
+
315
+ <terminal>python -m memory_profiler main.py 2>&1 | tee /tmp/memory_profile.txt</terminal>
316
+
317
+
318
+ understanding memory profiler output
319
+
320
+ Line # Mem usage Increment Occurrences Line Contents
321
+ =================================================================
322
+ 1 50.0 MiB 50.0 MiB 1 @profile
323
+ 2 50.0 MiB 0.0 MiB 1 def process():
324
+ 3 50.0 MiB 0.0 MiB 1 data = []
325
+ 4 150.0 MiB 100.0 MiB 10000 for i in range(10000):
326
+ 5 150.0 MiB 0.0 MiB 10000 data.append(large_object(i))
327
+ 6 50.0 MiB -100.0 MiB 1 return data
328
+
329
+ key metrics:
330
+ - Mem usage: total memory at that line
331
+ - Increment: memory change on that line
332
+ - Occurrences: how many times executed
333
+
334
+ identify memory allocations by large increments.
335
+
336
+
337
+ track memory over time
338
+
339
+ <terminal>mprof run main.py 2>&1</terminal>
340
+ <terminal>mprof plot --output /tmp/memory_plot.png</terminal>
341
+
342
+ this shows memory usage timeline.
343
+
344
+
345
+ find memory leaks
346
+
347
+ run mprof for extended duration:
348
+ <terminal>mprof run --interval 0.1 main.py 2>&1</terminal>
349
+ <terminal>mprof plot</terminal>
350
+
351
+ look for:
352
+ - continuous memory growth
353
+ - memory not released after operations
354
+ - baseline increase over time
355
+
356
+
357
+ compare memory before/after
358
+
359
+ <terminal>python -c "import tracemalloc, runpy; tracemalloc.start(); runpy.run_path('main.py', run_name='__main__'); cur, peak = tracemalloc.get_traced_memory(); print(f'current={cur/1e6:.1f} MB peak={peak/1e6:.1f} MB')"</terminal>
360
+
361
+ or use memory_profiler with timestamps.
362
+
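+ a stdlib-only sketch using tracemalloc to diff allocations around a suspect operation (suspect_operation is hypothetical):
+
+ import tracemalloc
+
+ def suspect_operation():
+     # hypothetical allocation-heavy step
+     return ["x" * 1000 for _ in range(10_000)]
+
+ tracemalloc.start()
+ before = tracemalloc.take_snapshot()
+ data = suspect_operation()   # keep the result alive so the diff shows it
+ after = tracemalloc.take_snapshot()
+
+ # top 10 sources of new allocations between the two snapshots
+ for stat in after.compare_to(before, "lineno")[:10]:
+     print(stat)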
363
+
364
+ PHASE 6: PROFILING SPECIFIC PATTERNS
365
+
366
+
367
+ profiling I/O bound operations
368
+
369
+ <terminal>grep -rn "open(.*r\|\.read(\|\.write(" --include="*.py" . 2>/dev/null | head -20</terminal>
370
+
371
+ I/O issues:
372
+ - many small file reads
373
+ - no buffering
374
+ - synchronous I/O in loops
375
+ - unnecessary file operations
376
+
377
+ profile I/O specifically:
378
+ <terminal>python -m cProfile -s time main.py 2>&1 | grep -E "(read|write|open)"</terminal>
379
+
380
+
381
+ profiling network operations
382
+
383
+ <terminal>grep -rn "requests\.\|urllib\|aiohttp\|httpx" --include="*.py" . 2>/dev/null | head -20</terminal>
384
+
385
+ network issues:
386
+ - no connection pooling
387
+ - requests in loops
388
+ - no timeouts configured
389
+ - sequential vs parallel requests
390
+
391
+ identify network hotspots:
392
+ <terminal>python -c "import cProfile; import pstats; p = pstats.Stats('profile.stats'); p.print_stats(); p.sort_stats('cumulative'); p.print_stats('request|connect')" 2>&1</terminal>
393
+
394
+
395
+ profiling database queries
396
+
397
+ <terminal>grep -rn "\.execute\|\.query\|\.fetchall\|\.fetchone" --include="*.py" . 2>/dev/null | head -30</terminal>
398
+
399
+ database issues:
400
+ - N+1 query patterns
401
+ - missing indexes
402
+ - fetching too much data
403
+ - no query result caching
404
+ - queries in loops
405
+
406
+ find N+1 patterns:
407
+ <terminal>grep -rn "for.*:" --include="*.py" . 2>/dev/null | xargs -I{} grep -l "execute\|query" {} 2>/dev/null | head -10</terminal>
408
+
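+ an illustrative sketch of the N+1 shape to flag, using stdlib sqlite3 (table names are hypothetical):
+
+ import sqlite3
+
+ conn = sqlite3.connect(":memory:")
+ conn.executescript(
+     "CREATE TABLE users(id INTEGER PRIMARY KEY);"
+     "CREATE TABLE orders(id INTEGER PRIMARY KEY, user_id INTEGER);"
+ )
+ user_ids = [row[0] for row in conn.execute("SELECT id FROM users")]
+
+ # N+1 pattern: one query per user inside the loop
+ for uid in user_ids:
+     conn.execute("SELECT * FROM orders WHERE user_id = ?", (uid,)).fetchall()
+
+ # batched alternative to recommend: a single query for all users
+ if user_ids:
+     placeholders = ",".join("?" * len(user_ids))
+     conn.execute(
+         f"SELECT * FROM orders WHERE user_id IN ({placeholders})", user_ids
+     ).fetchall()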
409
+
410
+ profiling data processing
411
+
412
+ <terminal>grep -rn "for.*in.*:\|while.*:" --include="*.py" . 2>/dev/null | head -30</terminal>
413
+
414
+ data processing issues:
415
+ - nested loops (O(n^2) or worse)
416
+ - repeated calculations
417
+ - no memoization
418
+ - wrong data structure choice
419
+ - list instead of set for membership
420
+
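+ a small sketch for confirming the list-vs-set suspicion with numbers instead of intuition (sizes are arbitrary):
+
+ import timeit
+
+ setup = "items = list(range(100_000)); as_set = set(items)"
+ list_time = timeit.timeit("99_999 in items", setup=setup, number=1_000)
+ set_time = timeit.timeit("99_999 in as_set", setup=setup, number=1_000)
+ print(f"list membership: {list_time:.4f}s  set membership: {set_time:.4f}s")
+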
421
+ identify algorithmic complexity:
422
+ <read><file>path/to/algorithm.py</file></read>
423
+
424
+ look for:
425
+ - nested loops
426
+ - repeated function calls in loops
427
+ - growing lists without preallocation
428
+
429
+
430
+ profiling async code
431
+
432
+ <terminal>grep -rn "async def\|await " --include="*.py" . 2>/dev/null | head -30</terminal>
433
+
434
+ async issues:
435
+ - not using await properly
436
+ - blocking calls in async functions
437
+ - sequential awaits instead of gather
438
+ - no async/await where beneficial
439
+
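+ a minimal sketch of the sequential-await issue listed above (fetch is a hypothetical stand-in for an awaitable I/O call):
+
+ import asyncio
+
+ async def fetch(i):
+     await asyncio.sleep(0.1)   # stand-in for network latency
+     return i
+
+ async def sequential():
+     # awaits run one after another: ~0.5s for 5 calls
+     return [await fetch(i) for i in range(5)]
+
+ async def concurrent():
+     # gather overlaps the waits: ~0.1s for 5 calls
+     return await asyncio.gather(*(fetch(i) for i in range(5)))
+
+ if __name__ == "__main__":
+     asyncio.run(concurrent())
+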
440
+ profile async with cProfile:
441
+ <terminal>python -c "import cProfile, asyncio; from main import main; cProfile.run('asyncio.run(main())', '/tmp/async_profile.stats')" 2>&1</terminal>
442
+
443
+
444
+ PHASE 7: SYSTEM RESOURCE PROFILING
445
+
446
+
447
+ CPU profiling with py-spy
448
+
449
+ <terminal>pip install py-spy --quiet</terminal>
450
+
451
+ spy on running process:
452
+ <terminal>py-spy top --pid $(pgrep -f "python main.py")</terminal>
453
+
454
+ live flame graph:
455
+ <terminal>py-spy record --pid $(pgrep -f "python main.py") --output /tmp/py_spy.svg --duration 30</terminal>
456
+
457
+
458
+ GIL sampling with py-spy (py-spy does not profile memory)
459
+
460
+ <terminal>py-spy record --pid $(pgrep -f "python main.py") --output /tmp/py_spy_memory.svg --format memory --duration 30</terminal>
461
+
462
+
463
+ system monitoring during profiling
464
+
465
+ <terminal>for i in {1..30}; do ps aux | grep "[p]ython" | head -5; sleep 1; done</terminal>
466
+
467
+ track CPU and memory over time.
468
+
469
+
470
+ using /usr/bin/time for detailed metrics
471
+
472
+ <terminal>/usr/bin/time -v python main.py 2>&1 | tee /tmp/detailed_time.txt</terminal>
473
+
474
+ provides:
475
+ - maximum resident set size
476
+ - page faults
477
+ - context switches
478
+ - CPU usage percentage
479
+
480
+
481
+ PHASE 8: PROFILING WEB APPLICATIONS
482
+
483
+
484
+ profile Flask applications
485
+
486
+ <terminal>grep -rn "from flask import\|import flask" --include="*.py" . 2>/dev/null | head -5</terminal>
487
+
488
+ if Flask exists, one option is wrapping the WSGI app with Werkzeug's ProfilerMiddleware:
489
+ <terminal>python -c "from app import app; app.run(profile=True)"</terminal>
490
+
491
+ this writes one cProfile .prof file per request into profile_dir (assumes the app object lives in app.py);
492
+ inspect the dumps with pstats or snakeviz.
493
+
494
+
495
+ profile FastAPI applications
496
+
497
+ <terminal>grep -rn "from fastapi import\|import fastapi" --include="*.py" . 2>/dev/null | head -5</terminal>
498
+
499
+ if FastAPI exists, use middleware:
500
+ <read><file>path/to/main.py</file></read>
501
+
502
+ look for existing profiling middleware.
503
+
504
+
505
+ profile Django applications
506
+
507
+ <terminal>grep -rn "from django import\|import django" --include="*.py" . 2>/dev/null | head -5</terminal>
508
+
509
+ Django debug toolbar for profiling:
510
+ <terminal>grep -rn "debug_toolbar" --include="*.py" . 2>/dev/null</terminal>
511
+
512
+
513
+ PHASE 9: VISUALIZING PROFILE DATA
514
+
515
+
516
+ using snakeviz
517
+
518
+ <terminal>pip install snakeviz --quiet</terminal>
519
+
520
+ view profile:
521
+ <terminal>snakeviz /tmp/profile.stats</terminal>
522
+
523
+ opens interactive visualization in browser.
524
+
525
+
526
+ using tuna
527
+
528
+ <terminal>pip install tuna --quiet</terminal>
529
+
530
+ visualize:
531
+ <terminal>tuna /tmp/profile.stats</terminal>
532
+
533
+ creates interactive icicle plot.
534
+
535
+
536
+ creating flame graphs
537
+
538
+ <terminal>pip install flameprof --quiet</terminal>
539
+
540
+ <terminal>flameprof /tmp/profile.stats > /tmp/flamegraph.svg</terminal>
541
+
542
+ view flamegraph in browser.
543
+
544
+
545
+ PHASE 10: COMPARATIVE PROFILING
546
+
547
+
548
+ compare before/after
549
+
550
+ <terminal>python -m cProfile -o before.stats main.py</terminal>
551
+ <terminal>python -m cProfile -o after.stats main_modified.py</terminal>
552
+
553
+ compare:
554
+ <terminal>python -c "import pstats; p1 = pstats.Stats('before.stats'); p2 = pstats.Stats('after.stats'); print('Before:', p1.total_calls); print('After:', p2.total_calls)"</terminal>
555
+
556
+
557
+ statistically significant measurements
558
+
559
+ <terminal>for i in {1..10}; do time python main.py; done 2>&1 | tee /tmp/timing_samples.txt</terminal>
560
+
561
+ analyze:
562
+ - calculate mean
563
+ - calculate standard deviation
564
+ - identify outliers
565
+
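+ a minimal sketch for the analysis above, assuming main.main() is importable as in phase 1:
+
+ import statistics
+ import timeit
+
+ # 10 independent runs of one call each, mirroring the shell loop above
+ samples = timeit.repeat("main.main()", setup="import main", repeat=10, number=1)
+ print(f"mean={statistics.mean(samples):.3f}s "
+       f"stdev={statistics.stdev(samples):.3f}s "
+       f"min={min(samples):.3f}s max={max(samples):.3f}s")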
566
+
567
+ PHASE 11: INTERPRETING RESULTS
568
+
569
+
570
+ classify performance issues
571
+
572
+ category 1: algorithmic complexity
573
+ - O(n^2) or worse
574
+ - nested loops over large datasets
575
+ - repeated work without memoization
576
+
577
+ category 2: I/O bound
578
+ - slow file operations
579
+ - network latency
580
+ - database queries
581
+ - disk I/O
582
+
583
+ category 3: memory issues
584
+ - memory leaks
585
+ - excessive allocations
586
+ - large data structures
587
+ - garbage collection pauses
588
+
589
+ category 4: concurrency issues
590
+ - underutilized CPU
591
+ - blocking operations
592
+ - lack of parallelization
593
+
594
+ category 5: framework/overhead
595
+ - abstraction layers
596
+ - unnecessary serialization
597
+ - reflection/dynamic code
598
+
599
+
600
+ identify quick wins
601
+
602
+ common quick wins:
603
+ - caching computed values
604
+ - using set/dict for O(1) membership lookups (see the sketch after this list)
605
+ - batch I/O operations
606
+ - pre-allocating lists
607
+ - using generators instead of lists
608
+
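+ a sketch of the caching and set-lookup quick wins to recommend (expensive_lookup and valid_ids are hypothetical) - report this as a suggestion, do not apply it yourself:
+
+ from functools import lru_cache
+
+ @lru_cache(maxsize=None)
+ def expensive_lookup(key):
+     # stand-in for repeated work with identical inputs
+     return sum(i * i for i in range(50_000))
+
+ valid_ids = set(range(1_000_000))   # set membership is O(1); a list would be O(n)
+
+ def process(items):
+     return [expensive_lookup(i) for i in items if i in valid_ids]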
609
+
610
+ identify complex fixes
611
+
612
+ requires more thought:
613
+ - algorithm redesign
614
+ - data structure changes
615
+ - architecture modifications
616
+ - introducing concurrency
617
+
618
+
619
+ PHASE 12: PERFORMANCE REPORT TEMPLATE
620
+
621
+
622
+ performance analysis report template
623
+
624
+ executive summary:
625
+ - application: [name]
626
+ - date: [date]
627
+ - baseline performance: [metrics]
628
+ - primary bottleneck: [what]
629
+
630
+ methodology:
631
+ - profiling tools used
632
+ - workload description
633
+ - measurement approach
634
+ - limitations
635
+
636
+ findings:
637
+
638
+ bottleneck 1: [name]
639
+ category: [cpu|memory|i/o|network|algorithm]
640
+ severity: [critical|high|medium|low]
641
+ location:
642
+ file: [path]
643
+ function: [name]
644
+ lines: [range]
645
+
646
+ evidence:
647
+ - cProfile output showing hot function
648
+ - percentage of total time
649
+ - comparison to baseline
650
+
651
+ impact:
652
+ - current performance
653
+ - user experience impact
654
+ - resource cost
655
+
656
+ recommendations:
657
+ - specific optimization approach
658
+ - expected improvement
659
+ - complexity estimate
660
+
661
+ [repeat for each bottleneck...]
662
+
663
+ recommendations summary:
664
+ prioritized by impact/effort:
665
+ [1] quick wins with high impact
666
+ [2] medium effort, medium impact
667
+ [3] complex fixes for long-term benefit
668
+
669
+ appendix:
670
+ - full profile output
671
+ - charts/graphs
672
+ - detailed metrics
673
+
674
+
675
+ PHASE 13: COMMON PERFORMANCE PATTERNS
676
+
677
+
678
+ pattern 1: nested loops over large data
679
+
680
+ symptoms:
681
+ - cProfile shows high cumulative time in nested function
682
+ - O(n^2) complexity visible in code
683
+ - performance degrades quadratically with input size
684
+
685
+ detection:
686
+ <terminal>grep -rn "for.*in.*:" --include="*.py" -A2 . 2>/dev/null | grep "for.*in.*:" | head -20</terminal>
687
+
688
+ look for: for within for with large datasets.
689
+
690
+
691
+ pattern 2: repeated expensive computation
692
+
693
+ symptoms:
694
+ - function called many times with same inputs
695
+ - high ncalls in cProfile
696
+ - result could be cached
697
+
698
+ detection:
699
+ <terminal>python -c "import pstats; p = pstats.Stats('/tmp/profile.stats'); p.sort_stats('calls').print_stats(20)"</terminal>
700
+
701
+ look for: same function name repeated with high ncalls.
702
+
703
+
704
+ pattern 3: list membership on large lists
705
+
706
+ symptoms:
707
+ - "if item in large_list" with large list
708
+ - O(n) lookup repeated many times
709
+
710
+ detection:
711
+ <terminal>grep -rn "in.*\[.*\]" --include="*.py" . 2>/dev/null | head -20</terminal>
712
+
713
+ look for: membership tests on list literals.
714
+
715
+
716
+ pattern 4: string concatenation in loops
717
+
718
+ symptoms:
719
+ - building string with += in loop
720
+ - O(n^2) due to string immutability
721
+
722
+ detection:
723
+ <terminal>grep -rn "for.*:.*+=" --include="*.py" . 2>/dev/null | grep -E "(str|s\\b)\\s*\\+=" | head -20</terminal>
724
+
725
+ look for: string concatenation in loop bodies.
726
+
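+ a sketch of the remediation to recommend for this pattern (build_report is hypothetical):
+
+ def build_report_slow(lines):
+     text = ""
+     for line in lines:
+         text += line + "\n"   # each += copies the whole string: O(n^2)
+     return text
+
+ def build_report_fast(lines):
+     # single pass, no quadratic copying
+     return "".join(line + "\n" for line in lines)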
727
+
728
+ pattern 5: unnecessary I/O
729
+
730
+ symptoms:
731
+ - file opened/closed repeatedly
732
+ - same file read multiple times
733
+ - many small read/write operations
734
+
735
+ detection:
736
+ <terminal>grep -rn "with open\|open(" --include="*.py" . 2>/dev/null | wc -l</terminal>
737
+ <terminal>grep -rn "for.*:.*open\|while.*:.*open" --include="*.py" . 2>/dev/null | head -20</terminal>
738
+
739
+
740
+ pattern 6: missing index hints
741
+
742
+ symptoms:
743
+ - database queries slow
744
+ - full table scans
745
+
746
+ detection:
747
+ <terminal>grep -rn "\.execute\|\.query" --include="*.py" . 2>/dev/null | head -20</terminal>
748
+
749
+ examine query patterns for unindexed lookups.
750
+
751
+
752
+ pattern 7: synchronous I/O blocking
753
+
754
+ symptoms:
755
+ - waiting for I/O
756
+ - no concurrency during I/O operations
757
+
758
+ detection:
759
+ <terminal>grep -rn "requests\.get\|urllib\.request\|urlopen" --include="*.py" . 2>/dev/null | head -20</terminal>
760
+
761
+ look for: synchronous HTTP calls in potentially parallelizable code.
762
+
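+ a stdlib-only sketch of the parallelization to recommend for this pattern (urls is hypothetical):
+
+ from concurrent.futures import ThreadPoolExecutor
+ from urllib.request import urlopen
+
+ def fetch(url, timeout=10):
+     with urlopen(url, timeout=timeout) as resp:
+         return resp.status
+
+ def fetch_all(urls, max_workers=8):
+     # threads overlap the network waits that dominate synchronous HTTP calls
+     with ThreadPoolExecutor(max_workers=max_workers) as pool:
+         return list(pool.map(fetch, urls))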
763
+
764
+ pattern 8: large object copies
765
+
766
+ symptoms:
767
+ - unexpected memory allocations
768
+ - high memory usage
769
+
770
+ detection:
771
+ <terminal>grep -rn "\.copy(\|list(data)\|dict(data)" --include="*.py" . 2>/dev/null | head -20</terminal>
772
+
773
+ look for: unnecessary copying of large data structures.
774
+
775
+
776
+ pattern 9: inefficient data structure
777
+
778
+ symptoms:
779
+ - operation slower than expected
780
+ - wrong complexity for access pattern
781
+
782
+ detection:
783
+ <terminal>grep -rn "\\[.*\\].*\\[.*\\]" --include="*.py" . 2>/dev/null | head -20</terminal>
784
+
785
+ look for: nested list access that could be dict or set.
786
+
787
+
788
+ pattern 10: premature optimization
789
+
790
+ the inverse problem: effort spent optimizing code the profiler never flagged. trust profiling, not intuition.
791
+
792
+
793
+ PHASE 14: PROFILING CHECKLIST
794
+
795
+
796
+ before profiling
797
+
798
+ [ ] have clear performance goal
799
+ [ ] establish baseline metrics
800
+ [ ] identify critical path
801
+ [ ] prepare reproducible workload
802
+ [ ] verify tools are installed
803
+
804
+
805
+ during profiling
806
+
807
+ [ ] use consistent workload
808
+ [ ] run multiple iterations
809
+ [ ] profile realistic scenarios
810
+ [ ] measure both CPU and memory
811
+ [ ] record environment details
812
+
813
+
814
+ analysis phase
815
+
816
+ [ ] identify top time consumers
817
+ [ ] identify memory allocations
818
+ [ ] classify bottlenecks by type
819
+ [ ] prioritize by impact
820
+ [ ] verify findings are reproducible
821
+
822
+
823
+ reporting phase
824
+
825
+ [ ] document methodology
826
+ [ ] provide specific file/line references
827
+ [ ] include evidence (screenshots, logs)
828
+ [ ] classify severity
829
+ [ ] suggest remediation approaches
830
+
831
+
832
+ PHASE 15: PERFORMANCE PROFILING RULES
833
+
834
+
835
+ while this skill is active, these rules are MANDATORY:
836
+
837
+ [1] NEVER optimize without profiling first
838
+ intuition is often wrong about performance
839
+ measure, then optimize
840
+
841
+ [2] ALWAYS establish a baseline
842
+ you cannot improve what you cannot measure
843
+ record before/after metrics
844
+
845
+ [3] profile realistic workloads
846
+ synthetic benchmarks may mislead
847
+ use production-like data and scenarios
848
+
849
+ [4] focus on hot paths
850
+ 80/20 rule: 20% of code accounts for 80% of time
851
+ optimize the critical path first
852
+
853
+ [5] measure twice, cut once
854
+ verify findings with multiple approaches
855
+ cross-check with different tools
856
+
857
+ [6] consider both time and space
858
+ fastest solution may use too much memory
859
+ balance trade-offs
860
+
861
+ [7] account for measurement overhead
862
+ profiling itself affects performance
863
+ understand tool overhead
864
+
865
+ [8] document findings thoroughly
866
+ file paths, line numbers, function names
867
+ include evidence and metrics
868
+
869
+ [9] prioritize by impact
870
+ what matters most to users?
871
+ what costs the most resources?
872
+
873
+ [10] recommend, don't implement
874
+ this is a research skill
875
+ provide detailed guidance for coder agent
876
+
877
+
878
+ PHASE 16: WORKFLOW GUIDE
879
+
880
+
881
+ step 1: understand the problem
882
+
883
+ [ ] what is the performance complaint?
884
+ [ ] what is acceptable performance?
885
+ [ ] when is the slowness noticed?
886
+ [ ] what are the usage patterns?
887
+
888
+
889
+ step 2: establish baseline
890
+
891
+ [ ] measure current performance
892
+ [ ] identify critical path
893
+ [ ] document system specs
894
+ [ ] record resource usage
895
+
896
+
897
+ step 3: profile
898
+
899
+ [ ] run cProfile for CPU analysis
900
+ [ ] run memory_profiler for memory
901
+ [ ] identify hot functions
902
+ [ ] identify memory allocations
903
+ [ ] classify bottleneck types
904
+
905
+
906
+ step 4: analyze
907
+
908
+ [ ] review hot path code
909
+ [ ] identify root cause
910
+ [ ] research optimization approaches
911
+ [ ] estimate improvement potential
912
+
913
+
914
+ step 5: report
915
+
916
+ [ ] document findings
917
+ [ ] provide specific recommendations
918
+ [ ] include before/after comparison potential
919
+ [ ] prioritize by impact/effort
920
+
921
+
922
+ step 6: validate recommendations
923
+
924
+ [ ] ensure suggestions are actionable
925
+ [ ] verify approach is sound
926
+ [ ] check for side effects
927
+ [ ] estimate improvement range
928
+
929
+
930
+ PHASE 17: INTERPRETING COMMON METRICS
931
+
932
+
933
+ wall clock time (real)
934
+
935
+ what it measures: total elapsed time
936
+ what it indicates: user-perceived performance
937
+ factors: CPU + I/O + waiting
938
+
939
+
940
+ user CPU time
941
+
942
+ what it measures: CPU time in user mode
943
+ what it indicates: application computation
944
+ factors: algorithmic complexity, code efficiency
945
+
946
+
947
+ system CPU time
948
+
949
+ what it measures: CPU time in kernel mode
950
+ what it indicates: system calls, I/O
951
+ factors: file operations, network, context switches
952
+
953
+
954
+ memory usage (RSS)
955
+
956
+ what it measures: resident set size
957
+ what it indicates: physical memory used
958
+ factors: data structures, allocations
959
+
960
+
961
+ memory increment
962
+
963
+ what it measures: change in memory
964
+ what it indicates: allocation at specific point
965
+ factors: large objects, caching
966
+
967
+
968
+ hit count
969
+
970
+ what it measures: how many times executed
971
+ what it indicates: loop iterations, function calls
972
+ factors: algorithm, input size
973
+
974
+
975
+ PHASE 18: TOOL REFERENCE
976
+
977
+
978
+ cProfile quick reference
979
+
980
+ basic profile:
981
+ python -m cProfile -o output.stats script.py
982
+
983
+ sorted output:
984
+ python -m cProfile -s cumulative script.py
985
+
986
+ interactive analysis:
987
+ python -m pstats output.stats
988
+
989
+ useful pstats commands:
990
+ stats [n] # show top n
991
+ strip # remove path
992
+ sort <metric> # cumulative|time|calls
993
+ callers <name> # who calls function
994
+ callees <name> # what function calls
995
+
996
+
997
+ line_profiler quick reference
998
+
999
+ add @profile decorator to function
1000
+ run: kernprof -l -v script.py
1001
+
1002
+ or view the saved results file:
1003
+ python -m line_profiler script.py.lprof
1004
+
1005
+
1006
+ memory_profiler quick reference
1007
+
1008
+ add @profile decorator
1009
+ run: python -m memory_profiler script.py
1010
+
1011
+ mprof commands:
1012
+ mprof run script.py
1013
+ mprof plot
1014
+ mprof clean
1015
+
1016
+
1017
+ py-spy quick reference
1018
+
1019
+ sudo py-spy top --pid <pid>
1020
+ sudo py-spy record --pid <pid> --output output.svg
1021
+ sudo py-spy dump --pid <pid>
1022
+
1023
+
1024
+ FINAL REMINDERS
1025
+
1026
+
1027
+ profiling reveals truth
1028
+
1029
+ your code is not slow where you think it is.
1030
+ only data can tell you where to focus.
1031
+ trust the profiler, not your intuition.
1032
+
1033
+
1034
+ performance is a feature
1035
+
1036
+ slow applications frustrate users.
1037
+ efficient applications scale better.
1038
+ your analysis enables better software.
1039
+
1040
+
1041
+ measure what matters
1042
+
1043
+ profile realistic scenarios.
1044
+ measure user-visible latency.
1045
+ optimize the critical path.
1046
+
1047
+ find the bottlenecks before the users do.