code2llm 0.5.14.tar.gz → 0.5.16.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. {code2llm-0.5.14 → code2llm-0.5.16}/PKG-INFO +34 -69
  2. {code2llm-0.5.14 → code2llm-0.5.16}/README.md +33 -68
  3. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/__init__.py +1 -1
  4. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/analyzer.py +48 -8
  5. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/core/refactoring.py +20 -1
  6. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/__init__.py +1 -1
  7. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/PKG-INFO +34 -69
  8. {code2llm-0.5.14 → code2llm-0.5.16}/pyproject.toml +1 -1
  9. {code2llm-0.5.14 → code2llm-0.5.16}/LICENSE +0 -0
  10. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/__main__.py +0 -0
  11. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/__init__.py +0 -0
  12. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/call_graph.py +0 -0
  13. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/cfg.py +0 -0
  14. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/coupling.py +0 -0
  15. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/data_analysis.py +0 -0
  16. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/dfg.py +0 -0
  17. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/pipeline_detector.py +0 -0
  18. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/side_effects.py +0 -0
  19. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/smells.py +0 -0
  20. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/analysis/type_inference.py +0 -0
  21. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/cli.py +0 -0
  22. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/__init__.py +0 -0
  23. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/config.py +0 -0
  24. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/core/__init__.py +0 -0
  25. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/core/file_analyzer.py +0 -0
  26. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/core/file_cache.py +0 -0
  27. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/core/file_filter.py +0 -0
  28. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/models.py +0 -0
  29. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming/__init__.py +0 -0
  30. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming/cache.py +0 -0
  31. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming/incremental.py +0 -0
  32. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming/prioritizer.py +0 -0
  33. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming/scanner.py +0 -0
  34. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming/strategies.py +0 -0
  35. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/streaming_analyzer.py +0 -0
  36. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/__init__.py +0 -0
  37. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/base.py +0 -0
  38. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/context_exporter.py +0 -0
  39. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/evolution_exporter.py +0 -0
  40. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/flow_constants.py +0 -0
  41. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/flow_exporter.py +0 -0
  42. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/flow_renderer.py +0 -0
  43. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/json_exporter.py +0 -0
  44. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/llm_exporter.py +0 -0
  45. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/map_exporter.py +0 -0
  46. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/mermaid_exporter.py +0 -0
  47. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/readme_exporter.py +0 -0
  48. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/toon/__init__.py +0 -0
  49. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/toon/helpers.py +0 -0
  50. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/toon/metrics.py +0 -0
  51. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/toon/module_detail.py +0 -0
  52. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/toon/renderer.py +0 -0
  53. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/toon.py +0 -0
  54. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/exporters/yaml_exporter.py +0 -0
  55. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/generators/__init__.py +0 -0
  56. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/generators/llm_flow.py +0 -0
  57. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/generators/llm_task.py +0 -0
  58. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/generators/mermaid.py +0 -0
  59. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/config.py +0 -0
  60. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/entity_resolution.py +0 -0
  61. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/intent_matching.py +0 -0
  62. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/normalization.py +0 -0
  63. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/pipeline.py +0 -0
  64. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/patterns/__init__.py +0 -0
  65. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/patterns/detector.py +0 -0
  66. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/refactor/__init__.py +0 -0
  67. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm/refactor/prompt_engine.py +0 -0
  68. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/SOURCES.txt +0 -0
  69. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/dependency_links.txt +0 -0
  70. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/entry_points.txt +0 -0
  71. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/requires.txt +0 -0
  72. {code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/top_level.txt +0 -0
  73. {code2llm-0.5.14 → code2llm-0.5.16}/setup.cfg +0 -0
  74. {code2llm-0.5.14 → code2llm-0.5.16}/setup.py +0 -0
  75. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_advanced_analysis.py +0 -0
  76. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_analyzer.py +0 -0
  77. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_deep_analysis.py +0 -0
  78. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_edge_cases.py +0 -0
  79. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_flow_exporter.py +0 -0
  80. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_format_quality.py +0 -0
  81. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_nlp_pipeline.py +0 -0
  82. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_pipeline_detector.py +0 -0
  83. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_prompt_engine.py +0 -0
  84. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_prompt_txt.py +0 -0
  85. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_refactoring_engine.py +0 -0
  86. {code2llm-0.5.14 → code2llm-0.5.16}/tests/test_toon_v2.py +0 -0

{code2llm-0.5.14 → code2llm-0.5.16}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: code2llm
- Version: 0.5.14
+ Version: 0.5.16
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
  Home-page: https://github.com/wronai/stts
  Author: STTS Project
@@ -60,48 +60,18 @@ When you run `code2llm ./ -f all`, the following files are created:

  | File | Format | Purpose | Key Insights |
  |------|--------|---------|--------------|
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 45 critical functions, 0 god modules |
- | `evolution.toon` | **TOON** | **📋 Refactoring queue** - Prioritized improvements | 0 refactoring actions needed |
- | `flow.toon` | **TOON** | **🔄 Data flow analysis** - Pipelines, contracts, types | Data dependencies and side effects |
- | `map.toon` | **TOON** | **🗺️ Structural map** - Modules, imports, signatures | Project architecture overview |
- | `project.toon` | **TOON** | **🧠 Project logic** - Code2Logic compact module view | Generated via code2logic integration |
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
+ | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |

  ### 🤖 LLM-Ready Documentation

  | File | Format | Purpose | Use Case |
  |------|--------|---------|----------|
  | `context.md` | **Markdown** | **📖 LLM narrative** - Architecture summary | Paste into ChatGPT/Claude for code analysis |
- | `prompt.txt` | **Text** | **📝 Ready-to-send prompt** - Lists all generated files with instructions | Attach to LLM conversation as context guide |
- | `analysis.yaml` | **YAML** | **📊 Structured data** - Machine-readable | For scripts and automated processing |
- | `analysis.json` | **JSON** | **🔧 API format** - Programmatic access | For integration with other tools |

- ### 📊 Visualizations
-
- | File | Format | Purpose | Description |
- |------|--------|---------|-------------|
- | `flow.mmd` | **Mermaid** | **🔄 Control flow diagram** | Function call paths with complexity styling |
- | `calls.mmd` | **Mermaid** | **📞 Call graph** | Function dependencies (edges only) |
- | `compact_flow.mmd` | **Mermaid** | **📦 Module overview** | Aggregated module-level view |
- | `*.png` | **PNG** | **🖼️ Visual diagrams** | Rendered versions of Mermaid files |

  ## 🚀 Quick Start Commands

- ### Complete LLM Analysis (with Code2Logic)
- ```bash
- # Generate complete analysis with code2logic integration
- # Creates: analysis.toon, evolution.toon, project.toon, prompt.txt, README.md
-
- # Using specific formats (recommended)
- code2llm ./ -f toon,evolution,code2logic -o ./project
-
- # Using 'all' format (includes code2logic)
- code2llm ./ -f all -o ./project
- ```
-
- The `code2logic` format automatically:
- 1. Runs `code2logic` tool to generate `project.toon` (compact module view)
- 2. Creates `prompt.txt` - ready-to-send prompt listing all generated files
-
  ### Basic Analysis
  ```bash
  # Quick health check (TOON format only)
@@ -114,12 +84,6 @@ code2llm ./ -f all
  code2llm ./ -f context
  ```

- Example from the toonic project, using the Kimi K2.5 model in VSCode Windsurf:
- ![img_1.png](img_1.png)
- ```bash
- code2llm ./ -f toon,evolution -o ./project
- ```
-
  ### Performance Options
  ```bash
  # Fast analysis for large projects
@@ -213,23 +177,6 @@ cat map.toon | head -50
  grep "SIGNATURES" map.toon
  ```

- ### `context.md` - LLM Narrative
- **Purpose**: Ready-to-paste context for AI assistants
- **Key sections**:
- - **Overview**: Project statistics
- - **Architecture**: Module breakdown
- - **Entry Points**: Public interfaces
- - **Patterns**: Design patterns detected
-
- **Example usage**:
- ```bash
- # Copy to clipboard for LLM
- cat context.md | pbcopy # macOS
- cat context.md | xclip -sel clip # Linux
-
- # Use with Claude/ChatGPT for code analysis
- ```
-
  ### `project.toon` - Project Logic (Code2Logic)
  **Purpose**: Compact module view generated by code2logic integration
  **Key sections**:
@@ -239,31 +186,49 @@ cat context.md | xclip -sel clip # Linux

  **When to use**: When you need a lightweight project overview combined with code2llm analysis

- **Generation**:
+ **Example usage**:
  ```bash
- # Generated automatically when using -f code2logic
- code2llm ./ -f toon,evolution,code2logic -o ./project
+ # View compact project structure
+ cat project.toon | head -30
+
+ # Find largest files
+ grep -E "^ .*[0-9]{3,}$" project.toon | sort -t',' -k2 -n -r | head -10
  ```

  ### `prompt.txt` - Ready-to-Send LLM Prompt
  **Purpose**: Pre-formatted prompt listing all generated files for LLM conversation
  **Contents**:
- - **Files section**: Lists all existing generated files (analysis.toon, context.md, etc.)
+ - **Files section**: Lists all existing generated files with descriptions
  - **Missing section**: Shows which files weren't generated (if any)
  - **Task section**: Instructions for LLM analysis
- - **Constraints section**: Guidelines for suggested changes
+ - **Requirements section**: Guidelines for suggested changes

  **Example usage**:
  ```bash
  # View the prompt
- cat project/prompt.txt
+ cat prompt.txt

  # Copy to clipboard and paste into ChatGPT/Claude
- cat project/prompt.txt | pbcopy # macOS
- cat project/prompt.txt | xclip -sel clip # Linux
+ cat prompt.txt | pbcopy # macOS
+ cat prompt.txt | xclip -sel clip # Linux
  ```

- **Generated when**: Using `-f code2logic` or `-f all`
+ ### `context.md` - LLM Narrative
+ **Purpose**: Ready-to-paste context for AI assistants
+ **Key sections**:
+ - **Overview**: Project statistics
+ - **Architecture**: Module breakdown
+ - **Entry Points**: Public interfaces
+ - **Patterns**: Design patterns detected
+
+ **Example usage**:
+ ```bash
+ # Copy to clipboard for LLM
+ cat context.md | pbcopy # macOS
+ cat context.md | xclip -sel clip # Linux
+
+ # Use with Claude/ChatGPT for code analysis
+ ```

  ### Visualization Files (`*.mmd`, `*.png`)
  **Purpose**: Visual understanding of code structure
@@ -409,10 +374,10 @@ code2llm ./ -f yaml --separate-orphans
  ---

  **Generated by**: `code2llm ./ -f all --readme`
- **Analysis Date**: 2026-03-01
- **Total Functions**: 645
- **Total Classes**: 94
- **Modules**: 76
+ **Analysis Date**: 2026-03-03
+ **Total Functions**: 700
+ **Total Classes**: 97
+ **Modules**: 83

  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm


{code2llm-0.5.14 → code2llm-0.5.16}/README.md

The README.md hunks (@@ -10,48 +10,18 @@, @@ -64,12 +34,6 @@, @@ -163,23 +127,6 @@, @@ -189,31 +136,49 @@, @@ -359,10 +324,10 @@) carry the same content changes as the PKG-INFO hunks above, since PKG-INFO embeds the README as its long description. Only the hunk positions differ (shifted by the 50 lines of packaging metadata that PKG-INFO prepends), and the PKG-INFO-only `Version:` metadata change accounts for its one extra changed line (+34 -69 vs +33 -68).


{code2llm-0.5.14 → code2llm-0.5.16}/code2llm/__init__.py

@@ -8,7 +8,7 @@ Includes NLP Processing Pipeline for query normalization, intent matching,
  and entity resolution with multilingual support.
  """

- __version__ = "0.5.13"
+ __version__ = "0.5.16"
  __author__ = "STTS Project"

  # Core analysis components

{code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/analyzer.py

@@ -37,6 +37,7 @@ class ProjectAnalyzer:

  if self.config.verbose:
  print(f"Found {len(files)} files to analyze")
+ print(f" - Parallel: {self.config.performance.parallel_enabled}, Workers: {self.config.performance.parallel_workers}")

  # Analyze files
  if self.config.performance.parallel_enabled and len(files) > 1:
@@ -54,7 +55,11 @@
  self._detect_patterns(merged)

  # Refactoring analysis
+ if self.config.verbose:
+ print(f" - Running refactoring analysis...", flush=True)
  self.refactoring_analyzer.perform_refactoring_analysis(merged)
+ if self.config.verbose:
+ print(f" - Refactoring analysis complete", flush=True)

  # Calculate stats
  elapsed = time.time() - start_time
@@ -121,6 +126,7 @@
  }

  # Collect results as they complete
+ completed = 0
  for future in as_completed(future_to_file):
  file_path, module_name = future_to_file[future]
  try:
@@ -130,6 +136,9 @@
  except Exception as e:
  if self.config.verbose:
  print(f"Error analyzing {file_path}: {e}")
+ completed += 1
+ if self.config.verbose and completed % 10 == 0:
+ print(f" - Progress: {completed}/{len(files)} files analyzed ({completed*100//len(files)}%)", flush=True)

  return results

@@ -137,8 +146,9 @@
  """Analyze files sequentially."""
  results = []
  analyzer = FileAnalyzer(self.config, self.cache)
+ total = len(files)

- for file_path, module_name in files:
+ for i, (file_path, module_name) in enumerate(files, 1):
  try:
  result = analyzer.analyze_file(file_path, module_name)
  if result:
@@ -146,6 +156,8 @@
  except Exception as e:
  if self.config.verbose:
  print(f"Error analyzing {file_path}: {e}")
+ if self.config.verbose and (i % 10 == 0 or i == total):
+ print(f" - Progress: {i}/{total} files analyzed ({i*100//total}%)", flush=True)

  return results
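
The hunks above add periodic, flushed progress output to both the parallel and sequential analysis loops. The following is a minimal standalone sketch of the same pattern; the `analyze_one` worker and the use of `ProcessPoolExecutor` are assumptions for illustration, not the package's actual `FileAnalyzer` API or executor choice.

```python
# Sketch only: hypothetical analyze_one() worker, not code2llm's real API.
# Shows as_completed() collection with progress printed every 10 files.
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
from typing import List


def analyze_one(path: Path) -> str:
    """Placeholder per-file analysis."""
    return path.name


def analyze_all(files: List[Path], verbose: bool = True) -> List[str]:
    results: List[str] = []
    with ProcessPoolExecutor() as pool:
        futures = {pool.submit(analyze_one, f): f for f in files}
        completed = 0
        for future in as_completed(futures):
            try:
                results.append(future.result())
            except Exception as exc:
                if verbose:
                    print(f"Error analyzing {futures[future]}: {exc}")
            completed += 1
            # Report every 10 files; flush so output appears promptly.
            if verbose and (completed % 10 == 0 or completed == len(files)):
                pct = completed * 100 // len(files)
                print(f"Progress: {completed}/{len(files)} files ({pct}%)", flush=True)
    return results


if __name__ == "__main__":
    print(analyze_all([Path(f"module_{i}.py") for i in range(25)]))
```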
 
@@ -184,20 +196,48 @@

  def _build_call_graph(self, result: AnalysisResult) -> None:
  """Build call graph and find entry points."""
+ if self.config.verbose:
+ print(f" - Building call graph for {len(result.functions)} functions...", flush=True)
+
+ # Build lookup maps for O(1) resolution
+ # Map simple name -> list of full names (for overloaded methods)
+ simple_to_full: Dict[str, List[str]] = {}
+ for known_name in result.functions:
+ simple_name = known_name.split('.')[-1]
+ if simple_name not in simple_to_full:
+ simple_to_full[simple_name] = []
+ simple_to_full[simple_name].append(known_name)
+
  # Map calls between functions
  for func_name, func in result.functions.items():
- for called in func.calls:
- # Try to resolve to a known function
- for known_name in result.functions:
- if known_name.endswith(f".{called}") or known_name == called:
- func.calls[func.calls.index(called)] = known_name
- result.functions[known_name].called_by.append(func_name)
- break
+ for idx, called in enumerate(func.calls):
+ # Try exact match first, then suffix match
+ if called in result.functions:
+ resolved = called
+ elif called in simple_to_full:
+ # Use first match if multiple (common case: single match)
+ candidates = simple_to_full[called]
+ # Prefer exact module match if available
+ resolved = None
+ for cand in candidates:
+ if func_name.rsplit('.', 1)[0] == cand.rsplit('.', 1)[0]:
+ resolved = cand
+ break
+ if resolved is None:
+ resolved = candidates[0]
+ else:
+ continue # Unknown function
+
+ func.calls[idx] = resolved
+ result.functions[resolved].called_by.append(func_name)

  # Find entry points (not called by anything)
  for func_name, func in result.functions.items():
  if not func.called_by:
  result.entry_points.append(func_name)
+
+ if self.config.verbose:
+ print(f" - Call graph complete: {len(result.entry_points)} entry points found", flush=True)

  def _detect_patterns(self, result: AnalysisResult) -> None:
  """Detect behavioral patterns."""

{code2llm-0.5.14 → code2llm-0.5.16}/code2llm/core/core/refactoring.py

@@ -67,7 +67,16 @@
  """Calculate betweenness centrality for bottleneck detection."""
  if len(call_graph) > 0:
  try:
- centrality = nx.betweenness_centrality(call_graph)
+ node_count = len(call_graph)
+ # For large graphs, use sampling to avoid exponential time complexity
+ if node_count > 500:
+ if self.config.verbose:
+ print(f" Large graph ({node_count} nodes), using sampled centrality...")
+ # Sample 20% of nodes, max 500
+ k = min(int(node_count * 0.2), 500)
+ centrality = nx.betweenness_centrality(call_graph, k=k)
+ else:
+ centrality = nx.betweenness_centrality(call_graph)
  for func_name, score in centrality.items():
  if func_name in result.functions:
  result.functions[func_name].centrality = score
@@ -78,6 +87,11 @@
  def _detect_cycles(self, call_graph: nx.DiGraph, result: AnalysisResult) -> None:
  """Detect circular dependencies."""
  try:
+ # Limit cycle detection for large graphs
+ if len(call_graph) > 1000:
+ if self.config.verbose:
+ print(f" Skipping cycle detection for large graph ({len(call_graph)} nodes)")
+ return
  cycles = list(nx.simple_cycles(call_graph))
  if cycles:
  result.metrics["project"] = result.metrics.get("project", {})
@@ -89,6 +103,11 @@
  def _detect_communities(self, call_graph: nx.DiGraph, result: AnalysisResult) -> None:
  """Detect communities (module groups)."""
  try:
+ # Limit community detection for large graphs
+ if len(call_graph) > 1000:
+ if self.config.verbose:
+ print(f" Skipping community detection for large graph ({len(call_graph)} nodes)")
+ return
  from networkx.algorithms import community
  # Using Louvain if available, otherwise greedy modularity
  if hasattr(community, 'louvain_communities'):
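
The refactoring.py hunks guard the networkx graph algorithms by node count: betweenness centrality switches to k-node sampling above 500 nodes, and cycle/community detection are skipped above 1000. The sketch below illustrates only the sampled-centrality part on a synthetic digraph; the 500-node threshold and 20% sample mirror the diff, while the random graph, seed, and function name are illustrative assumptions.

```python
# Sketch only: size-guarded betweenness centrality, demonstrated on a
# synthetic random digraph rather than a real call graph.
import networkx as nx


def bottleneck_scores(graph: nx.DiGraph, max_exact: int = 500) -> dict:
    """Betweenness centrality: exact for small graphs, sampled for large ones."""
    n = len(graph)
    if n == 0:
        return {}
    if n > max_exact:
        k = min(int(n * 0.2), 500)  # number of sampled source nodes
        return nx.betweenness_centrality(graph, k=k, seed=42)
    return nx.betweenness_centrality(graph)


if __name__ == "__main__":
    g = nx.gnp_random_graph(800, 0.01, directed=True, seed=1)
    scores = bottleneck_scores(g)
    top5 = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:5]
    print(top5)  # the most "between" nodes, i.e. likely bottlenecks
```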

{code2llm-0.5.14 → code2llm-0.5.16}/code2llm/nlp/__init__.py

@@ -4,7 +4,7 @@ Provides query normalization, intent matching, and entity resolution
  with multilingual support and fuzzy matching.
  """

- __version__ = "0.5.13"
+ __version__ = "0.5.16"

  from .pipeline import NLPPipeline
  from .normalization import QueryNormalizer

{code2llm-0.5.14 → code2llm-0.5.16}/code2llm.egg-info/PKG-INFO

The changes to code2llm.egg-info/PKG-INFO (+34 -69) are a verbatim copy of the PKG-INFO hunks shown above, since the egg-info metadata is regenerated from the same project metadata and README.

{code2llm-0.5.14 → code2llm-0.5.16}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "code2llm"
- version = "0.5.14"
+ version = "0.5.16"
  description = "High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries"
  readme = "README.md"
  requires-python = ">=3.8"