code2llm 0.5.25__tar.gz → 0.5.27__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. {code2llm-0.5.25 → code2llm-0.5.27}/PKG-INFO +6 -63
  2. {code2llm-0.5.25 → code2llm-0.5.27}/README.md +5 -62
  3. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/__init__.py +1 -1
  4. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/call_graph.py +1 -11
  5. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/cli.py +1 -17
  6. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/core/file_analyzer.py +4 -30
  7. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/nlp/__init__.py +1 -1
  8. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm.egg-info/PKG-INFO +6 -63
  9. {code2llm-0.5.25 → code2llm-0.5.27}/pyproject.toml +1 -1
  10. {code2llm-0.5.25 → code2llm-0.5.27}/LICENSE +0 -0
  11. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/__main__.py +0 -0
  12. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/__init__.py +0 -0
  13. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/cfg.py +0 -0
  14. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/coupling.py +0 -0
  15. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/data_analysis.py +0 -0
  16. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/dfg.py +0 -0
  17. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/pipeline_detector.py +0 -0
  18. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/side_effects.py +0 -0
  19. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/smells.py +0 -0
  20. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/analysis/type_inference.py +0 -0
  21. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/cli_exports.py +0 -0
  22. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/__init__.py +0 -0
  23. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/analyzer.py +0 -0
  24. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/config.py +0 -0
  25. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/core/__init__.py +0 -0
  26. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/core/file_cache.py +0 -0
  27. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/core/file_filter.py +0 -0
  28. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/core/refactoring.py +0 -0
  29. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/large_repo.py +0 -0
  30. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/models.py +0 -0
  31. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming/__init__.py +0 -0
  32. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming/cache.py +0 -0
  33. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming/incremental.py +0 -0
  34. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming/prioritizer.py +0 -0
  35. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming/scanner.py +0 -0
  36. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming/strategies.py +0 -0
  37. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/streaming_analyzer.py +0 -0
  38. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/core/toon_size_manager.py +0 -0
  39. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/__init__.py +0 -0
  40. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/base.py +0 -0
  41. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/context_exporter.py +0 -0
  42. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/evolution_exporter.py +0 -0
  43. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/flow_constants.py +0 -0
  44. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/flow_exporter.py +0 -0
  45. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/flow_renderer.py +0 -0
  46. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/json_exporter.py +0 -0
  47. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/llm_exporter.py +0 -0
  48. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/map_exporter.py +0 -0
  49. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/mermaid_exporter.py +0 -0
  50. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/readme_exporter.py +0 -0
  51. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/toon/__init__.py +0 -0
  52. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/toon/helpers.py +0 -0
  53. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/toon/metrics.py +0 -0
  54. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/toon/module_detail.py +0 -0
  55. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/toon/renderer.py +0 -0
  56. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/toon.py +0 -0
  57. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/exporters/yaml_exporter.py +0 -0
  58. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/generators/__init__.py +0 -0
  59. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/generators/llm_flow.py +0 -0
  60. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/generators/llm_task.py +0 -0
  61. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/generators/mermaid.py +0 -0
  62. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/nlp/config.py +0 -0
  63. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/nlp/entity_resolution.py +0 -0
  64. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/nlp/intent_matching.py +0 -0
  65. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/nlp/normalization.py +0 -0
  66. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/nlp/pipeline.py +0 -0
  67. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/patterns/__init__.py +0 -0
  68. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/patterns/detector.py +0 -0
  69. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/refactor/__init__.py +0 -0
  70. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm/refactor/prompt_engine.py +0 -0
  71. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm.egg-info/SOURCES.txt +0 -0
  72. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm.egg-info/dependency_links.txt +0 -0
  73. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm.egg-info/entry_points.txt +0 -0
  74. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm.egg-info/requires.txt +0 -0
  75. {code2llm-0.5.25 → code2llm-0.5.27}/code2llm.egg-info/top_level.txt +0 -0
  76. {code2llm-0.5.25 → code2llm-0.5.27}/setup.cfg +0 -0
  77. {code2llm-0.5.25 → code2llm-0.5.27}/setup.py +0 -0
  78. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_advanced_analysis.py +0 -0
  79. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_analyzer.py +0 -0
  80. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_deep_analysis.py +0 -0
  81. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_edge_cases.py +0 -0
  82. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_flow_exporter.py +0 -0
  83. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_format_quality.py +0 -0
  84. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_nlp_pipeline.py +0 -0
  85. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_pipeline_detector.py +0 -0
  86. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_prompt_engine.py +0 -0
  87. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_prompt_txt.py +0 -0
  88. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_refactoring_engine.py +0 -0
  89. {code2llm-0.5.25 → code2llm-0.5.27}/tests/test_toon_v2.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: code2llm
3
- Version: 0.5.25
3
+ Version: 0.5.27
4
4
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
5
5
  Home-page: https://github.com/wronai/stts
6
6
  Author: STTS Project
@@ -60,7 +60,7 @@ When you run `code2llm ./ -f all`, the following files are created:
60
60
 
61
61
  | File | Format | Purpose | Key Insights |
62
62
  |------|--------|---------|--------------|
63
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
63
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 46 critical functions, 0 god modules |
64
64
  | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |
65
65
 
66
66
  ### 🤖 LLM-Ready Documentation
@@ -96,63 +96,6 @@ code2llm ./ -f all --max-memory 500
96
96
  code2llm ./ -f all --no-png
97
97
  ```
98
98
 
99
- ### Large Repository Analysis (Hierarchical Chunking)
100
- For large repositories, automatic hierarchical chunking ensures each output file stays under 256KB:
101
-
102
- ```bash
103
- # Auto-chunking when estimated output >256KB
104
- code2llm ./ -f toon,evolution,code2logic --verbose
105
-
106
- # Force chunking with custom size limit
107
- code2llm ./ -f toon --chunk --chunk-size 256
108
-
109
- # Analyze only specific subproject (matches level-1 or level-2 names)
110
- code2llm ./ -f toon --only-subproject src
111
- code2llm ./ -f toon --only-subproject src.core
112
-
113
- # Skip specific directories
114
- code2llm ./ -f toon --skip-subprojects tests examples docs
115
-
116
- # Customize chunking parameters
117
- code2llm ./ -f toon --chunk --max-files-per-chunk 50 --chunk-size 512
118
- ```
119
-
120
- **Hierarchical Splitting Strategy:**
121
- 1. **Level 0**: Entire project (if small enough, <256KB)
122
- 2. **Level 1**: Top-level directories (src/, tests/, examples/)
123
- 3. **Level 2**: Subdirectories if parent >256KB (src.core/, src.utils/)
124
- 4. **Level 3**: File chunks if still too large
125
-
126
- **Example Output Structure:**
127
- ```
128
- ./project/
129
- ├── src/ # Level 1: src/ fits in 256KB
130
- │ ├── analysis.toon # (~200KB)
131
- │ └── evolution.toon
132
- ├── src_core/ # Level 2: src/core/ was too big
133
- │ ├── analysis.toon # (~180KB)
134
- │ └── evolution.toon
135
- ├── src_utils_part1/ # Level 3: split by file count
136
- │ └── analysis.toon # (~150KB)
137
- ├── tests/ # Level 1: tests/
138
- │ └── analysis.toon
139
- ├── examples/ # Level 1: examples/
140
- │ └── analysis.toon
141
- ├── analysis.toon # Merged summary (all levels)
142
- └── evolution.toon # Full refactoring queue
143
- ```
144
-
145
- **Size Estimation:**
146
- - ~3KB per Python file in TOON format
147
- - Auto-detect chunking when: `file_count × 3KB > 256KB`
148
- - Example: 100 files ≈ 300KB → triggers chunking
149
-
150
- **Benefits:**
151
- - Each output file <256KB (easy for LLMs to process)
152
- - Natural code boundaries (module/submodule level)
153
- - Incremental analysis possible
154
- - Parallel processing ready
155
-
156
99
  ### Refactoring Focus
157
100
  ```bash
158
101
  # Get refactoring recommendations
@@ -431,10 +374,10 @@ code2llm ./ -f yaml --separate-orphans
431
374
  ---
432
375
 
433
376
  **Generated by**: `code2llm ./ -f all --readme`
434
- **Analysis Date**: 2026-03-03
435
- **Total Functions**: 700
436
- **Total Classes**: 97
437
- **Modules**: 83
377
+ **Analysis Date**: 2026-03-04
378
+ **Total Functions**: 750
379
+ **Total Classes**: 99
380
+ **Modules**: 90
438
381
 
439
382
  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm
440
383
 
@@ -10,7 +10,7 @@ When you run `code2llm ./ -f all`, the following files are created:
10
10
 
11
11
  | File | Format | Purpose | Key Insights |
12
12
  |------|--------|---------|--------------|
13
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
13
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 46 critical functions, 0 god modules |
14
14
  | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |
15
15
 
16
16
  ### 🤖 LLM-Ready Documentation
@@ -46,63 +46,6 @@ code2llm ./ -f all --max-memory 500
46
46
  code2llm ./ -f all --no-png
47
47
  ```
48
48
 
49
- ### Large Repository Analysis (Hierarchical Chunking)
50
- For large repositories, automatic hierarchical chunking ensures each output file stays under 256KB:
51
-
52
- ```bash
53
- # Auto-chunking when estimated output >256KB
54
- code2llm ./ -f toon,evolution,code2logic --verbose
55
-
56
- # Force chunking with custom size limit
57
- code2llm ./ -f toon --chunk --chunk-size 256
58
-
59
- # Analyze only specific subproject (matches level-1 or level-2 names)
60
- code2llm ./ -f toon --only-subproject src
61
- code2llm ./ -f toon --only-subproject src.core
62
-
63
- # Skip specific directories
64
- code2llm ./ -f toon --skip-subprojects tests examples docs
65
-
66
- # Customize chunking parameters
67
- code2llm ./ -f toon --chunk --max-files-per-chunk 50 --chunk-size 512
68
- ```
69
-
70
- **Hierarchical Splitting Strategy:**
71
- 1. **Level 0**: Entire project (if small enough, <256KB)
72
- 2. **Level 1**: Top-level directories (src/, tests/, examples/)
73
- 3. **Level 2**: Subdirectories if parent >256KB (src.core/, src.utils/)
74
- 4. **Level 3**: File chunks if still too large
75
-
76
- **Example Output Structure:**
77
- ```
78
- ./project/
79
- ├── src/ # Level 1: src/ fits in 256KB
80
- │ ├── analysis.toon # (~200KB)
81
- │ └── evolution.toon
82
- ├── src_core/ # Level 2: src/core/ was too big
83
- │ ├── analysis.toon # (~180KB)
84
- │ └── evolution.toon
85
- ├── src_utils_part1/ # Level 3: split by file count
86
- │ └── analysis.toon # (~150KB)
87
- ├── tests/ # Level 1: tests/
88
- │ └── analysis.toon
89
- ├── examples/ # Level 1: examples/
90
- │ └── analysis.toon
91
- ├── analysis.toon # Merged summary (all levels)
92
- └── evolution.toon # Full refactoring queue
93
- ```
94
-
95
- **Size Estimation:**
96
- - ~3KB per Python file in TOON format
97
- - Auto-detect chunking when: `file_count × 3KB > 256KB`
98
- - Example: 100 files ≈ 300KB → triggers chunking
99
-
100
- **Benefits:**
101
- - Each output file <256KB (easy for LLMs to process)
102
- - Natural code boundaries (module/submodule level)
103
- - Incremental analysis possible
104
- - Parallel processing ready
105
-
106
49
  ### Refactoring Focus
107
50
  ```bash
108
51
  # Get refactoring recommendations
@@ -381,10 +324,10 @@ code2llm ./ -f yaml --separate-orphans
381
324
  ---
382
325
 
383
326
  **Generated by**: `code2llm ./ -f all --readme`
384
- **Analysis Date**: 2026-03-03
385
- **Total Functions**: 700
386
- **Total Classes**: 97
387
- **Modules**: 83
327
+ **Analysis Date**: 2026-03-04
328
+ **Total Functions**: 750
329
+ **Total Classes**: 99
330
+ **Modules**: 90
388
331
 
389
332
  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm
390
333
 
@@ -8,7 +8,7 @@ Includes NLP Processing Pipeline for query normalization, intent matching,
8
8
  and entity resolution with multilingual support.
9
9
  """
10
10
 
11
- __version__ = "0.5.25"
11
+ __version__ = "0.5.27"
12
12
  __author__ = "STTS Project"
13
13
 
14
14
  # Core analysis components
@@ -32,21 +32,11 @@ class CallGraphExtractor(ast.NodeVisitor):
32
32
  self.class_stack = []
33
33
  self.imports = {}
34
34
 
35
- # Suppress stderr at OS level to avoid syntax error messages from astroid/C parser
36
- import os
37
- null_fd = os.open(os.devnull, os.O_WRONLY)
38
- old_stderr_fd = os.dup(2)
39
- os.dup2(null_fd, 2)
40
-
35
+ # Try to get astroid tree for better resolution
41
36
  try:
42
- # Try to get astroid tree for better resolution
43
37
  self.astroid_tree = astroid.MANAGER.ast_from_file(file_path)
44
38
  except Exception:
45
39
  self.astroid_tree = None
46
- finally:
47
- os.dup2(old_stderr_fd, 2)
48
- os.close(null_fd)
49
- os.close(old_stderr_fd)
50
40
 
51
41
  self.visit(tree)
52
42
  self._calculate_metrics()
@@ -5,16 +5,8 @@ code2llm - CLI for Python code flow analysis
5
5
  Analyze control flow, data flow, and call graphs of Python codebases.
6
6
  """
7
7
 
8
- # Suppress stderr at OS level immediately to avoid syntax error messages from C parser
9
- # This must happen BEFORE any imports that might trigger file parsing
10
- import os
11
- import sys
12
- if os.name != 'nt': # Unix-like systems
13
- _code2llm_null_fd = os.open(os.devnull, os.O_WRONLY)
14
- _code2llm_old_stderr_fd = os.dup(2)
15
- os.dup2(_code2llm_null_fd, 2)
16
-
17
8
  import argparse
9
+ import sys
18
10
  from pathlib import Path
19
11
  from typing import List, Optional
20
12
 
@@ -361,14 +353,6 @@ def _validate_chunked_output(output_dir: Path, args) -> bool:
361
353
 
362
354
  def main():
363
355
  """Main CLI entry point."""
364
- # Restore stderr immediately so normal output works
365
- # The stderr was redirected at module level to suppress parser errors during imports
366
- global _code2llm_null_fd, _code2llm_old_stderr_fd
367
- if os.name != 'nt' and '_code2llm_old_stderr_fd' in globals():
368
- os.dup2(_code2llm_old_stderr_fd, 2)
369
- os.close(_code2llm_null_fd)
370
- os.close(_code2llm_old_stderr_fd)
371
-
372
356
  # Handle special sub-commands first
373
357
  special_result = _handle_special_commands()
374
358
  if special_result is not None:
@@ -94,13 +94,6 @@ class FileAnalyzer:
94
94
 
95
95
  def _calculate_complexity(self, content: str, file_path: str, result: Dict) -> None:
96
96
  """Calculate cyclomatic complexity using radon."""
97
- # Suppress stderr at OS level to avoid syntax error messages from radon/C parser
98
- import os
99
- import sys
100
- null_fd = os.open(os.devnull, os.O_WRONLY)
101
- old_stderr_fd = os.dup(2)
102
- os.dup2(null_fd, 2)
103
-
104
97
  try:
105
98
  complexity_results = cc_visit(content)
106
99
  for entry in complexity_results:
@@ -124,10 +117,6 @@ class FileAnalyzer:
124
117
  except Exception as e:
125
118
  if self.config.verbose:
126
119
  print(f"Error calculating complexity for {file_path}: {e}")
127
- finally:
128
- os.dup2(old_stderr_fd, 2)
129
- os.close(null_fd)
130
- os.close(old_stderr_fd)
131
120
 
132
121
  def _perform_deep_analysis(self, tree: ast.AST, module_name: str, file_path: str, result: Dict) -> None:
133
122
  """Perform deep analysis including DFG and call graph extraction."""
@@ -370,22 +359,7 @@ class FileAnalyzer:
370
359
  def _analyze_single_file(args):
371
360
  """Analyze single file - module level function for pickle compatibility."""
372
361
  file_path, module_name, config_dict = args
373
-
374
- # Suppress stderr at OS level to avoid syntax error messages from C parser
375
- import os
376
- import sys
377
- null_fd = os.open(os.devnull, os.O_WRONLY)
378
- old_stderr_fd = os.dup(2)
379
- os.dup2(null_fd, 2)
380
-
381
- try:
382
- from ..config import Config
383
- config = Config(**config_dict)
384
- analyzer = FileAnalyzer(config, None)
385
- result = analyzer.analyze_file(file_path, module_name)
386
- finally:
387
- os.dup2(old_stderr_fd, 2)
388
- os.close(null_fd)
389
- os.close(old_stderr_fd)
390
-
391
- return result
362
+ from ..config import Config
363
+ config = Config(**config_dict)
364
+ analyzer = FileAnalyzer(config, None)
365
+ return analyzer.analyze_file(file_path, module_name)
@@ -4,7 +4,7 @@ Provides query normalization, intent matching, and entity resolution
4
4
  with multilingual support and fuzzy matching.
5
5
  """
6
6
 
7
- __version__ = "0.5.25"
7
+ __version__ = "0.5.27"
8
8
 
9
9
  from .pipeline import NLPPipeline
10
10
  from .normalization import QueryNormalizer
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: code2llm
3
- Version: 0.5.25
3
+ Version: 0.5.27
4
4
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
5
5
  Home-page: https://github.com/wronai/stts
6
6
  Author: STTS Project
@@ -60,7 +60,7 @@ When you run `code2llm ./ -f all`, the following files are created:
60
60
 
61
61
  | File | Format | Purpose | Key Insights |
62
62
  |------|--------|---------|--------------|
63
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
63
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 46 critical functions, 0 god modules |
64
64
  | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |
65
65
 
66
66
  ### 🤖 LLM-Ready Documentation
@@ -96,63 +96,6 @@ code2llm ./ -f all --max-memory 500
96
96
  code2llm ./ -f all --no-png
97
97
  ```
98
98
 
99
- ### Large Repository Analysis (Hierarchical Chunking)
100
- For large repositories, automatic hierarchical chunking ensures each output file stays under 256KB:
101
-
102
- ```bash
103
- # Auto-chunking when estimated output >256KB
104
- code2llm ./ -f toon,evolution,code2logic --verbose
105
-
106
- # Force chunking with custom size limit
107
- code2llm ./ -f toon --chunk --chunk-size 256
108
-
109
- # Analyze only specific subproject (matches level-1 or level-2 names)
110
- code2llm ./ -f toon --only-subproject src
111
- code2llm ./ -f toon --only-subproject src.core
112
-
113
- # Skip specific directories
114
- code2llm ./ -f toon --skip-subprojects tests examples docs
115
-
116
- # Customize chunking parameters
117
- code2llm ./ -f toon --chunk --max-files-per-chunk 50 --chunk-size 512
118
- ```
119
-
120
- **Hierarchical Splitting Strategy:**
121
- 1. **Level 0**: Entire project (if small enough, <256KB)
122
- 2. **Level 1**: Top-level directories (src/, tests/, examples/)
123
- 3. **Level 2**: Subdirectories if parent >256KB (src.core/, src.utils/)
124
- 4. **Level 3**: File chunks if still too large
125
-
126
- **Example Output Structure:**
127
- ```
128
- ./project/
129
- ├── src/ # Level 1: src/ fits in 256KB
130
- │ ├── analysis.toon # (~200KB)
131
- │ └── evolution.toon
132
- ├── src_core/ # Level 2: src/core/ was too big
133
- │ ├── analysis.toon # (~180KB)
134
- │ └── evolution.toon
135
- ├── src_utils_part1/ # Level 3: split by file count
136
- │ └── analysis.toon # (~150KB)
137
- ├── tests/ # Level 1: tests/
138
- │ └── analysis.toon
139
- ├── examples/ # Level 1: examples/
140
- │ └── analysis.toon
141
- ├── analysis.toon # Merged summary (all levels)
142
- └── evolution.toon # Full refactoring queue
143
- ```
144
-
145
- **Size Estimation:**
146
- - ~3KB per Python file in TOON format
147
- - Auto-detect chunking when: `file_count × 3KB > 256KB`
148
- - Example: 100 files ≈ 300KB → triggers chunking
149
-
150
- **Benefits:**
151
- - Each output file <256KB (easy for LLMs to process)
152
- - Natural code boundaries (module/submodule level)
153
- - Incremental analysis possible
154
- - Parallel processing ready
155
-
156
99
  ### Refactoring Focus
157
100
  ```bash
158
101
  # Get refactoring recommendations
@@ -431,10 +374,10 @@ code2llm ./ -f yaml --separate-orphans
431
374
  ---
432
375
 
433
376
  **Generated by**: `code2llm ./ -f all --readme`
434
- **Analysis Date**: 2026-03-03
435
- **Total Functions**: 700
436
- **Total Classes**: 97
437
- **Modules**: 83
377
+ **Analysis Date**: 2026-03-04
378
+ **Total Functions**: 750
379
+ **Total Classes**: 99
380
+ **Modules**: 90
438
381
 
439
382
  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm
440
383
 
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "code2llm"
7
- version = "0.5.25"
7
+ version = "0.5.27"
8
8
  description = "High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
File without changes
File without changes
File without changes