code2llm 0.5.24__tar.gz → 0.5.26__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. {code2llm-0.5.24 → code2llm-0.5.26}/PKG-INFO +6 -63
  2. {code2llm-0.5.24 → code2llm-0.5.26}/README.md +5 -62
  3. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/__init__.py +1 -1
  4. code2llm-0.5.26/code2llm/__main__.py +25 -0
  5. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/call_graph.py +11 -1
  6. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/cli.py +21 -1
  7. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/core/file_analyzer.py +30 -4
  8. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/__init__.py +1 -1
  9. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/PKG-INFO +6 -63
  10. {code2llm-0.5.24 → code2llm-0.5.26}/pyproject.toml +1 -1
  11. code2llm-0.5.24/code2llm/__main__.py +0 -6
  12. {code2llm-0.5.24 → code2llm-0.5.26}/LICENSE +0 -0
  13. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/__init__.py +0 -0
  14. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/cfg.py +0 -0
  15. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/coupling.py +0 -0
  16. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/data_analysis.py +0 -0
  17. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/dfg.py +0 -0
  18. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/pipeline_detector.py +0 -0
  19. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/side_effects.py +0 -0
  20. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/smells.py +0 -0
  21. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/type_inference.py +0 -0
  22. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/cli_exports.py +0 -0
  23. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/__init__.py +0 -0
  24. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/analyzer.py +0 -0
  25. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/config.py +0 -0
  26. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/core/__init__.py +0 -0
  27. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/core/file_cache.py +0 -0
  28. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/core/file_filter.py +0 -0
  29. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/core/refactoring.py +0 -0
  30. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/large_repo.py +0 -0
  31. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/models.py +0 -0
  32. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming/__init__.py +0 -0
  33. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming/cache.py +0 -0
  34. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming/incremental.py +0 -0
  35. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming/prioritizer.py +0 -0
  36. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming/scanner.py +0 -0
  37. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming/strategies.py +0 -0
  38. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/streaming_analyzer.py +0 -0
  39. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/toon_size_manager.py +0 -0
  40. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/__init__.py +0 -0
  41. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/base.py +0 -0
  42. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/context_exporter.py +0 -0
  43. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/evolution_exporter.py +0 -0
  44. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/flow_constants.py +0 -0
  45. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/flow_exporter.py +0 -0
  46. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/flow_renderer.py +0 -0
  47. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/json_exporter.py +0 -0
  48. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/llm_exporter.py +0 -0
  49. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/map_exporter.py +0 -0
  50. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/mermaid_exporter.py +0 -0
  51. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/readme_exporter.py +0 -0
  52. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/toon/__init__.py +0 -0
  53. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/toon/helpers.py +0 -0
  54. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/toon/metrics.py +0 -0
  55. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/toon/module_detail.py +0 -0
  56. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/toon/renderer.py +0 -0
  57. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/toon.py +0 -0
  58. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/exporters/yaml_exporter.py +0 -0
  59. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/generators/__init__.py +0 -0
  60. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/generators/llm_flow.py +0 -0
  61. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/generators/llm_task.py +0 -0
  62. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/generators/mermaid.py +0 -0
  63. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/config.py +0 -0
  64. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/entity_resolution.py +0 -0
  65. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/intent_matching.py +0 -0
  66. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/normalization.py +0 -0
  67. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/pipeline.py +0 -0
  68. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/patterns/__init__.py +0 -0
  69. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/patterns/detector.py +0 -0
  70. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/refactor/__init__.py +0 -0
  71. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm/refactor/prompt_engine.py +0 -0
  72. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/SOURCES.txt +0 -0
  73. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/dependency_links.txt +0 -0
  74. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/entry_points.txt +0 -0
  75. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/requires.txt +0 -0
  76. {code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/top_level.txt +0 -0
  77. {code2llm-0.5.24 → code2llm-0.5.26}/setup.cfg +0 -0
  78. {code2llm-0.5.24 → code2llm-0.5.26}/setup.py +0 -0
  79. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_advanced_analysis.py +0 -0
  80. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_analyzer.py +0 -0
  81. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_deep_analysis.py +0 -0
  82. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_edge_cases.py +0 -0
  83. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_flow_exporter.py +0 -0
  84. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_format_quality.py +0 -0
  85. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_nlp_pipeline.py +0 -0
  86. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_pipeline_detector.py +0 -0
  87. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_prompt_engine.py +0 -0
  88. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_prompt_txt.py +0 -0
  89. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_refactoring_engine.py +0 -0
  90. {code2llm-0.5.24 → code2llm-0.5.26}/tests/test_toon_v2.py +0 -0

{code2llm-0.5.24 → code2llm-0.5.26}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: code2llm
- Version: 0.5.24
+ Version: 0.5.26
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
  Home-page: https://github.com/wronai/stts
  Author: STTS Project
@@ -60,7 +60,7 @@ When you run `code2llm ./ -f all`, the following files are created:

  | File | Format | Purpose | Key Insights |
  |------|--------|---------|--------------|
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 46 critical functions, 0 god modules |
  | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |

  ### 🤖 LLM-Ready Documentation
@@ -96,63 +96,6 @@ code2llm ./ -f all --max-memory 500
  code2llm ./ -f all --no-png
  ```

- ### Large Repository Analysis (Hierarchical Chunking)
- For large repositories, automatic hierarchical chunking ensures each output file stays under 256KB:
-
- ```bash
- # Auto-chunking when estimated output >256KB
- code2llm ./ -f toon,evolution,code2logic --verbose
-
- # Force chunking with custom size limit
- code2llm ./ -f toon --chunk --chunk-size 256
-
- # Analyze only specific subproject (matches level-1 or level-2 names)
- code2llm ./ -f toon --only-subproject src
- code2llm ./ -f toon --only-subproject src.core
-
- # Skip specific directories
- code2llm ./ -f toon --skip-subprojects tests examples docs
-
- # Customize chunking parameters
- code2llm ./ -f toon --chunk --max-files-per-chunk 50 --chunk-size 512
- ```
-
- **Hierarchical Splitting Strategy:**
- 1. **Level 0**: Entire project (if small enough, <256KB)
- 2. **Level 1**: Top-level directories (src/, tests/, examples/)
- 3. **Level 2**: Subdirectories if parent >256KB (src.core/, src.utils/)
- 4. **Level 3**: File chunks if still too large
-
- **Example Output Structure:**
- ```
- ./project/
- ├── src/                 # Level 1: src/ fits in 256KB
- │   ├── analysis.toon    # (~200KB)
- │   └── evolution.toon
- ├── src_core/            # Level 2: src/core/ was too big
- │   ├── analysis.toon    # (~180KB)
- │   └── evolution.toon
- ├── src_utils_part1/     # Level 3: split by file count
- │   └── analysis.toon    # (~150KB)
- ├── tests/               # Level 1: tests/
- │   └── analysis.toon
- ├── examples/            # Level 1: examples/
- │   └── analysis.toon
- ├── analysis.toon        # Merged summary (all levels)
- └── evolution.toon       # Full refactoring queue
- ```
-
- **Size Estimation:**
- - ~3KB per Python file in TOON format
- - Auto-detect chunking when: `file_count × 3KB > 256KB`
- - Example: 100 files ≈ 300KB → triggers chunking
-
- **Benefits:**
- - Each output file <256KB (easy for LLMs to process)
- - Natural code boundaries (module/submodule level)
- - Incremental analysis possible
- - Parallel processing ready
-
  ### Refactoring Focus
  ```bash
  # Get refactoring recommendations
@@ -431,10 +374,10 @@ code2llm ./ -f yaml --separate-orphans
  ---

  **Generated by**: `code2llm ./ -f all --readme`
- **Analysis Date**: 2026-03-03
- **Total Functions**: 700
- **Total Classes**: 97
- **Modules**: 83
+ **Analysis Date**: 2026-03-04
+ **Total Functions**: 750
+ **Total Classes**: 99
+ **Modules**: 90

  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm
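The "Large Repository Analysis (Hierarchical Chunking)" section removed above documents a simple size-estimation rule: roughly 3KB of TOON output per Python file, with chunking triggered once the estimate exceeds the 256KB limit. The sketch below merely restates that arithmetic; the function and constant names are illustrative and are not part of code2llm's actual API.

```python
# Illustrative sketch of the auto-chunking trigger described in the removed
# README section; names here are hypothetical, not code2llm API.
TOON_BYTES_PER_FILE = 3 * 1024          # ~3KB of TOON output per Python file
DEFAULT_CHUNK_LIMIT_BYTES = 256 * 1024  # keep each output file under 256KB


def needs_chunking(file_count: int, limit: int = DEFAULT_CHUNK_LIMIT_BYTES) -> bool:
    """Return True when the estimated TOON output would exceed the limit."""
    return file_count * TOON_BYTES_PER_FILE > limit


# README example: 100 files ≈ 300KB, which exceeds 256KB and triggers chunking.
print(needs_chunking(100))  # True
print(needs_chunking(50))   # False: ~150KB fits in a single output file
```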
 

{code2llm-0.5.24 → code2llm-0.5.26}/README.md
@@ -10,7 +10,7 @@ When you run `code2llm ./ -f all`, the following files are created:

  | File | Format | Purpose | Key Insights |
  |------|--------|---------|--------------|
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 46 critical functions, 0 god modules |
  | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |

  ### 🤖 LLM-Ready Documentation
@@ -46,63 +46,6 @@ code2llm ./ -f all --max-memory 500
  code2llm ./ -f all --no-png
  ```

- ### Large Repository Analysis (Hierarchical Chunking)
- For large repositories, automatic hierarchical chunking ensures each output file stays under 256KB:
-
- ```bash
- # Auto-chunking when estimated output >256KB
- code2llm ./ -f toon,evolution,code2logic --verbose
-
- # Force chunking with custom size limit
- code2llm ./ -f toon --chunk --chunk-size 256
-
- # Analyze only specific subproject (matches level-1 or level-2 names)
- code2llm ./ -f toon --only-subproject src
- code2llm ./ -f toon --only-subproject src.core
-
- # Skip specific directories
- code2llm ./ -f toon --skip-subprojects tests examples docs
-
- # Customize chunking parameters
- code2llm ./ -f toon --chunk --max-files-per-chunk 50 --chunk-size 512
- ```
-
- **Hierarchical Splitting Strategy:**
- 1. **Level 0**: Entire project (if small enough, <256KB)
- 2. **Level 1**: Top-level directories (src/, tests/, examples/)
- 3. **Level 2**: Subdirectories if parent >256KB (src.core/, src.utils/)
- 4. **Level 3**: File chunks if still too large
-
- **Example Output Structure:**
- ```
- ./project/
- ├── src/                 # Level 1: src/ fits in 256KB
- │   ├── analysis.toon    # (~200KB)
- │   └── evolution.toon
- ├── src_core/            # Level 2: src/core/ was too big
- │   ├── analysis.toon    # (~180KB)
- │   └── evolution.toon
- ├── src_utils_part1/     # Level 3: split by file count
- │   └── analysis.toon    # (~150KB)
- ├── tests/               # Level 1: tests/
- │   └── analysis.toon
- ├── examples/            # Level 1: examples/
- │   └── analysis.toon
- ├── analysis.toon        # Merged summary (all levels)
- └── evolution.toon       # Full refactoring queue
- ```
-
- **Size Estimation:**
- - ~3KB per Python file in TOON format
- - Auto-detect chunking when: `file_count × 3KB > 256KB`
- - Example: 100 files ≈ 300KB → triggers chunking
-
- **Benefits:**
- - Each output file <256KB (easy for LLMs to process)
- - Natural code boundaries (module/submodule level)
- - Incremental analysis possible
- - Parallel processing ready
-
  ### Refactoring Focus
  ```bash
  # Get refactoring recommendations
@@ -381,10 +324,10 @@ code2llm ./ -f yaml --separate-orphans
  ---

  **Generated by**: `code2llm ./ -f all --readme`
- **Analysis Date**: 2026-03-03
- **Total Functions**: 700
- **Total Classes**: 97
- **Modules**: 83
+ **Analysis Date**: 2026-03-04
+ **Total Functions**: 750
+ **Total Classes**: 99
+ **Modules**: 90

  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm
 
{code2llm-0.5.24 → code2llm-0.5.26}/code2llm/__init__.py
@@ -8,7 +8,7 @@ Includes NLP Processing Pipeline for query normalization, intent matching,
  and entity resolution with multilingual support.
  """

- __version__ = "0.5.24"
+ __version__ = "0.5.26"
  __author__ = "STTS Project"

  # Core analysis components

code2llm-0.5.26/code2llm/__main__.py (new file)
@@ -0,0 +1,25 @@
+ """Entry point for running code2llm as a module."""
+
+ # Suppress stderr at OS level immediately to avoid syntax error messages from C parser
+ # This must happen BEFORE any imports that might trigger file parsing
+ import os
+ import warnings
+
+ # Suppress SyntaxWarning which prints 'invalid syntax' messages
+ warnings.filterwarnings('ignore', category=SyntaxWarning)
+
+ if os.name != 'nt':  # Unix-like systems
+     _code2llm_main_null_fd = os.open(os.devnull, os.O_WRONLY)
+     _code2llm_main_old_stderr_fd = os.dup(2)
+     os.dup2(_code2llm_main_null_fd, 2)
+
+ from .cli import main
+
+ # Restore stderr after imports
+ if os.name != 'nt' and '_code2llm_main_old_stderr_fd' in globals():
+     os.dup2(_code2llm_main_old_stderr_fd, 2)
+     os.close(_code2llm_main_null_fd)
+     os.close(_code2llm_main_old_stderr_fd)
+
+ if __name__ == '__main__':
+     main()

{code2llm-0.5.24 → code2llm-0.5.26}/code2llm/analysis/call_graph.py
@@ -32,11 +32,21 @@ class CallGraphExtractor(ast.NodeVisitor):
  self.class_stack = []
  self.imports = {}

- # Try to get astroid tree for better resolution
+ # Suppress stderr at OS level to avoid syntax error messages from astroid/C parser
+ import os
+ null_fd = os.open(os.devnull, os.O_WRONLY)
+ old_stderr_fd = os.dup(2)
+ os.dup2(null_fd, 2)
+
  try:
+     # Try to get astroid tree for better resolution
      self.astroid_tree = astroid.MANAGER.ast_from_file(file_path)
  except Exception:
      self.astroid_tree = None
+ finally:
+     os.dup2(old_stderr_fd, 2)
+     os.close(null_fd)
+     os.close(old_stderr_fd)

  self.visit(tree)
  self._calculate_metrics()

{code2llm-0.5.24 → code2llm-0.5.26}/code2llm/cli.py
@@ -5,8 +5,20 @@ code2llm - CLI for Python code flow analysis
  Analyze control flow, data flow, and call graphs of Python codebases.
  """

- import argparse
+ # Suppress SyntaxWarning messages by setting PYTHONWARNINGS before any imports
+ # that might trigger file parsing
+ import os
+ os.environ['PYTHONWARNINGS'] = 'ignore::SyntaxWarning'
+
+ # Suppress stderr at OS level immediately to avoid syntax error messages from C parser
+ # This must happen BEFORE any imports that might trigger file parsing
  import sys
+ if os.name != 'nt':  # Unix-like systems
+     _code2llm_null_fd = os.open(os.devnull, os.O_WRONLY)
+     _code2llm_old_stderr_fd = os.dup(2)
+     os.dup2(_code2llm_null_fd, 2)
+
+ import argparse
  from pathlib import Path
  from typing import List, Optional

@@ -353,6 +365,14 @@ def _validate_chunked_output(output_dir: Path, args) -> bool:

  def main():
      """Main CLI entry point."""
+     # Restore stderr immediately so normal output works
+     # The stderr was redirected at module level to suppress parser errors during imports
+     global _code2llm_null_fd, _code2llm_old_stderr_fd
+     if os.name != 'nt' and '_code2llm_old_stderr_fd' in globals():
+         os.dup2(_code2llm_old_stderr_fd, 2)
+         os.close(_code2llm_null_fd)
+         os.close(_code2llm_old_stderr_fd)
+
      # Handle special sub-commands first
      special_result = _handle_special_commands()
      if special_result is not None:

{code2llm-0.5.24 → code2llm-0.5.26}/code2llm/core/core/file_analyzer.py
@@ -94,6 +94,13 @@ class FileAnalyzer:

  def _calculate_complexity(self, content: str, file_path: str, result: Dict) -> None:
      """Calculate cyclomatic complexity using radon."""
+     # Suppress stderr at OS level to avoid syntax error messages from radon/C parser
+     import os
+     import sys
+     null_fd = os.open(os.devnull, os.O_WRONLY)
+     old_stderr_fd = os.dup(2)
+     os.dup2(null_fd, 2)
+
      try:
          complexity_results = cc_visit(content)
          for entry in complexity_results:
@@ -117,6 +124,10 @@
      except Exception as e:
          if self.config.verbose:
              print(f"Error calculating complexity for {file_path}: {e}")
+     finally:
+         os.dup2(old_stderr_fd, 2)
+         os.close(null_fd)
+         os.close(old_stderr_fd)

  def _perform_deep_analysis(self, tree: ast.AST, module_name: str, file_path: str, result: Dict) -> None:
      """Perform deep analysis including DFG and call graph extraction."""
@@ -359,7 +370,22 @@
  def _analyze_single_file(args):
      """Analyze single file - module level function for pickle compatibility."""
      file_path, module_name, config_dict = args
-     from ..config import Config
-     config = Config(**config_dict)
-     analyzer = FileAnalyzer(config, None)
-     return analyzer.analyze_file(file_path, module_name)
+
+     # Suppress stderr at OS level to avoid syntax error messages from C parser
+     import os
+     import sys
+     null_fd = os.open(os.devnull, os.O_WRONLY)
+     old_stderr_fd = os.dup(2)
+     os.dup2(null_fd, 2)
+
+     try:
+         from ..config import Config
+         config = Config(**config_dict)
+         analyzer = FileAnalyzer(config, None)
+         result = analyzer.analyze_file(file_path, module_name)
+     finally:
+         os.dup2(old_stderr_fd, 2)
+         os.close(null_fd)
+         os.close(old_stderr_fd)
+
+     return result
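
The file-descriptor redirection shown above is the same pattern 0.5.26 applies inline in `__main__.py`, `cli.py`, `call_graph.py`, and `file_analyzer.py`: duplicate the real stderr, point fd 2 at /dev/null around the noisy call, then restore and close both descriptors. A minimal, self-contained sketch of that pattern as a context manager follows; it is illustrative only and not part of the code2llm package.

```python
import os
import sys
from contextlib import contextmanager


@contextmanager
def stderr_silenced():
    """Temporarily point fd 2 at /dev/null, then restore the original stderr."""
    null_fd = os.open(os.devnull, os.O_WRONLY)
    saved_fd = os.dup(2)          # keep a copy of the real stderr
    os.dup2(null_fd, 2)           # fd 2 -> /dev/null, silences C-level writes too
    try:
        yield
    finally:
        os.dup2(saved_fd, 2)      # put the real stderr back
        os.close(null_fd)
        os.close(saved_fd)


if __name__ == "__main__":
    with stderr_silenced():
        print("swallowed", file=sys.stderr)   # e.g. parser noise from astroid/radon
    print("stderr restored", file=sys.stderr)
```

Working at the descriptor level rather than swapping `sys.stderr` matters here because the messages come from C-level parsers that write directly to file descriptor 2.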

{code2llm-0.5.24 → code2llm-0.5.26}/code2llm/nlp/__init__.py
@@ -4,7 +4,7 @@ Provides query normalization, intent matching, and entity resolution
  with multilingual support and fuzzy matching.
  """

- __version__ = "0.5.24"
+ __version__ = "0.5.26"

  from .pipeline import NLPPipeline
  from .normalization import QueryNormalizer

{code2llm-0.5.24 → code2llm-0.5.26}/code2llm.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: code2llm
- Version: 0.5.24
+ Version: 0.5.26
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
  Home-page: https://github.com/wronai/stts
  Author: STTS Project
@@ -60,7 +60,7 @@ When you run `code2llm ./ -f all`, the following files are created:

  | File | Format | Purpose | Key Insights |
  |------|--------|---------|--------------|
- | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 43 critical functions, 0 god modules |
+ | `analysis.toon` | **TOON** | **🔥 Health diagnostics** - Complexity, god modules, coupling | 46 critical functions, 0 god modules |
  | `project.toon` | **TOON** | **🧠 Project logic** - Compact module view from code2logic | Generated via code2logic integration |

  ### 🤖 LLM-Ready Documentation
@@ -96,63 +96,6 @@ code2llm ./ -f all --max-memory 500
  code2llm ./ -f all --no-png
  ```

- ### Large Repository Analysis (Hierarchical Chunking)
- For large repositories, automatic hierarchical chunking ensures each output file stays under 256KB:
-
- ```bash
- # Auto-chunking when estimated output >256KB
- code2llm ./ -f toon,evolution,code2logic --verbose
-
- # Force chunking with custom size limit
- code2llm ./ -f toon --chunk --chunk-size 256
-
- # Analyze only specific subproject (matches level-1 or level-2 names)
- code2llm ./ -f toon --only-subproject src
- code2llm ./ -f toon --only-subproject src.core
-
- # Skip specific directories
- code2llm ./ -f toon --skip-subprojects tests examples docs
-
- # Customize chunking parameters
- code2llm ./ -f toon --chunk --max-files-per-chunk 50 --chunk-size 512
- ```
-
- **Hierarchical Splitting Strategy:**
- 1. **Level 0**: Entire project (if small enough, <256KB)
- 2. **Level 1**: Top-level directories (src/, tests/, examples/)
- 3. **Level 2**: Subdirectories if parent >256KB (src.core/, src.utils/)
- 4. **Level 3**: File chunks if still too large
-
- **Example Output Structure:**
- ```
- ./project/
- ├── src/                 # Level 1: src/ fits in 256KB
- │   ├── analysis.toon    # (~200KB)
- │   └── evolution.toon
- ├── src_core/            # Level 2: src/core/ was too big
- │   ├── analysis.toon    # (~180KB)
- │   └── evolution.toon
- ├── src_utils_part1/     # Level 3: split by file count
- │   └── analysis.toon    # (~150KB)
- ├── tests/               # Level 1: tests/
- │   └── analysis.toon
- ├── examples/            # Level 1: examples/
- │   └── analysis.toon
- ├── analysis.toon        # Merged summary (all levels)
- └── evolution.toon       # Full refactoring queue
- ```
-
- **Size Estimation:**
- - ~3KB per Python file in TOON format
- - Auto-detect chunking when: `file_count × 3KB > 256KB`
- - Example: 100 files ≈ 300KB → triggers chunking
-
- **Benefits:**
- - Each output file <256KB (easy for LLMs to process)
- - Natural code boundaries (module/submodule level)
- - Incremental analysis possible
- - Parallel processing ready
-
  ### Refactoring Focus
  ```bash
  # Get refactoring recommendations
@@ -431,10 +374,10 @@ code2llm ./ -f yaml --separate-orphans
  ---

  **Generated by**: `code2llm ./ -f all --readme`
- **Analysis Date**: 2026-03-03
- **Total Functions**: 700
- **Total Classes**: 97
- **Modules**: 83
+ **Analysis Date**: 2026-03-04
+ **Total Functions**: 750
+ **Total Classes**: 99
+ **Modules**: 90

  For more information about code2llm, visit: https://github.com/tom-sapletta/code2llm
 
{code2llm-0.5.24 → code2llm-0.5.26}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "code2llm"
- version = "0.5.24"
+ version = "0.5.26"
  description = "High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries"
  readme = "README.md"
  requires-python = ">=3.8"

code2llm-0.5.24/code2llm/__main__.py (deleted)
@@ -1,6 +0,0 @@
- """Entry point for running code2llm as a module."""
-
- from .cli import main
-
- if __name__ == '__main__':
-     main()