code2llm 0.5.38__tar.gz → 0.5.39__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101):
  1. {code2llm-0.5.38 → code2llm-0.5.39}/PKG-INFO +1 -1
  2. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/__init__.py +1 -1
  3. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli_exports/prompt.py +124 -11
  4. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/nlp/__init__.py +1 -1
  5. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm.egg-info/PKG-INFO +1 -1
  6. {code2llm-0.5.38 → code2llm-0.5.39}/pyproject.toml +1 -1
  7. {code2llm-0.5.38 → code2llm-0.5.39}/LICENSE +0 -0
  8. {code2llm-0.5.38 → code2llm-0.5.39}/README.md +0 -0
  9. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/__main__.py +0 -0
  10. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/__init__.py +0 -0
  11. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/call_graph.py +0 -0
  12. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/cfg.py +0 -0
  13. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/coupling.py +0 -0
  14. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/data_analysis.py +0 -0
  15. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/dfg.py +0 -0
  16. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/pipeline_detector.py +0 -0
  17. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/side_effects.py +0 -0
  18. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/smells.py +0 -0
  19. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/analysis/type_inference.py +0 -0
  20. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli.py +0 -0
  21. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli_analysis.py +0 -0
  22. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli_exports/__init__.py +0 -0
  23. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli_exports/code2logic.py +0 -0
  24. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli_exports/formats.py +0 -0
  25. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/cli_exports/orchestrator.py +0 -0
  26. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/__init__.py +0 -0
  27. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/analyzer.py +0 -0
  28. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/config.py +0 -0
  29. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/core/__init__.py +0 -0
  30. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/core/file_analyzer.py +0 -0
  31. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/core/file_cache.py +0 -0
  32. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/core/file_filter.py +0 -0
  33. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/core/refactoring.py +0 -0
  34. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/large_repo.py +0 -0
  35. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/models.py +0 -0
  36. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming/__init__.py +0 -0
  37. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming/cache.py +0 -0
  38. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming/incremental.py +0 -0
  39. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming/prioritizer.py +0 -0
  40. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming/scanner.py +0 -0
  41. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming/strategies.py +0 -0
  42. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/streaming_analyzer.py +0 -0
  43. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/core/toon_size_manager.py +0 -0
  44. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/__init__.py +0 -0
  45. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/article_view.py +0 -0
  46. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/base.py +0 -0
  47. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/context_exporter.py +0 -0
  48. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/context_view.py +0 -0
  49. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/evolution_exporter.py +0 -0
  50. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/flow_constants.py +0 -0
  51. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/flow_exporter.py +0 -0
  52. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/flow_renderer.py +0 -0
  53. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/html_dashboard.py +0 -0
  54. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/json_exporter.py +0 -0
  55. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/llm_exporter.py +0 -0
  56. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/map_exporter.py +0 -0
  57. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/mermaid_exporter.py +0 -0
  58. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/project_yaml_exporter.py +0 -0
  59. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/readme_exporter.py +0 -0
  60. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/report_generators.py +0 -0
  61. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon/__init__.py +0 -0
  62. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon/helpers.py +0 -0
  63. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon/metrics.py +0 -0
  64. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon/module_detail.py +0 -0
  65. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon/renderer.py +0 -0
  66. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon.py +0 -0
  67. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/toon_view.py +0 -0
  68. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/validate_project.py +0 -0
  69. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/exporters/yaml_exporter.py +0 -0
  70. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/generators/__init__.py +0 -0
  71. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/generators/llm_flow.py +0 -0
  72. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/generators/llm_task.py +0 -0
  73. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/generators/mermaid.py +0 -0
  74. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/nlp/config.py +0 -0
  75. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/nlp/entity_resolution.py +0 -0
  76. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/nlp/intent_matching.py +0 -0
  77. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/nlp/normalization.py +0 -0
  78. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/nlp/pipeline.py +0 -0
  79. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/patterns/__init__.py +0 -0
  80. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/patterns/detector.py +0 -0
  81. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/refactor/__init__.py +0 -0
  82. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm/refactor/prompt_engine.py +0 -0
  83. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm.egg-info/SOURCES.txt +0 -0
  84. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm.egg-info/dependency_links.txt +0 -0
  85. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm.egg-info/entry_points.txt +0 -0
  86. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm.egg-info/requires.txt +0 -0
  87. {code2llm-0.5.38 → code2llm-0.5.39}/code2llm.egg-info/top_level.txt +0 -0
  88. {code2llm-0.5.38 → code2llm-0.5.39}/setup.cfg +0 -0
  89. {code2llm-0.5.38 → code2llm-0.5.39}/setup.py +0 -0
  90. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_advanced_analysis.py +0 -0
  91. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_analyzer.py +0 -0
  92. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_deep_analysis.py +0 -0
  93. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_edge_cases.py +0 -0
  94. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_flow_exporter.py +0 -0
  95. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_format_quality.py +0 -0
  96. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_nlp_pipeline.py +0 -0
  97. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_pipeline_detector.py +0 -0
  98. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_prompt_engine.py +0 -0
  99. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_prompt_txt.py +0 -0
  100. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_refactoring_engine.py +0 -0
  101. {code2llm-0.5.38 → code2llm-0.5.39}/tests/test_toon_v2.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: code2llm
3
- Version: 0.5.38
3
+ Version: 0.5.39
4
4
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
5
5
  Home-page: https://github.com/wronai/stts
6
6
  Author: STTS Project
@@ -8,7 +8,7 @@ Includes NLP Processing Pipeline for query normalization, intent matching,
8
8
  and entity resolution with multilingual support.
9
9
  """
10
10
 
11
- __version__ = "0.5.38"
11
+ __version__ = "0.5.39"
12
12
  __author__ = "STTS Project"
13
13
 
14
14
  # Core analysis components (lightweight, always needed)
@@ -21,7 +21,9 @@ def _export_prompt_txt(args, output_dir: Path, formats: list[str], source_path:
21
21
  for name in missing:
22
22
  lines.append(f"- {output_rel_path}/{name}")
23
23
 
24
- lines.extend(_build_prompt_footer(chunked=False))
24
+ # Analyze generated files and build dynamic footer
25
+ file_analysis = _analyze_generated_files(output_dir)
26
+ lines.extend(_build_prompt_footer(chunked=False, file_analysis=file_analysis))
25
27
 
26
28
  prompt_path = output_dir / 'prompt.txt'
27
29
  prompt_path.write_text("\n".join(lines) + "\n", encoding='utf-8')
@@ -42,7 +44,10 @@ def _export_chunked_prompt_txt(args, output_dir: Path, formats: list[str], sourc
42
44
  lines.extend(_build_subprojects_section(subprojects, output_dir, output_rel_path))
43
45
 
44
46
  lines.extend(_build_missing_files_section(output_dir, output_rel_path))
45
- lines.extend(_build_prompt_footer(chunked=True))
47
+
48
+ # Analyze generated files and build dynamic footer
49
+ file_analysis = _analyze_generated_files(output_dir, subprojects=subprojects)
50
+ lines.extend(_build_prompt_footer(chunked=True, file_analysis=file_analysis))
46
51
 
47
52
  prompt_path = output_dir / 'prompt.txt'
48
53
  prompt_path.write_text("\n".join(lines) + "\n", encoding='utf-8')
@@ -139,19 +144,126 @@ def _build_missing_files_section(output_dir: Path, output_rel_path: str) -> List
139
144
  return lines
140
145
 
141
146
 
142
- def _build_prompt_footer(chunked: bool = False) -> List[str]:
143
- """Build footer section of prompt."""
144
- lines = [
145
- "",
146
- "Task:",
147
+ def _analyze_generated_files(output_dir: Path, subprojects: list = None) -> dict:
148
+ """Analyze which files were generated and determine appropriate focus areas."""
149
+ analysis = {
150
+ 'has_analysis_toon': (output_dir / 'analysis.toon').exists(),
151
+ 'has_context_md': (output_dir / 'context.md').exists(),
152
+ 'has_evolution_toon': (output_dir / 'evolution.toon').exists(),
153
+ 'has_project_toon': (output_dir / 'project.toon').exists(),
154
+ 'has_readme': (output_dir / 'README.md').exists(),
155
+ 'has_yaml': (output_dir / 'analysis.yaml').exists(),
156
+ 'has_json': (output_dir / 'analysis.json').exists(),
157
+ 'has_mermaid': (output_dir / 'flow.mmd').exists() or (output_dir / 'calls.mmd').exists(),
158
+ 'is_chunked': subprojects is not None and len(subprojects) > 0,
159
+ 'file_count': 0,
160
+ }
161
+
162
+ # Count total files
163
+ for key, exists in analysis.items():
164
+ if key.startswith('has_') and exists:
165
+ analysis['file_count'] += 1
166
+
167
+ return analysis
168
+
169
+
170
+ def _build_dynamic_focus_areas(file_analysis: dict) -> List[str]:
171
+ """Build focus areas based on generated files."""
172
+ focus_areas = []
173
+
174
+ if file_analysis['has_analysis_toon']:
175
+ focus_areas.append("1. **Code Health Analysis** - Review complexity metrics, god modules, coupling issues from analysis.toon")
176
+
177
+ if file_analysis['has_evolution_toon']:
178
+ focus_areas.append("2. **Refactoring Priorities** - Examine ranked refactoring actions and risk assessment from evolution.toon")
179
+
180
+ if file_analysis['has_context_md']:
181
+ focus_areas.append("3. **Architecture Overview** - Understand main flows, entry points, and public API from context.md")
182
+
183
+ if file_analysis['has_project_toon']:
184
+ focus_areas.append("4. **Project Structure** - Analyze module organization and dependencies from project.toon")
185
+
186
+ if file_analysis['has_yaml'] or file_analysis['has_json']:
187
+ focus_areas.append("5. **Structured Data** - Use machine-readable formats for automated analysis and metrics extraction")
188
+
189
+ if file_analysis['has_mermaid']:
190
+ focus_areas.append("6. **Visual Flow** - Review control flow diagrams and call graphs for architectural insights")
191
+
192
+ if file_analysis['is_chunked']:
193
+ focus_areas.append("7. **Large Repository Patterns** - Identify cross-chunk dependencies and consolidation opportunities")
194
+
195
+ if not focus_areas:
196
+ focus_areas.append("1. **General Code Review** - Provide overall architecture assessment and improvement recommendations")
197
+
198
+ return focus_areas
199
+
200
+
201
+ def _build_dynamic_tasks(file_analysis: dict) -> List[str]:
202
+ """Build tasks based on available files."""
203
+ tasks = [
147
204
  "- Summarize the architecture and main flows.",
148
205
  "- Identify the highest-risk areas and propose a refactoring plan.",
149
206
  "- If you suggest changes, keep behavior backward compatible and provide concrete steps.",
150
- "",
151
- "Constraints:",
152
- "- Prefer minimal, incremental changes.",
153
- "- If uncertain, ask clarifying questions.",
154
207
  ]
208
+
209
+ if file_analysis['has_analysis_toon']:
210
+ tasks.append("- Highlight critical functions (CC ≥ 10) and god modules from analysis.toon.")
211
+
212
+ if file_analysis['has_evolution_toon']:
213
+ tasks.append("- Prioritize refactoring actions by impact/effort ratio from evolution.toon.")
214
+
215
+ if file_analysis['has_context_md']:
216
+ tasks.append("- Validate entry points and public API surface match the architecture described.")
217
+
218
+ if file_analysis['is_chunked']:
219
+ tasks.append("- Analyze cross-chunk dependencies and suggest consolidation strategies.")
220
+
221
+ return tasks
222
+
223
+
224
+ def _build_prompt_footer(chunked: bool = False, file_analysis: dict = None) -> List[str]:
225
+ """Build dynamic footer section of prompt based on generated files."""
226
+ if file_analysis is None:
227
+ file_analysis = {}
228
+
229
+ lines = [""]
230
+
231
+ # Dynamic tasks
232
+ lines.append("Task:")
233
+ tasks = _build_dynamic_tasks(file_analysis)
234
+ for task in tasks:
235
+ lines.append(task)
236
+
237
+ # Dynamic focus areas
238
+ focus_areas = _build_dynamic_focus_areas(file_analysis)
239
+ if focus_areas:
240
+ lines.append("")
241
+ lines.append("Focus Areas for Analysis:")
242
+ for area in focus_areas:
243
+ lines.append(area)
244
+
245
+ # File-specific recommendations
246
+ if file_analysis['file_count'] > 0:
247
+ lines.append("")
248
+ lines.append("Analysis Strategy:")
249
+ if file_analysis['has_analysis_toon'] and file_analysis['has_evolution_toon']:
250
+ lines.append("- Start with analysis.toon for health metrics, then evolution.toon for action priorities")
251
+ elif file_analysis['has_context_md']:
252
+ lines.append("- Use context.md as the primary reference for architectural understanding")
253
+ elif file_analysis['has_project_toon']:
254
+ lines.append("- Begin with project.toon for structural overview")
255
+
256
+ if file_analysis['has_yaml']:
257
+ lines.append("- Reference analysis.yaml for precise metrics and programmatic data")
258
+
259
+ # Constraints
260
+ lines.append("")
261
+ lines.append("Constraints:")
262
+ lines.append("- Prefer minimal, incremental changes.")
263
+ lines.append("- Maintain full backward compatibility.")
264
+ lines.append("- Base recommendations on concrete metrics from the provided files.")
265
+ lines.append("- If uncertain, ask clarifying questions.")
266
+
155
267
  if chunked:
156
268
  lines.extend([
157
269
  "",
@@ -159,4 +271,5 @@ def _build_prompt_footer(chunked: bool = False) -> List[str]:
159
271
  " Start with the main files (analysis.toon, context.md) for overview,",
160
272
  " then examine specific subproject directories as needed.",
161
273
  ])
274
+
162
275
  return lines
@@ -4,7 +4,7 @@ Provides query normalization, intent matching, and entity resolution
4
4
  with multilingual support and fuzzy matching.
5
5
  """
6
6
 
7
- __version__ = "0.5.38"
7
+ __version__ = "0.5.39"
8
8
 
9
9
  from .pipeline import NLPPipeline
10
10
  from .normalization import QueryNormalizer
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: code2llm
3
- Version: 0.5.38
3
+ Version: 0.5.39
4
4
  Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
5
5
  Home-page: https://github.com/wronai/stts
6
6
  Author: STTS Project
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "code2llm"
7
- version = "0.5.38"
7
+ version = "0.5.39"
8
8
  description = "High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
File without changes
File without changes
File without changes
File without changes
File without changes