code2llm 0.5.7__tar.gz → 0.5.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {code2llm-0.5.7 → code2llm-0.5.9}/PKG-INFO +55 -1
- {code2llm-0.5.7 → code2llm-0.5.9}/README.md +53 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/__init__.py +1 -1
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/cli.py +115 -4
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/nlp/__init__.py +1 -1
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm.egg-info/PKG-INFO +55 -1
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm.egg-info/SOURCES.txt +1 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm.egg-info/requires.txt +1 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/pyproject.toml +3 -2
- {code2llm-0.5.7 → code2llm-0.5.9}/setup.py +1 -0
- code2llm-0.5.9/tests/test_prompt_txt.py +141 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/LICENSE +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/__main__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/call_graph.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/cfg.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/coupling.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/data_analysis.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/dfg.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/pipeline_detector.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/side_effects.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/smells.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/analysis/type_inference.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/analyzer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/config.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/core/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/core/file_analyzer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/core/file_cache.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/core/file_filter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/core/refactoring.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/models.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming/cache.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming/incremental.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming/prioritizer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming/scanner.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming/strategies.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/core/streaming_analyzer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/base.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/context_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/evolution_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/flow_constants.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/flow_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/flow_renderer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/json_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/llm_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/map_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/mermaid_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/readme_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/toon/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/toon/helpers.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/toon/metrics.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/toon/module_detail.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/toon/renderer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/toon.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/exporters/yaml_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/generators/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/generators/llm_flow.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/generators/llm_task.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/generators/mermaid.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/nlp/config.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/nlp/entity_resolution.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/nlp/intent_matching.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/nlp/normalization.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/nlp/pipeline.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/patterns/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/patterns/detector.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/refactor/__init__.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm/refactor/prompt_engine.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm.egg-info/dependency_links.txt +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm.egg-info/entry_points.txt +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/code2llm.egg-info/top_level.txt +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/setup.cfg +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_advanced_analysis.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_analyzer.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_deep_analysis.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_edge_cases.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_flow_exporter.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_format_quality.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_nlp_pipeline.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_pipeline_detector.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_prompt_engine.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_refactoring_engine.py +0 -0
- {code2llm-0.5.7 → code2llm-0.5.9}/tests/test_toon_v2.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: code2llm
|
|
3
|
-
Version: 0.5.7
|
|
3
|
+
Version: 0.5.9
|
|
4
4
|
Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
|
|
5
5
|
Home-page: https://github.com/wronai/stts
|
|
6
6
|
Author: STTS Project
|
|
@@ -32,6 +32,7 @@ Requires-Dist: numpy>=1.20
|
|
|
32
32
|
Requires-Dist: jinja2>=3.0
|
|
33
33
|
Requires-Dist: radon>=5.1
|
|
34
34
|
Requires-Dist: astroid>=3.0
|
|
35
|
+
Requires-Dist: code2logic
|
|
35
36
|
Requires-Dist: vulture>=2.10
|
|
36
37
|
Requires-Dist: tiktoken>=0.5
|
|
37
38
|
Requires-Dist: tree-sitter>=0.21
|
|
@@ -63,12 +64,14 @@ When you run `code2llm ./ -f all`, the following files are created:
|
|
|
63
64
|
| `evolution.toon` | **TOON** | **📋 Refactoring queue** - Prioritized improvements | 0 refactoring actions needed |
|
|
64
65
|
| `flow.toon` | **TOON** | **🔄 Data flow analysis** - Pipelines, contracts, types | Data dependencies and side effects |
|
|
65
66
|
| `map.toon` | **TOON** | **🗺️ Structural map** - Modules, imports, signatures | Project architecture overview |
|
|
67
|
+
| `project.toon` | **TOON** | **🧠 Project logic** - Code2Logic compact module view | Generated via code2logic integration |
|
|
66
68
|
|
|
67
69
|
### 🤖 LLM-Ready Documentation
|
|
68
70
|
|
|
69
71
|
| File | Format | Purpose | Use Case |
|
|
70
72
|
|------|--------|---------|----------|
|
|
71
73
|
| `context.md` | **Markdown** | **📖 LLM narrative** - Architecture summary | Paste into ChatGPT/Claude for code analysis |
|
|
74
|
+
| `prompt.txt` | **Text** | **📝 Ready-to-send prompt** - Lists all generated files with instructions | Attach to LLM conversation as context guide |
|
|
72
75
|
| `analysis.yaml` | **YAML** | **📊 Structured data** - Machine-readable | For scripts and automated processing |
|
|
73
76
|
| `analysis.json` | **JSON** | **🔧 API format** - Programmatic access | For integration with other tools |
|
|
74
77
|
|
|
@@ -83,6 +86,22 @@ When you run `code2llm ./ -f all`, the following files are created:
|
|
|
83
86
|
|
|
84
87
|
## 🚀 Quick Start Commands
|
|
85
88
|
|
|
89
|
+
### Complete LLM Analysis (with Code2Logic)
|
|
90
|
+
```bash
|
|
91
|
+
# Generate complete analysis with code2logic integration
|
|
92
|
+
# Creates: analysis.toon, evolution.toon, project.toon, prompt.txt, README.md
|
|
93
|
+
|
|
94
|
+
# Using specific formats (recommended)
|
|
95
|
+
code2llm ./ -f toon,evolution,code2logic -o ./project
|
|
96
|
+
|
|
97
|
+
# Using 'all' format (includes code2logic)
|
|
98
|
+
code2llm ./ -f all -o ./project
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
The `code2logic` format automatically:
|
|
102
|
+
1. Runs `code2logic` tool to generate `project.toon` (compact module view)
|
|
103
|
+
2. Creates `prompt.txt` - ready-to-send prompt listing all generated files
|
|
104
|
+
|
|
86
105
|
### Basic Analysis
|
|
87
106
|
```bash
|
|
88
107
|
# Quick health check (TOON format only)
|
|
@@ -211,6 +230,41 @@ cat context.md | xclip -sel clip # Linux
|
|
|
211
230
|
# Use with Claude/ChatGPT for code analysis
|
|
212
231
|
```
|
|
213
232
|
|
|
233
|
+
### `project.toon` - Project Logic (Code2Logic)
|
|
234
|
+
**Purpose**: Compact module view generated by code2logic integration
|
|
235
|
+
**Key sections**:
|
|
236
|
+
- **Modules list**: All project modules with file sizes
|
|
237
|
+
- **Imports**: Dependency information
|
|
238
|
+
- **Classes/Functions**: Summary counts
|
|
239
|
+
|
|
240
|
+
**When to use**: When you need a lightweight project overview combined with code2llm analysis
|
|
241
|
+
|
|
242
|
+
**Generation**:
|
|
243
|
+
```bash
|
|
244
|
+
# Generated automatically when using -f code2logic
|
|
245
|
+
code2llm ./ -f toon,evolution,code2logic -o ./project
|
|
246
|
+
```
|
|
247
|
+
|
|
248
|
+
### `prompt.txt` - Ready-to-Send LLM Prompt
|
|
249
|
+
**Purpose**: Pre-formatted prompt listing all generated files for LLM conversation
|
|
250
|
+
**Contents**:
|
|
251
|
+
- **Files section**: Lists all existing generated files (analysis.toon, context.md, etc.)
|
|
252
|
+
- **Missing section**: Shows which files weren't generated (if any)
|
|
253
|
+
- **Task section**: Instructions for LLM analysis
|
|
254
|
+
- **Constraints section**: Guidelines for suggested changes
|
|
255
|
+
|
|
256
|
+
**Example usage**:
|
|
257
|
+
```bash
|
|
258
|
+
# View the prompt
|
|
259
|
+
cat project/prompt.txt
|
|
260
|
+
|
|
261
|
+
# Copy to clipboard and paste into ChatGPT/Claude
|
|
262
|
+
cat project/prompt.txt | pbcopy # macOS
|
|
263
|
+
cat project/prompt.txt | xclip -sel clip # Linux
|
|
264
|
+
```
|
|
265
|
+
|
|
266
|
+
**Generated when**: Using `-f code2logic` or `-f all`
|
|
267
|
+
|
|
214
268
|
### Visualization Files (`*.mmd`, `*.png`)
|
|
215
269
|
**Purpose**: Visual understanding of code structure
|
|
216
270
|
**Files**:
|
|
@@ -14,12 +14,14 @@ When you run `code2llm ./ -f all`, the following files are created:
|
|
|
14
14
|
| `evolution.toon` | **TOON** | **📋 Refactoring queue** - Prioritized improvements | 0 refactoring actions needed |
|
|
15
15
|
| `flow.toon` | **TOON** | **🔄 Data flow analysis** - Pipelines, contracts, types | Data dependencies and side effects |
|
|
16
16
|
| `map.toon` | **TOON** | **🗺️ Structural map** - Modules, imports, signatures | Project architecture overview |
|
|
17
|
+
| `project.toon` | **TOON** | **🧠 Project logic** - Code2Logic compact module view | Generated via code2logic integration |
|
|
17
18
|
|
|
18
19
|
### 🤖 LLM-Ready Documentation
|
|
19
20
|
|
|
20
21
|
| File | Format | Purpose | Use Case |
|
|
21
22
|
|------|--------|---------|----------|
|
|
22
23
|
| `context.md` | **Markdown** | **📖 LLM narrative** - Architecture summary | Paste into ChatGPT/Claude for code analysis |
|
|
24
|
+
| `prompt.txt` | **Text** | **📝 Ready-to-send prompt** - Lists all generated files with instructions | Attach to LLM conversation as context guide |
|
|
23
25
|
| `analysis.yaml` | **YAML** | **📊 Structured data** - Machine-readable | For scripts and automated processing |
|
|
24
26
|
| `analysis.json` | **JSON** | **🔧 API format** - Programmatic access | For integration with other tools |
|
|
25
27
|
|
|
@@ -34,6 +36,22 @@ When you run `code2llm ./ -f all`, the following files are created:
|
|
|
34
36
|
|
|
35
37
|
## 🚀 Quick Start Commands
|
|
36
38
|
|
|
39
|
+
### Complete LLM Analysis (with Code2Logic)
|
|
40
|
+
```bash
|
|
41
|
+
# Generate complete analysis with code2logic integration
|
|
42
|
+
# Creates: analysis.toon, evolution.toon, project.toon, prompt.txt, README.md
|
|
43
|
+
|
|
44
|
+
# Using specific formats (recommended)
|
|
45
|
+
code2llm ./ -f toon,evolution,code2logic -o ./project
|
|
46
|
+
|
|
47
|
+
# Using 'all' format (includes code2logic)
|
|
48
|
+
code2llm ./ -f all -o ./project
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
The `code2logic` format automatically:
|
|
52
|
+
1. Runs `code2logic` tool to generate `project.toon` (compact module view)
|
|
53
|
+
2. Creates `prompt.txt` - ready-to-send prompt listing all generated files
|
|
54
|
+
|
|
37
55
|
### Basic Analysis
|
|
38
56
|
```bash
|
|
39
57
|
# Quick health check (TOON format only)
|
|
@@ -162,6 +180,41 @@ cat context.md | xclip -sel clip # Linux
|
|
|
162
180
|
# Use with Claude/ChatGPT for code analysis
|
|
163
181
|
```
|
|
164
182
|
|
|
183
|
+
### `project.toon` - Project Logic (Code2Logic)
|
|
184
|
+
**Purpose**: Compact module view generated by code2logic integration
|
|
185
|
+
**Key sections**:
|
|
186
|
+
- **Modules list**: All project modules with file sizes
|
|
187
|
+
- **Imports**: Dependency information
|
|
188
|
+
- **Classes/Functions**: Summary counts
|
|
189
|
+
|
|
190
|
+
**When to use**: When you need a lightweight project overview combined with code2llm analysis
|
|
191
|
+
|
|
192
|
+
**Generation**:
|
|
193
|
+
```bash
|
|
194
|
+
# Generated automatically when using -f code2logic
|
|
195
|
+
code2llm ./ -f toon,evolution,code2logic -o ./project
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
### `prompt.txt` - Ready-to-Send LLM Prompt
|
|
199
|
+
**Purpose**: Pre-formatted prompt listing all generated files for LLM conversation
|
|
200
|
+
**Contents**:
|
|
201
|
+
- **Files section**: Lists all existing generated files (analysis.toon, context.md, etc.)
|
|
202
|
+
- **Missing section**: Shows which files weren't generated (if any)
|
|
203
|
+
- **Task section**: Instructions for LLM analysis
|
|
204
|
+
- **Constraints section**: Guidelines for suggested changes
|
|
205
|
+
|
|
206
|
+
**Example usage**:
|
|
207
|
+
```bash
|
|
208
|
+
# View the prompt
|
|
209
|
+
cat project/prompt.txt
|
|
210
|
+
|
|
211
|
+
# Copy to clipboard and paste into ChatGPT/Claude
|
|
212
|
+
cat project/prompt.txt | pbcopy # macOS
|
|
213
|
+
cat project/prompt.txt | xclip -sel clip # Linux
|
|
214
|
+
```
|
|
215
|
+
|
|
216
|
+
**Generated when**: Using `-f code2logic` or `-f all`
|
|
217
|
+
|
|
165
218
|
### Visualization Files (`*.mmd`, `*.png`)
|
|
166
219
|
**Purpose**: Visual understanding of code structure
|
|
167
220
|
**Files**:
|
|
@@ -6,6 +6,8 @@ Analyze control flow, data flow, and call graphs of Python codebases.
|
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
8
|
import argparse
|
|
9
|
+
import shutil
|
|
10
|
+
import subprocess
|
|
9
11
|
import sys
|
|
10
12
|
from pathlib import Path
|
|
11
13
|
from typing import List, Optional
|
|
@@ -50,6 +52,7 @@ Format Options (-f):
|
|
|
50
52
|
map — Structural map (map.toon) — modules, imports, signatures
|
|
51
53
|
flow — Data-flow analysis (flow.toon) — pipelines, contracts, types
|
|
52
54
|
context — LLM narrative (context.md) — architecture summary
|
|
55
|
+
code2logic — Generate project logic (project.toon) via external code2logic
|
|
53
56
|
yaml — Standard YAML format
|
|
54
57
|
json — Machine-readable JSON
|
|
55
58
|
mermaid — Flowchart diagrams (flow.mmd, calls.mmd, compact_flow.mmd)
|
|
@@ -86,7 +89,7 @@ Strategy Options (--strategy):
|
|
|
86
89
|
parser.add_argument(
|
|
87
90
|
'-f', '--format',
|
|
88
91
|
default='toon',
|
|
89
|
-
help='Output formats: toon,map,flow,context,yaml,json,mermaid,evolution,png,all (default: toon)'
|
|
92
|
+
help='Output formats: toon,map,flow,context,code2logic,yaml,json,mermaid,evolution,png,all (default: toon)'
|
|
90
93
|
)
|
|
91
94
|
|
|
92
95
|
parser.add_argument(
|
|
@@ -256,7 +259,7 @@ def main():
|
|
|
256
259
|
|
|
257
260
|
# Analyze → Export
|
|
258
261
|
result = _run_analysis(args, source_path, output_dir)
|
|
259
|
-
_run_exports(args, result, output_dir)
|
|
262
|
+
_run_exports(args, result, output_dir, source_path=source_path)
|
|
260
263
|
|
|
261
264
|
if args.verbose:
|
|
262
265
|
print(f"\nAll outputs saved to: {output_dir}")
|
|
@@ -383,11 +386,115 @@ def _export_readme(args, result, output_dir: Path):
|
|
|
383
386
|
print(f" - README (documentation): {filepath}")
|
|
384
387
|
|
|
385
388
|
|
|
386
|
-
def
|
|
389
|
+
def _export_code2logic(args, source_path: Path, output_dir: Path, formats: list[str]) -> None:
|
|
390
|
+
"""Generate project.toon using external code2logic tool."""
|
|
391
|
+
if 'code2logic' not in formats and 'all' not in formats:
|
|
392
|
+
return
|
|
393
|
+
|
|
394
|
+
if shutil.which('code2logic') is None:
|
|
395
|
+
print("Error: requested format 'code2logic' but 'code2logic' executable was not found in PATH.", file=sys.stderr)
|
|
396
|
+
print("Install it with: pip install code2logic --upgrade", file=sys.stderr)
|
|
397
|
+
sys.exit(1)
|
|
398
|
+
|
|
399
|
+
# Align with the user's bash script:
|
|
400
|
+
# code2logic ./ -f toon --compact --name project -o ./project
|
|
401
|
+
cmd = [
|
|
402
|
+
'code2logic', str(source_path),
|
|
403
|
+
'-f', 'toon',
|
|
404
|
+
'--compact',
|
|
405
|
+
'--name', 'project',
|
|
406
|
+
'-o', str(output_dir),
|
|
407
|
+
]
|
|
408
|
+
|
|
409
|
+
try:
|
|
410
|
+
res = subprocess.run(cmd, capture_output=True, text=True)
|
|
411
|
+
except Exception as e:
|
|
412
|
+
print(f"Error running code2logic: {e}", file=sys.stderr)
|
|
413
|
+
sys.exit(1)
|
|
414
|
+
|
|
415
|
+
if res.returncode != 0:
|
|
416
|
+
if res.stdout:
|
|
417
|
+
print(res.stdout, file=sys.stderr)
|
|
418
|
+
if res.stderr:
|
|
419
|
+
print(res.stderr, file=sys.stderr)
|
|
420
|
+
print(f"Error: code2logic failed (exit code {res.returncode}).", file=sys.stderr)
|
|
421
|
+
sys.exit(res.returncode)
|
|
422
|
+
|
|
423
|
+
# Normalize output location to: <output_dir>/project.toon
|
|
424
|
+
candidate_paths = [
|
|
425
|
+
output_dir / 'project.toon',
|
|
426
|
+
output_dir / 'project' / 'project.toon',
|
|
427
|
+
output_dir / 'project.toon.txt',
|
|
428
|
+
]
|
|
429
|
+
found = next((p for p in candidate_paths if p.exists()), None)
|
|
430
|
+
if found is None:
|
|
431
|
+
# If code2logic changes its naming, show its stdout/stderr to help debugging.
|
|
432
|
+
if res.stdout:
|
|
433
|
+
print(res.stdout, file=sys.stderr)
|
|
434
|
+
if res.stderr:
|
|
435
|
+
print(res.stderr, file=sys.stderr)
|
|
436
|
+
print("Error: code2logic completed but project.toon was not found in the output directory.", file=sys.stderr)
|
|
437
|
+
sys.exit(1)
|
|
438
|
+
|
|
439
|
+
target = output_dir / 'project.toon'
|
|
440
|
+
if found != target:
|
|
441
|
+
target.parent.mkdir(parents=True, exist_ok=True)
|
|
442
|
+
shutil.copyfile(found, target)
|
|
443
|
+
|
|
444
|
+
if args.verbose:
|
|
445
|
+
print(f" - CODE2LOGIC (project logic): {target}")
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
def _export_prompt_txt(args, output_dir: Path, formats: list[str]) -> None:
|
|
449
|
+
"""Generate prompt.txt useful to send to an LLM."""
|
|
450
|
+
# Keep it conservative: generate when code2logic is requested.
|
|
451
|
+
if 'code2logic' not in formats and 'all' not in formats:
|
|
452
|
+
return
|
|
453
|
+
|
|
454
|
+
prompt_path = output_dir / 'prompt.txt'
|
|
455
|
+
|
|
456
|
+
files = [
|
|
457
|
+
'analysis.toon',
|
|
458
|
+
'context.md',
|
|
459
|
+
'evolution.toon',
|
|
460
|
+
'project.toon',
|
|
461
|
+
'README.md',
|
|
462
|
+
]
|
|
463
|
+
existing = [f for f in files if (output_dir / f).exists()]
|
|
464
|
+
missing = [f for f in files if (output_dir / f).exists() is False]
|
|
465
|
+
|
|
466
|
+
lines: list[str] = []
|
|
467
|
+
lines.append("You are an AI assistant helping me understand and improve a codebase.")
|
|
468
|
+
lines.append("Use the attached/generated files as the authoritative context.")
|
|
469
|
+
lines.append("")
|
|
470
|
+
lines.append("Files:")
|
|
471
|
+
for f in existing:
|
|
472
|
+
lines.append(f"- {f}")
|
|
473
|
+
if missing:
|
|
474
|
+
lines.append("")
|
|
475
|
+
lines.append("Missing (not generated in this run):")
|
|
476
|
+
for f in missing:
|
|
477
|
+
lines.append(f"- {f}")
|
|
478
|
+
lines.append("")
|
|
479
|
+
lines.append("Task:")
|
|
480
|
+
lines.append("- Summarize the architecture and main flows.")
|
|
481
|
+
lines.append("- Identify the highest-risk areas and propose a refactoring plan.")
|
|
482
|
+
lines.append("- If you suggest changes, keep behavior backward compatible and provide concrete steps.")
|
|
483
|
+
lines.append("")
|
|
484
|
+
lines.append("Constraints:")
|
|
485
|
+
lines.append("- Prefer minimal, incremental changes.")
|
|
486
|
+
lines.append("- If uncertain, ask clarifying questions.")
|
|
487
|
+
|
|
488
|
+
prompt_path.write_text("\n".join(lines) + "\n", encoding='utf-8')
|
|
489
|
+
if args.verbose:
|
|
490
|
+
print(f" - PROMPT: {prompt_path}")
|
|
491
|
+
|
|
492
|
+
|
|
493
|
+
def _run_exports(args, result, output_dir: Path, source_path: Optional[Path] = None):
|
|
387
494
|
"""Export analysis results in requested formats."""
|
|
388
495
|
formats = [f.strip() for f in args.format.split(',')]
|
|
389
496
|
if 'all' in formats:
|
|
390
|
-
formats = ['toon', 'map', 'flow', 'context', 'yaml', 'json', 'mermaid', 'evolution']
|
|
497
|
+
formats = ['toon', 'map', 'flow', 'context', 'code2logic', 'yaml', 'json', 'mermaid', 'evolution']
|
|
391
498
|
|
|
392
499
|
try:
|
|
393
500
|
_export_simple_formats(args, result, output_dir, formats)
|
|
@@ -398,6 +505,10 @@ def _run_exports(args, result, output_dir: Path):
|
|
|
398
505
|
_export_evolution(args, result, output_dir)
|
|
399
506
|
_export_data_structures(args, result, output_dir)
|
|
400
507
|
_export_context_fallback(args, result, output_dir, formats)
|
|
508
|
+
|
|
509
|
+
if source_path is not None:
|
|
510
|
+
_export_code2logic(args, source_path, output_dir, formats)
|
|
511
|
+
_export_prompt_txt(args, output_dir, formats)
|
|
401
512
|
|
|
402
513
|
if args.refactor:
|
|
403
514
|
_export_refactor_prompts(args, result, output_dir)
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: code2llm
|
|
3
|
-
Version: 0.5.
|
|
3
|
+
Version: 0.5.9
|
|
4
4
|
Summary: High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries
|
|
5
5
|
Home-page: https://github.com/wronai/stts
|
|
6
6
|
Author: STTS Project
|
|
@@ -32,6 +32,7 @@ Requires-Dist: numpy>=1.20
|
|
|
32
32
|
Requires-Dist: jinja2>=3.0
|
|
33
33
|
Requires-Dist: radon>=5.1
|
|
34
34
|
Requires-Dist: astroid>=3.0
|
|
35
|
+
Requires-Dist: code2logic
|
|
35
36
|
Requires-Dist: vulture>=2.10
|
|
36
37
|
Requires-Dist: tiktoken>=0.5
|
|
37
38
|
Requires-Dist: tree-sitter>=0.21
|
|
@@ -63,12 +64,14 @@ When you run `code2llm ./ -f all`, the following files are created:
|
|
|
63
64
|
| `evolution.toon` | **TOON** | **📋 Refactoring queue** - Prioritized improvements | 0 refactoring actions needed |
|
|
64
65
|
| `flow.toon` | **TOON** | **🔄 Data flow analysis** - Pipelines, contracts, types | Data dependencies and side effects |
|
|
65
66
|
| `map.toon` | **TOON** | **🗺️ Structural map** - Modules, imports, signatures | Project architecture overview |
|
|
67
|
+
| `project.toon` | **TOON** | **🧠 Project logic** - Code2Logic compact module view | Generated via code2logic integration |
|
|
66
68
|
|
|
67
69
|
### 🤖 LLM-Ready Documentation
|
|
68
70
|
|
|
69
71
|
| File | Format | Purpose | Use Case |
|
|
70
72
|
|------|--------|---------|----------|
|
|
71
73
|
| `context.md` | **Markdown** | **📖 LLM narrative** - Architecture summary | Paste into ChatGPT/Claude for code analysis |
|
|
74
|
+
| `prompt.txt` | **Text** | **📝 Ready-to-send prompt** - Lists all generated files with instructions | Attach to LLM conversation as context guide |
|
|
72
75
|
| `analysis.yaml` | **YAML** | **📊 Structured data** - Machine-readable | For scripts and automated processing |
|
|
73
76
|
| `analysis.json` | **JSON** | **🔧 API format** - Programmatic access | For integration with other tools |
|
|
74
77
|
|
|
@@ -83,6 +86,22 @@ When you run `code2llm ./ -f all`, the following files are created:
|
|
|
83
86
|
|
|
84
87
|
## 🚀 Quick Start Commands
|
|
85
88
|
|
|
89
|
+
### Complete LLM Analysis (with Code2Logic)
|
|
90
|
+
```bash
|
|
91
|
+
# Generate complete analysis with code2logic integration
|
|
92
|
+
# Creates: analysis.toon, evolution.toon, project.toon, prompt.txt, README.md
|
|
93
|
+
|
|
94
|
+
# Using specific formats (recommended)
|
|
95
|
+
code2llm ./ -f toon,evolution,code2logic -o ./project
|
|
96
|
+
|
|
97
|
+
# Using 'all' format (includes code2logic)
|
|
98
|
+
code2llm ./ -f all -o ./project
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
The `code2logic` format automatically:
|
|
102
|
+
1. Runs `code2logic` tool to generate `project.toon` (compact module view)
|
|
103
|
+
2. Creates `prompt.txt` - ready-to-send prompt listing all generated files
|
|
104
|
+
|
|
86
105
|
### Basic Analysis
|
|
87
106
|
```bash
|
|
88
107
|
# Quick health check (TOON format only)
|
|
@@ -211,6 +230,41 @@ cat context.md | xclip -sel clip # Linux
|
|
|
211
230
|
# Use with Claude/ChatGPT for code analysis
|
|
212
231
|
```
|
|
213
232
|
|
|
233
|
+
### `project.toon` - Project Logic (Code2Logic)
|
|
234
|
+
**Purpose**: Compact module view generated by code2logic integration
|
|
235
|
+
**Key sections**:
|
|
236
|
+
- **Modules list**: All project modules with file sizes
|
|
237
|
+
- **Imports**: Dependency information
|
|
238
|
+
- **Classes/Functions**: Summary counts
|
|
239
|
+
|
|
240
|
+
**When to use**: When you need a lightweight project overview combined with code2llm analysis
|
|
241
|
+
|
|
242
|
+
**Generation**:
|
|
243
|
+
```bash
|
|
244
|
+
# Generated automatically when using -f code2logic
|
|
245
|
+
code2llm ./ -f toon,evolution,code2logic -o ./project
|
|
246
|
+
```
|
|
247
|
+
|
|
248
|
+
### `prompt.txt` - Ready-to-Send LLM Prompt
|
|
249
|
+
**Purpose**: Pre-formatted prompt listing all generated files for LLM conversation
|
|
250
|
+
**Contents**:
|
|
251
|
+
- **Files section**: Lists all existing generated files (analysis.toon, context.md, etc.)
|
|
252
|
+
- **Missing section**: Shows which files weren't generated (if any)
|
|
253
|
+
- **Task section**: Instructions for LLM analysis
|
|
254
|
+
- **Constraints section**: Guidelines for suggested changes
|
|
255
|
+
|
|
256
|
+
**Example usage**:
|
|
257
|
+
```bash
|
|
258
|
+
# View the prompt
|
|
259
|
+
cat project/prompt.txt
|
|
260
|
+
|
|
261
|
+
# Copy to clipboard and paste into ChatGPT/Claude
|
|
262
|
+
cat project/prompt.txt | pbcopy # macOS
|
|
263
|
+
cat project/prompt.txt | xclip -sel clip # Linux
|
|
264
|
+
```
|
|
265
|
+
|
|
266
|
+
**Generated when**: Using `-f code2logic` or `-f all`
|
|
267
|
+
|
|
214
268
|
### Visualization Files (`*.mmd`, `*.png`)
|
|
215
269
|
**Purpose**: Visual understanding of code structure
|
|
216
270
|
**Files**:
|
|
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|
|
4
4
|
|
|
5
5
|
[project]
|
|
6
6
|
name = "code2llm"
|
|
7
|
-
version = "0.5.
|
|
7
|
+
version = "0.5.9"
|
|
8
8
|
description = "High-performance Python code flow analysis with optimized TOON format - CFG, DFG, call graphs, and intelligent code queries"
|
|
9
9
|
readme = "README.md"
|
|
10
10
|
requires-python = ">=3.8"
|
|
@@ -46,6 +46,7 @@ dependencies = [
|
|
|
46
46
|
"jinja2>=3.0",
|
|
47
47
|
"radon>=5.1",
|
|
48
48
|
"astroid>=3.0",
|
|
49
|
+
"code2logic",
|
|
49
50
|
"vulture>=2.10",
|
|
50
51
|
"tiktoken>=0.5",
|
|
51
52
|
"tree-sitter>=0.21",
|
|
@@ -74,7 +75,7 @@ line-length = 100
|
|
|
74
75
|
target-version = ['py38']
|
|
75
76
|
|
|
76
77
|
[tool.mypy]
|
|
77
|
-
python_version = "0.5.
|
|
78
|
+
python_version = "0.5.8"
|
|
78
79
|
ignore_missing_imports = true
|
|
79
80
|
|
|
80
81
|
[tool.pytest.ini_options]
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
"""Tests for prompt.txt generation functionality."""
|
|
2
|
+
|
|
3
|
+
import pytest
|
|
4
|
+
import tempfile
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from unittest.mock import MagicMock
|
|
7
|
+
|
|
8
|
+
from code2llm.cli import _export_prompt_txt
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class TestPromptTxtGeneration:
|
|
12
|
+
"""Test the _export_prompt_txt function that generates prompt.txt for LLM."""
|
|
13
|
+
|
|
14
|
+
@pytest.fixture
|
|
15
|
+
def temp_output_dir(self):
|
|
16
|
+
"""Create temporary output directory."""
|
|
17
|
+
with tempfile.TemporaryDirectory() as tmpdir:
|
|
18
|
+
yield Path(tmpdir)
|
|
19
|
+
|
|
20
|
+
@pytest.fixture
|
|
21
|
+
def mock_args(self):
|
|
22
|
+
"""Create mock args object with verbose flag."""
|
|
23
|
+
args = MagicMock()
|
|
24
|
+
args.verbose = True
|
|
25
|
+
return args
|
|
26
|
+
|
|
27
|
+
def test_prompt_txt_not_generated_without_code2logic_format(self, temp_output_dir, mock_args):
|
|
28
|
+
"""Test that prompt.txt is NOT generated when code2logic is not in formats."""
|
|
29
|
+
formats = ['toon', 'evolution']
|
|
30
|
+
|
|
31
|
+
_export_prompt_txt(mock_args, temp_output_dir, formats)
|
|
32
|
+
|
|
33
|
+
prompt_file = temp_output_dir / 'prompt.txt'
|
|
34
|
+
assert not prompt_file.exists(), "prompt.txt should not be generated without code2logic format"
|
|
35
|
+
|
|
36
|
+
def test_prompt_txt_generated_with_code2logic_format(self, temp_output_dir, mock_args):
|
|
37
|
+
"""Test that prompt.txt IS generated when code2logic is in formats."""
|
|
38
|
+
formats = ['toon', 'evolution', 'code2logic']
|
|
39
|
+
|
|
40
|
+
# Create some existing files
|
|
41
|
+
(temp_output_dir / 'analysis.toon').write_text('test')
|
|
42
|
+
(temp_output_dir / 'context.md').write_text('test')
|
|
43
|
+
|
|
44
|
+
_export_prompt_txt(mock_args, temp_output_dir, formats)
|
|
45
|
+
|
|
46
|
+
prompt_file = temp_output_dir / 'prompt.txt'
|
|
47
|
+
assert prompt_file.exists(), "prompt.txt should be generated with code2logic format"
|
|
48
|
+
|
|
49
|
+
def test_prompt_txt_generated_with_all_format(self, temp_output_dir, mock_args):
|
|
50
|
+
"""Test that prompt.txt IS generated when 'all' is in formats."""
|
|
51
|
+
formats = ['all']
|
|
52
|
+
|
|
53
|
+
_export_prompt_txt(mock_args, temp_output_dir, formats)
|
|
54
|
+
|
|
55
|
+
prompt_file = temp_output_dir / 'prompt.txt'
|
|
56
|
+
assert prompt_file.exists(), "prompt.txt should be generated with 'all' format"
|
|
57
|
+
|
|
58
|
+
def test_prompt_txt_lists_existing_files(self, temp_output_dir, mock_args):
|
|
59
|
+
"""Test that prompt.txt correctly lists existing files."""
|
|
60
|
+
formats = ['code2logic']
|
|
61
|
+
|
|
62
|
+
# Create some files that should be detected
|
|
63
|
+
expected_files = ['analysis.toon', 'context.md']
|
|
64
|
+
for f in expected_files:
|
|
65
|
+
(temp_output_dir / f).write_text('test content')
|
|
66
|
+
|
|
67
|
+
_export_prompt_txt(mock_args, temp_output_dir, formats)
|
|
68
|
+
|
|
69
|
+
prompt_file = temp_output_dir / 'prompt.txt'
|
|
70
|
+
content = prompt_file.read_text()
|
|
71
|
+
|
|
72
|
+
# Check that existing files are listed
|
|
73
|
+
for f in expected_files:
|
|
74
|
+
assert f"- {f}" in content, f"Existing file {f} should be listed in prompt.txt"
|
|
75
|
+
|
|
76
|
+
# Check that missing files are marked
|
|
77
|
+
assert "Missing" in content or "project.toon" in content, "Missing files should be indicated"
|
|
78
|
+
|
|
79
|
+
def test_prompt_txt_shows_missing_files(self, temp_output_dir, mock_args):
    """Test that prompt.txt shows missing files section when files don't exist."""
    # Deliberately create no artifacts, so every expected file is absent.
    _export_prompt_txt(mock_args, temp_output_dir, ['code2logic'])

    text = (temp_output_dir / 'prompt.txt').read_text()

    assert "Missing" in text, "Missing section should be present when files don't exist"
    assert "analysis.toon" in text, "Missing files should be listed"
def test_prompt_txt_contains_task_instructions(self, temp_output_dir, mock_args):
    """Test that prompt.txt contains task instructions for LLM."""
    _export_prompt_txt(mock_args, temp_output_dir, ['code2logic'])

    text = (temp_output_dir / 'prompt.txt').read_text()

    # The prompt is expected to carry three labelled sections.
    for heading in ("Files:", "Task:", "Constraints:"):
        assert heading in text, f"{heading[:-1]} section should be present"
def test_prompt_txt_content_structure(self, temp_output_dir, mock_args):
    """Test the overall structure of generated prompt.txt."""
    # Create every artifact the exporter knows about.
    all_files = ['analysis.toon', 'context.md', 'evolution.toon', 'project.toon', 'README.md']
    for name in all_files:
        (temp_output_dir / name).write_text('test')

    _export_prompt_txt(mock_args, temp_output_dir, ['code2logic'])

    content = (temp_output_dir / 'prompt.txt').read_text()
    prompt_lines = content.splitlines()

    # The preamble should frame the LLM's role and the context it receives.
    assert any("AI assistant" in line or "helping me" in line for line in prompt_lines), \
        "Prompt should mention AI assistant"
    assert any("authoritative context" in line for line in prompt_lines), \
        "Prompt should mention authoritative context"

    # With every artifact present there must be no "Missing" section,
    # and each artifact should be listed as a bullet entry.
    assert "Missing" not in content, "No missing section when all files exist"
    for name in all_files:
        assert f"- {name}" in content, f"All files should be listed: {name}"
def test_prompt_txt_no_verbose_output(self, temp_output_dir):
    """Test that no print occurs when verbose is False.

    The original version only checked that the call did not raise; it never
    verified the "no print" claim in its own name. Capture stdout so the
    assertion actually matches the test's intent.
    """
    import contextlib
    import io

    args = MagicMock()
    args.verbose = False
    formats = ['code2logic']

    # Capture stdout around the call so we can assert nothing was printed.
    captured = io.StringIO()
    with contextlib.redirect_stdout(captured):
        _export_prompt_txt(args, temp_output_dir, formats)

    assert captured.getvalue() == "", "Nothing should be printed when verbose is False"
    assert (temp_output_dir / 'prompt.txt').exists()
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|